4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2012, 2015, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
33 * implementation of iam format for fixed size records, variable sized keys.
35 * Author: Nikita Danilov <nikita@clusterfs.com>
38 #include <linux/types.h>
39 #include "osd_internal.h"
46 /* This is duplicated in lustre/utils/create_iam.c */
/* Magic stamped into the header of every lvar leaf block. */
47 IAM_LVAR_LEAF_MAGIC = 0x1973
50 /* This is duplicated in lustre/utils/create_iam.c */
/*
 * On-disk header at the start of each lvar leaf block.
 * All fields are little-endian (__le16).
 */
51 struct lvar_leaf_header {
52 __le16 vlh_magic; /* magic number IAM_LVAR_LEAF_MAGIC */
53 __le16 vlh_used; /* used bytes, including header */
57  * Format of leaf entry:
63  * Entries are ordered in key order.
66 /* This is duplicated in lustre/utils/create_iam.c */
/* Hash of a variable-length key; used as the fixed-size index key. */
67 typedef u32 lvar_hash_t;
69 /* This is duplicated in lustre/utils/create_iam.c */
/* On-disk leaf entry: hash, key size, then key bytes followed by the
 * record (see e_rec() below for the record's placement). */
70 struct lvar_leaf_entry {
/* Byte distance between two pointers as a plain integer. */
76 #define PDIFF(ptr0, ptr1) (((char *)(ptr0)) - ((char *)(ptr1)))
/* Block size of the filesystem backing this container's inode; one leaf
 * occupies exactly one block. */
79 static inline int blocksize(const struct iam_leaf *leaf)
81 return iam_leaf_container(leaf)->ic_object->i_sb->s_blocksize;
/* View an opaque iam_key as a character string.  Body not visible in
 * this chunk -- presumably a plain cast; callers apply strlen() to the
 * result, so keys are assumed NUL-terminated (TODO confirm). */
84 static inline const char *kchar(const struct iam_key *key)
/* Cast helpers between generic iam_lentry and the lvar entry layout. */
89 static inline struct iam_lentry *lvar_lentry(const struct lvar_leaf_entry *ent)
91 return (struct iam_lentry *)ent;
94 static inline struct lvar_leaf_entry *lentry_lvar(const struct iam_lentry *lent)
96 return (struct lvar_leaf_entry *)lent;
/* Key length in bytes of an entry (stored little-endian on disk). */
100 static inline int e_keysize(const struct lvar_leaf_entry *ent)
102 return le16_to_cpu(ent->vle_keysize);
/* LVAR_PAD must be a power of two (enforced by the BUILD_BUG_ON in
 * getsize()); LVAR_ROUND is the corresponding rounding mask. */
105 /* This is duplicated in lustre/utils/create_iam.c */
108 LVAR_ROUND = LVAR_PAD - 1
/* Total on-disk size of an entry for a name of @namelen bytes and a
 * record of @recsize bytes, rounded up to a multiple of LVAR_PAD. */
111 static inline int getsize(const struct iam_leaf *leaf, int namelen, int recsize)
113 BUILD_BUG_ON((LVAR_PAD & (LVAR_PAD - 1)));
115 return (offsetof(struct lvar_leaf_entry, vle_key) +
116 namelen + recsize + LVAR_ROUND) & ~LVAR_ROUND;
/* A record carries its own size in its first byte. */
119 static inline int rec_size(const struct iam_rec *rec)
121 return *(const char *)rec;
/* Record of an entry: placed immediately after the variable-size key. */
124 static inline struct iam_rec *e_rec(const struct lvar_leaf_entry *ent)
126 return ((void *)ent) +
127 offsetof(struct lvar_leaf_entry, vle_key) + e_keysize(ent);
/* Full padded size of an existing entry, from its key and record sizes. */
130 static inline int e_size(const struct iam_leaf *leaf,
131 const struct lvar_leaf_entry *ent)
133 return getsize(leaf, e_keysize(ent), rec_size(e_rec(ent)));
/* Key bytes of an entry, as char* and as iam_key* views. */
136 static inline char *e_char(const struct lvar_leaf_entry *ent)
138 return (char *)&ent->vle_key;
141 static inline struct iam_key *e_key(const struct lvar_leaf_entry *ent)
143 return (struct iam_key *)e_char(ent);
/* Hash field of an entry (stored little-endian on disk). */
146 static inline lvar_hash_t e_hash(const struct lvar_leaf_entry *ent)
148 return le32_to_cpu(ent->vle_hash);
/* Entries are packed back to back: the next entry starts e_size() bytes
 * after the current one. */
151 static inline struct lvar_leaf_entry *e_next(const struct iam_leaf *leaf,
152 const struct lvar_leaf_entry *ent)
154 return ((void *)ent) + e_size(leaf, ent);
/* Hash flavour selection: with these constants only the TEA variant is
 * active; SANDWICH/R5/PREFIX branches are compiled out (constant 0). */
157 #define LVAR_HASH_SANDWICH (0)
158 #define LVAR_HASH_TEA (1)
159 #define LVAR_HASH_R5 (0)
160 #define LVAR_HASH_PREFIX (0)
/* Compatibility shim: newer ldiskfs passes the directory inode to
 * ldiskfsfs_dirhash(); older versions omit it. */
162 #ifdef HAVE_LDISKFSFS_DIRHASH_WITH_DIR
163 #define e_ldiskfsfs_dirhash(dir, name, len, info) \
164 ldiskfsfs_dirhash((dir), (name), (len), (info))
166 #define e_ldiskfsfs_dirhash(dir, name, len, info) \
167 ldiskfsfs_dirhash((name), (len), (info))
/*
 * Raw hash of @name within directory @dir.  "." and ".." are
 * special-cased (their return values are on lines not visible in this
 * chunk); other names go through the ldiskfs TEA directory hash.  The
 * SANDWICH variant (disabled above) would splice two hash flavours.
 */
170 static u32 hash_build0(const struct inode *dir, const char *name, int namelen)
176 if (strncmp(name, ".", 1) == 0 && namelen == 1)
178 if (strncmp(name, "..", 2) == 0 && namelen == 2)
181 if (LVAR_HASH_PREFIX) {
/* Prefix flavour: first bytes of the name used directly as the hash. */
183 strncpy((void *)&result,
184 name, min_t(int, namelen, sizeof(result)));
186 struct ldiskfs_dx_hash_info hinfo;
188 hinfo.hash_version = LDISKFS_DX_HASH_TEA;
190 e_ldiskfsfs_dirhash(dir, name, namelen, &hinfo);
192 if (LVAR_HASH_SANDWICH) {
195 hinfo.hash_version = LDISKFS_DX_HASH_TEA;
197 e_ldiskfsfs_dirhash(dir, name, namelen, &hinfo);
198 result2 = hinfo.hash;
/* Top 6 bits from the second flavour, low 26 bits from the first. */
199 result = (0xfc000000 & result2) | (0x03ffffff & result);
/* Hashes in [MAX_SIZE - GRAY_AREA, MAX_SIZE] are remapped by
 * hash_build() below; HASH_MAX_SIZE keeps results in positive-s32
 * range. */
206 HASH_GRAY_AREA = 1024,
207 HASH_MAX_SIZE = 0x7fffffffUL
/*
 * Final hash: shift the raw hash left one bit -- the least significant
 * bit is reserved as a hash-collision marker (see the comment in
 * n_invariant()) -- then fold the topmost "gray area" back down so the
 * largest hash values are never produced.
 */
210 static u32 hash_build(const struct inode *dir, const char *name, int namelen)
214 hash = (hash_build0(dir, name, namelen) << 1) & HASH_MAX_SIZE;
215 if (hash > HASH_MAX_SIZE - HASH_GRAY_AREA)
216 hash &= HASH_GRAY_AREA - 1;
/* Public entry point: hash of @name within directory @dir. */
220 static inline lvar_hash_t get_hash(const struct inode *dir,
221 const char *name, int namelen)
223 return hash_build(dir, name, namelen);
/* Hash of @name for the directory object that @leaf belongs to. */
226 static inline lvar_hash_t iam_get_hash(const struct iam_leaf *leaf,
227 const char *name, int namelen)
229 struct iam_path *iam_path = iam_leaf_path(leaf);
231 return get_hash(iam_path_obj(iam_path), name, namelen);
/* True iff the entry's key equals @name (same length and bytes). */
234 static inline int e_eq(const struct lvar_leaf_entry *ent,
235 const char *name, int namelen)
237 return namelen == e_keysize(ent) && !memcmp(e_char(ent), name, namelen);
/* Three-way comparison of the entry's hash against @hash. */
240 static inline int e_cmp(const struct iam_leaf *leaf,
241 const struct lvar_leaf_entry *ent, lvar_hash_t hash)
246 return ehash == hash ? 0 : (ehash < hash ? -1 : 1);
/* Leaf header lives at the very start of the leaf's buffer. */
249 static struct lvar_leaf_header *n_head(const struct iam_leaf *l)
251 return (struct lvar_leaf_header *)l->il_bh->b_data;
/* Bytes currently used in the leaf, header included. */
254 static int h_used(const struct lvar_leaf_header *hdr)
256 return le16_to_cpu(hdr->vlh_used);
/* Adjust the used-bytes counter by @adj (may be negative); the result
 * must stay between the header size and the block size. */
259 static void h_used_adj(const struct iam_leaf *leaf,
260 struct lvar_leaf_header *hdr, int adj)
264 used = h_used(hdr) + adj;
265 assert_corr(sizeof(*hdr) <= used && used <= blocksize(leaf));
266 hdr->vlh_used = cpu_to_le16(used);
/* First entry: immediately after the header. */
269 static struct lvar_leaf_entry *n_start(const struct iam_leaf *leaf)
271 return (void *)leaf->il_bh->b_data + sizeof(struct lvar_leaf_header);
/* One past the last entry, as defined by the used-bytes counter. */
274 static struct lvar_leaf_entry *n_end(const struct iam_leaf *l)
276 return (void *)l->il_bh->b_data + h_used(n_head(l));
/* Current cursor position within the leaf. */
279 static struct lvar_leaf_entry *n_cur(const struct iam_leaf *l)
281 return lentry_lvar(l->il_at);
/* Debug-only: the cursor points at a valid entry inside the leaf. */
284 #if LDISKFS_CORRECTNESS_ON
285 static int n_at_rec(const struct iam_leaf *folio)
287 return n_start(folio) <= lentry_lvar(folio->il_at) &&
288 lentry_lvar(folio->il_at) < n_end(folio);
291 #if LDISKFS_INVARIANT_ON
/*
 * Debug-only leaf invariant: the used size fits within the block, every
 * entry's stored hash matches the hash recomputed from its key, and the
 * hashes form a non-decreasing sequence starting from the delimiting key
 * taken from the parent index node.  (Several checks fall on lines not
 * visible in this chunk.)
 */
292 static int n_invariant(const struct iam_leaf *leaf)
294 struct iam_path *path;
296 struct lvar_leaf_entry *scan;
297 struct lvar_leaf_entry *end;
299 lvar_hash_t nexthash;
300 lvar_hash_t starthash;
304 path = leaf->il_path;
306 if (h_used(n_head(leaf)) > blocksize(leaf))
309 dir = iam_path_obj(iam_path);
311 * Delimiting key in the parent index node. Clear least bit to account
312 * for hash collision marker.
314 starthash = *(lvar_hash_t *)iam_ikey_at(path, path->ip_frame->at) & ~1;
315 for (scan = n_start(leaf); scan < end; scan = e_next(leaf, scan)) {
316 nexthash = e_hash(scan);
/* Stored hash must match the hash of the stored key bytes. */
317 if (nexthash != get_hash(dir, e_char(scan), e_keysize(scan))) {
/* Hash ordering must be monotone across the leaf. */
321 if (nexthash < hash) {
333 /* LDISKFS_INVARIANT_ON */
336 /* LDISKFS_CORRECTNESS_ON */
/* Copy the current entry's index key (its hash) into @key. */
339 static struct iam_ikey *lvar_ikey(const struct iam_leaf *l,
340 struct iam_ikey *key)
344 assert_corr(n_at_rec(l));
347 *hash = e_hash(n_cur(l));
/* Key of the current entry (in-place, no copy). */
351 static struct iam_key *lvar_key(const struct iam_leaf *l)
353 return e_key(n_cur(l));
/* Key length of the current entry. */
356 static int lvar_key_size(const struct iam_leaf *l)
358 return e_keysize(n_cur(l));
/* Rewind the cursor to the first entry in the leaf. */
361 static void lvar_start(struct iam_leaf *l)
363 l->il_at = lvar_lentry(n_start(l));
/*
 * Attach to a freshly read leaf block: validate the magic and the
 * used-bytes counter, then position the cursor at the first entry.
 * On a bad header the offending values are logged (the error-path
 * return is on lines not visible in this chunk).
 */
366 static int lvar_init(struct iam_leaf *l)
370 struct lvar_leaf_header *head;
372 assert_corr(l->il_bh != NULL);
376 if (le16_to_cpu(head->vlh_magic) == IAM_LVAR_LEAF_MAGIC &&
377 used <= blocksize(l)) {
378 l->il_at = l->il_entries = lvar_lentry(n_start(l));
383 obj = iam_leaf_container(l)->ic_object;
385 "Bad magic in node %llu (#%lu): %#x != %#x or wrong used: %d\n",
386 (unsigned long long)l->il_bh->b_blocknr, obj->i_ino,
387 le16_to_cpu(head->vlh_magic), IAM_LVAR_LEAF_MAGIC,
/* Detach from the leaf: clear cursor and entry pointers. */
394 static void lvar_fini(struct iam_leaf *l)
396 l->il_entries = l->il_at = NULL;
/* Record of the entry under the cursor. */
399 static struct iam_rec *lvar_rec(const struct iam_leaf *l)
401 assert_corr(n_at_rec(l));
402 return e_rec(n_cur(l));
/* Advance the cursor to the next entry; leaf lock must be held. */
405 static void lvar_next(struct iam_leaf *l)
407 assert_corr(n_at_rec(l));
408 assert_corr(iam_leaf_is_locked(l));
409 l->il_at = lvar_lentry(e_next(l, n_cur(l)));
/*
 * Look up name @k in the leaf by scanning entries in hash order.
 * Returns IAM_LOOKUP_EXACT on a byte-wise key match; otherwise leaves
 * the cursor at the closest entry and returns IAM_LOOKUP_BEFORE (key
 * hashes below everything in the leaf) or IAM_LOOKUP_OK, OR-ing in
 * IAM_LOOKUP_LAST where applicable.
 */
412 static int lvar_lookup(struct iam_leaf *leaf, const struct iam_key *k)
414 struct lvar_leaf_entry *found;
415 struct lvar_leaf_entry *scan;
416 struct lvar_leaf_entry *end;
424 assert_inv(n_invariant(leaf));
428 namelen = strlen(name);
429 hash = iam_get_hash(leaf, name, namelen);
434 for (scan = n_start(leaf); scan < end; scan = e_next(leaf, scan)) {
435 lvar_hash_t scan_hash;
437 scan_hash = e_hash(scan);
438 if (scan_hash < hash)
440 else if (scan_hash == hash) {
/* Equal hash: confirm with a full key comparison (collisions are
 * possible). */
441 if (e_eq(scan, name, namelen)) {
445 leaf->il_at = lvar_lentry(scan);
446 return IAM_LOOKUP_EXACT;
447 } else if (!found_equal) {
458 * @k is less than all hashes in the leaf.
461 result = IAM_LOOKUP_BEFORE;
463 leaf->il_at = lvar_lentry(found);
464 result = IAM_LOOKUP_OK;
465 assert_corr(n_at_rec(leaf));
468 result |= IAM_LOOKUP_LAST;
469 assert_inv(n_invariant(leaf));
/*
 * Look up index key (hash) @ik; positions the cursor at the last entry
 * with hash <= @ik.  IAM_LOOKUP_EXACT on a hash match,
 * IAM_LOOKUP_BEFORE when @ik precedes the first entry, IAM_LOOKUP_OK
 * otherwise.
 */
474 static int lvar_ilookup(struct iam_leaf *leaf, const struct iam_ikey *ik)
476 struct lvar_leaf_entry *scan;
477 struct lvar_leaf_entry *end;
480 assert_inv(n_invariant(leaf));
482 hash = *(const lvar_hash_t *)ik;
485 for (scan = n_start(leaf); scan < end; scan = e_next(leaf, scan)) {
486 lvar_hash_t scan_hash;
488 scan_hash = e_hash(scan);
489 if (scan_hash > hash)
490 return scan == n_start(leaf) ?
491 IAM_LOOKUP_BEFORE : IAM_LOOKUP_OK;
492 leaf->il_at = lvar_lentry(scan);
493 if (scan_hash == hash)
494 return IAM_LOOKUP_EXACT;
496 assert_inv(n_invariant(leaf));
498 * @ik is greater than any key in the node. Return last record in the
501 return IAM_LOOKUP_OK;
/* Overwrite the current entry's key bytes; the new key must have the
 * same length (asserted by the checked wrapper below). */
504 static void __lvar_key_set(struct iam_leaf *l, const struct iam_key *k)
506 memcpy(e_key(n_cur(l)), k, e_keysize(n_cur(l)));
/* Checked key update: same-length key, valid cursor, leaf locked. */
509 static void lvar_key_set(struct iam_leaf *l, const struct iam_key *k)
511 assert_corr(n_at_rec(l));
512 assert_corr(strlen(kchar(k)) == e_keysize(n_cur(l)));
513 assert_corr(iam_leaf_is_locked(l));
514 __lvar_key_set(l, k);
515 assert_inv(n_invariant(l));
/* Compare the current entry against @k by hash only (not by bytes). */
518 static int lvar_key_cmp(const struct iam_leaf *l, const struct iam_key *k)
525 hash = iam_get_hash(l, name, strlen(name));
526 return e_cmp(l, n_cur(l), hash);
/* Byte-wise equality of the current entry's key with @k. */
529 static int lvar_key_eq(const struct iam_leaf *l, const struct iam_key *k)
534 return e_eq(n_cur(l), name, strlen(name));
/* Overwrite the current entry's record; the record's own first byte
 * supplies the copy length (see rec_size()). */
537 static void __lvar_rec_set(struct iam_leaf *l, const struct iam_rec *r)
539 memcpy(e_rec(n_cur(l)), r, rec_size(r));
/* Checked record update: valid cursor, leaf locked. */
542 static void lvar_rec_set(struct iam_leaf *l, const struct iam_rec *r)
544 assert_corr(n_at_rec(l));
545 assert_corr(iam_leaf_is_locked(l));
546 __lvar_rec_set(l, r);
547 assert_inv(n_invariant(l));
/* Records are equal iff both size bytes and payload bytes match. */
550 static int lvar_rec_eq(const struct iam_leaf *l, const struct iam_rec *r)
552 struct iam_rec *rec = e_rec(n_cur(l));
554 if (rec_size(rec) != rec_size(r))
556 return !memcmp(rec, r, rec_size(r));
/* Copy the current entry's record out into caller-provided @r. */
559 static void lvar_rec_get(const struct iam_leaf *l, struct iam_rec *r)
563 rec = e_rec(n_cur(l));
564 assert_corr(n_at_rec(l));
565 assert_corr(iam_leaf_is_locked(l));
566 memcpy(r, rec, rec_size(rec));
567 assert_inv(n_invariant(l));
/* True iff an entry for (@k, @r), padded per getsize(), still fits in
 * this leaf block. */
570 static int lvar_can_add(const struct iam_leaf *l,
571 const struct iam_key *k, const struct iam_rec *r)
573 assert_corr(iam_leaf_is_locked(l));
574 return h_used(n_head(l)) +
575 getsize(l, strlen(kchar(k)), rec_size(r)) <= blocksize(l);
/* Cursor is positioned one past the last entry. */
578 static int lvar_at_end(const struct iam_leaf *folio)
580 assert_corr(iam_leaf_is_locked(folio));
581 return n_cur(folio) == n_end(folio);
/*
 * Insert (@k, @r) at the cursor position: shift the tail of the leaf up
 * by the new entry's padded size, then fill in hash, key size, key and
 * record in place.  Caller must have verified lvar_can_add() and must
 * hold the leaf lock.
 */
584 static void lvar_rec_add(struct iam_leaf *leaf,
585 const struct iam_key *k, const struct iam_rec *r)
594 assert_corr(lvar_can_add(leaf, k, r));
595 assert_inv(n_invariant(leaf));
596 assert_corr(iam_leaf_is_locked(leaf));
600 shift = getsize(leaf, ksize, rec_size(r));
602 if (!lvar_at_end(leaf)) {
603 assert_corr(n_cur(leaf) < n_end(leaf));
/* Keep hash order: skip past the cursor entry when the new key's
 * hash is not smaller (branch bodies not fully visible here). */
605 if (lvar_key_cmp(leaf, k) <= 0)
609 * Another exceptional case: insertion with the key
610 * less than least key in the leaf.
612 assert_corr(leaf->il_at == leaf->il_entries);
/* Open a @shift-byte gap at the insertion point. */
615 diff = PDIFF(end, start);
616 assert_corr(diff >= 0);
617 memmove(start + shift, start, diff);
619 h_used_adj(leaf, n_head(leaf), shift);
620 n_cur(leaf)->vle_keysize = cpu_to_le16(ksize);
621 n_cur(leaf)->vle_hash = cpu_to_le32(iam_get_hash(leaf, key, ksize));
622 __lvar_key_set(leaf, k);
623 __lvar_rec_set(leaf, r);
624 assert_corr(n_at_rec(leaf));
625 assert_inv(n_invariant(leaf));
/*
 * Delete the entry under the cursor by sliding the tail of the leaf
 * down over it and shrinking the used-bytes counter.  @shift is part of
 * the generic leaf-operation signature; its use here is on lines not
 * visible in this chunk.
 */
628 static void lvar_rec_del(struct iam_leaf *leaf, int shift)
634 assert_corr(n_at_rec(leaf));
635 assert_inv(n_invariant(leaf));
636 assert_corr(iam_leaf_is_locked(leaf));
639 next = e_next(leaf, n_cur(leaf));
640 nob = e_size(leaf, n_cur(leaf));
641 memmove(leaf->il_at, next, end - next);
642 h_used_adj(leaf, n_head(leaf), -nob);
643 assert_inv(n_invariant(leaf));
/* Format an empty leaf in @bh: magic plus a used count covering just
 * the header. */
646 static void lvar_init_new(struct iam_container *c, struct buffer_head *bh)
648 struct lvar_leaf_header *hdr;
650 hdr = (struct lvar_leaf_header *)bh->b_data;
651 hdr->vlh_magic = cpu_to_le16(IAM_LVAR_LEAF_MAGIC);
/* NOTE(review): vlh_used is stored without cpu_to_le16() here, while
 * lvar_leaf() below uses cpu_to_le16() for the same field -- wrong byte
 * order on big-endian hosts; verify. */
652 hdr->vlh_used = sizeof(*hdr);
/*
 * Find the split point of a full leaf: the first entry whose byte
 * offset exceeds half the block size.  *prev is set to the last entry
 * that stays in the original leaf.
 */
655 static struct lvar_leaf_entry *find_pivot(const struct iam_leaf *leaf,
656 struct lvar_leaf_entry **prev)
663 threshold = blocksize(leaf) / 2;
664 for (scan = start = n_start(leaf); scan - start <= threshold;
665 *prev = scan, scan = e_next(leaf, scan)) {
/*
 * Split a full leaf: move the upper half of its entries into the new
 * block @bh, fix both used counters, migrate the cursor if it fell into
 * the moved range, and insert the new node's least hash into the parent
 * index node.
 */
671 static void lvar_split(struct iam_leaf *leaf, struct buffer_head **bh,
674 struct lvar_leaf_entry *first_to_move;
675 struct lvar_leaf_entry *last_to_stay;
676 struct iam_path *path;
677 struct lvar_leaf_header *hdr;
678 struct buffer_head *new_leaf;
682 assert_inv(n_invariant(leaf));
683 assert_corr(iam_leaf_is_locked(leaf));
686 path = iam_leaf_path(leaf);
688 hdr = (void *)new_leaf->b_data;
690 first_to_move = find_pivot(leaf, &last_to_stay);
691 assert_corr(last_to_stay != NULL);
692 assert_corr(e_next(leaf, last_to_stay) == first_to_move);
694 hash = e_hash(first_to_move);
/* Equal hashes straddle the split point: handled on lines not visible
 * here -- presumably via the hash-collision marker bit. */
695 if (hash == e_hash(last_to_stay))
/* Copy everything from the pivot onward just past the new header. */
701 tomove = PDIFF(n_end(leaf), first_to_move);
702 memmove(hdr + 1, first_to_move, tomove);
704 h_used_adj(leaf, hdr, tomove);
705 h_used_adj(leaf, n_head(leaf), -tomove);
707 assert_corr(n_end(leaf) == first_to_move);
709 if (n_cur(leaf) >= first_to_move) {
711 * insertion point moves into new leaf.
/* Re-point the leaf at the new block and shift the cursor by the same
 * byte offset it had past the pivot. */
715 shift = PDIFF(leaf->il_at, first_to_move);
717 leaf->il_bh = new_leaf;
718 leaf->il_curidx = new_blknr;
720 assert_corr(iam_leaf_is_locked(leaf));
723 * init cannot fail, as node was just initialized.
725 assert_corr(result == 0);
726 leaf->il_at = ((void *)leaf->il_at) + shift;
729 * Insert pointer to the new node (together with the least key in
730 * the node) into index node.
732 iam_insert_key_lock(path, path->ip_frame, (struct iam_ikey *)&hash,
734 assert_corr(n_cur(leaf) < n_end(leaf));
735 assert_inv(n_invariant(leaf));
/* True iff the leaf holds no entries (used bytes == header size). */
738 static int lvar_leaf_empty(struct iam_leaf *leaf)
740 return h_used(n_head(leaf)) == sizeof(struct lvar_leaf_header);
/* Leaf-operations vector wiring the lvar format into the generic iam
 * machinery (some slots fall on lines not visible in this chunk). */
743 static const struct iam_leaf_operations lvar_leaf_ops = {
745 .init_new = lvar_init_new,
752 .key_set = lvar_key_set,
753 .key_cmp = lvar_key_cmp,
754 .key_eq = lvar_key_eq,
755 .key_size = lvar_key_size,
756 .rec_set = lvar_rec_set,
757 .rec_eq = lvar_rec_eq,
758 .rec_get = lvar_rec_get,
759 .lookup = lvar_lookup,
760 .ilookup = lvar_ilookup,
761 .at_end = lvar_at_end,
762 .rec_add = lvar_rec_add,
763 .rec_del = lvar_rec_del,
764 .can_add = lvar_can_add,
766 .leaf_empty = lvar_leaf_empty,
774 /* This is duplicated in lustre/utils/create_iam.c */
775 /* egrep -i '^o?x?[olabcdef]*$' /usr/share/dict/words */
/* Magic value of the lvar root block. */
776 IAM_LVAR_ROOT_MAGIC = 0xb01dface
779 /* This is duplicated in lustre/utils/create_iam.c */
/* Number of index levels between the root and the leaves. */
784 u8 vr_indirect_levels;
/* Root of an lvar container (body not visible in this chunk). */
789 static u32 lvar_root_ptr(struct iam_container *c)
/* Per-node init hook; body not visible in this chunk. */
794 static int lvar_node_init(struct iam_container *c, struct buffer_head *bh,
/*
 * Grow the tree by one level: turn the root into a two-entry index node
 * whose first entry gets a zeroed ikey, and bump vr_indirect_levels.
 * Root frame lock must be held.
 */
800 static struct iam_entry *lvar_root_inc(struct iam_container *c,
801 struct iam_path *path,
802 struct iam_frame *frame)
804 struct lvar_root *root;
805 struct iam_entry *entries;
807 assert_corr(iam_frame_is_locked(path, frame));
808 entries = frame->entries;
810 dx_set_count(entries, 2);
811 assert_corr(dx_get_limit(entries) == dx_root_limit(path));
813 root = (void *)frame->bh->b_data;
/* NOTE(review): le64_to_cpu on vr_magic is inconsistent with the
 * le32_to_cpu used in lvar_node_check() below for the same field --
 * likely should be le32_to_cpu; verify (debug-only assertion). */
814 assert_corr(le64_to_cpu(root->vr_magic) == IAM_LVAR_ROOT_MAGIC);
815 root->vr_indirect_levels++;
816 frame->at = entries = iam_entry_shift(path, entries, 1);
817 memset(iam_ikey_at(path, entries), 0,
818 iam_path_descr(path)->id_ikey_size);
/*
 * Sanity-check a just-read index node: root magic (root frame only) and
 * the dx count/limit fields against the expected limit for this level.
 */
822 static int lvar_node_check(struct iam_path *path, struct iam_frame *frame)
826 unsigned int limit_correct;
827 struct iam_entry *entries;
829 entries = dx_node_get_entries(path, frame);
831 if (frame == path->ip_frames) {
832 struct lvar_root *root;
834 root = (void *)frame->bh->b_data;
835 if (le32_to_cpu(root->vr_magic) != IAM_LVAR_ROOT_MAGIC)
837 limit_correct = dx_root_limit(path);
839 limit_correct = dx_node_limit(path);
840 count = dx_get_count(entries);
841 limit = dx_get_limit(entries);
844 if (limit != limit_correct)
/*
 * Frame-load hook: on the root frame, record the tree depth and lazily
 * derive the hashed target ikey from the name being looked up.
 */
849 static int lvar_node_load(struct iam_path *path, struct iam_frame *frame)
851 struct iam_entry *entries;
854 entries = dx_node_get_entries(path, frame);
855 data = frame->bh->b_data;
857 if (frame == path->ip_frames) {
858 struct lvar_root *root;
862 name = kchar(path->ip_key_target);
863 path->ip_indirect = root->vr_indirect_levels;
864 if (path->ip_ikey_target == NULL) {
865 path->ip_ikey_target = iam_path_ikey(path, 4);
866 *(lvar_hash_t *)path->ip_ikey_target =
867 get_hash(iam_path_obj(path), name,
871 frame->entries = frame->at = entries;
/* Three-way comparison of two index keys (little-endian hashes). */
875 static int lvar_ikeycmp(const struct iam_container *c,
876 const struct iam_ikey *k1, const struct iam_ikey *k2)
878 lvar_hash_t p1 = le32_to_cpu(*(lvar_hash_t *)k1);
879 lvar_hash_t p2 = le32_to_cpu(*(lvar_hash_t *)k2);
881 return p1 > p2 ? 1 : (p1 < p2 ? -1 : 0);
/* Allocate a path descriptor sized for this container's ikey width. */
884 static struct iam_path_descr *lvar_ipd_alloc(const struct iam_container *c,
887 return iam_ipd_alloc(area, c->ic_descr->id_ikey_size);
/*
 * Format a fresh root block in @buf: root header, dx count/limit
 * (count == 2: the limit entry itself plus one pointer to the initial
 * leaf), the "idle_blocks" slot re-using header padding, and a single
 * minimal-key entry pointing at leaf block 1.
 */
890 static void lvar_root(void *buf,
891 int blocksize, int keysize, int ptrsize, int recsize)
893 struct lvar_root *root;
894 struct dx_countlimit *limit;
/* On-disk width of one index entry: hash followed by a block pointer. */
898 isize = sizeof(lvar_hash_t) + ptrsize;
900 *root = (typeof(*root)) {
901 .vr_magic = cpu_to_le32(IAM_LVAR_ROOT_MAGIC),
902 .vr_recsize = cpu_to_le16(recsize),
903 .vr_ptrsize = cpu_to_le16(ptrsize),
904 .vr_indirect_levels = 0
907 limit = (void *)(root + 1);
908 *limit = (typeof(*limit)){
910 * limit itself + one pointer to the leaf.
912 .count = cpu_to_le16(2),
913 .limit = iam_root_limit(sizeof(struct lvar_root), blocksize,
914 sizeof(lvar_hash_t) + ptrsize)
917 /* To guarantee that the padding "keysize + ptrsize"
918 * covers the "dx_countlimit" and the "idle_blocks". */
919 LASSERT((keysize + ptrsize) >=
920 (sizeof(struct dx_countlimit) + sizeof(u32)));
922 entry = (void *)(limit + 1);
923 /* Put "idle_blocks" just after the limit. There was padding after
924 * the limit, the "idle_blocks" re-uses part of the padding, so no
925 * compatibility issues with old layout.
932 entry = (void *)(root + 1) + isize;
935 * Entry format is <key> followed by <ptr>. In the minimal tree
936 * consisting of a root and single node, <key> is a minimal possible
939 *(lvar_hash_t *)entry = 0;
940 entry += sizeof(lvar_hash_t);
941 /* now @entry points to <ptr> */
/* Pointer width follows @ptrsize: 4- or 8-byte little-endian block
 * number 1 (the leaf created right after the root). */
943 *(u_int32_t *)entry = cpu_to_le32(1);
945 *(u_int64_t *)entry = cpu_to_le64(1);
/* Padded on-disk size of a leaf entry; same formula as getsize(). */
948 static int lvar_esize(int namelen, int recsize)
950 return (offsetof(struct lvar_leaf_entry, vle_key) +
951 namelen + recsize + LVAR_ROUND) & ~LVAR_ROUND;
/*
 * Format the initial leaf block in @buf: header plus one entry with an
 * empty key and a zeroed record whose first byte records @recsize.
 */
954 static void lvar_leaf(void *buf,
955 int blocksize, int keysize, int ptrsize, int recsize)
957 struct lvar_leaf_header *head;
958 struct lvar_leaf_entry *entry;
962 *head = (typeof(*head)) {
963 .vlh_magic = cpu_to_le16(IAM_LVAR_LEAF_MAGIC),
964 .vlh_used = cpu_to_le16(sizeof(*head) + lvar_esize(0, recsize))
966 entry = (void *)(head + 1);
967 *entry = (typeof(*entry)) {
971 memset(e_rec(entry), 0, recsize);
/* The record's first byte is its size; see rec_size(). */
972 *(char *)e_rec(entry) = recsize;
/*
 * Create a new lvar container on empty inode @obj inside journal handle
 * @handle: append and format a root block and a leaf block, then mark
 * inode and both blocks dirty.  Returns 0 or a negative errno from
 * block allocation / metadata dirtying; error paths unwind via the
 * out/out_root labels (cleanup bodies not fully visible in this chunk).
 */
975 int iam_lvar_create(struct inode *obj,
976 int keysize, int ptrsize, int recsize, handle_t *handle)
978 struct buffer_head *root_node;
979 struct buffer_head *leaf_node;
980 struct super_block *sb;
986 assert_corr(obj->i_size == 0);
989 bsize = sb->s_blocksize;
990 root_node = osd_ldiskfs_append(handle, obj, &blknr);
991 if (IS_ERR(root_node))
992 GOTO(out, result = PTR_ERR(root_node));
994 leaf_node = osd_ldiskfs_append(handle, obj, &blknr);
995 if (IS_ERR(leaf_node))
996 GOTO(out_root, result = PTR_ERR(leaf_node));
998 lvar_root(root_node->b_data, bsize, keysize, ptrsize, recsize);
999 lvar_leaf(leaf_node->b_data, bsize, keysize, ptrsize, recsize);
1000 ldiskfs_mark_inode_dirty(handle, obj);
1001 result = ldiskfs_handle_dirty_metadata(handle, NULL, root_node);
/* A success check between these two dirty calls sits on a line not
 * visible here; as shown, the second assignment would mask the first
 * call's result -- presumably guarded by "if (result == 0)". */
1003 result = ldiskfs_handle_dirty_metadata(handle, NULL, leaf_node);
1005 ldiskfs_std_error(sb, result);
1009 GOTO(out_root, result);
/* Container-operations vector for the lvar format. */
1017 static const struct iam_operations lvar_ops = {
1018 .id_root_ptr = lvar_root_ptr,
1019 .id_node_read = iam_node_read,
1020 .id_node_init = lvar_node_init,
1021 .id_node_check = lvar_node_check,
1022 .id_node_load = lvar_node_load,
1023 .id_ikeycmp = lvar_ikeycmp,
1024 .id_root_inc = lvar_root_inc,
1025 .id_ipd_alloc = lvar_ipd_alloc,
1026 .id_ipd_free = iam_ipd_free,
1030 int iam_lvar_guess(struct iam_container *c)
1033 struct buffer_head *bh;
1034 const struct lvar_root *root;
1036 assert_corr(c->ic_object != NULL);
1038 result = iam_node_read(c, lvar_root_ptr(c), NULL, &bh);
1040 root = (void *)bh->b_data;
1042 if (le32_to_cpu(root->vr_magic) == IAM_LVAR_ROOT_MAGIC) {
1043 struct iam_descr *descr;
1045 descr = c->ic_descr;
1046 descr->id_key_size = LDISKFS_NAME_LEN;
1047 descr->id_ikey_size = sizeof(lvar_hash_t);
1048 descr->id_rec_size = le16_to_cpu(root->vr_recsize);
1049 descr->id_ptr_size = le16_to_cpu(root->vr_ptrsize);
1050 descr->id_root_gap = sizeof(*root);
1051 descr->id_node_gap = 0;
1052 descr->id_ops = &lvar_ops;
1053 descr->id_leaf_ops = &lvar_leaf_ops;