Whamcloud - gitweb
LU-17705 ptlrpc: replace synchronize_rcu() with rcu_barrier()
[fs/lustre-release.git] / lustre / osd-ldiskfs / osd_iam_lvar.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2012, 2015, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  *
31  * iam_lvar.c
32  *
33  * implementation of iam format for fixed size records, variable sized keys.
34  *
35  * Author: Nikita Danilov <nikita@clusterfs.com>
36  */
37
38 #include <linux/types.h>
39 #include "osd_internal.h"
40
41 /*
42  * Leaf operations.
43  */
44
enum {
	/* This is duplicated in lustre/utils/create_iam.c */
	/* magic stored in lvar_leaf_header::vlh_magic of every leaf block */
	IAM_LVAR_LEAF_MAGIC = 0x1973
};
49
/* This is duplicated in lustre/utils/create_iam.c */
/*
 * Header at the start of every lvar leaf block; both fields are stored
 * little-endian on disk.
 */
struct lvar_leaf_header {
	__le16 vlh_magic; /* magic number IAM_LVAR_LEAF_MAGIC */
	__le16 vlh_used;  /* used bytes, including header */
};
55
56 /*
57  * Format of leaf entry:
58  *
59  * __le16 keysize
60  *     u8 key[keysize]
61  *     u8 record[rec_size]
62  *
63  * Entries are ordered in key order.
64  */
65
/* This is duplicated in lustre/utils/create_iam.c */
/* hash of a variable-size key; kept within 31 bits, see hash_build() */
typedef u32 lvar_hash_t;
68
69 /* This is duplicated in lustre/utils/create_iam.c */
70 struct lvar_leaf_entry {
71         __le32 vle_hash;
72         __le16 vle_keysize;
73         u8 vle_key[0];
74 };
75
/* byte distance between two pointers, as a signed value */
#define PDIFF(ptr0, ptr1) (((char *)(ptr0)) - ((char *)(ptr1)))
77
78
79 static inline int blocksize(const struct iam_leaf *leaf)
80 {
81         return iam_leaf_container(leaf)->ic_object->i_sb->s_blocksize;
82 }
83
/* View an opaque iam key as a NUL-terminated C string. */
static inline const char *kchar(const struct iam_key *key)
{
	return (const char *)key;
}
88
/* Cast an lvar leaf entry to the opaque iam_lentry used by generic code. */
static inline struct iam_lentry *lvar_lentry(const struct lvar_leaf_entry *ent)
{
	return (struct iam_lentry *)ent;
}
93
/* Inverse of lvar_lentry(): recover the lvar view of an opaque entry. */
static inline struct lvar_leaf_entry *lentry_lvar(const struct iam_lentry *lent)
{
	return (struct lvar_leaf_entry *)lent;
}
98
99
100 static inline int e_keysize(const struct lvar_leaf_entry *ent)
101 {
102         return le16_to_cpu(ent->vle_keysize);
103 }
104
/* This is duplicated in lustre/utils/create_iam.c */
enum {
	LVAR_PAD   = 4,           /* leaf entries are padded to 4 bytes */
	LVAR_ROUND = LVAR_PAD - 1 /* mask used for rounding up */
};
110
111 static inline int getsize(const struct iam_leaf *leaf, int namelen, int recsize)
112 {
113         BUILD_BUG_ON((LVAR_PAD & (LVAR_PAD - 1)));
114
115         return (offsetof(struct lvar_leaf_entry, vle_key) +
116                         namelen + recsize + LVAR_ROUND) & ~LVAR_ROUND;
117 }
118
/*
 * Size of a record: records are self-describing, the first byte of a
 * record holds its total length.
 */
static inline int rec_size(const struct iam_rec *rec)
{
	const char *size_byte = (const char *)rec;

	return *size_byte;
}
123
124 static inline struct iam_rec *e_rec(const struct lvar_leaf_entry *ent)
125 {
126         return ((void *)ent) +
127                 offsetof(struct lvar_leaf_entry, vle_key) + e_keysize(ent);
128 }
129
/* Total padded size of @ent, key and record included. */
static inline int e_size(const struct iam_leaf *leaf,
			 const struct lvar_leaf_entry *ent)
{
	int reclen = rec_size(e_rec(ent));

	return getsize(leaf, e_keysize(ent), reclen);
}
135
136 static inline char *e_char(const struct lvar_leaf_entry *ent)
137 {
138         return (char *)&ent->vle_key;
139 }
140
/* Key of @ent in its opaque iam_key form. */
static inline struct iam_key *e_key(const struct lvar_leaf_entry *ent)
{
	char *name = e_char(ent);

	return (struct iam_key *)name;
}
145
146 static inline lvar_hash_t e_hash(const struct lvar_leaf_entry *ent)
147 {
148         return le32_to_cpu(ent->vle_hash);
149 }
150
/* Debugging helper: dump one leaf entry to the console. */
static void e_print(const struct lvar_leaf_entry *ent)
{
	int ksize = e_keysize(ent);

	CERROR("        %p %8.8x \"%*.*s\"\n", ent, e_hash(ent),
	       ksize, ksize, e_char(ent));
}
156
/* Entry immediately following @ent within the leaf. */
static inline struct lvar_leaf_entry *e_next(const struct iam_leaf *leaf,
					     const struct lvar_leaf_entry *ent)
{
	void *p = (void *)ent;

	return p + e_size(leaf, ent);
}
162
/* hash flavour selection; exactly the flags below are compiled in */
#define LVAR_HASH_SANDWICH  (0) /* mix two hash functions together */
#define LVAR_HASH_TEA       (1) /* TEA-based ldiskfs hash (the default) */
#define LVAR_HASH_R5        (0)
#define LVAR_HASH_PREFIX    (0) /* use leading key bytes as the hash */

/*
 * ldiskfsfs_dirhash() grew a directory-inode argument at some point;
 * hide the signature difference behind a common wrapper.
 */
#ifdef HAVE_LDISKFSFS_DIRHASH_WITH_DIR
#define e_ldiskfsfs_dirhash(dir, name, len, info) \
		ldiskfsfs_dirhash((dir), (name), (len), (info))
#else
#define e_ldiskfsfs_dirhash(dir, name, len, info) \
		ldiskfsfs_dirhash((name), (len), (info))
#endif
175
/*
 * Compute the raw name hash used to order entries in a leaf.
 *
 * "" hashes to 0 and "." / ".." get the reserved values 1 and 2 so they
 * always sort first. Otherwise the ldiskfs directory hash (TEA flavour)
 * is used; the compiled-out LVAR_HASH_PREFIX / LVAR_HASH_SANDWICH
 * branches select alternative hash layouts.
 */
static u32 hash_build0(const struct inode *dir, const char *name, int namelen)
{
	u32 result;

	if (namelen == 0)
		return 0;
	if (strncmp(name, ".", 1) == 0 && namelen == 1)
		return 1;
	if (strncmp(name, "..", 2) == 0 && namelen == 2)
		return 2;

	if (LVAR_HASH_PREFIX) {
		/* hash is simply the first bytes of the name */
		result = 0;
		strncpy((void *)&result,
			name, min_t(int, namelen, sizeof(result)));
	} else {
		struct ldiskfs_dx_hash_info hinfo;

		hinfo.hash_version = LDISKFS_DX_HASH_TEA;
		hinfo.seed = NULL;
		e_ldiskfsfs_dirhash(dir, name, namelen, &hinfo);
		result = hinfo.hash;
		if (LVAR_HASH_SANDWICH) {
			u32 result2;

			/*
			 * NOTE(review): the second hash uses the same TEA
			 * version as the first, so result2 == result here;
			 * presumably a different hash version was intended —
			 * confirm before ever enabling LVAR_HASH_SANDWICH.
			 */
			hinfo.hash_version = LDISKFS_DX_HASH_TEA;
			hinfo.seed = NULL;
			e_ldiskfsfs_dirhash(dir, name, namelen, &hinfo);
			result2 = hinfo.hash;
			result = (0xfc000000 & result2) | (0x03ffffff & result);
		}
	}
	return result;
}
210
enum {
	/* hashes this close to HASH_MAX_SIZE are remapped, see hash_build() */
	HASH_GRAY_AREA = 1024,
	/* hashes are kept within 31 bits */
	HASH_MAX_SIZE  = 0x7fffffffUL
};
215
216 static u32 hash_build(const struct inode *dir, const char *name, int namelen)
217 {
218         u32 hash;
219
220         hash = (hash_build0(dir, name, namelen) << 1) & HASH_MAX_SIZE;
221         if (hash > HASH_MAX_SIZE - HASH_GRAY_AREA)
222                 hash &= HASH_GRAY_AREA - 1;
223         return hash;
224 }
225
226 static inline lvar_hash_t get_hash(const struct inode *dir,
227                                    const char *name, int namelen)
228 {
229         return hash_build(dir, name, namelen);
230 }
231
232 static inline lvar_hash_t iam_get_hash(const struct iam_leaf *leaf,
233                                        const char *name, int namelen)
234 {
235         struct iam_path *iam_path = iam_leaf_path(leaf);
236
237         return get_hash(iam_path_obj(iam_path), name, namelen);
238 }
239
/* True iff @ent's key is exactly @name (same length, same bytes). */
static inline int e_eq(const struct lvar_leaf_entry *ent,
		       const char *name, int namelen)
{
	if (namelen != e_keysize(ent))
		return 0;
	return memcmp(e_char(ent), name, namelen) == 0;
}
245
246 static inline int e_cmp(const struct iam_leaf *leaf,
247                         const struct lvar_leaf_entry *ent, lvar_hash_t hash)
248 {
249         lvar_hash_t ehash;
250
251         ehash = e_hash(ent);
252         return ehash == hash ? 0 : (ehash < hash ? -1 : 1);
253 }
254
255 static struct lvar_leaf_header *n_head(const struct iam_leaf *l)
256 {
257         return (struct lvar_leaf_header *)l->il_bh->b_data;
258 }
259
260 static int h_used(const struct lvar_leaf_header *hdr)
261 {
262         return le16_to_cpu(hdr->vlh_used);
263 }
264
265 static void h_used_adj(const struct iam_leaf *leaf,
266                        struct lvar_leaf_header *hdr, int adj)
267 {
268         int used;
269
270         used = h_used(hdr) + adj;
271         assert_corr(sizeof(*hdr) <= used && used <= blocksize(leaf));
272         hdr->vlh_used = cpu_to_le16(used);
273 }
274
275 static struct lvar_leaf_entry *n_start(const struct iam_leaf *leaf)
276 {
277         return (void *)leaf->il_bh->b_data + sizeof(struct lvar_leaf_header);
278 }
279
280 static struct lvar_leaf_entry *n_end(const struct iam_leaf *l)
281 {
282         return (void *)l->il_bh->b_data + h_used(n_head(l));
283 }
284
285 static struct lvar_leaf_entry *n_cur(const struct iam_leaf *l)
286 {
287         return lentry_lvar(l->il_at);
288 }
289
/* Debugging helper: dump every entry of leaf @l to the console. */
void n_print(const struct iam_leaf *l)
{
	struct lvar_leaf_entry *end = n_end(l);
	struct lvar_leaf_entry *scan;

	CERROR("used: %d\n", h_used(n_head(l)));
	for (scan = n_start(l); scan < end; scan = e_next(l, scan))
		e_print(scan);
}
298
299 #if LDISKFS_CORRECTNESS_ON
300 static int n_at_rec(const struct iam_leaf *folio)
301 {
302         return n_start(folio) <= lentry_lvar(folio->il_at) &&
303                 lentry_lvar(folio->il_at) < n_end(folio);
304 }
305
306 #if LDISKFS_INVARIANT_ON
307 static int n_invariant(const struct iam_leaf *leaf)
308 {
309         struct iam_path *path;
310         struct inode *dir;
311         struct lvar_leaf_entry *scan;
312         struct lvar_leaf_entry *end;
313         lvar_hash_t hash;
314         lvar_hash_t nexthash;
315         lvar_hash_t starthash;
316
317         end  = n_end(leaf);
318         hash = 0;
319         path = leaf->il_path;
320
321         if (h_used(n_head(leaf)) > blocksize(leaf))
322                 return 0;
323
324         dir = iam_path_obj(iam_path);
325         /*
326          * Delimiting key in the parent index node. Clear least bit to account
327          * for hash collision marker.
328          */
329         starthash = *(lvar_hash_t *)iam_ikey_at(path, path->ip_frame->at) & ~1;
330         for (scan = n_start(leaf); scan < end; scan = e_next(leaf, scan)) {
331                 nexthash = e_hash(scan);
332                 if (nexthash != get_hash(dir, e_char(scan), e_keysize(scan))) {
333                         BREAKPOINT();
334                         return 0;
335                 }
336                 if (nexthash < hash) {
337                         BREAKPOINT();
338                         return 0;
339                 }
340                 hash = nexthash;
341         }
342         if (scan != end) {
343                 BREAKPOINT();
344                 return 0;
345         }
346         return 1;
347 }
348 /* LDISKFS_INVARIANT_ON */
349 #endif
350
351 /* LDISKFS_CORRECTNESS_ON */
352 #endif
353
354 static struct iam_ikey *lvar_ikey(const struct iam_leaf *l,
355                                   struct iam_ikey *key)
356 {
357         lvar_hash_t *hash;
358
359         assert_corr(n_at_rec(l));
360
361         hash = (void *)key;
362         *hash = e_hash(n_cur(l));
363         return key;
364 }
365
/* Key of the current entry. */
static struct iam_key *lvar_key(const struct iam_leaf *l)
{
	struct lvar_leaf_entry *cur = n_cur(l);

	return e_key(cur);
}
370
/* Size in bytes of the current entry's key. */
static int lvar_key_size(const struct iam_leaf *l)
{
	struct lvar_leaf_entry *cur = n_cur(l);

	return e_keysize(cur);
}
375
376 static void lvar_start(struct iam_leaf *l)
377 {
378         l->il_at = lvar_lentry(n_start(l));
379 }
380
381 static int lvar_init(struct iam_leaf *l)
382 {
383         int result;
384         int used;
385         struct lvar_leaf_header *head;
386
387         assert_corr(l->il_bh != NULL);
388
389         head = n_head(l);
390         used = h_used(head);
391         if (le16_to_cpu(head->vlh_magic) == IAM_LVAR_LEAF_MAGIC &&
392                         used <= blocksize(l)) {
393                 l->il_at = l->il_entries = lvar_lentry(n_start(l));
394                 result = 0;
395         } else {
396                 struct inode *obj;
397
398                 obj = iam_leaf_container(l)->ic_object;
399                 CERROR(
400                 "Bad magic in node %llu (#%lu): %#x != %#x or wrong used: %d\n",
401                 (unsigned long long)l->il_bh->b_blocknr, obj->i_ino,
402                 le16_to_cpu(head->vlh_magic), IAM_LVAR_LEAF_MAGIC,
403                 used);
404                 result = -EIO;
405         }
406         return result;
407 }
408
409 static void lvar_fini(struct iam_leaf *l)
410 {
411         l->il_entries = l->il_at = NULL;
412 }
413
/* Record of the current entry. */
static struct iam_rec *lvar_rec(const struct iam_leaf *l)
{
	struct lvar_leaf_entry *cur = n_cur(l);

	assert_corr(n_at_rec(l));
	return e_rec(cur);
}
419
420 static void lvar_next(struct iam_leaf *l)
421 {
422         assert_corr(n_at_rec(l));
423         assert_corr(iam_leaf_is_locked(l));
424         l->il_at = lvar_lentry(e_next(l, n_cur(l)));
425 }
426
/*
 * Position the leaf cursor for key @k.
 *
 * Returns IAM_LOOKUP_EXACT when the key itself is present,
 * IAM_LOOKUP_BEFORE when @k hashes below every entry (cursor left at
 * the first entry), otherwise IAM_LOOKUP_OK with the cursor at the last
 * entry whose hash does not exceed @k's. IAM_LOOKUP_LAST is or-ed in
 * when no entry with a larger hash exists in this leaf.
 */
static int lvar_lookup(struct iam_leaf *leaf, const struct iam_key *k)
{
	struct lvar_leaf_entry *found;
	struct lvar_leaf_entry *scan;
	struct lvar_leaf_entry *end;
	int result;
	const char *name;
	int namelen;
	int found_equal;
	lvar_hash_t hash;
	int last;

	assert_inv(n_invariant(leaf));
	end = n_end(leaf);

	name = kchar(k);
	namelen = strlen(name);
	hash = iam_get_hash(leaf, name, namelen);
	found = NULL;
	found_equal = 0;
	last = 1;

	/* entries are ordered by hash, so scan until the hash is passed */
	for (scan = n_start(leaf); scan < end; scan = e_next(leaf, scan)) {
		lvar_hash_t scan_hash;

		scan_hash = e_hash(scan);
		if (scan_hash < hash)
			found = scan;
		else if (scan_hash == hash) {
			if (e_eq(scan, name, namelen)) {
				/*
				 * perfect match
				 */
				leaf->il_at = lvar_lentry(scan);
				return IAM_LOOKUP_EXACT;
			} else if (!found_equal) {
				/* hash collision: remember the first entry
				 * with this hash, keep scanning for the
				 * exact key */
				found = scan;
				found_equal = 1;
			}
		} else {
			/* hashes only grow: no match past this point */
			last = 0;
			break;
		}
	}
	if (found == NULL) {
		/*
		 * @k is less than all hashes in the leaf.
		 */
		lvar_start(leaf);
		result = IAM_LOOKUP_BEFORE;
	} else {
		leaf->il_at = lvar_lentry(found);
		result = IAM_LOOKUP_OK;
		assert_corr(n_at_rec(leaf));
	}
	if (last)
		result |= IAM_LOOKUP_LAST;
	assert_inv(n_invariant(leaf));

	return result;
}
488
/*
 * Position the cursor by index key (hash) @ik rather than by full key.
 *
 * Returns IAM_LOOKUP_EXACT on a hash match, IAM_LOOKUP_BEFORE when
 * every entry hashes above @ik, otherwise IAM_LOOKUP_OK with the cursor
 * on the last entry whose hash does not exceed @ik.
 */
static int lvar_ilookup(struct iam_leaf *leaf, const struct iam_ikey *ik)
{
	struct lvar_leaf_entry *scan;
	struct lvar_leaf_entry *end;
	lvar_hash_t hash;

	assert_inv(n_invariant(leaf));
	end  = n_end(leaf);
	/*
	 * NOTE(review): @ik is read without le32_to_cpu() while
	 * lvar_ikeycmp() does convert — presumably ikeys are already in
	 * cpu order here (cf. lvar_ikey()); confirm against callers.
	 */
	hash = *(const lvar_hash_t *)ik;

	lvar_start(leaf);
	for (scan = n_start(leaf); scan < end; scan = e_next(leaf, scan)) {
		lvar_hash_t scan_hash;

		scan_hash = e_hash(scan);
		if (scan_hash > hash)
			return scan == n_start(leaf) ?
				IAM_LOOKUP_BEFORE : IAM_LOOKUP_OK;
		leaf->il_at = lvar_lentry(scan);
		if (scan_hash == hash)
			return IAM_LOOKUP_EXACT;
	}
	assert_inv(n_invariant(leaf));
	/*
	 * @ik is greater than any key in the node. Return last record in the
	 * node.
	 */
	return IAM_LOOKUP_OK;
}
518
/* Overwrite the current entry's key bytes; length must already match. */
static void __lvar_key_set(struct iam_leaf *l, const struct iam_key *k)
{
	struct lvar_leaf_entry *cur = n_cur(l);

	memcpy(e_key(cur), k, e_keysize(cur));
}
523
/*
 * Replace the current entry's key with @k. The new key must have the
 * same length as the stored one, since the entry is not resized here.
 */
static void lvar_key_set(struct iam_leaf *l, const struct iam_key *k)
{
	assert_corr(n_at_rec(l));
	assert_corr(strlen(kchar(k)) == e_keysize(n_cur(l)));
	assert_corr(iam_leaf_is_locked(l));
	__lvar_key_set(l, k);
	assert_inv(n_invariant(l));
}
532
533 static int lvar_key_cmp(const struct iam_leaf *l, const struct iam_key *k)
534 {
535         lvar_hash_t hash;
536         const char *name;
537
538         name = kchar(k);
539
540         hash = iam_get_hash(l, name, strlen(name));
541         return e_cmp(l, n_cur(l), hash);
542 }
543
/* True iff the current entry's key equals the string @k. */
static int lvar_key_eq(const struct iam_leaf *l, const struct iam_key *k)
{
	const char *name = kchar(k);

	return e_eq(n_cur(l), name, strlen(name));
}
551
/* Overwrite the current entry's record with the self-sized record @r. */
static void __lvar_rec_set(struct iam_leaf *l, const struct iam_rec *r)
{
	struct iam_rec *dst = e_rec(n_cur(l));

	memcpy(dst, r, rec_size(r));
}
556
/* Replace the current entry's record with @r; leaf must be locked. */
static void lvar_rec_set(struct iam_leaf *l, const struct iam_rec *r)
{
	assert_corr(n_at_rec(l));
	assert_corr(iam_leaf_is_locked(l));
	__lvar_rec_set(l, r);
	assert_inv(n_invariant(l));
}
564
/* True iff the current entry's record equals @r, size included. */
static int lvar_rec_eq(const struct iam_leaf *l, const struct iam_rec *r)
{
	struct iam_rec *rec = e_rec(n_cur(l));
	int len = rec_size(r);

	return rec_size(rec) == len && memcmp(rec, r, len) == 0;
}
573
/* Copy the current entry's record into caller-provided @r. */
static void lvar_rec_get(const struct iam_leaf *l, struct iam_rec *r)
{
	struct iam_rec *rec = e_rec(n_cur(l));

	assert_corr(n_at_rec(l));
	assert_corr(iam_leaf_is_locked(l));
	memcpy(r, rec, rec_size(rec));
	assert_inv(n_invariant(l));
}
584
/* True iff an entry for key @k / record @r still fits in the leaf. */
static int lvar_can_add(const struct iam_leaf *l,
			const struct iam_key *k, const struct iam_rec *r)
{
	int need = getsize(l, strlen(kchar(k)), rec_size(r));

	assert_corr(iam_leaf_is_locked(l));
	return h_used(n_head(l)) + need <= blocksize(l);
}
592
/* True iff the cursor has moved past the last entry. */
static int lvar_at_end(const struct iam_leaf *folio)
{
	struct lvar_leaf_entry *cur = n_cur(folio);

	assert_corr(iam_leaf_is_locked(folio));
	return cur == n_end(folio);
}
598
/*
 * Insert a new (@k, @r) pair at the cursor position. The caller must
 * have positioned the cursor with lvar_lookup() and verified free space
 * with lvar_can_add(); the leaf must be locked.
 */
static void lvar_rec_add(struct iam_leaf *leaf,
			 const struct iam_key *k, const struct iam_rec *r)
{
	const char *key;
	int ksize;
	int shift;
	void *end;
	void *start;
	ptrdiff_t diff;

	assert_corr(lvar_can_add(leaf, k, r));
	assert_inv(n_invariant(leaf));
	assert_corr(iam_leaf_is_locked(leaf));

	key   = kchar(k);
	ksize = strlen(key);
	shift = getsize(leaf, ksize, rec_size(r));

	if (!lvar_at_end(leaf)) {
		/* inserting in the middle: make room by shifting the tail
		 * of the leaf up by @shift bytes */
		assert_corr(n_cur(leaf) < n_end(leaf));
		end = n_end(leaf);
		if (lvar_key_cmp(leaf, k) <= 0)
			/* new key sorts at/after the current entry: the new
			 * entry goes right after it */
			lvar_next(leaf);
		else
			/*
			 * Another exceptional case: insertion with the key
			 * less than least key in the leaf.
			 */
			assert_corr(leaf->il_at == leaf->il_entries);

		start = leaf->il_at;
		diff  = PDIFF(end, start);
		assert_corr(diff >= 0);
		memmove(start + shift, start, diff);
	}
	/* grow the used size, then fill the new entry in place */
	h_used_adj(leaf, n_head(leaf), shift);
	n_cur(leaf)->vle_keysize = cpu_to_le16(ksize);
	n_cur(leaf)->vle_hash = cpu_to_le32(iam_get_hash(leaf, key, ksize));
	__lvar_key_set(leaf, k);
	__lvar_rec_set(leaf, r);
	assert_corr(n_at_rec(leaf));
	assert_inv(n_invariant(leaf));
}
642
/*
 * Delete the entry at the cursor by sliding the tail of the leaf down
 * over it. @shift is part of the generic leaf-operations interface and
 * is not used by the lvar format. The leaf must be locked.
 */
static void lvar_rec_del(struct iam_leaf *leaf, int shift)
{
	void *next;
	void *end;
	int nob;

	assert_corr(n_at_rec(leaf));
	assert_inv(n_invariant(leaf));
	assert_corr(iam_leaf_is_locked(leaf));

	end  = n_end(leaf);
	next = e_next(leaf, n_cur(leaf));
	nob  = e_size(leaf, n_cur(leaf));
	memmove(leaf->il_at, next, end - next);
	h_used_adj(leaf, n_head(leaf), -nob);
	assert_inv(n_invariant(leaf));
}
660
661 static void lvar_init_new(struct iam_container *c, struct buffer_head *bh)
662 {
663         struct lvar_leaf_header *hdr;
664
665         hdr = (struct lvar_leaf_header *)bh->b_data;
666         hdr->vlh_magic = cpu_to_le16(IAM_LVAR_LEAF_MAGIC);
667         hdr->vlh_used  = sizeof(*hdr);
668 }
669
670 static struct lvar_leaf_entry *find_pivot(const struct iam_leaf *leaf,
671                                           struct lvar_leaf_entry **prev)
672 {
673         void *scan;
674         void *start;
675         int threshold;
676
677         *prev = NULL;
678         threshold = blocksize(leaf) / 2;
679         for (scan = start = n_start(leaf); scan - start <= threshold;
680                         *prev = scan, scan = e_next(leaf, scan)) {
681                 ;
682         }
683         return scan;
684 }
685
686 static void lvar_split(struct iam_leaf *leaf, struct buffer_head **bh,
687                        iam_ptr_t new_blknr)
688 {
689         struct lvar_leaf_entry *first_to_move;
690         struct lvar_leaf_entry *last_to_stay;
691         struct iam_path *path;
692         struct lvar_leaf_header *hdr;
693         struct buffer_head *new_leaf;
694         ptrdiff_t tomove;
695         lvar_hash_t hash;
696
697         assert_inv(n_invariant(leaf));
698         assert_corr(iam_leaf_is_locked(leaf));
699
700         new_leaf = *bh;
701         path = iam_leaf_path(leaf);
702
703         hdr = (void *)new_leaf->b_data;
704
705         first_to_move = find_pivot(leaf, &last_to_stay);
706         assert_corr(last_to_stay != NULL);
707         assert_corr(e_next(leaf, last_to_stay) == first_to_move);
708
709         hash = e_hash(first_to_move);
710         if (hash == e_hash(last_to_stay))
711                 /*
712                  * Duplicate hash.
713                  */
714                 hash |= 1;
715
716         tomove = PDIFF(n_end(leaf), first_to_move);
717         memmove(hdr + 1, first_to_move, tomove);
718
719         h_used_adj(leaf, hdr, tomove);
720         h_used_adj(leaf, n_head(leaf), -tomove);
721
722         assert_corr(n_end(leaf) == first_to_move);
723
724         if (n_cur(leaf) >= first_to_move) {
725                 /*
726                  * insertion point moves into new leaf.
727                  */
728                 ptrdiff_t shift;
729
730                 shift = PDIFF(leaf->il_at, first_to_move);
731                 *bh = leaf->il_bh;
732                 leaf->il_bh = new_leaf;
733                 leaf->il_curidx = new_blknr;
734
735                 assert_corr(iam_leaf_is_locked(leaf));
736                 lvar_init(leaf);
737                 /*
738                  * init cannot fail, as node was just initialized.
739                  */
740                 assert_corr(result == 0);
741                 leaf->il_at = ((void *)leaf->il_at) + shift;
742         }
743         /*
744          * Insert pointer to the new node (together with the least key in
745          * the node) into index node.
746          */
747         iam_insert_key_lock(path, path->ip_frame, (struct iam_ikey *)&hash,
748                             new_blknr);
749         assert_corr(n_cur(leaf) < n_end(leaf));
750         assert_inv(n_invariant(leaf));
751 }
752
753 static int lvar_leaf_empty(struct iam_leaf *leaf)
754 {
755         return h_used(n_head(leaf)) == sizeof(struct lvar_leaf_header);
756 }
757
/* Leaf-block operations plugged into the generic iam machinery. */
static const struct iam_leaf_operations lvar_leaf_ops = {
	.init           = lvar_init,
	.init_new       = lvar_init_new,
	.fini           = lvar_fini,
	.start          = lvar_start,
	.next           = lvar_next,
	.key            = lvar_key,
	.ikey           = lvar_ikey,
	.rec            = lvar_rec,
	.key_set        = lvar_key_set,
	.key_cmp        = lvar_key_cmp,
	.key_eq         = lvar_key_eq,
	.key_size       = lvar_key_size,
	.rec_set        = lvar_rec_set,
	.rec_eq         = lvar_rec_eq,
	.rec_get        = lvar_rec_get,
	.lookup         = lvar_lookup,
	.ilookup        = lvar_ilookup,
	.at_end         = lvar_at_end,
	.rec_add        = lvar_rec_add,
	.rec_del        = lvar_rec_del,
	.can_add        = lvar_can_add,
	.split          = lvar_split,
	.leaf_empty     = lvar_leaf_empty,
};
783
784 /*
785  * Index operations.
786  */
787
enum {
	/* This is duplicated in lustre/utils/create_iam.c */
	/* egrep -i '^o?x?[olabcdef]*$' /usr/share/dict/words */
	/* magic stored in lvar_root::vr_magic of the root block */
	IAM_LVAR_ROOT_MAGIC = 0xb01dface
};
793
/* This is duplicated in lustre/utils/create_iam.c */
/* On-disk root-block header; multi-byte fields are little-endian. */
struct lvar_root {
	__le32 vr_magic;       /* IAM_LVAR_ROOT_MAGIC */
	__le16 vr_recsize;     /* record size in bytes */
	__le16 vr_ptrsize;     /* block-pointer size: 4 or 8 */
	u8 vr_indirect_levels; /* tree depth below the root */
	u8 vr_padding0;
	__le16 vr_padding1;
};
803
/* Root of an lvar tree always lives in block 0 of the object. */
static u32 lvar_root_ptr(struct iam_container *c)
{
	return 0;
}
808
/* Per-node initialization hook: nothing to do for the lvar format. */
static int lvar_node_init(struct iam_container *c, struct buffer_head *bh,
			  int root)
{
	return 0;
}
814
815 static struct iam_entry *lvar_root_inc(struct iam_container *c,
816                                        struct iam_path *path,
817                                        struct iam_frame *frame)
818 {
819         struct lvar_root *root;
820         struct iam_entry *entries;
821
822         assert_corr(iam_frame_is_locked(path, frame));
823         entries = frame->entries;
824
825         dx_set_count(entries, 2);
826         assert_corr(dx_get_limit(entries) == dx_root_limit(path));
827
828         root = (void *)frame->bh->b_data;
829         assert_corr(le64_to_cpu(root->vr_magic) == IAM_LVAR_ROOT_MAGIC);
830         root->vr_indirect_levels++;
831         frame->at = entries = iam_entry_shift(path, entries, 1);
832         memset(iam_ikey_at(path, entries), 0,
833                iam_path_descr(path)->id_ikey_size);
834         return entries;
835 }
836
837 static int lvar_node_check(struct iam_path *path, struct iam_frame *frame)
838 {
839         unsigned int count;
840         unsigned int limit;
841         unsigned int limit_correct;
842         struct iam_entry *entries;
843
844         entries = dx_node_get_entries(path, frame);
845
846         if (frame == path->ip_frames) {
847                 struct lvar_root *root;
848
849                 root = (void *)frame->bh->b_data;
850                 if (le32_to_cpu(root->vr_magic) != IAM_LVAR_ROOT_MAGIC)
851                         return -EIO;
852                 limit_correct = dx_root_limit(path);
853         } else
854                 limit_correct = dx_node_limit(path);
855         count = dx_get_count(entries);
856         limit = dx_get_limit(entries);
857         if (count > limit)
858                 return -EIO;
859         if (limit != limit_correct)
860                 return -EIO;
861         return 0;
862 }
863
/*
 * Per-frame setup while descending the tree. For the root frame this
 * also records the tree depth and, on the first descent, hashes the
 * target name into the path's index key. Always returns 0.
 */
static int lvar_node_load(struct iam_path *path, struct iam_frame *frame)
{
	struct iam_entry *entries;
	void *data;

	entries = dx_node_get_entries(path, frame);
	data = frame->bh->b_data;

	if (frame == path->ip_frames) {
		struct lvar_root *root;
		const char *name;

		root = data;
		name = kchar(path->ip_key_target);
		path->ip_indirect = root->vr_indirect_levels;
		if (path->ip_ikey_target == NULL) {
			/* scratch ikey slot 4 — presumably reserved for
			 * this purpose; confirm against other
			 * iam_path_ikey() users */
			path->ip_ikey_target = iam_path_ikey(path, 4);
			*(lvar_hash_t *)path->ip_ikey_target =
				get_hash(iam_path_obj(path), name,
					 strlen(name));
		}
	}
	frame->entries = frame->at = entries;
	return 0;
}
889
890 static int lvar_ikeycmp(const struct iam_container *c,
891                         const struct iam_ikey *k1, const struct iam_ikey *k2)
892 {
893         lvar_hash_t p1 = le32_to_cpu(*(lvar_hash_t *)k1);
894         lvar_hash_t p2 = le32_to_cpu(*(lvar_hash_t *)k2);
895
896         return p1 > p2 ? 1 : (p1 < p2 ? -1 : 0);
897 }
898
899 static struct iam_path_descr *lvar_ipd_alloc(const struct iam_container *c,
900                                              void *area)
901 {
902         return iam_ipd_alloc(area, c->ic_descr->id_ikey_size);
903 }
904
/*
 * Format @buf as the root block of a new lvar tree: lvar_root header,
 * dx_countlimit, the "idle_blocks" slot, and a single (minimal key,
 * pointer-to-block-1) entry pointing at the only leaf.
 */
static void lvar_root(void *buf,
		      int blocksize, int keysize, int ptrsize, int recsize)
{
	struct lvar_root *root;
	struct dx_countlimit *limit;
	void *entry;
	int isize;

	/* size of one (ikey, ptr) index entry */
	isize = sizeof(lvar_hash_t) + ptrsize;
	root = buf;
	*root = (typeof(*root)) {
		.vr_magic            = cpu_to_le32(IAM_LVAR_ROOT_MAGIC),
		.vr_recsize          = cpu_to_le16(recsize),
		.vr_ptrsize          = cpu_to_le16(ptrsize),
		.vr_indirect_levels  = 0
	};

	limit = (void *)(root + 1);
	*limit = (typeof(*limit)){
		/*
		 * limit itself + one pointer to the leaf.
		 */
		.count = cpu_to_le16(2),
		/*
		 * NOTE(review): .count is byte-swapped but .limit relies on
		 * iam_root_limit() returning the on-disk byte order —
		 * confirm against its definition.
		 */
		.limit = iam_root_limit(sizeof(struct lvar_root), blocksize,
					sizeof(lvar_hash_t) + ptrsize)
	};

	/* To guarantee that the padding "keysize + ptrsize"
	 * covers the "dx_countlimit" and the "idle_blocks". */
	LASSERT((keysize + ptrsize) >=
		(sizeof(struct dx_countlimit) + sizeof(u32)));

	entry = (void *)(limit + 1);
	/* Put "idle_blocks" just after the limit. There was padding after
	 * the limit, the "idle_blocks" re-uses part of the padding, so no
	 * compatibility issues with old layout.
	 */
	*(u32 *)entry = 0;

	/*
	 * Skip over @limit.
	 */
	entry = (void *)(root + 1) + isize;

	/*
	 * Entry format is <key> followed by <ptr>. In the minimal tree
	 * consisting of a root and single node, <key> is a minimal possible
	 * key.
	 */
	*(lvar_hash_t *)entry = 0;
	entry += sizeof(lvar_hash_t);
	/* now @entry points to <ptr>; the only leaf lives in block 1 */
	if (ptrsize == 4)
		*(u_int32_t *)entry = cpu_to_le32(1);
	else
		*(u_int64_t *)entry = cpu_to_le64(1);
}
962
963 static int lvar_esize(int namelen, int recsize)
964 {
965         return (offsetof(struct lvar_leaf_entry, vle_key) +
966                         namelen + recsize + LVAR_ROUND) & ~LVAR_ROUND;
967 }
968
969 static void lvar_leaf(void *buf,
970                       int blocksize, int keysize, int ptrsize, int recsize)
971 {
972         struct lvar_leaf_header *head;
973         struct lvar_leaf_entry *entry;
974
975         /* form leaf */
976         head = buf;
977         *head = (typeof(*head)) {
978                 .vlh_magic = cpu_to_le16(IAM_LVAR_LEAF_MAGIC),
979                 .vlh_used  = cpu_to_le16(sizeof(*head) + lvar_esize(0, recsize))
980         };
981         entry = (void *)(head + 1);
982         *entry = (typeof(*entry)) {
983                 .vle_hash    = 0,
984                 .vle_keysize = 0
985         };
986         memset(e_rec(entry), 0, recsize);
987         *(char *)e_rec(entry) = recsize;
988 }
989
/*
 * Create a minimal lvar index tree for inode @obj inside journal handle
 * @handle: allocate two blocks, format the first as the root (with one
 * pointer to the second) and the second as an empty leaf.
 *
 * \param obj     inode backing the index; must currently be empty
 *                (i_size == 0, asserted below)
 * \param keysize maximum key size for the index
 * \param ptrsize size of block pointers (4 or 8 bytes)
 * \param recsize fixed record size
 * \param handle  open ldiskfs journal handle covering the allocations
 *
 * \retval 0 on success, negative errno on allocation or journal error
 */
int iam_lvar_create(struct inode *obj,
                    int keysize, int ptrsize, int recsize, handle_t *handle)
{
        struct buffer_head *root_node;
        struct buffer_head *leaf_node;
        struct super_block *sb;

        u32 blknr;
        int result = 0;
        unsigned long bsize;

        /* the index must be created on a fresh (empty) object */
        assert_corr(obj->i_size == 0);

        sb = obj->i_sb;
        bsize = sb->s_blocksize;
        /* allocate block 0: the root */
        root_node = osd_ldiskfs_append(handle, obj, &blknr);
        if (IS_ERR(root_node))
                GOTO(out, result = PTR_ERR(root_node));

        /* allocate block 1: the single leaf the root points to */
        leaf_node = osd_ldiskfs_append(handle, obj, &blknr);
        if (IS_ERR(leaf_node))
                GOTO(out_root, result = PTR_ERR(leaf_node));

        lvar_root(root_node->b_data, bsize, keysize, ptrsize, recsize);
        lvar_leaf(leaf_node->b_data, bsize, keysize, ptrsize, recsize);
        ldiskfs_mark_inode_dirty(handle, obj);
        /* journal both formatted blocks; keep the first error, if any */
        result = ldiskfs_handle_dirty_metadata(handle, NULL, root_node);
        if (result == 0)
                result = ldiskfs_handle_dirty_metadata(handle, NULL, leaf_node);
        if (result != 0)
                ldiskfs_std_error(sb, result);

        brelse(leaf_node);

        /* success path also falls through out_root to release root_node */
        GOTO(out_root, result);

out_root:
        brelse(root_node);
out:
        return result;
}
1031
/*
 * Index-node operations table for the lvar container format; installed
 * into the container descriptor by iam_lvar_guess() below.
 */
static const struct iam_operations lvar_ops = {
        .id_root_ptr    = lvar_root_ptr,
        .id_node_read   = iam_node_read,
        .id_node_init   = lvar_node_init,
        .id_node_check  = lvar_node_check,
        .id_node_load   = lvar_node_load,
        .id_ikeycmp     = lvar_ikeycmp,
        .id_root_inc    = lvar_root_inc,
        .id_ipd_alloc   = lvar_ipd_alloc,
        .id_ipd_free    = iam_ipd_free,
        .id_name        = "lvar"
};
1044
1045 int iam_lvar_guess(struct iam_container *c)
1046 {
1047         int result;
1048         struct buffer_head *bh;
1049         const struct lvar_root *root;
1050
1051         assert_corr(c->ic_object != NULL);
1052
1053         result = iam_node_read(c, lvar_root_ptr(c), NULL, &bh);
1054         if (result == 0) {
1055                 root = (void *)bh->b_data;
1056
1057                 if (le32_to_cpu(root->vr_magic) == IAM_LVAR_ROOT_MAGIC) {
1058                         struct iam_descr *descr;
1059
1060                         descr = c->ic_descr;
1061                         descr->id_key_size  = LDISKFS_NAME_LEN;
1062                         descr->id_ikey_size = sizeof(lvar_hash_t);
1063                         descr->id_rec_size  = le16_to_cpu(root->vr_recsize);
1064                         descr->id_ptr_size  = le16_to_cpu(root->vr_ptrsize);
1065                         descr->id_root_gap  = sizeof(*root);
1066                         descr->id_node_gap  = 0;
1067                         descr->id_ops       = &lvar_ops;
1068                         descr->id_leaf_ops  = &lvar_leaf_ops;
1069                         c->ic_root_bh = bh;
1070                 } else {
1071                         result = -EBADF;
1072                         brelse(bh);
1073                 }
1074         }
1075         return result;
1076 }