Whamcloud - gitweb
LU-13783 procfs: fix improper prop_ops fields
[fs/lustre-release.git] / lustre / osd-ldiskfs / osd_iam_lfix.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2012, 2015, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  *
31  * iam_lfix.c
32  * implementation of iam format for fixed size records.
33  *
34  * Author: Wang Di <wangdi@clusterfs.com>
35  * Author: Nikita Danilov <nikita@clusterfs.com>
36  */
37
38 #include <linux/types.h>
39 #include "osd_internal.h"
40
41 /*
42  * Leaf operations.
43  */
44
/* Magic value identifying a valid lfix leaf block on disk. */
enum {
        IAM_LEAF_HEADER_MAGIC = 0x1976 /* This is duplicated in
                                        * lustre/utils/create_iam.c */
};
49
/* On-disk header of an lfix leaf block.
 * This is duplicated in lustre/utils/create_iam.c */
struct iam_leaf_head {
        __le16 ill_magic;       /* == IAM_LEAF_HEADER_MAGIC */
        __le16 ill_count;       /* number of entries in this leaf */
};
55
56 static inline int iam_lfix_entry_size(const struct iam_leaf *l)
57 {
58         return iam_leaf_descr(l)->id_key_size + iam_leaf_descr(l)->id_rec_size;
59 }
60
/* Return the entry @shift positions (may be negative) away from @entry. */
static inline struct iam_lentry *
iam_lfix_shift(const struct iam_leaf *l, struct iam_lentry *entry, int shift)
{
        void *base = entry;

        return base + shift * iam_lfix_entry_size(l);
}
66
/* An entry starts with its key, so the key address is the entry address. */
static inline struct iam_key *iam_leaf_key_at(struct iam_lentry *entry)
{
        void *key = entry;

        return key;
}
71
72 static inline int lfix_keycmp(const struct iam_container *c,
73                               const struct iam_key *k1,
74                               const struct iam_key *k2)
75 {
76         return memcmp(k1, k2, c->ic_descr->id_key_size);
77 }
78
79 static struct iam_leaf_head *iam_get_head(const struct iam_leaf *l)
80 {
81         return (struct iam_leaf_head *)l->il_bh->b_data;
82 }
83
84 static struct iam_lentry *iam_entries(const struct buffer_head *bh)
85 {
86         return (void *)bh->b_data + sizeof(struct iam_leaf_head);
87 }
88
89 static struct iam_lentry *iam_get_lentries(const struct iam_leaf *l)
90 {
91         return iam_entries(l->il_bh);
92 }
93
94 static int leaf_count_limit(const struct iam_leaf *leaf)
95 {
96         int free_space;
97
98         free_space = iam_leaf_container(leaf)->ic_object->i_sb->s_blocksize;
99         free_space -= sizeof(struct iam_leaf_head);
100         return free_space / iam_lfix_entry_size(leaf);
101 }
102
103 static int lentry_count_get(const struct iam_leaf *leaf)
104 {
105         return le16_to_cpu(iam_get_head(leaf)->ill_count);
106 }
107
108 static void lentry_count_set(struct iam_leaf *leaf, unsigned count)
109 {
110         assert_corr(0 <= count && count <= leaf_count_limit(leaf));
111         iam_get_head(leaf)->ill_count = cpu_to_le16(count);
112 }
113
/* forward declaration, needed by the invariant checker below */
static struct iam_lentry *iam_lfix_get_end(const struct iam_leaf *l);

#if LDISKFS_CORRECTNESS_ON || LDISKFS_INVARIANT_ON
/*
 * Invariant helper: true iff the leaf cursor (il_at) points at a valid
 * record, i.e. lies in [first entry, end).  Only compiled when
 * correctness/invariant checking is enabled.
 */
static int iam_leaf_at_rec(const struct iam_leaf *folio)
{
        return iam_get_lentries(folio) <= folio->il_at &&
                folio->il_at < iam_lfix_get_end(folio);
}
#endif
123
124 static struct iam_ikey *iam_lfix_ikey(const struct iam_leaf *l,
125                                       struct iam_ikey *key)
126 {
127         void *ie = l->il_at;
128
129         assert_corr(iam_leaf_at_rec(l));
130         return (struct iam_ikey *)ie;
131 }
132
133 static struct iam_key *iam_lfix_key(const struct iam_leaf *l)
134 {
135         void *ie = l->il_at;
136
137         assert_corr(iam_leaf_at_rec(l));
138         return (struct iam_key *)ie;
139 }
140
141 static int iam_lfix_key_size(const struct iam_leaf *l)
142 {
143         return iam_leaf_descr(l)->id_key_size;
144 }
145
146 static void iam_lfix_start(struct iam_leaf *l)
147 {
148         l->il_at = iam_get_lentries(l);
149 }
150
/* Distance between two entries, measured in whole entries (e1 - e2). */
static inline ptrdiff_t iam_lfix_diff(const struct iam_leaf *l,
                                      const struct iam_lentry *e1,
                                      const struct iam_lentry *e2)
{
        int esize = iam_lfix_entry_size(l);
        ptrdiff_t bytes = (void *)e1 - (void *)e2;

        /* entries are fixed size, so the byte distance must be a multiple */
        assert_corr(bytes % esize == 0);
        return bytes / esize;
}
163
164 static int iam_lfix_init(struct iam_leaf *l)
165 {
166         int result;
167         struct iam_leaf_head *ill;
168         int count;
169
170         assert_corr(l->il_bh != NULL);
171
172         ill = iam_get_head(l);
173         count = le16_to_cpu(ill->ill_count);
174         if (le16_to_cpu(ill->ill_magic) == IAM_LEAF_HEADER_MAGIC &&
175                         0 <= count && count <= leaf_count_limit(l)) {
176                 l->il_at = l->il_entries = iam_get_lentries(l);
177                 result = 0;
178         } else {
179                 struct inode *obj;
180
181                 result = -EIO;
182                 obj = iam_leaf_container(l)->ic_object;
183                 CERROR(
184                 "Bad magic in node %llu #%lu: %#x != %#x or bad cnt: %d %d: rc = %d\n",
185                         (unsigned long long)l->il_bh->b_blocknr, obj->i_ino,
186                         le16_to_cpu(ill->ill_magic), IAM_LEAF_HEADER_MAGIC,
187                         count, leaf_count_limit(l), result);
188         }
189         return result;
190 }
191
192 static void iam_lfix_fini(struct iam_leaf *l)
193 {
194         l->il_entries = l->il_at = NULL;
195 }
196
197 static struct iam_lentry *iam_lfix_get_end(const struct iam_leaf *l)
198 {
199         int count = lentry_count_get(l);
200         struct iam_lentry *ile = iam_lfix_shift(l, l->il_entries, count);
201
202         return ile;
203 }
204
205 static struct iam_rec *iam_lfix_rec(const struct iam_leaf *l)
206 {
207         void *e = l->il_at;
208
209         assert_corr(iam_leaf_at_rec(l));
210         return e + iam_leaf_descr(l)->id_key_size;
211 }
212
213 static void iam_lfix_next(struct iam_leaf *l)
214 {
215         assert_corr(iam_leaf_at_rec(l));
216         l->il_at = iam_lfix_shift(l, l->il_at, 1);
217 }
218
/*
 * Bug chasing: when non-zero, iam_lfix_lookup() dumps the whole leaf
 * via lfix_print() after every lookup.
 */
int lfix_dump = 0;
223
/* Lower-case hexadecimal digit for the low nibble of @ch. */
static char hdigit(char ch)
{
        static const char tbl[] = "0123456789abcdef";

        return tbl[ch & 0xf];
}
230
/* Format @ch as two lower-case hex digits into @area; NUL-terminated. */
static char *hex(char ch, char *area)
{
        area[0] = hdigit(ch >> 4);
        area[1] = hdigit(ch);
        area[2] = '\0';
        return area;
}
238
239 static void l_print(struct iam_leaf *leaf, struct iam_lentry *entry)
240 {
241         int i;
242         char *area;
243         char h[3];
244
245         area = (char *)entry;
246         printk(KERN_EMERG "[");
247         for (i = iam_lfix_key_size(leaf); i > 0; --i, ++area)
248                 printk("%s", hex(*area, h));
249         printk("]-(");
250         for (i = iam_leaf_descr(leaf)->id_rec_size; i > 0; --i, ++area)
251                 printk("%s", hex(*area, h));
252         printk(")\n");
253 }
254
255 static void lfix_print(struct iam_leaf *leaf)
256 {
257         struct iam_lentry *entry;
258         int count;
259         int i;
260
261         entry = leaf->il_entries;
262         count = lentry_count_get(leaf);
263         printk(KERN_EMERG "lfix: %p %p %d\n", leaf, leaf->il_at, count);
264         for (i = 0; i < count; ++i, entry = iam_lfix_shift(leaf, entry, 1))
265                 l_print(leaf, entry);
266 }
267
/*
 * Position the leaf cursor at the entry matching @k.
 *
 * Returns IAM_LOOKUP_EMPTY for an empty leaf, IAM_LOOKUP_BEFORE when @k
 * precedes every key in the leaf (cursor left on the first entry),
 * IAM_LOOKUP_EXACT when a key equal to @k was found, and IAM_LOOKUP_OK
 * otherwise (cursor on the last entry with key <= @k).  With duplicate
 * keys the cursor is moved to the FIRST entry of the run.
 */
static int iam_lfix_lookup(struct iam_leaf *l, const struct iam_key *k)
{
        struct iam_lentry *p, *q, *m, *t;
        struct iam_container *c;
        int count;
        int result;

        count = lentry_count_get(l);
        if (count == 0)
                return IAM_LOOKUP_EMPTY;

        result = IAM_LOOKUP_OK;
        c = iam_leaf_container(l);

        p = l->il_entries;
        q = iam_lfix_shift(l, p, count - 1);
        if (lfix_keycmp(c, k, iam_leaf_key_at(p)) < 0) {
                /*
                 * @k is less than the least key in the leaf
                 */
                l->il_at = p;
                result = IAM_LOOKUP_BEFORE;
        } else if (lfix_keycmp(c, iam_leaf_key_at(q), k) <= 0) {
                l->il_at = q;
        } else {
                /*
                 * Binary search in the style of Dijkstra's EWD1293:
                 * maintain the invariant key(p) <= k < key(q) and
                 * bisect until p and q are adjacent.
                 */
                while (iam_lfix_shift(l, p, 1) != q) {
                        m = iam_lfix_shift(l, p, iam_lfix_diff(l, q, p) / 2);
                        assert_corr(p < m && m < q);
                        if (lfix_keycmp(c, iam_leaf_key_at(m), k) <= 0)
                                p = m;
                        else
                                q = m;
                }
                assert_corr(lfix_keycmp(c, iam_leaf_key_at(p), k) <= 0 &&
                            lfix_keycmp(c, k, iam_leaf_key_at(q)) < 0);
                /*
                 * skip over records with duplicate keys, so the cursor
                 * ends on the first entry of a run of equal keys.
                 */
                while (p > l->il_entries) {
                        t = iam_lfix_shift(l, p, -1);
                        if (lfix_keycmp(c, iam_leaf_key_at(t), k) == 0)
                                p = t;
                        else
                                break;
                }
                l->il_at = p;
        }
        assert_corr(iam_leaf_at_rec(l));

        if (lfix_keycmp(c, iam_leaf_key_at(l->il_at), k) == 0)
                result = IAM_LOOKUP_EXACT;

        if (lfix_dump)
                lfix_print(l);

        return result;
}
328
/* ikey lookup: lfix keys and index keys share the same representation. */
static int iam_lfix_ilookup(struct iam_leaf *l, const struct iam_ikey *ik)
{
        const struct iam_key *key = (const struct iam_key *)ik;

        return iam_lfix_lookup(l, key);
}
333
334 static void iam_lfix_key_set(struct iam_leaf *l, const struct iam_key *k)
335 {
336         assert_corr(iam_leaf_at_rec(l));
337         memcpy(iam_leaf_key_at(l->il_at), k, iam_leaf_descr(l)->id_key_size);
338 }
339
340 static int iam_lfix_key_cmp(const struct iam_leaf *l, const struct iam_key *k)
341 {
342         return lfix_keycmp(iam_leaf_container(l), iam_leaf_key_at(l->il_at), k);
343 }
344
345 static int iam_lfix_key_eq(const struct iam_leaf *l, const struct iam_key *k)
346 {
347         return !lfix_keycmp(iam_leaf_container(l),
348                             iam_leaf_key_at(l->il_at), k);
349 }
350
351 static void iam_lfix_rec_set(struct iam_leaf *l, const struct iam_rec *r)
352 {
353         assert_corr(iam_leaf_at_rec(l));
354         memcpy(iam_lfix_rec(l), r, iam_leaf_descr(l)->id_rec_size);
355 }
356
357 static inline int lfix_reccmp(const struct iam_container *c,
358                               const struct iam_rec *r1,
359                               const struct iam_rec *r2)
360 {
361         return memcmp(r1, r2, c->ic_descr->id_rec_size);
362 }
363
/* True iff the record under the cursor equals @r. */
static int iam_lfix_rec_eq(const struct iam_leaf *l, const struct iam_rec *r)
{
        return lfix_reccmp(iam_leaf_container(l), iam_lfix_rec(l), r) == 0;
}
368
369 static void iam_lfix_rec_get(const struct iam_leaf *l, struct iam_rec *r)
370 {
371         assert_corr(iam_leaf_at_rec(l));
372         memcpy(r, iam_lfix_rec(l), iam_leaf_descr(l)->id_rec_size);
373 }
374
/*
 * Insert a new <@k, @r> entry at the cursor position, shifting the tail
 * of the leaf up by one entry.  The caller must have verified there is
 * room (iam_leaf_can_add()) and positioned the cursor with a lookup.
 */
static void iam_lfix_rec_add(struct iam_leaf *leaf,
                             const struct iam_key *k, const struct iam_rec *r)
{
        struct iam_lentry *end;
        struct iam_lentry *cur;
        struct iam_lentry *start;
        ptrdiff_t diff;
        int count;

        assert_corr(iam_leaf_can_add(leaf, k, r));

        count = lentry_count_get(leaf);
        /*
         * This branch handles two exceptional cases:
         *
         *   - leaf positioned beyond last record, and
         *
         *   - empty leaf.
         */
        if (!iam_leaf_at_end(leaf)) {
                end   = iam_lfix_get_end(leaf);
                cur   = leaf->il_at;
                if (lfix_keycmp(iam_leaf_container(leaf),
                                k, iam_leaf_key_at(cur)) >= 0)
                        iam_lfix_next(leaf);
                else
                        /*
                         * Another exceptional case: insertion with the key
                         * less than least key in the leaf.
                         */
                        assert_corr(cur == leaf->il_entries);

                /* open a one-entry gap at the cursor for the new entry */
                start = leaf->il_at;
                diff  = (void *)end - (void *)start;
                assert_corr(diff >= 0);
                memmove(iam_lfix_shift(leaf, start, 1), start, diff);
        }
        lentry_count_set(leaf, count + 1);
        iam_lfix_key_set(leaf, k);
        iam_lfix_rec_set(leaf, r);
        assert_corr(iam_leaf_at_rec(leaf));
}
417
418 static void iam_lfix_rec_del(struct iam_leaf *leaf, int shift)
419 {
420         struct iam_lentry *next, *end;
421         int count;
422         ptrdiff_t diff;
423
424         assert_corr(iam_leaf_at_rec(leaf));
425
426         count = lentry_count_get(leaf);
427         end = iam_lfix_get_end(leaf);
428         next = iam_lfix_shift(leaf, leaf->il_at, 1);
429         diff = (void *)end - (void *)next;
430         memmove(leaf->il_at, next, diff);
431
432         lentry_count_set(leaf, count - 1);
433 }
434
/* Entries are fixed size, so only the free slot count matters, not k/r. */
static int iam_lfix_can_add(const struct iam_leaf *l,
                            const struct iam_key *k, const struct iam_rec *r)
{
        return lentry_count_get(l) < leaf_count_limit(l);
}
440
441 static int iam_lfix_at_end(const struct iam_leaf *folio)
442 {
443         return folio->il_at == iam_lfix_get_end(folio);
444 }
445
446 static void iam_lfix_init_new(struct iam_container *c, struct buffer_head *bh)
447 {
448         struct iam_leaf_head *hdr;
449
450         hdr = (struct iam_leaf_head *)bh->b_data;
451         hdr->ill_magic = cpu_to_le16(IAM_LEAF_HEADER_MAGIC);
452         hdr->ill_count = cpu_to_le16(0);
453 }
454
455 static void iam_lfix_split(struct iam_leaf *l, struct buffer_head **bh,
456                            iam_ptr_t new_blknr)
457 {
458         struct iam_path *path;
459         struct iam_leaf_head *hdr;
460         const struct iam_ikey *pivot;
461         struct buffer_head *new_leaf;
462
463         unsigned int count;
464         unsigned int split;
465
466         void *start;
467         void *finis;
468
469         new_leaf = *bh;
470         path = iam_leaf_path(l);
471
472         hdr = (void *)new_leaf->b_data;
473
474         count = lentry_count_get(l);
475         split = count / 2;
476
477         start = iam_lfix_shift(l, iam_get_lentries(l), split);
478         finis = iam_lfix_shift(l, iam_get_lentries(l), count);
479
480         pivot = (const struct iam_ikey *)iam_leaf_key_at(start);
481
482         memmove(iam_entries(new_leaf), start, finis - start);
483         hdr->ill_count = cpu_to_le16(count - split);
484         lentry_count_set(l, split);
485         if ((void *)l->il_at >= start) {
486                 /*
487                  * insertion point moves into new leaf.
488                  */
489                 int shift;
490
491                 shift = iam_lfix_diff(l, l->il_at, start);
492                 *bh = l->il_bh;
493                 l->il_bh = new_leaf;
494                 l->il_curidx = new_blknr;
495                 iam_lfix_init(l);
496                 /*
497                  * init cannot fail, as node was just initialized.
498                  */
499                 assert_corr(result == 0);
500                 l->il_at = iam_lfix_shift(l, iam_get_lentries(l), shift);
501         }
502         /*
503          * Insert pointer to the new node (together with the least key in
504          * the node) into index node.
505          */
506         iam_insert_key_lock(path, path->ip_frame, pivot, new_blknr);
507 }
508
/* True iff the leaf currently holds no entries. */
static int iam_lfix_leaf_empty(struct iam_leaf *leaf)
{
        return !lentry_count_get(leaf);
}
513
/* Leaf operations vector plugged into the generic iam machinery for the
 * fixed-size-record ("lfix") format. */
static const struct iam_leaf_operations iam_lfix_leaf_ops = {
        .init           = iam_lfix_init,
        .init_new       = iam_lfix_init_new,
        .fini           = iam_lfix_fini,
        .start          = iam_lfix_start,
        .next           = iam_lfix_next,
        .key            = iam_lfix_key,
        .ikey           = iam_lfix_ikey,
        .rec            = iam_lfix_rec,
        .key_set        = iam_lfix_key_set,
        .key_cmp        = iam_lfix_key_cmp,
        .key_eq         = iam_lfix_key_eq,
        .key_size       = iam_lfix_key_size,
        .rec_set        = iam_lfix_rec_set,
        .rec_eq         = iam_lfix_rec_eq,
        .rec_get        = iam_lfix_rec_get,
        .lookup         = iam_lfix_lookup,
        .ilookup        = iam_lfix_ilookup,
        .at_end         = iam_lfix_at_end,
        .rec_add        = iam_lfix_rec_add,
        .rec_del        = iam_lfix_rec_del,
        .can_add        = iam_lfix_can_add,
        .split          = iam_lfix_split,
        .leaf_empty     = iam_lfix_leaf_empty,
};
539
540 /*
541  * Index operations.
542  */
543
/* Magic value identifying an lfix root block; stored in the __le64
 * ilr_magic field and checked via le64_to_cpu().
 * NOTE(review): the value needs 36 bits, wider than a plain int enum
 * guarantees pre-C23 — relies on the compiler extension; verify. */
enum {
        /* This is duplicated in lustre/utils/create_iam.c */
        /*
         * Then shalt thou see the dew-BEDABBLED wretch
         * Turn, and return, indenting with the way;
         * Each envious brier his weary legs doth scratch,
         * Each shadow makes him stop, each murmur stay:
         * For misery is trodden on by many,
         * And being low never relieved by any.
         */
        IAM_LFIX_ROOT_MAGIC = 0xbedabb1edULL /* d01efull */
};
556
/* On-disk header of the lfix root block.
 * This is duplicated in lustre/utils/create_iam.c */
struct iam_lfix_root {
        __le64  ilr_magic;              /* == IAM_LFIX_ROOT_MAGIC */
        __le16  ilr_keysize;            /* key size in bytes */
        __le16  ilr_recsize;            /* record size in bytes */
        __le16  ilr_ptrsize;            /* block pointer size, 4 or 8 */
        u8      ilr_indirect_levels;    /* tree depth below the root */
        u8      ilr_padding;
};
566
/* The root of an lfix container is always block 0 of the object. */
static __u32 iam_lfix_root_ptr(struct iam_container *c)
{
        return 0;
}
571
/* Per-node initialization hook; lfix nodes need no extra setup, so this
 * is a no-op that always succeeds. */
static int iam_lfix_node_init(struct iam_container *c, struct buffer_head *bh,
                              int root)
{
        return 0;
}
577
/*
 * Grow the tree by one level.  After the generic code has copied the old
 * root contents into a new block, reset the root to hold exactly two
 * slots (the countlimit slot plus one downlink), bump the recorded tree
 * depth, and return the zero-key entry that will point at the new block.
 */
static struct iam_entry *iam_lfix_root_inc(struct iam_container *c,
                                           struct iam_path *path,
                                           struct iam_frame *frame)
{
        struct iam_lfix_root *root;
        struct iam_entry *entries;

        entries = frame->entries;

        dx_set_count(entries, 2);
        assert_corr(dx_get_limit(entries) == dx_root_limit(path));

        root = (void *)frame->bh->b_data;
        assert_corr(le64_to_cpu(root->ilr_magic) == IAM_LFIX_ROOT_MAGIC);
        root->ilr_indirect_levels++;
        frame->at = entries = iam_entry_shift(path, entries, 1);
        /* the new downlink carries the minimal (all-zero) index key */
        memset(iam_ikey_at(path, entries), 0,
               iam_path_descr(path)->id_ikey_size);
        return entries;
}
598
599 static int iam_lfix_node_check(struct iam_path *path, struct iam_frame *frame)
600 {
601         unsigned int count;
602         unsigned int limit;
603         unsigned int limit_correct;
604         struct iam_entry *entries;
605
606         entries = dx_node_get_entries(path, frame);
607
608         if (frame == path->ip_frames) {
609                 struct iam_lfix_root *root;
610
611                 root = (void *)frame->bh->b_data;
612                 if (le64_to_cpu(root->ilr_magic) != IAM_LFIX_ROOT_MAGIC)
613                         return -EIO;
614                 limit_correct = dx_root_limit(path);
615         } else {
616                 limit_correct = dx_node_limit(path);
617         }
618         count = dx_get_count(entries);
619         limit = dx_get_limit(entries);
620         if (count > limit)
621                 return -EIO;
622         if (limit != limit_correct)
623                 return -EIO;
624         return 0;
625 }
626
627 static int iam_lfix_node_load(struct iam_path *path, struct iam_frame *frame)
628 {
629         struct iam_entry *entries;
630         void *data;
631
632         entries = dx_node_get_entries(path, frame);
633         data = frame->bh->b_data;
634
635         if (frame == path->ip_frames) {
636                 struct iam_lfix_root *root;
637
638                 root = data;
639                 path->ip_indirect = root->ilr_indirect_levels;
640                 if (path->ip_ikey_target == NULL)
641                         path->ip_ikey_target =
642                                 (struct iam_ikey *)path->ip_key_target;
643         }
644         frame->entries = frame->at = entries;
645         return 0;
646 }
647
648 static int iam_lfix_ikeycmp(const struct iam_container *c,
649                             const struct iam_ikey *k1,
650                             const struct iam_ikey *k2)
651 {
652         return memcmp(k1, k2, c->ic_descr->id_ikey_size);
653 }
654
655 static struct iam_path_descr *iam_lfix_ipd_alloc(const struct iam_container *c,
656                                                  void *area)
657 {
658         return iam_ipd_alloc(area, c->ic_descr->id_ikey_size);
659 }
660
/* Container/index operations vector for the lfix format. */
static const struct iam_operations iam_lfix_ops = {
        .id_root_ptr    = iam_lfix_root_ptr,
        .id_node_read   = iam_node_read,
        .id_node_init   = iam_lfix_node_init,
        .id_node_check  = iam_lfix_node_check,
        .id_node_load   = iam_lfix_node_load,
        .id_ikeycmp     = iam_lfix_ikeycmp,
        .id_root_inc    = iam_lfix_root_inc,
        .id_ipd_alloc   = iam_lfix_ipd_alloc,
        .id_ipd_free    = iam_ipd_free,
        .id_name        = "lfix"
};
673
674 int iam_lfix_guess(struct iam_container *c)
675 {
676         int result;
677         struct buffer_head *bh;
678         const struct iam_lfix_root *root;
679
680         assert_corr(c->ic_object != NULL);
681
682         result = iam_node_read(c, iam_lfix_root_ptr(c), NULL, &bh);
683         if (result == 0) {
684                 root = (void *)bh->b_data;
685                 if (le64_to_cpu(root->ilr_magic) == IAM_LFIX_ROOT_MAGIC) {
686                         struct iam_descr *descr;
687
688                         descr = c->ic_descr;
689                         descr->id_key_size  = le16_to_cpu(root->ilr_keysize);
690                         descr->id_ikey_size = le16_to_cpu(root->ilr_keysize);
691                         descr->id_rec_size  = le16_to_cpu(root->ilr_recsize);
692                         descr->id_ptr_size  = le16_to_cpu(root->ilr_ptrsize);
693                         descr->id_root_gap  = sizeof(struct iam_lfix_root);
694                         descr->id_node_gap  = 0;
695                         descr->id_ops       = &iam_lfix_ops;
696                         descr->id_leaf_ops  = &iam_lfix_leaf_ops;
697                         c->ic_root_bh = bh;
698                 } else {
699                         result = -EBADF;
700                         brelse(bh);
701                 }
702         }
703         return result;
704 }
705
/*
 * Debugging aid.
 */

/* Geometry used by the sample lfix layout structures below. */
#define KEYSIZE (8)
#define RECSIZE (8)
#define PTRSIZE (4)

/* <key,ptr> pairs that fit in a 4096-byte root block after the header. */
#define LFIX_ROOT_RECNO \
        ((4096 - sizeof(struct iam_lfix_root)) / (KEYSIZE + PTRSIZE))

/* <key,ptr> pairs that fit in a 4096-byte index block. */
#define LFIX_INDEX_RECNO (4096 / (KEYSIZE + PTRSIZE))

/* <key,rec> pairs that fit in a 4096-byte leaf block after the header. */
#define LFIX_LEAF_RECNO \
        ((4096 - sizeof(struct iam_leaf_head)) / (KEYSIZE + RECSIZE))
721
722         struct lfix_root {
723         struct iam_lfix_root lr_root;
724         struct {
725                 char key[KEYSIZE];
726                 char ptr[PTRSIZE];
727         } lr_entry[LFIX_ROOT_RECNO];
728 };
729
730         struct lfix_index {
731         struct dx_countlimit li_cl;
732         char   li_padding[KEYSIZE + PTRSIZE - sizeof(struct dx_countlimit)];
733         struct {
734                 char key[KEYSIZE];
735                 char ptr[PTRSIZE];
736         } li_entry[LFIX_INDEX_RECNO - 1];
737 };
738
739         struct lfix_leaf {
740                 struct iam_leaf_head ll_head;
741                 struct {
742                         char key[KEYSIZE];
743                         char rec[RECSIZE];
744                 } ll_entry[LFIX_LEAF_RECNO];
745         };
746
/* Store @val into possibly-unaligned @dst via memcpy; sizes must match. */
#define STORE_UNALIGNED(val, dst)                       \
({                                                      \
        typeof(val) __val = (val);                      \
        BUILD_BUG_ON(sizeof(val) != sizeof(*(dst)));    \
        memcpy(dst, &__val, sizeof(*(dst)));            \
})
753
/*
 * Format an empty lfix root block in @buf: root header, dx_countlimit,
 * the "idle_blocks" slot, and a single entry whose key is all zeroes and
 * whose pointer references block 1 (the first leaf).
 */
static void lfix_root(void *buf,
                      int blocksize, int keysize, int ptrsize, int recsize)
{
        struct iam_lfix_root *root;
        struct dx_countlimit *limit;
        void *entry;

        root = buf;
        *root = (typeof(*root)) {
                .ilr_magic           = cpu_to_le64(IAM_LFIX_ROOT_MAGIC),
                .ilr_keysize         = cpu_to_le16(keysize),
                .ilr_recsize         = cpu_to_le16(recsize),
                .ilr_ptrsize         = cpu_to_le16(ptrsize),
                .ilr_indirect_levels = 0
        };

        limit = (void *)(root + 1);
        *limit = (typeof(*limit)){
                /*
                 * limit itself + one pointer to the leaf.
                 */
                .count = cpu_to_le16(2),
                .limit = iam_root_limit(sizeof(struct iam_lfix_root),
                                        blocksize, keysize + ptrsize)
        };

        /* To guarantee that the padding "keysize + ptrsize"
         * covers the "dx_countlimit" and the "idle_blocks". */
        LASSERT((keysize + ptrsize) >=
                (sizeof(struct dx_countlimit) + sizeof(__u32)));

        entry = (void *)(limit + 1);
        /* Put "idle_blocks" just after the limit. There was padding after
         * the limit, the "idle_blocks" re-uses part of the padding, so no
         * compatibility issues with old layout.
         */
        *(__u32 *)entry = 0;

        /*
         * Skip over @limit.
         */
        entry = (void *)(root + 1) + keysize + ptrsize;

        /*
         * Entry format is <key> followed by <ptr>. In the minimal tree
         * consisting of a root and single node, <key> is a minimal possible
         * key.
         *
         * XXX: this key is hard-coded to be a sequence of 0's.
         */

        memset(entry, 0, keysize);
        entry += keysize;
        /* now @entry points to <ptr>; the single downlink targets block 1 */
        if (ptrsize == 4)
                STORE_UNALIGNED(cpu_to_le32(1), (u_int32_t *)entry);
        else
                STORE_UNALIGNED(cpu_to_le64(1), (u_int64_t *)entry);
}
813
814 static void lfix_leaf(void *buf,
815                       int blocksize, int keysize, int ptrsize, int recsize)
816 {
817         struct iam_leaf_head *head;
818         void *entry;
819
820         /* form leaf */
821         head = buf;
822         *head = (struct iam_leaf_head) {
823                 .ill_magic = cpu_to_le16(IAM_LEAF_HEADER_MAGIC),
824                 /*
825                  * Leaf contains an entry with the smallest possible key
826                  * (created by zeroing).
827                  */
828                 .ill_count = cpu_to_le16(1),
829         };
830
831         entry = (void *)(head + 1);
832         memset(entry, 0, keysize + recsize);
833 }
834
/*
 * Create a minimal lfix tree in empty inode @obj under transaction
 * @handle: block 0 becomes the root, block 1 a leaf with one all-zero
 * entry.  Returns 0 on success or a negative errno.
 */
int iam_lfix_create(struct inode *obj,
                    int keysize, int ptrsize, int recsize, handle_t *handle)
{
        struct buffer_head *root_node;
        struct buffer_head *leaf_node;
        struct super_block *sb;
        u32 blknr;
        int result = 0;
        unsigned long bsize;

        /* only ever called on a freshly created, empty object */
        assert_corr(obj->i_size == 0);

        sb = obj->i_sb;
        bsize = sb->s_blocksize;
        root_node = osd_ldiskfs_append(handle, obj, &blknr);
        if (IS_ERR(root_node))
                GOTO(out, result = PTR_ERR(root_node));

        leaf_node = osd_ldiskfs_append(handle, obj, &blknr);
        if (IS_ERR(leaf_node))
                GOTO(out_root, result = PTR_ERR(leaf_node));

        lfix_root(root_node->b_data, bsize, keysize, ptrsize, recsize);
        lfix_leaf(leaf_node->b_data, bsize, keysize, ptrsize, recsize);
        ldiskfs_mark_inode_dirty(handle, obj);
        result = ldiskfs_handle_dirty_metadata(handle, NULL, root_node);
        if (result == 0)
                result = ldiskfs_handle_dirty_metadata(handle, NULL, leaf_node);
        if (result != 0)
                ldiskfs_std_error(sb, result);

        brelse(leaf_node);

        GOTO(out_root, result);

out_root:
        /* root buffer is released on both the success and error paths */
        brelse(root_node);
out:
        return result;
}