lustre/osd-ldiskfs/osd_iam_lfix.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  */
30 /*
31  * This file is part of Lustre, http://www.lustre.org/
32  * Lustre is a trademark of Sun Microsystems, Inc.
33  *
34  * iam_lfix.c
35  * Implementation of the iam format for fixed-size records.
36  *
37  * Author: Wang Di <wangdi@clusterfs.com>
38  * Author: Nikita Danilov <nikita@clusterfs.com>
39  */
40
41 #include <linux/types.h>
42 #include "osd_internal.h"
43
44 /*
45  * Leaf operations.
46  */
47
48 enum {
49         IAM_LEAF_HEADER_MAGIC = 0x1976 /* This is duplicated in
50                                         * lustre/utils/create_iam.c */
51 };
52
53 /* This is duplicated in lustre/utils/create_iam.c */
54 struct iam_leaf_head {
55         __le16 ill_magic;
56         __le16 ill_count;
57 };
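/*
 * Leaf block layout implied by the helpers below (the header is
 * sizeof(struct iam_leaf_head) == 4 bytes; "ks"/"rs" stand for the
 * id_key_size/id_rec_size taken from the container descriptor):
 *
 *   +------------+-----------+-----------+-----+
 *   | magic, cnt | key | rec | key | rec | ... |
 *   +------------+-----------+-----------+-----+
 *   0            4           4 + ks + rs
 *
 * ill_count entries are in use; each entry is ks key bytes immediately
 * followed by rs record bytes.
 */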
58
59 static inline int iam_lfix_entry_size(const struct iam_leaf *l)
60 {
61         return iam_leaf_descr(l)->id_key_size + iam_leaf_descr(l)->id_rec_size;
62 }
63
64 static inline struct iam_lentry *
65 iam_lfix_shift(const struct iam_leaf *l, struct iam_lentry *entry, int shift)
66 {
67         return (void *)entry + shift * iam_lfix_entry_size(l);
68 }
69
70 static inline struct iam_key *iam_leaf_key_at(struct iam_lentry *entry)
71 {
72         return (struct iam_key *)entry;
73 }
74
75 static inline int lfix_keycmp(const struct iam_container *c,
76                               const struct iam_key *k1,
77                               const struct iam_key *k2)
78 {
79         return memcmp(k1, k2, c->ic_descr->id_key_size);
80 }
81
82 static struct iam_leaf_head *iam_get_head(const struct iam_leaf *l)
83 {
84         return (struct iam_leaf_head *)l->il_bh->b_data;
85 }
86
87 static struct iam_lentry *iam_entries(const struct buffer_head *bh)
88 {
89         return (void *)bh->b_data + sizeof(struct iam_leaf_head);
90 }
91
92 static struct iam_lentry *iam_get_lentries(const struct iam_leaf *l)
93 {
94         return iam_entries(l->il_bh);
95 }
96
97 static int leaf_count_limit(const struct iam_leaf *leaf)
98 {
99         int free_space;
100
101         free_space = iam_leaf_container(leaf)->ic_object->i_sb->s_blocksize;
102         free_space -= sizeof(struct iam_leaf_head);
103         return free_space / iam_lfix_entry_size(leaf);
104 }
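/*
 * For example, with a 4096-byte block and the 8-byte key / 8-byte record
 * layout used by the debugging structures at the end of this file, a leaf
 * holds (4096 - 4) / 16 = 255 entries.
 */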
105
106 static int lentry_count_get(const struct iam_leaf *leaf)
107 {
108         return le16_to_cpu(iam_get_head(leaf)->ill_count);
109 }
110
111 static void lentry_count_set(struct iam_leaf *leaf, unsigned count)
112 {
113         assert_corr(0 <= count && count <= leaf_count_limit(leaf));
114         iam_get_head(leaf)->ill_count = cpu_to_le16(count);
115 }
116
117 static struct iam_lentry *iam_lfix_get_end(const struct iam_leaf *l);
118
119 #if LDISKFS_CORRECTNESS_ON || LDISKFS_INVARIANT_ON
120 static int iam_leaf_at_rec(const struct iam_leaf *folio)
121 {
122         return
123                 iam_get_lentries(folio) <= folio->il_at &&
124                 folio->il_at < iam_lfix_get_end(folio);
125 }
126 #endif
127
128 static struct iam_ikey *iam_lfix_ikey(const struct iam_leaf *l,
129                                       struct iam_ikey *key)
130 {
131         void *ie = l->il_at;
132         assert_corr(iam_leaf_at_rec(l));
133         return (struct iam_ikey*)ie;
134 }
135
136 static struct iam_key *iam_lfix_key(const struct iam_leaf *l)
137 {
138         void *ie = l->il_at;
139         assert_corr(iam_leaf_at_rec(l));
140         return (struct iam_key*)ie;
141 }
142
143 static int iam_lfix_key_size(const struct iam_leaf *l)
144 {
145         return iam_leaf_descr(l)->id_key_size;
146 }
147
148 static void iam_lfix_start(struct iam_leaf *l)
149 {
150         l->il_at = iam_get_lentries(l);
151 }
152
153 static inline ptrdiff_t iam_lfix_diff(const struct iam_leaf *l,
154                                       const struct iam_lentry *e1,
155                                       const struct iam_lentry *e2)
156 {
157         ptrdiff_t diff;
158         int esize;
159
160         esize = iam_lfix_entry_size(l);
161         diff = (void *)e1 - (void *)e2;
162         assert_corr(diff / esize * esize == diff);
163         return diff / esize;
164 }
165
166 static int iam_lfix_init(struct iam_leaf *l)
167 {
168         int result;
169         struct iam_leaf_head *ill;
170         int count;
171
172         assert_corr(l->il_bh != NULL);
173
174         ill = iam_get_head(l);
175         count = le16_to_cpu(ill->ill_count);
176         if (ill->ill_magic == le16_to_cpu(IAM_LEAF_HEADER_MAGIC) &&
177             0 <= count && count <= leaf_count_limit(l)) {
178                 l->il_at = l->il_entries = iam_get_lentries(l);
179                 result = 0;
180         } else {
181                 struct inode *obj;
182
183                 obj = iam_leaf_container(l)->ic_object;
184                 CERROR("Wrong magic in node %llu (#%lu): %#x != %#x or "
185                        "wrong count: %d (%d)\n",
186                        (unsigned long long)l->il_bh->b_blocknr, obj->i_ino,
187                        ill->ill_magic, le16_to_cpu(IAM_LEAF_HEADER_MAGIC),
188                        count, leaf_count_limit(l));
189                 result = -EIO;
190         }
191         return result;
192 }
193
194 static void iam_lfix_fini(struct iam_leaf *l)
195 {
196         l->il_entries = l->il_at = NULL;
197 }
198
199 static struct iam_lentry *iam_lfix_get_end(const struct iam_leaf *l)
200 {
201         int count = lentry_count_get(l);
202         struct iam_lentry *ile = iam_lfix_shift(l, l->il_entries, count);
203
204         return ile;
205 }
206
207 struct iam_rec *iam_lfix_rec(const struct iam_leaf *l)
208 {
209         void *e = l->il_at;
210         assert_corr(iam_leaf_at_rec(l));
211         return e + iam_leaf_descr(l)->id_key_size;
212 }
213
214 static void iam_lfix_next(struct iam_leaf *l)
215 {
216         assert_corr(iam_leaf_at_rec(l));
217         l->il_at = iam_lfix_shift(l, l->il_at, 1);
218 }
219
220 /*
221  * Bug chasing.
222  */
223 int lfix_dump = 0;
224 EXPORT_SYMBOL(lfix_dump);
225
226 static char hdigit(char ch)
227 {
228         static char d[] = "0123456789abcdef";
229         return d[ch & 0xf];
230 }
231
232 static char *hex(char ch, char *area)
233 {
234         area[0] = hdigit(ch >> 4);
235         area[1] = hdigit(ch);
236         area[2] = 0;
237         return area;
238 }
239
240 static void l_print(struct iam_leaf *leaf, struct iam_lentry *entry)
241 {
242         int i;
243         char *area;
244         char h[3];
245
246         area = (char *)entry;
247         printk(CFS_KERN_EMERG "[");
248         for (i = iam_lfix_key_size(leaf); i > 0; --i, ++area)
249                 printk("%s", hex(*area, h));
250         printk("]-(");
251         for (i = iam_leaf_descr(leaf)->id_rec_size; i > 0; --i, ++area)
252                 printk("%s", hex(*area, h));
253         printk(")\n");
254 }
255
256 static void lfix_print(struct iam_leaf *leaf)
257 {
258         struct iam_lentry *entry;
259         int count;
260         int i;
261
262         entry = leaf->il_entries;
263         count = lentry_count_get(leaf);
264         printk(CFS_KERN_EMERG "lfix: %p %p %d\n", leaf, leaf->il_at, count);
265         for (i = 0; i < count; ++i, entry = iam_lfix_shift(leaf, entry, 1))
266                 l_print(leaf, entry);
267 }
268
269 static int iam_lfix_lookup(struct iam_leaf *l, const struct iam_key *k)
270 {
271         struct iam_lentry *p, *q, *m, *t;
272         struct iam_container *c;
273         int count;
274         int result;
275
276         count = lentry_count_get(l);
277         if (count == 0)
278                 return IAM_LOOKUP_EMPTY;
279
280         result = IAM_LOOKUP_OK;
281         c = iam_leaf_container(l);
282
283         p = l->il_entries;
284         q = iam_lfix_shift(l, p, count - 1);
285         if (lfix_keycmp(c, k, iam_leaf_key_at(p)) < 0) {
286                 /*
287                  * @k is less than the least key in the leaf
288                  */
289                 l->il_at = p;
290                 result = IAM_LOOKUP_BEFORE;
291         } else if (lfix_keycmp(c, iam_leaf_key_at(q), k) <= 0) {
292                 l->il_at = q;
293         } else {
294                 /*
295                  * EWD1293
296                  */
297                 while (iam_lfix_shift(l, p, 1) != q) {
298                         m = iam_lfix_shift(l, p, iam_lfix_diff(l, q, p) / 2);
299                         assert_corr(p < m && m < q);
300                         if (lfix_keycmp(c, iam_leaf_key_at(m), k) <= 0)
301                                 p = m;
302                         else
303                                 q = m;
304                 }
305                 assert_corr(lfix_keycmp(c, iam_leaf_key_at(p), k) <= 0 &&
306                             lfix_keycmp(c, k, iam_leaf_key_at(q)) < 0);
307                 /*
308                  * skip over records with duplicate keys.
309                  */
310                 while (p > l->il_entries) {
311                         t = iam_lfix_shift(l, p, -1);
312                         if (lfix_keycmp(c, iam_leaf_key_at(t), k) == 0)
313                                 p = t;
314                         else
315                                 break;
316                 }
317                 l->il_at = p;
318         }
319         assert_corr(iam_leaf_at_rec(l));
320
321         if (lfix_keycmp(c, iam_leaf_key_at(l->il_at), k) == 0)
322                 result = IAM_LOOKUP_EXACT;
323
324         if (lfix_dump)
325                 lfix_print(l);
326
327         return result;
328 }
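/*
 * Stand-alone sketch of the search used above (EWD1293): maintain the
 * invariant key[p] <= k < key[q] and halve the interval until q == p + 1.
 * The array-of-int version below is an illustration only and is not part
 * of the iam interface; the real iam_lfix_lookup() additionally steps back
 * over duplicate keys and reports BEFORE/EMPTY through its return value.
 */
#if 0
static int lfix_lookup_demo(const int *key, int count, int k)
{
        int p, q, m;

        if (count == 0 || k < key[0])
                return -1;              /* no entry with key <= k */
        if (key[count - 1] <= k)
                return count - 1;
        p = 0;
        q = count - 1;
        /* invariant: key[p] <= k < key[q] */
        while (p + 1 != q) {
                m = p + (q - p) / 2;
                if (key[m] <= k)
                        p = m;
                else
                        q = m;
        }
        return p;                       /* largest index with key[p] <= k */
}
#endif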
329
330 static int iam_lfix_ilookup(struct iam_leaf *l, const struct iam_ikey *ik)
331 {
332         assert(0);
333         return IAM_LOOKUP_OK;
334 }
335
336 static void iam_lfix_key_set(struct iam_leaf *l, const struct iam_key *k)
337 {
338         assert_corr(iam_leaf_at_rec(l));
339         memcpy(iam_leaf_key_at(l->il_at), k, iam_leaf_descr(l)->id_key_size);
340 }
341
342 static int iam_lfix_key_cmp(const struct iam_leaf *l, const struct iam_key *k)
343 {
344         return lfix_keycmp(iam_leaf_container(l), iam_leaf_key_at(l->il_at), k);
345 }
346
347 static int iam_lfix_key_eq(const struct iam_leaf *l, const struct iam_key *k)
348 {
349         return !lfix_keycmp(iam_leaf_container(l),
350                             iam_leaf_key_at(l->il_at), k);
351 }
352
353 static void iam_lfix_rec_set(struct iam_leaf *l, const struct iam_rec *r)
354 {
355         assert_corr(iam_leaf_at_rec(l));
356         memcpy(iam_lfix_rec(l), r, iam_leaf_descr(l)->id_rec_size);
357 }
358
359 static void iam_lfix_rec_get(const struct iam_leaf *l, struct iam_rec *r)
360 {
361         assert_corr(iam_leaf_at_rec(l));
362         memcpy(r, iam_lfix_rec(l), iam_leaf_descr(l)->id_rec_size);
363 }
364
365 static void iam_lfix_rec_add(struct iam_leaf *leaf,
366                              const struct iam_key *k, const struct iam_rec *r)
367 {
368         struct iam_lentry *end;
369         struct iam_lentry *cur;
370         struct iam_lentry *start;
371         ptrdiff_t diff;
372         int count;
373
374         assert_corr(iam_leaf_can_add(leaf, k, r));
375
376         count = lentry_count_get(leaf);
377         /*
378          * This branch handles two exceptional cases:
379          *
380          *   - leaf positioned beyond last record, and
381          *
382          *   - empty leaf.
383          */
384         if (!iam_leaf_at_end(leaf)) {
385                 end   = iam_lfix_get_end(leaf);
386                 cur   = leaf->il_at;
387                 if (lfix_keycmp(iam_leaf_container(leaf),
388                                k, iam_leaf_key_at(cur)) >= 0)
389                         iam_lfix_next(leaf);
390                 else
391                         /*
392                          * Another exceptional case: insertion with the key
393                          * less than least key in the leaf.
394                          */
395                         assert_corr(cur == leaf->il_entries);
396
397                 start = leaf->il_at;
398                 diff  = (void *)end - (void *)start;
399                 assert_corr(diff >= 0);
400                 memmove(iam_lfix_shift(leaf, start, 1), start, diff);
401         }
402         lentry_count_set(leaf, count + 1);
403         iam_lfix_key_set(leaf, k);
404         iam_lfix_rec_set(leaf, r);
405         assert_corr(iam_leaf_at_rec(leaf));
406 }
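/*
 * Insertion sketch (illustrative): with the cursor on e1 and key(e1) <= k,
 * the cursor first advances past e1, the tail is shifted one slot to the
 * right by the memmove() above, and the new entry is written into the gap:
 *
 *   before:  [ e0 e1 e2 e3 ]          il_at -> e1
 *   after:   [ e0 e1 new e2 e3 ]      il_at -> new
 */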
407
408 static void iam_lfix_rec_del(struct iam_leaf *leaf, int shift)
409 {
410         struct iam_lentry *next, *end;
411         int count;
412         ptrdiff_t diff;
413
414         assert_corr(iam_leaf_at_rec(leaf));
415
416         count = lentry_count_get(leaf);
417         end = iam_lfix_get_end(leaf);
418         next = iam_lfix_shift(leaf, leaf->il_at, 1);
419         diff = (void *)end - (void *)next;
420         memmove(leaf->il_at, next, diff);
421
422         lentry_count_set(leaf, count - 1);
423 }
424
425 static int iam_lfix_can_add(const struct iam_leaf *l,
426                             const struct iam_key *k, const struct iam_rec *r)
427 {
428         return lentry_count_get(l) < leaf_count_limit(l);
429 }
430
431 static int iam_lfix_at_end(const struct iam_leaf *folio)
432 {
433         return folio->il_at == iam_lfix_get_end(folio);
434 }
435
436 static void iam_lfix_init_new(struct iam_container *c, struct buffer_head *bh)
437 {
438         struct iam_leaf_head *hdr;
439
440         hdr = (struct iam_leaf_head*)bh->b_data;
441         hdr->ill_magic = cpu_to_le16(IAM_LEAF_HEADER_MAGIC);
442         hdr->ill_count = cpu_to_le16(0);
443 }
444
445 static void iam_lfix_split(struct iam_leaf *l, struct buffer_head **bh,
446                            iam_ptr_t new_blknr)
447 {
448         struct iam_path       *path;
449         struct iam_leaf_head  *hdr;
450         const struct iam_ikey *pivot;
451         struct buffer_head    *new_leaf;
452
453         unsigned count;
454         unsigned split;
455
456         void *start;
457         void *finis;
458
459         new_leaf = *bh;
460         path = iam_leaf_path(l);
461
462         hdr = (void *)new_leaf->b_data;
463
464         count = lentry_count_get(l);
465         split = count / 2;
466
467         start = iam_lfix_shift(l, iam_get_lentries(l), split);
468         finis = iam_lfix_shift(l, iam_get_lentries(l), count);
469
470         pivot = (const struct iam_ikey *)iam_leaf_key_at(start);
471
472         memmove(iam_entries(new_leaf), start, finis - start);
473         hdr->ill_count = cpu_to_le16(count - split);
474         lentry_count_set(l, split);
475         if ((void *)l->il_at >= start) {
476                 /*
477                  * insertion point moves into new leaf.
478                  */
479                 int shift;
480                 int result;
481
482                 shift = iam_lfix_diff(l, l->il_at, start);
483                 *bh = l->il_bh;
484                 l->il_bh = new_leaf;
485                 l->il_curidx = new_blknr;
486                 result = iam_lfix_init(l);
487                 /*
488                  * init cannot fail, as node was just initialized.
489                  */
490                 assert_corr(result == 0);
491                 l->il_at = iam_lfix_shift(l, iam_get_lentries(l), shift);
492         }
493         /*
494          * Insert pointer to the new node (together with the least key in
495          * the node) into index node.
496          */
497         iam_insert_key_lock(path, path->ip_frame, pivot, new_blknr);
498 }
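/*
 * Split sketch (illustrative): the upper half of the entries moves to the
 * freshly allocated block and the least key of that block becomes the
 * pivot recorded in the parent index node:
 *
 *   before:  [ e0 e1 e2 e3 e4 e5 ]                  count = 6, split = 3
 *   after:   [ e0 e1 e2 ]    new: [ e3 e4 e5 ]      pivot = key(e3)
 *
 * When the cursor sat in the moved half, iam_lfix_split() re-points il_bh
 * and il_at at the new block so that the pending insertion lands there.
 */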
499
500 static struct iam_leaf_operations iam_lfix_leaf_ops = {
501         .init           = iam_lfix_init,
502         .init_new       = iam_lfix_init_new,
503         .fini           = iam_lfix_fini,
504         .start          = iam_lfix_start,
505         .next           = iam_lfix_next,
506         .key            = iam_lfix_key,
507         .ikey           = iam_lfix_ikey,
508         .rec            = iam_lfix_rec,
509         .key_set        = iam_lfix_key_set,
510         .key_cmp        = iam_lfix_key_cmp,
511         .key_eq         = iam_lfix_key_eq,
512         .key_size       = iam_lfix_key_size,
513         .rec_set        = iam_lfix_rec_set,
514         .rec_get        = iam_lfix_rec_get,
515         .lookup         = iam_lfix_lookup,
516         .ilookup        = iam_lfix_ilookup,
517         .at_end         = iam_lfix_at_end,
518         .rec_add        = iam_lfix_rec_add,
519         .rec_del        = iam_lfix_rec_del,
520         .can_add        = iam_lfix_can_add,
521         .split          = iam_lfix_split
522 };
523
524 /*
525  * Index operations.
526  */
527
528 enum {
529         /* This is duplicated in lustre/utils/create_iam.c */
530         /*
531          * Then shalt thou see the dew-BEDABBLED wretch
532          * Turn, and return, indenting with the way;
533          * Each envious brier his weary legs doth scratch,
534          * Each shadow makes him stop, each murmur stay:
535          * For misery is trodden on by many,
536          * And being low never relieved by any.
537          */
538         IAM_LFIX_ROOT_MAGIC = 0xbedabb1edULL // d01efull
539 };
540
541 /* This is duplicated in lustre/utils/create_iam.c */
542 struct iam_lfix_root {
543         __le64  ilr_magic;
544         __le16  ilr_keysize;
545         __le16  ilr_recsize;
546         __le16  ilr_ptrsize;
547         u8      ilr_indirect_levels;
548         u8      ilr_padding;
549 };
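/*
 * Root block layout, as built by lfix_root() below: the iam_lfix_root
 * header is followed by index entries of key plus block-pointer bytes;
 * the first entry slot is reused for the dx_countlimit count/limit pair,
 * which is why id_root_gap is set to sizeof(struct iam_lfix_root) in
 * iam_lfix_guess().
 *
 *   +---------------+-------------+-----------+-----------+-----+
 *   | iam_lfix_root | count/limit | key | ptr | key | ptr | ... |
 *   +---------------+-------------+-----------+-----------+-----+
 */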
550
551 static __u32 iam_lfix_root_ptr(struct iam_container *c)
552 {
553         return 0;
554 }
555
556 static int iam_lfix_node_init(struct iam_container *c, struct buffer_head *bh,
557                               int root)
558 {
559         return 0;
560 }
561
562 static struct iam_entry *iam_lfix_root_inc(struct iam_container *c,
563                                            struct iam_path *path,
564                                            struct iam_frame *frame)
565 {
566         struct iam_lfix_root *root;
567         struct iam_entry     *entries;
568
569         entries = frame->entries;
570
571         dx_set_count(entries, 2);
572         assert_corr(dx_get_limit(entries) == dx_root_limit(path));
573
574         root = (void *)frame->bh->b_data;
575         assert_corr(le64_to_cpu(root->ilr_magic) == IAM_LFIX_ROOT_MAGIC);
576         root->ilr_indirect_levels++;
577         frame->at = entries = iam_entry_shift(path, entries, 1);
578         memset(iam_ikey_at(path, entries), 0,
579                iam_path_descr(path)->id_ikey_size);
580         return entries;
581 }
582
583 static int iam_lfix_node_check(struct iam_path *path, struct iam_frame *frame)
584 {
585         unsigned count;
586         unsigned limit;
587         unsigned limit_correct;
588         struct iam_entry *entries;
589
590         entries = dx_node_get_entries(path, frame);
591
592         if (frame == path->ip_frames) {
593                 struct iam_lfix_root *root;
594
595                 root = (void *)frame->bh->b_data;
596                 if (le64_to_cpu(root->ilr_magic) != IAM_LFIX_ROOT_MAGIC) {
597                         return -EIO;
598                 }
599                 limit_correct = dx_root_limit(path);
600         } else
601                 limit_correct = dx_node_limit(path);
602         count = dx_get_count(entries);
603         limit = dx_get_limit(entries);
604         if (count > limit) {
605                 return -EIO;
606         }
607         if (limit != limit_correct) {
608                 return -EIO;
609         }
610         return 0;
611 }
612
613 static int iam_lfix_node_load(struct iam_path *path, struct iam_frame *frame)
614 {
615         struct iam_entry *entries;
616         void *data;
617         entries = dx_node_get_entries(path, frame);
618
619         data = frame->bh->b_data;
620
621         if (frame == path->ip_frames) {
622                 struct iam_lfix_root *root;
623
624                 root = data;
625                 path->ip_indirect = root->ilr_indirect_levels;
626                 if (path->ip_ikey_target == NULL)
627                         path->ip_ikey_target =
628                                 (struct iam_ikey *)path->ip_key_target;
629         }
630         frame->entries = frame->at = entries;
631         return 0;
632 }
633
634 static int iam_lfix_ikeycmp(const struct iam_container *c,
635                             const struct iam_ikey *k1,
636                             const struct iam_ikey *k2)
637 {
638         return memcmp(k1, k2, c->ic_descr->id_ikey_size);
639 }
640
641 static struct iam_path_descr *iam_lfix_ipd_alloc(const struct iam_container *c,
642                                                  void *area)
643 {
644         return iam_ipd_alloc(area, c->ic_descr->id_ikey_size);
645 }
646
647 static struct iam_operations iam_lfix_ops = {
648         .id_root_ptr    = iam_lfix_root_ptr,
649         .id_node_read   = iam_node_read,
650         .id_node_init   = iam_lfix_node_init,
651         .id_node_check  = iam_lfix_node_check,
652         .id_node_load   = iam_lfix_node_load,
653         .id_ikeycmp     = iam_lfix_ikeycmp,
654         .id_root_inc    = iam_lfix_root_inc,
655         .id_ipd_alloc   = iam_lfix_ipd_alloc,
656         .id_ipd_free    = iam_ipd_free,
657         .id_name        = "lfix"
658 };
659
660 static int iam_lfix_guess(struct iam_container *c)
661 {
662         int result;
663         struct buffer_head *bh;
664         const struct iam_lfix_root *root;
665
666         assert_corr(c->ic_object != NULL);
667
668         result = iam_node_read(c, iam_lfix_root_ptr(c), NULL, &bh);
669         if (result == 0) {
670                 root = (void *)bh->b_data;
671                 if (le64_to_cpu(root->ilr_magic) == IAM_LFIX_ROOT_MAGIC) {
672                         struct iam_descr *descr;
673
674                         descr = c->ic_descr;
675                         descr->id_key_size  = le16_to_cpu(root->ilr_keysize);
676                         descr->id_ikey_size = le16_to_cpu(root->ilr_keysize);
677                         descr->id_rec_size  = le16_to_cpu(root->ilr_recsize);
678                         descr->id_ptr_size  = le16_to_cpu(root->ilr_ptrsize);
679                         descr->id_root_gap  = sizeof(struct iam_lfix_root);
680                         descr->id_node_gap  = 0;
681                         descr->id_ops       = &iam_lfix_ops;
682                         descr->id_leaf_ops  = &iam_lfix_leaf_ops;
683                 } else
684                         result = -EBADF;
685                 brelse(bh);
686         }
687         return result;
688 }
689
690 static struct iam_format iam_lfix_format = {
691         .if_guess = iam_lfix_guess
692 };
693
694 void iam_lfix_format_init(void)
695 {
696         iam_format_register(&iam_lfix_format);
697 }
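/*
 * Note: iam_format_register() adds this format to the list of formats that
 * the generic iam container setup code probes; if_guess() above recognizes
 * an lfix container purely by the root magic and fills in the descriptor
 * sizes from the root block.
 */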
698
699 /*
700  * Debugging aid.
701  */
702
703 #define KEYSIZE (8)
704 #define RECSIZE (8)
705 #define PTRSIZE (4)
706
707 #define LFIX_ROOT_RECNO \
708         ((4096 - sizeof(struct iam_lfix_root)) / (KEYSIZE + PTRSIZE))
709
710 #define LFIX_INDEX_RECNO (4096 / (KEYSIZE + PTRSIZE))
711
712 #define LFIX_LEAF_RECNO \
713         ((4096 - sizeof(struct iam_leaf_head)) / (KEYSIZE + RECSIZE))
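/*
 * With the sizes above and a 4096-byte block these evaluate to
 * (4096 - 16) / 12 = 340 root entries, 4096 / 12 = 341 index entries and
 * (4096 - 4) / 16 = 255 leaf entries (sizeof(struct iam_lfix_root) == 16,
 * sizeof(struct iam_leaf_head) == 4).
 */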
714
715 struct lfix_root {
716         struct iam_lfix_root lr_root;
717         struct {
718                 char key[KEYSIZE];
719                 char ptr[PTRSIZE];
720         } lr_entry[LFIX_ROOT_RECNO];
721 };
722
723 struct lfix_index {
724         struct dx_countlimit li_cl;
725         char   li_padding[KEYSIZE + PTRSIZE - sizeof(struct dx_countlimit)];
726         struct {
727                 char key[KEYSIZE];
728                 char ptr[PTRSIZE];
729         } li_entry[LFIX_INDEX_RECNO - 1];
730 };
731
732 struct lfix_leaf {
733         struct iam_leaf_head ll_head;
734         struct {
735                 char key[KEYSIZE];
736                 char rec[RECSIZE];
737         } ll_entry[LFIX_LEAF_RECNO];
738 };
739
740 #define STORE_UNALIGNED(val, dst)                       \
741 ({                                                      \
742         typeof(val) __val = (val);                      \
743         CLASSERT(sizeof(val) == sizeof(*(dst)));        \
744         memcpy(dst, &__val, sizeof(*(dst)));            \
745 })
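/*
 * STORE_UNALIGNED() goes through memcpy() because the slot it targets sits
 * at an arbitrary byte offset inside the block (root header plus key and
 * pointer slots) and is not guaranteed to be naturally aligned for a
 * 32- or 64-bit store; CLASSERT() only checks that the source and
 * destination sizes match.
 */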
746
747 static void lfix_root(void *buf,
748                       int blocksize, int keysize, int ptrsize, int recsize)
749 {
750         struct iam_lfix_root *root;
751         struct dx_countlimit *limit;
752         void                 *entry;
753
754         root = buf;
755         *root = (typeof(*root)) {
756                 .ilr_magic           = cpu_to_le64(IAM_LFIX_ROOT_MAGIC),
757                 .ilr_keysize         = cpu_to_le16(keysize),
758                 .ilr_recsize         = cpu_to_le16(recsize),
759                 .ilr_ptrsize         = cpu_to_le16(ptrsize),
760                 .ilr_indirect_levels = 0
761         };
762
763         limit = (void *)(root + 1);
764         *limit = (typeof(*limit)){
765                 /*
766                  * limit itself + one pointer to the leaf.
767                  */
768                 .count = cpu_to_le16(2),
769                 .limit = cpu_to_le16(iam_root_limit(sizeof(struct iam_lfix_root),
770                                         blocksize, keysize + ptrsize))
771         };
772
773         entry = root + 1;
774         /*
775          * Skip over @limit.
776          */
777         entry += keysize + ptrsize;
778
779         /*
780          * Entry format is <key> followed by <ptr>. In the minimal tree
781          * consisting of a root and single node, <key> is a minimal possible
782          * key.
783          *
784          * XXX: this key is hard-coded to be a sequence of 0's.
785          */
786
787         entry += keysize;
788         /* now @entry points to <ptr> */
789         if (ptrsize == 4)
790                 STORE_UNALIGNED(cpu_to_le32(1), (u_int32_t *)entry);
791         else
792                 STORE_UNALIGNED(cpu_to_le64(1), (u_int64_t *)entry);
793 }
794
795 static void lfix_leaf(void *buf,
796                       int blocksize, int keysize, int ptrsize, int recsize)
797 {
798         struct iam_leaf_head *head;
799
800         /* form leaf */
801         head = buf;
802         *head = (struct iam_leaf_head) {
803                 .ill_magic = cpu_to_le16(IAM_LEAF_HEADER_MAGIC),
804                 /*
805                  * Leaf contains an entry with the smallest possible key
806                  * (created by zeroing).
807                  */
808                 .ill_count = cpu_to_le16(1),
809         };
810 }
811
812 int iam_lfix_create(struct inode *obj,
813                     int keysize, int ptrsize, int recsize, handle_t *handle)
814 {
815         struct buffer_head *root_node;
816         struct buffer_head *leaf_node;
817         struct super_block *sb;
818
819         u32 blknr;
820         int result;
821         unsigned long bsize;
822
823         assert_corr(obj->i_size == 0);
824
825         sb = obj->i_sb;
826         bsize = sb->s_blocksize;
827         root_node = ldiskfs_append(handle, obj, &blknr, &result);
828         leaf_node = ldiskfs_append(handle, obj, &blknr, &result);
829         if (root_node != NULL && leaf_node != NULL) {
830                 lfix_root(root_node->b_data, bsize, keysize, ptrsize, recsize);
831                 lfix_leaf(leaf_node->b_data, bsize, keysize, ptrsize, recsize);
832                 ldiskfs_mark_inode_dirty(handle, obj);
833                 result = ldiskfs_journal_dirty_metadata(handle, root_node);
834                 if (result == 0)
835                         result = ldiskfs_journal_dirty_metadata(handle, leaf_node);
836                 if (result != 0)
837                         ldiskfs_std_error(sb, result);
838         }
839         brelse(leaf_node);
840         brelse(root_node);
841         return result;
842 }
843 EXPORT_SYMBOL(iam_lfix_create);
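/*
 * Usage sketch (illustration only, not part of the original interface):
 * creating an empty lfix container on an inode inside an already running
 * journal handle. "inode", "jh" and "rc" are placeholder names; the sizes
 * reuse the debugging layout defined above.
 */
#if 0
        rc = iam_lfix_create(inode, KEYSIZE, PTRSIZE, RECSIZE, jh);
        if (rc != 0)
                CERROR("iam_lfix_create() failed: %d\n", rc);
#endif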