4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see [sun.com URL with a
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
27 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
31 * This file is part of Lustre, http://www.lustre.org/
32 * Lustre is a trademark of Sun Microsystems, Inc.
35 * implementation of iam format for fixed size records.
37 * Author: Wang Di <wangdi@clusterfs.com>
38 * Author: Nikita Danilov <nikita@clusterfs.com>
41 #include <linux/types.h>
42 #include "osd_internal.h"
49 IAM_LEAF_HEADER_MAGIC = 0x1976 /* This is duplicated in
50 * lustre/utils/create_iam.c */
53 /* This is duplicated in lustre/utils/create_iam.c */
54 struct iam_leaf_head {
/*
 * Byte size of one leaf entry: fixed-size key immediately followed by the
 * fixed-size record (the "lfix" format stores both at constant width).
 */
59 static inline int iam_lfix_entry_size(const struct iam_leaf *l)
61         return iam_leaf_descr(l)->id_key_size + iam_leaf_descr(l)->id_rec_size;
/*
 * Move @shift entries forward (or backward, if negative) from @entry.
 * Pure pointer arithmetic in units of the fixed entry size.
 */
64 static inline struct iam_lentry *
65 iam_lfix_shift(const struct iam_leaf *l, struct iam_lentry *entry, int shift)
67         return (void *)entry + shift * iam_lfix_entry_size(l);
/* The key is stored first in each entry, so the entry pointer IS the key. */
70 static inline struct iam_key *iam_leaf_key_at(struct iam_lentry *entry)
72         return (struct iam_key *)entry;
/*
 * memcmp-order comparison of two fixed-size keys; keys therefore sort as
 * big-endian byte strings of id_key_size bytes.
 */
75 static inline int lfix_keycmp(const struct iam_container *c,
76                               const struct iam_key *k1,
77                               const struct iam_key *k2)
79         return memcmp(k1, k2, c->ic_descr->id_key_size);
/* Leaf header lives at offset 0 of the leaf's buffer_head data. */
82 static struct iam_leaf_head *iam_get_head(const struct iam_leaf *l)
84         return (struct iam_leaf_head *)l->il_bh->b_data;
/* First entry in a leaf block: immediately after the iam_leaf_head header. */
87 static struct iam_lentry *iam_entries(const struct buffer_head *bh)
89         return (void *)bh->b_data + sizeof(struct iam_leaf_head);
/* Convenience wrapper: first entry of the leaf currently attached to @l. */
92 static struct iam_lentry *iam_get_lentries(const struct iam_leaf *l)
94         return iam_entries(l->il_bh);
/*
 * Maximum number of entries a leaf block can hold:
 * (blocksize - header) / entry_size.
 */
97 static int leaf_count_limit(const struct iam_leaf *leaf)
101         free_space = iam_leaf_container(leaf)->ic_object->i_sb->s_blocksize;
102         free_space -= sizeof(struct iam_leaf_head);
103         return free_space / iam_lfix_entry_size(leaf);
/* Current entry count, converted from the on-disk little-endian header. */
106 static int lentry_count_get(const struct iam_leaf *leaf)
108         return le16_to_cpu(iam_get_head(leaf)->ill_count);
/*
 * Store a new entry count into the leaf header (little-endian on disk).
 * Asserts the count fits the block's capacity.
 */
111 static void lentry_count_set(struct iam_leaf *leaf, unsigned count)
113         assert_corr(0 <= count && count <= leaf_count_limit(leaf));
114         iam_get_head(leaf)->ill_count = cpu_to_le16(count);
117 static struct iam_lentry *iam_lfix_get_end(const struct iam_leaf *l);
119 #if LDISKFS_CORRECTNESS_ON || LDISKFS_INVARIANT_ON
/*
 * Debug-only invariant: the leaf cursor (->il_at) points at a valid
 * record, i.e. within [first entry, end). Compiled only with correctness
 * or invariant checking enabled.
 */
120 static int iam_leaf_at_rec(const struct iam_leaf *folio)
123                 iam_get_lentries(folio) <= folio->il_at &&
124                 folio->il_at < iam_lfix_get_end(folio);
/*
 * Return the index-key at the current cursor position. For lfix the
 * index key and the leaf key appear to share the same representation
 * (see iam_lfix_key below); @key output buffer is presumably unused here
 * -- elided lines prevent confirming. NOTE(review): verify against full
 * source.
 */
128 static struct iam_ikey *iam_lfix_ikey(const struct iam_leaf *l,
129                                       struct iam_ikey *key)
132         assert_corr(iam_leaf_at_rec(l));
133         return (struct iam_ikey*)ie;
/*
 * Key at the current cursor position (entry starts with the key).
 * `ie` is assigned on a line elided from this view.
 */
136 static struct iam_key *iam_lfix_key(const struct iam_leaf *l)
139         assert_corr(iam_leaf_at_rec(l));
140         return (struct iam_key*)ie;
/* Fixed key size for this container, from the descriptor. */
143 static int iam_lfix_key_size(const struct iam_leaf *l)
145         return iam_leaf_descr(l)->id_key_size;
/* Rewind the leaf cursor to the first entry. */
148 static void iam_lfix_start(struct iam_leaf *l)
150         l->il_at = iam_get_lentries(l);
/*
 * Distance between two entries, in whole entries (e1 - e2).
 * Asserts the byte difference is an exact multiple of the entry size,
 * i.e. both pointers are properly aligned entry addresses.
 */
153 static inline ptrdiff_t iam_lfix_diff(const struct iam_leaf *l,
154                                       const struct iam_lentry *e1,
155                                       const struct iam_lentry *e2)
160         esize = iam_lfix_entry_size(l);
161         diff = (void *)e1 - (void *)e2;
162         assert_corr(diff / esize * esize == diff);
/*
 * Attach an iam_leaf to the block already read into l->il_bh:
 * validate the header magic and entry count, then position both
 * ->il_entries and the cursor ->il_at at the first entry.
 * On a bad header, log the block number/inode and (presumably, on an
 * elided line) return an error.
 *
 * NOTE(review): the magic check compares the raw on-disk field against
 * le16_to_cpu(IAM_LEAF_HEADER_MAGIC) instead of the conventional
 * le16_to_cpu(ill_magic) == MAGIC; for a symmetric 16-bit constant the
 * result is equivalent on either endianness, but confirm against the
 * canonical source before changing.
 */
166 static int iam_lfix_init(struct iam_leaf *l)
169         struct iam_leaf_head *ill;
172         assert_corr(l->il_bh != NULL);
174         ill = iam_get_head(l);
175         count = le16_to_cpu(ill->ill_count);
176         if (ill->ill_magic == le16_to_cpu(IAM_LEAF_HEADER_MAGIC) &&
177             0 <= count && count <= leaf_count_limit(l)) {
178                 l->il_at = l->il_entries = iam_get_lentries(l);
183                 obj = iam_leaf_container(l)->ic_object;
184                 CERROR("Wrong magic in node %llu (#%lu): %#x != %#x or "
185                        "wrong count: %d (%d)\n",
186                        (unsigned long long)l->il_bh->b_blocknr, obj->i_ino,
187                        ill->ill_magic, le16_to_cpu(IAM_LEAF_HEADER_MAGIC),
188                        count, leaf_count_limit(l));
/* Detach the leaf: clear entry base and cursor pointers. */
194 static void iam_lfix_fini(struct iam_leaf *l)
196         l->il_entries = l->il_at = NULL;
/* One-past-the-last entry: base shifted by the current entry count. */
199 static struct iam_lentry *iam_lfix_get_end(const struct iam_leaf *l)
201         int count = lentry_count_get(l);
202         struct iam_lentry *ile = iam_lfix_shift(l, l->il_entries, count);
/*
 * Record at the current cursor position: the record follows the key
 * inside the entry, so offset by id_key_size. (`e` is assigned on a
 * line elided from this view.)
 */
207 struct iam_rec *iam_lfix_rec(const struct iam_leaf *l)
210         assert_corr(iam_leaf_at_rec(l));
211         return e + iam_leaf_descr(l)->id_key_size;
/* Advance the leaf cursor to the next entry. */
214 static void iam_lfix_next(struct iam_leaf *l)
216         assert_corr(iam_leaf_at_rec(l));
217         l->il_at = iam_lfix_shift(l, l->il_at, 1);
224 EXPORT_SYMBOL(lfix_dump);
/* Low nibble of @ch as a lowercase hex digit (debug-print helper). */
226 static char hdigit(char ch)
228         static char d[] = "0123456789abcdef";
/*
 * Render byte @ch as two hex digits into @area (high nibble first).
 * Caller's buffer; presumably NUL-terminated on an elided line.
 */
232 static char *hex(char ch, char *area)
234         area[0] = hdigit(ch >> 4);
235         area[1] = hdigit(ch);
/*
 * Debug dump of one leaf entry to the kernel log: the key bytes in hex
 * (inside [...]), then the record bytes in hex. @area walks across the
 * raw entry; key first, record second, matching the on-disk layout.
 */
240 static void l_print(struct iam_leaf *leaf, struct iam_lentry *entry)
246         area = (char *)entry;
247         printk(CFS_KERN_EMERG "[");
248         for (i = iam_lfix_key_size(leaf); i > 0; --i, ++area)
249                 printk("%s", hex(*area, h));
251         for (i = iam_leaf_descr(leaf)->id_rec_size; i > 0; --i, ++area)
252                 printk("%s", hex(*area, h));
/*
 * Debug dump of an entire leaf: header line (leaf pointer, cursor,
 * count) followed by every entry via l_print().
 */
256 static void lfix_print(struct iam_leaf *leaf)
258         struct iam_lentry *entry;
262         entry = leaf->il_entries;
263         count = lentry_count_get(leaf);
264         printk(CFS_KERN_EMERG "lfix: %p %p %d\n", leaf, leaf->il_at, count);
265         for (i = 0; i < count; ++i, entry = iam_lfix_shift(leaf, entry, 1))
266                 l_print(leaf, entry);
/*
 * Binary-search @k within the (sorted) leaf and position the cursor.
 *
 * Result values visible here:
 *   IAM_LOOKUP_EMPTY  - leaf has no entries;
 *   IAM_LOOKUP_BEFORE - @k sorts before the least key in the leaf;
 *   IAM_LOOKUP_EXACT  - cursor left on an entry whose key equals @k;
 *   IAM_LOOKUP_OK     - cursor on the greatest entry with key <= @k.
 *
 * Invariant of the bisection: key(p) <= k < key(q); the loop narrows
 * [p, q) until they are adjacent. Afterwards p is walked backwards over
 * entries with a key equal to @k so the cursor lands on the FIRST of a
 * run of duplicates.
 */
269 static int iam_lfix_lookup(struct iam_leaf *l, const struct iam_key *k)
271         struct iam_lentry *p, *q, *m, *t;
272         struct iam_container *c;
276         count = lentry_count_get(l);
278                 return IAM_LOOKUP_EMPTY;
280         result = IAM_LOOKUP_OK;
281         c = iam_leaf_container(l);
                /* q = last entry; p was set to the first entry on an elided line */
284         q = iam_lfix_shift(l, p, count - 1);
285         if (lfix_keycmp(c, k, iam_leaf_key_at(p)) < 0) {
287                  * @k is less than the least key in the leaf
290                 result = IAM_LOOKUP_BEFORE;
291         } else if (lfix_keycmp(c, iam_leaf_key_at(q), k) <= 0) {
                /* else: binary search, halving [p, q) each iteration */
297                 while (iam_lfix_shift(l, p, 1) != q) {
298                         m = iam_lfix_shift(l, p, iam_lfix_diff(l, q, p) / 2);
299                         assert_corr(p < m && m < q);
300                         if (lfix_keycmp(c, iam_leaf_key_at(m), k) <= 0)
305                 assert_corr(lfix_keycmp(c, iam_leaf_key_at(p), k) <= 0 &&
306                             lfix_keycmp(c, k, iam_leaf_key_at(q)) < 0);
308                  * skip over records with duplicate keys.
310                 while (p > l->il_entries) {
311                         t = iam_lfix_shift(l, p, -1);
312                         if (lfix_keycmp(c, iam_leaf_key_at(t), k) == 0)
319         assert_corr(iam_leaf_at_rec(l));
321         if (lfix_keycmp(c, iam_leaf_key_at(l->il_at), k) == 0)
322                 result = IAM_LOOKUP_EXACT;
/*
 * Index-key lookup. Body largely elided from this view; for lfix the
 * index key shares the leaf key's representation, so this presumably
 * delegates to iam_lfix_lookup() -- confirm against full source.
 */
330 static int iam_lfix_ilookup(struct iam_leaf *l, const struct iam_ikey *ik)
333         return IAM_LOOKUP_OK;
/* Overwrite the key of the entry at the cursor with @k. */
336 static void iam_lfix_key_set(struct iam_leaf *l, const struct iam_key *k)
338         assert_corr(iam_leaf_at_rec(l));
339         memcpy(iam_leaf_key_at(l->il_at), k, iam_leaf_descr(l)->id_key_size);
/* memcmp-order comparison of the key at the cursor against @k. */
342 static int iam_lfix_key_cmp(const struct iam_leaf *l, const struct iam_key *k)
344         return lfix_keycmp(iam_leaf_container(l), iam_leaf_key_at(l->il_at), k);
/* Non-zero iff the key at the cursor equals @k. */
347 static int iam_lfix_key_eq(const struct iam_leaf *l, const struct iam_key *k)
349         return !lfix_keycmp(iam_leaf_container(l),
350                             iam_leaf_key_at(l->il_at), k);
/* Overwrite the record of the entry at the cursor with @r. */
353 static void iam_lfix_rec_set(struct iam_leaf *l, const struct iam_rec *r)
355         assert_corr(iam_leaf_at_rec(l));
356         memcpy(iam_lfix_rec(l), r, iam_leaf_descr(l)->id_rec_size);
/* Copy the record of the entry at the cursor out into @r. */
359 static void iam_lfix_rec_get(const struct iam_leaf *l, struct iam_rec *r)
361         assert_corr(iam_leaf_at_rec(l));
362         memcpy(r, iam_lfix_rec(l), iam_leaf_descr(l)->id_rec_size);
/*
 * Insert a (key, record) pair at the cursor position, keeping the leaf
 * sorted. Entries from the insertion point to the end are shifted one
 * slot right with memmove (regions overlap), then the new key/record are
 * written in place and the count bumped.
 *
 * The visible branches handle the exceptional cursor positions (cursor
 * past the last record; key smaller than the least key, in which case
 * the cursor must be on the first entry). Some cursor adjustments are on
 * elided lines.
 */
365 static void iam_lfix_rec_add(struct iam_leaf *leaf,
366                              const struct iam_key *k, const struct iam_rec *r)
368         struct iam_lentry *end;
369         struct iam_lentry *cur;
370         struct iam_lentry *start;
374         assert_corr(iam_leaf_can_add(leaf, k, r));
376         count = lentry_count_get(leaf);
378          * This branch handles two exceptional cases:
380          * - leaf positioned beyond last record, and
384         if (!iam_leaf_at_end(leaf)) {
385                 end = iam_lfix_get_end(leaf);
387                         if (lfix_keycmp(iam_leaf_container(leaf),
388                                         k, iam_leaf_key_at(cur)) >= 0)
392                          * Another exceptional case: insertion with the key
393                          * less than least key in the leaf.
395                         assert_corr(cur == leaf->il_entries);
                /* shift [start, end) one entry right to open a slot */
398                 diff = (void *)end - (void *)start;
399                 assert_corr(diff >= 0);
400                 memmove(iam_lfix_shift(leaf, start, 1), start, diff);
402         lentry_count_set(leaf, count + 1);
403         iam_lfix_key_set(leaf, k);
404         iam_lfix_rec_set(leaf, r);
405         assert_corr(iam_leaf_at_rec(leaf));
/*
 * Delete the entry at the cursor: slide everything after it one slot
 * left (memmove: overlapping regions) and decrement the count. The
 * @shift parameter's effect is on lines elided from this view.
 */
408 static void iam_lfix_rec_del(struct iam_leaf *leaf, int shift)
410         struct iam_lentry *next, *end;
414         assert_corr(iam_leaf_at_rec(leaf));
416         count = lentry_count_get(leaf);
417         end = iam_lfix_get_end(leaf);
418         next = iam_lfix_shift(leaf, leaf->il_at, 1);
419         diff = (void *)end - (void *)next;
420         memmove(leaf->il_at, next, diff);
422         lentry_count_set(leaf, count - 1);
/*
 * Room for one more entry? Fixed-size records mean capacity depends only
 * on the count, not on @k/@r (which are unused here).
 */
425 static int iam_lfix_can_add(const struct iam_leaf *l,
426                             const struct iam_key *k, const struct iam_rec *r)
428         return lentry_count_get(l) < leaf_count_limit(l);
/* Non-zero iff the cursor sits one past the last entry. */
431 static int iam_lfix_at_end(const struct iam_leaf *folio)
433         return folio->il_at == iam_lfix_get_end(folio);
/* Format a freshly allocated block as an empty lfix leaf (magic, count=0). */
436 static void iam_lfix_init_new(struct iam_container *c, struct buffer_head *bh)
438         struct iam_leaf_head *hdr;
440         hdr = (struct iam_leaf_head*)bh->b_data;
441         hdr->ill_magic = cpu_to_le16(IAM_LEAF_HEADER_MAGIC);
442         hdr->ill_count = cpu_to_le16(0);
/*
 * Split a full leaf: move the upper half of the entries ([split, count))
 * into @new_leaf, shrink the old leaf to @split entries, and insert the
 * pivot (least key of the new leaf) with the new block number into the
 * parent index node. If the cursor was inside the moved range, the leaf
 * is re-targeted at the new block and the cursor re-derived at the same
 * relative offset.
 *
 * NOTE(review): hdr->ill_count is assigned `count - split` without
 * cpu_to_le16() here, unlike every other ill_count store in this file;
 * possibly corrected on an elided line -- verify against full source
 * before flagging.
 */
445 static void iam_lfix_split(struct iam_leaf *l, struct buffer_head **bh,
448         struct iam_path *path;
449         struct iam_leaf_head *hdr;
450         const struct iam_ikey *pivot;
451         struct buffer_head *new_leaf;
460         path = iam_leaf_path(l);
462         hdr = (void *)new_leaf->b_data;
464         count = lentry_count_get(l);
467         start = iam_lfix_shift(l, iam_get_lentries(l), split);
468         finis = iam_lfix_shift(l, iam_get_lentries(l), count);
470         pivot = (const struct iam_ikey *)iam_leaf_key_at(start);
472         memmove(iam_entries(new_leaf), start, finis - start);
473         hdr->ill_count = count - split;
474         lentry_count_set(l, split);
475         if ((void *)l->il_at >= start) {
477                  * insertion point moves into new leaf.
                /* remember cursor offset relative to the moved range ... */
482                 shift = iam_lfix_diff(l, l->il_at, start);
485                 l->il_curidx = new_blknr;
486                 result = iam_lfix_init(l);
488                  * init cannot fail, as node was just initialized.
490                 assert_corr(result == 0);
                /* ... and restore it inside the new leaf */
491                 l->il_at = iam_lfix_shift(l, iam_get_lentries(l), shift);
494          * Insert pointer to the new node (together with the least key in
495          * the node) into index node.
497         iam_insert_key_lock(path, path->ip_frame, pivot, new_blknr);
/*
 * Leaf-level operations vtable for the lfix format, wired into
 * iam_descr->id_leaf_ops by iam_lfix_guess()/iam_lfix_create paths.
 */
500 static struct iam_leaf_operations iam_lfix_leaf_ops = {
501         .init           = iam_lfix_init,
502         .init_new       = iam_lfix_init_new,
503         .fini           = iam_lfix_fini,
504         .start          = iam_lfix_start,
505         .next           = iam_lfix_next,
507         .ikey           = iam_lfix_ikey,
509         .key_set        = iam_lfix_key_set,
510         .key_cmp        = iam_lfix_key_cmp,
511         .key_eq         = iam_lfix_key_eq,
512         .key_size       = iam_lfix_key_size,
513         .rec_set        = iam_lfix_rec_set,
514         .rec_get        = iam_lfix_rec_get,
515         .lookup         = iam_lfix_lookup,
516         .ilookup        = iam_lfix_ilookup,
517         .at_end         = iam_lfix_at_end,
518         .rec_add        = iam_lfix_rec_add,
519         .rec_del        = iam_lfix_rec_del,
520         .can_add        = iam_lfix_can_add,
521         .split          = iam_lfix_split
529 /* This is duplicated in lustre/utils/create_iam.c */
531 * Then shalt thou see the dew-BEDABBLED wretch
532 * Turn, and return, indenting with the way;
533 * Each envious brier his weary legs doth scratch,
534 * Each shadow makes him stop, each murmur stay:
535 * For misery is trodden on by many,
536 * And being low never relieved by any.
538 IAM_LFIX_ROOT_MAGIC = 0xbedabb1edULL // d01efull
541 /* This is duplicated in lustre/utils/create_iam.c */
542 struct iam_lfix_root {
547 u8 ilr_indirect_levels;
551 static __u32 iam_lfix_root_ptr(struct iam_container *c)
556 static int iam_lfix_node_init(struct iam_container *c, struct buffer_head *bh,
/*
 * Grow the tree by one level: re-initialize the root to hold exactly two
 * entries, bump ilr_indirect_levels, and return the slot (frame->at) for
 * the pointer to the newly created child. The second entry's index key is
 * zeroed, making it the minimal possible key.
 */
562 static struct iam_entry *iam_lfix_root_inc(struct iam_container *c,
563                                            struct iam_path *path,
564                                            struct iam_frame *frame)
566         struct iam_lfix_root *root;
567         struct iam_entry *entries;
569         entries = frame->entries;
571         dx_set_count(entries, 2);
572         assert_corr(dx_get_limit(entries) == dx_root_limit(path));
574         root = (void *)frame->bh->b_data;
575         assert_corr(le64_to_cpu(root->ilr_magic) == IAM_LFIX_ROOT_MAGIC);
576         root->ilr_indirect_levels ++;
577         frame->at = entries = iam_entry_shift(path, entries, 1);
        /* minimal key for the new slot: all zeroes */
578         memset(iam_ikey_at(path, entries), 0,
579                iam_path_descr(path)->id_ikey_size);
/*
 * Sanity-check an index node on the lookup path: for the root frame,
 * verify the 64-bit root magic; for any frame, verify the stored limit
 * matches the computed one (root vs. interior limits differ because the
 * root also carries struct iam_lfix_root). Error returns are on elided
 * lines.
 */
583 static int iam_lfix_node_check(struct iam_path *path, struct iam_frame *frame)
587         unsigned limit_correct;
588         struct iam_entry *entries;
590         entries = dx_node_get_entries(path, frame);
592         if (frame == path->ip_frames) {
593                 struct iam_lfix_root *root;
595                 root = (void *)frame->bh->b_data;
596                 if (le64_to_cpu(root->ilr_magic) != IAM_LFIX_ROOT_MAGIC) {
599                 limit_correct = dx_root_limit(path);
601                 limit_correct = dx_node_limit(path);
602         count = dx_get_count(entries);
603         limit = dx_get_limit(entries);
607         if (limit != limit_correct) {
/*
 * Load an index node into a path frame. For the root frame, also pick up
 * the tree depth (ilr_indirect_levels) and default the index-key target
 * to the plain key target (for lfix they share a representation).
 */
613 static int iam_lfix_node_load(struct iam_path *path, struct iam_frame *frame)
615         struct iam_entry *entries;
617         entries = dx_node_get_entries(path, frame);
619         data = frame->bh->b_data;
621         if (frame == path->ip_frames) {
622                 struct iam_lfix_root *root;
625                 path->ip_indirect = root->ilr_indirect_levels;
626                 if (path->ip_ikey_target == NULL)
627                         path->ip_ikey_target =
628                                 (struct iam_ikey *)path->ip_key_target;
630         frame->entries = frame->at = entries;
/* memcmp-order comparison of two fixed-size index keys. */
634 static int iam_lfix_ikeycmp(const struct iam_container *c,
635                             const struct iam_ikey *k1,
636                             const struct iam_ikey *k2)
638         return memcmp(k1, k2, c->ic_descr->id_ikey_size);
/* Allocate a path descriptor sized for this container's index keys. */
641 static struct iam_path_descr *iam_lfix_ipd_alloc(const struct iam_container *c,
644         return iam_ipd_alloc(area, c->ic_descr->id_ikey_size);
/*
 * Container/index-level operations vtable for the lfix format; installed
 * into iam_descr->id_ops by iam_lfix_guess().
 */
647 static struct iam_operations iam_lfix_ops = {
648         .id_root_ptr    = iam_lfix_root_ptr,
649         .id_node_read   = iam_node_read,
650         .id_node_init   = iam_lfix_node_init,
651         .id_node_check  = iam_lfix_node_check,
652         .id_node_load   = iam_lfix_node_load,
653         .id_ikeycmp     = iam_lfix_ikeycmp,
654         .id_root_inc    = iam_lfix_root_inc,
655         .id_ipd_alloc   = iam_lfix_ipd_alloc,
656         .id_ipd_free    = iam_ipd_free,
/*
 * Format-detection hook: read the root block and, if the 64-bit lfix
 * magic matches, populate the container descriptor (key/record/pointer
 * sizes from the on-disk root, ikey size equal to key size, root gap
 * covering struct iam_lfix_root) and install the lfix operation tables.
 * Buffer release and the final result value are on elided lines.
 */
660 static int iam_lfix_guess(struct iam_container *c)
663         struct buffer_head *bh;
664         const struct iam_lfix_root *root;
666         assert_corr(c->ic_object != NULL);
668         result = iam_node_read(c, iam_lfix_root_ptr(c), NULL, &bh);
670                 root = (void *)bh->b_data;
671                 if (le64_to_cpu(root->ilr_magic) == IAM_LFIX_ROOT_MAGIC) {
672                         struct iam_descr *descr;
675                         descr->id_key_size  = le16_to_cpu(root->ilr_keysize);
676                         descr->id_ikey_size = le16_to_cpu(root->ilr_keysize);
677                         descr->id_rec_size  = le16_to_cpu(root->ilr_recsize);
678                         descr->id_ptr_size  = le16_to_cpu(root->ilr_ptrsize);
679                         descr->id_root_gap  = sizeof(struct iam_lfix_root);
680                         descr->id_node_gap  = 0;
681                         descr->id_ops       = &iam_lfix_ops;
682                         descr->id_leaf_ops  = &iam_lfix_leaf_ops;
/* Format descriptor registered with the generic IAM format machinery. */
690 static struct iam_format iam_lfix_format = {
691         .if_guess = iam_lfix_guess
/* Register the lfix format so iam_lfix_guess() runs on container setup. */
694 void iam_lfix_format_init(void)
696         iam_format_register(&iam_lfix_format);
707 #define LFIX_ROOT_RECNO \
708 ((4096 - sizeof(struct iam_lfix_root)) / (KEYSIZE + PTRSIZE))
710 #define LFIX_INDEX_RECNO (4096 / (KEYSIZE + PTRSIZE))
712 #define LFIX_LEAF_RECNO \
713 ((4096 - sizeof(struct iam_leaf_head)) / (KEYSIZE + RECSIZE))
716 struct iam_lfix_root lr_root;
720 } lr_entry[LFIX_ROOT_RECNO];
724 struct dx_countlimit li_cl;
725 char li_padding[KEYSIZE + PTRSIZE - sizeof(struct dx_countlimit)];
729 } li_entry[LFIX_INDEX_RECNO - 1];
733 struct iam_leaf_head ll_head;
737 } ll_entry[LFIX_LEAF_RECNO];
/*
 * Store @val at possibly-unaligned @dst via memcpy (avoids misaligned
 * access UB). CLASSERT enforces that val and *dst have the same size;
 * __val is a local copy so @val is evaluated exactly once.
 */
740 #define STORE_UNALIGNED(val, dst)                                       \
742         typeof(val) __val = (val);                                      \
743         CLASSERT(sizeof(val) == sizeof(*(dst)));                        \
744         memcpy(dst, &__val, sizeof(*(dst)));                            \
/*
 * Format @buf as the root block of a minimal lfix tree: the on-disk root
 * header (magic, key/rec/ptr sizes, zero indirect levels), then a
 * dx_countlimit with count 2 (the limit entry itself plus one pointer to
 * the single leaf), then one entry whose key is all zeroes (the minimal
 * key) and whose pointer -- stored unaligned, width chosen by ptrsize --
 * is block number 1, the leaf created alongside by iam_lfix_create().
 */
747 static void lfix_root(void *buf,
748                       int blocksize, int keysize, int ptrsize, int recsize)
750         struct iam_lfix_root *root;
751         struct dx_countlimit *limit;
755         *root = (typeof(*root)) {
756                 .ilr_magic           = cpu_to_le64(IAM_LFIX_ROOT_MAGIC),
757                 .ilr_keysize         = cpu_to_le16(keysize),
758                 .ilr_recsize         = cpu_to_le16(recsize),
759                 .ilr_ptrsize         = cpu_to_le16(ptrsize),
760                 .ilr_indirect_levels = 0
763         limit = (void *)(root + 1);
764         *limit = (typeof(*limit)){
766                  * limit itself + one pointer to the leaf.
768                 .count = cpu_to_le16(2),
769                 .limit = iam_root_limit(sizeof(struct iam_lfix_root),
770                                         blocksize, keysize + ptrsize)
        /* skip the limit slot; it occupies one entry's worth of space */
777         entry += keysize + ptrsize;
780          * Entry format is <key> followed by <ptr>. In the minimal tree
781          * consisting of a root and single node, <key> is a minimal possible
784          * XXX: this key is hard-coded to be a sequence of 0's.
788         /* now @entry points to <ptr> */
        /* leaf is block 1 (second block appended by iam_lfix_create) */
790                 STORE_UNALIGNED(cpu_to_le32(1), (u_int32_t *)entry);
792                 STORE_UNALIGNED(cpu_to_le64(1), (u_int64_t *)entry);
/*
 * Format @buf as the initial lfix leaf: header with the leaf magic and a
 * count of 1 -- the single pre-existing entry is the all-zero (minimal)
 * key that the root's only pointer refers to. Zeroing of the entry body
 * is presumably on elided lines.
 */
795 static void lfix_leaf(void *buf,
796                       int blocksize, int keysize, int ptrsize, int recsize)
798         struct iam_leaf_head *head;
802         *head = (struct iam_leaf_head) {
803                 .ill_magic = cpu_to_le16(IAM_LEAF_HEADER_MAGIC),
805                  * Leaf contains an entry with the smallest possible key
806                  * (created by zeroing).
808                 .ill_count = cpu_to_le16(1),
/*
 * Create an empty lfix index in (empty) inode @obj under transaction
 * @handle: append two blocks -- block 0 formatted as the root, block 1
 * as the initial leaf -- mark the inode dirty and journal both blocks.
 * On journal error the block falls through to ldiskfs_std_error().
 * Buffer releases and the failure path for a NULL append are on elided
 * lines.
 */
812 int iam_lfix_create(struct inode *obj,
813                     int keysize, int ptrsize, int recsize, handle_t *handle)
815         struct buffer_head *root_node;
816         struct buffer_head *leaf_node;
817         struct super_block *sb;
823         assert_corr(obj->i_size == 0);
826         bsize = sb->s_blocksize;
827         root_node = ldiskfs_append(handle, obj, &blknr, &result);
828         leaf_node = ldiskfs_append(handle, obj, &blknr, &result);
829         if (root_node != NULL && leaf_node != NULL) {
830                 lfix_root(root_node->b_data, bsize, keysize, ptrsize, recsize);
831                 lfix_leaf(leaf_node->b_data, bsize, keysize, ptrsize, recsize);
832                 ldiskfs_mark_inode_dirty(handle, obj);
833                 result = ldiskfs_journal_dirty_metadata(handle, root_node);
835                         result = ldiskfs_journal_dirty_metadata(handle, leaf_node);
837                         ldiskfs_std_error(sb, result);
843 EXPORT_SYMBOL(iam_lfix_create);