/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Whamcloud, Inc.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * iam.c
 * Top-level entry points into iam module
 *
 * Author: Wang Di <wangdi@clusterfs.com>
 * Author: Nikita Danilov <nikita@clusterfs.com>
 */
/*
 * iam: big theory statement.
 *
 * iam (Index Access Module) is a module providing abstraction of persistent
 * transactional container on top of generalized ldiskfs htree.
 *
 * iam supports:
 *
 *     - key, pointer, and record size specifiable per container.
 *
 *     - trees taller than 2 index levels.
 *
 *     - read/write to existing ldiskfs htree directories as iam containers.
 *
 * iam container is a tree, consisting of leaf nodes containing keys and
 * records stored in this container, and index nodes, containing keys and
 * pointers to leaf or index nodes.
 *
 * iam does not work with keys directly; instead it calls user-supplied key
 * comparison function (->dpo_keycmp()).
 *
 * Pointers are (currently) interpreted as logical offsets (measured in
 * blocks) within underlying flat file on top of which iam tree lives.
 *
 * On-disk format:
 *
 * iam mostly tries to reuse existing htree formats.
 *
 * Format of index node:
 *
 * +-----+-------+-------+-------+------+-------+------------+
 * |     | count |       |       |      |       |            |
 * | gap |   /   | entry | entry | .... | entry | free space |
 * |     | limit |       |       |      |       |            |
 * +-----+-------+-------+-------+------+-------+------------+
 *
 * gap           this part of node is never accessed by iam code. It
 *               exists for binary compatibility with ldiskfs htree (that,
 *               in turn, stores fake struct ext2_dirent for ext2
 *               compatibility), and to keep some unspecified per-node
 *               data. Gap can be different for root and non-root index
 *               nodes. Gap size can be specified for each container
 *               (gap of 0 is allowed).
 *
 * count/limit   current number of entries in this node, and the maximal
 *               number of entries that can fit into node. count/limit has
 *               the same size as entry, and is itself counted in entries.
 *
 * entry         index entry: consists of a key immediately followed by
 *               a pointer to a child node. Size of a key and size of a
 *               pointer depends on container. Entry has neither
 *               alignment nor padding.
 *
 * free space    portion of the node that new entries are added to.
 *
 * Entries in index node are sorted by their key value.
 *
 * Format of a leaf node is not specified. Generic iam code accesses leaf
 * nodes through ->id_leaf methods in struct iam_descr.
 */
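/*
 * Example: with the format above, sizes follow directly from container
 * parameters. A sketch (identifiers here are illustrative only, not part of
 * the API):
 *
 *     entry_size = key_size + ptr_size;
 *     limit      = (blocksize - gap - entry_size) / entry_size;
 *     entry_i    = node_base + gap + entry_size * (i + 1);
 *
 * One entry-sized slot is reserved for count/limit (it "has the same size
 * as entry"), and entry_i is the byte offset of the i-th index entry.
 * Compare iam_root_limit() at the bottom of this file, which computes
 * (blocksize - rootgap) / size for the root node.
 */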
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/time.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/smp_lock.h>
#include "osd_internal.h"
/*
 * List of all registered formats.
 *
 * No locking. Callers synchronize.
 */
static CFS_LIST_HEAD(iam_formats);

void iam_format_register(struct iam_format *fmt)
{
        cfs_list_add(&fmt->if_linkage, &iam_formats);
}
EXPORT_SYMBOL(iam_format_register);
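/*
 * A format implementation is expected to register itself from its own
 * initialization function. A hypothetical sketch (iam_lvar_format_init()
 * and iam_lfix_format_init() below do this for the real lvar and lfix
 * formats):
 *
 *     static int my_format_guess(struct iam_container *c);
 *
 *     static struct iam_format my_format = {
 *             .if_guess = my_format_guess,
 *     };
 *
 *     void my_format_init(void)
 *     {
 *             iam_format_register(&my_format);
 *     }
 *
 * where my_format_guess() inspects the container's root node and, on
 * success, fills in c->ic_descr.
 */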
/*
 * Determine format of given container. This is done by scanning list of
 * registered formats and calling ->if_guess() method of each in turn.
 */
static int iam_format_guess(struct iam_container *c)
{
        int result;
        struct iam_format *fmt;

        /* XXX temporary initialization hook. */
        static int initialized = 0;

        if (!initialized) {
                iam_lvar_format_init();
                iam_lfix_format_init();
                initialized = 1;
        }

        result = -ENOENT;
        cfs_list_for_each_entry(fmt, &iam_formats, if_linkage) {
                result = fmt->if_guess(c);
                if (result == 0)
                        break;
        }
        return result;
}
/*
 * Initialize container @c.
 */
int iam_container_init(struct iam_container *c,
                       struct iam_descr *descr, struct inode *inode)
{
        memset(c, 0, sizeof *c);
        c->ic_descr  = descr;
        c->ic_object = inode;
        cfs_init_rwsem(&c->ic_sem);
        return 0;
}
EXPORT_SYMBOL(iam_container_init);

/*
 * Determine container format.
 */
int iam_container_setup(struct iam_container *c)
{
        return iam_format_guess(c);
}
EXPORT_SYMBOL(iam_container_setup);
/*
 * Finalize container @c, release all resources.
 */
void iam_container_fini(struct iam_container *c)
{
        brelse(c->ic_root_bh);
        c->ic_root_bh = NULL;
}
EXPORT_SYMBOL(iam_container_fini);
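/*
 * Typical container life cycle (a sketch; error handling omitted):
 *
 *     struct iam_container c;
 *
 *     iam_container_init(&c, descr, inode);
 *     iam_container_setup(&c);    -- guesses the on-disk format
 *     ...                         -- lookups, insertions, deletions
 *     iam_container_fini(&c);
 */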
void iam_path_init(struct iam_path *path, struct iam_container *c,
                   struct iam_path_descr *pd)
{
        memset(path, 0, sizeof *path);
        path->ip_container = c;
        path->ip_frame = path->ip_frames;
        path->ip_data = pd;
        path->ip_leaf.il_path = path;
}
static void iam_leaf_fini(struct iam_leaf *leaf);

void iam_path_release(struct iam_path *path)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(path->ip_frames); i++) {
                if (path->ip_frames[i].bh != NULL) {
                        brelse(path->ip_frames[i].bh);
                        path->ip_frames[i].bh = NULL;
                }
        }
}

void iam_path_fini(struct iam_path *path)
{
        iam_leaf_fini(&path->ip_leaf);
        iam_path_release(path);
}
void iam_path_compat_init(struct iam_path_compat *path, struct inode *inode)
{
        int i;

        path->ipc_hinfo = &path->ipc_hinfo_area;
        for (i = 0; i < ARRAY_SIZE(path->ipc_scratch); ++i)
                path->ipc_descr.ipd_key_scratch[i] =
                        (struct iam_ikey *)&path->ipc_scratch[i];

        iam_path_init(&path->ipc_path, &path->ipc_container, &path->ipc_descr);
}

void iam_path_compat_fini(struct iam_path_compat *path)
{
        iam_path_fini(&path->ipc_path);
}
/*
 * Helper function initializing iam_path_descr and its key scratch area.
 */
struct iam_path_descr *iam_ipd_alloc(void *area, int keysize)
{
        struct iam_path_descr *ipd;
        void *karea;
        int i;

        ipd = area;
        karea = ipd + 1;
        for (i = 0; i < ARRAY_SIZE(ipd->ipd_key_scratch); ++i, karea += keysize)
                ipd->ipd_key_scratch[i] = karea;
        return ipd;
}
EXPORT_SYMBOL(iam_ipd_alloc);

void iam_ipd_free(struct iam_path_descr *ipd)
{
}
EXPORT_SYMBOL(iam_ipd_free);
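/*
 * Usage sketch: the caller supplies an area large enough for the descriptor
 * itself plus ARRAY_SIZE(ipd->ipd_key_scratch) keys, e.g.:
 *
 *     char area[sizeof(struct iam_path_descr) + N_SCRATCH * keysize];
 *     struct iam_path_descr *ipd;
 *
 *     ipd = iam_ipd_alloc(area, keysize);
 *     ...
 *     iam_ipd_free(ipd);
 *
 * (N_SCRATCH is a stand-in for the scratch-key array size; the real constant
 * is defined together with struct iam_path_descr.)
 */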
int iam_node_read(struct iam_container *c, iam_ptr_t ptr,
                  handle_t *h, struct buffer_head **bh)
{
        int result = 0;

        /* NB: it can be called by iam_lfix_guess() which is still at
         * very early stage, c->ic_root_bh and c->ic_descr->id_ops
         * haven't been initialized yet.
         * Also, we don't have this for IAM dir.
         */
        if (c->ic_root_bh != NULL &&
            c->ic_descr->id_ops->id_root_ptr(c) == ptr) {
                get_bh(c->ic_root_bh);
                *bh = c->ic_root_bh;
                return 0;
        }

        *bh = ldiskfs_bread(h, c->ic_object, (int)ptr, 0, &result);
        if (*bh == NULL)
                result = -EIO;
        return result;
}
/*
 * Return pointer to current leaf record. Pointer is valid while corresponding
 * leaf node is locked and pinned.
 */
static struct iam_rec *iam_leaf_rec(const struct iam_leaf *leaf)
{
        return iam_leaf_ops(leaf)->rec(leaf);
}

/*
 * Return pointer to the current leaf key. This function returns pointer to
 * the key stored in node.
 *
 * Caller should assume that returned pointer is only valid while leaf node is
 * pinned and locked.
 */
static struct iam_key *iam_leaf_key(const struct iam_leaf *leaf)
{
        return iam_leaf_ops(leaf)->key(leaf);
}

static int iam_leaf_key_size(const struct iam_leaf *leaf)
{
        return iam_leaf_ops(leaf)->key_size(leaf);
}

static struct iam_ikey *iam_leaf_ikey(const struct iam_leaf *leaf,
                                      struct iam_ikey *key)
{
        return iam_leaf_ops(leaf)->ikey(leaf, key);
}

static int iam_leaf_keycmp(const struct iam_leaf *leaf,
                           const struct iam_key *key)
{
        return iam_leaf_ops(leaf)->key_cmp(leaf, key);
}

static int iam_leaf_keyeq(const struct iam_leaf *leaf,
                          const struct iam_key *key)
{
        return iam_leaf_ops(leaf)->key_eq(leaf, key);
}
#if LDISKFS_INVARIANT_ON
static int iam_leaf_check(struct iam_leaf *leaf);
extern int dx_node_check(struct iam_path *p, struct iam_frame *f);

static int iam_path_check(struct iam_path *p)
{
        int i;
        int result;
        struct iam_frame *f;
        struct iam_descr *param;

        result = 1;
        param = iam_path_descr(p);
        for (i = 0; result && i < ARRAY_SIZE(p->ip_frames); ++i) {
                f = &p->ip_frames[i];
                if (f->bh != NULL) {
                        result = dx_node_check(p, f);
                        if (result)
                                result = !param->id_ops->id_node_check(p, f);
                }
        }
        if (result && p->ip_leaf.il_bh != NULL)
                result = iam_leaf_check(&p->ip_leaf);
        if (result == 0)
                ldiskfs_std_error(iam_path_obj(p)->i_sb, result);
        return result;
}
#endif
static int iam_leaf_load(struct iam_path *path)
{
        iam_ptr_t block;
        int err;
        struct iam_container *c;
        struct buffer_head   *bh;
        struct iam_leaf      *leaf;
        struct iam_descr     *descr;

        c     = path->ip_container;
        leaf  = &path->ip_leaf;
        descr = iam_path_descr(path);
        block = path->ip_frame->leaf;
        if (block == 0) {
                /* XXX bug 11027 */
                printk(CFS_KERN_EMERG "wrong leaf: %lu %d [%p %p %p]\n",
                       (long unsigned)path->ip_frame->leaf,
                       dx_get_count(dx_node_get_entries(path, path->ip_frame)),
                       path->ip_frames[0].bh, path->ip_frames[1].bh,
                       path->ip_frames[2].bh);
        }
        err = descr->id_ops->id_node_read(c, block, NULL, &bh);
        if (err == 0) {
                leaf->il_bh = bh;
                leaf->il_curidx = block;
                err = iam_leaf_ops(leaf)->init(leaf);
                assert_inv(ergo(err == 0, iam_leaf_check(leaf)));
        }
        return err;
}
static void iam_unlock_htree(struct inode *dir, struct dynlock_handle *lh)
{
        if (lh != NULL)
                dynlock_unlock(&LDISKFS_I(dir)->i_htree_lock, lh);
}

static void iam_leaf_unlock(struct iam_leaf *leaf)
{
        if (leaf->il_lock != NULL) {
                iam_unlock_htree(iam_leaf_container(leaf)->ic_object,
                                 leaf->il_lock);
                do_corr(schedule());
                leaf->il_lock = NULL;
        }
}

static void iam_leaf_fini(struct iam_leaf *leaf)
{
        if (leaf->il_path != NULL) {
                iam_leaf_unlock(leaf);
                assert_inv(ergo(leaf->il_bh != NULL, iam_leaf_check(leaf)));
                iam_leaf_ops(leaf)->fini(leaf);
                if (leaf->il_bh) {
                        brelse(leaf->il_bh);
                        leaf->il_bh = NULL;
                        leaf->il_curidx = 0;
                }
        }
}
static void iam_leaf_start(struct iam_leaf *folio)
{
        iam_leaf_ops(folio)->start(folio);
}

void iam_leaf_next(struct iam_leaf *folio)
{
        iam_leaf_ops(folio)->next(folio);
}

static void iam_leaf_rec_add(struct iam_leaf *leaf, const struct iam_key *key,
                             const struct iam_rec *rec)
{
        iam_leaf_ops(leaf)->rec_add(leaf, key, rec);
}

static void iam_rec_del(struct iam_leaf *leaf, int shift)
{
        iam_leaf_ops(leaf)->rec_del(leaf, shift);
}

int iam_leaf_at_end(const struct iam_leaf *leaf)
{
        return iam_leaf_ops(leaf)->at_end(leaf);
}

void iam_leaf_split(struct iam_leaf *l, struct buffer_head **bh, iam_ptr_t nr)
{
        iam_leaf_ops(l)->split(l, bh, nr);
}

int iam_leaf_can_add(const struct iam_leaf *l,
                     const struct iam_key *k, const struct iam_rec *r)
{
        return iam_leaf_ops(l)->can_add(l, k, r);
}
#if LDISKFS_INVARIANT_ON
static int iam_leaf_check(struct iam_leaf *leaf)
{
        return 1;
#if 0
        struct iam_lentry    *orig;
        struct iam_path      *path;
        struct iam_container *bag;
        struct iam_ikey      *k0;
        struct iam_ikey      *k1;
        int result;
        int first;

        orig = leaf->il_at;
        path = iam_leaf_path(leaf);
        bag  = iam_leaf_container(leaf);

        result = iam_leaf_ops(leaf)->init(leaf);
        if (result != 0)
                return result;

        first = 1;
        iam_leaf_start(leaf);
        k0 = iam_path_ikey(path, 0);
        k1 = iam_path_ikey(path, 1);
        while (!iam_leaf_at_end(leaf)) {
                iam_ikeycpy(bag, k0, k1);
                iam_ikeycpy(bag, k1, iam_leaf_ikey(leaf, k1));
                if (!first && iam_ikeycmp(bag, k0, k1) > 0)
                        return 0;
                first = 0;
                iam_leaf_next(leaf);
        }
        leaf->il_at = orig;
        return 1;
#endif
}
#endif
static int iam_txn_dirty(handle_t *handle,
                         struct iam_path *path, struct buffer_head *bh)
{
        int result;

        result = ldiskfs_journal_dirty_metadata(handle, bh);
        if (result != 0)
                ldiskfs_std_error(iam_path_obj(path)->i_sb, result);
        return result;
}

static int iam_txn_add(handle_t *handle,
                       struct iam_path *path, struct buffer_head *bh)
{
        int result;

        result = ldiskfs_journal_get_write_access(handle, bh);
        if (result != 0)
                ldiskfs_std_error(iam_path_obj(path)->i_sb, result);
        return result;
}
/***********************************************************************/
/* iterator interface                                                  */
/***********************************************************************/
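/*
 * Iterator life cycle (a sketch of the state machine implemented below):
 *
 *     DETACHED --iam_it_get()--> ATTACHED (on a record) or
 *                                SKEWED   (just before a record)
 *     SKEWED   --iam_it_next()--> ATTACHED
 *     ATTACHED --iam_it_next()--> ATTACHED, or DETACHED at container end
 *     any      --iam_it_put()---> DETACHED
 */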
static enum iam_it_state it_state(const struct iam_iterator *it)
{
        return it->ii_state;
}

/*
 * Helper function returning the container an iterator is attached to.
 */
static struct iam_container *iam_it_container(const struct iam_iterator *it)
{
        return it->ii_path.ip_container;
}

static inline int it_keycmp(const struct iam_iterator *it,
                            const struct iam_key *k)
{
        return iam_leaf_keycmp(&it->ii_path.ip_leaf, k);
}

static inline int it_keyeq(const struct iam_iterator *it,
                           const struct iam_key *k)
{
        return iam_leaf_keyeq(&it->ii_path.ip_leaf, k);
}

static int it_ikeycmp(const struct iam_iterator *it, const struct iam_ikey *ik)
{
        return iam_ikeycmp(it->ii_path.ip_container,
                           iam_leaf_ikey(&it->ii_path.ip_leaf,
                                         iam_path_ikey(&it->ii_path, 0)), ik);
}

static inline int it_at_rec(const struct iam_iterator *it)
{
        return !iam_leaf_at_end(&it->ii_path.ip_leaf);
}

static inline int it_before(const struct iam_iterator *it)
{
        return it_state(it) == IAM_IT_SKEWED && it_at_rec(it);
}
/*
 * Helper wrapper around iam_it_get(): returns 0 (success) only when record
 * with exactly the same key as asked is found.
 */
static int iam_it_get_exact(struct iam_iterator *it, const struct iam_key *k)
{
        int result;

        result = iam_it_get(it, k);
        if (result > 0)
                result = 0;
        else if (result == 0)
                /*
                 * Return -ENOENT if cursor is located above record with a key
                 * different from one specified, or in the empty leaf.
                 *
                 * XXX returning -ENOENT only works if iam_it_get() never
                 * returns -ENOENT as a legitimate error.
                 */
                result = -ENOENT;
        return result;
}
void iam_container_write_lock(struct iam_container *ic)
{
        cfs_down_write(&ic->ic_sem);
}

void iam_container_write_unlock(struct iam_container *ic)
{
        cfs_up_write(&ic->ic_sem);
}

void iam_container_read_lock(struct iam_container *ic)
{
        cfs_down_read(&ic->ic_sem);
}

void iam_container_read_unlock(struct iam_container *ic)
{
        cfs_up_read(&ic->ic_sem);
}
/*
 * Initialize iterator to IAM_IT_DETACHED state.
 *
 * postcondition: it_state(it) == IAM_IT_DETACHED
 */
int iam_it_init(struct iam_iterator *it, struct iam_container *c, __u32 flags,
                struct iam_path_descr *pd)
{
        memset(it, 0, sizeof *it);
        it->ii_flags = flags;
        it->ii_state = IAM_IT_DETACHED;
        iam_path_init(&it->ii_path, c, pd);
        return 0;
}
EXPORT_SYMBOL(iam_it_init);

/*
 * Finalize iterator and release all resources.
 *
 * precondition: it_state(it) == IAM_IT_DETACHED
 */
void iam_it_fini(struct iam_iterator *it)
{
        assert_corr(it_state(it) == IAM_IT_DETACHED);
        iam_path_fini(&it->ii_path);
}
EXPORT_SYMBOL(iam_it_fini);
/*
 * These locking primitives are used to protect parts of a directory's htree.
 * The protection unit is a block: a leaf or an index node.
 */
struct dynlock_handle *iam_lock_htree(struct inode *dir, unsigned long value,
                                      enum dynlock_type lt)
{
        return dynlock_lock(&LDISKFS_I(dir)->i_htree_lock, value, lt, GFP_NOFS);
}

int iam_index_lock(struct iam_path *path, struct dynlock_handle **lh)
{
        struct iam_frame *f;

        for (f = path->ip_frame; f >= path->ip_frames; --f, ++lh) {
                do_corr(schedule());
                *lh = iam_lock_htree(iam_path_obj(path), f->curidx, DLT_READ);
                if (*lh == NULL)
                        return -ENOMEM;
        }
        return 0;
}
/*
 * Fast check for frame consistency.
 */
static int iam_check_fast(struct iam_path *path, struct iam_frame *frame)
{
        struct iam_container *bag;
        struct iam_entry *next;
        struct iam_entry *last;
        struct iam_entry *entries;
        struct iam_entry *at;

        bag     = path->ip_container;
        at      = frame->at;
        entries = frame->entries;
        last    = iam_entry_shift(path, entries, dx_get_count(entries) - 1);

        if (unlikely(at > last))
                return -EAGAIN;

        if (unlikely(dx_get_block(path, at) != frame->leaf))
                return -EAGAIN;

        if (unlikely(iam_ikeycmp(bag, iam_ikey_at(path, at),
                                 path->ip_ikey_target) > 0))
                return -EAGAIN;

        next = iam_entry_shift(path, at, +1);
        if (next <= last) {
                if (unlikely(iam_ikeycmp(bag, iam_ikey_at(path, next),
                                         path->ip_ikey_target) <= 0))
                        return -EAGAIN;
        }
        return 0;
}
int dx_index_is_compat(struct iam_path *path)
{
        return iam_path_descr(path) == NULL;
}

/*
 * Search position of specified hash in index.
 */
struct iam_entry *iam_find_position(struct iam_path *path,
                                    struct iam_frame *frame)
{
        int count;
        struct iam_entry *p;
        struct iam_entry *q;
        struct iam_entry *m;

        count = dx_get_count(frame->entries);
        assert_corr(count && count <= dx_get_limit(frame->entries));
        p = iam_entry_shift(path, frame->entries,
                            dx_index_is_compat(path) ? 1 : 2);
        q = iam_entry_shift(path, frame->entries, count - 1);
        while (p <= q) {
                m = iam_entry_shift(path, p, iam_entry_diff(path, q, p) / 2);
                if (iam_ikeycmp(path->ip_container, iam_ikey_at(path, m),
                                path->ip_ikey_target) > 0)
                        q = iam_entry_shift(path, m, -1);
                else
                        p = iam_entry_shift(path, m, +1);
        }
        return iam_entry_shift(path, p, -1);
}

static iam_ptr_t iam_find_ptr(struct iam_path *path, struct iam_frame *frame)
{
        return dx_get_block(path, iam_find_position(path, frame));
}
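/*
 * iam_find_position() is a plain binary search over the sorted entry array:
 * it returns the last entry whose key is not greater than the target, e.g.
 * searching for 25 among keys {10, 20, 30} yields the entry with key 20,
 * whose subtree covers [20, 30). The search starts past the count/limit
 * slot (and, in non-compat containers, past the leftmost entry, whose key
 * is implicit).
 */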
void iam_insert_key(struct iam_path *path, struct iam_frame *frame,
                    const struct iam_ikey *key, iam_ptr_t ptr)
{
        struct iam_entry *entries = frame->entries;
        struct iam_entry *new = iam_entry_shift(path, frame->at, +1);
        int count = dx_get_count(entries);

        /*
         * Unfortunately we cannot assert this, as this function is sometimes
         * called by VFS under i_sem and without pdirops lock.
         */
        assert_corr(1 || iam_frame_is_locked(path, frame));
        assert_corr(count < dx_get_limit(entries));
        assert_corr(frame->at < iam_entry_shift(path, entries, count));
        assert_inv(dx_node_check(path, frame));

        memmove(iam_entry_shift(path, new, 1), new,
                (char *)iam_entry_shift(path, entries, count) - (char *)new);
        dx_set_ikey(path, new, key);
        dx_set_block(path, new, ptr);
        dx_set_count(entries, count + 1);
        assert_inv(dx_node_check(path, frame));
}

void iam_insert_key_lock(struct iam_path *path, struct iam_frame *frame,
                         const struct iam_ikey *key, iam_ptr_t ptr)
{
        iam_lock_bh(frame->bh);
        iam_insert_key(path, frame, key, ptr);
        iam_unlock_bh(frame->bh);
}
/*
 * Returns 0 if path was unchanged, -EAGAIN otherwise.
 */
static int iam_check_path(struct iam_path *path, struct iam_frame *frame)
{
        int equal;

        iam_lock_bh(frame->bh);
        equal = iam_check_fast(path, frame) == 0 ||
                frame->leaf == iam_find_ptr(path, frame);
        DX_DEVAL(iam_lock_stats.dls_bh_again += !equal);
        iam_unlock_bh(frame->bh);

        return equal ? 0 : -EAGAIN;
}
static int iam_lookup_try(struct iam_path *path)
{
        u32 ptr;
        int err = 0;
        int i;

        struct iam_descr *param;
        struct iam_frame *frame;
        struct iam_container *c;

        param = iam_path_descr(path);
        c = path->ip_container;

        ptr = param->id_ops->id_root_ptr(c);
        for (frame = path->ip_frames, i = 0; i <= path->ip_indirect;
             ++frame, ++i) {
                err = param->id_ops->id_node_read(c, (iam_ptr_t)ptr, NULL,
                                                  &frame->bh);
                do_corr(schedule());

                iam_lock_bh(frame->bh);
                /*
                 * node must be initialized under bh lock because concurrent
                 * creation procedure may change it and iam_lookup_try() will
                 * see obsolete tree height. -bzzz
                 */
                if (err != 0)
                        break;

                if (LDISKFS_INVARIANT_ON) {
                        err = param->id_ops->id_node_check(path, frame);
                        if (err != 0)
                                break;
                }

                err = param->id_ops->id_node_load(path, frame);
                if (err != 0)
                        break;

                assert_inv(dx_node_check(path, frame));
                /*
                 * splitting may change root index block and move hash we're
                 * looking for into another index block, so we have to check
                 * this situation and repeat from beginning if path got changed
                 * -bzzz
                 */
                if (i > 0) {
                        err = iam_check_path(path, frame - 1);
                        if (err != 0)
                                break;
                }

                frame->at = iam_find_position(path, frame);
                frame->curidx = ptr;
                frame->leaf = ptr = dx_get_block(path, frame->at);

                iam_unlock_bh(frame->bh);
                do_corr(schedule());
        }
        if (err != 0)
                iam_unlock_bh(frame->bh);
        path->ip_frame = --frame;
        return err;
}
static int __iam_path_lookup(struct iam_path *path)
{
        int err;
        int i;

        for (i = 0; i < DX_MAX_TREE_HEIGHT; ++i)
                assert(path->ip_frames[i].bh == NULL);

        do {
                err = iam_lookup_try(path);
                do_corr(schedule());
                if (err != 0)
                        iam_path_fini(path);
        } while (err == -EAGAIN);
        return err;
}
/*
 * Returns 0 if path was unchanged, -EAGAIN otherwise.
 */
static int iam_check_full_path(struct iam_path *path, int search)
{
        struct iam_frame *bottom;
        struct iam_frame *scan;
        int i;
        int result;

        do_corr(schedule());

        for (bottom = path->ip_frames, i = 0;
             i < DX_MAX_TREE_HEIGHT && bottom->bh != NULL; ++bottom, ++i) {
                ; /* find last filled in frame */
        }

        /*
         * Lock frames, bottom to top.
         */
        for (scan = bottom - 1; scan >= path->ip_frames; --scan)
                iam_lock_bh(scan->bh);
        /*
         * Check them top to bottom.
         */
        result = 0;
        for (scan = path->ip_frames; scan < bottom; ++scan) {
                struct iam_entry *pos;

                if (search) {
                        if (iam_check_fast(path, scan) == 0)
                                continue;

                        pos = iam_find_position(path, scan);
                        if (scan->leaf != dx_get_block(path, pos)) {
                                result = -EAGAIN;
                                break;
                        }
                        scan->at = pos;
                } else {
                        pos = iam_entry_shift(path, scan->entries,
                                              dx_get_count(scan->entries) - 1);
                        if (scan->at > pos ||
                            scan->leaf != dx_get_block(path, scan->at)) {
                                result = -EAGAIN;
                                break;
                        }
                }
        }

        /*
         * Unlock top to bottom.
         */
        for (scan = path->ip_frames; scan < bottom; ++scan)
                iam_unlock_bh(scan->bh);
        DX_DEVAL(iam_lock_stats.dls_bh_full_again += !!result);
        do_corr(schedule());

        return result;
}
/*
 * Performs path lookup and returns with found leaf (if any) locked by htree
 * lock.
 */
int iam_lookup_lock(struct iam_path *path,
                    struct dynlock_handle **dl, enum dynlock_type lt)
{
        int result;
        struct inode *dir;

        dir = iam_path_obj(path);
        while ((result = __iam_path_lookup(path)) == 0) {
                do_corr(schedule());
                *dl = iam_lock_htree(dir, path->ip_frame->leaf, lt);
                if (*dl == NULL) {
                        iam_path_fini(path);
                        result = -ENOMEM;
                        break;
                }
                do_corr(schedule());
                /*
                 * while locking leaf we just found may get split so we need
                 * to check this -bzzz
                 */
                if (iam_check_full_path(path, 1) == 0)
                        break;
                iam_unlock_htree(dir, *dl);
                *dl = NULL;
                iam_path_fini(path);
        }
        return result;
}
/*
 * Performs tree top-to-bottom traversal starting from root, and loads leaf
 * node.
 */
static int iam_path_lookup(struct iam_path *path, int index)
{
        struct iam_container *c;
        struct iam_descr *descr;
        struct iam_leaf *leaf;
        int result;

        c = path->ip_container;
        leaf = &path->ip_leaf;
        descr = iam_path_descr(path);
        result = iam_lookup_lock(path, &leaf->il_lock, DLT_WRITE);
        assert_inv(iam_path_check(path));
        do_corr(schedule());
        if (result == 0) {
                result = iam_leaf_load(path);
                assert_inv(ergo(result == 0, iam_leaf_check(leaf)));
                if (result == 0) {
                        do_corr(schedule());
                        if (index)
                                result = iam_leaf_ops(leaf)->
                                        ilookup(leaf, path->ip_ikey_target);
                        else
                                result = iam_leaf_ops(leaf)->
                                        lookup(leaf, path->ip_key_target);
                        do_corr(schedule());
                }
                if (result < 0)
                        iam_leaf_unlock(leaf);
        }
        return result;
}
/*
 * Common part of iam_it_{i,}get().
 */
static int __iam_it_get(struct iam_iterator *it, int index)
{
        int result;

        assert_corr(it_state(it) == IAM_IT_DETACHED);

        result = iam_path_lookup(&it->ii_path, index);
        if (result >= 0) {
                int collision;

                collision = result & IAM_LOOKUP_LAST;
                switch (result & ~IAM_LOOKUP_LAST) {
                case IAM_LOOKUP_EXACT:
                        result = +1;
                        it->ii_state = IAM_IT_ATTACHED;
                        break;
                case IAM_LOOKUP_OK:
                        result = 0;
                        it->ii_state = IAM_IT_ATTACHED;
                        break;
                case IAM_LOOKUP_BEFORE:
                case IAM_LOOKUP_EMPTY:
                        result = 0;
                        it->ii_state = IAM_IT_SKEWED;
                        break;
                default:
                        assert(0);
                }
                result |= collision;
        }
        /*
         * See iam_it_get_exact() for explanation.
         */
        assert_corr(result != -ENOENT);
        return result;
}
/*
 * Correct hash, but not the same key was found; iterate through the hash
 * collision chain, looking for the correct record.
 */
static int iam_it_collision(struct iam_iterator *it)
{
        int result;

        assert(ergo(it_at_rec(it), !it_keyeq(it, it->ii_path.ip_key_target)));

        while ((result = iam_it_next(it)) == 0) {
                do_corr(schedule());
                if (it_ikeycmp(it, it->ii_path.ip_ikey_target) != 0)
                        return -ENOENT;
                if (it_keyeq(it, it->ii_path.ip_key_target))
                        return 0;
        }
        return result;
}
/*
 * Attach iterator. After successful completion, @it points to record with
 * least key not larger than @k.
 *
 * Return value: 0: positioned on existing record,
 *             +ve: exact position found,
 *             -ve: error.
 *
 * precondition:  it_state(it) == IAM_IT_DETACHED
 * postcondition: ergo(result == 0 && it_state(it) == IAM_IT_ATTACHED,
 *                     it_keycmp(it, k) <= 0)
 */
int iam_it_get(struct iam_iterator *it, const struct iam_key *k)
{
        int result;

        assert_corr(it_state(it) == IAM_IT_DETACHED);

        it->ii_path.ip_ikey_target = NULL;
        it->ii_path.ip_key_target  = k;

        result = __iam_it_get(it, 0);

        if (result == IAM_LOOKUP_LAST) {
                result = iam_it_collision(it);
                if (result != 0) {
                        iam_it_put(it);
                        iam_it_fini(it);
                        result = __iam_it_get(it, 0);
                } else
                        result = +1;
        }
        if (result > 0)
                result &= ~IAM_LOOKUP_LAST;

        assert_corr(ergo(result > 0, it_keycmp(it, k) == 0));
        assert_corr(ergo(result == 0 && it_state(it) == IAM_IT_ATTACHED,
                         it_keycmp(it, k) <= 0));
        return result;
}
EXPORT_SYMBOL(iam_it_get);
/*
 * Attach iterator by index key.
 */
static int iam_it_iget(struct iam_iterator *it, const struct iam_ikey *k)
{
        assert_corr(it_state(it) == IAM_IT_DETACHED);

        it->ii_path.ip_ikey_target = k;
        return __iam_it_get(it, 1) & ~IAM_LOOKUP_LAST;
}

/*
 * Attach iterator, and assure it points to the record (not skewed).
 *
 * Return value: 0: positioned on existing record,
 *             +ve: exact position found,
 *             -ve: error.
 *
 * precondition:  it_state(it) == IAM_IT_DETACHED &&
 *                !(it->ii_flags&IAM_IT_WRITE)
 * postcondition: ergo(result == 0, it_state(it) == IAM_IT_ATTACHED)
 */
int iam_it_get_at(struct iam_iterator *it, const struct iam_key *k)
{
        int result;

        assert_corr(it_state(it) == IAM_IT_DETACHED &&
                    !(it->ii_flags&IAM_IT_WRITE));
        result = iam_it_get(it, k);
        if (result == 0) {
                if (it_state(it) != IAM_IT_ATTACHED) {
                        assert_corr(it_state(it) == IAM_IT_SKEWED);
                        result = iam_it_next(it);
                }
        }
        assert_corr(ergo(result >= 0, it_state(it) == IAM_IT_ATTACHED));
        return result;
}
EXPORT_SYMBOL(iam_it_get_at);
/*
 * Duplicates iterator.
 *
 * postcondition: it_state(dst) == it_state(src) &&
 *                iam_it_container(dst) == iam_it_container(src) &&
 *                dst->ii_flags == src->ii_flags &&
 *                ergo(it_state(src) == IAM_IT_ATTACHED,
 *                     iam_it_rec_get(dst) == iam_it_rec_get(src) &&
 *                     iam_it_key_get(dst) == iam_it_key_get(src))
 */
void iam_it_dup(struct iam_iterator *dst, const struct iam_iterator *src)
{
        dst->ii_flags = src->ii_flags;
        dst->ii_state = src->ii_state;
        /* XXX not yet. iam_path_dup(&dst->ii_path, &src->ii_path); */
        /*
         * XXX: duplicate lock.
         */
        assert_corr(it_state(dst) == it_state(src));
        assert_corr(iam_it_container(dst) == iam_it_container(src));
        assert_corr(dst->ii_flags == src->ii_flags);
        assert_corr(ergo(it_state(src) == IAM_IT_ATTACHED,
                         iam_it_rec_get(dst) == iam_it_rec_get(src) &&
                         iam_it_key_get(dst) == iam_it_key_get(src)));
}
/*
 * Detach iterator. Does nothing in detached state.
 *
 * postcondition: it_state(it) == IAM_IT_DETACHED
 */
void iam_it_put(struct iam_iterator *it)
{
        if (it->ii_state != IAM_IT_DETACHED) {
                it->ii_state = IAM_IT_DETACHED;
                iam_leaf_fini(&it->ii_path.ip_leaf);
        }
}
EXPORT_SYMBOL(iam_it_put);

static struct iam_ikey *iam_it_ikey_get(const struct iam_iterator *it,
                                        struct iam_ikey *ikey);
/*
 * This function increments the frame pointer to search the next leaf
 * block, and reads in the necessary intervening nodes if the search
 * should be necessary. Whether or not the search is necessary is
 * controlled by the hash parameter. If the hash value is even, then
 * the search is only continued if the next block starts with that
 * hash value. This is used if we are searching for a specific file.
 *
 * If the hash value is HASH_NB_ALWAYS, then always go to the next block.
 *
 * This function returns 1 if the caller should continue to search,
 * or 0 if it should not. If there is an error reading one of the
 * index blocks, it will return a negative error code.
 *
 * If start_hash is non-null, it will be filled in with the starting
 * hash of the next page.
 */
static int iam_htree_advance(struct inode *dir, __u32 hash,
                             struct iam_path *path, __u32 *start_hash,
                             int compat)
{
        struct iam_frame *p;
        struct buffer_head *bh;
        int err, num_frames = 0;
        __u32 bhash;

        p = path->ip_frame;
        /*
         * Find the next leaf page by incrementing the frame pointer.
         * If we run out of entries in the interior node, loop around and
         * increment pointer in the parent node. When we break out of
         * this loop, num_frames indicates the number of interior
         * nodes that need to be read.
         */
        while (1) {
                do_corr(schedule());
                iam_lock_bh(p->bh);
                p->at = iam_entry_shift(path, p->at, +1);
                if (p->at < iam_entry_shift(path, p->entries,
                                            dx_get_count(p->entries))) {
                        p->leaf = dx_get_block(path, p->at);
                        iam_unlock_bh(p->bh);
                        break;
                }
                iam_unlock_bh(p->bh);
                if (p == path->ip_frames)
                        return 0;
                num_frames++;
                --p;
        }

        if (compat) {
                /*
                 * If the hash is 1, then continue only if the next page has a
                 * continuation hash of any value. This is used for readdir
                 * handling. Otherwise, check to see if the hash matches the
                 * desired continuation hash. If it doesn't, return since
                 * there's no point to read in the successive index pages.
                 */
                dx_get_ikey(path, p->at, (struct iam_ikey *)&bhash);
                if (start_hash)
                        *start_hash = bhash;
                if ((hash & 1) == 0) {
                        if ((bhash & ~1) != hash)
                                return 0;
                }
        }
        /*
         * If the hash is HASH_NB_ALWAYS, we always go to the next
         * block so no check is necessary
         */
        while (num_frames--) {
                iam_ptr_t idx;

                do_corr(schedule());
                iam_lock_bh(p->bh);
                idx = p->leaf = dx_get_block(path, p->at);
                iam_unlock_bh(p->bh);
                err = iam_path_descr(path)->id_ops->
                        id_node_read(path->ip_container, idx, NULL, &bh);
                if (err != 0)
                        return err; /* Failure */
                ++p;
                brelse(p->bh);
                assert_corr(p->bh != bh);
                p->bh = bh;
                p->entries = dx_node_get_entries(path, p);
                p->at = iam_entry_shift(path, p->entries, !compat);
                assert_corr(p->curidx != idx);
                p->curidx = idx;
                iam_lock_bh(p->bh);
                assert_corr(p->leaf != dx_get_block(path, p->at));
                p->leaf = dx_get_block(path, p->at);
                iam_unlock_bh(p->bh);
                assert_inv(dx_node_check(path, p));
        }
        return 1;
}
static inline int iam_index_advance(struct iam_path *path)
{
        return iam_htree_advance(iam_path_obj(path), 0, path, NULL, 0);
}

static void iam_unlock_array(struct inode *dir, struct dynlock_handle **lh)
{
        int i;

        for (i = 0; i < DX_MAX_TREE_HEIGHT; ++i, ++lh) {
                if (*lh != NULL) {
                        iam_unlock_htree(dir, *lh);
                        *lh = NULL;
                }
        }
}
/*
 * Advance index part of @path to point to the next leaf. Returns 1 on
 * success, 0 when the end of the container is reached. Leaf node is locked.
 */
int iam_index_next(struct iam_container *c, struct iam_path *path)
{
        iam_ptr_t cursor;
        struct dynlock_handle *lh[DX_MAX_TREE_HEIGHT] = { 0, };
        int result;
        struct inode *object;

        /*
         * Locking for iam_index_next()... is to be described.
         */

        object = c->ic_object;
        cursor = path->ip_frame->leaf;

        while (1) {
                result = iam_index_lock(path, lh);
                do_corr(schedule());
                if (result < 0)
                        break;

                result = iam_check_full_path(path, 0);
                if (result == 0 && cursor == path->ip_frame->leaf) {
                        result = iam_index_advance(path);

                        assert_corr(result == 0 ||
                                    cursor != path->ip_frame->leaf);

                        break;
                }
                do {
                        iam_unlock_array(object, lh);

                        iam_path_release(path);
                        do_corr(schedule());

                        result = __iam_path_lookup(path);
                        if (result < 0)
                                break;

                        while (path->ip_frame->leaf != cursor) {
                                do_corr(schedule());

                                result = iam_index_lock(path, lh);
                                do_corr(schedule());
                                if (result < 0)
                                        break;

                                result = iam_check_full_path(path, 0);
                                if (result != 0)
                                        break;

                                result = iam_index_advance(path);
                                if (result == 0) {
                                        CERROR("cannot find cursor : %u\n",
                                               cursor);
                                        result = -EIO;
                                }
                                if (result < 0)
                                        break;
                                result = iam_check_full_path(path, 0);
                                if (result != 0)
                                        break;
                                iam_unlock_array(object, lh);
                        }
                } while (result == -EAGAIN);
                if (result < 0)
                        break;
        }
        iam_unlock_array(object, lh);
        return result;
}
/*
 * Move iterator one record right.
 *
 * Return value: 0: success,
 *              +1: end of container reached
 *             -ve: error
 *
 * precondition:  (it_state(it) == IAM_IT_ATTACHED ||
 *                 it_state(it) == IAM_IT_SKEWED) && it->ii_flags&IAM_IT_MOVE
 * postcondition: ergo(result == 0, it_state(it) == IAM_IT_ATTACHED) &&
 *                ergo(result >  0, it_state(it) == IAM_IT_DETACHED)
 */
int iam_it_next(struct iam_iterator *it)
{
        int result;
        struct iam_path *path;
        struct iam_leaf *leaf;
        struct inode *obj;
        do_corr(struct iam_ikey *ik_orig);

        /* assert_corr(it->ii_flags&IAM_IT_MOVE); */
        assert_corr(it_state(it) == IAM_IT_ATTACHED ||
                    it_state(it) == IAM_IT_SKEWED);

        path = &it->ii_path;
        leaf = &path->ip_leaf;
        obj  = iam_path_obj(path);

        assert_corr(iam_leaf_is_locked(leaf));

        result = 0;
        do_corr(ik_orig = it_at_rec(it) ?
                iam_it_ikey_get(it, iam_path_ikey(path, 2)) : NULL);
        if (it_before(it)) {
                assert_corr(!iam_leaf_at_end(leaf));
                it->ii_state = IAM_IT_ATTACHED;
        } else {
                if (!iam_leaf_at_end(leaf))
                        /* advance within leaf node */
                        iam_leaf_next(leaf);
                /*
                 * multiple iterations may be necessary due to empty leaves.
                 */
                while (result == 0 && iam_leaf_at_end(leaf)) {
                        do_corr(schedule());
                        /* advance index portion of the path */
                        result = iam_index_next(iam_it_container(it), path);
                        assert_corr(iam_leaf_is_locked(leaf));
                        if (result == 1) {
                                struct dynlock_handle *lh;

                                lh = iam_lock_htree(obj, path->ip_frame->leaf,
                                                    DLT_WRITE);
                                if (lh != NULL) {
                                        iam_leaf_fini(leaf);
                                        leaf->il_lock = lh;

                                        result = iam_leaf_load(path);
                                        if (result == 0)
                                                iam_leaf_start(leaf);
                                } else
                                        result = -ENOMEM;
                        } else if (result == 0)
                                /* end of container reached */
                                result = +1;
                        if (result != 0)
                                iam_it_put(it);
                        if (result < 0)
                                break;
                }
                if (result == 0)
                        it->ii_state = IAM_IT_ATTACHED;
        }
        assert_corr(ergo(result == 0, it_state(it) == IAM_IT_ATTACHED));
        assert_corr(ergo(result >  0, it_state(it) == IAM_IT_DETACHED));
        assert_corr(ergo(result == 0 && ik_orig != NULL,
                         it_ikeycmp(it, ik_orig) >= 0));
        return result;
}
EXPORT_SYMBOL(iam_it_next);
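/*
 * The iterator calls compose in the obvious way. A sketch of a scan
 * starting at key @k (error handling elided; use() is a placeholder for
 * whatever the caller does with a record):
 *
 *     struct iam_iterator it;
 *
 *     iam_it_init(&it, c, IAM_IT_MOVE, pd);
 *     if (iam_it_get_at(&it, k) >= 0) {
 *             do {
 *                     use(iam_it_key_get(&it), iam_it_rec_get(&it));
 *             } while (iam_it_next(&it) == 0);
 *     }
 *     iam_it_put(&it);
 *     iam_it_fini(&it);
 */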
/*
 * Return pointer to the record under iterator.
 *
 * precondition:  it_state(it) == IAM_IT_ATTACHED && it_at_rec(it)
 * postcondition: it_state(it) == IAM_IT_ATTACHED
 */
struct iam_rec *iam_it_rec_get(const struct iam_iterator *it)
{
        assert_corr(it_state(it) == IAM_IT_ATTACHED);
        assert_corr(it_at_rec(it));
        return iam_leaf_rec(&it->ii_path.ip_leaf);
}
EXPORT_SYMBOL(iam_it_rec_get);

static void iam_it_reccpy(struct iam_iterator *it, const struct iam_rec *r)
{
        struct iam_leaf *folio;

        folio = &it->ii_path.ip_leaf;
        iam_leaf_ops(folio)->rec_set(folio, r);
}
/*
 * Replace contents of record under iterator.
 *
 * precondition:  it_state(it) == IAM_IT_ATTACHED &&
 *                it->ii_flags&IAM_IT_WRITE
 * postcondition: it_state(it) == IAM_IT_ATTACHED &&
 *                ergo(result == 0, !memcmp(iam_it_rec_get(it), r, ...))
 */
int iam_it_rec_set(handle_t *h,
                   struct iam_iterator *it, const struct iam_rec *r)
{
        int result;
        struct iam_path *path;
        struct buffer_head *bh;

        assert_corr(it_state(it) == IAM_IT_ATTACHED &&
                    it->ii_flags&IAM_IT_WRITE);
        assert_corr(it_at_rec(it));

        path = &it->ii_path;
        bh = path->ip_leaf.il_bh;
        result = iam_txn_add(h, path, bh);
        if (result == 0) {
                iam_it_reccpy(it, r);
                result = iam_txn_dirty(h, path, bh);
        }
        return result;
}
EXPORT_SYMBOL(iam_it_rec_set);
/*
 * Return pointer to the index key under iterator.
 *
 * precondition:  it_state(it) == IAM_IT_ATTACHED ||
 *                it_state(it) == IAM_IT_SKEWED
 */
static struct iam_ikey *iam_it_ikey_get(const struct iam_iterator *it,
                                        struct iam_ikey *ikey)
{
        assert_corr(it_state(it) == IAM_IT_ATTACHED ||
                    it_state(it) == IAM_IT_SKEWED);
        assert_corr(it_at_rec(it));
        return iam_leaf_ikey(&it->ii_path.ip_leaf, ikey);
}

/*
 * Return pointer to the key under iterator.
 *
 * precondition:  it_state(it) == IAM_IT_ATTACHED ||
 *                it_state(it) == IAM_IT_SKEWED
 */
struct iam_key *iam_it_key_get(const struct iam_iterator *it)
{
        assert_corr(it_state(it) == IAM_IT_ATTACHED ||
                    it_state(it) == IAM_IT_SKEWED);
        assert_corr(it_at_rec(it));
        return iam_leaf_key(&it->ii_path.ip_leaf);
}
EXPORT_SYMBOL(iam_it_key_get);

/*
 * Return size of key under iterator (in bytes).
 *
 * precondition:  it_state(it) == IAM_IT_ATTACHED ||
 *                it_state(it) == IAM_IT_SKEWED
 */
int iam_it_key_size(const struct iam_iterator *it)
{
        assert_corr(it_state(it) == IAM_IT_ATTACHED ||
                    it_state(it) == IAM_IT_SKEWED);
        assert_corr(it_at_rec(it));
        return iam_leaf_key_size(&it->ii_path.ip_leaf);
}
EXPORT_SYMBOL(iam_it_key_size);
/*
 * Insertion of a new record. Interaction with jbd during the non-trivial
 * case (when a split happens) is as follows:
 *
 * - new leaf node is involved into transaction by ldiskfs_append();
 *
 * - old leaf node is involved into transaction by iam_add_rec();
 *
 * - leaf where insertion point ends in, is marked dirty by iam_add_rec();
 *
 * - leaf without insertion point is marked dirty (as @new_leaf) by
 *   iam_new_leaf();
 *
 * - split index nodes are involved into transaction and marked dirty by
 *   split_index_node().
 *
 * - "safe" index node, which is not split, but where the new pointer is
 *   inserted, is involved into transaction and marked dirty by
 *   split_index_node().
 *
 * - index node where pointer to new leaf is inserted is involved into
 *   transaction by split_index_node() and marked dirty by iam_add_rec().
 *
 * - inode is marked dirty by iam_add_rec().
 */

static int iam_new_leaf(handle_t *handle, struct iam_leaf *leaf)
{
        int err;
        iam_ptr_t blknr;
        struct buffer_head *new_leaf;
        struct buffer_head *old_leaf;
        struct iam_container *c;
        struct inode *obj;
        struct iam_path *path;

        assert_inv(iam_leaf_check(leaf));

        c = iam_leaf_container(leaf);
        path = leaf->il_path;

        obj = c->ic_object;
        new_leaf = ldiskfs_append(handle, obj, (__u32 *)&blknr, &err);
        do_corr(schedule());
        if (new_leaf != NULL) {
                struct dynlock_handle *lh;

                lh = iam_lock_htree(obj, blknr, DLT_WRITE);
                do_corr(schedule());
                if (lh != NULL) {
                        iam_leaf_ops(leaf)->init_new(c, new_leaf);
                        do_corr(schedule());
                        old_leaf = leaf->il_bh;
                        iam_leaf_split(leaf, &new_leaf, blknr);
                        if (old_leaf != leaf->il_bh) {
                                /*
                                 * Switched to the new leaf.
                                 */
                                iam_leaf_unlock(leaf);
                                leaf->il_lock = lh;
                                path->ip_frame->leaf = blknr;
                        } else
                                iam_unlock_htree(obj, lh);
                        do_corr(schedule());
                        err = iam_txn_dirty(handle, path, new_leaf);
                        if (err == 0)
                                err = ldiskfs_mark_inode_dirty(handle, obj);
                        do_corr(schedule());
                } else
                        err = -ENOMEM;
                brelse(new_leaf);
        }
        assert_inv(iam_leaf_check(leaf));
        assert_inv(iam_leaf_check(&iam_leaf_path(leaf)->ip_leaf));
        assert_inv(iam_path_check(iam_leaf_path(leaf)));
        return err;
}
static inline void dx_set_limit(struct iam_entry *entries, unsigned value)
{
        ((struct dx_countlimit *) entries)->limit = cpu_to_le16(value);
}

static int iam_shift_entries(struct iam_path *path,
                             struct iam_frame *frame, unsigned count,
                             struct iam_entry *entries, struct iam_entry *entries2,
                             u32 newblock)
{
        unsigned count1;
        unsigned count2;
        int delta;

        struct iam_frame *parent = frame - 1;
        struct iam_ikey *pivot = iam_path_ikey(path, 3);

        delta = dx_index_is_compat(path) ? 0 : +1;

        count1 = count/2 + delta;
        count2 = count - count1;
        dx_get_ikey(path, iam_entry_shift(path, entries, count1), pivot);

        dxtrace(printk("Split index %d/%d\n", count1, count2));

        memcpy((char *) iam_entry_shift(path, entries2, delta),
               (char *) iam_entry_shift(path, entries, count1),
               count2 * iam_entry_size(path));

        dx_set_count(entries2, count2 + delta);
        dx_set_limit(entries2, dx_node_limit(path));

        /*
         * NOTE: very subtle piece of code: competing dx_probe() may find 2nd
         * level index in root index, then we insert new index here and set
         * new count in that 2nd level index. so, dx_probe() may see 2nd level
         * index w/o hash it looks for. the solution is to check root index
         * after we locked just found 2nd level index -bzzz
         */
        iam_insert_key_lock(path, parent, pivot, newblock);

        /*
         * now old and new 2nd level index blocks contain all pointers, so
         * dx_probe() may find it in the both. it's OK -bzzz
         */
        iam_lock_bh(frame->bh);
        dx_set_count(entries, count1);
        iam_unlock_bh(frame->bh);

        /*
         * now old 2nd level index block points to first half of leafs. it's
         * important that dx_probe() must check root index block for changes
         * under dx_lock_bh(frame->bh) -bzzz
         */

        return count1;
}
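/*
 * Worked example: count == 100 in a non-compat container (delta == +1)
 * gives count1 == 51 and count2 == 49. Entries [51, 100) move into the new
 * block starting at slot 1 (slot 0 becomes its count/limit entry, so its
 * count is set to 50), the pivot key of entry 51 is inserted into the
 * parent next to the pointer to the new block, and the old block keeps
 * count1 == 51 entries.
 */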
int split_index_node(handle_t *handle, struct iam_path *path,
                     struct dynlock_handle **lh)
{
        struct iam_entry *entries;      /* old block contents */
        struct iam_entry *entries2;     /* new block contents */
        struct iam_frame *frame, *safe;
        struct buffer_head *bh_new[DX_MAX_TREE_HEIGHT] = {0};
        u32 newblock[DX_MAX_TREE_HEIGHT] = {0};
        struct dynlock_handle *lock[DX_MAX_TREE_HEIGHT] = {NULL,};
        struct dynlock_handle *new_lock[DX_MAX_TREE_HEIGHT] = {NULL,};
        struct inode *dir = iam_path_obj(path);
        struct iam_descr *descr;
        int nr_splet;
        int i, err;

        descr = iam_path_descr(path);
        /*
         * Algorithm below depends on this.
         */
        assert_corr(dx_root_limit(path) < dx_node_limit(path));

        frame = path->ip_frame;
        entries = frame->entries;

        /*
         * Tall-tree handling: we might have to split multiple index blocks
         * all the way up to tree root. Tricky point here is error handling:
         * to avoid complicated undo/rollback we
         *
         *   - first allocate all necessary blocks
         *
         *   - insert pointers into them atomically.
         */

        /*
         * Locking: leaf is already locked. htree-locks are acquired on all
         * index nodes that require split bottom-to-top, on the "safe" node,
         * and on all new nodes
         */

        dxtrace(printk("using %u of %u node entries\n",
                       dx_get_count(entries), dx_get_limit(entries)));

        /* What levels need split? */
        for (nr_splet = 0; frame >= path->ip_frames &&
             dx_get_count(frame->entries) == dx_get_limit(frame->entries);
             --frame, ++nr_splet) {
                do_corr(schedule());
                if (nr_splet == DX_MAX_TREE_HEIGHT) {
                        CWARN(dir->i_sb, __FUNCTION__,
                              "Directory index full!\n");
                        err = -ENOSPC;
                        goto cleanup;
                }
        }

        safe = frame;

        /*
         * Lock all nodes, bottom to top.
         */
        for (frame = path->ip_frame, i = nr_splet; i >= 0; --i, --frame) {
                do_corr(schedule());
                lock[i] = iam_lock_htree(dir, frame->curidx, DLT_WRITE);
                if (lock[i] == NULL) {
                        err = -ENOMEM;
                        goto cleanup;
                }
        }

        /*
         * Check for concurrent index modification.
         */
        err = iam_check_full_path(path, 1);
        if (err)
                goto cleanup;
        /*
         * And check that the same number of nodes is to be split.
         */
        for (i = 0, frame = path->ip_frame; frame >= path->ip_frames &&
             dx_get_count(frame->entries) == dx_get_limit(frame->entries);
             --frame, ++i)
                ;
        if (i != nr_splet) {
                err = -EAGAIN;
                goto cleanup;
        }

        /* Go back down, allocating blocks, locking them, and adding into
         * transaction... */
        for (frame = safe + 1, i = 0; i < nr_splet; ++i, ++frame) {
                bh_new[i] = ldiskfs_append(handle, dir, &newblock[i], &err);
                do_corr(schedule());
                if (bh_new[i] == NULL ||
                    descr->id_ops->id_node_init(path->ip_container,
                                                bh_new[i], 0) != 0)
                        goto cleanup;
                new_lock[i] = iam_lock_htree(dir, newblock[i], DLT_WRITE);
                if (new_lock[i] == NULL) {
                        err = -ENOMEM;
                        goto cleanup;
                }
                do_corr(schedule());
                BUFFER_TRACE(frame->bh, "get_write_access");
                err = ldiskfs_journal_get_write_access(handle, frame->bh);
                if (err)
                        goto journal_error;
        }
        /* Add "safe" node to transaction too */
        if (safe + 1 != path->ip_frames) {
                do_corr(schedule());
                err = ldiskfs_journal_get_write_access(handle, safe->bh);
                if (err)
                        goto journal_error;
        }

        /* Go through nodes once more, inserting pointers */
        for (frame = safe + 1, i = 0; i < nr_splet; ++i, ++frame) {
                unsigned count;
                int idx;
                struct buffer_head *bh2;
                struct buffer_head *bh;

                entries = frame->entries;
                count = dx_get_count(entries);
                idx = iam_entry_diff(path, frame->at, entries);

                bh2 = bh_new[i];
                entries2 = dx_get_entries(path, bh2->b_data, 0);

                bh = frame->bh;
                if (frame == path->ip_frames) {
                        /* splitting root node. Tricky point:
                         *
                         * In the "normal" B-tree we'd split root *and* add
                         * new root to the tree with pointers to the old root
                         * and its sibling (thus introducing two new nodes).
                         *
                         * In htree it's enough to add one node, because
                         * capacity of the root node is smaller than that of
                         * non-root one.
                         */
                        struct iam_frame *frames;
                        struct iam_entry *next;

                        assert_corr(i == 0);

                        do_corr(schedule());

                        frames = path->ip_frames;
                        memcpy((char *) entries2, (char *) entries,
                               count * iam_entry_size(path));
                        dx_set_limit(entries2, dx_node_limit(path));

                        /* Set up root */
                        iam_lock_bh(frame->bh);
                        next = descr->id_ops->id_root_inc(path->ip_container,
                                                          path, frame);
                        dx_set_block(path, next, newblock[0]);
                        iam_unlock_bh(frame->bh);

                        do_corr(schedule());
                        /* Shift frames in the path */
                        memmove(frames + 2, frames + 1,
                                (sizeof path->ip_frames) - 2 * sizeof frames[0]);
                        /* Add new access path frame */
                        frames[1].at = iam_entry_shift(path, entries2, idx);
                        frames[1].entries = entries = entries2;
                        frames[1].bh = bh2;
                        assert_inv(dx_node_check(path, frame));
                        ++path->ip_frame;
                        ++frame;
                        assert_inv(dx_node_check(path, frame));
                        bh_new[0] = NULL; /* buffer head is "consumed" */
                        err = ldiskfs_journal_get_write_access(handle, bh2);
                        if (err)
                                goto journal_error;
                        do_corr(schedule());
                } else {
                        /* splitting non-root index node. */
                        struct iam_frame *parent = frame - 1;

                        do_corr(schedule());
                        count = iam_shift_entries(path, frame, count,
                                                  entries, entries2, newblock[i]);
                        /* Which index block gets the new entry? */
                        if (idx >= count) {
                                int d = dx_index_is_compat(path) ? 0 : +1;

                                frame->at = iam_entry_shift(path, entries2,
                                                            idx - count + d);
                                frame->entries = entries = entries2;
                                frame->curidx = newblock[i];
                                swap(frame->bh, bh2);
                                assert_corr(lock[i + 1] != NULL);
                                assert_corr(new_lock[i] != NULL);
                                swap(lock[i + 1], new_lock[i]);
                                bh_new[i] = bh2;
                                parent->at = iam_entry_shift(path,
                                                             parent->at, +1);
                        }
                        assert_inv(dx_node_check(path, frame));
                        assert_inv(dx_node_check(path, parent));
                        dxtrace(dx_show_index("node", frame->entries));
                        dxtrace(dx_show_index("node",
                                ((struct dx_node *) bh2->b_data)->entries));
                        err = ldiskfs_journal_dirty_metadata(handle, bh2);
                        if (err)
                                goto journal_error;
                        do_corr(schedule());
                        err = ldiskfs_journal_dirty_metadata(handle, parent->bh);
                        if (err)
                                goto journal_error;
                }
                do_corr(schedule());
                err = ldiskfs_journal_dirty_metadata(handle, bh);
                if (err)
                        goto journal_error;
        }
        /*
         * This function was called to make insertion of new leaf
         * possible. Check that it fulfilled its obligations.
         */
        assert_corr(dx_get_count(path->ip_frame->entries) <
                    dx_get_limit(path->ip_frame->entries));
        assert_corr(lock[nr_splet] != NULL);
        *lh = lock[nr_splet];
        lock[nr_splet] = NULL;
        if (nr_splet > 0) {
                /*
                 * Log ->i_size modification.
                 */
                err = ldiskfs_mark_inode_dirty(handle, dir);
                if (err)
                        goto journal_error;
        }
        goto cleanup;
journal_error:
        ldiskfs_std_error(dir->i_sb, err);

cleanup:
        iam_unlock_array(dir, lock);
        iam_unlock_array(dir, new_lock);

        assert_corr(err || iam_frame_is_locked(path, path->ip_frame));

        do_corr(schedule());
        for (i = 0; i < ARRAY_SIZE(bh_new); ++i) {
                if (bh_new[i] != NULL)
                        brelse(bh_new[i]);
        }
        return err;
}
static int iam_add_rec(handle_t *handle, struct iam_iterator *it,
                       struct iam_path *path,
                       const struct iam_key *k, const struct iam_rec *r)
{
        int err;
        struct iam_leaf *leaf;

        leaf = &path->ip_leaf;
        assert_inv(iam_leaf_check(leaf));
        assert_inv(iam_path_check(path));
        err = iam_txn_add(handle, path, leaf->il_bh);
        if (err == 0) {
                do_corr(schedule());
                if (!iam_leaf_can_add(leaf, k, r)) {
                        struct dynlock_handle *lh = NULL;

                        do {
                                assert_corr(lh == NULL);
                                do_corr(schedule());
                                err = split_index_node(handle, path, &lh);
                                if (err == -EAGAIN) {
                                        assert_corr(lh == NULL);

                                        iam_path_fini(path);
                                        it->ii_state = IAM_IT_DETACHED;

                                        do_corr(schedule());
                                        err = iam_it_get_exact(it, k);
                                        if (err == -ENOENT)
                                                err = +1; /* repeat split */
                                        else if (err == 0)
                                                err = -EEXIST;
                                }
                        } while (err > 0);

                        assert_inv(iam_path_check(path));
                        if (err == 0) {
                                assert_corr(lh != NULL);
                                do_corr(schedule());
                                err = iam_new_leaf(handle, leaf);
                                if (err == 0)
                                        err = iam_txn_dirty(handle, path,
                                                            path->ip_frame->bh);
                        }
                        iam_unlock_htree(iam_path_obj(path), lh);
                        do_corr(schedule());
                }
                if (err == 0) {
                        iam_leaf_rec_add(leaf, k, r);
                        err = iam_txn_dirty(handle, path, leaf->il_bh);
                }
        }
        assert_inv(iam_leaf_check(leaf));
        assert_inv(iam_leaf_check(&path->ip_leaf));
        assert_inv(iam_path_check(path));
        return err;
}
/*
 * Insert new record with key @k and contents from @r, shifting records to the
 * right. On success, iterator is positioned on the newly inserted record.
 *
 * precondition: it->ii_flags&IAM_IT_WRITE &&
 *               (it_state(it) == IAM_IT_ATTACHED ||
 *                it_state(it) == IAM_IT_SKEWED) &&
 *               ergo(it_state(it) == IAM_IT_ATTACHED,
 *                    it_keycmp(it, k) <= 0) &&
 *               ergo(it_before(it), it_keycmp(it, k) > 0));
 * postcondition: ergo(result == 0,
 *                     it_state(it) == IAM_IT_ATTACHED &&
 *                     it_keycmp(it, k) == 0 &&
 *                     !memcmp(iam_it_rec_get(it), r, ...))
 */
int iam_it_rec_insert(handle_t *h, struct iam_iterator *it,
                      const struct iam_key *k, const struct iam_rec *r)
{
        int result;
        struct iam_path *path;

        path = &it->ii_path;

        assert_corr(it->ii_flags&IAM_IT_WRITE);
        assert_corr(it_state(it) == IAM_IT_ATTACHED ||
                    it_state(it) == IAM_IT_SKEWED);
        assert_corr(ergo(it_state(it) == IAM_IT_ATTACHED,
                         it_keycmp(it, k) <= 0));
        assert_corr(ergo(it_before(it), it_keycmp(it, k) > 0));
        result = iam_add_rec(h, it, path, k, r);
        if (result == 0)
                it->ii_state = IAM_IT_ATTACHED;
        assert_corr(ergo(result == 0,
                         it_state(it) == IAM_IT_ATTACHED &&
                         it_keycmp(it, k) == 0));
        return result;
}
EXPORT_SYMBOL(iam_it_rec_insert);
/*
 * Delete record under iterator.
 *
 * precondition:  it_state(it) == IAM_IT_ATTACHED &&
 *                it->ii_flags&IAM_IT_WRITE &&
 *                it_at_rec(it)
 * postcondition: it_state(it) == IAM_IT_ATTACHED ||
 *                it_state(it) == IAM_IT_DETACHED
 */
int iam_it_rec_delete(handle_t *h, struct iam_iterator *it)
{
        int result;
        struct iam_leaf *leaf;
        struct iam_path *path;

        assert_corr(it_state(it) == IAM_IT_ATTACHED &&
                    it->ii_flags&IAM_IT_WRITE);
        assert_corr(it_at_rec(it));

        path = &it->ii_path;
        leaf = &path->ip_leaf;

        assert_inv(iam_leaf_check(leaf));
        assert_inv(iam_path_check(path));

        result = iam_txn_add(h, path, leaf->il_bh);
        /*
         * no compaction for now.
         */
        if (result == 0) {
                iam_rec_del(leaf, it->ii_flags&IAM_IT_MOVE);
                result = iam_txn_dirty(h, path, leaf->il_bh);
                if (result == 0 && iam_leaf_at_end(leaf) &&
                    it->ii_flags&IAM_IT_MOVE) {
                        result = iam_it_next(it);
                        if (result > 0)
                                result = 0;
                }
        }
        assert_inv(iam_leaf_check(leaf));
        assert_inv(iam_path_check(path));
        assert_corr(it_state(it) == IAM_IT_ATTACHED ||
                    it_state(it) == IAM_IT_DETACHED);
        return result;
}
EXPORT_SYMBOL(iam_it_rec_delete);
/*
 * Convert iterator to cookie.
 *
 * precondition:  it_state(it) == IAM_IT_ATTACHED &&
 *                iam_path_descr(it->ii_path)->id_key_size <= sizeof(iam_pos_t)
 * postcondition: it_state(it) == IAM_IT_ATTACHED
 */
iam_pos_t iam_it_store(const struct iam_iterator *it)
{
        iam_pos_t result;

        assert_corr(it_state(it) == IAM_IT_ATTACHED);
        assert_corr(it_at_rec(it));
        assert_corr(iam_it_container(it)->ic_descr->id_ikey_size <=
                    sizeof result);

        result = 0;
        return *(iam_pos_t *)iam_it_ikey_get(it, (void *)&result);
}
EXPORT_SYMBOL(iam_it_store);

/*
 * Restore iterator from cookie.
 *
 * precondition:  it_state(it) == IAM_IT_DETACHED && it->ii_flags&IAM_IT_MOVE &&
 *                iam_path_descr(it->ii_path)->id_key_size <= sizeof(iam_pos_t)
 * postcondition: ergo(result == 0, it_state(it) == IAM_IT_ATTACHED &&
 *                                  iam_it_store(it) == pos)
 */
int iam_it_load(struct iam_iterator *it, iam_pos_t pos)
{
        assert_corr(it_state(it) == IAM_IT_DETACHED &&
                    it->ii_flags&IAM_IT_MOVE);
        assert_corr(iam_it_container(it)->ic_descr->id_ikey_size <= sizeof pos);
        return iam_it_iget(it, (struct iam_ikey *)&pos);
}
EXPORT_SYMBOL(iam_it_load);
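/*
 * Together iam_it_store()/iam_it_load() support readdir-style resumable
 * scans: the cookie is simply the record's index key widened to iam_pos_t.
 * Sketch (assumes the container's ikey fits into iam_pos_t, as the
 * preconditions above require):
 *
 *     iam_pos_t pos = iam_it_store(&it);   -- remember position
 *     iam_it_put(&it);
 *     ...                                  -- later, possibly much later
 *     result = iam_it_load(&it, pos);      -- reattach at the same ikey
 */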
/***********************************************************************/
/* invariants                                                          */
/***********************************************************************/

static inline int ptr_inside(void *base, size_t size, void *ptr)
{
        return (base <= ptr) && (ptr < base + size);
}

int iam_frame_invariant(struct iam_frame *f)
{
        return (f->bh != NULL &&
                f->bh->b_data != NULL &&
                ptr_inside(f->bh->b_data, f->bh->b_size, f->entries) &&
                ptr_inside(f->bh->b_data, f->bh->b_size, f->at) &&
                f->entries <= f->at);
}

int iam_leaf_invariant(struct iam_leaf *l)
{
        return
                l->il_bh != NULL &&
                l->il_bh->b_data != NULL &&
                ptr_inside(l->il_bh->b_data, l->il_bh->b_size, l->il_entries) &&
                ptr_inside(l->il_bh->b_data, l->il_bh->b_size, l->il_at) &&
                l->il_entries <= l->il_at;
}

int iam_path_invariant(struct iam_path *p)
{
        int i;

        if (p->ip_container == NULL ||
            p->ip_indirect < 0 || p->ip_indirect > DX_MAX_TREE_HEIGHT - 1 ||
            p->ip_frame != p->ip_frames + p->ip_indirect ||
            !iam_leaf_invariant(&p->ip_leaf))
                return 0;
        for (i = 0; i < ARRAY_SIZE(p->ip_frames); ++i) {
                if (i <= p->ip_indirect) {
                        if (!iam_frame_invariant(&p->ip_frames[i]))
                                return 0;
                }
        }
        return 1;
}

int iam_it_invariant(struct iam_iterator *it)
{
        return
                (it->ii_state == IAM_IT_DETACHED ||
                 it->ii_state == IAM_IT_ATTACHED ||
                 it->ii_state == IAM_IT_SKEWED) &&
                !(it->ii_flags & ~(IAM_IT_MOVE | IAM_IT_WRITE)) &&
                ergo(it->ii_state == IAM_IT_ATTACHED ||
                     it->ii_state == IAM_IT_SKEWED,
                     iam_path_invariant(&it->ii_path) &&
                     equi(it_at_rec(it), it->ii_state == IAM_IT_SKEWED));
}
/*
 * Search container @c for record with key @k. If record is found, its data
 * are moved into @r.
 *
 * Return values: 0: found, -ENOENT: not-found, -ve: error
 */
int iam_lookup(struct iam_container *c, const struct iam_key *k,
               struct iam_rec *r, struct iam_path_descr *pd)
{
        struct iam_iterator it;
        int result;

        iam_it_init(&it, c, 0, pd);

        result = iam_it_get_exact(&it, k);
        if (result == 0)
                /*
                 * record with required key found, copy it into user buffer
                 */
                iam_reccpy(&it.ii_path.ip_leaf, r);
        iam_it_put(&it);
        iam_it_fini(&it);
        return result;
}
EXPORT_SYMBOL(iam_lookup);
/*
 * Insert new record @r with key @k into container @c (within context of
 * transaction @h).
 *
 * Return values: 0: success, -ve: error, including -EEXIST when record with
 * given key is already present.
 *
 * postcondition: ergo(result == 0 || result == -EEXIST,
 *                     iam_lookup(c, k, r2) > 0;
 */
int iam_insert(handle_t *h, struct iam_container *c, const struct iam_key *k,
               const struct iam_rec *r, struct iam_path_descr *pd)
{
        struct iam_iterator it;
        int result;

        iam_it_init(&it, c, IAM_IT_WRITE, pd);

        result = iam_it_get_exact(&it, k);
        if (result == -ENOENT)
                result = iam_it_rec_insert(h, &it, k, r);
        else if (result == 0)
                result = -EEXIST;
        iam_it_put(&it);
        iam_it_fini(&it);
        return result;
}
EXPORT_SYMBOL(iam_insert);
/*
 * Update record with the key @k in container @c (within context of
 * transaction @h), new record is given by @r.
 *
 * Return values: 0: success, -ve: error, including -ENOENT if no record with
 * the given key found.
 */
int iam_update(handle_t *h, struct iam_container *c, const struct iam_key *k,
               const struct iam_rec *r, struct iam_path_descr *pd)
{
        struct iam_iterator it;
        int result;

        iam_it_init(&it, c, IAM_IT_WRITE, pd);

        result = iam_it_get_exact(&it, k);
        if (result == 0)
                iam_it_rec_set(h, &it, r);
        iam_it_put(&it);
        iam_it_fini(&it);
        return result;
}
EXPORT_SYMBOL(iam_update);
/*
 * Delete existing record with key @k.
 *
 * Return values: 0: success, -ENOENT: not-found, -ve: other error.
 *
 * postcondition: ergo(result == 0 || result == -ENOENT,
 *                     !iam_lookup(c, k, *));
 */
int iam_delete(handle_t *h, struct iam_container *c, const struct iam_key *k,
               struct iam_path_descr *pd)
{
        struct iam_iterator it;
        int result;

        iam_it_init(&it, c, IAM_IT_WRITE, pd);

        result = iam_it_get_exact(&it, k);
        if (result == 0)
                iam_it_rec_delete(h, &it);
        iam_it_put(&it);
        iam_it_fini(&it);
        return result;
}
EXPORT_SYMBOL(iam_delete);
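/*
 * Together with iam_lookup() above, iam_insert(), iam_update() and
 * iam_delete() form the simplest client interface: one key, one record, one
 * transaction handle. E.g., an insert-or-update sketch (error handling
 * elided; @h, @c, @k, @r, @pd as in the functions above):
 *
 *     result = iam_insert(h, c, k, r, pd);
 *     if (result == -EEXIST)
 *             result = iam_update(h, c, k, r, pd);
 */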
int iam_root_limit(int rootgap, int blocksize, int size)
{
        int limit;
        int nlimit;

        limit = (blocksize - rootgap) / size;
        nlimit = blocksize / size;
        if (limit == nlimit)