4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see [sun.com URL with a
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
27 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
30 * Copyright (c) 2011, 2012, Whamcloud, Inc.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
37 * Top-level entry points into iam module
39 * Author: Wang Di <wangdi@clusterfs.com>
40 * Author: Nikita Danilov <nikita@clusterfs.com>
44 * iam: big theory statement.
46 * iam (Index Access Module) is a module providing abstraction of persistent
47 * transactional container on top of generalized ldiskfs htree.
51 * - key, pointer, and record size specifiable per container.
53 * - trees taller than 2 index levels.
55 * - read/write to existing ldiskfs htree directories as iam containers.
57 * iam container is a tree, consisting of leaf nodes containing keys and
58 * records stored in this container, and index nodes, containing keys and
59 * pointers to leaf or index nodes.
61 * iam does not work with keys directly, instead it calls user-supplied key
62 * comparison function (->dpo_keycmp()).
64 * Pointers are (currently) interpreted as logical offsets (measured in
65 * blocks) within the underlying flat file on top of which the iam tree lives.
69 * iam mostly tries to reuse existing htree formats.
71 * Format of index node:
73 * +-----+-------+-------+-------+------+-------+------------+
74 * | | count | | | | | |
75 * | gap | / | entry | entry | .... | entry | free space |
76 * | | limit | | | | | |
77 * +-----+-------+-------+-------+------+-------+------------+
79 * gap this part of node is never accessed by iam code. It
80 * exists for binary compatibility with ldiskfs htree (that,
81 * in turn, stores fake struct ext2_dirent for ext2
82 * compatibility), and to keep some unspecified per-node
83 * data. Gap can be different for root and non-root index
84 * nodes. Gap size can be specified for each container
85 * (gap of 0 is allowed).
87 * count/limit current number of entries in this node, and the maximal
88 * number of entries that can fit into the node. count/limit
89 * has the same size as an entry, and is itself counted in the maximal number of entries.
92 * entry index entry: consists of a key immediately followed by
93 * a pointer to a child node. Size of a key and size of a
94 * pointer depends on container. Entry has neither
95 * alignment nor padding.
97 * free space portion of the node to which new entries are added
99 * Entries in index node are sorted by their key value.
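 *
 * As an illustration only (key and pointer sizes are per-container and the
 * authoritative definitions live in the iam headers), an index node with
 * 8-byte keys and 4-byte block pointers can be pictured as a packed array of
 *
 *     struct hypothetical_iam_index_entry {
 *             __u8   ie_key[8];
 *             __le32 ie_ptr;
 *     } __attribute__((packed));
 *
 * laid out right after the gap and the count/limit word and sorted by ie_key;
 * iam_find_position() below binary-searches exactly this array.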
101 * Format of a leaf node is not specified. Generic iam code accesses leaf
102 * nodes through ->id_leaf methods in struct iam_descr.
104 * The IAM root block is a special node, which contains the IAM descriptor.
105 * Its on-disk format is:
107 * +---------+-------+--------+---------+-------+------+-------+------------+
108 * |IAM desc | count | idle | | | | | |
109 * |(fix/var)| / | blocks | padding | entry | .... | entry | free space |
110 * | | limit | | | | | | |
111 * +---------+-------+--------+---------+-------+------+-------+------------+
113 * The padding length is calculated with the parameters in the IAM descriptor.
115 * The field "idle_blocks" is used to record empty leaf nodes, which have not
116 * been released even though all entries in them have been removed. Usually,
117 * the idle blocks in the IAM should be reused when new leaf nodes need to be
118 * allocated for new entries, but that depends on the IAM hash functions
119 * mapping the new entries to these idle blocks. Unfortunately, it is not easy
120 * to design hash functions for such a clever mapping, especially considering
121 * insert/lookup performance.
123 * So the IAM recycles the empty leaf nodes and puts them into a per-file
124 * idle blocks pool. When a new leaf node is needed, IAM will first try to
125 * take an idle block from this pool, regardless of how the IAM hash functions would map the new entries.
128 * The idle blocks pool is organized as a series of tables, and each table
129 * can be described as following (on-disk format):
131 * +---------+---------+---------+---------+------+---------+-------+
132 * | magic | count | next | logic | | logic | free |
133 * |(16 bits)|(16 bits)| table | blk # | .... | blk # | space |
134 * | | |(32 bits)|(32 bits)| |(32 bits)| |
135 * +---------+---------+---------+---------+------+---------+-------+
137 * The logic blk# for the first table is stored in the root node "idle_blocks".
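 *
 * A minimal sketch of the table header, consistent with the diagram above and
 * with the field names used by the code below (the authoritative struct
 * iam_idle_head lives in osd_internal.h; treat this layout as illustrative):
 *
 *     struct iam_idle_head {
 *             __le16 iih_magic;    IAM_IDLE_HEADER_MAGIC
 *             __le16 iih_count;    number of idle blocks recorded in this table
 *             __le32 iih_next;     logic blk # of the next table in the chain
 *             __le32 iih_blks[0];  array of idle logic blk #s, at most
 *                                  iam_idle_blocks_limit(inode) entries
 *     };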
141 #include <linux/module.h>
142 #include <linux/fs.h>
143 #include <linux/pagemap.h>
144 #include <linux/time.h>
145 #include <linux/fcntl.h>
146 #include <linux/stat.h>
147 #include <linux/string.h>
148 #include <linux/quotaops.h>
149 #include <linux/buffer_head.h>
150 #include "osd_internal.h"
156 * List of all registered formats.
158 * No locking. Callers synchronize.
160 static CFS_LIST_HEAD(iam_formats);
162 void iam_format_register(struct iam_format *fmt)
164 cfs_list_add(&fmt->if_linkage, &iam_formats);
166 EXPORT_SYMBOL(iam_format_register);
168 static struct buffer_head *
169 iam_load_idle_blocks(struct iam_container *c, iam_ptr_t blk)
171 struct inode *inode = c->ic_object;
172 struct iam_idle_head *head;
173 struct buffer_head *bh;
176 LASSERT_SEM_LOCKED(&c->ic_idle_sem);
181 bh = ldiskfs_bread(NULL, inode, blk, 0, &err);
183 CERROR("%.16s: cannot load idle blocks, blk = %u, err = %d\n",
184 LDISKFS_SB(inode->i_sb)->s_es->s_volume_name, blk, err);
185 c->ic_idle_failed = 1;
189 head = (struct iam_idle_head *)(bh->b_data);
190 if (le16_to_cpu(head->iih_magic) != IAM_IDLE_HEADER_MAGIC) {
191 CERROR("%.16s: invalid idle block head, blk = %u, magic = %d\n",
192 LDISKFS_SB(inode->i_sb)->s_es->s_volume_name, blk,
193 le16_to_cpu(head->iih_magic));
195 c->ic_idle_failed = 1;
196 return ERR_PTR(-EBADF);
203 * Determine format of given container. This is done by scanning list of
204 * registered formats and calling ->if_guess() method of each in turn.
206 static int iam_format_guess(struct iam_container *c)
209 struct iam_format *fmt;
212 * XXX temporary initialization hook.
215 static int initialized = 0;
218 iam_lvar_format_init();
219 iam_lfix_format_init();
225 cfs_list_for_each_entry(fmt, &iam_formats, if_linkage) {
226 result = fmt->if_guess(c);
232 struct buffer_head *bh;
235 LASSERT(c->ic_root_bh != NULL);
237 idle_blocks = (__u32 *)(c->ic_root_bh->b_data +
238 c->ic_descr->id_root_gap +
239 sizeof(struct dx_countlimit));
240 down(&c->ic_idle_sem);
241 bh = iam_load_idle_blocks(c, le32_to_cpu(*idle_blocks));
242 if (bh != NULL && IS_ERR(bh))
243 result = PTR_ERR(bh);
253 * Initialize container @c.
255 int iam_container_init(struct iam_container *c,
256 struct iam_descr *descr, struct inode *inode)
258 memset(c, 0, sizeof *c);
260 c->ic_object = inode;
261 init_rwsem(&c->ic_sem);
262 dynlock_init(&c->ic_tree_lock);
263 sema_init(&c->ic_idle_sem, 1);
266 EXPORT_SYMBOL(iam_container_init);
269 * Determine container format.
271 int iam_container_setup(struct iam_container *c)
273 return iam_format_guess(c);
275 EXPORT_SYMBOL(iam_container_setup);
278 * Finalize container @c, release all resources.
280 void iam_container_fini(struct iam_container *c)
282 brelse(c->ic_idle_bh);
283 c->ic_idle_bh = NULL;
284 brelse(c->ic_root_bh);
285 c->ic_root_bh = NULL;
287 EXPORT_SYMBOL(iam_container_fini);
289 void iam_path_init(struct iam_path *path, struct iam_container *c,
290 struct iam_path_descr *pd)
292 memset(path, 0, sizeof *path);
293 path->ip_container = c;
294 path->ip_frame = path->ip_frames;
296 path->ip_leaf.il_path = path;
299 static void iam_leaf_fini(struct iam_leaf *leaf);
301 void iam_path_release(struct iam_path *path)
305 for (i = 0; i < ARRAY_SIZE(path->ip_frames); i++) {
306 if (path->ip_frames[i].bh != NULL) {
307 path->ip_frames[i].at_shifted = 0;
308 brelse(path->ip_frames[i].bh);
309 path->ip_frames[i].bh = NULL;
314 void iam_path_fini(struct iam_path *path)
316 iam_leaf_fini(&path->ip_leaf);
317 iam_path_release(path);
321 void iam_path_compat_init(struct iam_path_compat *path, struct inode *inode)
325 path->ipc_hinfo = &path->ipc_hinfo_area;
326 for (i = 0; i < ARRAY_SIZE(path->ipc_scratch); ++i)
327 path->ipc_descr.ipd_key_scratch[i] =
328 (struct iam_ikey *)&path->ipc_scratch[i];
330 iam_path_init(&path->ipc_path, &path->ipc_container, &path->ipc_descr);
333 void iam_path_compat_fini(struct iam_path_compat *path)
335 iam_path_fini(&path->ipc_path);
339 * Helper function initializing iam_path_descr and its key scratch area.
341 struct iam_path_descr *iam_ipd_alloc(void *area, int keysize)
343 struct iam_path_descr *ipd;
349 for (i = 0; i < ARRAY_SIZE(ipd->ipd_key_scratch); ++i, karea += keysize)
350 ipd->ipd_key_scratch[i] = karea;
353 EXPORT_SYMBOL(iam_ipd_alloc);
355 void iam_ipd_free(struct iam_path_descr *ipd)
358 EXPORT_SYMBOL(iam_ipd_free);
360 int iam_node_read(struct iam_container *c, iam_ptr_t ptr,
361 handle_t *h, struct buffer_head **bh)
365 /* NB: it can be called by iam_lfix_guess() which is still at
366 * very early stage, c->ic_root_bh and c->ic_descr->id_ops
367 * haven't been initialized yet.
368 * Also, we don't have this for IAM dir.
370 if (c->ic_root_bh != NULL &&
371 c->ic_descr->id_ops->id_root_ptr(c) == ptr) {
372 get_bh(c->ic_root_bh);
377 *bh = ldiskfs_bread(h, c->ic_object, (int)ptr, 0, &result);
384 * Return pointer to current leaf record. Pointer is valid while corresponding
385 * leaf node is locked and pinned.
387 static struct iam_rec *iam_leaf_rec(const struct iam_leaf *leaf)
389 return iam_leaf_ops(leaf)->rec(leaf);
393 * Return pointer to the current leaf key. This function returns pointer to
394 * the key stored in node.
396 * Caller should assume that returned pointer is only valid while leaf node is
399 static struct iam_key *iam_leaf_key(const struct iam_leaf *leaf)
401 return iam_leaf_ops(leaf)->key(leaf);
404 static int iam_leaf_key_size(const struct iam_leaf *leaf)
406 return iam_leaf_ops(leaf)->key_size(leaf);
409 static struct iam_ikey *iam_leaf_ikey(const struct iam_leaf *leaf,
410 struct iam_ikey *key)
412 return iam_leaf_ops(leaf)->ikey(leaf, key);
415 static int iam_leaf_keycmp(const struct iam_leaf *leaf,
416 const struct iam_key *key)
418 return iam_leaf_ops(leaf)->key_cmp(leaf, key);
421 static int iam_leaf_keyeq(const struct iam_leaf *leaf,
422 const struct iam_key *key)
424 return iam_leaf_ops(leaf)->key_eq(leaf, key);
427 #if LDISKFS_INVARIANT_ON
428 static int iam_leaf_check(struct iam_leaf *leaf);
429 extern int dx_node_check(struct iam_path *p, struct iam_frame *f);
431 static int iam_path_check(struct iam_path *p)
436 struct iam_descr *param;
439 param = iam_path_descr(p);
440 for (i = 0; result && i < ARRAY_SIZE(p->ip_frames); ++i) {
441 f = &p->ip_frames[i];
443 result = dx_node_check(p, f);
445 result = !param->id_ops->id_node_check(p, f);
448 if (result && p->ip_leaf.il_bh != NULL)
449 result = iam_leaf_check(&p->ip_leaf);
451 ldiskfs_std_error(iam_path_obj(p)->i_sb, result);
457 static int iam_leaf_load(struct iam_path *path)
461 struct iam_container *c;
462 struct buffer_head *bh;
463 struct iam_leaf *leaf;
464 struct iam_descr *descr;
466 c = path->ip_container;
467 leaf = &path->ip_leaf;
468 descr = iam_path_descr(path);
469 block = path->ip_frame->leaf;
472 printk(CFS_KERN_EMERG "wrong leaf: %lu %d [%p %p %p]\n",
473 (long unsigned)path->ip_frame->leaf,
474 dx_get_count(dx_node_get_entries(path, path->ip_frame)),
475 path->ip_frames[0].bh, path->ip_frames[1].bh,
476 path->ip_frames[2].bh);
478 err = descr->id_ops->id_node_read(c, block, NULL, &bh);
481 leaf->il_curidx = block;
482 err = iam_leaf_ops(leaf)->init(leaf);
483 assert_inv(ergo(err == 0, iam_leaf_check(leaf)));
488 static void iam_unlock_htree(struct iam_container *ic,
489 struct dynlock_handle *lh)
492 dynlock_unlock(&ic->ic_tree_lock, lh);
496 static void iam_leaf_unlock(struct iam_leaf *leaf)
498 if (leaf->il_lock != NULL) {
499 iam_unlock_htree(iam_leaf_container(leaf),
502 leaf->il_lock = NULL;
506 static void iam_leaf_fini(struct iam_leaf *leaf)
508 if (leaf->il_path != NULL) {
509 iam_leaf_unlock(leaf);
510 assert_inv(ergo(leaf->il_bh != NULL, iam_leaf_check(leaf)));
511 iam_leaf_ops(leaf)->fini(leaf);
520 static void iam_leaf_start(struct iam_leaf *folio)
522 iam_leaf_ops(folio)->start(folio);
525 void iam_leaf_next(struct iam_leaf *folio)
527 iam_leaf_ops(folio)->next(folio);
530 static void iam_leaf_rec_add(struct iam_leaf *leaf, const struct iam_key *key,
531 const struct iam_rec *rec)
533 iam_leaf_ops(leaf)->rec_add(leaf, key, rec);
536 static void iam_rec_del(struct iam_leaf *leaf, int shift)
538 iam_leaf_ops(leaf)->rec_del(leaf, shift);
541 int iam_leaf_at_end(const struct iam_leaf *leaf)
543 return iam_leaf_ops(leaf)->at_end(leaf);
546 void iam_leaf_split(struct iam_leaf *l, struct buffer_head **bh, iam_ptr_t nr)
548 iam_leaf_ops(l)->split(l, bh, nr);
551 static inline int iam_leaf_empty(struct iam_leaf *l)
553 return iam_leaf_ops(l)->leaf_empty(l);
556 int iam_leaf_can_add(const struct iam_leaf *l,
557 const struct iam_key *k, const struct iam_rec *r)
559 return iam_leaf_ops(l)->can_add(l, k, r);
562 #if LDISKFS_INVARIANT_ON
563 static int iam_leaf_check(struct iam_leaf *leaf)
567 struct iam_lentry *orig;
568 struct iam_path *path;
569 struct iam_container *bag;
576 path = iam_leaf_path(leaf);
577 bag = iam_leaf_container(leaf);
579 result = iam_leaf_ops(leaf)->init(leaf);
584 iam_leaf_start(leaf);
585 k0 = iam_path_ikey(path, 0);
586 k1 = iam_path_ikey(path, 1);
587 while (!iam_leaf_at_end(leaf)) {
588 iam_ikeycpy(bag, k0, k1);
589 iam_ikeycpy(bag, k1, iam_leaf_ikey(leaf, k1));
590 if (!first && iam_ikeycmp(bag, k0, k1) > 0) {
602 static int iam_txn_dirty(handle_t *handle,
603 struct iam_path *path, struct buffer_head *bh)
607 result = ldiskfs_journal_dirty_metadata(handle, bh);
609 ldiskfs_std_error(iam_path_obj(path)->i_sb, result);
613 static int iam_txn_add(handle_t *handle,
614 struct iam_path *path, struct buffer_head *bh)
618 result = ldiskfs_journal_get_write_access(handle, bh);
620 ldiskfs_std_error(iam_path_obj(path)->i_sb, result);
624 /***********************************************************************/
625 /* iterator interface */
626 /***********************************************************************/
628 static enum iam_it_state it_state(const struct iam_iterator *it)
634 * Helper function returning scratch key.
636 static struct iam_container *iam_it_container(const struct iam_iterator *it)
638 return it->ii_path.ip_container;
641 static inline int it_keycmp(const struct iam_iterator *it,
642 const struct iam_key *k)
644 return iam_leaf_keycmp(&it->ii_path.ip_leaf, k);
647 static inline int it_keyeq(const struct iam_iterator *it,
648 const struct iam_key *k)
650 return iam_leaf_keyeq(&it->ii_path.ip_leaf, k);
653 static int it_ikeycmp(const struct iam_iterator *it, const struct iam_ikey *ik)
655 return iam_ikeycmp(it->ii_path.ip_container,
656 iam_leaf_ikey(&it->ii_path.ip_leaf,
657 iam_path_ikey(&it->ii_path, 0)), ik);
660 static inline int it_at_rec(const struct iam_iterator *it)
662 return !iam_leaf_at_end(&it->ii_path.ip_leaf);
665 static inline int it_before(const struct iam_iterator *it)
667 return it_state(it) == IAM_IT_SKEWED && it_at_rec(it);
671 * Helper wrapper around iam_it_get(): returns 0 (success) only when record
672 * with exactly the same key as asked is found.
674 static int iam_it_get_exact(struct iam_iterator *it, const struct iam_key *k)
678 result = iam_it_get(it, k);
681 else if (result == 0)
683 * Return -ENOENT if cursor is located above record with a key
684 * different from one specified, or in the empty leaf.
686 * XXX returning -ENOENT only works if iam_it_get() never
687 * returns -ENOENT as a legitimate error.
693 void iam_container_write_lock(struct iam_container *ic)
695 down_write(&ic->ic_sem);
698 void iam_container_write_unlock(struct iam_container *ic)
700 up_write(&ic->ic_sem);
703 void iam_container_read_lock(struct iam_container *ic)
705 down_read(&ic->ic_sem);
708 void iam_container_read_unlock(struct iam_container *ic)
710 up_read(&ic->ic_sem);
714 * Initialize iterator to IAM_IT_DETACHED state.
716 * postcondition: it_state(it) == IAM_IT_DETACHED
718 int iam_it_init(struct iam_iterator *it, struct iam_container *c, __u32 flags,
719 struct iam_path_descr *pd)
721 memset(it, 0, sizeof *it);
722 it->ii_flags = flags;
723 it->ii_state = IAM_IT_DETACHED;
724 iam_path_init(&it->ii_path, c, pd);
727 EXPORT_SYMBOL(iam_it_init);
730 * Finalize iterator and release all resources.
732 * precondition: it_state(it) == IAM_IT_DETACHED
734 void iam_it_fini(struct iam_iterator *it)
736 assert_corr(it_state(it) == IAM_IT_DETACHED);
737 iam_path_fini(&it->ii_path);
739 EXPORT_SYMBOL(iam_it_fini);
742 * These locking primitives are used to protect parts
743 * of the directory's htree. The protection unit is a block: leaf or index
745 static struct dynlock_handle *iam_lock_htree(struct iam_container *ic,
747 enum dynlock_type lt)
749 return dynlock_lock(&ic->ic_tree_lock, value, lt, GFP_NOFS);
752 int iam_index_lock(struct iam_path *path, struct dynlock_handle **lh)
756 for (f = path->ip_frame; f >= path->ip_frames; --f, ++lh) {
758 *lh = iam_lock_htree(path->ip_container, f->curidx, DLT_READ);
766 * Fast check for frame consistency.
768 static int iam_check_fast(struct iam_path *path, struct iam_frame *frame)
770 struct iam_container *bag;
771 struct iam_entry *next;
772 struct iam_entry *last;
773 struct iam_entry *entries;
774 struct iam_entry *at;
776 bag = path->ip_container;
778 entries = frame->entries;
779 last = iam_entry_shift(path, entries, dx_get_count(entries) - 1);
781 if (unlikely(at > last))
784 if (unlikely(dx_get_block(path, at) != frame->leaf))
787 if (unlikely(iam_ikeycmp(bag, iam_ikey_at(path, at),
788 path->ip_ikey_target) > 0))
791 next = iam_entry_shift(path, at, +1);
793 if (unlikely(iam_ikeycmp(bag, iam_ikey_at(path, next),
794 path->ip_ikey_target) <= 0))
800 int dx_index_is_compat(struct iam_path *path)
802 return iam_path_descr(path) == NULL;
808 * search position of specified hash in index
812 struct iam_entry *iam_find_position(struct iam_path *path,
813 struct iam_frame *frame)
820 count = dx_get_count(frame->entries);
821 assert_corr(count && count <= dx_get_limit(frame->entries));
822 p = iam_entry_shift(path, frame->entries,
823 dx_index_is_compat(path) ? 1 : 2);
824 q = iam_entry_shift(path, frame->entries, count - 1);
826 m = iam_entry_shift(path, p, iam_entry_diff(path, q, p) / 2);
827 if (iam_ikeycmp(path->ip_container, iam_ikey_at(path, m),
828 path->ip_ikey_target) > 0)
829 q = iam_entry_shift(path, m, -1);
831 p = iam_entry_shift(path, m, +1);
833 return iam_entry_shift(path, p, -1);
838 static iam_ptr_t iam_find_ptr(struct iam_path *path, struct iam_frame *frame)
840 return dx_get_block(path, iam_find_position(path, frame));
843 void iam_insert_key(struct iam_path *path, struct iam_frame *frame,
844 const struct iam_ikey *key, iam_ptr_t ptr)
846 struct iam_entry *entries = frame->entries;
847 struct iam_entry *new = iam_entry_shift(path, frame->at, +1);
848 int count = dx_get_count(entries);
851 * Unfortunately we cannot assert this, as this function is sometimes
852 * called by VFS under i_sem and without pdirops lock.
854 assert_corr(1 || iam_frame_is_locked(path, frame));
855 assert_corr(count < dx_get_limit(entries));
856 assert_corr(frame->at < iam_entry_shift(path, entries, count));
857 assert_inv(dx_node_check(path, frame));
859 memmove(iam_entry_shift(path, new, 1), new,
860 (char *)iam_entry_shift(path, entries, count) - (char *)new);
861 dx_set_ikey(path, new, key);
862 dx_set_block(path, new, ptr);
863 dx_set_count(entries, count + 1);
864 assert_inv(dx_node_check(path, frame));
867 void iam_insert_key_lock(struct iam_path *path, struct iam_frame *frame,
868 const struct iam_ikey *key, iam_ptr_t ptr)
870 iam_lock_bh(frame->bh);
871 iam_insert_key(path, frame, key, ptr);
872 iam_unlock_bh(frame->bh);
875 * returns 0 if path was unchanged, -EAGAIN otherwise.
877 static int iam_check_path(struct iam_path *path, struct iam_frame *frame)
881 iam_lock_bh(frame->bh);
882 equal = iam_check_fast(path, frame) == 0 ||
883 frame->leaf == iam_find_ptr(path, frame);
884 DX_DEVAL(iam_lock_stats.dls_bh_again += !equal);
885 iam_unlock_bh(frame->bh);
887 return equal ? 0 : -EAGAIN;
890 static int iam_lookup_try(struct iam_path *path)
896 struct iam_descr *param;
897 struct iam_frame *frame;
898 struct iam_container *c;
900 param = iam_path_descr(path);
901 c = path->ip_container;
903 ptr = param->id_ops->id_root_ptr(c);
904 for (frame = path->ip_frames, i = 0; i <= path->ip_indirect;
906 err = param->id_ops->id_node_read(c, (iam_ptr_t)ptr, NULL,
910 iam_lock_bh(frame->bh);
912 * node must be initialized under bh lock because concurrent
913 * creation procedure may change it and iam_lookup_try() will
914 * see obsolete tree height. -bzzz
919 if (LDISKFS_INVARIANT_ON) {
920 err = param->id_ops->id_node_check(path, frame);
925 err = param->id_ops->id_node_load(path, frame);
929 assert_inv(dx_node_check(path, frame));
931 * splitting may change the root index block and move the hash we're
932 * looking for into another index block, so we have to check for
933 * this situation and repeat from the beginning if the path got changed
937 err = iam_check_path(path, frame - 1);
942 frame->at = iam_find_position(path, frame);
944 frame->leaf = ptr = dx_get_block(path, frame->at);
946 iam_unlock_bh(frame->bh);
950 iam_unlock_bh(frame->bh);
951 path->ip_frame = --frame;
955 static int __iam_path_lookup(struct iam_path *path)
960 for (i = 0; i < DX_MAX_TREE_HEIGHT; ++ i)
961 assert(path->ip_frames[i].bh == NULL);
964 err = iam_lookup_try(path);
968 } while (err == -EAGAIN);
974 * returns 0 if path was unchanged, -EAGAIN otherwise.
976 static int iam_check_full_path(struct iam_path *path, int search)
978 struct iam_frame *bottom;
979 struct iam_frame *scan;
985 for (bottom = path->ip_frames, i = 0;
986 i < DX_MAX_TREE_HEIGHT && bottom->bh != NULL; ++bottom, ++i) {
987 ; /* find last filled in frame */
991 * Lock frames, bottom to top.
993 for (scan = bottom - 1; scan >= path->ip_frames; --scan)
994 iam_lock_bh(scan->bh);
996 * Check them top to bottom.
999 for (scan = path->ip_frames; scan < bottom; ++scan) {
1000 struct iam_entry *pos;
1003 if (iam_check_fast(path, scan) == 0)
1006 pos = iam_find_position(path, scan);
1007 if (scan->leaf != dx_get_block(path, pos)) {
1013 pos = iam_entry_shift(path, scan->entries,
1014 dx_get_count(scan->entries) - 1);
1015 if (scan->at > pos ||
1016 scan->leaf != dx_get_block(path, scan->at)) {
1024 * Unlock top to bottom.
1026 for (scan = path->ip_frames; scan < bottom; ++scan)
1027 iam_unlock_bh(scan->bh);
1028 DX_DEVAL(iam_lock_stats.dls_bh_full_again += !!result);
1029 do_corr(schedule());
1036 * Performs path lookup and returns with the found leaf (if any) locked by the htree lock.
1039 int iam_lookup_lock(struct iam_path *path,
1040 struct dynlock_handle **dl, enum dynlock_type lt)
1045 dir = iam_path_obj(path);
1046 while ((result = __iam_path_lookup(path)) == 0) {
1047 do_corr(schedule());
1048 *dl = iam_lock_htree(path->ip_container, path->ip_frame->leaf,
1051 iam_path_fini(path);
1055 do_corr(schedule());
1057 * while locking leaf we just found may get split so we need
1058 * to check this -bzzz
1060 if (iam_check_full_path(path, 1) == 0)
1062 iam_unlock_htree(path->ip_container, *dl);
1064 iam_path_fini(path);
1069 * Performs tree top-to-bottom traversal starting from the root, and loads the leaf node.
1072 static int iam_path_lookup(struct iam_path *path, int index)
1074 struct iam_container *c;
1075 struct iam_descr *descr;
1076 struct iam_leaf *leaf;
1079 c = path->ip_container;
1080 leaf = &path->ip_leaf;
1081 descr = iam_path_descr(path);
1082 result = iam_lookup_lock(path, &leaf->il_lock, DLT_WRITE);
1083 assert_inv(iam_path_check(path));
1084 do_corr(schedule());
1086 result = iam_leaf_load(path);
1087 assert_inv(ergo(result == 0, iam_leaf_check(leaf)));
1089 do_corr(schedule());
1091 result = iam_leaf_ops(leaf)->
1092 ilookup(leaf, path->ip_ikey_target);
1094 result = iam_leaf_ops(leaf)->
1095 lookup(leaf, path->ip_key_target);
1096 do_corr(schedule());
1099 iam_leaf_unlock(leaf);
1105 * Common part of iam_it_{i,}get().
1107 static int __iam_it_get(struct iam_iterator *it, int index)
1110 assert_corr(it_state(it) == IAM_IT_DETACHED);
1112 result = iam_path_lookup(&it->ii_path, index);
1116 collision = result & IAM_LOOKUP_LAST;
1117 switch (result & ~IAM_LOOKUP_LAST) {
1118 case IAM_LOOKUP_EXACT:
1120 it->ii_state = IAM_IT_ATTACHED;
1124 it->ii_state = IAM_IT_ATTACHED;
1126 case IAM_LOOKUP_BEFORE:
1127 case IAM_LOOKUP_EMPTY:
1129 it->ii_state = IAM_IT_SKEWED;
1134 result |= collision;
1137 * See iam_it_get_exact() for explanation.
1139 assert_corr(result != -ENOENT);
1144 * The correct hash, but not the same key, was found; iterate through the hash
1145 * collision chain, looking for the correct record.
1147 static int iam_it_collision(struct iam_iterator *it)
1151 assert(ergo(it_at_rec(it), !it_keyeq(it, it->ii_path.ip_key_target)));
1153 while ((result = iam_it_next(it)) == 0) {
1154 do_corr(schedule());
1155 if (it_ikeycmp(it, it->ii_path.ip_ikey_target) != 0)
1157 if (it_keyeq(it, it->ii_path.ip_key_target))
1164 * Attach iterator. After successful completion, @it points to record with
1165 * least key not larger than @k.
1167 * Return value: 0: positioned on existing record,
1168 * +ve: exact position found,
1171 * precondition: it_state(it) == IAM_IT_DETACHED
1172 * postcondition: ergo(result == 0 && it_state(it) == IAM_IT_ATTACHED,
1173 * it_keycmp(it, k) <= 0)
1175 int iam_it_get(struct iam_iterator *it, const struct iam_key *k)
1178 assert_corr(it_state(it) == IAM_IT_DETACHED);
1180 it->ii_path.ip_ikey_target = NULL;
1181 it->ii_path.ip_key_target = k;
1183 result = __iam_it_get(it, 0);
1185 if (result == IAM_LOOKUP_LAST) {
1186 result = iam_it_collision(it);
1190 result = __iam_it_get(it, 0);
1195 result &= ~IAM_LOOKUP_LAST;
1197 assert_corr(ergo(result > 0, it_keycmp(it, k) == 0));
1198 assert_corr(ergo(result == 0 && it_state(it) == IAM_IT_ATTACHED,
1199 it_keycmp(it, k) <= 0));
1202 EXPORT_SYMBOL(iam_it_get);
1205 * Attach iterator by index key.
1207 static int iam_it_iget(struct iam_iterator *it, const struct iam_ikey *k)
1209 assert_corr(it_state(it) == IAM_IT_DETACHED);
1211 it->ii_path.ip_ikey_target = k;
1212 return __iam_it_get(it, 1) & ~IAM_LOOKUP_LAST;
1216 * Attach iterator, and assure it points to the record (not skewed).
1218 * Return value: 0: positioned on existing record,
1219 * +ve: exact position found,
1222 * precondition: it_state(it) == IAM_IT_DETACHED &&
1223 * !(it->ii_flags&IAM_IT_WRITE)
1224 * postcondition: ergo(result == 0, it_state(it) == IAM_IT_ATTACHED)
1226 int iam_it_get_at(struct iam_iterator *it, const struct iam_key *k)
1229 assert_corr(it_state(it) == IAM_IT_DETACHED &&
1230 !(it->ii_flags&IAM_IT_WRITE));
1231 result = iam_it_get(it, k);
1233 if (it_state(it) != IAM_IT_ATTACHED) {
1234 assert_corr(it_state(it) == IAM_IT_SKEWED);
1235 result = iam_it_next(it);
1238 assert_corr(ergo(result >= 0, it_state(it) == IAM_IT_ATTACHED));
1241 EXPORT_SYMBOL(iam_it_get_at);
1244 * Duplicates iterator.
1246 * postcondition: it_state(dst) == it_state(src) &&
1247 * iam_it_container(dst) == iam_it_container(src) &&
1248 * dst->ii_flags = src->ii_flags &&
1249 * ergo(it_state(src) == IAM_IT_ATTACHED,
1250 * iam_it_rec_get(dst) == iam_it_rec_get(src) &&
1251 * iam_it_key_get(dst) == iam_it_key_get(src))
1253 void iam_it_dup(struct iam_iterator *dst, const struct iam_iterator *src)
1255 dst->ii_flags = src->ii_flags;
1256 dst->ii_state = src->ii_state;
1257 /* XXX not yet. iam_path_dup(&dst->ii_path, &src->ii_path); */
1259 * XXX: duplicate lock.
1261 assert_corr(it_state(dst) == it_state(src));
1262 assert_corr(iam_it_container(dst) == iam_it_container(src));
1263 assert_corr(dst->ii_flags = src->ii_flags);
1264 assert_corr(ergo(it_state(src) == IAM_IT_ATTACHED,
1265 iam_it_rec_get(dst) == iam_it_rec_get(src) &&
1266 iam_it_key_get(dst) == iam_it_key_get(src)));
1271 * Detach iterator. Does nothing in detached state.
1273 * postcondition: it_state(it) == IAM_IT_DETACHED
1275 void iam_it_put(struct iam_iterator *it)
1277 if (it->ii_state != IAM_IT_DETACHED) {
1278 it->ii_state = IAM_IT_DETACHED;
1279 iam_leaf_fini(&it->ii_path.ip_leaf);
1282 EXPORT_SYMBOL(iam_it_put);
1284 static struct iam_ikey *iam_it_ikey_get(const struct iam_iterator *it,
1285 struct iam_ikey *ikey);
1289 * This function increments the frame pointer to search the next leaf
1290 * block, and reads in the necessary intervening nodes if the search
1291 * should be necessary. Whether or not the search is necessary is
1292 * controlled by the hash parameter. If the hash value is even, then
1293 * the search is only continued if the next block starts with that
1294 * hash value. This is used if we are searching for a specific file.
1296 * If the hash value is HASH_NB_ALWAYS, then always go to the next block.
1298 * This function returns 1 if the caller should continue to search,
1299 * or 0 if it should not. If there is an error reading one of the
1300 * index blocks, it will return a negative error code.
1302 * If start_hash is non-null, it will be filled in with the starting
1303 * hash of the next page.
1305 static int iam_htree_advance(struct inode *dir, __u32 hash,
1306 struct iam_path *path, __u32 *start_hash,
1309 struct iam_frame *p;
1310 struct buffer_head *bh;
1311 int err, num_frames = 0;
1316 * Find the next leaf page by incrementing the frame pointer.
1317 * If we run out of entries in the interior node, loop around and
1318 * increment pointer in the parent node. When we break out of
1319 * this loop, num_frames indicates the number of interior
1320 * nodes that need to be read.
1323 do_corr(schedule());
1328 p->at = iam_entry_shift(path, p->at, +1);
1329 if (p->at < iam_entry_shift(path, p->entries,
1330 dx_get_count(p->entries))) {
1331 p->leaf = dx_get_block(path, p->at);
1332 iam_unlock_bh(p->bh);
1335 iam_unlock_bh(p->bh);
1336 if (p == path->ip_frames)
1347 * If the hash is 1, then continue only if the next page has a
1348 * continuation hash of any value. This is used for readdir
1349 * handling. Otherwise, check to see if the hash matches the
1350 * desired continuation hash. If it doesn't, return, since
1351 * there's no point in reading the successive index pages.
1353 dx_get_ikey(path, p->at, (struct iam_ikey *)&bhash);
1355 *start_hash = bhash;
1356 if ((hash & 1) == 0) {
1357 if ((bhash & ~1) != hash)
1362 * If the hash is HASH_NB_ALWAYS, we always go to the next
1363 * block so no check is necessary
1365 while (num_frames--) {
1368 do_corr(schedule());
1370 idx = p->leaf = dx_get_block(path, p->at);
1371 iam_unlock_bh(p->bh);
1372 err = iam_path_descr(path)->id_ops->
1373 id_node_read(path->ip_container, idx, NULL, &bh);
1375 return err; /* Failure */
1378 assert_corr(p->bh != bh);
1380 p->entries = dx_node_get_entries(path, p);
1381 p->at = iam_entry_shift(path, p->entries, !compat);
1382 assert_corr(p->curidx != idx);
1385 assert_corr(p->leaf != dx_get_block(path, p->at));
1386 p->leaf = dx_get_block(path, p->at);
1387 iam_unlock_bh(p->bh);
1388 assert_inv(dx_node_check(path, p));
1394 static inline int iam_index_advance(struct iam_path *path)
1396 return iam_htree_advance(iam_path_obj(path), 0, path, NULL, 0);
1399 static void iam_unlock_array(struct iam_container *ic,
1400 struct dynlock_handle **lh)
1404 for (i = 0; i < DX_MAX_TREE_HEIGHT; ++i, ++lh) {
1406 iam_unlock_htree(ic, *lh);
1412 * Advance the index part of @path to point to the next leaf. Returns 1 on
1413 * success, 0 when the end of the container is reached. The leaf node is locked.
1415 int iam_index_next(struct iam_container *c, struct iam_path *path)
1418 struct dynlock_handle *lh[DX_MAX_TREE_HEIGHT] = { 0, };
1420 struct inode *object;
1423 * Locking for iam_index_next()... is to be described.
1426 object = c->ic_object;
1427 cursor = path->ip_frame->leaf;
1430 result = iam_index_lock(path, lh);
1431 do_corr(schedule());
1435 result = iam_check_full_path(path, 0);
1436 if (result == 0 && cursor == path->ip_frame->leaf) {
1437 result = iam_index_advance(path);
1439 assert_corr(result == 0 ||
1440 cursor != path->ip_frame->leaf);
1444 iam_unlock_array(c, lh);
1446 iam_path_release(path);
1447 do_corr(schedule());
1449 result = __iam_path_lookup(path);
1453 while (path->ip_frame->leaf != cursor) {
1454 do_corr(schedule());
1456 result = iam_index_lock(path, lh);
1457 do_corr(schedule());
1461 result = iam_check_full_path(path, 0);
1465 result = iam_index_advance(path);
1467 CERROR("cannot find cursor : %u\n",
1473 result = iam_check_full_path(path, 0);
1476 iam_unlock_array(c, lh);
1478 } while (result == -EAGAIN);
1482 iam_unlock_array(c, lh);
1487 * Move iterator one record right.
1489 * Return value: 0: success,
1490 * +1: end of container reached
1493 * precondition: (it_state(it) == IAM_IT_ATTACHED ||
1494 * it_state(it) == IAM_IT_SKEWED) && it->ii_flags&IAM_IT_MOVE
1495 * postcondition: ergo(result == 0, it_state(it) == IAM_IT_ATTACHED) &&
1496 * ergo(result > 0, it_state(it) == IAM_IT_DETACHED)
1498 int iam_it_next(struct iam_iterator *it)
1501 struct iam_path *path;
1502 struct iam_leaf *leaf;
1504 do_corr(struct iam_ikey *ik_orig);
1506 /* assert_corr(it->ii_flags&IAM_IT_MOVE); */
1507 assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1508 it_state(it) == IAM_IT_SKEWED);
1510 path = &it->ii_path;
1511 leaf = &path->ip_leaf;
1512 obj = iam_path_obj(path);
1514 assert_corr(iam_leaf_is_locked(leaf));
1517 do_corr(ik_orig = it_at_rec(it) ?
1518 iam_it_ikey_get(it, iam_path_ikey(path, 2)) : NULL);
1519 if (it_before(it)) {
1520 assert_corr(!iam_leaf_at_end(leaf));
1521 it->ii_state = IAM_IT_ATTACHED;
1523 if (!iam_leaf_at_end(leaf))
1524 /* advance within leaf node */
1525 iam_leaf_next(leaf);
1527 * multiple iterations may be necessary due to empty leaves.
1529 while (result == 0 && iam_leaf_at_end(leaf)) {
1530 do_corr(schedule());
1531 /* advance index portion of the path */
1532 result = iam_index_next(iam_it_container(it), path);
1533 assert_corr(iam_leaf_is_locked(leaf));
1535 struct dynlock_handle *lh;
1536 lh = iam_lock_htree(iam_it_container(it),
1537 path->ip_frame->leaf,
1540 iam_leaf_fini(leaf);
1542 result = iam_leaf_load(path);
1544 iam_leaf_start(leaf);
1547 } else if (result == 0)
1548 /* end of container reached */
1554 it->ii_state = IAM_IT_ATTACHED;
1556 assert_corr(ergo(result == 0, it_state(it) == IAM_IT_ATTACHED));
1557 assert_corr(ergo(result > 0, it_state(it) == IAM_IT_DETACHED));
1558 assert_corr(ergo(result == 0 && ik_orig != NULL,
1559 it_ikeycmp(it, ik_orig) >= 0));
1562 EXPORT_SYMBOL(iam_it_next);
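/*
 * Illustrative usage sketch only (kept as a comment; container @c, path
 * descriptor @pd, start_key and the "process" callback are placeholders
 * provided by the caller). A typical forward scan with the iterator API
 * looks roughly like:
 *
 *	struct iam_iterator it;
 *	int rc;
 *
 *	iam_it_init(&it, c, IAM_IT_MOVE, pd);
 *	rc = iam_it_get_at(&it, start_key);
 *	while (rc == 0) {
 *		process(iam_it_key_get(&it), iam_it_rec_get(&it));
 *		rc = iam_it_next(&it);
 *	}
 *	iam_it_put(&it);
 *	iam_it_fini(&it);
 *
 * Positive returns (an exact match from iam_it_get_at(), or +1 "end of
 * container" from iam_it_next()) and -ve errors are left unhandled here for
 * brevity.
 */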
1565 * Return pointer to the record under iterator.
1567 * precondition: it_state(it) == IAM_IT_ATTACHED && it_at_rec(it)
1568 * postcondition: it_state(it) == IAM_IT_ATTACHED
1570 struct iam_rec *iam_it_rec_get(const struct iam_iterator *it)
1572 assert_corr(it_state(it) == IAM_IT_ATTACHED);
1573 assert_corr(it_at_rec(it));
1574 return iam_leaf_rec(&it->ii_path.ip_leaf);
1576 EXPORT_SYMBOL(iam_it_rec_get);
1578 static void iam_it_reccpy(struct iam_iterator *it, const struct iam_rec *r)
1580 struct iam_leaf *folio;
1582 folio = &it->ii_path.ip_leaf;
1583 iam_leaf_ops(folio)->rec_set(folio, r);
1587 * Replace contents of record under iterator.
1589 * precondition: it_state(it) == IAM_IT_ATTACHED &&
1590 * it->ii_flags&IAM_IT_WRITE
1591 * postcondition: it_state(it) == IAM_IT_ATTACHED &&
1592 * ergo(result == 0, !memcmp(iam_it_rec_get(it), r, ...))
1594 int iam_it_rec_set(handle_t *h,
1595 struct iam_iterator *it, const struct iam_rec *r)
1598 struct iam_path *path;
1599 struct buffer_head *bh;
1601 assert_corr(it_state(it) == IAM_IT_ATTACHED &&
1602 it->ii_flags&IAM_IT_WRITE);
1603 assert_corr(it_at_rec(it));
1605 path = &it->ii_path;
1606 bh = path->ip_leaf.il_bh;
1607 result = iam_txn_add(h, path, bh);
1609 iam_it_reccpy(it, r);
1610 result = iam_txn_dirty(h, path, bh);
1614 EXPORT_SYMBOL(iam_it_rec_set);
1617 * Return pointer to the index key under iterator.
1619 * precondition: it_state(it) == IAM_IT_ATTACHED ||
1620 * it_state(it) == IAM_IT_SKEWED
1622 static struct iam_ikey *iam_it_ikey_get(const struct iam_iterator *it,
1623 struct iam_ikey *ikey)
1625 assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1626 it_state(it) == IAM_IT_SKEWED);
1627 assert_corr(it_at_rec(it));
1628 return iam_leaf_ikey(&it->ii_path.ip_leaf, ikey);
1632 * Return pointer to the key under iterator.
1634 * precondition: it_state(it) == IAM_IT_ATTACHED ||
1635 * it_state(it) == IAM_IT_SKEWED
1637 struct iam_key *iam_it_key_get(const struct iam_iterator *it)
1639 assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1640 it_state(it) == IAM_IT_SKEWED);
1641 assert_corr(it_at_rec(it));
1642 return iam_leaf_key(&it->ii_path.ip_leaf);
1644 EXPORT_SYMBOL(iam_it_key_get);
1647 * Return size of key under iterator (in bytes)
1649 * precondition: it_state(it) == IAM_IT_ATTACHED ||
1650 * it_state(it) == IAM_IT_SKEWED
1652 int iam_it_key_size(const struct iam_iterator *it)
1654 assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1655 it_state(it) == IAM_IT_SKEWED);
1656 assert_corr(it_at_rec(it));
1657 return iam_leaf_key_size(&it->ii_path.ip_leaf);
1659 EXPORT_SYMBOL(iam_it_key_size);
1661 struct buffer_head *
1662 iam_new_node(handle_t *h, struct iam_container *c, iam_ptr_t *b, int *e)
1664 struct inode *inode = c->ic_object;
1665 struct buffer_head *bh = NULL;
1666 struct iam_idle_head *head;
1667 struct buffer_head *idle;
1671 if (c->ic_idle_bh == NULL)
1674 down(&c->ic_idle_sem);
1675 if (unlikely(c->ic_idle_bh == NULL)) {
1676 up(&c->ic_idle_sem);
1680 head = (struct iam_idle_head *)(c->ic_idle_bh->b_data);
1681 count = le16_to_cpu(head->iih_count);
1683 *e = ldiskfs_journal_get_write_access(h, c->ic_idle_bh);
1688 *b = le32_to_cpu(head->iih_blks[count]);
1689 head->iih_count = cpu_to_le16(count);
1690 *e = ldiskfs_journal_dirty_metadata(h, c->ic_idle_bh);
1694 up(&c->ic_idle_sem);
1695 bh = ldiskfs_bread(NULL, inode, *b, 0, e);
1701 /* The block itself which contains the iam_idle_head is
1702 * also an idle block, and can be used as the new node. */
1703 idle_blocks = (__u32 *)(c->ic_root_bh->b_data +
1704 c->ic_descr->id_root_gap +
1705 sizeof(struct dx_countlimit));
1706 *e = ldiskfs_journal_get_write_access(h, c->ic_root_bh);
1710 *b = le32_to_cpu(*idle_blocks);
1711 iam_lock_bh(c->ic_root_bh);
1712 *idle_blocks = head->iih_next;
1713 iam_unlock_bh(c->ic_root_bh);
1714 *e = ldiskfs_journal_dirty_metadata(h, c->ic_root_bh);
1716 iam_lock_bh(c->ic_root_bh);
1717 *idle_blocks = cpu_to_le32(*b);
1718 iam_unlock_bh(c->ic_root_bh);
1723 idle = iam_load_idle_blocks(c, le32_to_cpu(*idle_blocks));
1724 if (idle != NULL && IS_ERR(idle)) {
1726 c->ic_idle_bh = NULL;
1731 c->ic_idle_bh = idle;
1732 up(&c->ic_idle_sem);
1735 /* get write access for the found buffer head */
1736 *e = ldiskfs_journal_get_write_access(h, bh);
1740 ldiskfs_std_error(inode->i_sb, *e);
1742 /* Clear the reused node as new node does. */
1743 memset(bh->b_data, 0, inode->i_sb->s_blocksize);
1744 set_buffer_uptodate(bh);
1749 bh = ldiskfs_append(h, inode, b, e);
1753 up(&c->ic_idle_sem);
1754 ldiskfs_std_error(inode->i_sb, *e);
1759 * Insertion of new record. Interaction with jbd during non-trivial case (when
1760 * split happens) is as following:
1762 * - new leaf node is involved into transaction by iam_new_node();
1764 * - old leaf node is involved into transaction by iam_add_rec();
1766 * - leaf where insertion point ends in, is marked dirty by iam_add_rec();
1768 * - leaf without insertion point is marked dirty (as @new_leaf) by
1771 * - split index nodes are involved into transaction and marked dirty by
1772 * split_index_node().
1774 * - "safe" index node, which is not split, but where the new pointer is inserted,
1775 * is involved into transaction and marked dirty by split_index_node().
1777 * - index node where pointer to new leaf is inserted is involved into
1778 * transaction by split_index_node() and marked dirty by iam_add_rec().
1780 * - inode is marked dirty by iam_add_rec().
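 *
 * The per-buffer journaling pattern referred to above is the usual jbd one,
 * wrapped by the helpers defined earlier in this file (sketch only, error
 * handling elided):
 *
 *	err = iam_txn_add(handle, path, bh);       ldiskfs_journal_get_write_access()
 *	if (err == 0) {
 *		... modify bh->b_data ...
 *		err = iam_txn_dirty(handle, path, bh);   ldiskfs_journal_dirty_metadata()
 *	}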
1784 static int iam_new_leaf(handle_t *handle, struct iam_leaf *leaf)
1788 struct buffer_head *new_leaf;
1789 struct buffer_head *old_leaf;
1790 struct iam_container *c;
1792 struct iam_path *path;
1794 assert_inv(iam_leaf_check(leaf));
1796 c = iam_leaf_container(leaf);
1797 path = leaf->il_path;
1800 new_leaf = iam_new_node(handle, c, &blknr, &err);
1801 do_corr(schedule());
1802 if (new_leaf != NULL) {
1803 struct dynlock_handle *lh;
1805 lh = iam_lock_htree(c, blknr, DLT_WRITE);
1806 do_corr(schedule());
1808 iam_leaf_ops(leaf)->init_new(c, new_leaf);
1809 do_corr(schedule());
1810 old_leaf = leaf->il_bh;
1811 iam_leaf_split(leaf, &new_leaf, blknr);
1812 if (old_leaf != leaf->il_bh) {
1814 * Switched to the new leaf.
1816 iam_leaf_unlock(leaf);
1818 path->ip_frame->leaf = blknr;
1820 iam_unlock_htree(path->ip_container, lh);
1821 do_corr(schedule());
1822 err = iam_txn_dirty(handle, path, new_leaf);
1825 err = ldiskfs_mark_inode_dirty(handle, obj);
1826 do_corr(schedule());
1830 assert_inv(iam_leaf_check(leaf));
1831 assert_inv(iam_leaf_check(&iam_leaf_path(leaf)->ip_leaf));
1832 assert_inv(iam_path_check(iam_leaf_path(leaf)));
1836 static inline void dx_set_limit(struct iam_entry *entries, unsigned value)
1838 ((struct dx_countlimit *) entries)->limit = cpu_to_le16(value);
1841 static int iam_shift_entries(struct iam_path *path,
1842 struct iam_frame *frame, unsigned count,
1843 struct iam_entry *entries, struct iam_entry *entries2,
1850 struct iam_frame *parent = frame - 1;
1851 struct iam_ikey *pivot = iam_path_ikey(path, 3);
1853 delta = dx_index_is_compat(path) ? 0 : +1;
1855 count1 = count/2 + delta;
1856 count2 = count - count1;
1857 dx_get_ikey(path, iam_entry_shift(path, entries, count1), pivot);
1859 dxtrace(printk("Split index %d/%d\n", count1, count2));
1861 memcpy((char *) iam_entry_shift(path, entries2, delta),
1862 (char *) iam_entry_shift(path, entries, count1),
1863 count2 * iam_entry_size(path));
1865 dx_set_count(entries2, count2 + delta);
1866 dx_set_limit(entries2, dx_node_limit(path));
1869 * NOTE: very subtle piece of code: a competing dx_probe() may find a 2nd
1870 * level index in the root index; then we insert a new index here and set
1871 * a new count in that 2nd level index, so dx_probe() may see the 2nd level
1872 * index without the hash it looks for. The solution is to check the root
1873 * index after we have locked the just-found 2nd level index -bzzz
1875 iam_insert_key_lock(path, parent, pivot, newblock);
1878 * now the old and new 2nd level index blocks contain all pointers, so
1879 * dx_probe() may find it in either of them. That's OK -bzzz
1881 iam_lock_bh(frame->bh);
1882 dx_set_count(entries, count1);
1883 iam_unlock_bh(frame->bh);
1886 * now the old 2nd level index block points to the first half of the leaves.
1887 * It is important that dx_probe() checks the root index block for changes
1888 * under dx_lock_bh(frame->bh) -bzzz
1895 int split_index_node(handle_t *handle, struct iam_path *path,
1896 struct dynlock_handle **lh)
1899 struct iam_entry *entries; /* old block contents */
1900 struct iam_entry *entries2; /* new block contents */
1901 struct iam_frame *frame, *safe;
1902 struct buffer_head *bh_new[DX_MAX_TREE_HEIGHT] = {0};
1903 u32 newblock[DX_MAX_TREE_HEIGHT] = {0};
1904 struct dynlock_handle *lock[DX_MAX_TREE_HEIGHT] = {NULL,};
1905 struct dynlock_handle *new_lock[DX_MAX_TREE_HEIGHT] = {NULL,};
1906 struct inode *dir = iam_path_obj(path);
1907 struct iam_descr *descr;
1911 descr = iam_path_descr(path);
1913 * Algorithm below depends on this.
1915 assert_corr(dx_root_limit(path) < dx_node_limit(path));
1917 frame = path->ip_frame;
1918 entries = frame->entries;
1921 * Tall-tree handling: we might have to split multiple index blocks
1922 * all the way up to tree root. Tricky point here is error handling:
1923 * to avoid complicated undo/rollback we
1925 * - first allocate all necessary blocks
1927 * - insert pointers into them atomically.
1931 * Locking: leaf is already locked. htree-locks are acquired on all
1932 * index nodes that require split bottom-to-top, on the "safe" node,
1933 * and on all new nodes
1936 dxtrace(printk("using %u of %u node entries\n",
1937 dx_get_count(entries), dx_get_limit(entries)));
1939 /* What levels need split? */
1940 for (nr_splet = 0; frame >= path->ip_frames &&
1941 dx_get_count(frame->entries) == dx_get_limit(frame->entries);
1942 --frame, ++nr_splet) {
1943 do_corr(schedule());
1944 if (nr_splet == DX_MAX_TREE_HEIGHT) {
1946 CWARN("%.16s: Directory index full!\n",
1947       LDISKFS_SB(dir->i_sb)->s_es->s_volume_name);
1957 * Lock all nodes, bottom to top.
1959 for (frame = path->ip_frame, i = nr_splet; i >= 0; --i, --frame) {
1960 do_corr(schedule());
1961 lock[i] = iam_lock_htree(path->ip_container, frame->curidx,
1963 if (lock[i] == NULL) {
1970 * Check for concurrent index modification.
1972 err = iam_check_full_path(path, 1);
1976 * And check that the same number of nodes is to be split.
1978 for (i = 0, frame = path->ip_frame; frame >= path->ip_frames &&
1979 dx_get_count(frame->entries) == dx_get_limit(frame->entries);
1983 if (i != nr_splet) {
1988 /* Go back down, allocating blocks, locking them, and adding them into the transaction. */
1990 for (frame = safe + 1, i = 0; i < nr_splet; ++i, ++frame) {
1991 bh_new[i] = iam_new_node(handle, path->ip_container,
1992 &newblock[i], &err);
1993 do_corr(schedule());
1995 descr->id_ops->id_node_init(path->ip_container,
1998 new_lock[i] = iam_lock_htree(path->ip_container, newblock[i],
2000 if (new_lock[i] == NULL) {
2004 do_corr(schedule());
2005 BUFFER_TRACE(frame->bh, "get_write_access");
2006 err = ldiskfs_journal_get_write_access(handle, frame->bh);
2010 /* Add "safe" node to transaction too */
2011 if (safe + 1 != path->ip_frames) {
2012 do_corr(schedule());
2013 err = ldiskfs_journal_get_write_access(handle, safe->bh);
2018 /* Go through nodes once more, inserting pointers */
2019 for (frame = safe + 1, i = 0; i < nr_splet; ++i, ++frame) {
2022 struct buffer_head *bh2;
2023 struct buffer_head *bh;
2025 entries = frame->entries;
2026 count = dx_get_count(entries);
2027 idx = iam_entry_diff(path, frame->at, entries);
2030 entries2 = dx_get_entries(path, bh2->b_data, 0);
2033 if (frame == path->ip_frames) {
2034 /* splitting root node. Tricky point:
2036 * In the "normal" B-tree we'd split root *and* add
2037 * new root to the tree with pointers to the old root
2038 * and its sibling (thus introducing two new nodes).
2040 * In htree it's enough to add one node, because
2041 * capacity of the root node is smaller than that of a non-root node. */
2044 struct iam_frame *frames;
2045 struct iam_entry *next;
2047 assert_corr(i == 0);
2049 do_corr(schedule());
2051 frames = path->ip_frames;
2052 memcpy((char *) entries2, (char *) entries,
2053 count * iam_entry_size(path));
2054 dx_set_limit(entries2, dx_node_limit(path));
2057 iam_lock_bh(frame->bh);
2058 next = descr->id_ops->id_root_inc(path->ip_container,
2060 dx_set_block(path, next, newblock[0]);
2061 iam_unlock_bh(frame->bh);
2063 do_corr(schedule());
2064 /* Shift frames in the path */
2065 memmove(frames + 2, frames + 1,
2066 (sizeof path->ip_frames) - 2 * sizeof frames[0]);
2067 /* Add new access path frame */
2068 frames[1].at = iam_entry_shift(path, entries2, idx);
2069 frames[1].entries = entries = entries2;
2071 assert_inv(dx_node_check(path, frame));
2074 assert_inv(dx_node_check(path, frame));
2075 bh_new[0] = NULL; /* buffer head is "consumed" */
2076 err = ldiskfs_journal_dirty_metadata(handle, bh2);
2079 do_corr(schedule());
2081 /* splitting non-root index node. */
2082 struct iam_frame *parent = frame - 1;
2084 do_corr(schedule());
2085 count = iam_shift_entries(path, frame, count,
2086 entries, entries2, newblock[i]);
2087 /* Which index block gets the new entry? */
2089 int d = dx_index_is_compat(path) ? 0 : +1;
2091 frame->at = iam_entry_shift(path, entries2,
2093 frame->entries = entries = entries2;
2094 frame->curidx = newblock[i];
2095 swap(frame->bh, bh2);
2096 assert_corr(lock[i + 1] != NULL);
2097 assert_corr(new_lock[i] != NULL);
2098 swap(lock[i + 1], new_lock[i]);
2100 parent->at = iam_entry_shift(path,
2103 assert_inv(dx_node_check(path, frame));
2104 assert_inv(dx_node_check(path, parent));
2105 dxtrace(dx_show_index ("node", frame->entries));
2106 dxtrace(dx_show_index ("node",
2107 ((struct dx_node *) bh2->b_data)->entries));
2108 err = ldiskfs_journal_dirty_metadata(handle, bh2);
2111 do_corr(schedule());
2112 err = ldiskfs_journal_dirty_metadata(handle, parent->bh);
2116 do_corr(schedule());
2117 err = ldiskfs_journal_dirty_metadata(handle, bh);
2122 * This function was called to make insertion of new leaf
2123 * possible. Check that it fulfilled its obligations.
2125 assert_corr(dx_get_count(path->ip_frame->entries) <
2126 dx_get_limit(path->ip_frame->entries));
2127 assert_corr(lock[nr_splet] != NULL);
2128 *lh = lock[nr_splet];
2129 lock[nr_splet] = NULL;
2132 * Log ->i_size modification.
2134 err = ldiskfs_mark_inode_dirty(handle, dir);
2140 ldiskfs_std_error(dir->i_sb, err);
2143 iam_unlock_array(path->ip_container, lock);
2144 iam_unlock_array(path->ip_container, new_lock);
2146 assert_corr(err || iam_frame_is_locked(path, path->ip_frame));
2148 do_corr(schedule());
2149 for (i = 0; i < ARRAY_SIZE(bh_new); ++i) {
2150 if (bh_new[i] != NULL)
2156 static int iam_add_rec(handle_t *handle, struct iam_iterator *it,
2157 struct iam_path *path,
2158 const struct iam_key *k, const struct iam_rec *r)
2161 struct iam_leaf *leaf;
2163 leaf = &path->ip_leaf;
2164 assert_inv(iam_leaf_check(leaf));
2165 assert_inv(iam_path_check(path));
2166 err = iam_txn_add(handle, path, leaf->il_bh);
2168 do_corr(schedule());
2169 if (!iam_leaf_can_add(leaf, k, r)) {
2170 struct dynlock_handle *lh = NULL;
2173 assert_corr(lh == NULL);
2174 do_corr(schedule());
2175 err = split_index_node(handle, path, &lh);
2176 if (err == -EAGAIN) {
2177 assert_corr(lh == NULL);
2179 iam_path_fini(path);
2180 it->ii_state = IAM_IT_DETACHED;
2182 do_corr(schedule());
2183 err = iam_it_get_exact(it, k);
2185 err = +1; /* repeat split */
2190 assert_inv(iam_path_check(path));
2192 assert_corr(lh != NULL);
2193 do_corr(schedule());
2194 err = iam_new_leaf(handle, leaf);
2196 err = iam_txn_dirty(handle, path,
2197 path->ip_frame->bh);
2199 iam_unlock_htree(path->ip_container, lh);
2200 do_corr(schedule());
2203 iam_leaf_rec_add(leaf, k, r);
2204 err = iam_txn_dirty(handle, path, leaf->il_bh);
2207 assert_inv(iam_leaf_check(leaf));
2208 assert_inv(iam_leaf_check(&path->ip_leaf));
2209 assert_inv(iam_path_check(path));
2214 * Insert new record with key @k and contents from @r, shifting records to the
2215 * right. On success, iterator is positioned on the newly inserted record.
2217 * precondition: it->ii_flags&IAM_IT_WRITE &&
2218 * (it_state(it) == IAM_IT_ATTACHED ||
2219 * it_state(it) == IAM_IT_SKEWED) &&
2220 * ergo(it_state(it) == IAM_IT_ATTACHED,
2221 * it_keycmp(it, k) <= 0) &&
2222 * ergo(it_before(it), it_keycmp(it, k) > 0));
2223 * postcondition: ergo(result == 0,
2224 * it_state(it) == IAM_IT_ATTACHED &&
2225 * it_keycmp(it, k) == 0 &&
2226 * !memcmp(iam_it_rec_get(it), r, ...))
2228 int iam_it_rec_insert(handle_t *h, struct iam_iterator *it,
2229 const struct iam_key *k, const struct iam_rec *r)
2232 struct iam_path *path;
2234 path = &it->ii_path;
2236 assert_corr(it->ii_flags&IAM_IT_WRITE);
2237 assert_corr(it_state(it) == IAM_IT_ATTACHED ||
2238 it_state(it) == IAM_IT_SKEWED);
2239 assert_corr(ergo(it_state(it) == IAM_IT_ATTACHED,
2240 it_keycmp(it, k) <= 0));
2241 assert_corr(ergo(it_before(it), it_keycmp(it, k) > 0));
2242 result = iam_add_rec(h, it, path, k, r);
2244 it->ii_state = IAM_IT_ATTACHED;
2245 assert_corr(ergo(result == 0,
2246 it_state(it) == IAM_IT_ATTACHED &&
2247 it_keycmp(it, k) == 0));
2250 EXPORT_SYMBOL(iam_it_rec_insert);
2252 static inline int iam_idle_blocks_limit(struct inode *inode)
2254 return (inode->i_sb->s_blocksize - sizeof(struct iam_idle_head)) >> 2;
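/*
 * For example, with a 4096-byte block and the 8-byte table header described
 * in the theory statement above (16-bit magic, 16-bit count, 32-bit next
 * table pointer), this evaluates to (4096 - 8) >> 2 = 1022 idle block slots
 * per table.
 */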
2258 * If the leaf cannot be recycled, we will lose one block for reuse.
2259 * It is not a serious issue because it is almost the same as not recycling.
2261 static iam_ptr_t iam_index_shrink(handle_t *h, struct iam_path *p,
2262 struct iam_leaf *l, struct buffer_head **bh)
2264 struct iam_container *c = p->ip_container;
2265 struct inode *inode = c->ic_object;
2266 struct iam_frame *frame = p->ip_frame;
2267 struct iam_entry *entries;
2268 struct iam_entry *pos;
2269 struct dynlock_handle *lh;
2273 if (c->ic_idle_failed)
2276 if (unlikely(frame == NULL))
2279 if (!iam_leaf_empty(l))
2282 lh = iam_lock_htree(c, frame->curidx, DLT_WRITE);
2284 CWARN("%.16s: No memory to recycle idle blocks\n",
2285 LDISKFS_SB(inode->i_sb)->s_es->s_volume_name);
2289 rc = iam_txn_add(h, p, frame->bh);
2291 iam_unlock_htree(c, lh);
2295 iam_lock_bh(frame->bh);
2296 entries = frame->entries;
2297 count = dx_get_count(entries);
2298 /* Do NOT shrink the last entry in the index node, which can be reused
2299 * directly by the next new node. */
2301 iam_unlock_bh(frame->bh);
2302 iam_unlock_htree(c, lh);
2306 pos = iam_find_position(p, frame);
2307 /* There may be new leaf nodes that have been added, or empty leaf nodes
2308 * that have been shrunk, during the delete operation.
2310 * If the empty leaf is not under the current index node because the index
2311 * node has been split, then just skip the empty leaf, which is rare. */
2312 if (unlikely(frame->leaf != dx_get_block(p, pos))) {
2313 iam_unlock_bh(frame->bh);
2314 iam_unlock_htree(c, lh);
2319 if (frame->at < iam_entry_shift(p, entries, count - 1)) {
2320 struct iam_entry *n = iam_entry_shift(p, frame->at, 1);
2322 memmove(frame->at, n,
2323 (char *)iam_entry_shift(p, entries, count) - (char *)n);
2324 frame->at_shifted = 1;
2326 dx_set_count(entries, count - 1);
2327 iam_unlock_bh(frame->bh);
2328 rc = iam_txn_dirty(h, p, frame->bh);
2329 iam_unlock_htree(c, lh);
2339 iam_install_idle_blocks(handle_t *h, struct iam_path *p, struct buffer_head *bh,
2340 __u32 *idle_blocks, iam_ptr_t blk)
2342 struct iam_container *c = p->ip_container;
2343 struct buffer_head *old = c->ic_idle_bh;
2344 struct iam_idle_head *head;
2347 head = (struct iam_idle_head *)(bh->b_data);
2348 head->iih_magic = cpu_to_le16(IAM_IDLE_HEADER_MAGIC);
2349 head->iih_count = 0;
2350 head->iih_next = *idle_blocks;
2351 /* The caller already got journal write access for this bh. */
2352 rc = iam_txn_dirty(h, p, bh);
2356 rc = iam_txn_add(h, p, c->ic_root_bh);
2360 iam_lock_bh(c->ic_root_bh);
2361 *idle_blocks = cpu_to_le32(blk);
2362 iam_unlock_bh(c->ic_root_bh);
2363 rc = iam_txn_dirty(h, p, c->ic_root_bh);
2365 /* Do NOT release the old buffer before the new one is assigned. */
2370 iam_lock_bh(c->ic_root_bh);
2371 *idle_blocks = head->iih_next;
2372 iam_unlock_bh(c->ic_root_bh);
2378 * If the leaf cannot be recycled, we will lose one block for reuse.
2379 * It is not a serious issue because it is almost the same as not recycling.
2381 static void iam_recycle_leaf(handle_t *h, struct iam_path *p,
2382 struct buffer_head *bh, iam_ptr_t blk)
2384 struct iam_container *c = p->ip_container;
2385 struct inode *inode = c->ic_object;
2386 struct iam_idle_head *head;
2391 down(&c->ic_idle_sem);
2392 if (unlikely(c->ic_idle_failed)) {
2397 idle_blocks = (__u32 *)(c->ic_root_bh->b_data +
2398 c->ic_descr->id_root_gap +
2399 sizeof(struct dx_countlimit));
2400 /* It is the first idle block. */
2401 if (c->ic_idle_bh == NULL) {
2402 rc = iam_install_idle_blocks(h, p, bh, idle_blocks, blk);
2406 head = (struct iam_idle_head *)(c->ic_idle_bh->b_data);
2407 count = le16_to_cpu(head->iih_count);
2408 /* Current ic_idle_bh is full, to be replaced by the leaf. */
2409 if (count == iam_idle_blocks_limit(inode)) {
2410 rc = iam_install_idle_blocks(h, p, bh, idle_blocks, blk);
2414 /* Just add to ic_idle_bh. */
2415 rc = iam_txn_add(h, p, c->ic_idle_bh);
2419 head->iih_blks[count] = cpu_to_le32(blk);
2420 head->iih_count = cpu_to_le16(count + 1);
2421 rc = iam_txn_dirty(h, p, c->ic_idle_bh);
2424 up(&c->ic_idle_sem);
2426 CWARN("%.16s: idle blocks failed, will lose the blk %u\n",
2427 LDISKFS_SB(inode->i_sb)->s_es->s_volume_name, blk);
2431 * Delete record under iterator.
2433 * precondition: it_state(it) == IAM_IT_ATTACHED &&
2434 * it->ii_flags&IAM_IT_WRITE &&
2436 * postcondition: it_state(it) == IAM_IT_ATTACHED ||
2437 * it_state(it) == IAM_IT_DETACHED
2439 int iam_it_rec_delete(handle_t *h, struct iam_iterator *it)
2442 struct iam_leaf *leaf;
2443 struct iam_path *path;
2445 assert_corr(it_state(it) == IAM_IT_ATTACHED &&
2446 it->ii_flags&IAM_IT_WRITE);
2447 assert_corr(it_at_rec(it));
2449 path = &it->ii_path;
2450 leaf = &path->ip_leaf;
2452 assert_inv(iam_leaf_check(leaf));
2453 assert_inv(iam_path_check(path));
2455 result = iam_txn_add(h, path, leaf->il_bh);
2457 * no compaction for now.
2460 iam_rec_del(leaf, it->ii_flags&IAM_IT_MOVE);
2461 result = iam_txn_dirty(h, path, leaf->il_bh);
2462 if (result == 0 && iam_leaf_at_end(leaf)) {
2463 struct buffer_head *bh = NULL;
2466 blk = iam_index_shrink(h, path, leaf, &bh);
2467 if (it->ii_flags & IAM_IT_MOVE) {
2468 result = iam_it_next(it);
2474 iam_recycle_leaf(h, path, bh, blk);
2479 assert_inv(iam_leaf_check(leaf));
2480 assert_inv(iam_path_check(path));
2481 assert_corr(it_state(it) == IAM_IT_ATTACHED ||
2482 it_state(it) == IAM_IT_DETACHED);
2485 EXPORT_SYMBOL(iam_it_rec_delete);
2488 * Convert iterator to cookie.
2490 * precondition: it_state(it) == IAM_IT_ATTACHED &&
2491 * iam_path_descr(it->ii_path)->id_key_size <= sizeof(iam_pos_t)
2492 * postcondition: it_state(it) == IAM_IT_ATTACHED
2494 iam_pos_t iam_it_store(const struct iam_iterator *it)
2498 assert_corr(it_state(it) == IAM_IT_ATTACHED);
2499 assert_corr(it_at_rec(it));
2500 assert_corr(iam_it_container(it)->ic_descr->id_ikey_size <=
2504 return *(iam_pos_t *)iam_it_ikey_get(it, (void *)&result);
2506 EXPORT_SYMBOL(iam_it_store);
2509 * Restore iterator from cookie.
2511 * precondition: it_state(it) == IAM_IT_DETACHED && it->ii_flags&IAM_IT_MOVE &&
2512 * iam_path_descr(it->ii_path)->id_key_size <= sizeof(iam_pos_t)
2513 * postcondition: ergo(result == 0, it_state(it) == IAM_IT_ATTACHED &&
2514 * iam_it_store(it) == pos)
2516 int iam_it_load(struct iam_iterator *it, iam_pos_t pos)
2518 assert_corr(it_state(it) == IAM_IT_DETACHED &&
2519 it->ii_flags&IAM_IT_MOVE);
2520 assert_corr(iam_it_container(it)->ic_descr->id_ikey_size <= sizeof pos);
2521 return iam_it_iget(it, (struct iam_ikey *)&pos);
2523 EXPORT_SYMBOL(iam_it_load);
2525 /***********************************************************************/
2527 /***********************************************************************/
2529 static inline int ptr_inside(void *base, size_t size, void *ptr)
2531 return (base <= ptr) && (ptr < base + size);
2534 int iam_frame_invariant(struct iam_frame *f)
2538 f->bh->b_data != NULL &&
2539 ptr_inside(f->bh->b_data, f->bh->b_size, f->entries) &&
2540 ptr_inside(f->bh->b_data, f->bh->b_size, f->at) &&
2541 f->entries <= f->at);
2543 int iam_leaf_invariant(struct iam_leaf *l)
2547 l->il_bh->b_data != NULL &&
2548 ptr_inside(l->il_bh->b_data, l->il_bh->b_size, l->il_entries) &&
2549 ptr_inside(l->il_bh->b_data, l->il_bh->b_size, l->il_at) &&
2550 l->il_entries <= l->il_at;
2553 int iam_path_invariant(struct iam_path *p)
2557 if (p->ip_container == NULL ||
2558 p->ip_indirect < 0 || p->ip_indirect > DX_MAX_TREE_HEIGHT - 1 ||
2559 p->ip_frame != p->ip_frames + p->ip_indirect ||
2560 !iam_leaf_invariant(&p->ip_leaf))
2562 for (i = 0; i < ARRAY_SIZE(p->ip_frames); ++i) {
2563 if (i <= p->ip_indirect) {
2564 if (!iam_frame_invariant(&p->ip_frames[i]))
2571 int iam_it_invariant(struct iam_iterator *it)
2574 (it->ii_state == IAM_IT_DETACHED ||
2575 it->ii_state == IAM_IT_ATTACHED ||
2576 it->ii_state == IAM_IT_SKEWED) &&
2577 !(it->ii_flags & ~(IAM_IT_MOVE | IAM_IT_WRITE)) &&
2578 ergo(it->ii_state == IAM_IT_ATTACHED ||
2579 it->ii_state == IAM_IT_SKEWED,
2580 iam_path_invariant(&it->ii_path) &&
2581 equi(it_at_rec(it), it->ii_state == IAM_IT_SKEWED));
2585 * Search container @c for record with key @k. If record is found, its data
2586 * are moved into @r.
2588 * Return values: 0: found, -ENOENT: not-found, -ve: error
2590 int iam_lookup(struct iam_container *c, const struct iam_key *k,
2591 struct iam_rec *r, struct iam_path_descr *pd)
2593 struct iam_iterator it;
2596 iam_it_init(&it, c, 0, pd);
2598 result = iam_it_get_exact(&it, k);
2601 * record with required key found, copy it into user buffer
2603 iam_reccpy(&it.ii_path.ip_leaf, r);
2608 EXPORT_SYMBOL(iam_lookup);
2611 * Insert new record @r with key @k into container @c (within the context of transaction @h).
2614 * Return values: 0: success, -ve: error, including -EEXIST when record with
2615 * given key is already present.
2617 * postcondition: ergo(result == 0 || result == -EEXIST,
2618 * iam_lookup(c, k, r2) > 0;
2620 int iam_insert(handle_t *h, struct iam_container *c, const struct iam_key *k,
2621 const struct iam_rec *r, struct iam_path_descr *pd)
2623 struct iam_iterator it;
2626 iam_it_init(&it, c, IAM_IT_WRITE, pd);
2628 result = iam_it_get_exact(&it, k);
2629 if (result == -ENOENT)
2630 result = iam_it_rec_insert(h, &it, k, r);
2631 else if (result == 0)
2637 EXPORT_SYMBOL(iam_insert);
2640 * Update record with the key @k in container @c (within context of
2641 * transaction @h), new record is given by @r.
2643 * Return values: +1: skip because of the same rec value, 0: success,
2644 * -ve: error, including -ENOENT if no record with the given key found.
2646 int iam_update(handle_t *h, struct iam_container *c, const struct iam_key *k,
2647 const struct iam_rec *r, struct iam_path_descr *pd)
2649 struct iam_iterator it;
2650 struct iam_leaf *folio;
2653 iam_it_init(&it, c, IAM_IT_WRITE, pd);
2655 result = iam_it_get_exact(&it, k);
2657 folio = &it.ii_path.ip_leaf;
2658 result = iam_leaf_ops(folio)->rec_eq(folio, r);
2660 iam_it_rec_set(h, &it, r);
2668 EXPORT_SYMBOL(iam_update);
2671 * Delete existing record with key @k.
2673 * Return values: 0: success, -ENOENT: not-found, -ve: other error.
2675 * postcondition: ergo(result == 0 || result == -ENOENT,
2676 * !iam_lookup(c, k, *));
2678 int iam_delete(handle_t *h, struct iam_container *c, const struct iam_key *k,
2679 struct iam_path_descr *pd)
2681 struct iam_iterator it;
2684 iam_it_init(&it, c, IAM_IT_WRITE, pd);
2686 result = iam_it_get_exact(&it, k);
2688 iam_it_rec_delete(h, &it);
2693 EXPORT_SYMBOL(iam_delete);
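/*
 * Illustrative usage sketch of the high-level API only (kept as a comment;
 * the handle @h, descriptor @descr, inode, path descriptor @pd and the
 * key/rec buffers are placeholders provided by the caller):
 *
 *	iam_container_init(&c, descr, inode);
 *	rc = iam_container_setup(&c);
 *
 *	rc = iam_insert(h, &c, key, rec, pd);	-EEXIST if the key is present
 *	rc = iam_lookup(&c, key, rec, pd);	0: found, -ENOENT: not found
 *	rc = iam_update(h, &c, key, rec, pd);	+1: record already has this value
 *	rc = iam_delete(h, &c, key, pd);
 *
 *	iam_container_fini(&c);
 */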
2695 int iam_root_limit(int rootgap, int blocksize, int size)
2700 limit = (blocksize - rootgap) / size;
2701 nlimit = blocksize / size;
2702 if (limit == nlimit)
	limit--;

	return limit;
}