4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2017, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
32 * Top-level entry points into iam module
34 * Author: Wang Di <wangdi@clusterfs.com>
35 * Author: Nikita Danilov <nikita@clusterfs.com>
39 * iam: big theory statement.
41 * iam (Index Access Module) is a module providing an abstraction of a
42 * persistent transactional container on top of a generalized ldiskfs htree.
46 * - key, pointer, and record size specifiable per container.
48 * - trees taller than 2 index levels.
50 * - read/write to existing ldiskfs htree directories as iam containers.
52 * An iam container is a tree consisting of leaf nodes, which contain the keys
53 * and records stored in this container, and index nodes, which contain keys
54 * and pointers to leaf or index nodes.
56 * iam does not work with keys directly; instead it calls a user-supplied key
57 * comparison function (->dpo_keycmp()).
59 * Pointers are (currently) interpreted as logical offsets (measured in
60 * blocks) within the underlying flat file on top of which the iam tree lives.
64 * iam mostly tries to reuse existing htree formats.
66 * Format of index node:
68 * +-----+-------+-------+-------+------+-------+------------+
69 * | | count | | | | | |
70 * | gap | / | entry | entry | .... | entry | free space |
71 * | | limit | | | | | |
72 * +-----+-------+-------+-------+------+-------+------------+
74 * gap this part of node is never accessed by iam code. It
75 * exists for binary compatibility with ldiskfs htree (that,
76 * in turn, stores fake struct ext2_dirent for ext2
77 * compatibility), and to keep some unspecified per-node
78 * data. Gap can be different for root and non-root index
79 * nodes. Gap size can be specified for each container
80 * (gap of 0 is allowed).
82 * count/limit current number of entries in this node, and the maximal
83 * number of entries that can fit into the node. count/limit
84 * has the same size as an entry, and is itself counted in
85 * count.
87 * entry index entry: consists of a key immediately followed by
88 * a pointer to a child node. Size of a key and size of a
89 * pointer depend on the container. An entry has neither
90 * alignment nor padding.
92 * free space portion of the node to which new entries are added
94 * Entries in index node are sorted by their key value.
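 *
 * For illustration only, a minimal sketch of the entry arithmetic this
 * layout implies. The helpers are hypothetical; id_ikey_size is used
 * further below in this file, while id_ptr_size is assumed here to be the
 * matching per-container pointer size. Entries are packed, so addressing
 * is plain byte offsets:
 *
 *	static inline size_t iam_example_entry_size(const struct iam_container *c)
 *	{
 *		// a key immediately followed by a pointer, no padding
 *		return c->ic_descr->id_ikey_size + c->ic_descr->id_ptr_size;
 *	}
 *
 *	static inline void *iam_example_entry_at(const struct iam_container *c,
 *						 void *entries, int i)
 *	{
 *		return (char *)entries + i * iam_example_entry_size(c);
 *	}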
96 * Format of a leaf node is not specified. Generic iam code accesses leaf
97 * nodes through ->id_leaf methods in struct iam_descr.
99 * The IAM root block is a special node, which contains the IAM descriptor.
100 * Its on-disk format is:
102 * +---------+-------+--------+---------+-------+------+-------+------------+
103 * |IAM desc | count | idle | | | | | |
104 * |(fix/var)| / | blocks | padding | entry | .... | entry | free space |
105 * | | limit | | | | | | |
106 * +---------+-------+--------+---------+-------+------+-------+------------+
108 * The padding length is calculated from the parameters in the IAM descriptor.
110 * The field "idle_blocks" is used to record empty leaf nodes that have not
111 * been released even though all entries in them have been removed. Ideally,
112 * the idle blocks would be reused whenever new leaf nodes are needed for new
113 * entries, but that would require the IAM hash functions to map the new
114 * entries to these idle blocks. Unfortunately, it is not easy to design
115 * hash functions for such a clever mapping, especially considering insert/
116 * lookup performance.
118 * So the IAM recycles the empty leaf nodes and puts them into a per-file
119 * pool of idle blocks. When a new leaf node is needed, an idle block is
120 * taken from this pool first, regardless of where the IAM hash functions
121 * would map the new entries.
123 * The idle blocks pool is organized as a series of tables, and each table
124 * can be described as follows (on-disk format):
126 * +---------+---------+---------+---------+------+---------+-------+
127 * | magic | count | next | logic | | logic | free |
128 * |(16 bits)|(16 bits)| table | blk # | .... | blk # | space |
129 * | | |(32 bits)|(32 bits)| |(32 bits)| |
130 * +---------+---------+---------+---------+------+---------+-------+
132 * The logical blk# of the first table is stored in the root node field "idle_blocks".
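 *
 * A sketch of the idle-table header implied by the diagram above and by
 * the field accesses in this file (head->iih_magic, head->iih_count,
 * head->iih_next, head->iih_blks[]); the authoritative definition lives
 * in the osd headers:
 *
 *	struct iam_idle_head {
 *		__le16 iih_magic;	// IAM_IDLE_HEADER_MAGIC
 *		__le16 iih_count;	// blk# slots currently in use
 *		__le32 iih_next;	// logical blk# of the next table
 *		__le32 iih_blks[0];	// idle leaf blk#s, up to the limit
 *	};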
136 #include <linux/module.h>
137 #include <linux/fs.h>
138 #include <linux/pagemap.h>
139 #include <linux/time.h>
140 #include <linux/fcntl.h>
141 #include <linux/stat.h>
142 #include <linux/string.h>
143 #include <linux/quotaops.h>
144 #include <linux/buffer_head.h>
146 #include <ldiskfs/ldiskfs.h>
147 #include <ldiskfs/xattr.h>
150 #include "osd_internal.h"
152 #include <ldiskfs/acl.h>
154 static struct buffer_head *
155 iam_load_idle_blocks(struct iam_container *c, iam_ptr_t blk)
157 struct inode *inode = c->ic_object;
158 struct iam_idle_head *head;
159 struct buffer_head *bh;
161 LASSERT(mutex_is_locked(&c->ic_idle_mutex));
166 bh = __ldiskfs_bread(NULL, inode, blk, 0);
167 if (IS_ERR_OR_NULL(bh)) {
168 CERROR("%s: cannot load idle blocks, blk = %u: rc = %ld\n",
169 osd_ino2name(inode), blk, bh ? PTR_ERR(bh) : -EIO);
170 c->ic_idle_failed = 1;
176 head = (struct iam_idle_head *)(bh->b_data);
177 if (le16_to_cpu(head->iih_magic) != IAM_IDLE_HEADER_MAGIC) {
180 CERROR("%s: invalid idle block head, blk = %u, magic = %x: rc = %d\n",
181 osd_ino2name(inode), blk, le16_to_cpu(head->iih_magic),
184 c->ic_idle_failed = 1;
192 * Determine format of given container. This is done by scanning list of
193 * registered formats and calling ->if_guess() method of each in turn.
195 static int iam_format_guess(struct iam_container *c)
199 result = iam_lvar_guess(c);
201 result = iam_lfix_guess(c);
204 struct buffer_head *bh;
207 LASSERT(c->ic_root_bh != NULL);
209 idle_blocks = (__u32 *)(c->ic_root_bh->b_data +
210 c->ic_descr->id_root_gap +
211 sizeof(struct dx_countlimit));
212 mutex_lock(&c->ic_idle_mutex);
213 bh = iam_load_idle_blocks(c, le32_to_cpu(*idle_blocks));
214 if (bh != NULL && IS_ERR(bh))
215 result = PTR_ERR(bh);
218 mutex_unlock(&c->ic_idle_mutex);
225 * Initialize container @c.
227 int iam_container_init(struct iam_container *c,
228 struct iam_descr *descr, struct inode *inode)
230 memset(c, 0, sizeof(*c));
232 c->ic_object = inode;
233 dynlock_init(&c->ic_tree_lock);
234 mutex_init(&c->ic_idle_mutex);
239 * Determine container format.
241 int iam_container_setup(struct iam_container *c)
243 return iam_format_guess(c);
247 * Finalize container @c, release all resources.
249 void iam_container_fini(struct iam_container *c)
251 brelse(c->ic_idle_bh);
252 c->ic_idle_bh = NULL;
253 brelse(c->ic_root_bh);
254 c->ic_root_bh = NULL;
257 void iam_path_init(struct iam_path *path, struct iam_container *c,
258 struct iam_path_descr *pd)
260 memset(path, 0, sizeof(*path));
261 path->ip_container = c;
262 path->ip_frame = path->ip_frames;
264 path->ip_leaf.il_path = path;
267 static void iam_leaf_fini(struct iam_leaf *leaf);
269 void iam_path_release(struct iam_path *path)
273 for (i = 0; i < ARRAY_SIZE(path->ip_frames); i++) {
274 if (path->ip_frames[i].bh != NULL) {
275 path->ip_frames[i].at_shifted = 0;
276 brelse(path->ip_frames[i].bh);
277 path->ip_frames[i].bh = NULL;
282 void iam_path_fini(struct iam_path *path)
284 iam_leaf_fini(&path->ip_leaf);
285 iam_path_release(path);
289 void iam_path_compat_init(struct iam_path_compat *path, struct inode *inode)
293 path->ipc_hinfo = &path->ipc_hinfo_area;
294 for (i = 0; i < ARRAY_SIZE(path->ipc_scratch); ++i)
295 path->ipc_descr.ipd_key_scratch[i] =
296 (struct iam_ikey *)&path->ipc_scratch[i];
298 iam_path_init(&path->ipc_path, &path->ipc_container, &path->ipc_descr);
301 void iam_path_compat_fini(struct iam_path_compat *path)
303 iam_path_fini(&path->ipc_path);
307 * Helper function initializing iam_path_descr and its key scratch area.
309 struct iam_path_descr *iam_ipd_alloc(void *area, int keysize)
311 struct iam_path_descr *ipd;
317 for (i = 0; i < ARRAY_SIZE(ipd->ipd_key_scratch); ++i, karea += keysize)
318 ipd->ipd_key_scratch[i] = karea;
322 void iam_ipd_free(struct iam_path_descr *ipd)
326 int iam_node_read(struct iam_container *c, iam_ptr_t ptr,
327 handle_t *h, struct buffer_head **bh)
330 * NB: it can be called by iam_lfix_guess() which is still at
331 * very early stage, c->ic_root_bh and c->ic_descr->id_ops
332 * haven't been initialized yet.
333 * Also, we don't have this for IAM dir.
335 if (c->ic_root_bh != NULL &&
336 c->ic_descr->id_ops->id_root_ptr(c) == ptr) {
337 get_bh(c->ic_root_bh);
342 *bh = __ldiskfs_bread(h, c->ic_object, (int)ptr, 0);
353 * Return pointer to current leaf record. Pointer is valid while corresponding
354 * leaf node is locked and pinned.
356 static struct iam_rec *iam_leaf_rec(const struct iam_leaf *leaf)
358 return iam_leaf_ops(leaf)->rec(leaf);
362 * Return pointer to the current leaf key. This function returns pointer to
363 * the key stored in node.
365 * Caller should assume that the returned pointer is only valid while the
366 * leaf node is locked.
368 static struct iam_key *iam_leaf_key(const struct iam_leaf *leaf)
370 return iam_leaf_ops(leaf)->key(leaf);
373 static int iam_leaf_key_size(const struct iam_leaf *leaf)
375 return iam_leaf_ops(leaf)->key_size(leaf);
378 static struct iam_ikey *iam_leaf_ikey(const struct iam_leaf *leaf,
379 struct iam_ikey *key)
381 return iam_leaf_ops(leaf)->ikey(leaf, key);
384 static int iam_leaf_keycmp(const struct iam_leaf *leaf,
385 const struct iam_key *key)
387 return iam_leaf_ops(leaf)->key_cmp(leaf, key);
390 static int iam_leaf_keyeq(const struct iam_leaf *leaf,
391 const struct iam_key *key)
393 return iam_leaf_ops(leaf)->key_eq(leaf, key);
396 #if LDISKFS_INVARIANT_ON
397 static int iam_path_check(struct iam_path *p)
402 struct iam_descr *param;
405 param = iam_path_descr(p);
406 for (i = 0; result && i < ARRAY_SIZE(p->ip_frames); ++i) {
407 f = &p->ip_frames[i];
409 result = dx_node_check(p, f);
411 result = !param->id_ops->id_node_check(p, f);
414 if (result && p->ip_leaf.il_bh != NULL)
417 ldiskfs_std_error(iam_path_obj(p)->i_sb, result);
423 static int iam_leaf_load(struct iam_path *path)
427 struct iam_container *c;
428 struct buffer_head *bh;
429 struct iam_leaf *leaf;
430 struct iam_descr *descr;
432 c = path->ip_container;
433 leaf = &path->ip_leaf;
434 descr = iam_path_descr(path);
435 block = path->ip_frame->leaf;
438 pr_err("wrong leaf: %lu %d [%p %p %p]\n",
439 (unsigned long)path->ip_frame->leaf,
440 dx_get_count(dx_node_get_entries(path, path->ip_frame)),
441 path->ip_frames[0].bh, path->ip_frames[1].bh,
442 path->ip_frames[2].bh);
444 err = descr->id_ops->id_node_read(c, block, NULL, &bh);
447 leaf->il_curidx = block;
448 err = iam_leaf_ops(leaf)->init(leaf);
453 static void iam_unlock_htree(struct iam_container *ic,
454 struct dynlock_handle *lh)
457 dynlock_unlock(&ic->ic_tree_lock, lh);
461 static void iam_leaf_unlock(struct iam_leaf *leaf)
463 if (leaf->il_lock != NULL) {
464 iam_unlock_htree(iam_leaf_container(leaf),
467 leaf->il_lock = NULL;
471 static void iam_leaf_fini(struct iam_leaf *leaf)
473 if (leaf->il_path != NULL) {
474 iam_leaf_unlock(leaf);
475 iam_leaf_ops(leaf)->fini(leaf);
484 static void iam_leaf_start(struct iam_leaf *folio)
486 iam_leaf_ops(folio)->start(folio);
489 void iam_leaf_next(struct iam_leaf *folio)
491 iam_leaf_ops(folio)->next(folio);
494 static void iam_leaf_rec_add(struct iam_leaf *leaf, const struct iam_key *key,
495 const struct iam_rec *rec)
497 iam_leaf_ops(leaf)->rec_add(leaf, key, rec);
500 static void iam_rec_del(struct iam_leaf *leaf, int shift)
502 iam_leaf_ops(leaf)->rec_del(leaf, shift);
505 int iam_leaf_at_end(const struct iam_leaf *leaf)
507 return iam_leaf_ops(leaf)->at_end(leaf);
510 static void iam_leaf_split(struct iam_leaf *l, struct buffer_head **bh,
513 iam_leaf_ops(l)->split(l, bh, nr);
516 static inline int iam_leaf_empty(struct iam_leaf *l)
518 return iam_leaf_ops(l)->leaf_empty(l);
521 int iam_leaf_can_add(const struct iam_leaf *l,
522 const struct iam_key *k, const struct iam_rec *r)
524 return iam_leaf_ops(l)->can_add(l, k, r);
527 static int iam_txn_dirty(handle_t *handle,
528 struct iam_path *path, struct buffer_head *bh)
532 result = ldiskfs_handle_dirty_metadata(handle, NULL, bh);
534 ldiskfs_std_error(iam_path_obj(path)->i_sb, result);
538 static int iam_txn_add(handle_t *handle,
539 struct iam_path *path, struct buffer_head *bh)
542 struct super_block *sb = iam_path_obj(path)->i_sb;
544 result = osd_ldiskfs_journal_get_write_access(handle, sb, bh,
547 ldiskfs_std_error(sb, result);
551 /* iterator interface */
552 static enum iam_it_state it_state(const struct iam_iterator *it)
558 * Helper function returning the iterator's container.
560 static struct iam_container *iam_it_container(const struct iam_iterator *it)
562 return it->ii_path.ip_container;
565 static inline int it_keycmp(const struct iam_iterator *it,
566 const struct iam_key *k)
568 return iam_leaf_keycmp(&it->ii_path.ip_leaf, k);
571 static inline int it_keyeq(const struct iam_iterator *it,
572 const struct iam_key *k)
574 return iam_leaf_keyeq(&it->ii_path.ip_leaf, k);
577 static int it_ikeycmp(const struct iam_iterator *it, const struct iam_ikey *ik)
579 return iam_ikeycmp(it->ii_path.ip_container,
580 iam_leaf_ikey(&it->ii_path.ip_leaf,
581 iam_path_ikey(&it->ii_path, 0)), ik);
584 static inline int it_at_rec(const struct iam_iterator *it)
586 return !iam_leaf_at_end(&it->ii_path.ip_leaf);
589 static inline int it_before(const struct iam_iterator *it)
591 return it_state(it) == IAM_IT_SKEWED && it_at_rec(it);
595 * Helper wrapper around iam_it_get(): returns 0 (success) only when record
596 * with exactly the same key as asked is found.
598 static int iam_it_get_exact(struct iam_iterator *it, const struct iam_key *k)
602 result = iam_it_get(it, k);
605 else if (result == 0)
607 * Return -ENOENT if the cursor is located above a record with a key
608 * different from the one specified, or in an empty leaf.
610 * XXX returning -ENOENT only works if iam_it_get() never
611 * returns -ENOENT as a legitimate error.
618 * Initialize iterator to IAM_IT_DETACHED state.
620 * postcondition: it_state(it) == IAM_IT_DETACHED
622 int iam_it_init(struct iam_iterator *it, struct iam_container *c, __u32 flags,
623 struct iam_path_descr *pd)
625 memset(it, 0, sizeof(*it));
626 it->ii_flags = flags;
627 it->ii_state = IAM_IT_DETACHED;
628 iam_path_init(&it->ii_path, c, pd);
633 * Finalize iterator and release all resources.
635 * precondition: it_state(it) == IAM_IT_DETACHED
637 void iam_it_fini(struct iam_iterator *it)
639 assert_corr(it_state(it) == IAM_IT_DETACHED);
640 iam_path_fini(&it->ii_path);
644 * These locking primitives are used to protect parts of the directory's
645 * htree. The unit of protection is a block: a leaf or an index node.
647 static struct dynlock_handle *iam_lock_htree(struct iam_container *ic,
649 enum dynlock_type lt)
651 return dynlock_lock(&ic->ic_tree_lock, value, lt, GFP_NOFS);
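
/*
 * A minimal usage sketch for these primitives (hypothetical block number
 * blk). dynlock_lock() allocates with GFP_NOFS and may return NULL, so
 * callers in this file always check the handle:
 *
 *	struct dynlock_handle *lh;
 *
 *	lh = iam_lock_htree(c, blk, DLT_READ);
 *	if (lh != NULL) {
 *		// ... access the block ...
 *		iam_unlock_htree(c, lh);
 *	}
 */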
654 static int iam_index_lock(struct iam_path *path, struct dynlock_handle **lh)
658 for (f = path->ip_frame; f >= path->ip_frames; --f, ++lh) {
660 *lh = iam_lock_htree(path->ip_container, f->curidx, DLT_READ);
668 * Fast check for frame consistency.
670 static int iam_check_fast(struct iam_path *path, struct iam_frame *frame)
672 struct iam_container *bag;
673 struct iam_entry *next;
674 struct iam_entry *last;
675 struct iam_entry *entries;
676 struct iam_entry *at;
678 bag = path->ip_container;
680 entries = frame->entries;
681 last = iam_entry_shift(path, entries, dx_get_count(entries) - 1);
683 if (unlikely(at > last))
686 if (unlikely(dx_get_block(path, at) != frame->leaf))
689 if (unlikely(iam_ikeycmp(bag, iam_ikey_at(path, at),
690 path->ip_ikey_target) > 0))
693 next = iam_entry_shift(path, at, +1);
695 if (unlikely(iam_ikeycmp(bag, iam_ikey_at(path, next),
696 path->ip_ikey_target) <= 0))
702 int dx_index_is_compat(struct iam_path *path)
704 return iam_path_descr(path) == NULL;
710 * search position of specified hash in index
714 static struct iam_entry *iam_find_position(struct iam_path *path,
715 struct iam_frame *frame)
722 count = dx_get_count(frame->entries);
723 assert_corr(count && count <= dx_get_limit(frame->entries));
724 p = iam_entry_shift(path, frame->entries,
725 dx_index_is_compat(path) ? 1 : 2);
726 q = iam_entry_shift(path, frame->entries, count - 1);
728 m = iam_entry_shift(path, p, iam_entry_diff(path, q, p) / 2);
729 if (iam_ikeycmp(path->ip_container, iam_ikey_at(path, m),
730 path->ip_ikey_target) > 0)
731 q = iam_entry_shift(path, m, -1);
733 p = iam_entry_shift(path, m, +1);
735 return iam_entry_shift(path, p, -1);
740 static iam_ptr_t iam_find_ptr(struct iam_path *path, struct iam_frame *frame)
742 return dx_get_block(path, iam_find_position(path, frame));
745 void iam_insert_key(struct iam_path *path, struct iam_frame *frame,
746 const struct iam_ikey *key, iam_ptr_t ptr)
748 struct iam_entry *entries = frame->entries;
749 struct iam_entry *new = iam_entry_shift(path, frame->at, +1);
750 int count = dx_get_count(entries);
753 * Unfortunately we cannot assert this, as this function is sometimes
754 * called by VFS under i_sem and without pdirops lock.
756 assert_corr(1 || iam_frame_is_locked(path, frame));
757 assert_corr(count < dx_get_limit(entries));
758 assert_corr(frame->at < iam_entry_shift(path, entries, count));
759 assert_inv(dx_node_check(path, frame));
760 /* Prevent memory corruption outside of buffer_head */
761 BUG_ON(count >= dx_get_limit(entries));
762 BUG_ON((char *)iam_entry_shift(path, entries, count + 1) >
763 (frame->bh->b_data + frame->bh->b_size));
765 memmove(iam_entry_shift(path, new, 1), new,
766 (char *)iam_entry_shift(path, entries, count) - (char *)new);
767 dx_set_ikey(path, new, key);
768 dx_set_block(path, new, ptr);
769 dx_set_count(entries, count + 1);
771 BUG_ON(count > dx_get_limit(entries));
772 assert_inv(dx_node_check(path, frame));
775 void iam_insert_key_lock(struct iam_path *path, struct iam_frame *frame,
776 const struct iam_ikey *key, iam_ptr_t ptr)
778 iam_lock_bh(frame->bh);
779 iam_insert_key(path, frame, key, ptr);
780 iam_unlock_bh(frame->bh);
783 * returns 0 if path was unchanged, -EAGAIN otherwise.
785 static int iam_check_path(struct iam_path *path, struct iam_frame *frame)
789 iam_lock_bh(frame->bh);
790 equal = iam_check_fast(path, frame) == 0 ||
791 frame->leaf == iam_find_ptr(path, frame);
792 DX_DEVAL(iam_lock_stats.dls_bh_again += !equal);
793 iam_unlock_bh(frame->bh);
795 return equal ? 0 : -EAGAIN;
798 static int iam_lookup_try(struct iam_path *path)
804 struct iam_descr *param;
805 struct iam_frame *frame;
806 struct iam_container *c;
808 param = iam_path_descr(path);
809 c = path->ip_container;
811 ptr = param->id_ops->id_root_ptr(c);
812 for (frame = path->ip_frames, i = 0; i <= path->ip_indirect;
814 err = param->id_ops->id_node_read(c, (iam_ptr_t)ptr, NULL,
818 iam_lock_bh(frame->bh);
820 * node must be initialized under bh lock because concurrent
821 * creation procedure may change it and iam_lookup_try() will
822 * see obsolete tree height. -bzzz
827 if (LDISKFS_INVARIANT_ON) {
828 err = param->id_ops->id_node_check(path, frame);
833 err = param->id_ops->id_node_load(path, frame);
837 assert_inv(dx_node_check(path, frame));
839 * splitting may change the root index block and move the hash we're
840 * looking for into another index block, so we have to check for
841 * this situation and repeat from the beginning if the path got changed
845 err = iam_check_path(path, frame - 1);
850 frame->at = iam_find_position(path, frame);
852 frame->leaf = ptr = dx_get_block(path, frame->at);
854 iam_unlock_bh(frame->bh);
858 iam_unlock_bh(frame->bh);
859 path->ip_frame = --frame;
863 static int __iam_path_lookup(struct iam_path *path)
868 for (i = 0; i < DX_MAX_TREE_HEIGHT; ++i)
869 assert(path->ip_frames[i].bh == NULL);
872 err = iam_lookup_try(path);
876 } while (err == -EAGAIN);
882 * returns 0 if path was unchanged, -EAGAIN otherwise.
884 static int iam_check_full_path(struct iam_path *path, int search)
886 struct iam_frame *bottom;
887 struct iam_frame *scan;
893 for (bottom = path->ip_frames, i = 0;
894 i < DX_MAX_TREE_HEIGHT && bottom->bh != NULL; ++bottom, ++i) {
895 ; /* find last filled in frame */
898 /* Lock frames, bottom to top. */
899 for (scan = bottom - 1; scan >= path->ip_frames; --scan)
900 iam_lock_bh(scan->bh);
901 /* Check them top to bottom. */
903 for (scan = path->ip_frames; scan < bottom; ++scan) {
904 struct iam_entry *pos;
907 if (iam_check_fast(path, scan) == 0)
910 pos = iam_find_position(path, scan);
911 if (scan->leaf != dx_get_block(path, pos)) {
917 pos = iam_entry_shift(path, scan->entries,
918 dx_get_count(scan->entries) - 1);
919 if (scan->at > pos ||
920 scan->leaf != dx_get_block(path, scan->at)) {
927 /* Unlock top to bottom. */
928 for (scan = path->ip_frames; scan < bottom; ++scan)
929 iam_unlock_bh(scan->bh);
930 DX_DEVAL(iam_lock_stats.dls_bh_full_again += !!result);
938 * Performs path lookup and returns with the found leaf (if any) locked by
939 * the htree lock.
941 static int iam_lookup_lock(struct iam_path *path,
942 struct dynlock_handle **dl, enum dynlock_type lt)
946 while ((result = __iam_path_lookup(path)) == 0) {
948 *dl = iam_lock_htree(path->ip_container, path->ip_frame->leaf,
957 * while being locked, the leaf we just found may get split, so we
958 * need to check for this -bzzz
960 if (iam_check_full_path(path, 1) == 0)
962 iam_unlock_htree(path->ip_container, *dl);
969 * Performs tree top-to-bottom traversal starting from the root, and loads
970 * the leaf node.
972 static int iam_path_lookup(struct iam_path *path, int index)
974 struct iam_leaf *leaf;
977 leaf = &path->ip_leaf;
978 result = iam_lookup_lock(path, &leaf->il_lock, DLT_WRITE);
979 assert_inv(iam_path_check(path));
982 result = iam_leaf_load(path);
986 result = iam_leaf_ops(leaf)->
987 ilookup(leaf, path->ip_ikey_target);
989 result = iam_leaf_ops(leaf)->
990 lookup(leaf, path->ip_key_target);
994 iam_leaf_unlock(leaf);
1000 * Common part of iam_it_{i,}get().
1002 static int __iam_it_get(struct iam_iterator *it, int index)
1006 assert_corr(it_state(it) == IAM_IT_DETACHED);
1008 result = iam_path_lookup(&it->ii_path, index);
1012 collision = result & IAM_LOOKUP_LAST;
1013 switch (result & ~IAM_LOOKUP_LAST) {
1014 case IAM_LOOKUP_EXACT:
1016 it->ii_state = IAM_IT_ATTACHED;
1020 it->ii_state = IAM_IT_ATTACHED;
1022 case IAM_LOOKUP_BEFORE:
1023 case IAM_LOOKUP_EMPTY:
1025 it->ii_state = IAM_IT_SKEWED;
1030 result |= collision;
1032 /* See iam_it_get_exact() for explanation. */
1033 assert_corr(result != -ENOENT);
1038 * Correct hash, but not the same key, was found; iterate through the hash
1039 * collision chain, looking for the correct record.
1041 static int iam_it_collision(struct iam_iterator *it)
1045 assert(ergo(it_at_rec(it), !it_keyeq(it, it->ii_path.ip_key_target)));
1047 while ((result = iam_it_next(it)) == 0) {
1048 do_corr(schedule());
1049 if (it_ikeycmp(it, it->ii_path.ip_ikey_target) != 0)
1051 if (it_keyeq(it, it->ii_path.ip_key_target))
1058 * Attach iterator. After successful completion, @it points to record with
1059 * least key not larger than @k.
1061 * Return value: 0: positioned on existing record,
1062 * +ve: exact position found,
1065 * precondition: it_state(it) == IAM_IT_DETACHED
1066 * postcondition: ergo(result == 0 && it_state(it) == IAM_IT_ATTACHED,
1067 * it_keycmp(it, k) <= 0)
1069 int iam_it_get(struct iam_iterator *it, const struct iam_key *k)
1073 assert_corr(it_state(it) == IAM_IT_DETACHED);
1075 it->ii_path.ip_ikey_target = NULL;
1076 it->ii_path.ip_key_target = k;
1078 result = __iam_it_get(it, 0);
1080 if (result == IAM_LOOKUP_LAST) {
1081 result = iam_it_collision(it);
1085 result = __iam_it_get(it, 0);
1090 result &= ~IAM_LOOKUP_LAST;
1092 assert_corr(ergo(result > 0, it_keycmp(it, k) == 0));
1093 assert_corr(ergo(result == 0 && it_state(it) == IAM_IT_ATTACHED,
1094 it_keycmp(it, k) <= 0));
1099 * Attach iterator by index key.
1101 static int iam_it_iget(struct iam_iterator *it, const struct iam_ikey *k)
1103 assert_corr(it_state(it) == IAM_IT_DETACHED);
1105 it->ii_path.ip_ikey_target = k;
1106 return __iam_it_get(it, 1) & ~IAM_LOOKUP_LAST;
1110 * Attach iterator, and assure it points to the record (not skewed).
1112 * Return value: 0: positioned on existing record,
1113 * +ve: exact position found,
1116 * precondition: it_state(it) == IAM_IT_DETACHED &&
1117 * !(it->ii_flags&IAM_IT_WRITE)
1118 * postcondition: ergo(result == 0, it_state(it) == IAM_IT_ATTACHED)
1120 int iam_it_get_at(struct iam_iterator *it, const struct iam_key *k)
1124 assert_corr(it_state(it) == IAM_IT_DETACHED &&
1125 !(it->ii_flags&IAM_IT_WRITE));
1126 result = iam_it_get(it, k);
1128 if (it_state(it) != IAM_IT_ATTACHED) {
1129 assert_corr(it_state(it) == IAM_IT_SKEWED);
1130 result = iam_it_next(it);
1133 assert_corr(ergo(result >= 0, it_state(it) == IAM_IT_ATTACHED));
1138 * Duplicates iterator.
1140 * postcondition: it_state(dst) == it_state(src) &&
1141 * iam_it_container(dst) == iam_it_container(src) &&
1142 * dst->ii_flags == src->ii_flags &&
1143 * ergo(it_state(src) == IAM_IT_ATTACHED,
1144 * iam_it_rec_get(dst) == iam_it_rec_get(src) &&
1145 * iam_it_key_get(dst) == iam_it_key_get(src))
1147 void iam_it_dup(struct iam_iterator *dst, const struct iam_iterator *src)
1149 dst->ii_flags = src->ii_flags;
1150 dst->ii_state = src->ii_state;
1151 /* XXX not yet. iam_path_dup(&dst->ii_path, &src->ii_path); */
1153 * XXX: duplicate lock.
1155 assert_corr(it_state(dst) == it_state(src));
1156 assert_corr(iam_it_container(dst) == iam_it_container(src));
1157 assert_corr(dst->ii_flags == src->ii_flags);
1158 assert_corr(ergo(it_state(src) == IAM_IT_ATTACHED,
1159 iam_it_rec_get(dst) == iam_it_rec_get(src) &&
1160 iam_it_key_get(dst) == iam_it_key_get(src)));
1164 * Detach iterator. Does nothing in detached state.
1166 * postcondition: it_state(it) == IAM_IT_DETACHED
1168 void iam_it_put(struct iam_iterator *it)
1170 if (it->ii_state != IAM_IT_DETACHED) {
1171 it->ii_state = IAM_IT_DETACHED;
1172 iam_leaf_fini(&it->ii_path.ip_leaf);
1176 static struct iam_ikey *iam_it_ikey_get(const struct iam_iterator *it,
1177 struct iam_ikey *ikey);
1181 * This function increments the frame pointer to search the next leaf
1182 * block, and reads in the necessary intervening nodes if the search
1183 * should be necessary. Whether or not the search is necessary is
1184 * controlled by the hash parameter. If the hash value is even, then
1185 * the search is only continued if the next block starts with that
1186 * hash value. This is used if we are searching for a specific file.
1188 * If the hash value is HASH_NB_ALWAYS, then always go to the next block.
1190 * This function returns 1 if the caller should continue to search,
1191 * or 0 if it should not. If there is an error reading one of the
1192 * index blocks, it will return a negative error code.
1194 * If start_hash is non-null, it will be filled in with the starting
1195 * hash of the next page.
1197 static int iam_htree_advance(struct inode *dir, __u32 hash,
1198 struct iam_path *path, __u32 *start_hash,
1201 struct iam_frame *p;
1202 struct buffer_head *bh;
1203 int err, num_frames = 0;
1208 * Find the next leaf page by incrementing the frame pointer.
1209 * If we run out of entries in the interior node, loop around and
1210 * increment pointer in the parent node. When we break out of
1211 * this loop, num_frames indicates the number of interior
1212 * nodes that need to be read.
1215 do_corr(schedule());
1220 p->at = iam_entry_shift(path, p->at, +1);
1221 if (p->at < iam_entry_shift(path, p->entries,
1222 dx_get_count(p->entries))) {
1223 p->leaf = dx_get_block(path, p->at);
1224 iam_unlock_bh(p->bh);
1227 iam_unlock_bh(p->bh);
1228 if (p == path->ip_frames)
1235 /* Htree hash magic. */
1238 * If the hash is 1, then continue only if the next page has a
1239 * continuation hash of any value. This is used for readdir
1240 * handling. Otherwise, check to see if the hash matches the
1241 * desired continuation hash. If it doesn't, return, since
1242 * there's no point in reading the successive index pages.
1244 dx_get_ikey(path, p->at, (struct iam_ikey *)&bhash);
1246 *start_hash = bhash;
1247 if ((hash & 1) == 0) {
1248 if ((bhash & ~1) != hash)
1253 * If the hash is HASH_NB_ALWAYS, we always go to the next
1254 * block so no check is necessary
1256 while (num_frames--) {
1259 do_corr(schedule());
1261 idx = p->leaf = dx_get_block(path, p->at);
1262 iam_unlock_bh(p->bh);
1263 err = iam_path_descr(path)->id_ops->
1264 id_node_read(path->ip_container, idx, NULL, &bh);
1266 return err; /* Failure */
1269 assert_corr(p->bh != bh);
1271 p->entries = dx_node_get_entries(path, p);
1272 p->at = iam_entry_shift(path, p->entries, !compat);
1273 assert_corr(p->curidx != idx);
1276 assert_corr(p->leaf != dx_get_block(path, p->at));
1277 p->leaf = dx_get_block(path, p->at);
1278 iam_unlock_bh(p->bh);
1279 assert_inv(dx_node_check(path, p));
1284 static inline int iam_index_advance(struct iam_path *path)
1286 return iam_htree_advance(iam_path_obj(path), 0, path, NULL, 0);
1289 static void iam_unlock_array(struct iam_container *ic,
1290 struct dynlock_handle **lh)
1294 for (i = 0; i < DX_MAX_TREE_HEIGHT; ++i, ++lh) {
1296 iam_unlock_htree(ic, *lh);
1302 * Advance index part of @path to point to the next leaf. Returns 1 on
1303 * success, and 0 when the end of the container is reached. Leaf node is locked.
1305 int iam_index_next(struct iam_container *c, struct iam_path *path)
1308 struct dynlock_handle *lh[DX_MAX_TREE_HEIGHT] = { NULL, };
1311 /* Locking for iam_index_next()... is to be described. */
1313 cursor = path->ip_frame->leaf;
1316 result = iam_index_lock(path, lh);
1317 do_corr(schedule());
1321 result = iam_check_full_path(path, 0);
1322 if (result == 0 && cursor == path->ip_frame->leaf) {
1323 result = iam_index_advance(path);
1325 assert_corr(result == 0 ||
1326 cursor != path->ip_frame->leaf);
1330 iam_unlock_array(c, lh);
1332 iam_path_release(path);
1333 do_corr(schedule());
1335 result = __iam_path_lookup(path);
1339 while (path->ip_frame->leaf != cursor) {
1340 do_corr(schedule());
1342 result = iam_index_lock(path, lh);
1343 do_corr(schedule());
1347 result = iam_check_full_path(path, 0);
1351 result = iam_index_advance(path);
1353 CERROR("cannot find cursor : %u\n",
1359 result = iam_check_full_path(path, 0);
1362 iam_unlock_array(c, lh);
1364 } while (result == -EAGAIN);
1368 iam_unlock_array(c, lh);
1373 * Move iterator one record right.
1375 * Return value: 0: success,
1376 * +1: end of container reached
1379 * precondition: (it_state(it) == IAM_IT_ATTACHED ||
1380 * it_state(it) == IAM_IT_SKEWED) && it->ii_flags&IAM_IT_MOVE
1381 * postcondition: ergo(result == 0, it_state(it) == IAM_IT_ATTACHED) &&
1382 * ergo(result > 0, it_state(it) == IAM_IT_DETACHED)
1384 int iam_it_next(struct iam_iterator *it)
1387 struct iam_path *path;
1388 struct iam_leaf *leaf;
1390 do_corr(struct iam_ikey *ik_orig);
1392 /* assert_corr(it->ii_flags&IAM_IT_MOVE); */
1393 assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1394 it_state(it) == IAM_IT_SKEWED);
1396 path = &it->ii_path;
1397 leaf = &path->ip_leaf;
1399 assert_corr(iam_leaf_is_locked(leaf));
1402 do_corr(ik_orig = it_at_rec(it) ?
1403 iam_it_ikey_get(it, iam_path_ikey(path, 2)) : NULL);
1404 if (it_before(it)) {
1405 assert_corr(!iam_leaf_at_end(leaf));
1406 it->ii_state = IAM_IT_ATTACHED;
1408 if (!iam_leaf_at_end(leaf))
1409 /* advance within leaf node */
1410 iam_leaf_next(leaf);
1411 /* multiple iterations may be necessary due to empty leaves. */
1412 while (result == 0 && iam_leaf_at_end(leaf)) {
1413 do_corr(schedule());
1414 /* advance index portion of the path */
1415 result = iam_index_next(iam_it_container(it), path);
1416 assert_corr(iam_leaf_is_locked(leaf));
1418 struct dynlock_handle *lh;
1420 lh = iam_lock_htree(iam_it_container(it),
1421 path->ip_frame->leaf,
1424 iam_leaf_fini(leaf);
1426 result = iam_leaf_load(path);
1428 iam_leaf_start(leaf);
1431 } else if (result == 0)
1432 /* end of container reached */
1438 it->ii_state = IAM_IT_ATTACHED;
1440 assert_corr(ergo(result == 0, it_state(it) == IAM_IT_ATTACHED));
1441 assert_corr(ergo(result > 0, it_state(it) == IAM_IT_DETACHED));
1442 assert_corr(ergo(result == 0 && ik_orig != NULL,
1443 it_ikeycmp(it, ik_orig) >= 0));
1448 * Return pointer to the record under iterator.
1450 * precondition: it_state(it) == IAM_IT_ATTACHED && it_at_rec(it)
1451 * postcondition: it_state(it) == IAM_IT_ATTACHED
1453 struct iam_rec *iam_it_rec_get(const struct iam_iterator *it)
1455 assert_corr(it_state(it) == IAM_IT_ATTACHED);
1456 assert_corr(it_at_rec(it));
1457 return iam_leaf_rec(&it->ii_path.ip_leaf);
1460 static void iam_it_reccpy(struct iam_iterator *it, const struct iam_rec *r)
1462 struct iam_leaf *folio;
1464 folio = &it->ii_path.ip_leaf;
1465 iam_leaf_ops(folio)->rec_set(folio, r);
1469 * Replace contents of record under iterator.
1471 * precondition: it_state(it) == IAM_IT_ATTACHED &&
1472 * it->ii_flags&IAM_IT_WRITE
1473 * postcondition: it_state(it) == IAM_IT_ATTACHED &&
1474 * ergo(result == 0, !memcmp(iam_it_rec_get(it), r, ...))
1476 int iam_it_rec_set(handle_t *h,
1477 struct iam_iterator *it, const struct iam_rec *r)
1480 struct iam_path *path;
1481 struct buffer_head *bh;
1483 assert_corr(it_state(it) == IAM_IT_ATTACHED &&
1484 it->ii_flags&IAM_IT_WRITE);
1485 assert_corr(it_at_rec(it));
1487 path = &it->ii_path;
1488 bh = path->ip_leaf.il_bh;
1489 result = iam_txn_add(h, path, bh);
1491 iam_it_reccpy(it, r);
1492 result = iam_txn_dirty(h, path, bh);
1498 * Return pointer to the index key under iterator.
1500 * precondition: it_state(it) == IAM_IT_ATTACHED ||
1501 * it_state(it) == IAM_IT_SKEWED
1503 static struct iam_ikey *iam_it_ikey_get(const struct iam_iterator *it,
1504 struct iam_ikey *ikey)
1506 assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1507 it_state(it) == IAM_IT_SKEWED);
1508 assert_corr(it_at_rec(it));
1509 return iam_leaf_ikey(&it->ii_path.ip_leaf, ikey);
1513 * Return pointer to the key under iterator.
1515 * precondition: it_state(it) == IAM_IT_ATTACHED ||
1516 * it_state(it) == IAM_IT_SKEWED
1518 struct iam_key *iam_it_key_get(const struct iam_iterator *it)
1520 assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1521 it_state(it) == IAM_IT_SKEWED);
1522 assert_corr(it_at_rec(it));
1523 return iam_leaf_key(&it->ii_path.ip_leaf);
1527 * Return size of key under iterator (in bytes)
1529 * precondition: it_state(it) == IAM_IT_ATTACHED ||
1530 * it_state(it) == IAM_IT_SKEWED
1532 int iam_it_key_size(const struct iam_iterator *it)
1534 assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1535 it_state(it) == IAM_IT_SKEWED);
1536 assert_corr(it_at_rec(it));
1537 return iam_leaf_key_size(&it->ii_path.ip_leaf);
1540 static struct buffer_head *
1541 iam_new_node(handle_t *h, struct iam_container *c, iam_ptr_t *b, int *e)
1543 struct inode *inode = c->ic_object;
1544 struct buffer_head *bh = NULL;
1545 struct iam_idle_head *head;
1546 struct buffer_head *idle;
1550 if (c->ic_idle_bh == NULL)
1553 mutex_lock(&c->ic_idle_mutex);
1554 if (unlikely(c->ic_idle_bh == NULL)) {
1555 mutex_unlock(&c->ic_idle_mutex);
1559 head = (struct iam_idle_head *)(c->ic_idle_bh->b_data);
1560 count = le16_to_cpu(head->iih_count);
1562 *e = osd_ldiskfs_journal_get_write_access(h, inode->i_sb,
1569 *b = le32_to_cpu(head->iih_blks[count]);
1570 head->iih_count = cpu_to_le16(count);
1571 *e = ldiskfs_handle_dirty_metadata(h, inode, c->ic_idle_bh);
1575 mutex_unlock(&c->ic_idle_mutex);
1576 bh = __ldiskfs_bread(NULL, inode, *b, 0);
1577 if (IS_ERR_OR_NULL(bh)) {
1587 /* The block itself which contains the iam_idle_head is
1588 * also an idle block, and can be used as the new node.
1590 idle_blocks = (__u32 *)(c->ic_root_bh->b_data +
1591 c->ic_descr->id_root_gap +
1592 sizeof(struct dx_countlimit));
1593 *e = osd_ldiskfs_journal_get_write_access(h, inode->i_sb,
1599 *b = le32_to_cpu(*idle_blocks);
1600 iam_lock_bh(c->ic_root_bh);
1601 *idle_blocks = head->iih_next;
1602 iam_unlock_bh(c->ic_root_bh);
1603 *e = ldiskfs_handle_dirty_metadata(h, inode, c->ic_root_bh);
1605 iam_lock_bh(c->ic_root_bh);
1606 *idle_blocks = cpu_to_le32(*b);
1607 iam_unlock_bh(c->ic_root_bh);
1612 idle = iam_load_idle_blocks(c, le32_to_cpu(*idle_blocks));
1613 if (idle != NULL && IS_ERR(idle)) {
1615 c->ic_idle_bh = NULL;
1620 c->ic_idle_bh = idle;
1621 mutex_unlock(&c->ic_idle_mutex);
1624 /* get write access for the found buffer head */
1625 *e = osd_ldiskfs_journal_get_write_access(h, inode->i_sb, bh,
1630 ldiskfs_std_error(inode->i_sb, *e);
1632 /* Clear the reused node, as for a newly allocated one. */
1633 memset(bh->b_data, 0, inode->i_sb->s_blocksize);
1634 set_buffer_uptodate(bh);
1639 bh = osd_ldiskfs_append(h, inode, b);
1648 mutex_unlock(&c->ic_idle_mutex);
1649 ldiskfs_std_error(inode->i_sb, *e);
1654 * Insertion of new record. Interaction with jbd during non-trivial case (when
1655 * split happens) is as follows:
1657 * - new leaf node is involved into transaction by iam_new_node();
1659 * - old leaf node is involved into transaction by iam_add_rec();
1661 * - leaf where insertion point ends in, is marked dirty by iam_add_rec();
1663 * - leaf without insertion point is marked dirty (as @new_leaf) by
1666 * - split index nodes are involved into transaction and marked dirty by
1667 * split_index_node().
1669 * - "safe" index node, which is no split, but where new pointer is inserted
1670 * is involved into transaction and marked dirty by split_index_node().
1672 * - index node where pointer to new leaf is inserted is involved into
1673 * transaction by split_index_node() and marked dirty by iam_add_rec().
1675 * - inode is marked dirty by iam_add_rec().
1679 static int iam_new_leaf(handle_t *handle, struct iam_leaf *leaf)
1683 struct buffer_head *new_leaf;
1684 struct buffer_head *old_leaf;
1685 struct iam_container *c;
1687 struct iam_path *path;
1689 c = iam_leaf_container(leaf);
1690 path = leaf->il_path;
1693 new_leaf = iam_new_node(handle, c, &blknr, &err);
1694 do_corr(schedule());
1695 if (new_leaf != NULL) {
1696 struct dynlock_handle *lh;
1698 lh = iam_lock_htree(c, blknr, DLT_WRITE);
1699 do_corr(schedule());
1701 iam_leaf_ops(leaf)->init_new(c, new_leaf);
1702 do_corr(schedule());
1703 old_leaf = leaf->il_bh;
1704 iam_leaf_split(leaf, &new_leaf, blknr);
1705 if (old_leaf != leaf->il_bh) {
1706 /* Switched to the new leaf. */
1707 iam_leaf_unlock(leaf);
1709 path->ip_frame->leaf = blknr;
1711 iam_unlock_htree(path->ip_container, lh);
1712 do_corr(schedule());
1713 err = iam_txn_dirty(handle, path, new_leaf);
1715 err = ldiskfs_mark_inode_dirty(handle, obj);
1716 do_corr(schedule());
1721 assert_inv(iam_path_check(iam_leaf_path(leaf)));
1725 static inline void dx_set_limit(struct iam_entry *entries, unsigned int value)
1727 ((struct dx_countlimit *) entries)->limit = cpu_to_le16(value);
1730 static int iam_shift_entries(struct iam_path *path,
1731 struct iam_frame *frame, unsigned int count,
1732 struct iam_entry *entries, struct iam_entry *entries2,
1735 unsigned int count1;
1736 unsigned int count2;
1739 struct iam_frame *parent = frame - 1;
1740 struct iam_ikey *pivot = iam_path_ikey(path, 3);
1742 delta = dx_index_is_compat(path) ? 0 : +1;
1744 count1 = count/2 + delta;
1745 count2 = count - count1;
1746 dx_get_ikey(path, iam_entry_shift(path, entries, count1), pivot);
1748 dxtrace(pr_info("Split index %d/%d\n", count1, count2));
1750 memcpy((char *) iam_entry_shift(path, entries2, delta),
1751 (char *) iam_entry_shift(path, entries, count1),
1752 count2 * iam_entry_size(path));
1754 dx_set_count(entries2, count2 + delta);
1755 dx_set_limit(entries2, dx_node_limit(path));
1758 * NOTE: very subtle piece of code. A competing dx_probe() may find the
1759 * 2nd-level index in the root index, then we insert a new index here and
1760 * set a new count in that 2nd-level index. So dx_probe() may see the
1761 * 2nd-level index w/o the hash it looks for. The solution is to check the
1762 * root index after we have locked the just-found 2nd-level index -bzzz
1764 iam_insert_key_lock(path, parent, pivot, newblock);
1767 * now old and new 2nd-level index blocks contain all pointers, so
1768 * dx_probe() may find it in both. it's OK -bzzz
1770 iam_lock_bh(frame->bh);
1771 dx_set_count(entries, count1);
1772 iam_unlock_bh(frame->bh);
1775 * now the old 2nd-level index block points to the first half of the
1776 * leaves. it's important that dx_probe() checks the root index block for
1777 * changes under dx_lock_bh(frame->bh) -bzzz
1784 int split_index_node(handle_t *handle, struct iam_path *path,
1785 struct dynlock_handle **lh)
1787 struct iam_entry *entries; /* old block contents */
1788 struct iam_entry *entries2; /* new block contents */
1789 struct iam_frame *frame, *safe;
1790 struct buffer_head *bh_new[DX_MAX_TREE_HEIGHT] = {NULL};
1791 u32 newblock[DX_MAX_TREE_HEIGHT] = {0};
1792 struct dynlock_handle *lock[DX_MAX_TREE_HEIGHT] = {NULL,};
1793 struct dynlock_handle *new_lock[DX_MAX_TREE_HEIGHT] = {NULL,};
1794 struct inode *dir = iam_path_obj(path);
1795 struct iam_descr *descr;
1799 descr = iam_path_descr(path);
1801 * Algorithm below depends on this.
1803 assert_corr(dx_root_limit(path) < dx_node_limit(path));
1805 frame = path->ip_frame;
1806 entries = frame->entries;
1809 * Tall-tree handling: we might have to split multiple index blocks
1810 * all the way up to tree root. Tricky point here is error handling:
1811 * to avoid complicated undo/rollback we
1813 * - first allocate all necessary blocks
1815 * - insert pointers into them atomically.
1819 * Locking: leaf is already locked. htree-locks are acquired on all
1820 * index nodes that require split bottom-to-top, on the "safe" node,
1821 * and on all new nodes
1824 dxtrace(printk("using %u of %u node entries\n",
1825 dx_get_count(entries), dx_get_limit(entries)));
1827 /* What levels need split? */
1828 for (nr_splet = 0; frame >= path->ip_frames &&
1829 dx_get_count(frame->entries) == dx_get_limit(frame->entries);
1830 --frame, ++nr_splet) {
1831 do_corr(schedule());
1832 if (nr_splet == DX_MAX_TREE_HEIGHT) {
1834 * CWARN(dir->i_sb, __FUNCTION__,
1835 * "Directory index full!\n");
1844 /* Lock all nodes, bottom to top. */
1845 for (frame = path->ip_frame, i = nr_splet; i >= 0; --i, --frame) {
1846 do_corr(schedule());
1847 lock[i] = iam_lock_htree(path->ip_container, frame->curidx,
1849 if (lock[i] == NULL) {
1856 * Check for concurrent index modification.
1858 err = iam_check_full_path(path, 1);
1861 /* And check that the same number of nodes is to be split. */
1862 for (i = 0, frame = path->ip_frame; frame >= path->ip_frames &&
1863 dx_get_count(frame->entries) == dx_get_limit(frame->entries);
1867 if (i != nr_splet) {
1872 /* Go back down, allocate blocks, lock them, and add to transaction */
1873 for (frame = safe + 1, i = 0; i < nr_splet; ++i, ++frame) {
1874 bh_new[i] = iam_new_node(handle, path->ip_container,
1875 &newblock[i], &err);
1876 do_corr(schedule());
1878 descr->id_ops->id_node_init(path->ip_container,
1882 new_lock[i] = iam_lock_htree(path->ip_container, newblock[i],
1884 if (new_lock[i] == NULL) {
1888 do_corr(schedule());
1889 BUFFER_TRACE(frame->bh, "get_write_access");
1890 err = osd_ldiskfs_journal_get_write_access(handle,
1897 /* Add "safe" node to transaction too */
1898 if (safe + 1 != path->ip_frames) {
1899 do_corr(schedule());
1900 err = osd_ldiskfs_journal_get_write_access(handle,
1908 /* Go through nodes once more, inserting pointers */
1909 for (frame = safe + 1, i = 0; i < nr_splet; ++i, ++frame) {
1912 struct buffer_head *bh2;
1913 struct buffer_head *bh;
1915 entries = frame->entries;
1916 count = dx_get_count(entries);
1917 idx = iam_entry_diff(path, frame->at, entries);
1920 entries2 = dx_get_entries(path, bh2->b_data, 0);
1923 if (frame == path->ip_frames) {
1924 /* splitting root node. Tricky point:
1926 * In the "normal" B-tree we'd split root *and* add
1927 * new root to the tree with pointers to the old root
1928 * and its sibling (thus introducing two new nodes).
1930 * In htree it's enough to add one node, because
1931 * capacity of the root node is smaller than that of
1932 * a non-root one.
1934 struct iam_frame *frames;
1935 struct iam_entry *next;
1937 assert_corr(i == 0);
1939 do_corr(schedule());
1941 frames = path->ip_frames;
1942 memcpy((char *) entries2, (char *) entries,
1943 count * iam_entry_size(path));
1944 dx_set_limit(entries2, dx_node_limit(path));
1947 iam_lock_bh(frame->bh);
1948 next = descr->id_ops->id_root_inc(path->ip_container,
1950 dx_set_block(path, next, newblock[0]);
1951 iam_unlock_bh(frame->bh);
1953 do_corr(schedule());
1954 /* Shift frames in the path */
1955 memmove(frames + 2, frames + 1,
1956 (sizeof(path->ip_frames)) -
1957 2 * sizeof(frames[0]));
1958 /* Add new access path frame */
1959 frames[1].at = iam_entry_shift(path, entries2, idx);
1960 frames[1].entries = entries = entries2;
1962 assert_inv(dx_node_check(path, frame));
1965 assert_inv(dx_node_check(path, frame));
1966 bh_new[0] = NULL; /* buffer head is "consumed" */
1967 err = ldiskfs_handle_dirty_metadata(handle, NULL, bh2);
1970 do_corr(schedule());
1972 /* splitting non-root index node. */
1973 struct iam_frame *parent = frame - 1;
1975 do_corr(schedule());
1976 count = iam_shift_entries(path, frame, count,
1977 entries, entries2, newblock[i]);
1978 /* Which index block gets the new entry? */
1980 int d = dx_index_is_compat(path) ? 0 : +1;
1982 frame->at = iam_entry_shift(path, entries2,
1984 frame->entries = entries = entries2;
1985 frame->curidx = newblock[i];
1986 swap(frame->bh, bh2);
1987 assert_corr(lock[i + 1] != NULL);
1988 assert_corr(new_lock[i] != NULL);
1989 swap(lock[i + 1], new_lock[i]);
1991 parent->at = iam_entry_shift(path,
1994 assert_inv(dx_node_check(path, frame));
1995 assert_inv(dx_node_check(path, parent));
1996 dxtrace(dx_show_index("node", frame->entries));
1997 dxtrace(dx_show_index("node",
1998 ((struct dx_node *) bh2->b_data)->entries));
1999 err = ldiskfs_handle_dirty_metadata(handle, NULL, bh2);
2002 do_corr(schedule());
2003 err = ldiskfs_handle_dirty_metadata(handle, NULL,
2008 do_corr(schedule());
2009 err = ldiskfs_handle_dirty_metadata(handle, NULL, bh);
2014 * This function was called to make insertion of new leaf
2015 * possible. Check that it fulfilled its obligations.
2017 assert_corr(dx_get_count(path->ip_frame->entries) <
2018 dx_get_limit(path->ip_frame->entries));
2019 assert_corr(lock[nr_splet] != NULL);
2020 *lh = lock[nr_splet];
2021 lock[nr_splet] = NULL;
2023 /* Log ->i_size modification. */
2024 err = ldiskfs_mark_inode_dirty(handle, dir);
2030 ldiskfs_std_error(dir->i_sb, err);
2033 iam_unlock_array(path->ip_container, lock);
2034 iam_unlock_array(path->ip_container, new_lock);
2036 assert_corr(err || iam_frame_is_locked(path, path->ip_frame));
2038 do_corr(schedule());
2039 for (i = 0; i < ARRAY_SIZE(bh_new); ++i) {
2040 if (bh_new[i] != NULL)
2046 static int iam_add_rec(handle_t *handle, struct iam_iterator *it,
2047 struct iam_path *path,
2048 const struct iam_key *k, const struct iam_rec *r)
2051 struct iam_leaf *leaf;
2053 leaf = &path->ip_leaf;
2054 assert_inv(iam_path_check(path));
2055 err = iam_txn_add(handle, path, leaf->il_bh);
2057 do_corr(schedule());
2058 if (!iam_leaf_can_add(leaf, k, r)) {
2059 struct dynlock_handle *lh = NULL;
2062 assert_corr(lh == NULL);
2063 do_corr(schedule());
2064 err = split_index_node(handle, path, &lh);
2065 if (err == -EAGAIN) {
2066 assert_corr(lh == NULL);
2068 iam_path_fini(path);
2069 it->ii_state = IAM_IT_DETACHED;
2071 do_corr(schedule());
2072 err = iam_it_get_exact(it, k);
2074 err = 1; /* repeat split */
2079 assert_inv(iam_path_check(path));
2081 assert_corr(lh != NULL);
2082 do_corr(schedule());
2083 err = iam_new_leaf(handle, leaf);
2085 err = iam_txn_dirty(handle, path,
2086 path->ip_frame->bh);
2088 iam_unlock_htree(path->ip_container, lh);
2089 do_corr(schedule());
2092 iam_leaf_rec_add(leaf, k, r);
2093 err = iam_txn_dirty(handle, path, leaf->il_bh);
2096 assert_inv(iam_path_check(path));
2101 * Insert new record with key @k and contents from @r, shifting records to the
2102 * right. On success, iterator is positioned on the newly inserted record.
2104 * precondition: it->ii_flags&IAM_IT_WRITE &&
2105 * (it_state(it) == IAM_IT_ATTACHED ||
2106 * it_state(it) == IAM_IT_SKEWED) &&
2107 * ergo(it_state(it) == IAM_IT_ATTACHED,
2108 * it_keycmp(it, k) <= 0) &&
2109 * ergo(it_before(it), it_keycmp(it, k) > 0));
2110 * postcondition: ergo(result == 0,
2111 * it_state(it) == IAM_IT_ATTACHED &&
2112 * it_keycmp(it, k) == 0 &&
2113 * !memcmp(iam_it_rec_get(it), r, ...))
2115 int iam_it_rec_insert(handle_t *h, struct iam_iterator *it,
2116 const struct iam_key *k, const struct iam_rec *r)
2119 struct iam_path *path;
2121 path = &it->ii_path;
2123 assert_corr(it->ii_flags&IAM_IT_WRITE);
2124 assert_corr(it_state(it) == IAM_IT_ATTACHED ||
2125 it_state(it) == IAM_IT_SKEWED);
2126 assert_corr(ergo(it_state(it) == IAM_IT_ATTACHED,
2127 it_keycmp(it, k) <= 0));
2128 assert_corr(ergo(it_before(it), it_keycmp(it, k) > 0));
2129 result = iam_add_rec(h, it, path, k, r);
2131 it->ii_state = IAM_IT_ATTACHED;
2132 assert_corr(ergo(result == 0,
2133 it_state(it) == IAM_IT_ATTACHED &&
2134 it_keycmp(it, k) == 0));
2138 static inline int iam_idle_blocks_limit(struct inode *inode)
2140 return (inode->i_sb->s_blocksize - sizeof(struct iam_idle_head)) >> 2;
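
/*
 * For example, assuming a 4096-byte block and an 8-byte iam_idle_head
 * (2 + 2 + 4 bytes, no tail padding), this yields (4096 - 8) >> 2 = 1022
 * 32-bit blk# slots per idle-blocks table.
 */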
2144 * If the leaf cannot be recycled, we lose one block that could have been
2145 * reused. It is not a serious issue, because the result is almost the same
2146 * as not recycling at all.
2147 static iam_ptr_t iam_index_shrink(handle_t *h, struct iam_path *p,
2148 struct iam_leaf *l, struct buffer_head **bh)
2150 struct iam_container *c = p->ip_container;
2151 struct inode *inode = c->ic_object;
2152 struct iam_frame *frame = p->ip_frame;
2153 struct iam_entry *entries;
2154 struct iam_entry *pos;
2155 struct dynlock_handle *lh;
2159 if (c->ic_idle_failed)
2162 if (unlikely(frame == NULL))
2165 if (!iam_leaf_empty(l))
2168 lh = iam_lock_htree(c, frame->curidx, DLT_WRITE);
2170 CWARN("%s: No memory to recycle idle blocks\n",
2171 osd_ino2name(inode));
2175 rc = iam_txn_add(h, p, frame->bh);
2177 iam_unlock_htree(c, lh);
2181 iam_lock_bh(frame->bh);
2182 entries = frame->entries;
2183 count = dx_get_count(entries);
2185 * Do NOT shrink the last entry in the index node; it can be reused
2186 * directly by the next new node.
2189 iam_unlock_bh(frame->bh);
2190 iam_unlock_htree(c, lh);
2194 pos = iam_find_position(p, frame);
2196 * Some new leaf nodes may have been added, or empty leaf nodes may have
2197 * been shrunk, during my delete operation.
2199 * If the empty leaf is no longer under the current index node because the
2200 * index node has been split, then just skip the empty leaf; this is rare.
2202 if (unlikely(frame->leaf != dx_get_block(p, pos))) {
2203 iam_unlock_bh(frame->bh);
2204 iam_unlock_htree(c, lh);
2209 if (frame->at < iam_entry_shift(p, entries, count - 1)) {
2210 struct iam_entry *n = iam_entry_shift(p, frame->at, 1);
2212 memmove(frame->at, n,
2213 (char *)iam_entry_shift(p, entries, count) - (char *)n);
2214 frame->at_shifted = 1;
2216 dx_set_count(entries, count - 1);
2217 iam_unlock_bh(frame->bh);
2218 rc = iam_txn_dirty(h, p, frame->bh);
2219 iam_unlock_htree(c, lh);
2229 iam_install_idle_blocks(handle_t *h, struct iam_path *p, struct buffer_head *bh,
2230 __u32 *idle_blocks, iam_ptr_t blk)
2232 struct iam_container *c = p->ip_container;
2233 struct buffer_head *old = c->ic_idle_bh;
2234 struct iam_idle_head *head;
2237 head = (struct iam_idle_head *)(bh->b_data);
2238 head->iih_magic = cpu_to_le16(IAM_IDLE_HEADER_MAGIC);
2239 head->iih_count = 0;
2240 head->iih_next = *idle_blocks;
2241 /* Write access to the bh has already been obtained. */
2242 rc = iam_txn_dirty(h, p, bh);
2246 rc = iam_txn_add(h, p, c->ic_root_bh);
2250 iam_lock_bh(c->ic_root_bh);
2251 *idle_blocks = cpu_to_le32(blk);
2252 iam_unlock_bh(c->ic_root_bh);
2253 rc = iam_txn_dirty(h, p, c->ic_root_bh);
2255 /* Do NOT release the old block before the new one is assigned. */
2260 iam_lock_bh(c->ic_root_bh);
2261 *idle_blocks = head->iih_next;
2262 iam_unlock_bh(c->ic_root_bh);
2268 * If the leaf cannot be recycled, we lose one block that could have been
2269 * reused. It is not a serious issue, because the result is almost the same
2270 * as not recycling at all.
2271 static void iam_recycle_leaf(handle_t *h, struct iam_path *p,
2272 struct buffer_head *bh, iam_ptr_t blk)
2274 struct iam_container *c = p->ip_container;
2275 struct inode *inode = c->ic_object;
2276 struct iam_idle_head *head;
2281 mutex_lock(&c->ic_idle_mutex);
2282 if (unlikely(c->ic_idle_failed)) {
2287 idle_blocks = (__u32 *)(c->ic_root_bh->b_data +
2288 c->ic_descr->id_root_gap +
2289 sizeof(struct dx_countlimit));
2290 /* It is the first idle block. */
2291 if (c->ic_idle_bh == NULL) {
2292 rc = iam_install_idle_blocks(h, p, bh, idle_blocks, blk);
2296 head = (struct iam_idle_head *)(c->ic_idle_bh->b_data);
2297 count = le16_to_cpu(head->iih_count);
2298 /* Current ic_idle_bh is full, to be replaced by the leaf. */
2299 if (count == iam_idle_blocks_limit(inode)) {
2300 rc = iam_install_idle_blocks(h, p, bh, idle_blocks, blk);
2304 /* Just add to ic_idle_bh. */
2305 rc = iam_txn_add(h, p, c->ic_idle_bh);
2309 head->iih_blks[count] = cpu_to_le32(blk);
2310 head->iih_count = cpu_to_le16(count + 1);
2311 rc = iam_txn_dirty(h, p, c->ic_idle_bh);
2314 mutex_unlock(&c->ic_idle_mutex);
2316 CWARN("%s: idle blocks failed, will lose the blk %u\n",
2317 osd_ino2name(inode), blk);
2321 * Delete record under iterator.
2323 * precondition: it_state(it) == IAM_IT_ATTACHED &&
2324 * it->ii_flags&IAM_IT_WRITE &&
2326 * postcondition: it_state(it) == IAM_IT_ATTACHED ||
2327 * it_state(it) == IAM_IT_DETACHED
2329 int iam_it_rec_delete(handle_t *h, struct iam_iterator *it)
2332 struct iam_leaf *leaf;
2333 struct iam_path *path;
2335 assert_corr(it_state(it) == IAM_IT_ATTACHED &&
2336 it->ii_flags&IAM_IT_WRITE);
2337 assert_corr(it_at_rec(it));
2339 path = &it->ii_path;
2340 leaf = &path->ip_leaf;
2342 assert_inv(iam_path_check(path));
2344 result = iam_txn_add(h, path, leaf->il_bh);
2345 /* no compaction for now. */
2347 iam_rec_del(leaf, it->ii_flags&IAM_IT_MOVE);
2348 result = iam_txn_dirty(h, path, leaf->il_bh);
2349 if (result == 0 && iam_leaf_at_end(leaf)) {
2350 struct buffer_head *bh = NULL;
2353 blk = iam_index_shrink(h, path, leaf, &bh);
2354 if (it->ii_flags & IAM_IT_MOVE) {
2355 result = iam_it_next(it);
2361 iam_recycle_leaf(h, path, bh, blk);
2366 assert_inv(iam_path_check(path));
2367 assert_corr(it_state(it) == IAM_IT_ATTACHED ||
2368 it_state(it) == IAM_IT_DETACHED);
2373 * Convert iterator to cookie.
2375 * precondition: it_state(it) == IAM_IT_ATTACHED &&
2376 * iam_path_descr(it->ii_path)->id_key_size <= sizeof(iam_pos_t)
2377 * postcondition: it_state(it) == IAM_IT_ATTACHED
2379 iam_pos_t iam_it_store(const struct iam_iterator *it)
2383 assert_corr(it_state(it) == IAM_IT_ATTACHED);
2384 assert_corr(it_at_rec(it));
2385 assert_corr(iam_it_container(it)->ic_descr->id_ikey_size <=
2389 return *(iam_pos_t *)iam_it_ikey_get(it, (void *)&result);
2393 * Restore iterator from cookie.
2395 * precondition: it_state(it) == IAM_IT_DETACHED && it->ii_flags&IAM_IT_MOVE &&
2396 * iam_path_descr(it->ii_path)->id_key_size <= sizeof(iam_pos_t)
2397 * postcondition: ergo(result == 0, it_state(it) == IAM_IT_ATTACHED &&
2398 * iam_it_store(it) == pos)
2400 int iam_it_load(struct iam_iterator *it, iam_pos_t pos)
2402 assert_corr(it_state(it) == IAM_IT_DETACHED &&
2403 it->ii_flags&IAM_IT_MOVE);
2404 assert_corr(iam_it_container(it)->ic_descr->id_ikey_size <=
2406 return iam_it_iget(it, (struct iam_ikey *)&pos);
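
/*
 * A sketch of the save/resume pattern that iam_it_store()/iam_it_load()
 * enable (hypothetical caller; consume() and saved_pos are illustrative,
 * error handling elided):
 *
 *	struct iam_iterator it;
 *
 *	iam_it_init(&it, c, IAM_IT_MOVE, pd);
 *	if (iam_it_load(&it, saved_pos) >= 0 &&
 *	    it_state(&it) == IAM_IT_ATTACHED) {
 *		do {
 *			consume(iam_it_key_get(&it), iam_it_rec_get(&it));
 *			saved_pos = iam_it_store(&it);	// cookie to resume from
 *		} while (iam_it_next(&it) == 0);
 *	}
 *	iam_it_put(&it);
 *	iam_it_fini(&it);
 */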
2409 /***********************************************************************/
2410 /* invariants                                                          */
2411 /***********************************************************************/
2412 static inline int ptr_inside(void *base, size_t size, void *ptr)
2414 return (base <= ptr) && (ptr < base + size);
2417 static int iam_frame_invariant(struct iam_frame *f)
2421 f->bh->b_data != NULL &&
2422 ptr_inside(f->bh->b_data, f->bh->b_size, f->entries) &&
2423 ptr_inside(f->bh->b_data, f->bh->b_size, f->at) &&
2424 f->entries <= f->at);
2427 static int iam_leaf_invariant(struct iam_leaf *l)
2431 l->il_bh->b_data != NULL &&
2432 ptr_inside(l->il_bh->b_data, l->il_bh->b_size, l->il_entries) &&
2433 ptr_inside(l->il_bh->b_data, l->il_bh->b_size, l->il_at) &&
2434 l->il_entries <= l->il_at;
2437 static int iam_path_invariant(struct iam_path *p)
2441 if (p->ip_container == NULL ||
2442 p->ip_indirect < 0 || p->ip_indirect > DX_MAX_TREE_HEIGHT - 1 ||
2443 p->ip_frame != p->ip_frames + p->ip_indirect ||
2444 !iam_leaf_invariant(&p->ip_leaf))
2446 for (i = 0; i < ARRAY_SIZE(p->ip_frames); ++i) {
2447 if (i <= p->ip_indirect) {
2448 if (!iam_frame_invariant(&p->ip_frames[i]))
2455 int iam_it_invariant(struct iam_iterator *it)
2458 (it->ii_state == IAM_IT_DETACHED ||
2459 it->ii_state == IAM_IT_ATTACHED ||
2460 it->ii_state == IAM_IT_SKEWED) &&
2461 !(it->ii_flags & ~(IAM_IT_MOVE | IAM_IT_WRITE)) &&
2462 ergo(it->ii_state == IAM_IT_ATTACHED ||
2463 it->ii_state == IAM_IT_SKEWED,
2464 iam_path_invariant(&it->ii_path) &&
2465 equi(it_at_rec(it), it->ii_state == IAM_IT_SKEWED));
2469 * Search container @c for record with key @k. If record is found, its data
2470 * are moved into @r.
2472 * Return values: 0: found, -ENOENT: not-found, -ve: error
2474 int iam_lookup(struct iam_container *c, const struct iam_key *k,
2475 struct iam_rec *r, struct iam_path_descr *pd)
2477 struct iam_iterator it;
2480 iam_it_init(&it, c, 0, pd);
2482 result = iam_it_get_exact(&it, k);
2484 /* record with required key found, copy it into user buffer */
2485 iam_reccpy(&it.ii_path.ip_leaf, r);
2492 * Insert new record @r with key @k into container @c (within context of
2495 * Return values: 0: success, -ve: error, including -EEXIST when record with
2496 * given key is already present.
2498 * postcondition: ergo(result == 0 || result == -EEXIST,
2499 * iam_lookup(c, k, r2) > 0)
2501 int iam_insert(handle_t *h, struct iam_container *c, const struct iam_key *k,
2502 const struct iam_rec *r, struct iam_path_descr *pd)
2504 struct iam_iterator it;
2507 iam_it_init(&it, c, IAM_IT_WRITE, pd);
2509 result = iam_it_get_exact(&it, k);
2510 if (result == -ENOENT)
2511 result = iam_it_rec_insert(h, &it, k, r);
2512 else if (result == 0)
2520 * Update record with the key @k in container @c (within context of
2521 * transaction @h), new record is given by @r.
2523 * Return values: +1: skipped because the rec value is the same, 0: success,
2524 * -ve: error, including -ENOENT if no record with the given key is found.
2526 int iam_update(handle_t *h, struct iam_container *c, const struct iam_key *k,
2527 const struct iam_rec *r, struct iam_path_descr *pd)
2529 struct iam_iterator it;
2530 struct iam_leaf *folio;
2533 iam_it_init(&it, c, IAM_IT_WRITE, pd);
2535 result = iam_it_get_exact(&it, k);
2537 folio = &it.ii_path.ip_leaf;
2538 result = iam_leaf_ops(folio)->rec_eq(folio, r);
2540 iam_it_rec_set(h, &it, r);
2550 * Delete existing record with key @k.
2552 * Return values: 0: success, -ENOENT: not-found, -ve: other error.
2554 * postcondition: ergo(result == 0 || result == -ENOENT,
2555 * !iam_lookup(c, k, *));
2557 int iam_delete(handle_t *h, struct iam_container *c, const struct iam_key *k,
2558 struct iam_path_descr *pd)
2560 struct iam_iterator it;
2563 iam_it_init(&it, c, IAM_IT_WRITE, pd);
2565 result = iam_it_get_exact(&it, k);
2567 iam_it_rec_delete(h, &it);
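
/*
 * Putting the top-level container API together (hypothetical journal
 * handle h, container c, key k, record r and path descriptor pd; error
 * handling elided):
 *
 *	rc = iam_insert(h, c, k, r, pd);	// -EEXIST if key present
 *	rc = iam_lookup(c, k, r, pd);		// 0: found, -ENOENT otherwise
 *	rc = iam_update(h, c, k, r, pd);	// +1: rec already the same
 *	rc = iam_delete(h, c, k, pd);		// -ENOENT if key absent
 */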
2573 int iam_root_limit(int rootgap, int blocksize, int size)
2578 limit = (blocksize - rootgap) / size;
2579 nlimit = blocksize / size;
2580 if (limit == nlimit)