4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2017, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
32 * Top-level entry points into iam module
34 * Author: Wang Di <wangdi@clusterfs.com>
35 * Author: Nikita Danilov <nikita@clusterfs.com>
39 * iam: big theory statement.
41 * iam (Index Access Module) is a module providing abstraction of persistent
42 * transactional container on top of generalized ldiskfs htree. iam supports:
46 * - key, pointer, and record size specifiable per container.
48 * - trees taller than 2 index levels.
50 * - read/write to existing ldiskfs htree directories as iam containers.
52 * iam container is a tree, consisting of leaf nodes containing keys and
53 * records stored in this container, and index nodes, containing keys and
54 * pointers to leaf or index nodes.
56 * iam does not work with keys directly; instead it calls a user-supplied key
57 * comparison function (->dpo_keycmp()).
59 * Pointers are (currently) interpreted as logical offsets (measured in
60 * blocks) within the underlying flat file on top of which the iam tree lives.
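 *
 * For illustration: following a pointer is simply a block read at that
 * offset within the container's inode, as iam_node_read() below does:
 *
 *	bh = __ldiskfs_bread(handle, c->ic_object, (int)ptr, 0);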
64 * iam mostly tries to reuse existing htree formats.
66 * Format of index node:
68 * +-----+-------+-------+-------+------+-------+------------+
69 * | | count | | | | | |
70 * | gap | / | entry | entry | .... | entry | free space |
71 * | | limit | | | | | |
72 * +-----+-------+-------+-------+------+-------+------------+
74 * gap this part of node is never accessed by iam code. It
75 * exists for binary compatibility with ldiskfs htree (that,
76 * in turn, stores fake struct ext2_dirent for ext2
77 * compatibility), and to keep some unspecified per-node
78 * data. Gap can be different for root and non-root index
79 * nodes. Gap size can be specified for each container
80 * (gap of 0 is allowed).
82 * count/limit current number of entries in this node, and the maximal
83 * number of entries that can fit into node. count/limit
84 * has the same size as entry, and is itself counted in entries.
87 * entry index entry: consists of a key immediately followed by
88 * a pointer to a child node. Size of a key and size of a
89 * pointer depends on container. Entry has neither
90 * alignment nor padding.
92 * free space portion of the node that new entries are added to
94 * Entries in index node are sorted by their key value.
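 *
 * To make this concrete, a sketch of the structures involved (struct
 * dx_countlimit as defined by ldiskfs/ext4; the entry layout below is a
 * purely hypothetical example for a container with 8-byte keys and
 * 4-byte pointers, not a definition taken from this code):
 *
 *	struct dx_countlimit {
 *		__le16 limit;		/* max number of entries	  */
 *		__le16 count;		/* current number of entries	  */
 *	};
 *
 *	struct example_index_entry {	/* packed: no alignment/padding	  */
 *		__u8   key[8];		/* key, size fixed per container  */
 *		__le32 ptr;		/* logical blk # of the child node */
 *	};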
96 * Format of a leaf node is not specified. Generic iam code accesses leaf
97 * nodes through ->id_leaf methods in struct iam_descr.
99 * The IAM root block is a special node, which contains the IAM descriptor.
100 * Its on-disk format is:
102 * +---------+-------+--------+---------+-------+------+-------+------------+
103 * |IAM desc | count | idle | | | | | |
104 * |(fix/var)| / | blocks | padding | entry | .... | entry | free space |
105 * | | limit | | | | | | |
106 * +---------+-------+--------+---------+-------+------+-------+------------+
108 * The padding length is calculated with the parameters in the IAM descriptor.
110 * The field "idle_blocks" is used to record empty leaf nodes: nodes that
111 * have not been released even though all entries in them have been removed.
112 * Ideally, the idle blocks would be reused whenever new leaf nodes are needed
113 * for new entries, but that relies on the IAM hash functions mapping the new
114 * entries to exactly these idle blocks. Unfortunately, it is not easy to
115 * design hash functions for such a clever mapping, especially considering
116 * insert/lookup performance.
118 * So the IAM recycles the empty leaf nodes and puts them into a per-file
119 * idle blocks pool. When a new leaf node is needed, an idle block is taken
120 * from this pool first, regardless of where the hash functions would map them.
123 * The idle blocks pool is organized as a series of tables, and each table
124 * can be described as follows (on-disk format):
126 * +---------+---------+---------+---------+------+---------+-------+
127 * | magic | count | next | logic | | logic | free |
128 * |(16 bits)|(16 bits)| table | blk # | .... | blk # | space |
129 * | | |(32 bits)|(32 bits)| |(32 bits)| |
130 * +---------+---------+---------+---------+------+---------+-------+
132 * The logic blk# for the first table is stored in the root node "idle_blocks".
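 *
 * A sketch of the table header, reconstructed from the diagram above and
 * from the fields used by the code below (not the authoritative
 * definition):
 *
 *	struct iam_idle_head {
 *		__le16 iih_magic;	/* IAM_IDLE_HEADER_MAGIC	   */
 *		__le16 iih_count;	/* # of idle blocks in this table  */
 *		__le32 iih_next;	/* logical blk # of the next table */
 *		__le32 iih_blks[0];	/* logical blk #s of idle blocks   */
 *	};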
136 #include <linux/module.h>
137 #include <linux/fs.h>
138 #include <linux/pagemap.h>
139 #include <linux/time.h>
140 #include <linux/fcntl.h>
141 #include <linux/stat.h>
142 #include <linux/string.h>
143 #include <linux/quotaops.h>
144 #include <linux/buffer_head.h>
146 #include <ldiskfs/ldiskfs.h>
147 #include <ldiskfs/xattr.h>
150 #include "osd_internal.h"
152 #include <ldiskfs/acl.h>
154 static struct buffer_head *
155 iam_load_idle_blocks(struct iam_container *c, iam_ptr_t blk)
157 struct inode *inode = c->ic_object;
158 struct iam_idle_head *head;
159 struct buffer_head *bh;
161 LASSERT(mutex_is_locked(&c->ic_idle_mutex));
166 bh = __ldiskfs_bread(NULL, inode, blk, 0);
167 if (IS_ERR_OR_NULL(bh)) {
168 CERROR("%s: cannot load idle blocks, blk = %u: rc = %ld\n",
169 osd_ino2name(inode), blk, bh ? PTR_ERR(bh) : -EIO);
170 c->ic_idle_failed = 1;
176 head = (struct iam_idle_head *)(bh->b_data);
177 if (le16_to_cpu(head->iih_magic) != IAM_IDLE_HEADER_MAGIC) {
180 CERROR("%s: invalid idle block head, blk = %u, magic = %x: rc = %d\n",
181 osd_ino2name(inode), blk, le16_to_cpu(head->iih_magic),
184 c->ic_idle_failed = 1;
192 * Determine format of given container. This is done by scanning list of
193 * registered formats and calling ->if_guess() method of each in turn.
195 static int iam_format_guess(struct iam_container *c)
199 result = iam_lvar_guess(c);
201 result = iam_lfix_guess(c);
204 struct buffer_head *bh;
207 LASSERT(c->ic_root_bh != NULL);
209 idle_blocks = (__u32 *)(c->ic_root_bh->b_data +
210 c->ic_descr->id_root_gap +
211 sizeof(struct dx_countlimit));
212 mutex_lock(&c->ic_idle_mutex);
213 bh = iam_load_idle_blocks(c, le32_to_cpu(*idle_blocks));
214 if (bh != NULL && IS_ERR(bh))
215 result = PTR_ERR(bh);
218 mutex_unlock(&c->ic_idle_mutex);
225 * Initialize container @c.
227 int iam_container_init(struct iam_container *c,
228 struct iam_descr *descr, struct inode *inode)
230 memset(c, 0, sizeof *c);
232 c->ic_object = inode;
233 dynlock_init(&c->ic_tree_lock);
234 mutex_init(&c->ic_idle_mutex);
239 * Determine container format.
241 int iam_container_setup(struct iam_container *c)
243 return iam_format_guess(c);
247 * Finalize container @c, release all resources.
249 void iam_container_fini(struct iam_container *c)
251 brelse(c->ic_idle_bh);
252 c->ic_idle_bh = NULL;
253 brelse(c->ic_root_bh);
254 c->ic_root_bh = NULL;
257 void iam_path_init(struct iam_path *path, struct iam_container *c,
258 struct iam_path_descr *pd)
260 memset(path, 0, sizeof *path);
261 path->ip_container = c;
262 path->ip_frame = path->ip_frames;
264 path->ip_leaf.il_path = path;
267 static void iam_leaf_fini(struct iam_leaf *leaf);
269 void iam_path_release(struct iam_path *path)
273 for (i = 0; i < ARRAY_SIZE(path->ip_frames); i++) {
274 if (path->ip_frames[i].bh != NULL) {
275 path->ip_frames[i].at_shifted = 0;
276 brelse(path->ip_frames[i].bh);
277 path->ip_frames[i].bh = NULL;
282 void iam_path_fini(struct iam_path *path)
284 iam_leaf_fini(&path->ip_leaf);
285 iam_path_release(path);
289 void iam_path_compat_init(struct iam_path_compat *path, struct inode *inode)
293 path->ipc_hinfo = &path->ipc_hinfo_area;
294 for (i = 0; i < ARRAY_SIZE(path->ipc_scratch); ++i)
295 path->ipc_descr.ipd_key_scratch[i] =
296 (struct iam_ikey *)&path->ipc_scratch[i];
298 iam_path_init(&path->ipc_path, &path->ipc_container, &path->ipc_descr);
301 void iam_path_compat_fini(struct iam_path_compat *path)
303 iam_path_fini(&path->ipc_path);
307 * Helper function initializing iam_path_descr and its key scratch area.
309 struct iam_path_descr *iam_ipd_alloc(void *area, int keysize)
311 struct iam_path_descr *ipd;
317 for (i = 0; i < ARRAY_SIZE(ipd->ipd_key_scratch); ++i, karea += keysize)
318 ipd->ipd_key_scratch[i] = karea;
322 void iam_ipd_free(struct iam_path_descr *ipd)
326 int iam_node_read(struct iam_container *c, iam_ptr_t ptr,
327 handle_t *h, struct buffer_head **bh)
330 * NB: it can be called by iam_lfix_guess() which is still at a
331 * very early stage, when c->ic_root_bh and c->ic_descr->id_ops
332 * haven't been initialized yet.
333 * Also, we don't have this for IAM dir.
335 if (c->ic_root_bh != NULL &&
336 c->ic_descr->id_ops->id_root_ptr(c) == ptr) {
337 get_bh(c->ic_root_bh);
342 *bh = __ldiskfs_bread(h, c->ic_object, (int)ptr, 0);
353 * Return pointer to current leaf record. Pointer is valid while corresponding
354 * leaf node is locked and pinned.
356 static struct iam_rec *iam_leaf_rec(const struct iam_leaf *leaf)
358 return iam_leaf_ops(leaf)->rec(leaf);
362 * Return pointer to the current leaf key. This function returns pointer to
363 * the key stored in node.
365 * Caller should assume that the returned pointer is only valid while the
 * leaf node is pinned and locked.
368 static struct iam_key *iam_leaf_key(const struct iam_leaf *leaf)
370 return iam_leaf_ops(leaf)->key(leaf);
373 static int iam_leaf_key_size(const struct iam_leaf *leaf)
375 return iam_leaf_ops(leaf)->key_size(leaf);
378 static struct iam_ikey *iam_leaf_ikey(const struct iam_leaf *leaf,
379 struct iam_ikey *key)
381 return iam_leaf_ops(leaf)->ikey(leaf, key);
384 static int iam_leaf_keycmp(const struct iam_leaf *leaf,
385 const struct iam_key *key)
387 return iam_leaf_ops(leaf)->key_cmp(leaf, key);
390 static int iam_leaf_keyeq(const struct iam_leaf *leaf,
391 const struct iam_key *key)
393 return iam_leaf_ops(leaf)->key_eq(leaf, key);
396 #if LDISKFS_INVARIANT_ON
397 static int iam_path_check(struct iam_path *p)
402 struct iam_descr *param;
405 param = iam_path_descr(p);
406 for (i = 0; result && i < ARRAY_SIZE(p->ip_frames); ++i) {
407 f = &p->ip_frames[i];
409 result = dx_node_check(p, f);
411 result = !param->id_ops->id_node_check(p, f);
414 if (result && p->ip_leaf.il_bh != NULL)
417 ldiskfs_std_error(iam_path_obj(p)->i_sb, result);
423 static int iam_leaf_load(struct iam_path *path)
427 struct iam_container *c;
428 struct buffer_head *bh;
429 struct iam_leaf *leaf;
430 struct iam_descr *descr;
432 c = path->ip_container;
433 leaf = &path->ip_leaf;
434 descr = iam_path_descr(path);
435 block = path->ip_frame->leaf;
438 printk(KERN_EMERG "wrong leaf: %lu %d [%p %p %p]\n",
439 (long unsigned)path->ip_frame->leaf,
440 dx_get_count(dx_node_get_entries(path, path->ip_frame)),
441 path->ip_frames[0].bh, path->ip_frames[1].bh,
442 path->ip_frames[2].bh);
444 err = descr->id_ops->id_node_read(c, block, NULL, &bh);
447 leaf->il_curidx = block;
448 err = iam_leaf_ops(leaf)->init(leaf);
453 static void iam_unlock_htree(struct iam_container *ic,
454 struct dynlock_handle *lh)
457 dynlock_unlock(&ic->ic_tree_lock, lh);
461 static void iam_leaf_unlock(struct iam_leaf *leaf)
463 if (leaf->il_lock != NULL) {
464 iam_unlock_htree(iam_leaf_container(leaf),
467 leaf->il_lock = NULL;
471 static void iam_leaf_fini(struct iam_leaf *leaf)
473 if (leaf->il_path != NULL) {
474 iam_leaf_unlock(leaf);
475 iam_leaf_ops(leaf)->fini(leaf);
484 static void iam_leaf_start(struct iam_leaf *folio)
486 iam_leaf_ops(folio)->start(folio);
489 void iam_leaf_next(struct iam_leaf *folio)
491 iam_leaf_ops(folio)->next(folio);
494 static void iam_leaf_rec_add(struct iam_leaf *leaf, const struct iam_key *key,
495 const struct iam_rec *rec)
497 iam_leaf_ops(leaf)->rec_add(leaf, key, rec);
500 static void iam_rec_del(struct iam_leaf *leaf, int shift)
502 iam_leaf_ops(leaf)->rec_del(leaf, shift);
505 int iam_leaf_at_end(const struct iam_leaf *leaf)
507 return iam_leaf_ops(leaf)->at_end(leaf);
510 static void iam_leaf_split(struct iam_leaf *l, struct buffer_head **bh,
513 iam_leaf_ops(l)->split(l, bh, nr);
516 static inline int iam_leaf_empty(struct iam_leaf *l)
518 return iam_leaf_ops(l)->leaf_empty(l);
521 int iam_leaf_can_add(const struct iam_leaf *l,
522 const struct iam_key *k, const struct iam_rec *r)
524 return iam_leaf_ops(l)->can_add(l, k, r);
527 static int iam_txn_dirty(handle_t *handle,
528 struct iam_path *path, struct buffer_head *bh)
532 result = ldiskfs_handle_dirty_metadata(handle, NULL, bh);
534 ldiskfs_std_error(iam_path_obj(path)->i_sb, result);
538 static int iam_txn_add(handle_t *handle,
539 struct iam_path *path, struct buffer_head *bh)
543 result = ldiskfs_journal_get_write_access(handle, bh);
545 ldiskfs_std_error(iam_path_obj(path)->i_sb, result);
549 /***********************************************************************/
550 /* iterator interface */
551 /***********************************************************************/
553 static enum iam_it_state it_state(const struct iam_iterator *it)
559 * Helper function returning scratch key.
561 static struct iam_container *iam_it_container(const struct iam_iterator *it)
563 return it->ii_path.ip_container;
566 static inline int it_keycmp(const struct iam_iterator *it,
567 const struct iam_key *k)
569 return iam_leaf_keycmp(&it->ii_path.ip_leaf, k);
572 static inline int it_keyeq(const struct iam_iterator *it,
573 const struct iam_key *k)
575 return iam_leaf_keyeq(&it->ii_path.ip_leaf, k);
578 static int it_ikeycmp(const struct iam_iterator *it, const struct iam_ikey *ik)
580 return iam_ikeycmp(it->ii_path.ip_container,
581 iam_leaf_ikey(&it->ii_path.ip_leaf,
582 iam_path_ikey(&it->ii_path, 0)), ik);
585 static inline int it_at_rec(const struct iam_iterator *it)
587 return !iam_leaf_at_end(&it->ii_path.ip_leaf);
590 static inline int it_before(const struct iam_iterator *it)
592 return it_state(it) == IAM_IT_SKEWED && it_at_rec(it);
596 * Helper wrapper around iam_it_get(): returns 0 (success) only when record
597 * with exactly the same key as asked is found.
599 static int iam_it_get_exact(struct iam_iterator *it, const struct iam_key *k)
603 result = iam_it_get(it, k);
606 else if (result == 0)
608 * Return -ENOENT if cursor is located above record with a key
609 * different from one specified, or in the empty leaf.
611 * XXX returning -ENOENT only works if iam_it_get() never
612 * returns -ENOENT as a legitimate error.
619 * Initialize iterator to IAM_IT_DETACHED state.
621 * postcondition: it_state(it) == IAM_IT_DETACHED
623 int iam_it_init(struct iam_iterator *it, struct iam_container *c, __u32 flags,
624 struct iam_path_descr *pd)
626 memset(it, 0, sizeof *it);
627 it->ii_flags = flags;
628 it->ii_state = IAM_IT_DETACHED;
629 iam_path_init(&it->ii_path, c, pd);
634 * Finalize iterator and release all resources.
636 * precondition: it_state(it) == IAM_IT_DETACHED
638 void iam_it_fini(struct iam_iterator *it)
640 assert_corr(it_state(it) == IAM_IT_DETACHED);
641 iam_path_fini(&it->ii_path);
645 * These locking primitives are used to protect parts
646 * of the dir's htree. The protection unit is a block: leaf or index.
648 static struct dynlock_handle *iam_lock_htree(struct iam_container *ic,
650 enum dynlock_type lt)
652 return dynlock_lock(&ic->ic_tree_lock, value, lt, GFP_NOFS);
655 static int iam_index_lock(struct iam_path *path, struct dynlock_handle **lh)
659 for (f = path->ip_frame; f >= path->ip_frames; --f, ++lh) {
661 *lh = iam_lock_htree(path->ip_container, f->curidx, DLT_READ);
669 * Fast check for frame consistency.
671 static int iam_check_fast(struct iam_path *path, struct iam_frame *frame)
673 struct iam_container *bag;
674 struct iam_entry *next;
675 struct iam_entry *last;
676 struct iam_entry *entries;
677 struct iam_entry *at;
679 bag = path->ip_container;
681 entries = frame->entries;
682 last = iam_entry_shift(path, entries, dx_get_count(entries) - 1);
684 if (unlikely(at > last))
687 if (unlikely(dx_get_block(path, at) != frame->leaf))
690 if (unlikely(iam_ikeycmp(bag, iam_ikey_at(path, at),
691 path->ip_ikey_target) > 0))
694 next = iam_entry_shift(path, at, +1);
696 if (unlikely(iam_ikeycmp(bag, iam_ikey_at(path, next),
697 path->ip_ikey_target) <= 0))
703 int dx_index_is_compat(struct iam_path *path)
705 return iam_path_descr(path) == NULL;
711 * search position of specified hash in index
715 static struct iam_entry *iam_find_position(struct iam_path *path,
716 struct iam_frame *frame)
723 count = dx_get_count(frame->entries);
724 assert_corr(count && count <= dx_get_limit(frame->entries));
725 p = iam_entry_shift(path, frame->entries,
726 dx_index_is_compat(path) ? 1 : 2);
727 q = iam_entry_shift(path, frame->entries, count - 1);
729 m = iam_entry_shift(path, p, iam_entry_diff(path, q, p) / 2);
730 if (iam_ikeycmp(path->ip_container, iam_ikey_at(path, m),
731 path->ip_ikey_target) > 0)
732 q = iam_entry_shift(path, m, -1);
734 p = iam_entry_shift(path, m, +1);
736 return iam_entry_shift(path, p, -1);
741 static iam_ptr_t iam_find_ptr(struct iam_path *path, struct iam_frame *frame)
743 return dx_get_block(path, iam_find_position(path, frame));
746 void iam_insert_key(struct iam_path *path, struct iam_frame *frame,
747 const struct iam_ikey *key, iam_ptr_t ptr)
749 struct iam_entry *entries = frame->entries;
750 struct iam_entry *new = iam_entry_shift(path, frame->at, +1);
751 int count = dx_get_count(entries);
754 * Unfortunately we cannot assert this, as this function is sometimes
755 * called by VFS under i_sem and without pdirops lock.
757 assert_corr(1 || iam_frame_is_locked(path, frame));
758 assert_corr(count < dx_get_limit(entries));
759 assert_corr(frame->at < iam_entry_shift(path, entries, count));
760 assert_inv(dx_node_check(path, frame));
761 /* Prevent memory corruption outside of buffer_head */
762 BUG_ON(count >= dx_get_limit(entries));
763 BUG_ON((char *)iam_entry_shift(path, entries, count + 1) >
764 (frame->bh->b_data + frame->bh->b_size));
766 memmove(iam_entry_shift(path, new, 1), new,
767 (char *)iam_entry_shift(path, entries, count) - (char *)new);
768 dx_set_ikey(path, new, key);
769 dx_set_block(path, new, ptr);
770 dx_set_count(entries, count + 1);
772 BUG_ON(count > dx_get_limit(entries));
773 assert_inv(dx_node_check(path, frame));
776 void iam_insert_key_lock(struct iam_path *path, struct iam_frame *frame,
777 const struct iam_ikey *key, iam_ptr_t ptr)
779 iam_lock_bh(frame->bh);
780 iam_insert_key(path, frame, key, ptr);
781 iam_unlock_bh(frame->bh);
784 * returns 0 if path was unchanged, -EAGAIN otherwise.
786 static int iam_check_path(struct iam_path *path, struct iam_frame *frame)
790 iam_lock_bh(frame->bh);
791 equal = iam_check_fast(path, frame) == 0 ||
792 frame->leaf == iam_find_ptr(path, frame);
793 DX_DEVAL(iam_lock_stats.dls_bh_again += !equal);
794 iam_unlock_bh(frame->bh);
796 return equal ? 0 : -EAGAIN;
799 static int iam_lookup_try(struct iam_path *path)
805 struct iam_descr *param;
806 struct iam_frame *frame;
807 struct iam_container *c;
809 param = iam_path_descr(path);
810 c = path->ip_container;
812 ptr = param->id_ops->id_root_ptr(c);
813 for (frame = path->ip_frames, i = 0; i <= path->ip_indirect;
815 err = param->id_ops->id_node_read(c, (iam_ptr_t)ptr, NULL,
819 iam_lock_bh(frame->bh);
821 * node must be initialized under bh lock because concurrent
822 * creation procedure may change it and iam_lookup_try() will
823 * see obsolete tree height. -bzzz
828 if (LDISKFS_INVARIANT_ON) {
829 err = param->id_ops->id_node_check(path, frame);
834 err = param->id_ops->id_node_load(path, frame);
838 assert_inv(dx_node_check(path, frame));
840 * splitting may change the root index block and move the hash we're
841 * looking for into another index block, so we have to check for
842 * this situation and repeat from the beginning if the path got changed
846 err = iam_check_path(path, frame - 1);
851 frame->at = iam_find_position(path, frame);
853 frame->leaf = ptr = dx_get_block(path, frame->at);
855 iam_unlock_bh(frame->bh);
859 iam_unlock_bh(frame->bh);
860 path->ip_frame = --frame;
864 static int __iam_path_lookup(struct iam_path *path)
869 for (i = 0; i < DX_MAX_TREE_HEIGHT; ++ i)
870 assert(path->ip_frames[i].bh == NULL);
873 err = iam_lookup_try(path);
877 } while (err == -EAGAIN);
883 * returns 0 if path was unchanged, -EAGAIN otherwise.
885 static int iam_check_full_path(struct iam_path *path, int search)
887 struct iam_frame *bottom;
888 struct iam_frame *scan;
894 for (bottom = path->ip_frames, i = 0;
895 i < DX_MAX_TREE_HEIGHT && bottom->bh != NULL; ++bottom, ++i) {
896 ; /* find last filled in frame */
900 * Lock frames, bottom to top.
902 for (scan = bottom - 1; scan >= path->ip_frames; --scan)
903 iam_lock_bh(scan->bh);
905 * Check them top to bottom.
908 for (scan = path->ip_frames; scan < bottom; ++scan) {
909 struct iam_entry *pos;
912 if (iam_check_fast(path, scan) == 0)
915 pos = iam_find_position(path, scan);
916 if (scan->leaf != dx_get_block(path, pos)) {
922 pos = iam_entry_shift(path, scan->entries,
923 dx_get_count(scan->entries) - 1);
924 if (scan->at > pos ||
925 scan->leaf != dx_get_block(path, scan->at)) {
933 * Unlock top to bottom.
935 for (scan = path->ip_frames; scan < bottom; ++scan)
936 iam_unlock_bh(scan->bh);
937 DX_DEVAL(iam_lock_stats.dls_bh_full_again += !!result);
945 * Performs path lookup and returns with the found leaf (if any) locked by
 * the htree lock.
948 static int iam_lookup_lock(struct iam_path *path,
949 struct dynlock_handle **dl, enum dynlock_type lt)
953 while ((result = __iam_path_lookup(path)) == 0) {
955 *dl = iam_lock_htree(path->ip_container, path->ip_frame->leaf,
964 * while we were locking it, the leaf we just found may have been split,
965 * so we need to check for this -bzzz
967 if (iam_check_full_path(path, 1) == 0)
969 iam_unlock_htree(path->ip_container, *dl);
976 * Performs tree top-to-bottom traversal starting from the root, and loads
 * the leaf node.
979 static int iam_path_lookup(struct iam_path *path, int index)
981 struct iam_leaf *leaf;
984 leaf = &path->ip_leaf;
985 result = iam_lookup_lock(path, &leaf->il_lock, DLT_WRITE);
986 assert_inv(iam_path_check(path));
989 result = iam_leaf_load(path);
993 result = iam_leaf_ops(leaf)->
994 ilookup(leaf, path->ip_ikey_target);
996 result = iam_leaf_ops(leaf)->
997 lookup(leaf, path->ip_key_target);
1001 iam_leaf_unlock(leaf);
1007 * Common part of iam_it_{i,}get().
1009 static int __iam_it_get(struct iam_iterator *it, int index)
1013 assert_corr(it_state(it) == IAM_IT_DETACHED);
1015 result = iam_path_lookup(&it->ii_path, index);
1019 collision = result & IAM_LOOKUP_LAST;
1020 switch (result & ~IAM_LOOKUP_LAST) {
1021 case IAM_LOOKUP_EXACT:
1023 it->ii_state = IAM_IT_ATTACHED;
1027 it->ii_state = IAM_IT_ATTACHED;
1029 case IAM_LOOKUP_BEFORE:
1030 case IAM_LOOKUP_EMPTY:
1032 it->ii_state = IAM_IT_SKEWED;
1037 result |= collision;
1040 * See iam_it_get_exact() for explanation.
1042 assert_corr(result != -ENOENT);
1047 * Correct hash, but not the same key, was found; iterate through the hash
1048 * collision chain, looking for the correct record.
1050 static int iam_it_collision(struct iam_iterator *it)
1054 assert(ergo(it_at_rec(it), !it_keyeq(it, it->ii_path.ip_key_target)));
1056 while ((result = iam_it_next(it)) == 0) {
1057 do_corr(schedule());
1058 if (it_ikeycmp(it, it->ii_path.ip_ikey_target) != 0)
1060 if (it_keyeq(it, it->ii_path.ip_key_target))
1067 * Attach iterator. After successful completion, @it points to record with
1068 * least key not larger than @k.
1070 * Return value: 0: positioned on existing record,
1071 * +ve: exact position found,
1074 * precondition: it_state(it) == IAM_IT_DETACHED
1075 * postcondition: ergo(result == 0 && it_state(it) == IAM_IT_ATTACHED,
1076 * it_keycmp(it, k) <= 0)
1078 int iam_it_get(struct iam_iterator *it, const struct iam_key *k)
1082 assert_corr(it_state(it) == IAM_IT_DETACHED);
1084 it->ii_path.ip_ikey_target = NULL;
1085 it->ii_path.ip_key_target = k;
1087 result = __iam_it_get(it, 0);
1089 if (result == IAM_LOOKUP_LAST) {
1090 result = iam_it_collision(it);
1094 result = __iam_it_get(it, 0);
1099 result &= ~IAM_LOOKUP_LAST;
1101 assert_corr(ergo(result > 0, it_keycmp(it, k) == 0));
1102 assert_corr(ergo(result == 0 && it_state(it) == IAM_IT_ATTACHED,
1103 it_keycmp(it, k) <= 0));
1108 * Attach iterator by index key.
1110 static int iam_it_iget(struct iam_iterator *it, const struct iam_ikey *k)
1112 assert_corr(it_state(it) == IAM_IT_DETACHED);
1114 it->ii_path.ip_ikey_target = k;
1115 return __iam_it_get(it, 1) & ~IAM_LOOKUP_LAST;
1119 * Attach iterator, and assure it points to the record (not skewed).
1121 * Return value: 0: positioned on existing record,
1122 * +ve: exact position found,
1125 * precondition: it_state(it) == IAM_IT_DETACHED &&
1126 * !(it->ii_flags&IAM_IT_WRITE)
1127 * postcondition: ergo(result == 0, it_state(it) == IAM_IT_ATTACHED)
1129 int iam_it_get_at(struct iam_iterator *it, const struct iam_key *k)
1133 assert_corr(it_state(it) == IAM_IT_DETACHED &&
1134 !(it->ii_flags&IAM_IT_WRITE));
1135 result = iam_it_get(it, k);
1137 if (it_state(it) != IAM_IT_ATTACHED) {
1138 assert_corr(it_state(it) == IAM_IT_SKEWED);
1139 result = iam_it_next(it);
1142 assert_corr(ergo(result >= 0, it_state(it) == IAM_IT_ATTACHED));
1147 * Duplicates iterator.
1149 * postcondition: it_state(dst) == it_state(src) &&
1150 * iam_it_container(dst) == iam_it_container(src) &&
1151 * dst->ii_flags == src->ii_flags &&
1152 * ergo(it_state(src) == IAM_IT_ATTACHED,
1153 * iam_it_rec_get(dst) == iam_it_rec_get(src) &&
1154 * iam_it_key_get(dst) == iam_it_key_get(src))
1156 void iam_it_dup(struct iam_iterator *dst, const struct iam_iterator *src)
1158 dst->ii_flags = src->ii_flags;
1159 dst->ii_state = src->ii_state;
1160 /* XXX not yet. iam_path_dup(&dst->ii_path, &src->ii_path); */
1162 * XXX: duplicate lock.
1164 assert_corr(it_state(dst) == it_state(src));
1165 assert_corr(iam_it_container(dst) == iam_it_container(src));
1166 assert_corr(dst->ii_flags == src->ii_flags);
1167 assert_corr(ergo(it_state(src) == IAM_IT_ATTACHED,
1168 iam_it_rec_get(dst) == iam_it_rec_get(src) &&
1169 iam_it_key_get(dst) == iam_it_key_get(src)));
1173 * Detach iterator. Does nothing in detached state.
1175 * postcondition: it_state(it) == IAM_IT_DETACHED
1177 void iam_it_put(struct iam_iterator *it)
1179 if (it->ii_state != IAM_IT_DETACHED) {
1180 it->ii_state = IAM_IT_DETACHED;
1181 iam_leaf_fini(&it->ii_path.ip_leaf);
1185 static struct iam_ikey *iam_it_ikey_get(const struct iam_iterator *it,
1186 struct iam_ikey *ikey);
1190 * This function increments the frame pointer to search the next leaf
1191 * block, and reads in the necessary intervening nodes if the search
1192 * should be necessary. Whether or not the search is necessary is
1193 * controlled by the hash parameter. If the hash value is even, then
1194 * the search is only continued if the next block starts with that
1195 * hash value. This is used if we are searching for a specific file.
1197 * If the hash value is HASH_NB_ALWAYS, then always go to the next block.
1199 * This function returns 1 if the caller should continue to search,
1200 * or 0 if it should not. If there is an error reading one of the
1201 * index blocks, it will return a negative error code.
1203 * If start_hash is non-null, it will be filled in with the starting
1204 * hash of the next page.
1206 static int iam_htree_advance(struct inode *dir, __u32 hash,
1207 struct iam_path *path, __u32 *start_hash,
1210 struct iam_frame *p;
1211 struct buffer_head *bh;
1212 int err, num_frames = 0;
1217 * Find the next leaf page by incrementing the frame pointer.
1218 * If we run out of entries in the interior node, loop around and
1219 * increment pointer in the parent node. When we break out of
1220 * this loop, num_frames indicates the number of interior
1221 * nodes that need to be read.
1224 do_corr(schedule());
1229 p->at = iam_entry_shift(path, p->at, +1);
1230 if (p->at < iam_entry_shift(path, p->entries,
1231 dx_get_count(p->entries))) {
1232 p->leaf = dx_get_block(path, p->at);
1233 iam_unlock_bh(p->bh);
1236 iam_unlock_bh(p->bh);
1237 if (p == path->ip_frames)
1249 * If the hash is 1, then continue only if the next page has a
1250 * continuation hash of any value. This is used for readdir
1251 * handling. Otherwise, check to see if the hash matches the
1252 * desired continuation hash. If it doesn't, return since
1253 * there's no point in reading the successive index pages.
1255 dx_get_ikey(path, p->at, (struct iam_ikey *)&bhash);
1257 *start_hash = bhash;
1258 if ((hash & 1) == 0) {
1259 if ((bhash & ~1) != hash)
1264 * If the hash is HASH_NB_ALWAYS, we always go to the next
1265 * block so no check is necessary
1267 while (num_frames--) {
1270 do_corr(schedule());
1272 idx = p->leaf = dx_get_block(path, p->at);
1273 iam_unlock_bh(p->bh);
1274 err = iam_path_descr(path)->id_ops->
1275 id_node_read(path->ip_container, idx, NULL, &bh);
1277 return err; /* Failure */
1280 assert_corr(p->bh != bh);
1282 p->entries = dx_node_get_entries(path, p);
1283 p->at = iam_entry_shift(path, p->entries, !compat);
1284 assert_corr(p->curidx != idx);
1287 assert_corr(p->leaf != dx_get_block(path, p->at));
1288 p->leaf = dx_get_block(path, p->at);
1289 iam_unlock_bh(p->bh);
1290 assert_inv(dx_node_check(path, p));
1295 static inline int iam_index_advance(struct iam_path *path)
1297 return iam_htree_advance(iam_path_obj(path), 0, path, NULL, 0);
1300 static void iam_unlock_array(struct iam_container *ic,
1301 struct dynlock_handle **lh)
1305 for (i = 0; i < DX_MAX_TREE_HEIGHT; ++i, ++lh) {
1307 iam_unlock_htree(ic, *lh);
1313 * Advance index part of @path to point to the next leaf. Returns 1 on
1314 * success, 0 when the end of the container was reached. Leaf node is locked.
1316 int iam_index_next(struct iam_container *c, struct iam_path *path)
1319 struct dynlock_handle *lh[DX_MAX_TREE_HEIGHT] = { NULL, };
1323 * Locking for iam_index_next()... is to be described.
1326 cursor = path->ip_frame->leaf;
1329 result = iam_index_lock(path, lh);
1330 do_corr(schedule());
1334 result = iam_check_full_path(path, 0);
1335 if (result == 0 && cursor == path->ip_frame->leaf) {
1336 result = iam_index_advance(path);
1338 assert_corr(result == 0 ||
1339 cursor != path->ip_frame->leaf);
1343 iam_unlock_array(c, lh);
1345 iam_path_release(path);
1346 do_corr(schedule());
1348 result = __iam_path_lookup(path);
1352 while (path->ip_frame->leaf != cursor) {
1353 do_corr(schedule());
1355 result = iam_index_lock(path, lh);
1356 do_corr(schedule());
1360 result = iam_check_full_path(path, 0);
1364 result = iam_index_advance(path);
1366 CERROR("cannot find cursor : %u\n",
1372 result = iam_check_full_path(path, 0);
1375 iam_unlock_array(c, lh);
1377 } while (result == -EAGAIN);
1381 iam_unlock_array(c, lh);
1386 * Move iterator one record right.
1388 * Return value: 0: success,
1389 * +1: end of container reached
1392 * precondition: (it_state(it) == IAM_IT_ATTACHED ||
1393 * it_state(it) == IAM_IT_SKEWED) && it->ii_flags&IAM_IT_MOVE
1394 * postcondition: ergo(result == 0, it_state(it) == IAM_IT_ATTACHED) &&
1395 * ergo(result > 0, it_state(it) == IAM_IT_DETACHED)
1397 int iam_it_next(struct iam_iterator *it)
1400 struct iam_path *path;
1401 struct iam_leaf *leaf;
1403 do_corr(struct iam_ikey *ik_orig);
1405 /* assert_corr(it->ii_flags&IAM_IT_MOVE); */
1406 assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1407 it_state(it) == IAM_IT_SKEWED);
1409 path = &it->ii_path;
1410 leaf = &path->ip_leaf;
1412 assert_corr(iam_leaf_is_locked(leaf));
1415 do_corr(ik_orig = it_at_rec(it) ?
1416 iam_it_ikey_get(it, iam_path_ikey(path, 2)) : NULL);
1417 if (it_before(it)) {
1418 assert_corr(!iam_leaf_at_end(leaf));
1419 it->ii_state = IAM_IT_ATTACHED;
1421 if (!iam_leaf_at_end(leaf))
1422 /* advance within leaf node */
1423 iam_leaf_next(leaf);
1425 * multiple iterations may be necessary due to empty leaves.
1427 while (result == 0 && iam_leaf_at_end(leaf)) {
1428 do_corr(schedule());
1429 /* advance index portion of the path */
1430 result = iam_index_next(iam_it_container(it), path);
1431 assert_corr(iam_leaf_is_locked(leaf));
1433 struct dynlock_handle *lh;
1434 lh = iam_lock_htree(iam_it_container(it),
1435 path->ip_frame->leaf,
1438 iam_leaf_fini(leaf);
1440 result = iam_leaf_load(path);
1442 iam_leaf_start(leaf);
1445 } else if (result == 0)
1446 /* end of container reached */
1452 it->ii_state = IAM_IT_ATTACHED;
1454 assert_corr(ergo(result == 0, it_state(it) == IAM_IT_ATTACHED));
1455 assert_corr(ergo(result > 0, it_state(it) == IAM_IT_DETACHED));
1456 assert_corr(ergo(result == 0 && ik_orig != NULL,
1457 it_ikeycmp(it, ik_orig) >= 0));
1462 * Return pointer to the record under iterator.
1464 * precondition: it_state(it) == IAM_IT_ATTACHED && it_at_rec(it)
1465 * postcondition: it_state(it) == IAM_IT_ATTACHED
1467 struct iam_rec *iam_it_rec_get(const struct iam_iterator *it)
1469 assert_corr(it_state(it) == IAM_IT_ATTACHED);
1470 assert_corr(it_at_rec(it));
1471 return iam_leaf_rec(&it->ii_path.ip_leaf);
1474 static void iam_it_reccpy(struct iam_iterator *it, const struct iam_rec *r)
1476 struct iam_leaf *folio;
1478 folio = &it->ii_path.ip_leaf;
1479 iam_leaf_ops(folio)->rec_set(folio, r);
1483 * Replace contents of record under iterator.
1485 * precondition: it_state(it) == IAM_IT_ATTACHED &&
1486 * it->ii_flags&IAM_IT_WRITE
1487 * postcondition: it_state(it) == IAM_IT_ATTACHED &&
1488 * ergo(result == 0, !memcmp(iam_it_rec_get(it), r, ...))
1490 int iam_it_rec_set(handle_t *h,
1491 struct iam_iterator *it, const struct iam_rec *r)
1494 struct iam_path *path;
1495 struct buffer_head *bh;
1497 assert_corr(it_state(it) == IAM_IT_ATTACHED &&
1498 it->ii_flags&IAM_IT_WRITE);
1499 assert_corr(it_at_rec(it));
1501 path = &it->ii_path;
1502 bh = path->ip_leaf.il_bh;
1503 result = iam_txn_add(h, path, bh);
1505 iam_it_reccpy(it, r);
1506 result = iam_txn_dirty(h, path, bh);
1512 * Return pointer to the index key under iterator.
1514 * precondition: it_state(it) == IAM_IT_ATTACHED ||
1515 * it_state(it) == IAM_IT_SKEWED
1517 static struct iam_ikey *iam_it_ikey_get(const struct iam_iterator *it,
1518 struct iam_ikey *ikey)
1520 assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1521 it_state(it) == IAM_IT_SKEWED);
1522 assert_corr(it_at_rec(it));
1523 return iam_leaf_ikey(&it->ii_path.ip_leaf, ikey);
1527 * Return pointer to the key under iterator.
1529 * precondition: it_state(it) == IAM_IT_ATTACHED ||
1530 * it_state(it) == IAM_IT_SKEWED
1532 struct iam_key *iam_it_key_get(const struct iam_iterator *it)
1534 assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1535 it_state(it) == IAM_IT_SKEWED);
1536 assert_corr(it_at_rec(it));
1537 return iam_leaf_key(&it->ii_path.ip_leaf);
1541 * Return size of key under iterator (in bytes)
1543 * precondition: it_state(it) == IAM_IT_ATTACHED ||
1544 * it_state(it) == IAM_IT_SKEWED
1546 int iam_it_key_size(const struct iam_iterator *it)
1548 assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1549 it_state(it) == IAM_IT_SKEWED);
1550 assert_corr(it_at_rec(it));
1551 return iam_leaf_key_size(&it->ii_path.ip_leaf);
1554 static struct buffer_head *
1555 iam_new_node(handle_t *h, struct iam_container *c, iam_ptr_t *b, int *e)
1557 struct inode *inode = c->ic_object;
1558 struct buffer_head *bh = NULL;
1559 struct iam_idle_head *head;
1560 struct buffer_head *idle;
1564 if (c->ic_idle_bh == NULL)
1567 mutex_lock(&c->ic_idle_mutex);
1568 if (unlikely(c->ic_idle_bh == NULL)) {
1569 mutex_unlock(&c->ic_idle_mutex);
1573 head = (struct iam_idle_head *)(c->ic_idle_bh->b_data);
1574 count = le16_to_cpu(head->iih_count);
1576 *e = ldiskfs_journal_get_write_access(h, c->ic_idle_bh);
1581 *b = le32_to_cpu(head->iih_blks[count]);
1582 head->iih_count = cpu_to_le16(count);
1583 *e = ldiskfs_handle_dirty_metadata(h, inode, c->ic_idle_bh);
1587 mutex_unlock(&c->ic_idle_mutex);
1588 bh = __ldiskfs_bread(NULL, inode, *b, 0);
1589 if (IS_ERR_OR_NULL(bh)) {
1599 /* The block itself which contains the iam_idle_head is
1600 * also an idle block, and can be used as the new node. */
1601 idle_blocks = (__u32 *)(c->ic_root_bh->b_data +
1602 c->ic_descr->id_root_gap +
1603 sizeof(struct dx_countlimit));
1604 *e = ldiskfs_journal_get_write_access(h, c->ic_root_bh);
1608 *b = le32_to_cpu(*idle_blocks);
1609 iam_lock_bh(c->ic_root_bh);
1610 *idle_blocks = head->iih_next;
1611 iam_unlock_bh(c->ic_root_bh);
1612 *e = ldiskfs_handle_dirty_metadata(h, inode, c->ic_root_bh);
1614 iam_lock_bh(c->ic_root_bh);
1615 *idle_blocks = cpu_to_le32(*b);
1616 iam_unlock_bh(c->ic_root_bh);
1621 idle = iam_load_idle_blocks(c, le32_to_cpu(*idle_blocks));
1622 if (idle != NULL && IS_ERR(idle)) {
1624 c->ic_idle_bh = NULL;
1629 c->ic_idle_bh = idle;
1630 mutex_unlock(&c->ic_idle_mutex);
1633 /* get write access for the found buffer head */
1634 *e = ldiskfs_journal_get_write_access(h, bh);
1638 ldiskfs_std_error(inode->i_sb, *e);
1640 /* Clear the reused node, just as a newly allocated node would be. */
1641 memset(bh->b_data, 0, inode->i_sb->s_blocksize);
1642 set_buffer_uptodate(bh);
1647 bh = osd_ldiskfs_append(h, inode, b);
1656 mutex_unlock(&c->ic_idle_mutex);
1657 ldiskfs_std_error(inode->i_sb, *e);
1662 * Insertion of new record. Interaction with jbd during non-trivial case (when
1663 * split happens) is as follows:
1665 * - new leaf node is involved into transaction by iam_new_node();
1667 * - old leaf node is involved into transaction by iam_add_rec();
1669 * - the leaf where the insertion point ends up is marked dirty by iam_add_rec();
1671 * - leaf without insertion point is marked dirty (as @new_leaf) by
1674 * - split index nodes are involved into transaction and marked dirty by
1675 * split_index_node().
1677 * - "safe" index node, which is no split, but where new pointer is inserted
1678 * is involved into transaction and marked dirty by split_index_node().
1680 * - index node where pointer to new leaf is inserted is involved into
1681 * transaction by split_index_node() and marked dirty by iam_add_rec().
1683 * - inode is marked dirty by iam_add_rec().
1687 static int iam_new_leaf(handle_t *handle, struct iam_leaf *leaf)
1691 struct buffer_head *new_leaf;
1692 struct buffer_head *old_leaf;
1693 struct iam_container *c;
1695 struct iam_path *path;
1697 c = iam_leaf_container(leaf);
1698 path = leaf->il_path;
1701 new_leaf = iam_new_node(handle, c, &blknr, &err);
1702 do_corr(schedule());
1703 if (new_leaf != NULL) {
1704 struct dynlock_handle *lh;
1706 lh = iam_lock_htree(c, blknr, DLT_WRITE);
1707 do_corr(schedule());
1709 iam_leaf_ops(leaf)->init_new(c, new_leaf);
1710 do_corr(schedule());
1711 old_leaf = leaf->il_bh;
1712 iam_leaf_split(leaf, &new_leaf, blknr);
1713 if (old_leaf != leaf->il_bh) {
1715 * Switched to the new leaf.
1717 iam_leaf_unlock(leaf);
1719 path->ip_frame->leaf = blknr;
1721 iam_unlock_htree(path->ip_container, lh);
1722 do_corr(schedule());
1723 err = iam_txn_dirty(handle, path, new_leaf);
1725 err = ldiskfs_mark_inode_dirty(handle, obj);
1726 do_corr(schedule());
1731 assert_inv(iam_path_check(iam_leaf_path(leaf)));
1735 static inline void dx_set_limit(struct iam_entry *entries, unsigned value)
1737 ((struct dx_countlimit *) entries)->limit = cpu_to_le16(value);
1740 static int iam_shift_entries(struct iam_path *path,
1741 struct iam_frame *frame, unsigned count,
1742 struct iam_entry *entries, struct iam_entry *entries2,
1749 struct iam_frame *parent = frame - 1;
1750 struct iam_ikey *pivot = iam_path_ikey(path, 3);
1752 delta = dx_index_is_compat(path) ? 0 : +1;
1754 count1 = count/2 + delta;
1755 count2 = count - count1;
1756 dx_get_ikey(path, iam_entry_shift(path, entries, count1), pivot);
1758 dxtrace(printk("Split index %d/%d\n", count1, count2));
1760 memcpy((char *) iam_entry_shift(path, entries2, delta),
1761 (char *) iam_entry_shift(path, entries, count1),
1762 count2 * iam_entry_size(path));
1764 dx_set_count(entries2, count2 + delta);
1765 dx_set_limit(entries2, dx_node_limit(path));
1768 * NOTE: very subtle piece of code. A competing dx_probe() may find the 2nd
1769 * level index in the root index, then we insert the new index here and set
1770 * the new count in that 2nd level index. So dx_probe() may see the 2nd level
1771 * index w/o the hash it looks for. The solution is to check the root index
1772 * after we have locked the just-found 2nd level index -bzzz
1774 iam_insert_key_lock(path, parent, pivot, newblock);
1777 * now old and new 2nd level index blocks contain all pointers, so
1778 * dx_probe() may find it in both. It's OK -bzzz
1780 iam_lock_bh(frame->bh);
1781 dx_set_count(entries, count1);
1782 iam_unlock_bh(frame->bh);
1785 * now the old 2nd level index block points to the first half of the leaves.
1786 * It's important that dx_probe() must check the root index block for changes
1787 * under dx_lock_bh(frame->bh) -bzzz
1794 int split_index_node(handle_t *handle, struct iam_path *path,
1795 struct dynlock_handle **lh)
1797 struct iam_entry *entries; /* old block contents */
1798 struct iam_entry *entries2; /* new block contents */
1799 struct iam_frame *frame, *safe;
1800 struct buffer_head *bh_new[DX_MAX_TREE_HEIGHT] = {NULL};
1801 u32 newblock[DX_MAX_TREE_HEIGHT] = {0};
1802 struct dynlock_handle *lock[DX_MAX_TREE_HEIGHT] = {NULL,};
1803 struct dynlock_handle *new_lock[DX_MAX_TREE_HEIGHT] = {NULL,};
1804 struct inode *dir = iam_path_obj(path);
1805 struct iam_descr *descr;
1809 descr = iam_path_descr(path);
1811 * Algorithm below depends on this.
1813 assert_corr(dx_root_limit(path) < dx_node_limit(path));
1815 frame = path->ip_frame;
1816 entries = frame->entries;
1819 * Tall-tree handling: we might have to split multiple index blocks
1820 * all the way up to tree root. Tricky point here is error handling:
1821 * to avoid complicated undo/rollback we
1823 * - first allocate all necessary blocks
1825 * - insert pointers into them atomically.
1829 * Locking: leaf is already locked. htree-locks are acquired on all
1830 * index nodes that require split bottom-to-top, on the "safe" node,
1831 * and on all new nodes
1834 dxtrace(printk("using %u of %u node entries\n",
1835 dx_get_count(entries), dx_get_limit(entries)));
1837 /* What levels need split? */
1838 for (nr_splet = 0; frame >= path->ip_frames &&
1839 dx_get_count(frame->entries) == dx_get_limit(frame->entries);
1840 --frame, ++nr_splet) {
1841 do_corr(schedule());
1842 if (nr_splet == DX_MAX_TREE_HEIGHT) {
1844 * CWARN(dir->i_sb, __FUNCTION__,
1845 * "Directory index full!\n");
1855 * Lock all nodes, bottom to top.
1857 for (frame = path->ip_frame, i = nr_splet; i >= 0; --i, --frame) {
1858 do_corr(schedule());
1859 lock[i] = iam_lock_htree(path->ip_container, frame->curidx,
1861 if (lock[i] == NULL) {
1868 * Check for concurrent index modification.
1870 err = iam_check_full_path(path, 1);
1874 * And check that the same number of nodes is to be split.
1876 for (i = 0, frame = path->ip_frame; frame >= path->ip_frames &&
1877 dx_get_count(frame->entries) == dx_get_limit(frame->entries);
1881 if (i != nr_splet) {
1887 * Go back down, allocating blocks, locking them, and adding them into the
 * transaction.
1890 for (frame = safe + 1, i = 0; i < nr_splet; ++i, ++frame) {
1891 bh_new[i] = iam_new_node(handle, path->ip_container,
1892 &newblock[i], &err);
1893 do_corr(schedule());
1895 descr->id_ops->id_node_init(path->ip_container,
1899 new_lock[i] = iam_lock_htree(path->ip_container, newblock[i],
1901 if (new_lock[i] == NULL) {
1905 do_corr(schedule());
1906 BUFFER_TRACE(frame->bh, "get_write_access");
1907 err = ldiskfs_journal_get_write_access(handle, frame->bh);
1911 /* Add "safe" node to transaction too */
1912 if (safe + 1 != path->ip_frames) {
1913 do_corr(schedule());
1914 err = ldiskfs_journal_get_write_access(handle, safe->bh);
1919 /* Go through nodes once more, inserting pointers */
1920 for (frame = safe + 1, i = 0; i < nr_splet; ++i, ++frame) {
1923 struct buffer_head *bh2;
1924 struct buffer_head *bh;
1926 entries = frame->entries;
1927 count = dx_get_count(entries);
1928 idx = iam_entry_diff(path, frame->at, entries);
1931 entries2 = dx_get_entries(path, bh2->b_data, 0);
1934 if (frame == path->ip_frames) {
1935 /* splitting root node. Tricky point:
1937 * In the "normal" B-tree we'd split root *and* add
1938 * new root to the tree with pointers to the old root
1939 * and its sibling (thus introducing two new nodes).
1941 * In htree it's enough to add one node, because
1942 * capacity of the root node is smaller than that of a non-root node.
1945 struct iam_frame *frames;
1946 struct iam_entry *next;
1948 assert_corr(i == 0);
1950 do_corr(schedule());
1952 frames = path->ip_frames;
1953 memcpy((char *) entries2, (char *) entries,
1954 count * iam_entry_size(path));
1955 dx_set_limit(entries2, dx_node_limit(path));
1958 iam_lock_bh(frame->bh);
1959 next = descr->id_ops->id_root_inc(path->ip_container,
1961 dx_set_block(path, next, newblock[0]);
1962 iam_unlock_bh(frame->bh);
1964 do_corr(schedule());
1965 /* Shift frames in the path */
1966 memmove(frames + 2, frames + 1,
1967 (sizeof path->ip_frames) - 2 * sizeof frames[0]);
1968 /* Add new access path frame */
1969 frames[1].at = iam_entry_shift(path, entries2, idx);
1970 frames[1].entries = entries = entries2;
1972 assert_inv(dx_node_check(path, frame));
1975 assert_inv(dx_node_check(path, frame));
1976 bh_new[0] = NULL; /* buffer head is "consumed" */
1977 err = ldiskfs_handle_dirty_metadata(handle, NULL, bh2);
1980 do_corr(schedule());
1982 /* splitting non-root index node. */
1983 struct iam_frame *parent = frame - 1;
1985 do_corr(schedule());
1986 count = iam_shift_entries(path, frame, count,
1987 entries, entries2, newblock[i]);
1988 /* Which index block gets the new entry? */
1990 int d = dx_index_is_compat(path) ? 0 : +1;
1992 frame->at = iam_entry_shift(path, entries2,
1994 frame->entries = entries = entries2;
1995 frame->curidx = newblock[i];
1996 swap(frame->bh, bh2);
1997 assert_corr(lock[i + 1] != NULL);
1998 assert_corr(new_lock[i] != NULL);
1999 swap(lock[i + 1], new_lock[i]);
2001 parent->at = iam_entry_shift(path,
2004 assert_inv(dx_node_check(path, frame));
2005 assert_inv(dx_node_check(path, parent));
2006 dxtrace(dx_show_index("node", frame->entries));
2007 dxtrace(dx_show_index("node",
2008 ((struct dx_node *) bh2->b_data)->entries));
2009 err = ldiskfs_handle_dirty_metadata(handle, NULL, bh2);
2012 do_corr(schedule());
2013 err = ldiskfs_handle_dirty_metadata(handle, NULL,
2018 do_corr(schedule());
2019 err = ldiskfs_handle_dirty_metadata(handle, NULL, bh);
2024 * This function was called to make insertion of new leaf
2025 * possible. Check that it fulfilled its obligations.
2027 assert_corr(dx_get_count(path->ip_frame->entries) <
2028 dx_get_limit(path->ip_frame->entries));
2029 assert_corr(lock[nr_splet] != NULL);
2030 *lh = lock[nr_splet];
2031 lock[nr_splet] = NULL;
2034 * Log ->i_size modification.
2036 err = ldiskfs_mark_inode_dirty(handle, dir);
2042 ldiskfs_std_error(dir->i_sb, err);
2045 iam_unlock_array(path->ip_container, lock);
2046 iam_unlock_array(path->ip_container, new_lock);
2048 assert_corr(err || iam_frame_is_locked(path, path->ip_frame));
2050 do_corr(schedule());
2051 for (i = 0; i < ARRAY_SIZE(bh_new); ++i) {
2052 if (bh_new[i] != NULL)
2058 static int iam_add_rec(handle_t *handle, struct iam_iterator *it,
2059 struct iam_path *path,
2060 const struct iam_key *k, const struct iam_rec *r)
2063 struct iam_leaf *leaf;
2065 leaf = &path->ip_leaf;
2066 assert_inv(iam_path_check(path));
2067 err = iam_txn_add(handle, path, leaf->il_bh);
2069 do_corr(schedule());
2070 if (!iam_leaf_can_add(leaf, k, r)) {
2071 struct dynlock_handle *lh = NULL;
2074 assert_corr(lh == NULL);
2075 do_corr(schedule());
2076 err = split_index_node(handle, path, &lh);
2077 if (err == -EAGAIN) {
2078 assert_corr(lh == NULL);
2080 iam_path_fini(path);
2081 it->ii_state = IAM_IT_DETACHED;
2083 do_corr(schedule());
2084 err = iam_it_get_exact(it, k);
2086 err = +1; /* repeat split */
2091 assert_inv(iam_path_check(path));
2093 assert_corr(lh != NULL);
2094 do_corr(schedule());
2095 err = iam_new_leaf(handle, leaf);
2097 err = iam_txn_dirty(handle, path,
2098 path->ip_frame->bh);
2100 iam_unlock_htree(path->ip_container, lh);
2101 do_corr(schedule());
2104 iam_leaf_rec_add(leaf, k, r);
2105 err = iam_txn_dirty(handle, path, leaf->il_bh);
2108 assert_inv(iam_path_check(path));
2113 * Insert new record with key @k and contents from @r, shifting records to the
2114 * right. On success, iterator is positioned on the newly inserted record.
2116 * precondition: it->ii_flags&IAM_IT_WRITE &&
2117 * (it_state(it) == IAM_IT_ATTACHED ||
2118 * it_state(it) == IAM_IT_SKEWED) &&
2119 * ergo(it_state(it) == IAM_IT_ATTACHED,
2120 * it_keycmp(it, k) <= 0) &&
2121 * ergo(it_before(it), it_keycmp(it, k) > 0));
2122 * postcondition: ergo(result == 0,
2123 * it_state(it) == IAM_IT_ATTACHED &&
2124 * it_keycmp(it, k) == 0 &&
2125 * !memcmp(iam_it_rec_get(it), r, ...))
2127 int iam_it_rec_insert(handle_t *h, struct iam_iterator *it,
2128 const struct iam_key *k, const struct iam_rec *r)
2131 struct iam_path *path;
2133 path = &it->ii_path;
2135 assert_corr(it->ii_flags&IAM_IT_WRITE);
2136 assert_corr(it_state(it) == IAM_IT_ATTACHED ||
2137 it_state(it) == IAM_IT_SKEWED);
2138 assert_corr(ergo(it_state(it) == IAM_IT_ATTACHED,
2139 it_keycmp(it, k) <= 0));
2140 assert_corr(ergo(it_before(it), it_keycmp(it, k) > 0));
2141 result = iam_add_rec(h, it, path, k, r);
2143 it->ii_state = IAM_IT_ATTACHED;
2144 assert_corr(ergo(result == 0,
2145 it_state(it) == IAM_IT_ATTACHED &&
2146 it_keycmp(it, k) == 0));
2150 static inline int iam_idle_blocks_limit(struct inode *inode)
2152 return (inode->i_sb->s_blocksize - sizeof(struct iam_idle_head)) >> 2;
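/*
 * For example (a rough estimate, assuming the 8-byte iam_idle_head
 * sketched in the header comment: two 16-bit fields plus one 32-bit
 * field), a 4096-byte block yields (4096 - 8) >> 2 = 1022 idle-block
 * slots per table.
 */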
2156 * If the leaf cannot be recycled, we will lose one block for reuse.
2157 * It is not a serious issue, because it is almost the same as not recycling.
2159 static iam_ptr_t iam_index_shrink(handle_t *h, struct iam_path *p,
2160 struct iam_leaf *l, struct buffer_head **bh)
2162 struct iam_container *c = p->ip_container;
2163 struct inode *inode = c->ic_object;
2164 struct iam_frame *frame = p->ip_frame;
2165 struct iam_entry *entries;
2166 struct iam_entry *pos;
2167 struct dynlock_handle *lh;
2171 if (c->ic_idle_failed)
2174 if (unlikely(frame == NULL))
2177 if (!iam_leaf_empty(l))
2180 lh = iam_lock_htree(c, frame->curidx, DLT_WRITE);
2182 CWARN("%s: No memory to recycle idle blocks\n",
2183 osd_ino2name(inode));
2187 rc = iam_txn_add(h, p, frame->bh);
2189 iam_unlock_htree(c, lh);
2193 iam_lock_bh(frame->bh);
2194 entries = frame->entries;
2195 count = dx_get_count(entries);
2197 * Do NOT shrink the last entry in the index node, which can be reused
2198 * directly by the next new node.
2201 iam_unlock_bh(frame->bh);
2202 iam_unlock_htree(c, lh);
2206 pos = iam_find_position(p, frame);
2208 * Some new leaf nodes may have been added, or empty leaf nodes may have
2209 * been shrunk away, during this delete operation.
2211 * If the empty leaf is no longer under the current index node because the
2212 * index node has been split, then just skip the empty leaf, which is rare.
2214 if (unlikely(frame->leaf != dx_get_block(p, pos))) {
2215 iam_unlock_bh(frame->bh);
2216 iam_unlock_htree(c, lh);
2221 if (frame->at < iam_entry_shift(p, entries, count - 1)) {
2222 struct iam_entry *n = iam_entry_shift(p, frame->at, 1);
2224 memmove(frame->at, n,
2225 (char *)iam_entry_shift(p, entries, count) - (char *)n);
2226 frame->at_shifted = 1;
2228 dx_set_count(entries, count - 1);
2229 iam_unlock_bh(frame->bh);
2230 rc = iam_txn_dirty(h, p, frame->bh);
2231 iam_unlock_htree(c, lh);
2241 iam_install_idle_blocks(handle_t *h, struct iam_path *p, struct buffer_head *bh,
2242 __u32 *idle_blocks, iam_ptr_t blk)
2244 struct iam_container *c = p->ip_container;
2245 struct buffer_head *old = c->ic_idle_bh;
2246 struct iam_idle_head *head;
2249 head = (struct iam_idle_head *)(bh->b_data);
2250 head->iih_magic = cpu_to_le16(IAM_IDLE_HEADER_MAGIC);
2251 head->iih_count = 0;
2252 head->iih_next = *idle_blocks;
2253 /* The bh has already been made writable via get_write_access. */
2254 rc = iam_txn_dirty(h, p, bh);
2258 rc = iam_txn_add(h, p, c->ic_root_bh);
2262 iam_lock_bh(c->ic_root_bh);
2263 *idle_blocks = cpu_to_le32(blk);
2264 iam_unlock_bh(c->ic_root_bh);
2265 rc = iam_txn_dirty(h, p, c->ic_root_bh);
2267 /* Do NOT release the old bh before the new one is assigned. */
2272 iam_lock_bh(c->ic_root_bh);
2273 *idle_blocks = head->iih_next;
2274 iam_unlock_bh(c->ic_root_bh);
2280 * If the leaf cannot be recycled, we will lose one block for reuse.
2281 * It is not a serious issue, because it is almost the same as not recycling.
2283 static void iam_recycle_leaf(handle_t *h, struct iam_path *p,
2284 struct buffer_head *bh, iam_ptr_t blk)
2286 struct iam_container *c = p->ip_container;
2287 struct inode *inode = c->ic_object;
2288 struct iam_idle_head *head;
2293 mutex_lock(&c->ic_idle_mutex);
2294 if (unlikely(c->ic_idle_failed)) {
2299 idle_blocks = (__u32 *)(c->ic_root_bh->b_data +
2300 c->ic_descr->id_root_gap +
2301 sizeof(struct dx_countlimit));
2302 /* It is the first idle block. */
2303 if (c->ic_idle_bh == NULL) {
2304 rc = iam_install_idle_blocks(h, p, bh, idle_blocks, blk);
2308 head = (struct iam_idle_head *)(c->ic_idle_bh->b_data);
2309 count = le16_to_cpu(head->iih_count);
2310 /* Current ic_idle_bh is full, to be replaced by the leaf. */
2311 if (count == iam_idle_blocks_limit(inode)) {
2312 rc = iam_install_idle_blocks(h, p, bh, idle_blocks, blk);
2316 /* Just add to ic_idle_bh. */
2317 rc = iam_txn_add(h, p, c->ic_idle_bh);
2321 head->iih_blks[count] = cpu_to_le32(blk);
2322 head->iih_count = cpu_to_le16(count + 1);
2323 rc = iam_txn_dirty(h, p, c->ic_idle_bh);
2326 mutex_unlock(&c->ic_idle_mutex);
2328 CWARN("%s: idle blocks failed, will lose the blk %u\n",
2329 osd_ino2name(inode), blk);
2333 * Delete record under iterator.
2335 * precondition: it_state(it) == IAM_IT_ATTACHED &&
2336 * it->ii_flags&IAM_IT_WRITE &&
2338 * postcondition: it_state(it) == IAM_IT_ATTACHED ||
2339 * it_state(it) == IAM_IT_DETACHED
2341 int iam_it_rec_delete(handle_t *h, struct iam_iterator *it)
2344 struct iam_leaf *leaf;
2345 struct iam_path *path;
2347 assert_corr(it_state(it) == IAM_IT_ATTACHED &&
2348 it->ii_flags&IAM_IT_WRITE);
2349 assert_corr(it_at_rec(it));
2351 path = &it->ii_path;
2352 leaf = &path->ip_leaf;
2354 assert_inv(iam_path_check(path));
2356 result = iam_txn_add(h, path, leaf->il_bh);
2358 * no compaction for now.
2361 iam_rec_del(leaf, it->ii_flags&IAM_IT_MOVE);
2362 result = iam_txn_dirty(h, path, leaf->il_bh);
2363 if (result == 0 && iam_leaf_at_end(leaf)) {
2364 struct buffer_head *bh = NULL;
2367 blk = iam_index_shrink(h, path, leaf, &bh);
2368 if (it->ii_flags & IAM_IT_MOVE) {
2369 result = iam_it_next(it);
2375 iam_recycle_leaf(h, path, bh, blk);
2380 assert_inv(iam_path_check(path));
2381 assert_corr(it_state(it) == IAM_IT_ATTACHED ||
2382 it_state(it) == IAM_IT_DETACHED);
2387 * Convert iterator to cookie.
2389 * precondition: it_state(it) == IAM_IT_ATTACHED &&
2390 * iam_path_descr(it->ii_path)->id_key_size <= sizeof(iam_pos_t)
2391 * postcondition: it_state(it) == IAM_IT_ATTACHED
2393 iam_pos_t iam_it_store(const struct iam_iterator *it)
2397 assert_corr(it_state(it) == IAM_IT_ATTACHED);
2398 assert_corr(it_at_rec(it));
2399 assert_corr(iam_it_container(it)->ic_descr->id_ikey_size <=
2403 return *(iam_pos_t *)iam_it_ikey_get(it, (void *)&result);
2407 * Restore iterator from cookie.
2409 * precondition: it_state(it) == IAM_IT_DETACHED && it->ii_flags&IAM_IT_MOVE &&
2410 * iam_path_descr(it->ii_path)->id_key_size <= sizeof(iam_pos_t)
2411 * postcondition: ergo(result == 0, it_state(it) == IAM_IT_ATTACHED &&
2412 * iam_it_store(it) == pos)
2414 int iam_it_load(struct iam_iterator *it, iam_pos_t pos)
2416 assert_corr(it_state(it) == IAM_IT_DETACHED &&
2417 it->ii_flags&IAM_IT_MOVE);
2418 assert_corr(iam_it_container(it)->ic_descr->id_ikey_size <= sizeof pos);
2419 return iam_it_iget(it, (struct iam_ikey *)&pos);
2422 /***********************************************************************/
2424 /***********************************************************************/
2426 static inline int ptr_inside(void *base, size_t size, void *ptr)
2428 return (base <= ptr) && (ptr < base + size);
2431 static int iam_frame_invariant(struct iam_frame *f)
2435 f->bh->b_data != NULL &&
2436 ptr_inside(f->bh->b_data, f->bh->b_size, f->entries) &&
2437 ptr_inside(f->bh->b_data, f->bh->b_size, f->at) &&
2438 f->entries <= f->at);
2441 static int iam_leaf_invariant(struct iam_leaf *l)
2445 l->il_bh->b_data != NULL &&
2446 ptr_inside(l->il_bh->b_data, l->il_bh->b_size, l->il_entries) &&
2447 ptr_inside(l->il_bh->b_data, l->il_bh->b_size, l->il_at) &&
2448 l->il_entries <= l->il_at;
2451 static int iam_path_invariant(struct iam_path *p)
2455 if (p->ip_container == NULL ||
2456 p->ip_indirect < 0 || p->ip_indirect > DX_MAX_TREE_HEIGHT - 1 ||
2457 p->ip_frame != p->ip_frames + p->ip_indirect ||
2458 !iam_leaf_invariant(&p->ip_leaf))
2460 for (i = 0; i < ARRAY_SIZE(p->ip_frames); ++i) {
2461 if (i <= p->ip_indirect) {
2462 if (!iam_frame_invariant(&p->ip_frames[i]))
2469 int iam_it_invariant(struct iam_iterator *it)
2472 (it->ii_state == IAM_IT_DETACHED ||
2473 it->ii_state == IAM_IT_ATTACHED ||
2474 it->ii_state == IAM_IT_SKEWED) &&
2475 !(it->ii_flags & ~(IAM_IT_MOVE | IAM_IT_WRITE)) &&
2476 ergo(it->ii_state == IAM_IT_ATTACHED ||
2477 it->ii_state == IAM_IT_SKEWED,
2478 iam_path_invariant(&it->ii_path) &&
2479 equi(it_at_rec(it), it->ii_state == IAM_IT_SKEWED));
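/*
 * The top-level entry points below (iam_lookup(), iam_insert(),
 * iam_update(), iam_delete()) all follow the same pattern: attach a
 * temporary iterator, position it exactly on @k, act, detach. A minimal
 * usage sketch (container setup and error handling elided; @area is
 * assumed to be a caller-provided buffer large enough for the scratch
 * keys):
 *
 *	struct iam_path_descr *ipd;
 *	int rc;
 *
 *	ipd = iam_ipd_alloc(area, c->ic_descr->id_ikey_size);
 *	rc = iam_insert(handle, c, key, rec, ipd);
 *	if (rc == 0 || rc == -EEXIST)
 *		rc = iam_lookup(c, key, rec_out, ipd);
 *	iam_ipd_free(ipd);
 */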
2483 * Search container @c for record with key @k. If record is found, its data
2484 * are moved into @r.
2486 * Return values: 0: found, -ENOENT: not-found, -ve: error
2488 int iam_lookup(struct iam_container *c, const struct iam_key *k,
2489 struct iam_rec *r, struct iam_path_descr *pd)
2491 struct iam_iterator it;
2494 iam_it_init(&it, c, 0, pd);
2496 result = iam_it_get_exact(&it, k);
2499 * record with required key found, copy it into user buffer
2501 iam_reccpy(&it.ii_path.ip_leaf, r);
2508 * Insert new record @r with key @k into container @c (within context of
 * transaction @h).
2511 * Return values: 0: success, -ve: error, including -EEXIST when record with
2512 * given key is already present.
2514 * postcondition: ergo(result == 0 || result == -EEXIST,
2515 * iam_lookup(c, k, r2) > 0;
2517 int iam_insert(handle_t *h, struct iam_container *c, const struct iam_key *k,
2518 const struct iam_rec *r, struct iam_path_descr *pd)
2520 struct iam_iterator it;
2523 iam_it_init(&it, c, IAM_IT_WRITE, pd);
2525 result = iam_it_get_exact(&it, k);
2526 if (result == -ENOENT)
2527 result = iam_it_rec_insert(h, &it, k, r);
2528 else if (result == 0)
2536 * Update record with the key @k in container @c (within context of
2537 * transaction @h); the new record is given by @r.
2539 * Return values: +1: skip because of the same rec value, 0: success,
2540 * -ve: error, including -ENOENT if no record with the given key is found.
2542 int iam_update(handle_t *h, struct iam_container *c, const struct iam_key *k,
2543 const struct iam_rec *r, struct iam_path_descr *pd)
2545 struct iam_iterator it;
2546 struct iam_leaf *folio;
2549 iam_it_init(&it, c, IAM_IT_WRITE, pd);
2551 result = iam_it_get_exact(&it, k);
2553 folio = &it.ii_path.ip_leaf;
2554 result = iam_leaf_ops(folio)->rec_eq(folio, r);
2556 iam_it_rec_set(h, &it, r);
2566 * Delete existing record with key @k.
2568 * Return values: 0: success, -ENOENT: not-found, -ve: other error.
2570 * postcondition: ergo(result == 0 || result == -ENOENT,
2571 * !iam_lookup(c, k, *));
2573 int iam_delete(handle_t *h, struct iam_container *c, const struct iam_key *k,
2574 struct iam_path_descr *pd)
2576 struct iam_iterator it;
2579 iam_it_init(&it, c, IAM_IT_WRITE, pd);
2581 result = iam_it_get_exact(&it, k);
2583 iam_it_rec_delete(h, &it);
2589 int iam_root_limit(int rootgap, int blocksize, int size)
2594 limit = (blocksize - rootgap) / size;
2595 nlimit = blocksize / size;
2596 if (limit == nlimit)