1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see [sun.com URL with a
18  * copy of GPLv2].
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2011, 2012, Whamcloud, Inc.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * iam.c
37  * Top-level entry points into iam module
38  *
39  * Author: Wang Di <wangdi@clusterfs.com>
40  * Author: Nikita Danilov <nikita@clusterfs.com>
41  */
42
43 /*
44  * iam: big theory statement.
45  *
46  * iam (Index Access Module) is a module providing abstraction of persistent
47  * transactional container on top of generalized ldiskfs htree.
48  *
49  * iam supports:
50  *
51  *     - key, pointer, and record size specifiable per container.
52  *
53  *     - trees taller than 2 index levels.
54  *
55  *     - read/write to existing ldiskfs htree directories as iam containers.
56  *
57  * iam container is a tree, consisting of leaf nodes containing keys and
58  * records stored in this container, and index nodes, containing keys and
59  * pointers to leaf or index nodes.
60  *
61  * iam does not work with keys directly; instead it calls a user-supplied key
62  * comparison function (->dpo_keycmp()).
63  *
64  * Pointers are (currently) interpreted as logical offsets (measured in
65  * blocks) within the underlying flat file on top of which the iam tree lives.
66  *
67  * On-disk format:
68  *
69  * iam mostly tries to reuse existing htree formats.
70  *
71  * Format of index node:
72  *
73  * +-----+-------+-------+-------+------+-------+------------+
74  * |     | count |       |       |      |       |            |
75  * | gap |   /   | entry | entry | .... | entry | free space |
76  * |     | limit |       |       |      |       |            |
77  * +-----+-------+-------+-------+------+-------+------------+
78  *
79  *       gap           this part of node is never accessed by iam code. It
80  *                     exists for binary compatibility with ldiskfs htree (that,
81  *                     in turn, stores fake struct ext2_dirent for ext2
82  *                     compatibility), and to keep some unspecified per-node
83  *                     data. Gap can be different for root and non-root index
84  *                     nodes. Gap size can be specified for each container
85  *                     (gap of 0 is allowed).
86  *
87  *       count/limit   current number of entries in this node, and the maximal
88  *                     number of entries that can fit into node. count/limit
89  *                     has the same size as entry, and is itself counted in
90  *                     count.
91  *
92  *       entry         index entry: consists of a key immediately followed by
93  *                     a pointer to a child node. Size of a key and size of a
94  *                     pointer depends on container. Entry has neither
95  *                     alignment nor padding.
96  *
97  *       free space    portion of the node to which new entries are added
98  *
99  * Entries in index node are sorted by their key value.
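 *
 * As an illustration, with a 4-byte block pointer the count/limit header and
 * a single index entry could be pictured as
 *
 *     struct dx_countlimit { __le16 limit; __le16 count; };
 *     struct { u8 ie_key[keysize]; __le32 ie_block; };
 *
 * where dx_countlimit is the real ldiskfs header structure referenced below,
 * while the entry struct and the ie_key/ie_block names are only a sketch:
 * keysize is the per-container key size, the count/limit header occupies one
 * entry-sized slot, and entries are unaligned, so the code accesses them
 * through helpers such as iam_entry_shift(), dx_get_block() and dx_get_ikey()
 * rather than through a C struct.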
100  *
101  * Format of a leaf node is not specified. Generic iam code accesses leaf
102  * nodes through ->id_leaf methods in struct iam_descr.
103  *
104  * The IAM root block is a special node, which contains the IAM descriptor.
105  * Its on-disk format is:
106  *
107  * +---------+-------+--------+---------+-------+------+-------+------------+
108  * |IAM desc | count |  idle  |         |       |      |       |            |
109  * |(fix/var)|   /   | blocks | padding | entry | .... | entry | free space |
110  * |         | limit |        |         |       |      |       |            |
111  * +---------+-------+--------+---------+-------+------+-------+------------+
112  *
113  * The padding length is calculated with the parameters in the IAM descriptor.
114  *
115  * The field "idle_blocks" records empty leaf nodes: nodes that have not been
116  * released although all of the entries in them have been removed. Ideally,
117  * such idle blocks would be reused when new leaf nodes are needed for new
118  * entries, but that would require the IAM hash functions to map the new
119  * entries onto these idle blocks. Unfortunately, it is hard to design hash
120  * functions for such a clever mapping, especially without hurting insert/
121  * lookup performance.
122  *
123  * Instead, the IAM recycles the empty leaf nodes by putting them into a
124  * per-file pool of idle blocks. Whenever a new leaf node is needed, a block
125  * is taken from this pool first, regardless of where the IAM hash functions
126  * would map the entry.
127  *
128  * The idle blocks pool is organized as a series of tables, and each table
129  * can be described as following (on-disk format):
130  *
131  * +---------+---------+---------+---------+------+---------+-------+
132  * |  magic  |  count  |  next   |  logic  |      |  logic  | free  |
133  * |(16 bits)|(16 bits)|  table  |  blk #  | .... |  blk #  | space |
134  * |         |         |(32 bits)|(32 bits)|      |(32 bits)|       |
135  * +---------+---------+---------+---------+------+---------+-------+
136  *
137  * The logic blk# for the first table is stored in the root node "idle_blocks".
138  *
139  */
140
141 #include <linux/module.h>
142 #include <linux/fs.h>
143 #include <linux/pagemap.h>
144 #include <linux/time.h>
145 #include <linux/fcntl.h>
146 #include <linux/stat.h>
147 #include <linux/string.h>
148 #include <linux/quotaops.h>
149 #include <linux/buffer_head.h>
150 #include "osd_internal.h"
151
152 #include "xattr.h"
153 #include "acl.h"
154
155 /*
156  * List of all registered formats.
157  *
158  * No locking. Callers synchronize.
159  */
160 static CFS_LIST_HEAD(iam_formats);
161
162 void iam_format_register(struct iam_format *fmt)
163 {
164         cfs_list_add(&fmt->if_linkage, &iam_formats);
165 }
166 EXPORT_SYMBOL(iam_format_register);
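
/*
 * An illustrative sketch of how a format registers itself (the names
 * example_guess/example_format are hypothetical; the real formats are
 * registered by iam_lvar_format_init() and iam_lfix_format_init(), called
 * from iam_format_guess() below):
 */
#if 0
static int example_guess(struct iam_container *c);

static struct iam_format example_format = {
        .if_guess = example_guess,
};

static void example_format_init(void)
{
        iam_format_register(&example_format);
}
#endif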
167
168 static struct buffer_head *
169 iam_load_idle_blocks(struct iam_container *c, iam_ptr_t blk)
170 {
171         struct inode *inode = c->ic_object;
172         struct iam_idle_head *head;
173         struct buffer_head *bh;
174         int err;
175
176         LASSERT_SEM_LOCKED(&c->ic_idle_sem);
177
178         if (blk == 0)
179                 return NULL;
180
181         bh = ldiskfs_bread(NULL, inode, blk, 0, &err);
182         if (bh == NULL) {
183                 CERROR("%.16s: cannot load idle blocks, blk = %u, err = %d\n",
184                        LDISKFS_SB(inode->i_sb)->s_es->s_volume_name, blk, err);
185                 c->ic_idle_failed = 1;
186                 return ERR_PTR(err);
187         }
188
189         head = (struct iam_idle_head *)(bh->b_data);
190         if (le16_to_cpu(head->iih_magic) != IAM_IDLE_HEADER_MAGIC) {
191                 CERROR("%.16s: invalid idle block head, blk = %u, magic = %d\n",
192                        LDISKFS_SB(inode->i_sb)->s_es->s_volume_name, blk,
193                        le16_to_cpu(head->iih_magic));
194                 brelse(bh);
195                 c->ic_idle_failed = 1;
196                 return ERR_PTR(-EBADF);
197         }
198
199         return bh;
200 }
201
202 /*
203  * Determine the format of a given container: scan the list of registered
204  * formats and call the ->if_guess() method of each in turn.
205  */
206 static int iam_format_guess(struct iam_container *c)
207 {
208         int result;
209         struct iam_format *fmt;
210
211         /*
212          * XXX temporary initialization hook.
213          */
214         {
215                 static int initialized = 0;
216
217                 if (!initialized) {
218                         iam_lvar_format_init();
219                         iam_lfix_format_init();
220                         initialized = 1;
221                 }
222         }
223
224         result = -ENOENT;
225         cfs_list_for_each_entry(fmt, &iam_formats, if_linkage) {
226                 result = fmt->if_guess(c);
227                 if (result == 0)
228                         break;
229         }
230
231         if (result == 0) {
232                 struct buffer_head *bh;
233                 __u32 *idle_blocks;
234
235                 LASSERT(c->ic_root_bh != NULL);
236
237                 idle_blocks = (__u32 *)(c->ic_root_bh->b_data +
238                                         c->ic_descr->id_root_gap +
239                                         sizeof(struct dx_countlimit));
240                 down(&c->ic_idle_sem);
241                 bh = iam_load_idle_blocks(c, le32_to_cpu(*idle_blocks));
242                 if (bh != NULL && IS_ERR(bh))
243                         result = PTR_ERR(bh);
244                 else
245                         c->ic_idle_bh = bh;
246                 up(&c->ic_idle_sem);
247         }
248
249         return result;
250 }
251
252 /*
253  * Initialize container @c.
254  */
255 int iam_container_init(struct iam_container *c,
256                        struct iam_descr *descr, struct inode *inode)
257 {
258         memset(c, 0, sizeof *c);
259         c->ic_descr  = descr;
260         c->ic_object = inode;
261         init_rwsem(&c->ic_sem);
262         dynlock_init(&c->ic_tree_lock);
263         sema_init(&c->ic_idle_sem, 1);
264         return 0;
265 }
266 EXPORT_SYMBOL(iam_container_init);
267
268 /*
269  * Determine container format.
270  */
271 int iam_container_setup(struct iam_container *c)
272 {
273         return iam_format_guess(c);
274 }
275 EXPORT_SYMBOL(iam_container_setup);
276
277 /*
278  * Finalize container @c, release all resources.
279  */
280 void iam_container_fini(struct iam_container *c)
281 {
282         brelse(c->ic_idle_bh);
283         c->ic_idle_bh = NULL;
284         brelse(c->ic_root_bh);
285         c->ic_root_bh = NULL;
286 }
287 EXPORT_SYMBOL(iam_container_fini);
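
/*
 * A minimal lifecycle sketch for the three calls above (illustrative only;
 * "descr" and "inode" are assumed to be supplied by the caller and error
 * handling is abbreviated):
 */
#if 0
static int example_container_use(struct iam_container *c,
                                 struct iam_descr *descr, struct inode *inode)
{
        int rc;

        rc = iam_container_init(c, descr, inode);
        if (rc != 0)
                return rc;

        rc = iam_container_setup(c);    /* guess on-disk format, read root */
        if (rc == 0) {
                /* ... attach iterators to @c, look up and insert records ... */
        }
        iam_container_fini(c);          /* drop root and idle-block buffers */
        return rc;
}
#endif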
288
289 void iam_path_init(struct iam_path *path, struct iam_container *c,
290                    struct iam_path_descr *pd)
291 {
292         memset(path, 0, sizeof *path);
293         path->ip_container = c;
294         path->ip_frame = path->ip_frames;
295         path->ip_data = pd;
296         path->ip_leaf.il_path = path;
297 }
298
299 static void iam_leaf_fini(struct iam_leaf *leaf);
300
301 void iam_path_release(struct iam_path *path)
302 {
303         int i;
304
305         for (i = 0; i < ARRAY_SIZE(path->ip_frames); i++) {
306                 if (path->ip_frames[i].bh != NULL) {
307                         path->ip_frames[i].at_shifted = 0;
308                         brelse(path->ip_frames[i].bh);
309                         path->ip_frames[i].bh = NULL;
310                 }
311         }
312 }
313
314 void iam_path_fini(struct iam_path *path)
315 {
316         iam_leaf_fini(&path->ip_leaf);
317         iam_path_release(path);
318 }
319
320
321 void iam_path_compat_init(struct iam_path_compat *path, struct inode *inode)
322 {
323         int i;
324
325         path->ipc_hinfo = &path->ipc_hinfo_area;
326         for (i = 0; i < ARRAY_SIZE(path->ipc_scratch); ++i)
327                 path->ipc_descr.ipd_key_scratch[i] =
328                         (struct iam_ikey *)&path->ipc_scratch[i];
329
330         iam_path_init(&path->ipc_path, &path->ipc_container, &path->ipc_descr);
331 }
332
333 void iam_path_compat_fini(struct iam_path_compat *path)
334 {
335         iam_path_fini(&path->ipc_path);
336 }
337
338 /*
339  * Helper function initializing iam_path_descr and its key scratch area.
340  */
341 struct iam_path_descr *iam_ipd_alloc(void *area, int keysize)
342 {
343         struct iam_path_descr *ipd;
344         void *karea;
345         int i;
346
347         ipd = area;
348         karea = ipd + 1;
349         for (i = 0; i < ARRAY_SIZE(ipd->ipd_key_scratch); ++i, karea += keysize)
350                 ipd->ipd_key_scratch[i] = karea;
351         return ipd;
352 }
353 EXPORT_SYMBOL(iam_ipd_alloc);
354
355 void iam_ipd_free(struct iam_path_descr *ipd)
356 {
357 }
358 EXPORT_SYMBOL(iam_ipd_free);
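
/*
 * A sketch of the memory contract of iam_ipd_alloc() (illustrative only: the
 * helper name and the use of kmalloc() are assumptions, the caller may carve
 * the area out of any storage it owns). Note that iam_ipd_free() releases
 * nothing: the caller keeps ownership of the area.
 */
#if 0
static struct iam_path_descr *example_ipd_get(int keysize)
{
        struct iam_path_descr *ipd;
        void *area;

        /* one buffer: the descriptor itself followed by one key-sized
         * scratch slot per ipd_key_scratch[] entry */
        area = kmalloc(sizeof(*ipd) +
                       ARRAY_SIZE(ipd->ipd_key_scratch) * keysize, GFP_NOFS);
        if (area == NULL)
                return NULL;
        return iam_ipd_alloc(area, keysize);
}
#endif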
359
360 int iam_node_read(struct iam_container *c, iam_ptr_t ptr,
361                   handle_t *h, struct buffer_head **bh)
362 {
363         int result = 0;
364
365         /* NB: this can be called by iam_lfix_guess() which is still at a
366          * very early stage, when c->ic_root_bh and c->ic_descr->id_ops
367          * haven't been initialized yet.
368          * Also, we don't have this for IAM dir.
369          */
370         if (c->ic_root_bh != NULL &&
371             c->ic_descr->id_ops->id_root_ptr(c) == ptr) {
372                 get_bh(c->ic_root_bh);
373                 *bh = c->ic_root_bh;
374                 return 0;
375         }
376
377         *bh = ldiskfs_bread(h, c->ic_object, (int)ptr, 0, &result);
378         if (*bh == NULL)
379                 result = -EIO;
380         return result;
381 }
382
383 /*
384  * Return pointer to current leaf record. Pointer is valid while corresponding
385  * leaf node is locked and pinned.
386  */
387 static struct iam_rec *iam_leaf_rec(const struct iam_leaf *leaf)
388 {
389         return iam_leaf_ops(leaf)->rec(leaf);
390 }
391
392 /*
393  * Return pointer to the current leaf key. This function returns pointer to
394  * the key stored in node.
395  *
396  * Caller should assume that returned pointer is only valid while leaf node is
397  * pinned and locked.
398  */
399 static struct iam_key *iam_leaf_key(const struct iam_leaf *leaf)
400 {
401         return iam_leaf_ops(leaf)->key(leaf);
402 }
403
404 static int iam_leaf_key_size(const struct iam_leaf *leaf)
405 {
406         return iam_leaf_ops(leaf)->key_size(leaf);
407 }
408
409 static struct iam_ikey *iam_leaf_ikey(const struct iam_leaf *leaf,
410                                       struct iam_ikey *key)
411 {
412         return iam_leaf_ops(leaf)->ikey(leaf, key);
413 }
414
415 static int iam_leaf_keycmp(const struct iam_leaf *leaf,
416                            const struct iam_key *key)
417 {
418         return iam_leaf_ops(leaf)->key_cmp(leaf, key);
419 }
420
421 static int iam_leaf_keyeq(const struct iam_leaf *leaf,
422                           const struct iam_key *key)
423 {
424         return iam_leaf_ops(leaf)->key_eq(leaf, key);
425 }
426
427 #if LDISKFS_INVARIANT_ON
428 static int iam_leaf_check(struct iam_leaf *leaf);
429 extern int dx_node_check(struct iam_path *p, struct iam_frame *f);
430
431 static int iam_path_check(struct iam_path *p)
432 {
433         int i;
434         int result;
435         struct iam_frame *f;
436         struct iam_descr *param;
437
438         result = 1;
439         param = iam_path_descr(p);
440         for (i = 0; result && i < ARRAY_SIZE(p->ip_frames); ++i) {
441                 f = &p->ip_frames[i];
442                 if (f->bh != NULL) {
443                         result = dx_node_check(p, f);
444                         if (result)
445                                 result = !param->id_ops->id_node_check(p, f);
446                 }
447         }
448         if (result && p->ip_leaf.il_bh != NULL)
449                 result = iam_leaf_check(&p->ip_leaf);
450         if (result == 0) {
451                 ldiskfs_std_error(iam_path_obj(p)->i_sb, result);
452         }
453         return result;
454 }
455 #endif
456
457 static int iam_leaf_load(struct iam_path *path)
458 {
459         iam_ptr_t block;
460         int err;
461         struct iam_container *c;
462         struct buffer_head   *bh;
463         struct iam_leaf      *leaf;
464         struct iam_descr     *descr;
465
466         c     = path->ip_container;
467         leaf  = &path->ip_leaf;
468         descr = iam_path_descr(path);
469         block = path->ip_frame->leaf;
470         if (block == 0) {
471                 /* XXX bug 11027 */
472                 printk(CFS_KERN_EMERG "wrong leaf: %lu %d [%p %p %p]\n",
473                        (long unsigned)path->ip_frame->leaf,
474                        dx_get_count(dx_node_get_entries(path, path->ip_frame)),
475                        path->ip_frames[0].bh, path->ip_frames[1].bh,
476                        path->ip_frames[2].bh);
477         }
478         err   = descr->id_ops->id_node_read(c, block, NULL, &bh);
479         if (err == 0) {
480                 leaf->il_bh = bh;
481                 leaf->il_curidx = block;
482                 err = iam_leaf_ops(leaf)->init(leaf);
483                 assert_inv(ergo(err == 0, iam_leaf_check(leaf)));
484         }
485         return err;
486 }
487
488 static void iam_unlock_htree(struct iam_container *ic,
489                              struct dynlock_handle *lh)
490 {
491         if (lh != NULL)
492                 dynlock_unlock(&ic->ic_tree_lock, lh);
493 }
494
495
496 static void iam_leaf_unlock(struct iam_leaf *leaf)
497 {
498         if (leaf->il_lock != NULL) {
499                 iam_unlock_htree(iam_leaf_container(leaf),
500                                  leaf->il_lock);
501                 do_corr(schedule());
502                 leaf->il_lock = NULL;
503         }
504 }
505
506 static void iam_leaf_fini(struct iam_leaf *leaf)
507 {
508         if (leaf->il_path != NULL) {
509                 iam_leaf_unlock(leaf);
510                 assert_inv(ergo(leaf->il_bh != NULL, iam_leaf_check(leaf)));
511                 iam_leaf_ops(leaf)->fini(leaf);
512                 if (leaf->il_bh) {
513                         brelse(leaf->il_bh);
514                         leaf->il_bh = NULL;
515                         leaf->il_curidx = 0;
516                 }
517         }
518 }
519
520 static void iam_leaf_start(struct iam_leaf *folio)
521 {
522         iam_leaf_ops(folio)->start(folio);
523 }
524
525 void iam_leaf_next(struct iam_leaf *folio)
526 {
527         iam_leaf_ops(folio)->next(folio);
528 }
529
530 static void iam_leaf_rec_add(struct iam_leaf *leaf, const struct iam_key *key,
531                              const struct iam_rec *rec)
532 {
533         iam_leaf_ops(leaf)->rec_add(leaf, key, rec);
534 }
535
536 static void iam_rec_del(struct iam_leaf *leaf, int shift)
537 {
538         iam_leaf_ops(leaf)->rec_del(leaf, shift);
539 }
540
541 int iam_leaf_at_end(const struct iam_leaf *leaf)
542 {
543         return iam_leaf_ops(leaf)->at_end(leaf);
544 }
545
546 void iam_leaf_split(struct iam_leaf *l, struct buffer_head **bh, iam_ptr_t nr)
547 {
548         iam_leaf_ops(l)->split(l, bh, nr);
549 }
550
551 static inline int iam_leaf_empty(struct iam_leaf *l)
552 {
553         return iam_leaf_ops(l)->leaf_empty(l);
554 }
555
556 int iam_leaf_can_add(const struct iam_leaf *l,
557                      const struct iam_key *k, const struct iam_rec *r)
558 {
559         return iam_leaf_ops(l)->can_add(l, k, r);
560 }
561
562 #if LDISKFS_INVARIANT_ON
563 static int iam_leaf_check(struct iam_leaf *leaf)
564 {
565         return 1;
566 #if 0
567         struct iam_lentry    *orig;
568         struct iam_path      *path;
569         struct iam_container *bag;
570         struct iam_ikey       *k0;
571         struct iam_ikey       *k1;
572         int result;
573         int first;
574
575         orig = leaf->il_at;
576         path = iam_leaf_path(leaf);
577         bag  = iam_leaf_container(leaf);
578
579         result = iam_leaf_ops(leaf)->init(leaf);
580         if (result != 0)
581                 return result;
582
583         first = 1;
584         iam_leaf_start(leaf);
585         k0 = iam_path_ikey(path, 0);
586         k1 = iam_path_ikey(path, 1);
587         while (!iam_leaf_at_end(leaf)) {
588                 iam_ikeycpy(bag, k0, k1);
589                 iam_ikeycpy(bag, k1, iam_leaf_ikey(leaf, k1));
590                 if (!first && iam_ikeycmp(bag, k0, k1) > 0) {
591                         return 0;
592                 }
593                 first = 0;
594                 iam_leaf_next(leaf);
595         }
596         leaf->il_at = orig;
597         return 1;
598 #endif
599 }
600 #endif
601
602 static int iam_txn_dirty(handle_t *handle,
603                          struct iam_path *path, struct buffer_head *bh)
604 {
605         int result;
606
607         result = ldiskfs_journal_dirty_metadata(handle, bh);
608         if (result != 0)
609                 ldiskfs_std_error(iam_path_obj(path)->i_sb, result);
610         return result;
611 }
612
613 static int iam_txn_add(handle_t *handle,
614                        struct iam_path *path, struct buffer_head *bh)
615 {
616         int result;
617
618         result = ldiskfs_journal_get_write_access(handle, bh);
619         if (result != 0)
620                 ldiskfs_std_error(iam_path_obj(path)->i_sb, result);
621         return result;
622 }
623
624 /***********************************************************************/
625 /* iterator interface                                                  */
626 /***********************************************************************/
627
628 static enum iam_it_state it_state(const struct iam_iterator *it)
629 {
630         return it->ii_state;
631 }
632
633 /*
634  * Helper function returning the container an iterator is attached to.
635  */
636 static struct iam_container *iam_it_container(const struct iam_iterator *it)
637 {
638         return it->ii_path.ip_container;
639 }
640
641 static inline int it_keycmp(const struct iam_iterator *it,
642                             const struct iam_key *k)
643 {
644         return iam_leaf_keycmp(&it->ii_path.ip_leaf, k);
645 }
646
647 static inline int it_keyeq(const struct iam_iterator *it,
648                            const struct iam_key *k)
649 {
650         return iam_leaf_keyeq(&it->ii_path.ip_leaf, k);
651 }
652
653 static int it_ikeycmp(const struct iam_iterator *it, const struct iam_ikey *ik)
654 {
655         return iam_ikeycmp(it->ii_path.ip_container,
656                            iam_leaf_ikey(&it->ii_path.ip_leaf,
657                                          iam_path_ikey(&it->ii_path, 0)), ik);
658 }
659
660 static inline int it_at_rec(const struct iam_iterator *it)
661 {
662         return !iam_leaf_at_end(&it->ii_path.ip_leaf);
663 }
664
665 static inline int it_before(const struct iam_iterator *it)
666 {
667         return it_state(it) == IAM_IT_SKEWED && it_at_rec(it);
668 }
669
670 /*
671  * Helper wrapper around iam_it_get(): returns 0 (success) only when record
672  * with exactly the same key as asked is found.
673  */
674 static int iam_it_get_exact(struct iam_iterator *it, const struct iam_key *k)
675 {
676         int result;
677
678         result = iam_it_get(it, k);
679         if (result > 0)
680                 result = 0;
681         else if (result == 0)
682                 /*
683                  * Return -ENOENT if cursor is located above record with a key
684                  * different from one specified, or in the empty leaf.
685                  *
686                  * XXX returning -ENOENT only works if iam_it_get() never
687                  * returns -ENOENT as a legitimate error.
688                  */
689                 result = -ENOENT;
690         return result;
691 }
692
693 void iam_container_write_lock(struct iam_container *ic)
694 {
695         down_write(&ic->ic_sem);
696 }
697
698 void iam_container_write_unlock(struct iam_container *ic)
699 {
700         up_write(&ic->ic_sem);
701 }
702
703 void iam_container_read_lock(struct iam_container *ic)
704 {
705         down_read(&ic->ic_sem);
706 }
707
708 void iam_container_read_unlock(struct iam_container *ic)
709 {
710         up_read(&ic->ic_sem);
711 }
712
713 /*
714  * Initialize iterator to IAM_IT_DETACHED state.
715  *
716  * postcondition: it_state(it) == IAM_IT_DETACHED
717  */
718 int  iam_it_init(struct iam_iterator *it, struct iam_container *c, __u32 flags,
719                  struct iam_path_descr *pd)
720 {
721         memset(it, 0, sizeof *it);
722         it->ii_flags  = flags;
723         it->ii_state  = IAM_IT_DETACHED;
724         iam_path_init(&it->ii_path, c, pd);
725         return 0;
726 }
727 EXPORT_SYMBOL(iam_it_init);
728
729 /*
730  * Finalize iterator and release all resources.
731  *
732  * precondition: it_state(it) == IAM_IT_DETACHED
733  */
734 void iam_it_fini(struct iam_iterator *it)
735 {
736         assert_corr(it_state(it) == IAM_IT_DETACHED);
737         iam_path_fini(&it->ii_path);
738 }
739 EXPORT_SYMBOL(iam_it_fini);
740
741 /*
742  * These locking primitives are used to protect parts of the directory's
743  * htree. The protection unit is a block: either a leaf or an index node.
744  */
745 static struct dynlock_handle *iam_lock_htree(struct iam_container *ic,
746                                              unsigned long value,
747                                              enum dynlock_type lt)
748 {
749         return dynlock_lock(&ic->ic_tree_lock, value, lt, GFP_NOFS);
750 }
751
752 int iam_index_lock(struct iam_path *path, struct dynlock_handle **lh)
753 {
754         struct iam_frame *f;
755
756         for (f = path->ip_frame; f >= path->ip_frames; --f, ++lh) {
757                 do_corr(schedule());
758                 *lh = iam_lock_htree(path->ip_container, f->curidx, DLT_READ);
759                 if (*lh == NULL)
760                         return -ENOMEM;
761         }
762         return 0;
763 }
764
765 /*
766  * Fast check for frame consistency.
767  */
768 static int iam_check_fast(struct iam_path *path, struct iam_frame *frame)
769 {
770         struct iam_container *bag;
771         struct iam_entry *next;
772         struct iam_entry *last;
773         struct iam_entry *entries;
774         struct iam_entry *at;
775
776         bag     = path->ip_container;
777         at      = frame->at;
778         entries = frame->entries;
779         last    = iam_entry_shift(path, entries, dx_get_count(entries) - 1);
780
781         if (unlikely(at > last))
782                 return -EAGAIN;
783
784         if (unlikely(dx_get_block(path, at) != frame->leaf))
785                 return -EAGAIN;
786
787         if (unlikely(iam_ikeycmp(bag, iam_ikey_at(path, at),
788                                  path->ip_ikey_target) > 0))
789                 return -EAGAIN;
790
791         next = iam_entry_shift(path, at, +1);
792         if (next <= last) {
793                 if (unlikely(iam_ikeycmp(bag, iam_ikey_at(path, next),
794                                          path->ip_ikey_target) <= 0))
795                         return -EAGAIN;
796         }
797         return 0;
798 }
799
800 int dx_index_is_compat(struct iam_path *path)
801 {
802         return iam_path_descr(path) == NULL;
803 }
804
805 /*
806  * iam_find_position
807  *
808  * Search for the position of the specified hash (index key) in an index node.
809  *
810  */
811
812 struct iam_entry *iam_find_position(struct iam_path *path,
813                                    struct iam_frame *frame)
814 {
815         int count;
816         struct iam_entry *p;
817         struct iam_entry *q;
818         struct iam_entry *m;
819
820         count = dx_get_count(frame->entries);
821         assert_corr(count && count <= dx_get_limit(frame->entries));
822         p = iam_entry_shift(path, frame->entries,
823                             dx_index_is_compat(path) ? 1 : 2);
824         q = iam_entry_shift(path, frame->entries, count - 1);
825         while (p <= q) {
826                 m = iam_entry_shift(path, p, iam_entry_diff(path, q, p) / 2);
827                 if (iam_ikeycmp(path->ip_container, iam_ikey_at(path, m),
828                                 path->ip_ikey_target) > 0)
829                         q = iam_entry_shift(path, m, -1);
830                 else
831                         p = iam_entry_shift(path, m, +1);
832         }
833         return iam_entry_shift(path, p, -1);
834 }
835
836
837
838 static iam_ptr_t iam_find_ptr(struct iam_path *path, struct iam_frame *frame)
839 {
840         return dx_get_block(path, iam_find_position(path, frame));
841 }
842
843 void iam_insert_key(struct iam_path *path, struct iam_frame *frame,
844                     const struct iam_ikey *key, iam_ptr_t ptr)
845 {
846         struct iam_entry *entries = frame->entries;
847         struct iam_entry *new = iam_entry_shift(path, frame->at, +1);
848         int count = dx_get_count(entries);
849
850         /*
851          * Unfortunately we cannot assert this, as this function is sometimes
852          * called by VFS under i_sem and without pdirops lock.
853          */
854         assert_corr(1 || iam_frame_is_locked(path, frame));
855         assert_corr(count < dx_get_limit(entries));
856         assert_corr(frame->at < iam_entry_shift(path, entries, count));
857         assert_inv(dx_node_check(path, frame));
858
859         memmove(iam_entry_shift(path, new, 1), new,
860                 (char *)iam_entry_shift(path, entries, count) - (char *)new);
861         dx_set_ikey(path, new, key);
862         dx_set_block(path, new, ptr);
863         dx_set_count(entries, count + 1);
864         assert_inv(dx_node_check(path, frame));
865 }
866
867 void iam_insert_key_lock(struct iam_path *path, struct iam_frame *frame,
868                          const struct iam_ikey *key, iam_ptr_t ptr)
869 {
870         iam_lock_bh(frame->bh);
871         iam_insert_key(path, frame, key, ptr);
872         iam_unlock_bh(frame->bh);
873 }
874 /*
875  * returns 0 if path was unchanged, -EAGAIN otherwise.
876  */
877 static int iam_check_path(struct iam_path *path, struct iam_frame *frame)
878 {
879         int equal;
880
881         iam_lock_bh(frame->bh);
882         equal = iam_check_fast(path, frame) == 0 ||
883                 frame->leaf == iam_find_ptr(path, frame);
884         DX_DEVAL(iam_lock_stats.dls_bh_again += !equal);
885         iam_unlock_bh(frame->bh);
886
887         return equal ? 0 : -EAGAIN;
888 }
889
890 static int iam_lookup_try(struct iam_path *path)
891 {
892         u32 ptr;
893         int err = 0;
894         int i;
895
896         struct iam_descr *param;
897         struct iam_frame *frame;
898         struct iam_container *c;
899
900         param = iam_path_descr(path);
901         c = path->ip_container;
902
903         ptr = param->id_ops->id_root_ptr(c);
904         for (frame = path->ip_frames, i = 0; i <= path->ip_indirect;
905              ++frame, ++i) {
906                 err = param->id_ops->id_node_read(c, (iam_ptr_t)ptr, NULL,
907                                                   &frame->bh);
908                 do_corr(schedule());
909
910                 iam_lock_bh(frame->bh);
911                 /*
912                  * node must be initialized under bh lock because concurrent
913                  * creation procedure may change it and iam_lookup_try() will
914                  * see obsolete tree height. -bzzz
915                  */
916                 if (err != 0)
917                         break;
918
919                 if (LDISKFS_INVARIANT_ON) {
920                         err = param->id_ops->id_node_check(path, frame);
921                         if (err != 0)
922                                 break;
923                 }
924
925                 err = param->id_ops->id_node_load(path, frame);
926                 if (err != 0)
927                         break;
928
929                 assert_inv(dx_node_check(path, frame));
930                 /*
931                  * splitting may change the root index block and move the hash
932                  * we're looking for into another index block, so we have to
933                  * check for this situation and repeat from the beginning if
934                  * the path got changed -bzzz
935                  */
936                 if (i > 0) {
937                         err = iam_check_path(path, frame - 1);
938                         if (err != 0)
939                                 break;
940                 }
941
942                 frame->at = iam_find_position(path, frame);
943                 frame->curidx = ptr;
944                 frame->leaf = ptr = dx_get_block(path, frame->at);
945
946                 iam_unlock_bh(frame->bh);
947                 do_corr(schedule());
948         }
949         if (err != 0)
950                 iam_unlock_bh(frame->bh);
951         path->ip_frame = --frame;
952         return err;
953 }
954
955 static int __iam_path_lookup(struct iam_path *path)
956 {
957         int err;
958         int i;
959
960         for (i = 0; i < DX_MAX_TREE_HEIGHT; ++ i)
961                 assert(path->ip_frames[i].bh == NULL);
962
963         do {
964                 err = iam_lookup_try(path);
965                 do_corr(schedule());
966                 if (err != 0)
967                         iam_path_fini(path);
968         } while (err == -EAGAIN);
969
970         return err;
971 }
972
973 /*
974  * returns 0 if path was unchanged, -EAGAIN otherwise.
975  */
976 static int iam_check_full_path(struct iam_path *path, int search)
977 {
978         struct iam_frame *bottom;
979         struct iam_frame *scan;
980         int i;
981         int result;
982
983         do_corr(schedule());
984
985         for (bottom = path->ip_frames, i = 0;
986              i < DX_MAX_TREE_HEIGHT && bottom->bh != NULL; ++bottom, ++i) {
987                 ; /* find last filled in frame */
988         }
989
990         /*
991          * Lock frames, bottom to top.
992          */
993         for (scan = bottom - 1; scan >= path->ip_frames; --scan)
994                 iam_lock_bh(scan->bh);
995         /*
996          * Check them top to bottom.
997          */
998         result = 0;
999         for (scan = path->ip_frames; scan < bottom; ++scan) {
1000                 struct iam_entry *pos;
1001
1002                 if (search) {
1003                         if (iam_check_fast(path, scan) == 0)
1004                                 continue;
1005
1006                         pos = iam_find_position(path, scan);
1007                         if (scan->leaf != dx_get_block(path, pos)) {
1008                                 result = -EAGAIN;
1009                                 break;
1010                         }
1011                         scan->at = pos;
1012                 } else {
1013                         pos = iam_entry_shift(path, scan->entries,
1014                                               dx_get_count(scan->entries) - 1);
1015                         if (scan->at > pos ||
1016                             scan->leaf != dx_get_block(path, scan->at)) {
1017                                 result = -EAGAIN;
1018                                 break;
1019                         }
1020                 }
1021         }
1022
1023         /*
1024          * Unlock top to bottom.
1025          */
1026         for (scan = path->ip_frames; scan < bottom; ++scan)
1027                 iam_unlock_bh(scan->bh);
1028         DX_DEVAL(iam_lock_stats.dls_bh_full_again += !!result);
1029         do_corr(schedule());
1030
1031         return result;
1032 }
1033
1034
1035 /*
1036  * Performs path lookup and returns with found leaf (if any) locked by htree
1037  * lock.
1038  */
1039 int iam_lookup_lock(struct iam_path *path,
1040                    struct dynlock_handle **dl, enum dynlock_type lt)
1041 {
1042         int result;
1043         struct inode *dir;
1044
1045         dir = iam_path_obj(path);
1046         while ((result = __iam_path_lookup(path)) == 0) {
1047                 do_corr(schedule());
1048                 *dl = iam_lock_htree(path->ip_container, path->ip_frame->leaf,
1049                                      lt);
1050                 if (*dl == NULL) {
1051                         iam_path_fini(path);
1052                         result = -ENOMEM;
1053                         break;
1054                 }
1055                 do_corr(schedule());
1056                 /*
1057                  * while we were locking it, the leaf we just found may have
1058                  * been split, so we need to check for this -bzzz
1059                  */
1060                 if (iam_check_full_path(path, 1) == 0)
1061                         break;
1062                 iam_unlock_htree(path->ip_container, *dl);
1063                 *dl = NULL;
1064                 iam_path_fini(path);
1065         }
1066         return result;
1067 }
1068 /*
1069  * Performs tree top-to-bottom traversal starting from root, and loads leaf
1070  * node.
1071  */
1072 static int iam_path_lookup(struct iam_path *path, int index)
1073 {
1074         struct iam_container *c;
1075         struct iam_descr *descr;
1076         struct iam_leaf  *leaf;
1077         int result;
1078
1079         c = path->ip_container;
1080         leaf = &path->ip_leaf;
1081         descr = iam_path_descr(path);
1082         result = iam_lookup_lock(path, &leaf->il_lock, DLT_WRITE);
1083         assert_inv(iam_path_check(path));
1084         do_corr(schedule());
1085         if (result == 0) {
1086                 result = iam_leaf_load(path);
1087                 assert_inv(ergo(result == 0, iam_leaf_check(leaf)));
1088                 if (result == 0) {
1089                         do_corr(schedule());
1090                         if (index)
1091                                 result = iam_leaf_ops(leaf)->
1092                                         ilookup(leaf, path->ip_ikey_target);
1093                         else
1094                                 result = iam_leaf_ops(leaf)->
1095                                         lookup(leaf, path->ip_key_target);
1096                         do_corr(schedule());
1097                 }
1098                 if (result < 0)
1099                         iam_leaf_unlock(leaf);
1100         }
1101         return result;
1102 }
1103
1104 /*
1105  * Common part of iam_it_{i,}get().
1106  */
1107 static int __iam_it_get(struct iam_iterator *it, int index)
1108 {
1109         int result;
1110         assert_corr(it_state(it) == IAM_IT_DETACHED);
1111
1112         result = iam_path_lookup(&it->ii_path, index);
1113         if (result >= 0) {
1114                 int collision;
1115
1116                 collision = result & IAM_LOOKUP_LAST;
1117                 switch (result & ~IAM_LOOKUP_LAST) {
1118                 case IAM_LOOKUP_EXACT:
1119                         result = +1;
1120                         it->ii_state = IAM_IT_ATTACHED;
1121                         break;
1122                 case IAM_LOOKUP_OK:
1123                         result = 0;
1124                         it->ii_state = IAM_IT_ATTACHED;
1125                         break;
1126                 case IAM_LOOKUP_BEFORE:
1127                 case IAM_LOOKUP_EMPTY:
1128                         result = 0;
1129                         it->ii_state = IAM_IT_SKEWED;
1130                         break;
1131                 default:
1132                         assert(0);
1133                 }
1134                 result |= collision;
1135         }
1136         /*
1137          * See iam_it_get_exact() for explanation.
1138          */
1139         assert_corr(result != -ENOENT);
1140         return result;
1141 }
1142
1143 /*
1144  * The correct hash was found, but not the same key; iterate through the
1145  * hash collision chain, looking for the correct record.
1146  */
1147 static int iam_it_collision(struct iam_iterator *it)
1148 {
1149         int result;
1150
1151         assert(ergo(it_at_rec(it), !it_keyeq(it, it->ii_path.ip_key_target)));
1152
1153         while ((result = iam_it_next(it)) == 0) {
1154                 do_corr(schedule());
1155                 if (it_ikeycmp(it, it->ii_path.ip_ikey_target) != 0)
1156                         return -ENOENT;
1157                 if (it_keyeq(it, it->ii_path.ip_key_target))
1158                         return 0;
1159         }
1160         return result;
1161 }
1162
1163 /*
1164  * Attach iterator. After successful completion, @it points to record with
1165  * least key not larger than @k.
1166  *
1167  * Return value: 0: positioned on existing record,
1168  *             +ve: exact position found,
1169  *             -ve: error.
1170  *
1171  * precondition:  it_state(it) == IAM_IT_DETACHED
1172  * postcondition: ergo(result == 0 && it_state(it) == IAM_IT_ATTACHED,
1173  *                     it_keycmp(it, k) <= 0)
1174  */
1175 int iam_it_get(struct iam_iterator *it, const struct iam_key *k)
1176 {
1177         int result;
1178         assert_corr(it_state(it) == IAM_IT_DETACHED);
1179
1180         it->ii_path.ip_ikey_target = NULL;
1181         it->ii_path.ip_key_target  = k;
1182
1183         result = __iam_it_get(it, 0);
1184
1185         if (result == IAM_LOOKUP_LAST) {
1186                 result = iam_it_collision(it);
1187                 if (result != 0) {
1188                         iam_it_put(it);
1189                         iam_it_fini(it);
1190                         result = __iam_it_get(it, 0);
1191                 } else
1192                         result = +1;
1193         }
1194         if (result > 0)
1195                 result &= ~IAM_LOOKUP_LAST;
1196
1197         assert_corr(ergo(result > 0, it_keycmp(it, k) == 0));
1198         assert_corr(ergo(result == 0 && it_state(it) == IAM_IT_ATTACHED,
1199                          it_keycmp(it, k) <= 0));
1200         return result;
1201 }
1202 EXPORT_SYMBOL(iam_it_get);
1203
1204 /*
1205  * Attach iterator by index key.
1206  */
1207 static int iam_it_iget(struct iam_iterator *it, const struct iam_ikey *k)
1208 {
1209         assert_corr(it_state(it) == IAM_IT_DETACHED);
1210
1211         it->ii_path.ip_ikey_target = k;
1212         return __iam_it_get(it, 1) & ~IAM_LOOKUP_LAST;
1213 }
1214
1215 /*
1216  * Attach iterator, and assure it points to the record (not skewed).
1217  *
1218  * Return value: 0: positioned on existing record,
1219  *             +ve: exact position found,
1220  *             -ve: error.
1221  *
1222  * precondition:  it_state(it) == IAM_IT_DETACHED &&
1223  *                !(it->ii_flags&IAM_IT_WRITE)
1224  * postcondition: ergo(result == 0, it_state(it) == IAM_IT_ATTACHED)
1225  */
1226 int iam_it_get_at(struct iam_iterator *it, const struct iam_key *k)
1227 {
1228         int result;
1229         assert_corr(it_state(it) == IAM_IT_DETACHED &&
1230                     !(it->ii_flags&IAM_IT_WRITE));
1231         result = iam_it_get(it, k);
1232         if (result == 0) {
1233                 if (it_state(it) != IAM_IT_ATTACHED) {
1234                         assert_corr(it_state(it) == IAM_IT_SKEWED);
1235                         result = iam_it_next(it);
1236                 }
1237         }
1238         assert_corr(ergo(result >= 0, it_state(it) == IAM_IT_ATTACHED));
1239         return result;
1240 }
1241 EXPORT_SYMBOL(iam_it_get_at);
1242
1243 /*
1244  * Duplicates iterator.
1245  *
1246  * postcondition: it_state(dst) == it_state(src) &&
1247  *                iam_it_container(dst) == iam_it_container(src) &&
1248  *                dst->ii_flags == src->ii_flags &&
1249  *                ergo(it_state(src) == IAM_IT_ATTACHED,
1250  *                     iam_it_rec_get(dst) == iam_it_rec_get(src) &&
1251  *                     iam_it_key_get(dst) == iam_it_key_get(src))
1252  */
1253 void iam_it_dup(struct iam_iterator *dst, const struct iam_iterator *src)
1254 {
1255         dst->ii_flags     = src->ii_flags;
1256         dst->ii_state     = src->ii_state;
1257         /* XXX not yet. iam_path_dup(&dst->ii_path, &src->ii_path); */
1258         /*
1259          * XXX: duplicate lock.
1260          */
1261         assert_corr(it_state(dst) == it_state(src));
1262         assert_corr(iam_it_container(dst) == iam_it_container(src));
1263         assert_corr(dst->ii_flags == src->ii_flags);
1264         assert_corr(ergo(it_state(src) == IAM_IT_ATTACHED,
1265                     iam_it_rec_get(dst) == iam_it_rec_get(src) &&
1266                     iam_it_key_get(dst) == iam_it_key_get(src)));
1267
1268 }
1269
1270 /*
1271  * Detach iterator. Does nothing in the detached state.
1272  *
1273  * postcondition: it_state(it) == IAM_IT_DETACHED
1274  */
1275 void iam_it_put(struct iam_iterator *it)
1276 {
1277         if (it->ii_state != IAM_IT_DETACHED) {
1278                 it->ii_state = IAM_IT_DETACHED;
1279                 iam_leaf_fini(&it->ii_path.ip_leaf);
1280         }
1281 }
1282 EXPORT_SYMBOL(iam_it_put);
1283
1284 static struct iam_ikey *iam_it_ikey_get(const struct iam_iterator *it,
1285                                         struct iam_ikey *ikey);
1286
1287
1288 /*
1289  * This function increments the frame pointer to search the next leaf
1290  * block, and reads in the necessary intervening nodes if the search
1291  * should be necessary.  Whether or not the search is necessary is
1292  * controlled by the hash parameter.  If the hash value is even, then
1293  * the search is only continued if the next block starts with that
1294  * hash value.  This is used if we are searching for a specific file.
1295  *
1296  * If the hash value is HASH_NB_ALWAYS, then always go to the next block.
1297  *
1298  * This function returns 1 if the caller should continue to search,
1299  * or 0 if it should not.  If there is an error reading one of the
1300  * index blocks, it will return a negative error code.
1301  *
1302  * If start_hash is non-null, it will be filled in with the starting
1303  * hash of the next page.
1304  */
1305 static int iam_htree_advance(struct inode *dir, __u32 hash,
1306                               struct iam_path *path, __u32 *start_hash,
1307                               int compat)
1308 {
1309         struct iam_frame *p;
1310         struct buffer_head *bh;
1311         int err, num_frames = 0;
1312         __u32 bhash;
1313
1314         p = path->ip_frame;
1315         /*
1316          * Find the next leaf page by incrementing the frame pointer.
1317          * If we run out of entries in the interior node, loop around and
1318          * increment pointer in the parent node.  When we break out of
1319          * this loop, num_frames indicates the number of interior
1320          * nodes need to be read.
1321          */
1322         while (1) {
1323                 do_corr(schedule());
1324                 iam_lock_bh(p->bh);
1325                 if (p->at_shifted)
1326                         p->at_shifted = 0;
1327                 else
1328                         p->at = iam_entry_shift(path, p->at, +1);
1329                 if (p->at < iam_entry_shift(path, p->entries,
1330                                             dx_get_count(p->entries))) {
1331                         p->leaf = dx_get_block(path, p->at);
1332                         iam_unlock_bh(p->bh);
1333                         break;
1334                 }
1335                 iam_unlock_bh(p->bh);
1336                 if (p == path->ip_frames)
1337                         return 0;
1338                 num_frames++;
1339                 --p;
1340         }
1341
1342         if (compat) {
1343                 /*
1344                  * Htree hash magic.
1345                  *
1346                  * If the hash is 1, then continue only if the next page
1347                  * has a continuation hash of any value.  This is used for
1348                  * readdir handling.  Otherwise, check to see if the hash
1349                  * matches the desired continuation hash.  If it doesn't,
1350                  * return since there's no point to read in the successive
1351                  * index pages.
1352                  */
1353                 dx_get_ikey(path, p->at, (struct iam_ikey *)&bhash);
1354                 if (start_hash)
1355                         *start_hash = bhash;
1356                 if ((hash & 1) == 0) {
1357                         if ((bhash & ~1) != hash)
1358                                 return 0;
1359                 }
1360         }
1361         /*
1362          * If the hash is HASH_NB_ALWAYS, we always go to the next
1363          * block so no check is necessary
1364          */
1365         while (num_frames--) {
1366                 iam_ptr_t idx;
1367
1368                 do_corr(schedule());
1369                 iam_lock_bh(p->bh);
1370                 idx = p->leaf = dx_get_block(path, p->at);
1371                 iam_unlock_bh(p->bh);
1372                 err = iam_path_descr(path)->id_ops->
1373                         id_node_read(path->ip_container, idx, NULL, &bh);
1374                 if (err != 0)
1375                         return err; /* Failure */
1376                 ++p;
1377                 brelse(p->bh);
1378                 assert_corr(p->bh != bh);
1379                 p->bh = bh;
1380                 p->entries = dx_node_get_entries(path, p);
1381                 p->at = iam_entry_shift(path, p->entries, !compat);
1382                 assert_corr(p->curidx != idx);
1383                 p->curidx = idx;
1384                 iam_lock_bh(p->bh);
1385                 assert_corr(p->leaf != dx_get_block(path, p->at));
1386                 p->leaf = dx_get_block(path, p->at);
1387                 iam_unlock_bh(p->bh);
1388                 assert_inv(dx_node_check(path, p));
1389         }
1390         return 1;
1391 }
1392
1393
1394 static inline int iam_index_advance(struct iam_path *path)
1395 {
1396         return iam_htree_advance(iam_path_obj(path), 0, path, NULL, 0);
1397 }
1398
1399 static void iam_unlock_array(struct iam_container *ic,
1400                              struct dynlock_handle **lh)
1401 {
1402         int i;
1403
1404         for (i = 0; i < DX_MAX_TREE_HEIGHT; ++i, ++lh) {
1405                 if (*lh != NULL) {
1406                         iam_unlock_htree(ic, *lh);
1407                         *lh = NULL;
1408                 }
1409         }
1410 }
1411 /*
1412  * Advance index part of @path to point to the next leaf. Returns 1 on
1413  * success, and 0 when the end of the container is reached. Leaf node is locked.
1414  */
1415 int iam_index_next(struct iam_container *c, struct iam_path *path)
1416 {
1417         iam_ptr_t cursor;
1418         struct dynlock_handle *lh[DX_MAX_TREE_HEIGHT] = { 0, };
1419         int result;
1420         struct inode *object;
1421
1422         /*
1423          * Locking for iam_index_next()... is to be described.
1424          */
1425
1426         object = c->ic_object;
1427         cursor = path->ip_frame->leaf;
1428
1429         while (1) {
1430                 result = iam_index_lock(path, lh);
1431                 do_corr(schedule());
1432                 if (result < 0)
1433                         break;
1434
1435                 result = iam_check_full_path(path, 0);
1436                 if (result == 0 && cursor == path->ip_frame->leaf) {
1437                         result = iam_index_advance(path);
1438
1439                         assert_corr(result == 0 ||
1440                                     cursor != path->ip_frame->leaf);
1441                         break;
1442                 }
1443                 do {
1444                         iam_unlock_array(c, lh);
1445
1446                         iam_path_release(path);
1447                         do_corr(schedule());
1448
1449                         result = __iam_path_lookup(path);
1450                         if (result < 0)
1451                                 break;
1452
1453                         while (path->ip_frame->leaf != cursor) {
1454                                 do_corr(schedule());
1455
1456                                 result = iam_index_lock(path, lh);
1457                                 do_corr(schedule());
1458                                 if (result < 0)
1459                                         break;
1460
1461                                 result = iam_check_full_path(path, 0);
1462                                 if (result != 0)
1463                                         break;
1464
1465                                 result = iam_index_advance(path);
1466                                 if (result == 0) {
1467                                         CERROR("cannot find cursor : %u\n",
1468                                                 cursor);
1469                                         result = -EIO;
1470                                 }
1471                                 if (result < 0)
1472                                         break;
1473                                 result = iam_check_full_path(path, 0);
1474                                 if (result != 0)
1475                                         break;
1476                                 iam_unlock_array(c, lh);
1477                         }
1478                 } while (result == -EAGAIN);
1479                 if (result < 0)
1480                         break;
1481         }
1482         iam_unlock_array(c, lh);
1483         return result;
1484 }
1485
1486 /*
1487  * Move iterator one record right.
1488  *
1489  * Return value: 0: success,
1490  *              +1: end of container reached
1491  *             -ve: error
1492  *
1493  * precondition:  (it_state(it) == IAM_IT_ATTACHED ||
1494  *                 it_state(it) == IAM_IT_SKEWED) && it->ii_flags&IAM_IT_MOVE
1495  * postcondition: ergo(result == 0, it_state(it) == IAM_IT_ATTACHED) &&
1496  *                ergo(result >  0, it_state(it) == IAM_IT_DETACHED)
1497  */
1498 int iam_it_next(struct iam_iterator *it)
1499 {
1500         int result;
1501         struct iam_path      *path;
1502         struct iam_leaf      *leaf;
1503         struct inode         *obj;
1504         do_corr(struct iam_ikey *ik_orig);
1505
1506         /* assert_corr(it->ii_flags&IAM_IT_MOVE); */
1507         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1508                     it_state(it) == IAM_IT_SKEWED);
1509
1510         path = &it->ii_path;
1511         leaf = &path->ip_leaf;
1512         obj  = iam_path_obj(path);
1513
1514         assert_corr(iam_leaf_is_locked(leaf));
1515
1516         result = 0;
1517         do_corr(ik_orig = it_at_rec(it) ?
1518                 iam_it_ikey_get(it, iam_path_ikey(path, 2)) : NULL);
1519         if (it_before(it)) {
1520                 assert_corr(!iam_leaf_at_end(leaf));
1521                 it->ii_state = IAM_IT_ATTACHED;
1522         } else {
1523                 if (!iam_leaf_at_end(leaf))
1524                         /* advance within leaf node */
1525                         iam_leaf_next(leaf);
1526                 /*
1527                  * multiple iterations may be necessary due to empty leaves.
1528                  */
1529                 while (result == 0 && iam_leaf_at_end(leaf)) {
1530                         do_corr(schedule());
1531                         /* advance index portion of the path */
1532                         result = iam_index_next(iam_it_container(it), path);
1533                         assert_corr(iam_leaf_is_locked(leaf));
1534                         if (result == 1) {
1535                                 struct dynlock_handle *lh;
1536                                 lh = iam_lock_htree(iam_it_container(it),
1537                                                     path->ip_frame->leaf,
1538                                                     DLT_WRITE);
1539                                 if (lh != NULL) {
1540                                         iam_leaf_fini(leaf);
1541                                         leaf->il_lock = lh;
1542                                         result = iam_leaf_load(path);
1543                                         if (result == 0)
1544                                                 iam_leaf_start(leaf);
1545                                 } else
1546                                         result = -ENOMEM;
1547                         } else if (result == 0)
1548                                 /* end of container reached */
1549                                 result = +1;
1550                         if (result != 0)
1551                                 iam_it_put(it);
1552                 }
1553                 if (result == 0)
1554                         it->ii_state = IAM_IT_ATTACHED;
1555         }
1556         assert_corr(ergo(result == 0, it_state(it) == IAM_IT_ATTACHED));
1557         assert_corr(ergo(result >  0, it_state(it) == IAM_IT_DETACHED));
1558         assert_corr(ergo(result == 0 && ik_orig != NULL,
1559                          it_ikeycmp(it, ik_orig) >= 0));
1560         return result;
1561 }
1562 EXPORT_SYMBOL(iam_it_next);
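
/*
 * Illustrative usage sketch (not part of the original source): a typical
 * scan that positions the iterator on an exact key and then walks right to
 * the end of the container, following the iam_it_next() return convention
 * documented above (0: more records, +1: end of container, -ve: error).
 * The use_record() consumer is hypothetical; "c", "pd" and "k" are a
 * container, path descriptor and key set up by the caller.
 *
 *     struct iam_iterator it;
 *     int result;
 *
 *     iam_it_init(&it, c, IAM_IT_MOVE, pd);
 *     result = iam_it_get_exact(&it, k);
 *     while (result == 0) {
 *             use_record(iam_it_key_get(&it), iam_it_rec_get(&it));
 *             result = iam_it_next(&it);
 *     }
 *     iam_it_put(&it);
 *     iam_it_fini(&it);
 */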
1563
1564 /*
1565  * Return pointer to the record under iterator.
1566  *
1567  * precondition:  it_state(it) == IAM_IT_ATTACHED && it_at_rec(it)
1568  * postcondition: it_state(it) == IAM_IT_ATTACHED
1569  */
1570 struct iam_rec *iam_it_rec_get(const struct iam_iterator *it)
1571 {
1572         assert_corr(it_state(it) == IAM_IT_ATTACHED);
1573         assert_corr(it_at_rec(it));
1574         return iam_leaf_rec(&it->ii_path.ip_leaf);
1575 }
1576 EXPORT_SYMBOL(iam_it_rec_get);
1577
1578 static void iam_it_reccpy(struct iam_iterator *it, const struct iam_rec *r)
1579 {
1580         struct iam_leaf *folio;
1581
1582         folio = &it->ii_path.ip_leaf;
1583         iam_leaf_ops(folio)->rec_set(folio, r);
1584 }
1585
1586 /*
1587  * Replace contents of record under iterator.
1588  *
1589  * precondition:  it_state(it) == IAM_IT_ATTACHED &&
1590  *                it->ii_flags&IAM_IT_WRITE
1591  * postcondition: it_state(it) == IAM_IT_ATTACHED &&
1592  *                ergo(result == 0, !memcmp(iam_it_rec_get(it), r, ...))
1593  */
1594 int iam_it_rec_set(handle_t *h,
1595                    struct iam_iterator *it, const struct iam_rec *r)
1596 {
1597         int result;
1598         struct iam_path *path;
1599         struct buffer_head *bh;
1600
1601         assert_corr(it_state(it) == IAM_IT_ATTACHED &&
1602                     it->ii_flags&IAM_IT_WRITE);
1603         assert_corr(it_at_rec(it));
1604
1605         path = &it->ii_path;
1606         bh   = path->ip_leaf.il_bh;
1607         result = iam_txn_add(h, path, bh);
1608         if (result == 0) {
1609                 iam_it_reccpy(it, r);
1610                 result = iam_txn_dirty(h, path, bh);
1611         }
1612         return result;
1613 }
1614 EXPORT_SYMBOL(iam_it_rec_set);
1615
1616 /*
1617  * Return pointer to the index key under iterator.
1618  *
1619  * precondition:  it_state(it) == IAM_IT_ATTACHED ||
1620  *                it_state(it) == IAM_IT_SKEWED
1621  */
1622 static struct iam_ikey *iam_it_ikey_get(const struct iam_iterator *it,
1623                                         struct iam_ikey *ikey)
1624 {
1625         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1626                     it_state(it) == IAM_IT_SKEWED);
1627         assert_corr(it_at_rec(it));
1628         return iam_leaf_ikey(&it->ii_path.ip_leaf, ikey);
1629 }
1630
1631 /*
1632  * Return pointer to the key under iterator.
1633  *
1634  * precondition:  it_state(it) == IAM_IT_ATTACHED ||
1635  *                it_state(it) == IAM_IT_SKEWED
1636  */
1637 struct iam_key *iam_it_key_get(const struct iam_iterator *it)
1638 {
1639         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1640                     it_state(it) == IAM_IT_SKEWED);
1641         assert_corr(it_at_rec(it));
1642         return iam_leaf_key(&it->ii_path.ip_leaf);
1643 }
1644 EXPORT_SYMBOL(iam_it_key_get);
1645
1646 /*
1647  * Return size of key under iterator (in bytes)
1648  *
1649  * precondition:  it_state(it) == IAM_IT_ATTACHED ||
1650  *                it_state(it) == IAM_IT_SKEWED
1651  */
1652 int iam_it_key_size(const struct iam_iterator *it)
1653 {
1654         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1655                     it_state(it) == IAM_IT_SKEWED);
1656         assert_corr(it_at_rec(it));
1657         return iam_leaf_key_size(&it->ii_path.ip_leaf);
1658 }
1659 EXPORT_SYMBOL(iam_it_key_size);
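
/*
 * Illustrative sketch (not part of the original source): copying the key
 * under an attached iterator into a caller-supplied buffer "buf" (assumed
 * to be large enough), bounding the copy with iam_it_key_size():
 *
 *     int size = iam_it_key_size(it);
 *
 *     memcpy(buf, iam_it_key_get(it), size);
 */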
1660
1661 struct buffer_head *
1662 iam_new_node(handle_t *h, struct iam_container *c, iam_ptr_t *b, int *e)
1663 {
1664         struct inode *inode = c->ic_object;
1665         struct buffer_head *bh = NULL;
1666         struct iam_idle_head *head;
1667         struct buffer_head *idle;
1668         __u32 *idle_blocks;
1669         __u16 count;
1670
1671         if (c->ic_idle_bh == NULL)
1672                 goto newblock;
1673
1674         down(&c->ic_idle_sem);
1675         if (unlikely(c->ic_idle_bh == NULL)) {
1676                 up(&c->ic_idle_sem);
1677                 goto newblock;
1678         }
1679
1680         head = (struct iam_idle_head *)(c->ic_idle_bh->b_data);
1681         count = le16_to_cpu(head->iih_count);
1682         if (count > 0) {
1683                 *e = ldiskfs_journal_get_write_access(h, c->ic_idle_bh);
1684                 if (*e != 0)
1685                         goto fail;
1686
1687                 --count;
1688                 *b = le32_to_cpu(head->iih_blks[count]);
1689                 head->iih_count = cpu_to_le16(count);
1690                 *e = ldiskfs_journal_dirty_metadata(h, c->ic_idle_bh);
1691                 if (*e != 0)
1692                         goto fail;
1693
1694                 up(&c->ic_idle_sem);
1695                 bh = ldiskfs_bread(NULL, inode, *b, 0, e);
1696                 if (bh == NULL)
1697                         return NULL;
1698                 goto got;
1699         }
1700
1701         /* The block itself which contains the iam_idle_head is
1702          * also an idle block, and can be used as the new node. */
1703         idle_blocks = (__u32 *)(c->ic_root_bh->b_data +
1704                                 c->ic_descr->id_root_gap +
1705                                 sizeof(struct dx_countlimit));
1706         *e = ldiskfs_journal_get_write_access(h, c->ic_root_bh);
1707         if (*e != 0)
1708                 goto fail;
1709
1710         *b = le32_to_cpu(*idle_blocks);
1711         iam_lock_bh(c->ic_root_bh);
1712         *idle_blocks = head->iih_next;
1713         iam_unlock_bh(c->ic_root_bh);
1714         *e = ldiskfs_journal_dirty_metadata(h, c->ic_root_bh);
1715         if (*e != 0) {
1716                 iam_lock_bh(c->ic_root_bh);
1717                 *idle_blocks = cpu_to_le32(*b);
1718                 iam_unlock_bh(c->ic_root_bh);
1719                 goto fail;
1720         }
1721
1722         bh = c->ic_idle_bh;
1723         idle = iam_load_idle_blocks(c, le32_to_cpu(*idle_blocks));
1724         if (idle != NULL && IS_ERR(idle)) {
1725                 *e = PTR_ERR(idle);
1726                 c->ic_idle_bh = NULL;
1727                 brelse(bh);
1728                 goto fail;
1729         }
1730
1731         c->ic_idle_bh = idle;
1732         up(&c->ic_idle_sem);
1733
1734 got:
1735         /* get write access for the found buffer head */
1736         *e = ldiskfs_journal_get_write_access(h, bh);
1737         if (*e != 0) {
1738                 brelse(bh);
1739                 bh = NULL;
1740                 ldiskfs_std_error(inode->i_sb, *e);
1741         } else {
1742                 /* Clear the reused node, as is done for a newly allocated node. */
1743                 memset(bh->b_data, 0, inode->i_sb->s_blocksize);
1744                 set_buffer_uptodate(bh);
1745         }
1746         return bh;
1747
1748 newblock:
1749         bh = ldiskfs_append(h, inode, b, e);
1750         return bh;
1751
1752 fail:
1753         up(&c->ic_idle_sem);
1754         ldiskfs_std_error(inode->i_sb, *e);
1755         return NULL;
1756 }
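
/*
 * Illustrative sketch (not part of the original source): the caller-side
 * contract of iam_new_node(), as used by iam_new_leaf() and
 * split_index_node() below.  On failure the returned buffer head is NULL
 * and *e carries the error; on success *b names the (possibly recycled)
 * block backing the returned buffer:
 *
 *     iam_ptr_t blknr;
 *     struct buffer_head *bh;
 *     int err;
 *
 *     bh = iam_new_node(handle, c, &blknr, &err);
 *     if (bh == NULL)
 *             return err;
 */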
1757
1758 /*
1759  * Insertion of a new record. Interaction with jbd in the non-trivial case
1760  * (when a split happens) is as follows:
1761  *
1762  *  - the new leaf node is added to the transaction by iam_new_node();
1763  *
1764  *  - the old leaf node is added to the transaction by iam_add_rec();
1765  *
1766  *  - the leaf where the insertion point ends up is marked dirty by iam_add_rec();
1767  *
1768  *  - the leaf without the insertion point is marked dirty (as @new_leaf) by
1769  *  iam_new_leaf();
1770  *
1771  *  - split index nodes are added to the transaction and marked dirty by
1772  *  split_index_node();
1773  *
1774  *  - the "safe" index node, which is not split but receives the new pointer,
1775  *  is added to the transaction and marked dirty by split_index_node();
1776  *
1777  *  - the index node where the pointer to the new leaf is inserted is added to
1778  *  the transaction by split_index_node() and marked dirty by iam_add_rec();
1779  *
1780  *  - the inode is marked dirty by iam_add_rec().
1781  *
1782  */
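
/*
 * Illustrative sketch (not part of the original source): the per-buffer jbd
 * protocol that the list above relies on.  Each modified buffer is first
 * added to the running transaction and only then modified and marked dirty;
 * the iam_txn_add()/iam_txn_dirty() helpers used throughout this file are
 * assumed to wrap these two ldiskfs journalling calls:
 *
 *     err = ldiskfs_journal_get_write_access(handle, bh);
 *     if (err == 0) {
 *             ... modify bh->b_data under the appropriate lock ...
 *             err = ldiskfs_journal_dirty_metadata(handle, bh);
 *     }
 */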
1783
1784 static int iam_new_leaf(handle_t *handle, struct iam_leaf *leaf)
1785 {
1786         int err;
1787         iam_ptr_t blknr;
1788         struct buffer_head   *new_leaf;
1789         struct buffer_head   *old_leaf;
1790         struct iam_container *c;
1791         struct inode         *obj;
1792         struct iam_path      *path;
1793
1794         assert_inv(iam_leaf_check(leaf));
1795
1796         c = iam_leaf_container(leaf);
1797         path = leaf->il_path;
1798
1799         obj = c->ic_object;
1800         new_leaf = iam_new_node(handle, c, &blknr, &err);
1801         do_corr(schedule());
1802         if (new_leaf != NULL) {
1803                 struct dynlock_handle *lh;
1804
1805                 lh = iam_lock_htree(c, blknr, DLT_WRITE);
1806                 do_corr(schedule());
1807                 if (lh != NULL) {
1808                         iam_leaf_ops(leaf)->init_new(c, new_leaf);
1809                         do_corr(schedule());
1810                         old_leaf = leaf->il_bh;
1811                         iam_leaf_split(leaf, &new_leaf, blknr);
1812                         if (old_leaf != leaf->il_bh) {
1813                                 /*
1814                                  * Switched to the new leaf.
1815                                  */
1816                                 iam_leaf_unlock(leaf);
1817                                 leaf->il_lock = lh;
1818                                 path->ip_frame->leaf = blknr;
1819                         } else
1820                                 iam_unlock_htree(path->ip_container, lh);
1821                         do_corr(schedule());
1822                         err = iam_txn_dirty(handle, path, new_leaf);
1823                         brelse(new_leaf);
1824                         if (err == 0)
1825                                 err = ldiskfs_mark_inode_dirty(handle, obj);
1826                         do_corr(schedule());
1827                 } else
1828                         err = -ENOMEM;
1829         }
1830         assert_inv(iam_leaf_check(leaf));
1831         assert_inv(iam_leaf_check(&iam_leaf_path(leaf)->ip_leaf));
1832         assert_inv(iam_path_check(iam_leaf_path(leaf)));
1833         return err;
1834 }
1835
1836 static inline void dx_set_limit(struct iam_entry *entries, unsigned value)
1837 {
1838         ((struct dx_countlimit *) entries)->limit = cpu_to_le16(value);
1839 }
1840
1841 static int iam_shift_entries(struct iam_path *path,
1842                          struct iam_frame *frame, unsigned count,
1843                          struct iam_entry *entries, struct iam_entry *entries2,
1844                          u32 newblock)
1845 {
1846         unsigned count1;
1847         unsigned count2;
1848         int delta;
1849
1850         struct iam_frame *parent = frame - 1;
1851         struct iam_ikey *pivot = iam_path_ikey(path, 3);
1852
1853         delta = dx_index_is_compat(path) ? 0 : +1;
1854
1855         count1 = count/2 + delta;
1856         count2 = count - count1;
1857         dx_get_ikey(path, iam_entry_shift(path, entries, count1), pivot);
1858
1859         dxtrace(printk("Split index %d/%d\n", count1, count2));
1860
1861         memcpy((char *) iam_entry_shift(path, entries2, delta),
1862                (char *) iam_entry_shift(path, entries, count1),
1863                count2 * iam_entry_size(path));
1864
1865         dx_set_count(entries2, count2 + delta);
1866         dx_set_limit(entries2, dx_node_limit(path));
1867
1868         /*
1869          * NOTE: very subtle piece of code. A competing dx_probe() may find the
1870          * 2nd level index in the root index, then we insert a new index here and
1871          * set a new count in that 2nd level index, so dx_probe() may see a 2nd
1872          * level index without the hash it looks for. The solution is to check
1873          * the root index after we have locked the just-found 2nd level index -bzzz
1874          */
1875         iam_insert_key_lock(path, parent, pivot, newblock);
1876
1877         /*
1878          * now the old and the new 2nd level index blocks contain all the
1879          * pointers, so dx_probe() may find an entry in either of them.  It's OK -bzzz
1880          */
1881         iam_lock_bh(frame->bh);
1882         dx_set_count(entries, count1);
1883         iam_unlock_bh(frame->bh);
1884
1885         /*
1886          * now the old 2nd level index block points to the first half of the
1887          * leaves. It is important that dx_probe() checks the root index block for
1888          * changes under dx_lock_bh(frame->bh) -bzzz
1889          */
1890
1891         return count1;
1892 }
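
/*
 * Worked example (illustrative, not part of the original source): for an iam
 * container that is not htree-compatible, delta == +1.  Splitting a full
 * node with count == 10 gives count1 = 10/2 + 1 = 6 entries that stay in the
 * old block and count2 = 10 - 6 = 4 entries that are copied into the new
 * block (shifted by delta to leave the extra slot free), and the ikey of
 * entry index count1 becomes the pivot inserted into the parent by
 * iam_insert_key_lock().
 */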
1893
1894
1895 int split_index_node(handle_t *handle, struct iam_path *path,
1896                      struct dynlock_handle **lh)
1897 {
1898
1899         struct iam_entry *entries;   /* old block contents */
1900         struct iam_entry *entries2;  /* new block contents */
1901         struct iam_frame *frame, *safe;
1902         struct buffer_head *bh_new[DX_MAX_TREE_HEIGHT] = {0};
1903         u32 newblock[DX_MAX_TREE_HEIGHT] = {0};
1904         struct dynlock_handle *lock[DX_MAX_TREE_HEIGHT] = {NULL,};
1905         struct dynlock_handle *new_lock[DX_MAX_TREE_HEIGHT] = {NULL,};
1906         struct inode *dir = iam_path_obj(path);
1907         struct iam_descr *descr;
1908         int nr_splet;
1909         int i, err;
1910
1911         descr = iam_path_descr(path);
1912         /*
1913          * Algorithm below depends on this.
1914          */
1915         assert_corr(dx_root_limit(path) < dx_node_limit(path));
1916
1917         frame = path->ip_frame;
1918         entries = frame->entries;
1919
1920         /*
1921          * Tall-tree handling: we might have to split multiple index blocks
1922          * all the way up to the tree root. The tricky point here is error handling:
1923          * to avoid complicated undo/rollback we
1924          *
1925          *   - first allocate all necessary blocks
1926          *
1927          *   - insert pointers into them atomically.
1928          */
1929
1930         /*
1931          * Locking: leaf is already locked. htree-locks are acquired on all
1932          * Locking: the leaf is already locked. htree-locks are acquired on all
1933          * index nodes that require a split, bottom-to-top, on the "safe" node,
1934          * and on all new nodes.
1935
1936         dxtrace(printk("using %u of %u node entries\n",
1937                        dx_get_count(entries), dx_get_limit(entries)));
1938
1939         /* What levels need split? */
1940         for (nr_splet = 0; frame >= path->ip_frames &&
1941              dx_get_count(frame->entries) == dx_get_limit(frame->entries);
1942              --frame, ++nr_splet) {
1943                 do_corr(schedule());
1944                 if (nr_splet == DX_MAX_TREE_HEIGHT) {
1945                         /*
1946                         CWARN(dir->i_sb, __FUNCTION__,
1947                                      "Directory index full!\n");
1948                                      */
1949                         err = -ENOSPC;
1950                         goto cleanup;
1951                 }
1952         }
1953
1954         safe = frame;
1955
1956         /*
1957          * Lock all nodes, bottom to top.
1958          */
1959         for (frame = path->ip_frame, i = nr_splet; i >= 0; --i, --frame) {
1960                 do_corr(schedule());
1961                 lock[i] = iam_lock_htree(path->ip_container, frame->curidx,
1962                                          DLT_WRITE);
1963                 if (lock[i] == NULL) {
1964                         err = -ENOMEM;
1965                         goto cleanup;
1966                 }
1967         }
1968
1969         /*
1970          * Check for concurrent index modification.
1971          */
1972         err = iam_check_full_path(path, 1);
1973         if (err)
1974                 goto cleanup;
1975         /*
1976          * And check that the same number of nodes is to be split.
1977          */
1978         for (i = 0, frame = path->ip_frame; frame >= path->ip_frames &&
1979              dx_get_count(frame->entries) == dx_get_limit(frame->entries);
1980              --frame, ++i) {
1981                 ;
1982         }
1983         if (i != nr_splet) {
1984                 err = -EAGAIN;
1985                 goto cleanup;
1986         }
1987
1988         /* Go back down, allocating blocks, locking them, and adding them to
1989          * the transaction... */
1990         for (frame = safe + 1, i = 0; i < nr_splet; ++i, ++frame) {
1991                 bh_new[i] = iam_new_node(handle, path->ip_container,
1992                                          &newblock[i], &err);
1993                 do_corr(schedule());
1994                 if (!bh_new[i] ||
1995                     descr->id_ops->id_node_init(path->ip_container,
1996                                                 bh_new[i], 0) != 0)
1997                         goto cleanup;
1998                 new_lock[i] = iam_lock_htree(path->ip_container, newblock[i],
1999                                              DLT_WRITE);
2000                 if (new_lock[i] == NULL) {
2001                         err = -ENOMEM;
2002                         goto cleanup;
2003                 }
2004                 do_corr(schedule());
2005                 BUFFER_TRACE(frame->bh, "get_write_access");
2006                 err = ldiskfs_journal_get_write_access(handle, frame->bh);
2007                 if (err)
2008                         goto journal_error;
2009         }
2010         /* Add "safe" node to transaction too */
2011         if (safe + 1 != path->ip_frames) {
2012                 do_corr(schedule());
2013                 err = ldiskfs_journal_get_write_access(handle, safe->bh);
2014                 if (err)
2015                         goto journal_error;
2016         }
2017
2018         /* Go through nodes once more, inserting pointers */
2019         for (frame = safe + 1, i = 0; i < nr_splet; ++i, ++frame) {
2020                 unsigned count;
2021                 int idx;
2022                 struct buffer_head *bh2;
2023                 struct buffer_head *bh;
2024
2025                 entries = frame->entries;
2026                 count = dx_get_count(entries);
2027                 idx = iam_entry_diff(path, frame->at, entries);
2028
2029                 bh2 = bh_new[i];
2030                 entries2 = dx_get_entries(path, bh2->b_data, 0);
2031
2032                 bh = frame->bh;
2033                 if (frame == path->ip_frames) {
2034                         /* splitting root node. Tricky point:
2035                          *
2036                          * In the "normal" B-tree we'd split root *and* add
2037                          * new root to the tree with pointers to the old root
2038                          * and its sibling (thus introducing two new nodes).
2039                          *
2040                          * In htree it's enough to add one node, because
2041                          * capacity of the root node is smaller than that of
2042                          * non-root one.
2043                          */
2044                         struct iam_frame *frames;
2045                         struct iam_entry *next;
2046
2047                         assert_corr(i == 0);
2048
2049                         do_corr(schedule());
2050
2051                         frames = path->ip_frames;
2052                         memcpy((char *) entries2, (char *) entries,
2053                                count * iam_entry_size(path));
2054                         dx_set_limit(entries2, dx_node_limit(path));
2055
2056                         /* Set up root */
2057                         iam_lock_bh(frame->bh);
2058                         next = descr->id_ops->id_root_inc(path->ip_container,
2059                                                           path, frame);
2060                         dx_set_block(path, next, newblock[0]);
2061                         iam_unlock_bh(frame->bh);
2062
2063                         do_corr(schedule());
2064                         /* Shift frames in the path */
2065                         memmove(frames + 2, frames + 1,
2066                                 (sizeof path->ip_frames) - 2 * sizeof frames[0]);
2067                         /* Add new access path frame */
2068                         frames[1].at = iam_entry_shift(path, entries2, idx);
2069                         frames[1].entries = entries = entries2;
2070                         frames[1].bh = bh2;
2071                         assert_inv(dx_node_check(path, frame));
2072                         ++ path->ip_frame;
2073                         ++ frame;
2074                         assert_inv(dx_node_check(path, frame));
2075                         bh_new[0] = NULL; /* buffer head is "consumed" */
2076                         err = ldiskfs_journal_dirty_metadata(handle, bh2);
2077                         if (err)
2078                                 goto journal_error;
2079                         do_corr(schedule());
2080                 } else {
2081                         /* splitting non-root index node. */
2082                         struct iam_frame *parent = frame - 1;
2083
2084                         do_corr(schedule());
2085                         count = iam_shift_entries(path, frame, count,
2086                                               entries, entries2, newblock[i]);
2087                         /* Which index block gets the new entry? */
2088                         if (idx >= count) {
2089                                 int d = dx_index_is_compat(path) ? 0 : +1;
2090
2091                                 frame->at = iam_entry_shift(path, entries2,
2092                                                             idx - count + d);
2093                                 frame->entries = entries = entries2;
2094                                 frame->curidx = newblock[i];
2095                                 swap(frame->bh, bh2);
2096                                 assert_corr(lock[i + 1] != NULL);
2097                                 assert_corr(new_lock[i] != NULL);
2098                                 swap(lock[i + 1], new_lock[i]);
2099                                 bh_new[i] = bh2;
2100                                 parent->at = iam_entry_shift(path,
2101                                                              parent->at, +1);
2102                         }
2103                         assert_inv(dx_node_check(path, frame));
2104                         assert_inv(dx_node_check(path, parent));
2105                         dxtrace(dx_show_index ("node", frame->entries));
2106                         dxtrace(dx_show_index ("node",
2107                                ((struct dx_node *) bh2->b_data)->entries));
2108                         err = ldiskfs_journal_dirty_metadata(handle, bh2);
2109                         if (err)
2110                                 goto journal_error;
2111                         do_corr(schedule());
2112                         err = ldiskfs_journal_dirty_metadata(handle, parent->bh);
2113                         if (err)
2114                                 goto journal_error;
2115                 }
2116                 do_corr(schedule());
2117                 err = ldiskfs_journal_dirty_metadata(handle, bh);
2118                 if (err)
2119                         goto journal_error;
2120         }
2121         /*
2122          * This function was called to make insertion of new leaf
2123          * possible. Check that it fulfilled its obligations.
2124          */
2125         assert_corr(dx_get_count(path->ip_frame->entries) <
2126                     dx_get_limit(path->ip_frame->entries));
2127         assert_corr(lock[nr_splet] != NULL);
2128         *lh = lock[nr_splet];
2129         lock[nr_splet] = NULL;
2130         if (nr_splet > 0) {
2131                 /*
2132                  * Log ->i_size modification.
2133                  */
2134                 err = ldiskfs_mark_inode_dirty(handle, dir);
2135                 if (err)
2136                         goto journal_error;
2137         }
2138         goto cleanup;
2139 journal_error:
2140         ldiskfs_std_error(dir->i_sb, err);
2141
2142 cleanup:
2143         iam_unlock_array(path->ip_container, lock);
2144         iam_unlock_array(path->ip_container, new_lock);
2145
2146         assert_corr(err || iam_frame_is_locked(path, path->ip_frame));
2147
2148         do_corr(schedule());
2149         for (i = 0; i < ARRAY_SIZE(bh_new); ++i) {
2150                 if (bh_new[i] != NULL)
2151                         brelse(bh_new[i]);
2152         }
2153         return err;
2154 }
2155
2156 static int iam_add_rec(handle_t *handle, struct iam_iterator *it,
2157                        struct iam_path *path,
2158                        const struct iam_key *k, const struct iam_rec *r)
2159 {
2160         int err;
2161         struct iam_leaf *leaf;
2162
2163         leaf = &path->ip_leaf;
2164         assert_inv(iam_leaf_check(leaf));
2165         assert_inv(iam_path_check(path));
2166         err = iam_txn_add(handle, path, leaf->il_bh);
2167         if (err == 0) {
2168                 do_corr(schedule());
2169                 if (!iam_leaf_can_add(leaf, k, r)) {
2170                         struct dynlock_handle *lh = NULL;
2171
2172                         do {
2173                                 assert_corr(lh == NULL);
2174                                 do_corr(schedule());
2175                                 err = split_index_node(handle, path, &lh);
2176                                 if (err == -EAGAIN) {
2177                                         assert_corr(lh == NULL);
2178
2179                                         iam_path_fini(path);
2180                                         it->ii_state = IAM_IT_DETACHED;
2181
2182                                         do_corr(schedule());
2183                                         err = iam_it_get_exact(it, k);
2184                                         if (err == -ENOENT)
2185                                                 err = +1; /* repeat split */
2186                                         else if (err == 0)
2187                                                 err = -EEXIST;
2188                                 }
2189                         } while (err > 0);
2190                         assert_inv(iam_path_check(path));
2191                         if (err == 0) {
2192                                 assert_corr(lh != NULL);
2193                                 do_corr(schedule());
2194                                 err = iam_new_leaf(handle, leaf);
2195                                 if (err == 0)
2196                                         err = iam_txn_dirty(handle, path,
2197                                                             path->ip_frame->bh);
2198                         }
2199                         iam_unlock_htree(path->ip_container, lh);
2200                         do_corr(schedule());
2201                 }
2202                 if (err == 0) {
2203                         iam_leaf_rec_add(leaf, k, r);
2204                         err = iam_txn_dirty(handle, path, leaf->il_bh);
2205                 }
2206         }
2207         assert_inv(iam_leaf_check(leaf));
2208         assert_inv(iam_leaf_check(&path->ip_leaf));
2209         assert_inv(iam_path_check(path));
2210         return err;
2211 }
2212
2213 /*
2214  * Insert new record with key @k and contents from @r, shifting records to the
2215  * right. On success, iterator is positioned on the newly inserted record.
2216  *
2217  * precondition: it->ii_flags&IAM_IT_WRITE &&
2218  *               (it_state(it) == IAM_IT_ATTACHED ||
2219  *                it_state(it) == IAM_IT_SKEWED) &&
2220  *               ergo(it_state(it) == IAM_IT_ATTACHED,
2221  *                    it_keycmp(it, k) <= 0) &&
2222  *               ergo(it_before(it), it_keycmp(it, k) > 0));
2223  * postcondition: ergo(result == 0,
2224  *                     it_state(it) == IAM_IT_ATTACHED &&
2225  *                     it_keycmp(it, k) == 0 &&
2226  *                     !memcmp(iam_it_rec_get(it), r, ...))
2227  */
2228 int iam_it_rec_insert(handle_t *h, struct iam_iterator *it,
2229                       const struct iam_key *k, const struct iam_rec *r)
2230 {
2231         int result;
2232         struct iam_path *path;
2233
2234         path = &it->ii_path;
2235
2236         assert_corr(it->ii_flags&IAM_IT_WRITE);
2237         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
2238                     it_state(it) == IAM_IT_SKEWED);
2239         assert_corr(ergo(it_state(it) == IAM_IT_ATTACHED,
2240                          it_keycmp(it, k) <= 0));
2241         assert_corr(ergo(it_before(it), it_keycmp(it, k) > 0));
2242         result = iam_add_rec(h, it, path, k, r);
2243         if (result == 0)
2244                 it->ii_state = IAM_IT_ATTACHED;
2245         assert_corr(ergo(result == 0,
2246                          it_state(it) == IAM_IT_ATTACHED &&
2247                          it_keycmp(it, k) == 0));
2248         return result;
2249 }
2250 EXPORT_SYMBOL(iam_it_rec_insert);
2251
2252 static inline int iam_idle_blocks_limit(struct inode *inode)
2253 {
2254         return (inode->i_sb->s_blocksize - sizeof(struct iam_idle_head)) >> 2;
2255 }
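
/*
 * Worked example (illustrative, not part of the original source): with a
 * 4096-byte block, and assuming struct iam_idle_head itself takes 8 bytes,
 * the limit is (4096 - 8) >> 2 == 1022, i.e. up to 1022 32-bit idle block
 * numbers fit into one ic_idle_bh block before iam_recycle_leaf() has to
 * install a new head block via iam_install_idle_blocks().
 */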
2256
2257 /*
2258  * If the leaf cannot be recycled, we will lose one block for reuse.  It is
2259  * not a serious issue because it is almost the same as not recycling at all.
2260  */
2261 static iam_ptr_t iam_index_shrink(handle_t *h, struct iam_path *p,
2262                                   struct iam_leaf *l, struct buffer_head **bh)
2263 {
2264         struct iam_container *c = p->ip_container;
2265         struct inode *inode = c->ic_object;
2266         struct iam_frame *frame = p->ip_frame;
2267         struct iam_entry *entries;
2268         struct iam_entry *pos;
2269         struct dynlock_handle *lh;
2270         int count;
2271         int rc;
2272
2273         if (c->ic_idle_failed)
2274                 return 0;
2275
2276         if (unlikely(frame == NULL))
2277                 return 0;
2278
2279         if (!iam_leaf_empty(l))
2280                 return 0;
2281
2282         lh = iam_lock_htree(c, frame->curidx, DLT_WRITE);
2283         if (lh == NULL) {
2284                 CWARN("%.16s: No memory to recycle idle blocks\n",
2285                       LDISKFS_SB(inode->i_sb)->s_es->s_volume_name);
2286                 return 0;
2287         }
2288
2289         rc = iam_txn_add(h, p, frame->bh);
2290         if (rc != 0) {
2291                 iam_unlock_htree(c, lh);
2292                 return 0;
2293         }
2294
2295         iam_lock_bh(frame->bh);
2296         entries = frame->entries;
2297         count = dx_get_count(entries);
2298         /* Do NOT shrink the last entry in the index node, which can be reused
2299          * directly by the next new node. */
2300         if (count == 2) {
2301                 iam_unlock_bh(frame->bh);
2302                 iam_unlock_htree(c, lh);
2303                 return 0;
2304         }
2305
2306         pos = iam_find_position(p, frame);
2307         /* Some new leaf nodes may have been added, or empty leaf nodes may have
2308          * been shrunk, during the delete operation.
2309          *
2310          * If the empty leaf is not under the current index node because the index
2311          * node has been split, then just skip the empty leaf; this case is rare. */
2312         if (unlikely(frame->leaf != dx_get_block(p, pos))) {
2313                 iam_unlock_bh(frame->bh);
2314                 iam_unlock_htree(c, lh);
2315                 return 0;
2316         }
2317
2318         frame->at = pos;
2319         if (frame->at < iam_entry_shift(p, entries, count - 1)) {
2320                 struct iam_entry *n = iam_entry_shift(p, frame->at, 1);
2321
2322                 memmove(frame->at, n,
2323                         (char *)iam_entry_shift(p, entries, count) - (char *)n);
2324                 frame->at_shifted = 1;
2325         }
2326         dx_set_count(entries, count - 1);
2327         iam_unlock_bh(frame->bh);
2328         rc = iam_txn_dirty(h, p, frame->bh);
2329         iam_unlock_htree(c, lh);
2330         if (rc != 0)
2331                 return 0;
2332
2333         get_bh(l->il_bh);
2334         *bh = l->il_bh;
2335         return frame->leaf;
2336 }
2337
2338 static int
2339 iam_install_idle_blocks(handle_t *h, struct iam_path *p, struct buffer_head *bh,
2340                         __u32 *idle_blocks, iam_ptr_t blk)
2341 {
2342         struct iam_container *c = p->ip_container;
2343         struct buffer_head *old = c->ic_idle_bh;
2344         struct iam_idle_head *head;
2345         int rc;
2346
2347         head = (struct iam_idle_head *)(bh->b_data);
2348         head->iih_magic = cpu_to_le16(IAM_IDLE_HEADER_MAGIC);
2349         head->iih_count = 0;
2350         head->iih_next = *idle_blocks;
2351         /* The bh has already been added to the transaction via get_write_access. */
2352         rc = iam_txn_dirty(h, p, bh);
2353         if (rc != 0)
2354                 return rc;
2355
2356         rc = iam_txn_add(h, p, c->ic_root_bh);
2357         if (rc != 0)
2358                 return rc;
2359
2360         iam_lock_bh(c->ic_root_bh);
2361         *idle_blocks = cpu_to_le32(blk);
2362         iam_unlock_bh(c->ic_root_bh);
2363         rc = iam_txn_dirty(h, p, c->ic_root_bh);
2364         if (rc == 0) {
2365                 /* Do NOT release the old buffer before the new one is assigned. */
2366                 get_bh(bh);
2367                 c->ic_idle_bh = bh;
2368                 brelse(old);
2369         } else {
2370                 iam_lock_bh(c->ic_root_bh);
2371                 *idle_blocks = head->iih_next;
2372                 iam_unlock_bh(c->ic_root_bh);
2373         }
2374         return rc;
2375 }
2376
2377 /*
2378  * If the leaf cannot be recycled, we will lose one block for reuse.  It is
2379  * not a serious issue because it is almost the same as not recycling at all.
2380  */
2381 static void iam_recycle_leaf(handle_t *h, struct iam_path *p,
2382                              struct buffer_head *bh, iam_ptr_t blk)
2383 {
2384         struct iam_container *c = p->ip_container;
2385         struct inode *inode = c->ic_object;
2386         struct iam_idle_head *head;
2387         __u32 *idle_blocks;
2388         int count;
2389         int rc;
2390
2391         down(&c->ic_idle_sem);
2392         if (unlikely(c->ic_idle_failed)) {
2393                 rc = -EFAULT;
2394                 goto unlock;
2395         }
2396
2397         idle_blocks = (__u32 *)(c->ic_root_bh->b_data +
2398                                 c->ic_descr->id_root_gap +
2399                                 sizeof(struct dx_countlimit));
2400         /* It is the first idle block. */
2401         if (c->ic_idle_bh == NULL) {
2402                 rc = iam_install_idle_blocks(h, p, bh, idle_blocks, blk);
2403                 goto unlock;
2404         }
2405
2406         head = (struct iam_idle_head *)(c->ic_idle_bh->b_data);
2407         count = le16_to_cpu(head->iih_count);
2408         /* Current ic_idle_bh is full, to be replaced by the leaf. */
2409         if (count == iam_idle_blocks_limit(inode)) {
2410                 rc = iam_install_idle_blocks(h, p, bh, idle_blocks, blk);
2411                 goto unlock;
2412         }
2413
2414         /* Just add to ic_idle_bh. */
2415         rc = iam_txn_add(h, p, c->ic_idle_bh);
2416         if (rc != 0)
2417                 goto unlock;
2418
2419         head->iih_blks[count] = cpu_to_le32(blk);
2420         head->iih_count = cpu_to_le16(count + 1);
2421         rc = iam_txn_dirty(h, p, c->ic_idle_bh);
2422
2423 unlock:
2424         up(&c->ic_idle_sem);
2425         if (rc != 0)
2426                 CWARN("%.16s: idle blocks failed, will lose the blk %u\n",
2427                       LDISKFS_SB(inode->i_sb)->s_es->s_volume_name, blk);
2428 }
2429
2430 /*
2431  * Delete record under iterator.
2432  *
2433  * precondition:  it_state(it) == IAM_IT_ATTACHED &&
2434  *                it->ii_flags&IAM_IT_WRITE &&
2435  *                it_at_rec(it)
2436  * postcondition: it_state(it) == IAM_IT_ATTACHED ||
2437  *                it_state(it) == IAM_IT_DETACHED
2438  */
2439 int iam_it_rec_delete(handle_t *h, struct iam_iterator *it)
2440 {
2441         int result;
2442         struct iam_leaf *leaf;
2443         struct iam_path *path;
2444
2445         assert_corr(it_state(it) == IAM_IT_ATTACHED &&
2446                     it->ii_flags&IAM_IT_WRITE);
2447         assert_corr(it_at_rec(it));
2448
2449         path = &it->ii_path;
2450         leaf = &path->ip_leaf;
2451
2452         assert_inv(iam_leaf_check(leaf));
2453         assert_inv(iam_path_check(path));
2454
2455         result = iam_txn_add(h, path, leaf->il_bh);
2456         /*
2457          * no compaction for now.
2458          */
2459         if (result == 0) {
2460                 iam_rec_del(leaf, it->ii_flags&IAM_IT_MOVE);
2461                 result = iam_txn_dirty(h, path, leaf->il_bh);
2462                 if (result == 0 && iam_leaf_at_end(leaf)) {
2463                         struct buffer_head *bh = NULL;
2464                         iam_ptr_t blk;
2465
2466                         blk = iam_index_shrink(h, path, leaf, &bh);
2467                         if (it->ii_flags & IAM_IT_MOVE) {
2468                                 result = iam_it_next(it);
2469                                 if (result > 0)
2470                                         result = 0;
2471                         }
2472
2473                         if (bh != NULL) {
2474                                 iam_recycle_leaf(h, path, bh, blk);
2475                                 brelse(bh);
2476                         }
2477                 }
2478         }
2479         assert_inv(iam_leaf_check(leaf));
2480         assert_inv(iam_path_check(path));
2481         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
2482                     it_state(it) == IAM_IT_DETACHED);
2483         return result;
2484 }
2485 EXPORT_SYMBOL(iam_it_rec_delete);
2486
2487 /*
2488  * Convert iterator to cookie.
2489  *
2490  * precondition:  it_state(it) == IAM_IT_ATTACHED &&
2491  *                iam_path_descr(it->ii_path)->id_key_size <= sizeof(iam_pos_t)
2492  * postcondition: it_state(it) == IAM_IT_ATTACHED
2493  */
2494 iam_pos_t iam_it_store(const struct iam_iterator *it)
2495 {
2496         iam_pos_t result;
2497
2498         assert_corr(it_state(it) == IAM_IT_ATTACHED);
2499         assert_corr(it_at_rec(it));
2500         assert_corr(iam_it_container(it)->ic_descr->id_ikey_size <=
2501                     sizeof result);
2502
2503         result = 0;
2504         return *(iam_pos_t *)iam_it_ikey_get(it, (void *)&result);
2505 }
2506 EXPORT_SYMBOL(iam_it_store);
2507
2508 /*
2509  * Restore iterator from cookie.
2510  *
2511  * precondition:  it_state(it) == IAM_IT_DETACHED && it->ii_flags&IAM_IT_MOVE &&
2512  *                iam_path_descr(it->ii_path)->id_key_size <= sizeof(iam_pos_t)
2513  * postcondition: ergo(result == 0, it_state(it) == IAM_IT_ATTACHED &&
2514  *                                  iam_it_store(it) == pos)
2515  */
2516 int iam_it_load(struct iam_iterator *it, iam_pos_t pos)
2517 {
2518         assert_corr(it_state(it) == IAM_IT_DETACHED &&
2519                     it->ii_flags&IAM_IT_MOVE);
2520         assert_corr(iam_it_container(it)->ic_descr->id_ikey_size <= sizeof pos);
2521         return iam_it_iget(it, (struct iam_ikey *)&pos);
2522 }
2523 EXPORT_SYMBOL(iam_it_load);
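
/*
 * Illustrative sketch (not part of the original source): suspending an
 * iteration and resuming it later from the cookie, as allowed by the
 * store/load pair above.  The iterator is assumed to carry the IAM_IT_MOVE
 * flag and to be attached at a record when the cookie is taken:
 *
 *     iam_pos_t pos;
 *
 *     pos = iam_it_store(it);
 *     iam_it_put(it);
 *     iam_it_fini(it);
 *     ...
 *     iam_it_init(it, c, IAM_IT_MOVE, pd);
 *     result = iam_it_load(it, pos);
 */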
2524
2525 /***********************************************************************/
2526 /* invariants                                                          */
2527 /***********************************************************************/
2528
2529 static inline int ptr_inside(void *base, size_t size, void *ptr)
2530 {
2531         return (base <= ptr) && (ptr < base + size);
2532 }
2533
2534 int iam_frame_invariant(struct iam_frame *f)
2535 {
2536         return
2537                 (f->bh != NULL &&
2538                 f->bh->b_data != NULL &&
2539                 ptr_inside(f->bh->b_data, f->bh->b_size, f->entries) &&
2540                 ptr_inside(f->bh->b_data, f->bh->b_size, f->at) &&
2541                 f->entries <= f->at);
2542 }
2543 int iam_leaf_invariant(struct iam_leaf *l)
2544 {
2545         return
2546                 l->il_bh != NULL &&
2547                 l->il_bh->b_data != NULL &&
2548                 ptr_inside(l->il_bh->b_data, l->il_bh->b_size, l->il_entries) &&
2549                 ptr_inside(l->il_bh->b_data, l->il_bh->b_size, l->il_at) &&
2550                 l->il_entries <= l->il_at;
2551 }
2552
2553 int iam_path_invariant(struct iam_path *p)
2554 {
2555         int i;
2556
2557         if (p->ip_container == NULL ||
2558             p->ip_indirect < 0 || p->ip_indirect > DX_MAX_TREE_HEIGHT - 1 ||
2559             p->ip_frame != p->ip_frames + p->ip_indirect ||
2560             !iam_leaf_invariant(&p->ip_leaf))
2561                 return 0;
2562         for (i = 0; i < ARRAY_SIZE(p->ip_frames); ++i) {
2563                 if (i <= p->ip_indirect) {
2564                         if (!iam_frame_invariant(&p->ip_frames[i]))
2565                                 return 0;
2566                 }
2567         }
2568         return 1;
2569 }
2570
2571 int iam_it_invariant(struct iam_iterator *it)
2572 {
2573         return
2574                 (it->ii_state == IAM_IT_DETACHED ||
2575                  it->ii_state == IAM_IT_ATTACHED ||
2576                  it->ii_state == IAM_IT_SKEWED) &&
2577                 !(it->ii_flags & ~(IAM_IT_MOVE | IAM_IT_WRITE)) &&
2578                 ergo(it->ii_state == IAM_IT_ATTACHED ||
2579                      it->ii_state == IAM_IT_SKEWED,
2580                      iam_path_invariant(&it->ii_path) &&
2581                      equi(it_at_rec(it), it->ii_state == IAM_IT_SKEWED));
2582 }
2583
2584 /*
2585  * Search container @c for record with key @k. If record is found, its data
2586  * are moved into @r.
2587  *
2588  * Return values: 0: found, -ENOENT: not-found, -ve: error
2589  */
2590 int iam_lookup(struct iam_container *c, const struct iam_key *k,
2591                struct iam_rec *r, struct iam_path_descr *pd)
2592 {
2593         struct iam_iterator it;
2594         int result;
2595
2596         iam_it_init(&it, c, 0, pd);
2597
2598         result = iam_it_get_exact(&it, k);
2599         if (result == 0)
2600                 /*
2601                  * record with required key found, copy it into user buffer
2602                  */
2603                 iam_reccpy(&it.ii_path.ip_leaf, r);
2604         iam_it_put(&it);
2605         iam_it_fini(&it);
2606         return result;
2607 }
2608 EXPORT_SYMBOL(iam_lookup);
2609
2610 /*
2611  * Insert new record @r with key @k into container @c (within context of
2612  * transaction @h).
2613  *
2614  * Return values: 0: success, -ve: error, including -EEXIST when record with
2615  * given key is already present.
2616  *
2617  * postcondition: ergo(result == 0 || result == -EEXIST,
2618  *                                  iam_lookup(c, k, r2) > 0);
2619  */
2620 int iam_insert(handle_t *h, struct iam_container *c, const struct iam_key *k,
2621                const struct iam_rec *r, struct iam_path_descr *pd)
2622 {
2623         struct iam_iterator it;
2624         int result;
2625
2626         iam_it_init(&it, c, IAM_IT_WRITE, pd);
2627
2628         result = iam_it_get_exact(&it, k);
2629         if (result == -ENOENT)
2630                 result = iam_it_rec_insert(h, &it, k, r);
2631         else if (result == 0)
2632                 result = -EEXIST;
2633         iam_it_put(&it);
2634         iam_it_fini(&it);
2635         return result;
2636 }
2637 EXPORT_SYMBOL(iam_insert);
2638
2639 /*
2640  * Update record with the key @k in container @c (within context of
2641  * transaction @h), new record is given by @r.
2642  *
2643  * Return values: +1: skipped because the record value is unchanged, 0: success,
2644  * -ve: error, including -ENOENT if no record with the given key is found.
2645  */
2646 int iam_update(handle_t *h, struct iam_container *c, const struct iam_key *k,
2647                const struct iam_rec *r, struct iam_path_descr *pd)
2648 {
2649         struct iam_iterator it;
2650         struct iam_leaf *folio;
2651         int result;
2652
2653         iam_it_init(&it, c, IAM_IT_WRITE, pd);
2654
2655         result = iam_it_get_exact(&it, k);
2656         if (result == 0) {
2657                 folio = &it.ii_path.ip_leaf;
2658                 result = iam_leaf_ops(folio)->rec_eq(folio, r);
2659                 if (result == 0)
2660                         iam_it_rec_set(h, &it, r);
2661                 else
2662                         result = 1;
2663         }
2664         iam_it_put(&it);
2665         iam_it_fini(&it);
2666         return result;
2667 }
2668 EXPORT_SYMBOL(iam_update);
2669
2670 /*
2671  * Delete existing record with key @k.
2672  *
2673  * Return values: 0: success, -ENOENT: not-found, -ve: other error.
2674  *
2675  * postcondition: ergo(result == 0 || result == -ENOENT,
2676  *                                 !iam_lookup(c, k, *));
2677  */
2678 int iam_delete(handle_t *h, struct iam_container *c, const struct iam_key *k,
2679                struct iam_path_descr *pd)
2680 {
2681         struct iam_iterator it;
2682         int result;
2683
2684         iam_it_init(&it, c, IAM_IT_WRITE, pd);
2685
2686         result = iam_it_get_exact(&it, k);
2687         if (result == 0)
2688                 iam_it_rec_delete(h, &it);
2689         iam_it_put(&it);
2690         iam_it_fini(&it);
2691         return result;
2692 }
2693 EXPORT_SYMBOL(iam_delete);
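
/*
 * Illustrative sketch (not part of the original source): the high-level
 * container operations above used together.  "h" is a journal handle, "c"
 * an initialized container, "pd" a path descriptor, and "k", "r", "r2" a
 * key and two record buffers of the sizes configured for the container:
 *
 *     rc = iam_insert(h, c, k, r, pd);
 *     if (rc == 0)
 *             rc = iam_lookup(c, k, r2, pd);
 *     if (rc == 0)
 *             rc = iam_delete(h, c, k, pd);
 */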
2694
2695 int iam_root_limit(int rootgap, int blocksize, int size)
2696 {
2697         int limit;
2698         int nlimit;
2699
2700         limit = (blocksize - rootgap) / size;
2701         nlimit = blocksize / size;
2702         if (limit == nlimit)
2703                 limit--;
2704         return limit;
2705 }
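
/*
 * Worked example (illustrative, not part of the original source): with
 * blocksize 4096, rootgap 32 and an entry size of 8, limit = (4096 - 32) / 8
 * = 508 while nlimit = 4096 / 8 = 512, so 508 is returned unchanged.  Only
 * when the gap is small enough that both divisions give the same value is
 * one entry subtracted, keeping the root limit strictly below the non-root
 * limit (split_index_node() asserts dx_root_limit() < dx_node_limit()).
 */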