1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * GPL HEADER START
5  *
6  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 only,
10  * as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License version 2 for more details (a copy is included
16  * in the LICENSE file that accompanied this code).
17  *
18  * You should have received a copy of the GNU General Public License
19  * version 2 along with this program; If not, see [sun.com URL with a
20  * copy of GPLv2].
21  *
22  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23  * CA 95054 USA or visit www.sun.com if you need additional information or
24  * have any questions.
25  *
26  * GPL HEADER END
27  */
28 /*
29  * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
30  * Use is subject to license terms.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * iam.c
37  * Top-level entry points into iam module
38  *
39  * Author: Wang Di <wangdi@clusterfs.com>
40  * Author: Nikita Danilov <nikita@clusterfs.com>
41  */
42
43 /*
44  * iam: big theory statement.
45  *
46  * iam (Index Access Module) is a module providing abstraction of persistent
47  * transactional container on top of generalized ldiskfs htree.
48  *
49  * iam supports:
50  *
51  *     - key, pointer, and record size specifiable per container.
52  *
53  *     - trees taller than 2 index levels.
54  *
55  *     - read/write to existing ldiskfs htree directories as iam containers.
56  *
57  * iam container is a tree, consisting of leaf nodes containing keys and
58  * records stored in this container, and index nodes, containing keys and
59  * pointers to leaf or index nodes.
60  *
61  * iam does not work with keys directly, instead it calls user-supplied key
62  * comparison function (->dpo_keycmp()).
63  *
64  * Pointers are (currently) interpreted as logical offsets (measured in
65  * blocks) within the underlying flat file on top of which the iam tree lives.
66  *
67  * On-disk format:
68  *
69  * iam mostly tries to reuse existing htree formats.
70  *
71  * Format of index node:
72  *
73  * +-----+-------+-------+-------+------+-------+------------+
74  * |     | count |       |       |      |       |            |
75  * | gap |   /   | entry | entry | .... | entry | free space |
76  * |     | limit |       |       |      |       |            |
77  * +-----+-------+-------+-------+------+-------+------------+
78  *
79  *       gap           this part of node is never accessed by iam code. It
80  *                     exists for binary compatibility with ldiskfs htree (that,
81  *                     in turn, stores fake struct ext2_dirent for ext2
82  *                     compatibility), and to keep some unspecified per-node
83  *                     data. Gap can be different for root and non-root index
84  *                     nodes. Gap size can be specified for each container
85  *                     (gap of 0 is allowed).
86  *
87  *       count/limit   current number of entries in this node, and the maximal
88  *                     number of entries that can fit into node. count/limit
89  *                     has the same size as entry, and is itself counted in
90  *                     count.
91  *
92  *       entry         index entry: consists of a key immediately followed by
93  *                     a pointer to a child node. Size of a key and size of a
94  *                     pointer depend on the container. An entry has neither
95  *                     alignment nor padding.
96  *
97  *       free space    portion of the node to which new entries are added
98  *
99  * Entries in index node are sorted by their key value.
100  *
101  * Format of a leaf node is not specified. Generic iam code accesses leaf
102  * nodes through ->id_leaf methods in struct iam_descr.
103  *
104  */
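/*
 * A minimal sketch (not compiled) of the arithmetic implied by the layout
 * described above: an entry is a key immediately followed by a pointer, with
 * no alignment or padding, and the count/limit header occupies one
 * entry-sized slot. The real code goes through iam_entry_shift() and the
 * per-container descriptor; the helper names and the exact accounting of the
 * header slot below are assumptions for illustration only.
 */
#if 0
static unsigned example_entry_offset(unsigned gap, unsigned key_size,
                                     unsigned ptr_size, unsigned i)
{
        unsigned entry_size = key_size + ptr_size;

        /* slot 0 after the gap is the count/limit header, entries follow */
        return gap + i * entry_size;
}

static unsigned example_node_limit(unsigned block_size, unsigned gap,
                                   unsigned key_size, unsigned ptr_size)
{
        /* how many entry-sized slots (including the header) fit in a node */
        return (block_size - gap) / (key_size + ptr_size);
}
#endif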
105
106 #include <linux/module.h>
107 #include <linux/fs.h>
108 #include <linux/pagemap.h>
109 #include <linux/time.h>
110 #include <linux/fcntl.h>
111 #include <linux/stat.h>
112 #include <linux/string.h>
113 #include <linux/quotaops.h>
114 #include <linux/buffer_head.h>
115 #include <linux/smp_lock.h>
116 #include "osd_internal.h"
117
118 #include "xattr.h"
119 #include "acl.h"
120
121 /*
122  * List of all registered formats.
123  *
124  * No locking. Callers synchronize.
125  */
126 static CFS_LIST_HEAD(iam_formats);
127
128 void iam_format_register(struct iam_format *fmt)
129 {
130         cfs_list_add(&fmt->if_linkage, &iam_formats);
131 }
132 EXPORT_SYMBOL(iam_format_register);
133
134 /*
135  * Determine format of given container. This is done by scanning list of
136  * registered formats and calling ->if_guess() method of each in turn.
137  */
138 static int iam_format_guess(struct iam_container *c)
139 {
140         int result;
141         struct iam_format *fmt;
142
143         /*
144          * XXX temporary initialization hook.
145          */
146         {
147                 static int initialized = 0;
148
149                 if (!initialized) {
150                         iam_lvar_format_init();
151                         iam_lfix_format_init();
152                         initialized = 1;
153                 }
154         }
155
156         result = -ENOENT;
157         cfs_list_for_each_entry(fmt, &iam_formats, if_linkage) {
158                 result = fmt->if_guess(c);
159                 if (result == 0)
160                         break;
161         }
162         return result;
163 }
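/*
 * A minimal sketch (not compiled) of how a format provider plugs into the
 * registry scanned above. Only the ->if_guess() method and ->if_linkage list
 * head are assumed here, as used by iam_format_guess(); a real provider
 * (lfix, lvar) also fills in the container descriptor when its guess
 * succeeds.
 */
#if 0
static int example_guess(struct iam_container *c)
{
        /* inspect the root block of c->ic_object; return 0 if recognized */
        return -ENOENT;
}

static struct iam_format example_format = {
        .if_guess = example_guess,
};

static void example_format_init(void)
{
        iam_format_register(&example_format);
}
#endif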
164
165 /*
166  * Initialize container @c.
167  */
168 int iam_container_init(struct iam_container *c,
169                        struct iam_descr *descr, struct inode *inode)
170 {
171         memset(c, 0, sizeof *c);
172         c->ic_descr  = descr;
173         c->ic_object = inode;
174         cfs_init_rwsem(&c->ic_sem);
175         return 0;
176 }
177 EXPORT_SYMBOL(iam_container_init);
178
179 /*
180  * Determine container format.
181  */
182 int iam_container_setup(struct iam_container *c)
183 {
184         return iam_format_guess(c);
185 }
186 EXPORT_SYMBOL(iam_container_setup);
187
188 /*
189  * Finalize container @c, release all resources.
190  */
191 void iam_container_fini(struct iam_container *c)
192 {
193 }
194 EXPORT_SYMBOL(iam_container_fini);
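/*
 * A minimal life-cycle sketch (not compiled) for the three calls above:
 * initialize the container over a backing inode, let the registered formats
 * guess the on-disk layout, use it, then finalize. The descriptor and inode
 * are assumed to come from the caller (the osd object code).
 */
#if 0
static int example_container_use(struct iam_descr *descr, struct inode *inode)
{
        struct iam_container c;
        int result;

        result = iam_container_init(&c, descr, inode);
        if (result == 0) {
                result = iam_container_setup(&c);
                if (result == 0) {
                        /* ... look up, iterate, insert or delete records ... */
                }
                iam_container_fini(&c);
        }
        return result;
}
#endif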
195
196 void iam_path_init(struct iam_path *path, struct iam_container *c,
197                    struct iam_path_descr *pd)
198 {
199         memset(path, 0, sizeof *path);
200         path->ip_container = c;
201         path->ip_frame = path->ip_frames;
202         path->ip_data = pd;
203         path->ip_leaf.il_path = path;
204 }
205
206 static void iam_leaf_fini(struct iam_leaf *leaf);
207
208 void iam_path_release(struct iam_path *path)
209 {
210         int i;
211
212         for (i = 0; i < ARRAY_SIZE(path->ip_frames); i++) {
213                 if (path->ip_frames[i].bh != NULL) {
214                         brelse(path->ip_frames[i].bh);
215                         path->ip_frames[i].bh = NULL;
216                 }
217         }
218 }
219
220 void iam_path_fini(struct iam_path *path)
221 {
222         iam_leaf_fini(&path->ip_leaf);
223         iam_path_release(path);
224 }
225
226
227 void iam_path_compat_init(struct iam_path_compat *path, struct inode *inode)
228 {
229         int i;
230
231         path->ipc_hinfo = &path->ipc_hinfo_area;
232         for (i = 0; i < ARRAY_SIZE(path->ipc_scratch); ++i)
233                 path->ipc_descr.ipd_key_scratch[i] =
234                         (struct iam_ikey *)&path->ipc_scratch[i];
235
236         iam_path_init(&path->ipc_path, &path->ipc_container, &path->ipc_descr);
237 }
238
239 void iam_path_compat_fini(struct iam_path_compat *path)
240 {
241         iam_path_fini(&path->ipc_path);
242 }
243
244 /*
245  * Helper function initializing iam_path_descr and its key scratch area.
246  */
247 struct iam_path_descr *iam_ipd_alloc(void *area, int keysize)
248 {
249         struct iam_path_descr *ipd;
250         void *karea;
251         int i;
252
253         ipd = area;
254         karea = ipd + 1;
255         for (i = 0; i < ARRAY_SIZE(ipd->ipd_key_scratch); ++i, karea += keysize)
256                 ipd->ipd_key_scratch[i] = karea;
257         return ipd;
258 }
259 EXPORT_SYMBOL(iam_ipd_alloc);
260
261 void iam_ipd_free(struct iam_path_descr *ipd)
262 {
263 }
264 EXPORT_SYMBOL(iam_ipd_free);
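/*
 * A minimal usage sketch (not compiled) for the pair above: the caller
 * supplies an area large enough for the descriptor followed by
 * ARRAY_SIZE(ipd->ipd_key_scratch) keys of @keysize bytes, exactly as laid
 * out by iam_ipd_alloc(). The buffer size below is an assumption for
 * illustration; real callers use preallocated per-thread storage.
 */
#if 0
static void example_ipd_use(int keysize)
{
        /* assumed big enough for the descriptor plus all scratch keys */
        char area[sizeof(struct iam_path_descr) + 4 * 32];
        struct iam_path_descr *ipd;

        ipd = iam_ipd_alloc(area, keysize);
        /* ... pass ipd to iam_path_init()/iam_it_init() ... */
        iam_ipd_free(ipd);
}
#endif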
265
266 int iam_node_read(struct iam_container *c, iam_ptr_t ptr,
267                   handle_t *h, struct buffer_head **bh)
268 {
269         int result = 0;
270
271         *bh = ldiskfs_bread(h, c->ic_object, (int)ptr, 0, &result);
272         if (*bh == NULL)
273                 result = -EIO;
274         return result;
275 }
276
277 /*
278  * Return pointer to current leaf record. Pointer is valid while corresponding
279  * leaf node is locked and pinned.
280  */
281 static struct iam_rec *iam_leaf_rec(const struct iam_leaf *leaf)
282 {
283         return iam_leaf_ops(leaf)->rec(leaf);
284 }
285
286 /*
287  * Return pointer to the current leaf key. This function returns pointer to
288  * the key stored in node.
289  *
290  * Caller should assume that returned pointer is only valid while leaf node is
291  * pinned and locked.
292  */
293 static struct iam_key *iam_leaf_key(const struct iam_leaf *leaf)
294 {
295         return iam_leaf_ops(leaf)->key(leaf);
296 }
297
298 static int iam_leaf_key_size(const struct iam_leaf *leaf)
299 {
300         return iam_leaf_ops(leaf)->key_size(leaf);
301 }
302
303 static struct iam_ikey *iam_leaf_ikey(const struct iam_leaf *leaf,
304                                       struct iam_ikey *key)
305 {
306         return iam_leaf_ops(leaf)->ikey(leaf, key);
307 }
308
309 static int iam_leaf_keycmp(const struct iam_leaf *leaf,
310                            const struct iam_key *key)
311 {
312         return iam_leaf_ops(leaf)->key_cmp(leaf, key);
313 }
314
315 static int iam_leaf_keyeq(const struct iam_leaf *leaf,
316                           const struct iam_key *key)
317 {
318         return iam_leaf_ops(leaf)->key_eq(leaf, key);
319 }
320
321 #if LDISKFS_INVARIANT_ON
322 static int iam_leaf_check(struct iam_leaf *leaf);
323 extern int dx_node_check(struct iam_path *p, struct iam_frame *f);
324
325 static int iam_path_check(struct iam_path *p)
326 {
327         int i;
328         int result;
329         struct iam_frame *f;
330         struct iam_descr *param;
331
332         result = 1;
333         param = iam_path_descr(p);
334         for (i = 0; result && i < ARRAY_SIZE(p->ip_frames); ++i) {
335                 f = &p->ip_frames[i];
336                 if (f->bh != NULL) {
337                         result = dx_node_check(p, f);
338                         if (result)
339                                 result = !param->id_ops->id_node_check(p, f);
340                 }
341         }
342         if (result && p->ip_leaf.il_bh != NULL)
343                 result = iam_leaf_check(&p->ip_leaf);
344         if (result == 0) {
345                 ldiskfs_std_error(iam_path_obj(p)->i_sb, result);
346         }
347         return result;
348 }
349 #endif
350
351 static int iam_leaf_load(struct iam_path *path)
352 {
353         iam_ptr_t block;
354         int err;
355         struct iam_container *c;
356         struct buffer_head   *bh;
357         struct iam_leaf      *leaf;
358         struct iam_descr     *descr;
359
360         c     = path->ip_container;
361         leaf  = &path->ip_leaf;
362         descr = iam_path_descr(path);
363         block = path->ip_frame->leaf;
364         if (block == 0) {
365                 /* XXX bug 11027 */
366                 printk(CFS_KERN_EMERG "wrong leaf: %lu %d [%p %p %p]\n",
367                        (long unsigned)path->ip_frame->leaf,
368                        dx_get_count(dx_node_get_entries(path, path->ip_frame)),
369                        path->ip_frames[0].bh, path->ip_frames[1].bh,
370                        path->ip_frames[2].bh);
371         }
372         err   = descr->id_ops->id_node_read(c, block, NULL, &bh);
373         if (err == 0) {
374                 leaf->il_bh = bh;
375                 leaf->il_curidx = block;
376                 err = iam_leaf_ops(leaf)->init(leaf);
377                 assert_inv(ergo(err == 0, iam_leaf_check(leaf)));
378         }
379         return err;
380 }
381
382 static void iam_unlock_htree(struct inode *dir, struct dynlock_handle *lh)
383 {
384         if (lh != NULL)
385                 dynlock_unlock(&LDISKFS_I(dir)->i_htree_lock, lh);
386 }
387
388
389 static void iam_leaf_unlock(struct iam_leaf *leaf)
390 {
391         if (leaf->il_lock != NULL) {
392                 iam_unlock_htree(iam_leaf_container(leaf)->ic_object,
393                                 leaf->il_lock);
394                 do_corr(schedule());
395                 leaf->il_lock = NULL;
396         }
397 }
398
399 static void iam_leaf_fini(struct iam_leaf *leaf)
400 {
401         if (leaf->il_path != NULL) {
402                 iam_leaf_unlock(leaf);
403                 assert_inv(ergo(leaf->il_bh != NULL, iam_leaf_check(leaf)));
404                 iam_leaf_ops(leaf)->fini(leaf);
405                 if (leaf->il_bh) {
406                         brelse(leaf->il_bh);
407                         leaf->il_bh = NULL;
408                         leaf->il_curidx = 0;
409                 }
410         }
411 }
412
413 static void iam_leaf_start(struct iam_leaf *folio)
414 {
415         iam_leaf_ops(folio)->start(folio);
416 }
417
418 void iam_leaf_next(struct iam_leaf *folio)
419 {
420         iam_leaf_ops(folio)->next(folio);
421 }
422
423 static void iam_leaf_rec_add(struct iam_leaf *leaf, const struct iam_key *key,
424                              const struct iam_rec *rec)
425 {
426         iam_leaf_ops(leaf)->rec_add(leaf, key, rec);
427 }
428
429 static void iam_rec_del(struct iam_leaf *leaf, int shift)
430 {
431         iam_leaf_ops(leaf)->rec_del(leaf, shift);
432 }
433
434 int iam_leaf_at_end(const struct iam_leaf *leaf)
435 {
436         return iam_leaf_ops(leaf)->at_end(leaf);
437 }
438
439 void iam_leaf_split(struct iam_leaf *l, struct buffer_head **bh, iam_ptr_t nr)
440 {
441         iam_leaf_ops(l)->split(l, bh, nr);
442 }
443
444 int iam_leaf_can_add(const struct iam_leaf *l,
445                      const struct iam_key *k, const struct iam_rec *r)
446 {
447         return iam_leaf_ops(l)->can_add(l, k, r);
448 }
449
450 #if LDISKFS_INVARIANT_ON
451 static int iam_leaf_check(struct iam_leaf *leaf)
452 {
453         return 1;
454 #if 0
455         struct iam_lentry    *orig;
456         struct iam_path      *path;
457         struct iam_container *bag;
458         struct iam_ikey       *k0;
459         struct iam_ikey       *k1;
460         int result;
461         int first;
462
463         orig = leaf->il_at;
464         path = iam_leaf_path(leaf);
465         bag  = iam_leaf_container(leaf);
466
467         result = iam_leaf_ops(leaf)->init(leaf);
468         if (result != 0)
469                 return result;
470
471         first = 1;
472         iam_leaf_start(leaf);
473         k0 = iam_path_ikey(path, 0);
474         k1 = iam_path_ikey(path, 1);
475         while (!iam_leaf_at_end(leaf)) {
476                 iam_ikeycpy(bag, k0, k1);
477                 iam_ikeycpy(bag, k1, iam_leaf_ikey(leaf, k1));
478                 if (!first && iam_ikeycmp(bag, k0, k1) > 0) {
479                         return 0;
480                 }
481                 first = 0;
482                 iam_leaf_next(leaf);
483         }
484         leaf->il_at = orig;
485         return 1;
486 #endif
487 }
488 #endif
489
490 static int iam_txn_dirty(handle_t *handle,
491                          struct iam_path *path, struct buffer_head *bh)
492 {
493         int result;
494
495         result = ldiskfs_journal_dirty_metadata(handle, bh);
496         if (result != 0)
497                 ldiskfs_std_error(iam_path_obj(path)->i_sb, result);
498         return result;
499 }
500
501 static int iam_txn_add(handle_t *handle,
502                        struct iam_path *path, struct buffer_head *bh)
503 {
504         int result;
505
506         result = ldiskfs_journal_get_write_access(handle, bh);
507         if (result != 0)
508                 ldiskfs_std_error(iam_path_obj(path)->i_sb, result);
509         return result;
510 }
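/*
 * A minimal sketch (not compiled) of how the two helpers above are paired by
 * callers in this file (see iam_it_rec_set()): obtain journal write access
 * to a buffer before modifying it, then mark it dirty in the same handle so
 * the change becomes part of the transaction.
 */
#if 0
static int example_modify_block(handle_t *h, struct iam_path *path,
                                struct buffer_head *bh)
{
        int result;

        result = iam_txn_add(h, path, bh);      /* get write access */
        if (result == 0) {
                /* ... modify the contents of bh->b_data here ... */
                result = iam_txn_dirty(h, path, bh);    /* mark dirty */
        }
        return result;
}
#endif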
511
512 /***********************************************************************/
513 /* iterator interface                                                  */
514 /***********************************************************************/
515
516 static enum iam_it_state it_state(const struct iam_iterator *it)
517 {
518         return it->ii_state;
519 }
520
521 /*
522  * Helper function returning the container an iterator is attached to.
523  */
524 static struct iam_container *iam_it_container(const struct iam_iterator *it)
525 {
526         return it->ii_path.ip_container;
527 }
528
529 static inline int it_keycmp(const struct iam_iterator *it,
530                             const struct iam_key *k)
531 {
532         return iam_leaf_keycmp(&it->ii_path.ip_leaf, k);
533 }
534
535 static inline int it_keyeq(const struct iam_iterator *it,
536                            const struct iam_key *k)
537 {
538         return iam_leaf_keyeq(&it->ii_path.ip_leaf, k);
539 }
540
541 static int it_ikeycmp(const struct iam_iterator *it, const struct iam_ikey *ik)
542 {
543         return iam_ikeycmp(it->ii_path.ip_container,
544                            iam_leaf_ikey(&it->ii_path.ip_leaf,
545                                          iam_path_ikey(&it->ii_path, 0)), ik);
546 }
547
548 static inline int it_at_rec(const struct iam_iterator *it)
549 {
550         return !iam_leaf_at_end(&it->ii_path.ip_leaf);
551 }
552
553 static inline int it_before(const struct iam_iterator *it)
554 {
555         return it_state(it) == IAM_IT_SKEWED && it_at_rec(it);
556 }
557
558 /*
559  * Helper wrapper around iam_it_get(): returns 0 (success) only when record
560  * with exactly the same key as the one asked for is found.
561  */
562 static int iam_it_get_exact(struct iam_iterator *it, const struct iam_key *k)
563 {
564         int result;
565
566         result = iam_it_get(it, k);
567         if (result > 0)
568                 result = 0;
569         else if (result == 0)
570                 /*
571                  * Return -ENOENT if cursor is located above record with a key
572                  * different from one specified, or in the empty leaf.
573                  *
574                  * XXX returning -ENOENT only works if iam_it_get() never
575                  * returns -ENOENT as a legitimate error.
576                  */
577                 result = -ENOENT;
578         return result;
579 }
580
581 void iam_container_write_lock(struct iam_container *ic)
582 {
583         cfs_down_write(&ic->ic_sem);
584 }
585
586 void iam_container_write_unlock(struct iam_container *ic)
587 {
588         cfs_up_write(&ic->ic_sem);
589 }
590
591 void iam_container_read_lock(struct iam_container *ic)
592 {
593         cfs_down_read(&ic->ic_sem);
594 }
595
596 void iam_container_read_unlock(struct iam_container *ic)
597 {
598         cfs_up_read(&ic->ic_sem);
599 }
600
601 /*
602  * Initialize iterator to IAM_IT_DETACHED state.
603  *
604  * postcondition: it_state(it) == IAM_IT_DETACHED
605  */
606 int  iam_it_init(struct iam_iterator *it, struct iam_container *c, __u32 flags,
607                  struct iam_path_descr *pd)
608 {
609         memset(it, 0, sizeof *it);
610         it->ii_flags  = flags;
611         it->ii_state  = IAM_IT_DETACHED;
612         iam_path_init(&it->ii_path, c, pd);
613         return 0;
614 }
615 EXPORT_SYMBOL(iam_it_init);
616
617 /*
618  * Finalize iterator and release all resources.
619  *
620  * precondition: it_state(it) == IAM_IT_DETACHED
621  */
622 void iam_it_fini(struct iam_iterator *it)
623 {
624         assert_corr(it_state(it) == IAM_IT_DETACHED);
625         iam_path_fini(&it->ii_path);
626 }
627 EXPORT_SYMBOL(iam_it_fini);
628
629 /*
630  * These locking primitives are used to protect parts of the directory's
631  * htree. The protection unit is a block: either a leaf or an index node.
632  */
633 struct dynlock_handle *iam_lock_htree(struct inode *dir, unsigned long value,
634                                      enum dynlock_type lt)
635 {
636         return dynlock_lock(&LDISKFS_I(dir)->i_htree_lock, value, lt, GFP_NOFS);
637 }
638
639
640
641 int iam_index_lock(struct iam_path *path, struct dynlock_handle **lh)
642 {
643         struct iam_frame *f;
644
645         for (f = path->ip_frame; f >= path->ip_frames; --f, ++lh) {
646                 do_corr(schedule());
647                 *lh = iam_lock_htree(iam_path_obj(path), f->curidx, DLT_READ);
648                 if (*lh == NULL)
649                         return -ENOMEM;
650         }
651         return 0;
652 }
653
654 /*
655  * Fast check for frame consistency.
656  */
657 static int iam_check_fast(struct iam_path *path, struct iam_frame *frame)
658 {
659         struct iam_container *bag;
660         struct iam_entry *next;
661         struct iam_entry *last;
662         struct iam_entry *entries;
663         struct iam_entry *at;
664
665         bag     = path->ip_container;
666         at      = frame->at;
667         entries = frame->entries;
668         last    = iam_entry_shift(path, entries, dx_get_count(entries) - 1);
669
670         if (unlikely(at > last))
671                 return -EAGAIN;
672
673         if (unlikely(dx_get_block(path, at) != frame->leaf))
674                 return -EAGAIN;
675
676         if (unlikely(iam_ikeycmp(bag, iam_ikey_at(path, at),
677                                  path->ip_ikey_target) > 0))
678                 return -EAGAIN;
679
680         next = iam_entry_shift(path, at, +1);
681         if (next <= last) {
682                 if (unlikely(iam_ikeycmp(bag, iam_ikey_at(path, next),
683                                          path->ip_ikey_target) <= 0))
684                         return -EAGAIN;
685         }
686         return 0;
687 }
688
689 int dx_index_is_compat(struct iam_path *path)
690 {
691         return iam_path_descr(path) == NULL;
692 }
693
694 /*
695  * iam_find_position
696  *
697  * Binary search for the position of the specified index key (hash) in an index node.
698  *
699  */
700
701 struct iam_entry *iam_find_position(struct iam_path *path,
702                                    struct iam_frame *frame)
703 {
704         int count;
705         struct iam_entry *p;
706         struct iam_entry *q;
707         struct iam_entry *m;
708
709         count = dx_get_count(frame->entries);
710         assert_corr(count && count <= dx_get_limit(frame->entries));
711         p = iam_entry_shift(path, frame->entries,
712                             dx_index_is_compat(path) ? 1 : 2);
713         q = iam_entry_shift(path, frame->entries, count - 1);
714         while (p <= q) {
715                 m = iam_entry_shift(path, p, iam_entry_diff(path, q, p) / 2);
716                 if (iam_ikeycmp(path->ip_container, iam_ikey_at(path, m),
717                                 path->ip_ikey_target) > 0)
718                         q = iam_entry_shift(path, m, -1);
719                 else
720                         p = iam_entry_shift(path, m, +1);
721         }
722         return iam_entry_shift(path, p, -1);
723 }
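/*
 * A minimal sketch (not compiled) of the same binary search over a plain
 * sorted array, to make the invariant explicit: on exit @p is the first
 * entry with a key greater than the target, so the entry just before it
 * (what the function above returns) is the last one with key <= target,
 * i.e. the child to descend into. The header-slot handling here follows the
 * compat case only.
 */
#if 0
static int example_find_position(const int *keys, int count, int target)
{
        int p = 1;              /* slot 0 is the count/limit header */
        int q = count - 1;

        while (p <= q) {
                int m = p + (q - p) / 2;

                if (keys[m] > target)
                        q = m - 1;
                else
                        p = m + 1;
        }
        return p - 1;           /* last entry with keys[i] <= target */
}
#endif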
724
725
726
727 static iam_ptr_t iam_find_ptr(struct iam_path *path, struct iam_frame *frame)
728 {
729         return dx_get_block(path, iam_find_position(path, frame));
730 }
731
732 void iam_insert_key(struct iam_path *path, struct iam_frame *frame,
733                     const struct iam_ikey *key, iam_ptr_t ptr)
734 {
735         struct iam_entry *entries = frame->entries;
736         struct iam_entry *new = iam_entry_shift(path, frame->at, +1);
737         int count = dx_get_count(entries);
738
739         /*
740          * Unfortunately we cannot assert this, as this function is sometimes
741          * called by VFS under i_sem and without pdirops lock.
742          */
743         assert_corr(1 || iam_frame_is_locked(path, frame));
744         assert_corr(count < dx_get_limit(entries));
745         assert_corr(frame->at < iam_entry_shift(path, entries, count));
746         assert_inv(dx_node_check(path, frame));
747
748         memmove(iam_entry_shift(path, new, 1), new,
749                 (char *)iam_entry_shift(path, entries, count) - (char *)new);
750         dx_set_ikey(path, new, key);
751         dx_set_block(path, new, ptr);
752         dx_set_count(entries, count + 1);
753         assert_inv(dx_node_check(path, frame));
754 }
755
756 void iam_insert_key_lock(struct iam_path *path, struct iam_frame *frame,
757                          const struct iam_ikey *key, iam_ptr_t ptr)
758 {
759         iam_lock_bh(frame->bh);
760         iam_insert_key(path, frame, key, ptr);
761         iam_unlock_bh(frame->bh);
762 }
763 /*
764  * returns 0 if path was unchanged, -EAGAIN otherwise.
765  */
766 static int iam_check_path(struct iam_path *path, struct iam_frame *frame)
767 {
768         int equal;
769
770         iam_lock_bh(frame->bh);
771         equal = iam_check_fast(path, frame) == 0 ||
772                 frame->leaf == iam_find_ptr(path, frame);
773         DX_DEVAL(iam_lock_stats.dls_bh_again += !equal);
774         iam_unlock_bh(frame->bh);
775
776         return equal ? 0 : -EAGAIN;
777 }
778
779 static int iam_lookup_try(struct iam_path *path)
780 {
781         u32 ptr;
782         int err = 0;
783         int i;
784
785         struct iam_descr *param;
786         struct iam_frame *frame;
787         struct iam_container *c;
788
789         param = iam_path_descr(path);
790         c = path->ip_container;
791
792         ptr = param->id_ops->id_root_ptr(c);
793         for (frame = path->ip_frames, i = 0; i <= path->ip_indirect;
794              ++frame, ++i) {
795                 err = param->id_ops->id_node_read(c, (iam_ptr_t)ptr, NULL,
796                                                   &frame->bh);
797                 do_corr(schedule());
798
799                 iam_lock_bh(frame->bh);
800                 /*
801                  * node must be initialized under bh lock because concurrent
802                  * creation procedure may change it and iam_lookup_try() will
803                  * see obsolete tree height. -bzzz
804                  */
805                 if (err != 0)
806                         break;
807
808                 if (LDISKFS_INVARIANT_ON) {
809                         err = param->id_ops->id_node_check(path, frame);
810                         if (err != 0)
811                                 break;
812                 }
813
814                 err = param->id_ops->id_node_load(path, frame);
815                 if (err != 0)
816                         break;
817
818                 assert_inv(dx_node_check(path, frame));
819                 /*
820                  * splitting may change the root index block and move the hash we're
821                  * looking for into another index block, so we have to check for
822                  * this situation and repeat from the beginning if the path changed
823                  * -bzzz
824                  */
825                 if (i > 0) {
826                         err = iam_check_path(path, frame - 1);
827                         if (err != 0)
828                                 break;
829                 }
830
831                 frame->at = iam_find_position(path, frame);
832                 frame->curidx = ptr;
833                 frame->leaf = ptr = dx_get_block(path, frame->at);
834
835                 iam_unlock_bh(frame->bh);
836                 do_corr(schedule());
837         }
838         if (err != 0)
839                 iam_unlock_bh(frame->bh);
840         path->ip_frame = --frame;
841         return err;
842 }
843
844 static int __iam_path_lookup(struct iam_path *path)
845 {
846         int err;
847         int i;
848
849         for (i = 0; i < DX_MAX_TREE_HEIGHT; ++ i)
850                 assert(path->ip_frames[i].bh == NULL);
851
852         do {
853                 err = iam_lookup_try(path);
854                 do_corr(schedule());
855                 if (err != 0)
856                         iam_path_fini(path);
857         } while (err == -EAGAIN);
858
859         return err;
860 }
861
862 /*
863  * returns 0 if path was unchanged, -EAGAIN otherwise.
864  */
865 static int iam_check_full_path(struct iam_path *path, int search)
866 {
867         struct iam_frame *bottom;
868         struct iam_frame *scan;
869         int i;
870         int result;
871
872         do_corr(schedule());
873
874         for (bottom = path->ip_frames, i = 0;
875              i < DX_MAX_TREE_HEIGHT && bottom->bh != NULL; ++bottom, ++i) {
876                 ; /* find last filled in frame */
877         }
878
879         /*
880          * Lock frames, bottom to top.
881          */
882         for (scan = bottom - 1; scan >= path->ip_frames; --scan)
883                 iam_lock_bh(scan->bh);
884         /*
885          * Check them top to bottom.
886          */
887         result = 0;
888         for (scan = path->ip_frames; scan < bottom; ++scan) {
889                 struct iam_entry *pos;
890
891                 if (search) {
892                         if (iam_check_fast(path, scan) == 0)
893                                 continue;
894
895                         pos = iam_find_position(path, scan);
896                         if (scan->leaf != dx_get_block(path, pos)) {
897                                 result = -EAGAIN;
898                                 break;
899                         }
900                         scan->at = pos;
901                 } else {
902                         pos = iam_entry_shift(path, scan->entries,
903                                               dx_get_count(scan->entries) - 1);
904                         if (scan->at > pos ||
905                             scan->leaf != dx_get_block(path, scan->at)) {
906                                 result = -EAGAIN;
907                                 break;
908                         }
909                 }
910         }
911
912         /*
913          * Unlock top to bottom.
914          */
915         for (scan = path->ip_frames; scan < bottom; ++scan)
916                 iam_unlock_bh(scan->bh);
917         DX_DEVAL(iam_lock_stats.dls_bh_full_again += !!result);
918         do_corr(schedule());
919
920         return result;
921 }
922
923
924 /*
925  * Performs path lookup and returns with found leaf (if any) locked by htree
926  * lock.
927  */
928 int iam_lookup_lock(struct iam_path *path,
929                    struct dynlock_handle **dl, enum dynlock_type lt)
930 {
931         int result;
932         struct inode *dir;
933
934         dir = iam_path_obj(path);
935         while ((result = __iam_path_lookup(path)) == 0) {
936                 do_corr(schedule());
937                 *dl = iam_lock_htree(dir, path->ip_frame->leaf, lt);
938                 if (*dl == NULL) {
939                         iam_path_fini(path);
940                         result = -ENOMEM;
941                         break;
942                 }
943                 do_corr(schedule());
944                 /*
945                  * while we were locking it, the leaf we just found may have been
946                  * split, so we need to check for this -bzzz
947                  */
948                 if (iam_check_full_path(path, 1) == 0)
949                         break;
950                 iam_unlock_htree(dir, *dl);
951                 *dl = NULL;
952                 iam_path_fini(path);
953         }
954         return result;
955 }
956 /*
957  * Performs tree top-to-bottom traversal starting from root, and loads leaf
958  * node.
959  */
960 static int iam_path_lookup(struct iam_path *path, int index)
961 {
962         struct iam_container *c;
963         struct iam_descr *descr;
964         struct iam_leaf  *leaf;
965         int result;
966
967         c = path->ip_container;
968         leaf = &path->ip_leaf;
969         descr = iam_path_descr(path);
970         result = iam_lookup_lock(path, &leaf->il_lock, DLT_WRITE);
971         assert_inv(iam_path_check(path));
972         do_corr(schedule());
973         if (result == 0) {
974                 result = iam_leaf_load(path);
975                 assert_inv(ergo(result == 0, iam_leaf_check(leaf)));
976                 if (result == 0) {
977                         do_corr(schedule());
978                         if (index)
979                                 result = iam_leaf_ops(leaf)->
980                                         ilookup(leaf, path->ip_ikey_target);
981                         else
982                                 result = iam_leaf_ops(leaf)->
983                                         lookup(leaf, path->ip_key_target);
984                         do_corr(schedule());
985                 }
986                 if (result < 0)
987                         iam_leaf_unlock(leaf);
988         }
989         return result;
990 }
991
992 /*
993  * Common part of iam_it_{i,}get().
994  */
995 static int __iam_it_get(struct iam_iterator *it, int index)
996 {
997         int result;
998         assert_corr(it_state(it) == IAM_IT_DETACHED);
999
1000         result = iam_path_lookup(&it->ii_path, index);
1001         if (result >= 0) {
1002                 int collision;
1003
1004                 collision = result & IAM_LOOKUP_LAST;
1005                 switch (result & ~IAM_LOOKUP_LAST) {
1006                 case IAM_LOOKUP_EXACT:
1007                         result = +1;
1008                         it->ii_state = IAM_IT_ATTACHED;
1009                         break;
1010                 case IAM_LOOKUP_OK:
1011                         result = 0;
1012                         it->ii_state = IAM_IT_ATTACHED;
1013                         break;
1014                 case IAM_LOOKUP_BEFORE:
1015                 case IAM_LOOKUP_EMPTY:
1016                         result = 0;
1017                         it->ii_state = IAM_IT_SKEWED;
1018                         break;
1019                 default:
1020                         assert(0);
1021                 }
1022                 result |= collision;
1023         }
1024         /*
1025          * See iam_it_get_exact() for explanation.
1026          */
1027         assert_corr(result != -ENOENT);
1028         return result;
1029 }
1030
1031 /*
1032  * The correct hash was found, but not the same key; iterate through the
1033  * hash collision chain looking for the correct record.
1034  */
1035 static int iam_it_collision(struct iam_iterator *it)
1036 {
1037         int result;
1038
1039         assert(ergo(it_at_rec(it), !it_keyeq(it, it->ii_path.ip_key_target)));
1040
1041         while ((result = iam_it_next(it)) == 0) {
1042                 do_corr(schedule());
1043                 if (it_ikeycmp(it, it->ii_path.ip_ikey_target) != 0)
1044                         return -ENOENT;
1045                 if (it_keyeq(it, it->ii_path.ip_key_target))
1046                         return 0;
1047         }
1048         return result;
1049 }
1050
1051 /*
1052  * Attach iterator. After successful completion, @it points to record with
1053  * least key not larger than @k.
1054  *
1055  * Return value: 0: positioned on existing record,
1056  *             +ve: exact position found,
1057  *             -ve: error.
1058  *
1059  * precondition:  it_state(it) == IAM_IT_DETACHED
1060  * postcondition: ergo(result == 0 && it_state(it) == IAM_IT_ATTACHED,
1061  *                     it_keycmp(it, k) <= 0)
1062  */
1063 int iam_it_get(struct iam_iterator *it, const struct iam_key *k)
1064 {
1065         int result;
1066         assert_corr(it_state(it) == IAM_IT_DETACHED);
1067
1068         it->ii_path.ip_ikey_target = NULL;
1069         it->ii_path.ip_key_target  = k;
1070
1071         result = __iam_it_get(it, 0);
1072
1073         if (result == IAM_LOOKUP_LAST) {
1074                 result = iam_it_collision(it);
1075                 if (result != 0) {
1076                         iam_it_put(it);
1077                         iam_it_fini(it);
1078                         result = __iam_it_get(it, 0);
1079                 } else
1080                         result = +1;
1081         }
1082         if (result > 0)
1083                 result &= ~IAM_LOOKUP_LAST;
1084
1085         assert_corr(ergo(result > 0, it_keycmp(it, k) == 0));
1086         assert_corr(ergo(result == 0 && it_state(it) == IAM_IT_ATTACHED,
1087                          it_keycmp(it, k) <= 0));
1088         return result;
1089 }
1090 EXPORT_SYMBOL(iam_it_get);
1091
1092 /*
1093  * Attach iterator by index key.
1094  */
1095 static int iam_it_iget(struct iam_iterator *it, const struct iam_ikey *k)
1096 {
1097         assert_corr(it_state(it) == IAM_IT_DETACHED);
1098
1099         it->ii_path.ip_ikey_target = k;
1100         return __iam_it_get(it, 1) & ~IAM_LOOKUP_LAST;
1101 }
1102
1103 /*
1104  * Attach iterator, and ensure it points to a record (not skewed).
1105  *
1106  * Return value: 0: positioned on existing record,
1107  *             +ve: exact position found,
1108  *             -ve: error.
1109  *
1110  * precondition:  it_state(it) == IAM_IT_DETACHED &&
1111  *                !(it->ii_flags&IAM_IT_WRITE)
1112  * postcondition: ergo(result == 0, it_state(it) == IAM_IT_ATTACHED)
1113  */
1114 int iam_it_get_at(struct iam_iterator *it, const struct iam_key *k)
1115 {
1116         int result;
1117         assert_corr(it_state(it) == IAM_IT_DETACHED &&
1118                     !(it->ii_flags&IAM_IT_WRITE));
1119         result = iam_it_get(it, k);
1120         if (result == 0) {
1121                 if (it_state(it) != IAM_IT_ATTACHED) {
1122                         assert_corr(it_state(it) == IAM_IT_SKEWED);
1123                         result = iam_it_next(it);
1124                 }
1125         }
1126         assert_corr(ergo(result >= 0, it_state(it) == IAM_IT_ATTACHED));
1127         return result;
1128 }
1129 EXPORT_SYMBOL(iam_it_get_at);
1130
1131 /*
1132  * Duplicates iterator.
1133  *
1134  * postcondition: it_state(dst) == it_state(src) &&
1135  *                iam_it_container(dst) == iam_it_container(src) &&
1136  *                dst->ii_flags == src->ii_flags &&
1137  *                ergo(it_state(src) == IAM_IT_ATTACHED,
1138  *                     iam_it_rec_get(dst) == iam_it_rec_get(src) &&
1139  *                     iam_it_key_get(dst) == iam_it_key_get(src))
1140  */
1141 void iam_it_dup(struct iam_iterator *dst, const struct iam_iterator *src)
1142 {
1143         dst->ii_flags     = src->ii_flags;
1144         dst->ii_state     = src->ii_state;
1145         /* XXX not yet. iam_path_dup(&dst->ii_path, &src->ii_path); */
1146         /*
1147          * XXX: duplicate lock.
1148          */
1149         assert_corr(it_state(dst) == it_state(src));
1150         assert_corr(iam_it_container(dst) == iam_it_container(src));
1151         assert_corr(dst->ii_flags == src->ii_flags);
1152         assert_corr(ergo(it_state(src) == IAM_IT_ATTACHED,
1153                     iam_it_rec_get(dst) == iam_it_rec_get(src) &&
1154                     iam_it_key_get(dst) == iam_it_key_get(src)));
1155
1156 }
1157
1158 /*
1159  * Detach iterator. Does nothing if it is already in the detached state.
1160  *
1161  * postcondition: it_state(it) == IAM_IT_DETACHED
1162  */
1163 void iam_it_put(struct iam_iterator *it)
1164 {
1165         if (it->ii_state != IAM_IT_DETACHED) {
1166                 it->ii_state = IAM_IT_DETACHED;
1167                 iam_leaf_fini(&it->ii_path.ip_leaf);
1168         }
1169 }
1170 EXPORT_SYMBOL(iam_it_put);
1171
1172 static struct iam_ikey *iam_it_ikey_get(const struct iam_iterator *it,
1173                                         struct iam_ikey *ikey);
1174
1175
1176 /*
1177  * This function increments the frame pointer to search the next leaf
1178  * block, and reads in the necessary intervening nodes if the search
1179  * should be necessary.  Whether or not the search is necessary is
1180  * controlled by the hash parameter.  If the hash value is even, then
1181  * the search is only continued if the next block starts with that
1182  * hash value.  This is used if we are searching for a specific file.
1183  *
1184  * If the hash value is HASH_NB_ALWAYS, then always go to the next block.
1185  *
1186  * This function returns 1 if the caller should continue to search,
1187  * or 0 if it should not.  If there is an error reading one of the
1188  * index blocks, it will a negative error code.
1189  *
1190  * If start_hash is non-null, it will be filled in with the starting
1191  * hash of the next page.
1192  */
1193 static int iam_htree_advance(struct inode *dir, __u32 hash,
1194                               struct iam_path *path, __u32 *start_hash,
1195                               int compat)
1196 {
1197         struct iam_frame *p;
1198         struct buffer_head *bh;
1199         int err, num_frames = 0;
1200         __u32 bhash;
1201
1202         p = path->ip_frame;
1203         /*
1204          * Find the next leaf page by incrementing the frame pointer.
1205          * If we run out of entries in the interior node, loop around and
1206          * increment pointer in the parent node.  When we break out of
1207          * this loop, num_frames indicates the number of interior
1208          * nodes that need to be read.
1209          */
1210         while (1) {
1211                 do_corr(schedule());
1212                 iam_lock_bh(p->bh);
1213                 p->at = iam_entry_shift(path, p->at, +1);
1214                 if (p->at < iam_entry_shift(path, p->entries,
1215                                             dx_get_count(p->entries))) {
1216                         p->leaf = dx_get_block(path, p->at);
1217                         iam_unlock_bh(p->bh);
1218                         break;
1219                 }
1220                 iam_unlock_bh(p->bh);
1221                 if (p == path->ip_frames)
1222                         return 0;
1223                 num_frames++;
1224                 --p;
1225         }
1226
1227         if (compat) {
1228                 /*
1229                  * Htree hash magic.
1230                  */
1231         /*
1232          * If the hash is 1, then continue only if the next page has a
1233          * continuation hash of any value.  This is used for readdir
1234          * handling.  Otherwise, check to see if the hash matches the
1235          * desired continuation hash.  If it doesn't, return since
1236          * there's no point to read in the successive index pages.
1237          */
1238                 dx_get_ikey(path, p->at, (struct iam_ikey *)&bhash);
1239         if (start_hash)
1240                 *start_hash = bhash;
1241         if ((hash & 1) == 0) {
1242                 if ((bhash & ~1) != hash)
1243                         return 0;
1244         }
1245         }
1246         /*
1247          * If the hash is HASH_NB_ALWAYS, we always go to the next
1248          * block so no check is necessary
1249          */
1250         while (num_frames--) {
1251                 iam_ptr_t idx;
1252
1253                 do_corr(schedule());
1254                 iam_lock_bh(p->bh);
1255                 idx = p->leaf = dx_get_block(path, p->at);
1256                 iam_unlock_bh(p->bh);
1257                 err = iam_path_descr(path)->id_ops->
1258                         id_node_read(path->ip_container, idx, NULL, &bh);
1259                 if (err != 0)
1260                         return err; /* Failure */
1261                 ++p;
1262                 brelse(p->bh);
1263                 assert_corr(p->bh != bh);
1264                 p->bh = bh;
1265                 p->entries = dx_node_get_entries(path, p);
1266                 p->at = iam_entry_shift(path, p->entries, !compat);
1267                 assert_corr(p->curidx != idx);
1268                 p->curidx = idx;
1269                 iam_lock_bh(p->bh);
1270                 assert_corr(p->leaf != dx_get_block(path, p->at));
1271                 p->leaf = dx_get_block(path, p->at);
1272                 iam_unlock_bh(p->bh);
1273                 assert_inv(dx_node_check(path, p));
1274         }
1275         return 1;
1276 }
1277
1278
1279 static inline int iam_index_advance(struct iam_path *path)
1280 {
1281         return iam_htree_advance(iam_path_obj(path), 0, path, NULL, 0);
1282 }
1283
1284 static void iam_unlock_array(struct inode *dir, struct dynlock_handle **lh)
1285 {
1286         int i;
1287
1288         for (i = 0; i < DX_MAX_TREE_HEIGHT; ++i, ++lh) {
1289                 if (*lh != NULL) {
1290                         iam_unlock_htree(dir, *lh);
1291                         *lh = NULL;
1292                 }
1293         }
1294 }
1295 /*
1296  * Advance index part of @path to point to the next leaf. Returns 1 on
1297  * success, 0 when the end of the container is reached. Leaf node is locked.
1298  */
1299 int iam_index_next(struct iam_container *c, struct iam_path *path)
1300 {
1301         iam_ptr_t cursor;
1302         struct dynlock_handle *lh[DX_MAX_TREE_HEIGHT] = { 0, };
1303         int result;
1304         struct inode *object;
1305
1306         /*
1307          * Locking for iam_index_next()... is to be described.
1308          */
1309
1310         object = c->ic_object;
1311         cursor = path->ip_frame->leaf;
1312
1313         while (1) {
1314                 result = iam_index_lock(path, lh);
1315                 do_corr(schedule());
1316                 if (result < 0)
1317                         break;
1318
1319                 result = iam_check_full_path(path, 0);
1320                 if (result == 0 && cursor == path->ip_frame->leaf) {
1321                         result = iam_index_advance(path);
1322
1323                         assert_corr(result == 0 ||
1324                                     cursor != path->ip_frame->leaf);
1325                         break;
1326                 }
1327                 do {
1328                         iam_unlock_array(object, lh);
1329
1330                         iam_path_release(path);
1331                         do_corr(schedule());
1332
1333                         result = __iam_path_lookup(path);
1334                         if (result < 0)
1335                                 break;
1336
1337                         while (path->ip_frame->leaf != cursor) {
1338                                 do_corr(schedule());
1339
1340                                 result = iam_index_lock(path, lh);
1341                                 do_corr(schedule());
1342                                 if (result < 0)
1343                                         break;
1344
1345                                 result = iam_check_full_path(path, 0);
1346                                 if (result != 0)
1347                                         break;
1348
1349                                 result = iam_index_advance(path);
1350                                 if (result == 0) {
1351                                         CERROR("cannot find cursor : %u\n",
1352                                                 cursor);
1353                                         result = -EIO;
1354                                 }
1355                                 if (result < 0)
1356                                         break;
1357                                 result = iam_check_full_path(path, 0);
1358                                 if (result != 0)
1359                                         break;
1360                                 iam_unlock_array(object, lh);
1361                         }
1362                 } while (result == -EAGAIN);
1363                 if (result < 0)
1364                         break;
1365         }
1366         iam_unlock_array(object, lh);
1367         return result;
1368 }
1369
1370 /*
1371  * Move iterator one record right.
1372  *
1373  * Return value: 0: success,
1374  *              +1: end of container reached
1375  *             -ve: error
1376  *
1377  * precondition:  (it_state(it) == IAM_IT_ATTACHED ||
1378  *                 it_state(it) == IAM_IT_SKEWED) && it->ii_flags&IAM_IT_MOVE
1379  * postcondition: ergo(result == 0, it_state(it) == IAM_IT_ATTACHED) &&
1380  *                ergo(result >  0, it_state(it) == IAM_IT_DETACHED)
1381  */
1382 int iam_it_next(struct iam_iterator *it)
1383 {
1384         int result;
1385         struct iam_path      *path;
1386         struct iam_leaf      *leaf;
1387         struct inode         *obj;
1388         do_corr(struct iam_ikey *ik_orig);
1389
1390         /* assert_corr(it->ii_flags&IAM_IT_MOVE); */
1391         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1392                     it_state(it) == IAM_IT_SKEWED);
1393
1394         path = &it->ii_path;
1395         leaf = &path->ip_leaf;
1396         obj  = iam_path_obj(path);
1397
1398         assert_corr(iam_leaf_is_locked(leaf));
1399
1400         result = 0;
1401         do_corr(ik_orig = it_at_rec(it) ?
1402                 iam_it_ikey_get(it, iam_path_ikey(path, 2)) : NULL);
1403         if (it_before(it)) {
1404                 assert_corr(!iam_leaf_at_end(leaf));
1405                 it->ii_state = IAM_IT_ATTACHED;
1406         } else {
1407                 if (!iam_leaf_at_end(leaf))
1408                         /* advance within leaf node */
1409                         iam_leaf_next(leaf);
1410                 /*
1411                  * multiple iterations may be necessary due to empty leaves.
1412                  */
1413                 while (result == 0 && iam_leaf_at_end(leaf)) {
1414                         do_corr(schedule());
1415                         /* advance index portion of the path */
1416                         result = iam_index_next(iam_it_container(it), path);
1417                         assert_corr(iam_leaf_is_locked(leaf));
1418                         if (result == 1) {
1419                                 struct dynlock_handle *lh;
1420                                 lh = iam_lock_htree(obj, path->ip_frame->leaf,
1421                                                    DLT_WRITE);
1422                                 if (lh != NULL) {
1423                                         iam_leaf_fini(leaf);
1424                                         leaf->il_lock = lh;
1425                                         result = iam_leaf_load(path);
1426                                         if (result == 0)
1427                                                 iam_leaf_start(leaf);
1428                                 } else
1429                                         result = -ENOMEM;
1430                         } else if (result == 0)
1431                                 /* end of container reached */
1432                                 result = +1;
1433                         if (result != 0)
1434                                 iam_it_put(it);
1435                 }
1436                 if (result == 0)
1437                         it->ii_state = IAM_IT_ATTACHED;
1438         }
1439         assert_corr(ergo(result == 0, it_state(it) == IAM_IT_ATTACHED));
1440         assert_corr(ergo(result >  0, it_state(it) == IAM_IT_DETACHED));
1441         assert_corr(ergo(result == 0 && ik_orig != NULL,
1442                          it_ikeycmp(it, ik_orig) >= 0));
1443         return result;
1444 }
1445 EXPORT_SYMBOL(iam_it_next);
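/*
 * A minimal read-scan sketch (not compiled) combining the iterator calls
 * above: position at the first record reachable from key @k and walk right
 * until the end of the container. Container locking and the path descriptor
 * are assumed to be handled by the caller; the function and variable names
 * are for illustration only.
 */
#if 0
static int example_scan(struct iam_container *c, const struct iam_key *k,
                        struct iam_path_descr *pd)
{
        struct iam_iterator it;
        int result;

        iam_it_init(&it, c, IAM_IT_MOVE, pd);

        result = iam_it_get_at(&it, k);
        if (result >= 0) {              /* positioned on a record */
                do {
                        /* iam_it_key_get()/iam_it_rec_get() are valid here */
                        result = iam_it_next(&it);
                } while (result == 0);
                if (result > 0)         /* walked off the end */
                        result = 0;
        }
        iam_it_put(&it);
        iam_it_fini(&it);
        return result;
}
#endif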
1446
1447 /*
1448  * Return pointer to the record under iterator.
1449  *
1450  * precondition:  it_state(it) == IAM_IT_ATTACHED && it_at_rec(it)
1451  * postcondition: it_state(it) == IAM_IT_ATTACHED
1452  */
1453 struct iam_rec *iam_it_rec_get(const struct iam_iterator *it)
1454 {
1455         assert_corr(it_state(it) == IAM_IT_ATTACHED);
1456         assert_corr(it_at_rec(it));
1457         return iam_leaf_rec(&it->ii_path.ip_leaf);
1458 }
1459 EXPORT_SYMBOL(iam_it_rec_get);
1460
1461 static void iam_it_reccpy(struct iam_iterator *it, const struct iam_rec *r)
1462 {
1463         struct iam_leaf *folio;
1464
1465         folio = &it->ii_path.ip_leaf;
1466         iam_leaf_ops(folio)->rec_set(folio, r);
1467 }
1468
1469 /*
1470  * Replace contents of record under iterator.
1471  *
1472  * precondition:  it_state(it) == IAM_IT_ATTACHED &&
1473  *                it->ii_flags&IAM_IT_WRITE
1474  * postcondition: it_state(it) == IAM_IT_ATTACHED &&
1475  *                ergo(result == 0, !memcmp(iam_it_rec_get(it), r, ...))
1476  */
1477 int iam_it_rec_set(handle_t *h,
1478                    struct iam_iterator *it, const struct iam_rec *r)
1479 {
1480         int result;
1481         struct iam_path *path;
1482         struct buffer_head *bh;
1483
1484         assert_corr(it_state(it) == IAM_IT_ATTACHED &&
1485                     it->ii_flags&IAM_IT_WRITE);
1486         assert_corr(it_at_rec(it));
1487
1488         path = &it->ii_path;
1489         bh   = path->ip_leaf.il_bh;
1490         result = iam_txn_add(h, path, bh);
1491         if (result == 0) {
1492                 iam_it_reccpy(it, r);
1493                 result = iam_txn_dirty(h, path, bh);
1494         }
1495         return result;
1496 }
1497 EXPORT_SYMBOL(iam_it_rec_set);
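/*
 * A minimal update-in-place sketch (not compiled): look the key up with a
 * write-capable iterator and, if the exact record is found, replace its
 * contents inside the caller's journal handle via iam_it_rec_set(). The
 * handle @h is assumed to have been started by the caller with enough
 * credits for one leaf block; names are for illustration only.
 */
#if 0
static int example_update(handle_t *h, struct iam_container *c,
                          const struct iam_key *k, const struct iam_rec *r,
                          struct iam_path_descr *pd)
{
        struct iam_iterator it;
        int result;

        iam_it_init(&it, c, IAM_IT_WRITE, pd);

        result = iam_it_get(&it, k);
        if (result > 0)                 /* exact key found */
                result = iam_it_rec_set(h, &it, r);
        else if (result == 0)
                result = -ENOENT;       /* positioned before a different key */

        iam_it_put(&it);
        iam_it_fini(&it);
        return result;
}
#endif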
1498
1499 /*
1500  * Return pointer to the index key under iterator.
1501  *
1502  * precondition:  it_state(it) == IAM_IT_ATTACHED ||
1503  *                it_state(it) == IAM_IT_SKEWED
1504  */
1505 static struct iam_ikey *iam_it_ikey_get(const struct iam_iterator *it,
1506                                         struct iam_ikey *ikey)
1507 {
1508         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1509                     it_state(it) == IAM_IT_SKEWED);
1510         assert_corr(it_at_rec(it));
1511         return iam_leaf_ikey(&it->ii_path.ip_leaf, ikey);
1512 }
1513
1514 /*
1515  * Return pointer to the key under iterator.
1516  *
1517  * precondition:  it_state(it) == IAM_IT_ATTACHED ||
1518  *                it_state(it) == IAM_IT_SKEWED
1519  */
1520 struct iam_key *iam_it_key_get(const struct iam_iterator *it)
1521 {
1522         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1523                     it_state(it) == IAM_IT_SKEWED);
1524         assert_corr(it_at_rec(it));
1525         return iam_leaf_key(&it->ii_path.ip_leaf);
1526 }
1527 EXPORT_SYMBOL(iam_it_key_get);
1528
1529 /*
1530  * Return size of key under iterator (in bytes)
1531  *
1532  * precondition:  it_state(it) == IAM_IT_ATTACHED ||
1533  *                it_state(it) == IAM_IT_SKEWED
1534  */
1535 int iam_it_key_size(const struct iam_iterator *it)
1536 {
1537         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1538                     it_state(it) == IAM_IT_SKEWED);
1539         assert_corr(it_at_rec(it));
1540         return iam_leaf_key_size(&it->ii_path.ip_leaf);
1541 }
1542 EXPORT_SYMBOL(iam_it_key_size);
1543
1544 /*
1545  * Insertion of new record. Interaction with jbd during non-trivial case (when
1546  * split happens) is as follows:
1547  *
1548  *  - new leaf node is involved into transaction by ldiskfs_append();
1549  *
1550  *  - old leaf node is involved into transaction by iam_add_rec();
1551  *
1552  *  - leaf where insertion point ends in, is marked dirty by iam_add_rec();
1553  *
1554  *  - leaf without insertion point is marked dirty (as @new_leaf) by
1555  *  iam_new_leaf();
1556  *
1557  *  - split index nodes are involved into transaction and marked dirty by
1558  *  split_index_node().
1559  *
1560  *  - "safe" index node, which is not split, but where the new pointer is
1561  *  inserted, is involved into transaction and marked dirty by split_index_node().
1562  *
1563  *  - index node where pointer to new leaf is inserted is involved into
1564  *  transaction by split_index_node() and marked dirty by iam_add_rec().
1565  *
1566  *  - inode is marked dirty by iam_add_rec().
1567  *
1568  */
1569
1570 static int iam_new_leaf(handle_t *handle, struct iam_leaf *leaf)
1571 {
1572         int err;
1573         iam_ptr_t blknr;
1574         struct buffer_head   *new_leaf;
1575         struct buffer_head   *old_leaf;
1576         struct iam_container *c;
1577         struct inode         *obj;
1578         struct iam_path      *path;
1579
1580         assert_inv(iam_leaf_check(leaf));
1581
1582         c = iam_leaf_container(leaf);
1583         path = leaf->il_path;
1584
1585         obj = c->ic_object;
1586         new_leaf = ldiskfs_append(handle, obj, (__u32 *)&blknr, &err);
1587         do_corr(schedule());
1588         if (new_leaf != NULL) {
1589                 struct dynlock_handle *lh;
1590
1591                 lh = iam_lock_htree(obj, blknr, DLT_WRITE);
1592                 do_corr(schedule());
1593                 if (lh != NULL) {
1594                         iam_leaf_ops(leaf)->init_new(c, new_leaf);
1595                         do_corr(schedule());
1596                         old_leaf = leaf->il_bh;
1597                         iam_leaf_split(leaf, &new_leaf, blknr);
1598                         if (old_leaf != leaf->il_bh) {
1599                                 /*
1600                                  * Switched to the new leaf.
1601                                  */
1602                                 iam_leaf_unlock(leaf);
1603                                 leaf->il_lock = lh;
1604                                 path->ip_frame->leaf = blknr;
1605                         } else
1606                                 iam_unlock_htree(obj, lh);
1607                         do_corr(schedule());
1608                         err = iam_txn_dirty(handle, path, new_leaf);
1609                         brelse(new_leaf);
1610                         if (err == 0)
1611                                 err = ldiskfs_mark_inode_dirty(handle, obj);
1612                         do_corr(schedule());
1613                 } else
1614                         err = -ENOMEM;
1615         }
1616         assert_inv(iam_leaf_check(leaf));
1617         assert_inv(iam_leaf_check(&iam_leaf_path(leaf)->ip_leaf));
1618         assert_inv(iam_path_check(iam_leaf_path(leaf)));
1619         return err;
1620 }
1621
1622 static inline void dx_set_limit(struct iam_entry *entries, unsigned value)
1623 {
1624         ((struct dx_countlimit *) entries)->limit = cpu_to_le16(value);
1625 }
1626
1627 static int iam_shift_entries(struct iam_path *path,
1628                          struct iam_frame *frame, unsigned count,
1629                          struct iam_entry *entries, struct iam_entry *entries2,
1630                          u32 newblock)
1631 {
1632         unsigned count1;
1633         unsigned count2;
1634         int delta;
1635
1636         struct iam_frame *parent = frame - 1;
1637         struct iam_ikey *pivot = iam_path_ikey(path, 3);
1638
1639         delta = dx_index_is_compat(path) ? 0 : +1;
1640
1641         count1 = count/2 + delta;
1642         count2 = count - count1;
1643         dx_get_ikey(path, iam_entry_shift(path, entries, count1), pivot);
1644
1645         dxtrace(printk("Split index %d/%d\n", count1, count2));
1646
1647         memcpy((char *) iam_entry_shift(path, entries2, delta),
1648                (char *) iam_entry_shift(path, entries, count1),
1649                count2 * iam_entry_size(path));
1650
1651         dx_set_count(entries2, count2 + delta);
1652         dx_set_limit(entries2, dx_node_limit(path));
1653
1654         /*
1655          * NOTE: very subtle piece of code. A competing dx_probe() may find
1656          * the 2nd-level index in the root index; we then insert a new index
1657          * here and set the new count in that 2nd-level index, so dx_probe()
1658          * may see a 2nd-level index without the hash it looks for. The solution
1659          * is to re-check the root index after locking the found 2nd-level index -bzzz
1660          */
1661         iam_insert_key_lock(path, parent, pivot, newblock);
1662
1663         /*
1664          * now both the old and the new 2nd-level index blocks contain all the
1665          * pointers, so dx_probe() may find a key in either of them. That is OK -bzzz
1666          */
1667         iam_lock_bh(frame->bh);
1668         dx_set_count(entries, count1);
1669         iam_unlock_bh(frame->bh);
1670
1671         /*
1672          * now the old 2nd-level index block points to the first half of the
1673          * leaves. It is important that dx_probe() checks the root index block
1674          * for changes under dx_lock_bh(frame->bh) -bzzz
1675          */
1676
1677         return count1;
1678 }
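
/*
 * Worked example (illustration only): for a non-compat node (delta == +1)
 * with count == 10, count1 = 10/2 + 1 = 6 entries stay in the old block and
 * count2 = 4 entries are copied into the new block at offset delta. The
 * pivot is the ikey of the entry at index count1 (the first entry that
 * moves) and is inserted into the parent next to the pointer to @newblock.
 */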
1679
1680
1681 int split_index_node(handle_t *handle, struct iam_path *path,
1682                      struct dynlock_handle **lh)
1683 {
1684
1685         struct iam_entry *entries;   /* old block contents */
1686         struct iam_entry *entries2;  /* new block contents */
1687         struct iam_frame *frame, *safe;
1688         struct buffer_head *bh_new[DX_MAX_TREE_HEIGHT] = {0};
1689         u32 newblock[DX_MAX_TREE_HEIGHT] = {0};
1690         struct dynlock_handle *lock[DX_MAX_TREE_HEIGHT] = {NULL,};
1691         struct dynlock_handle *new_lock[DX_MAX_TREE_HEIGHT] = {NULL,};
1692         struct inode *dir = iam_path_obj(path);
1693         struct iam_descr *descr;
1694         int nr_splet;
1695         int i, err;
1696
1697         descr = iam_path_descr(path);
1698         /*
1699          * Algorithm below depends on this.
1700          */
1701         assert_corr(dx_root_limit(path) < dx_node_limit(path));
1702
1703         frame = path->ip_frame;
1704         entries = frame->entries;
1705
1706         /*
1707          * Tall-tree handling: we might have to split multiple index blocks
1708          * all the way up to the tree root. The tricky point here is error
1709          * handling: to avoid complicated undo/rollback we
1710          *
1711          *   - first allocate all the necessary blocks, then
1712          *
1713          *   - insert pointers into them atomically.
1714          */
1715
1716         /*
1717          * Locking: leaf is already locked. htree-locks are acquired on all
1718          * Locking: the leaf is already locked. htree locks are acquired on all
1719          * index nodes that require a split, bottom-to-top, on the "safe" node,
1720          * and on all new nodes.
1721
1722         dxtrace(printk("using %u of %u node entries\n",
1723                        dx_get_count(entries), dx_get_limit(entries)));
1724
1725         /* What levels need split? */
1726         for (nr_splet = 0; frame >= path->ip_frames &&
1727              dx_get_count(frame->entries) == dx_get_limit(frame->entries);
1728              --frame, ++nr_splet) {
1729                 do_corr(schedule());
1730                 if (nr_splet == DX_MAX_TREE_HEIGHT) {
1731                         /*
1732                         CWARN(dir->i_sb, __FUNCTION__,
1733                                      "Directory index full!\n");
1734                                      */
1735                         err = -ENOSPC;
1736                         goto cleanup;
1737                 }
1738         }
1739
1740         safe = frame;
1741
1742         /*
1743          * Lock all nodes, bottom to top.
1744          */
1745         for (frame = path->ip_frame, i = nr_splet; i >= 0; --i, --frame) {
1746                 do_corr(schedule());
1747                 lock[i] = iam_lock_htree(dir, frame->curidx, DLT_WRITE);
1748                 if (lock[i] == NULL) {
1749                         err = -ENOMEM;
1750                         goto cleanup;
1751                 }
1752         }
1753
1754         /*
1755          * Check for concurrent index modification.
1756          */
1757         err = iam_check_full_path(path, 1);
1758         if (err)
1759                 goto cleanup;
1760         /*
1761          * And check that the same number of nodes is to be split.
1762          */
1763         for (i = 0, frame = path->ip_frame; frame >= path->ip_frames &&
1764              dx_get_count(frame->entries) == dx_get_limit(frame->entries);
1765              --frame, ++i) {
1766                 ;
1767         }
1768         if (i != nr_splet) {
1769                 err = -EAGAIN;
1770                 goto cleanup;
1771         }
1772
1773         /* Go back down, allocating blocks, locking them, and adding them
1774          * to the transaction... */
1775         for (frame = safe + 1, i = 0; i < nr_splet; ++i, ++frame) {
1776                 bh_new[i] = ldiskfs_append (handle, dir, &newblock[i], &err);
1777                 do_corr(schedule());
1778                 if (!bh_new[i] ||
1779                     descr->id_ops->id_node_init(path->ip_container,
1780                                                 bh_new[i], 0) != 0)
1781                         goto cleanup;
1782                 new_lock[i] = iam_lock_htree(dir, newblock[i], DLT_WRITE);
1783                 if (new_lock[i] == NULL) {
1784                         err = -ENOMEM;
1785                         goto cleanup;
1786                 }
1787                 do_corr(schedule());
1788                 BUFFER_TRACE(frame->bh, "get_write_access");
1789                 err = ldiskfs_journal_get_write_access(handle, frame->bh);
1790                 if (err)
1791                         goto journal_error;
1792         }
1793         /* Add "safe" node to transaction too */
1794         if (safe + 1 != path->ip_frames) {
1795                 do_corr(schedule());
1796                 err = ldiskfs_journal_get_write_access(handle, safe->bh);
1797                 if (err)
1798                         goto journal_error;
1799         }
1800
1801         /* Go through nodes once more, inserting pointers */
1802         for (frame = safe + 1, i = 0; i < nr_splet; ++i, ++frame) {
1803                 unsigned count;
1804                 int idx;
1805                 struct buffer_head *bh2;
1806                 struct buffer_head *bh;
1807
1808                 entries = frame->entries;
1809                 count = dx_get_count(entries);
1810                 idx = iam_entry_diff(path, frame->at, entries);
1811
1812                 bh2 = bh_new[i];
1813                 entries2 = dx_get_entries(path, bh2->b_data, 0);
1814
1815                 bh = frame->bh;
1816                 if (frame == path->ip_frames) {
1817                         /* splitting root node. Tricky point:
1818                          *
1819                          * In the "normal" B-tree we'd split root *and* add
1820                          * new root to the tree with pointers to the old root
1821                          * and its sibling (thus introducing two new nodes).
1822                          *
1823                          * In htree it's enough to add one node, because the
1824                          * capacity of the root node is smaller than that of
1825                          * a non-root one.
1826                          */
1827                         struct iam_frame *frames;
1828                         struct iam_entry *next;
1829
1830                         assert_corr(i == 0);
1831
1832                         do_corr(schedule());
1833
1834                         frames = path->ip_frames;
1835                         memcpy((char *) entries2, (char *) entries,
1836                                count * iam_entry_size(path));
1837                         dx_set_limit(entries2, dx_node_limit(path));
1838
1839                         /* Set up root */
1840                         iam_lock_bh(frame->bh);
1841                         next = descr->id_ops->id_root_inc(path->ip_container,
1842                                                           path, frame);
1843                         dx_set_block(path, next, newblock[0]);
1844                         iam_unlock_bh(frame->bh);
1845
1846                         do_corr(schedule());
1847                         /* Shift frames in the path */
1848                         memmove(frames + 2, frames + 1,
1849                                 (sizeof path->ip_frames) - 2 * sizeof frames[0]);
1850                         /* Add new access path frame */
1851                         frames[1].at = iam_entry_shift(path, entries2, idx);
1852                         frames[1].entries = entries = entries2;
1853                         frames[1].bh = bh2;
1854                         assert_inv(dx_node_check(path, frame));
1855                         ++ path->ip_frame;
1856                         ++ frame;
1857                         assert_inv(dx_node_check(path, frame));
1858                         bh_new[0] = NULL; /* buffer head is "consumed" */
1859                         err = ldiskfs_journal_get_write_access(handle, bh2);
1860                         if (err)
1861                                 goto journal_error;
1862                         do_corr(schedule());
1863                 } else {
1864                         /* splitting non-root index node. */
1865                         struct iam_frame *parent = frame - 1;
1866
1867                         do_corr(schedule());
1868                         count = iam_shift_entries(path, frame, count,
1869                                               entries, entries2, newblock[i]);
1870                         /* Which index block gets the new entry? */
1871                         if (idx >= count) {
1872                                 int d = dx_index_is_compat(path) ? 0 : +1;
1873
1874                                 frame->at = iam_entry_shift(path, entries2,
1875                                                             idx - count + d);
1876                                 frame->entries = entries = entries2;
1877                                 frame->curidx = newblock[i];
1878                                 swap(frame->bh, bh2);
1879                                 assert_corr(lock[i + 1] != NULL);
1880                                 assert_corr(new_lock[i] != NULL);
1881                                 swap(lock[i + 1], new_lock[i]);
1882                                 bh_new[i] = bh2;
1883                                 parent->at = iam_entry_shift(path,
1884                                                              parent->at, +1);
1885                         }
1886                         assert_inv(dx_node_check(path, frame));
1887                         assert_inv(dx_node_check(path, parent));
1888                         dxtrace(dx_show_index ("node", frame->entries));
1889                         dxtrace(dx_show_index ("node",
1890                                ((struct dx_node *) bh2->b_data)->entries));
1891                         err = ldiskfs_journal_dirty_metadata(handle, bh2);
1892                         if (err)
1893                                 goto journal_error;
1894                         do_corr(schedule());
1895                         err = ldiskfs_journal_dirty_metadata(handle, parent->bh);
1896                         if (err)
1897                                 goto journal_error;
1898                 }
1899                 do_corr(schedule());
1900                 err = ldiskfs_journal_dirty_metadata(handle, bh);
1901                 if (err)
1902                         goto journal_error;
1903         }
1904         /*
1905          * This function was called to make insertion of a new leaf
1906          * possible. Check that it fulfilled its obligations.
1907          */
1908         assert_corr(dx_get_count(path->ip_frame->entries) <
1909                     dx_get_limit(path->ip_frame->entries));
1910         assert_corr(lock[nr_splet] != NULL);
1911         *lh = lock[nr_splet];
1912         lock[nr_splet] = NULL;
1913         if (nr_splet > 0) {
1914                 /*
1915                  * Log ->i_size modification.
1916                  */
1917                 err = ldiskfs_mark_inode_dirty(handle, dir);
1918                 if (err)
1919                         goto journal_error;
1920         }
1921         goto cleanup;
1922 journal_error:
1923         ldiskfs_std_error(dir->i_sb, err);
1924
1925 cleanup:
1926         iam_unlock_array(dir, lock);
1927         iam_unlock_array(dir, new_lock);
1928
1929         assert_corr(err || iam_frame_is_locked(path, path->ip_frame));
1930
1931         do_corr(schedule());
1932         for (i = 0; i < ARRAY_SIZE(bh_new); ++i) {
1933                 if (bh_new[i] != NULL)
1934                         brelse(bh_new[i]);
1935         }
1936         return err;
1937 }
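
/*
 * Example scenario (illustration only): if only the leaf's parent index
 * node is full while its own parent still has room, then nr_splet == 1 and
 * that upper node is the "safe" frame. One new index block (newblock[0]) is
 * allocated, iam_shift_entries() moves half of the entries into it and
 * inserts the pivot key into the safe node. On success *lh carries the
 * htree lock of the index node that now covers the insertion point; all
 * other locks taken here are dropped in cleanup.
 */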
1938
1939 static int iam_add_rec(handle_t *handle, struct iam_iterator *it,
1940                        struct iam_path *path,
1941                        const struct iam_key *k, const struct iam_rec *r)
1942 {
1943         int err;
1944         struct iam_leaf *leaf;
1945
1946         leaf = &path->ip_leaf;
1947         assert_inv(iam_leaf_check(leaf));
1948         assert_inv(iam_path_check(path));
1949         err = iam_txn_add(handle, path, leaf->il_bh);
1950         if (err == 0) {
1951                 do_corr(schedule());
1952                 if (!iam_leaf_can_add(leaf, k, r)) {
1953                         struct dynlock_handle *lh = NULL;
1954
1955                         do {
1956                                 assert_corr(lh == NULL);
1957                                 do_corr(schedule());
1958                                 err = split_index_node(handle, path, &lh);
1959                                 if (err == -EAGAIN) {
1960                                         assert_corr(lh == NULL);
1961
1962                                         iam_path_fini(path);
1963                                         it->ii_state = IAM_IT_DETACHED;
1964
1965                                         do_corr(schedule());
1966                                         err = iam_it_get_exact(it, k);
1967                                         if (err == -ENOENT)
1968                                                 err = +1; /* repeat split */
1969                                         else if (err == 0)
1970                                                 err = -EEXIST;
1971                                 }
1972                         } while (err > 0);
1973                         assert_inv(iam_path_check(path));
1974                         if (err == 0) {
1975                                 assert_corr(lh != NULL);
1976                                 do_corr(schedule());
1977                                 err = iam_new_leaf(handle, leaf);
1978                                 if (err == 0)
1979                                         err = iam_txn_dirty(handle, path,
1980                                                             path->ip_frame->bh);
1981                         }
1982                         iam_unlock_htree(iam_path_obj(path), lh);
1983                         do_corr(schedule());
1984                 }
1985                 if (err == 0) {
1986                         iam_leaf_rec_add(leaf, k, r);
1987                         err = iam_txn_dirty(handle, path, leaf->il_bh);
1988                 }
1989         }
1990         assert_inv(iam_leaf_check(leaf));
1991         assert_inv(iam_leaf_check(&path->ip_leaf));
1992         assert_inv(iam_path_check(path));
1993         return err;
1994 }
1995
1996 /*
1997  * Insert new record with key @k and contents from @r, shifting records to the
1998  * right. On success, iterator is positioned on the newly inserted record.
1999  *
2000  * precondition: it->ii_flags&IAM_IT_WRITE &&
2001  *               (it_state(it) == IAM_IT_ATTACHED ||
2002  *                it_state(it) == IAM_IT_SKEWED) &&
2003  *               ergo(it_state(it) == IAM_IT_ATTACHED,
2004  *                    it_keycmp(it, k) <= 0) &&
2005  *               ergo(it_before(it), it_keycmp(it, k) > 0)
2006  * postcondition: ergo(result == 0,
2007  *                     it_state(it) == IAM_IT_ATTACHED &&
2008  *                     it_keycmp(it, k) == 0 &&
2009  *                     !memcmp(iam_it_rec_get(it), r, ...))
2010  */
2011 int iam_it_rec_insert(handle_t *h, struct iam_iterator *it,
2012                       const struct iam_key *k, const struct iam_rec *r)
2013 {
2014         int result;
2015         struct iam_path *path;
2016
2017         path = &it->ii_path;
2018
2019         assert_corr(it->ii_flags&IAM_IT_WRITE);
2020         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
2021                     it_state(it) == IAM_IT_SKEWED);
2022         assert_corr(ergo(it_state(it) == IAM_IT_ATTACHED,
2023                          it_keycmp(it, k) <= 0));
2024         assert_corr(ergo(it_before(it), it_keycmp(it, k) > 0));
2025         result = iam_add_rec(h, it, path, k, r);
2026         if (result == 0)
2027                 it->ii_state = IAM_IT_ATTACHED;
2028         assert_corr(ergo(result == 0,
2029                          it_state(it) == IAM_IT_ATTACHED &&
2030                          it_keycmp(it, k) == 0));
2031         return result;
2032 }
2033 EXPORT_SYMBOL(iam_it_rec_insert);
2034
2035 /*
2036  * Delete record under iterator.
2037  *
2038  * precondition:  it_state(it) == IAM_IT_ATTACHED &&
2039  *                it->ii_flags&IAM_IT_WRITE &&
2040  *                it_at_rec(it)
2041  * postcondition: it_state(it) == IAM_IT_ATTACHED ||
2042  *                it_state(it) == IAM_IT_DETACHED
2043  */
2044 int iam_it_rec_delete(handle_t *h, struct iam_iterator *it)
2045 {
2046         int result;
2047         struct iam_leaf *leaf;
2048         struct iam_path *path;
2049
2050         assert_corr(it_state(it) == IAM_IT_ATTACHED &&
2051                     it->ii_flags&IAM_IT_WRITE);
2052         assert_corr(it_at_rec(it));
2053
2054         path = &it->ii_path;
2055         leaf = &path->ip_leaf;
2056
2057         assert_inv(iam_leaf_check(leaf));
2058         assert_inv(iam_path_check(path));
2059
2060         result = iam_txn_add(h, path, leaf->il_bh);
2061         /*
2062          * no compaction for now.
2063          */
2064         if (result == 0) {
2065                 iam_rec_del(leaf, it->ii_flags&IAM_IT_MOVE);
2066                 result = iam_txn_dirty(h, path, leaf->il_bh);
2067                 if (result == 0 && iam_leaf_at_end(leaf) &&
2068                     it->ii_flags&IAM_IT_MOVE) {
2069                         result = iam_it_next(it);
2070                         if (result > 0)
2071                                 result = 0;
2072                 }
2073         }
2074         assert_inv(iam_leaf_check(leaf));
2075         assert_inv(iam_path_check(path));
2076         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
2077                     it_state(it) == IAM_IT_DETACHED);
2078         return result;
2079 }
2080 EXPORT_SYMBOL(iam_it_rec_delete);
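
/*
 * Example (illustration only; @h, @c, @k and @pd come from the caller):
 * position a write iterator on the record with key @k and delete it. With
 * IAM_IT_MOVE set, the iterator can advance to the following record after
 * the deletion (see iam_it_rec_delete() above).
 *
 *      struct iam_iterator it;
 *      int rc;
 *
 *      iam_it_init(&it, c, IAM_IT_WRITE | IAM_IT_MOVE, pd);
 *      rc = iam_it_get_exact(&it, k);
 *      if (rc == 0)
 *              rc = iam_it_rec_delete(h, &it);
 *      iam_it_put(&it);
 *      iam_it_fini(&it);
 */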
2081
2082 /*
2083  * Convert iterator to cookie.
2084  *
2085  * precondition:  it_state(it) == IAM_IT_ATTACHED &&
2086  *                iam_it_container(it)->ic_descr->id_ikey_size <= sizeof(iam_pos_t)
2087  * postcondition: it_state(it) == IAM_IT_ATTACHED
2088  */
2089 iam_pos_t iam_it_store(const struct iam_iterator *it)
2090 {
2091         iam_pos_t result;
2092
2093         assert_corr(it_state(it) == IAM_IT_ATTACHED);
2094         assert_corr(it_at_rec(it));
2095         assert_corr(iam_it_container(it)->ic_descr->id_ikey_size <=
2096                     sizeof result);
2097
2098         result = 0;
2099         return *(iam_pos_t *)iam_it_ikey_get(it, (void *)&result);
2100 }
2101 EXPORT_SYMBOL(iam_it_store);
2102
2103 /*
2104  * Restore iterator from cookie.
2105  *
2106  * precondition:  it_state(it) == IAM_IT_DETACHED && it->ii_flags&IAM_IT_MOVE &&
2107  *                iam_it_container(it)->ic_descr->id_ikey_size <= sizeof(iam_pos_t)
2108  * postcondition: ergo(result == 0, it_state(it) == IAM_IT_ATTACHED &&
2109  *                                  iam_it_store(it) == pos)
2110  */
2111 int iam_it_load(struct iam_iterator *it, iam_pos_t pos)
2112 {
2113         assert_corr(it_state(it) == IAM_IT_DETACHED &&
2114                     it->ii_flags&IAM_IT_MOVE);
2115         assert_corr(iam_it_container(it)->ic_descr->id_ikey_size <= sizeof pos);
2116         return iam_it_iget(it, (struct iam_ikey *)&pos);
2117 }
2118 EXPORT_SYMBOL(iam_it_load);
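
/*
 * Example (illustration only; @c, @pd and @k are supplied by the caller and
 * a freshly initialized iterator is assumed to start detached): remember
 * the current position as a cookie and resume from it later.
 *
 *      struct iam_iterator it;
 *      iam_pos_t pos = 0;
 *
 *      iam_it_init(&it, c, IAM_IT_MOVE, pd);
 *      if (iam_it_get_exact(&it, k) == 0)
 *              pos = iam_it_store(&it);
 *      iam_it_put(&it);
 *      iam_it_fini(&it);
 *
 *      iam_it_init(&it, c, IAM_IT_MOVE, pd);
 *      if (iam_it_load(&it, pos) == 0) {
 *              ... iterate with iam_it_next() ...
 *              iam_it_put(&it);
 *      }
 *      iam_it_fini(&it);
 */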
2119
2120 /***********************************************************************/
2121 /* invariants                                                          */
2122 /***********************************************************************/
2123
2124 static inline int ptr_inside(void *base, size_t size, void *ptr)
2125 {
2126         return (base <= ptr) && (ptr < base + size);
2127 }
2128
2129 int iam_frame_invariant(struct iam_frame *f)
2130 {
2131         return
2132                 (f->bh != NULL &&
2133                 f->bh->b_data != NULL &&
2134                 ptr_inside(f->bh->b_data, f->bh->b_size, f->entries) &&
2135                 ptr_inside(f->bh->b_data, f->bh->b_size, f->at) &&
2136                 f->entries <= f->at);
2137 }
2138 int iam_leaf_invariant(struct iam_leaf *l)
2139 {
2140         return
2141                 l->il_bh != NULL &&
2142                 l->il_bh->b_data != NULL &&
2143                 ptr_inside(l->il_bh->b_data, l->il_bh->b_size, l->il_entries) &&
2144                 ptr_inside(l->il_bh->b_data, l->il_bh->b_size, l->il_at) &&
2145                 l->il_entries <= l->il_at;
2146 }
2147
2148 int iam_path_invariant(struct iam_path *p)
2149 {
2150         int i;
2151
2152         if (p->ip_container == NULL ||
2153             p->ip_indirect < 0 || p->ip_indirect > DX_MAX_TREE_HEIGHT - 1 ||
2154             p->ip_frame != p->ip_frames + p->ip_indirect ||
2155             !iam_leaf_invariant(&p->ip_leaf))
2156                 return 0;
2157         for (i = 0; i < ARRAY_SIZE(p->ip_frames); ++i) {
2158                 if (i <= p->ip_indirect) {
2159                         if (!iam_frame_invariant(&p->ip_frames[i]))
2160                                 return 0;
2161                 }
2162         }
2163         return 1;
2164 }
2165
2166 int iam_it_invariant(struct iam_iterator *it)
2167 {
2168         return
2169                 (it->ii_state == IAM_IT_DETACHED ||
2170                  it->ii_state == IAM_IT_ATTACHED ||
2171                  it->ii_state == IAM_IT_SKEWED) &&
2172                 !(it->ii_flags & ~(IAM_IT_MOVE | IAM_IT_WRITE)) &&
2173                 ergo(it->ii_state == IAM_IT_ATTACHED ||
2174                      it->ii_state == IAM_IT_SKEWED,
2175                      iam_path_invariant(&it->ii_path) &&
2176                      equi(it_at_rec(it), it->ii_state == IAM_IT_SKEWED));
2177 }
2178
2179 /*
2180  * Search container @c for record with key @k. If record is found, its data
2181  * are moved into @r.
2182  *
2183  * Return values: 0: found, -ENOENT: not-found, -ve: error
2184  */
2185 int iam_lookup(struct iam_container *c, const struct iam_key *k,
2186                struct iam_rec *r, struct iam_path_descr *pd)
2187 {
2188         struct iam_iterator it;
2189         int result;
2190
2191         iam_it_init(&it, c, 0, pd);
2192
2193         result = iam_it_get_exact(&it, k);
2194         if (result == 0)
2195                 /*
2196                  * record with required key found, copy it into user buffer
2197                  */
2198                 iam_reccpy(&it.ii_path.ip_leaf, r);
2199         iam_it_put(&it);
2200         iam_it_fini(&it);
2201         return result;
2202 }
2203 EXPORT_SYMBOL(iam_lookup);
2204
2205 /*
2206  * Insert new record @r with key @k into container @c (within context of
2207  * transaction @h).
2208  *
2209  * Return values: 0: success, -ve: error, including -EEXIST when record with
2210  * given key is already present.
2211  *
2212  * postcondition: ergo(result == 0 || result == -EEXIST,
2213  *                     iam_lookup(c, k, r2, pd) == 0)
2214  */
2215 int iam_insert(handle_t *h, struct iam_container *c, const struct iam_key *k,
2216                const struct iam_rec *r, struct iam_path_descr *pd)
2217 {
2218         struct iam_iterator it;
2219         int result;
2220
2221         iam_it_init(&it, c, IAM_IT_WRITE, pd);
2222
2223         result = iam_it_get_exact(&it, k);
2224         if (result == -ENOENT)
2225                 result = iam_it_rec_insert(h, &it, k, r);
2226         else if (result == 0)
2227                 result = -EEXIST;
2228         iam_it_put(&it);
2229         iam_it_fini(&it);
2230         return result;
2231 }
2232 EXPORT_SYMBOL(iam_insert);
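
/*
 * Usage sketch (illustration only; @h, @c, @k, @r, @r2 and @pd are supplied
 * by the caller): insert a record and, if a record with this key already
 * exists, fetch the existing contents instead.
 *
 *      rc = iam_insert(h, c, k, r, pd);
 *      if (rc == -EEXIST)
 *              rc = iam_lookup(c, k, r2, pd);
 */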
2233
2234 /*
2235  * Update record with the key @k in container @c (within context of
2236  * transaction @h); the new record is given by @r.
2237  *
2238  * Return values: 0: success, -ve: error, including -ENOENT if no record with
2239  * the given key found.
2240  */
2241 int iam_update(handle_t *h, struct iam_container *c, const struct iam_key *k,
2242                const struct iam_rec *r, struct iam_path_descr *pd)
2243 {
2244         struct iam_iterator it;
2245         int result;
2246
2247         iam_it_init(&it, c, IAM_IT_WRITE, pd);
2248
2249         result = iam_it_get_exact(&it, k);
2250         if (result == 0)
2251                 iam_it_rec_set(h, &it, r);
2252         iam_it_put(&it);
2253         iam_it_fini(&it);
2254         return result;
2255 }
2256 EXPORT_SYMBOL(iam_update);
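
/*
 * Usage sketch (illustration only; arguments as in the example above):
 * "update or insert" built from the two exported calls.
 *
 *      rc = iam_update(h, c, k, r, pd);
 *      if (rc == -ENOENT)
 *              rc = iam_insert(h, c, k, r, pd);
 */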
2257
2258 /*
2259  * Delete existing record with key @k.
2260  *
2261  * Return values: 0: success, -ENOENT: not-found, -ve: other error.
2262  *
2263  * postcondition: ergo(result == 0 || result == -ENOENT,
2264  *                     iam_lookup(c, k, *) == -ENOENT)
2265  */
2266 int iam_delete(handle_t *h, struct iam_container *c, const struct iam_key *k,
2267                struct iam_path_descr *pd)
2268 {
2269         struct iam_iterator it;
2270         int result;
2271
2272         iam_it_init(&it, c, IAM_IT_WRITE, pd);
2273
2274         result = iam_it_get_exact(&it, k);
2275         if (result == 0)
2276                 iam_it_rec_delete(h, &it);
2277         iam_it_put(&it);
2278         iam_it_fini(&it);
2279         return result;
2280 }
2281 EXPORT_SYMBOL(iam_delete);
2282
2283 int iam_root_limit(int rootgap, int blocksize, int size)
2284 {
2285         int limit;
2286         int nlimit;
2287
2288         limit = (blocksize - rootgap) / size;
2289         nlimit = blocksize / size;
2290         if (limit == nlimit)
2291                 limit--;
2292         return limit;
2293 }
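
/*
 * Worked example (illustration only): blocksize = 4096, rootgap = 8 and
 * size = 24 give limit = (4096 - 8) / 24 = 170 and nlimit = 4096 / 24 = 170;
 * since the two are equal the root limit is decremented to 169, keeping it
 * strictly smaller than the entry count of a gap-less block.
 */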