b=20226 rename osd to osd-ldiskfs
[fs/lustre-release.git] / lustre / osd-ldiskfs / osd_iam.c
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * GPL HEADER START
5  *
6  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 only,
10  * as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License version 2 for more details (a copy is included
16  * in the LICENSE file that accompanied this code).
17  *
18  * You should have received a copy of the GNU General Public License
19  * version 2 along with this program; If not, see [sun.com URL with a
20  * copy of GPLv2].
21  *
22  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23  * CA 95054 USA or visit www.sun.com if you need additional information or
24  * have any questions.
25  *
26  * GPL HEADER END
27  */
28 /*
29  * Copyright (c) 2008 Sun Microsystems, Inc. All rights reserved
30  * Use is subject to license terms.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * iam.c
37  * Top-level entry points into iam module
38  *
39  * Author: Wang Di <wangdi@clusterfs.com>
40  * Author: Nikita Danilov <nikita@clusterfs.com>
41  */
42
43 /*
44  * iam: big theory statement.
45  *
46  * iam (Index Access Module) is a module providing abstraction of persistent
47  * transactional container on top of generalized ldiskfs htree.
48  *
49  * iam supports:
50  *
51  *     - key, pointer, and record size specifiable per container.
52  *
53  *     - trees taller than 2 index levels.
54  *
55  *     - read/write to existing ldiskfs htree directories as iam containers.
56  *
57  * iam container is a tree, consisting of leaf nodes containing keys and
58  * records stored in this container, and index nodes, containing keys and
59  * pointers to leaf or index nodes.
60  *
61  * iam does not work with keys directly, instead it calls user-supplied key
62  * comparison function (->dpo_keycmp()).
63  *
64  * Pointers are (currently) interpreted as logical offsets (measured in
65  * blocks) within the underlying flat file on top of which the iam tree lives.
66  *
67  * On-disk format:
68  *
69  * iam mostly tries to reuse existing htree formats.
70  *
71  * Format of index node:
72  *
73  * +-----+-------+-------+-------+------+-------+------------+
74  * |     | count |       |       |      |       |            |
75  * | gap |   /   | entry | entry | .... | entry | free space |
76  * |     | limit |       |       |      |       |            |
77  * +-----+-------+-------+-------+------+-------+------------+
78  *
79  *       gap           this part of node is never accessed by iam code. It
80  *                     exists for binary compatibility with ldiskfs htree (that,
81  *                     in turn, stores fake struct ext2_dirent for ext2
82  *                     compatibility), and to keep some unspecified per-node
83  *                     data. Gap can be different for root and non-root index
84  *                     nodes. Gap size can be specified for each container
85  *                     (gap of 0 is allowed).
86  *
87  *       count/limit   current number of entries in this node, and the maximal
88  *                     number of entries that can fit into node. count/limit
89  *                     has the same size as entry, and is itself counted in
90  *                     count.
91  *
92  *       entry         index entry: consists of a key immediately followed by
93  *                     a pointer to a child node. Size of a key and size of a
94  *                     pointer depend on the container. Entry has neither
95  *                     alignment nor padding.
96  *
97  *       free space    portion of the node to which new entries are added
98  *
99  * Entries in index node are sorted by their key value.
100  *
101  * Format of a leaf node is not specified. Generic iam code accesses leaf
102  * nodes through ->id_leaf methods in struct iam_descr.
103  *
104  */
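/*
 * Illustrative sketch only (not referenced by the iam code): with the index
 * node layout described above, the byte offset of an entry slot can be
 * computed from the per-container gap, key and pointer sizes.  The helper
 * name and its parameters below are hypothetical.
 */
#if 0
static inline size_t iam_example_entry_offset(size_t gap, size_t key_size,
                                              size_t ptr_size, int slot)
{
        /*
         * Slot 0 holds the count/limit pair (it is one entry wide and is
         * itself counted in "count"); real entries, each an unaligned key
         * immediately followed by a pointer, occupy slots 1..count-1.
         */
        return gap + slot * (key_size + ptr_size);
}
#endif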
105
106 #include <linux/module.h>
107 #include <linux/fs.h>
108 #include <linux/pagemap.h>
109 #include <linux/time.h>
110 #include <linux/fcntl.h>
111 #include <linux/stat.h>
112 #include <linux/string.h>
113 #include <linux/quotaops.h>
114 #include <linux/buffer_head.h>
115 #include <linux/smp_lock.h>
116 #include "osd_internal.h"
117
118 #include "xattr.h"
119 #include "iopen.h"
120 #include "acl.h"
121
122 /*
123  * List of all registered formats.
124  *
125  * No locking. Callers synchronize.
126  */
127 static CFS_LIST_HEAD(iam_formats);
128
129 void iam_format_register(struct iam_format *fmt)
130 {
131         cfs_list_add(&fmt->if_linkage, &iam_formats);
132 }
133 EXPORT_SYMBOL(iam_format_register);
134
135 /*
136  * Determine format of given container. This is done by scanning list of
137  * registered formats and calling ->if_guess() method of each in turn.
138  */
139 static int iam_format_guess(struct iam_container *c)
140 {
141         int result;
142         struct iam_format *fmt;
143
144         /*
145          * XXX temporary initialization hook.
146          */
147         {
148                 static int initialized = 0;
149
150                 if (!initialized) {
151                         iam_lvar_format_init();
152                         iam_lfix_format_init();
153                         initialized = 1;
154                 }
155         }
156
157         result = -ENOENT;
158         cfs_list_for_each_entry(fmt, &iam_formats, if_linkage) {
159                 result = fmt->if_guess(c);
160                 if (result == 0)
161                         break;
162         }
163         return result;
164 }
165
166 /*
167  * Initialize container @c.
168  */
169 int iam_container_init(struct iam_container *c,
170                        struct iam_descr *descr, struct inode *inode)
171 {
172         memset(c, 0, sizeof *c);
173         c->ic_descr  = descr;
174         c->ic_object = inode;
175         cfs_init_rwsem(&c->ic_sem);
176         return 0;
177 }
178 EXPORT_SYMBOL(iam_container_init);
179
180 /*
181  * Determine container format.
182  */
183 int iam_container_setup(struct iam_container *c)
184 {
185         return iam_format_guess(c);
186 }
187 EXPORT_SYMBOL(iam_container_setup);
188
189 /*
190  * Finalize container @c, release all resources.
191  */
192 void iam_container_fini(struct iam_container *c)
193 {
194 }
195 EXPORT_SYMBOL(iam_container_fini);
196
197 void iam_path_init(struct iam_path *path, struct iam_container *c,
198                    struct iam_path_descr *pd)
199 {
200         memset(path, 0, sizeof *path);
201         path->ip_container = c;
202         path->ip_frame = path->ip_frames;
203         path->ip_data = pd;
204         path->ip_leaf.il_path = path;
205 }
206
207 static void iam_leaf_fini(struct iam_leaf *leaf);
208
209 void iam_path_release(struct iam_path *path)
210 {
211         int i;
212
213         for (i = 0; i < ARRAY_SIZE(path->ip_frames); i++) {
214                 if (path->ip_frames[i].bh != NULL) {
215                         brelse(path->ip_frames[i].bh);
216                         path->ip_frames[i].bh = NULL;
217                 }
218         }
219 }
220
221 void iam_path_fini(struct iam_path *path)
222 {
223         iam_leaf_fini(&path->ip_leaf);
224         iam_path_release(path);
225 }
226
227
228 void iam_path_compat_init(struct iam_path_compat *path, struct inode *inode)
229 {
230         int i;
231
232         path->ipc_hinfo = &path->ipc_hinfo_area;
233         for (i = 0; i < ARRAY_SIZE(path->ipc_scratch); ++i)
234                 path->ipc_descr.ipd_key_scratch[i] =
235                         (struct iam_ikey *)&path->ipc_scratch[i];
236
237         iam_path_init(&path->ipc_path, &path->ipc_container, &path->ipc_descr);
238 }
239
240 void iam_path_compat_fini(struct iam_path_compat *path)
241 {
242         iam_path_fini(&path->ipc_path);
243 }
244
245 /*
246  * Helper function initializing iam_path_descr and its key scratch area.
247  */
248 struct iam_path_descr *iam_ipd_alloc(void *area, int keysize)
249 {
250         struct iam_path_descr *ipd;
251         void *karea;
252         int i;
253
254         ipd = area;
255         karea = ipd + 1;
256         for (i = 0; i < ARRAY_SIZE(ipd->ipd_key_scratch); ++i, karea += keysize)
257                 ipd->ipd_key_scratch[i] = karea;
258         return ipd;
259 }
260 EXPORT_SYMBOL(iam_ipd_alloc);
261
262 void iam_ipd_free(struct iam_path_descr *ipd)
263 {
264 }
265 EXPORT_SYMBOL(iam_ipd_free);
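/*
 * Usage sketch (illustrative only): iam_ipd_alloc() does not allocate memory;
 * the caller provides an area large enough for the descriptor plus one key of
 * "keysize" bytes per ipd_key_scratch[] slot.  "KEYSIZE" and "area" below are
 * hypothetical.
 */
#if 0
        char area[sizeof(struct iam_path_descr) +
                  ARRAY_SIZE(((struct iam_path_descr *)0)->ipd_key_scratch) *
                  KEYSIZE];
        struct iam_path_descr *ipd;

        ipd = iam_ipd_alloc(area, KEYSIZE);
        /* ... pass ipd to iam_path_init()/iam_it_init() ... */
        iam_ipd_free(ipd);
#endif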
266
267 int iam_node_read(struct iam_container *c, iam_ptr_t ptr,
268                   handle_t *h, struct buffer_head **bh)
269 {
270         int result = 0;
271
272         *bh = ldiskfs_bread(h, c->ic_object, (int)ptr, 0, &result);
273         if (*bh == NULL)
274                 result = -EIO;
275         return result;
276 }
277
278 /*
279  * Return pointer to current leaf record. Pointer is valid while corresponding
280  * leaf node is locked and pinned.
281  */
282 static struct iam_rec *iam_leaf_rec(const struct iam_leaf *leaf)
283 {
284         return iam_leaf_ops(leaf)->rec(leaf);
285 }
286
287 /*
288  * Return pointer to the current leaf key. This function returns pointer to
289  * the key stored in node.
290  *
291  * Caller should assume that returned pointer is only valid while leaf node is
292  * pinned and locked.
293  */
294 static struct iam_key *iam_leaf_key(const struct iam_leaf *leaf)
295 {
296         return iam_leaf_ops(leaf)->key(leaf);
297 }
298
299 static int iam_leaf_key_size(const struct iam_leaf *leaf)
300 {
301         return iam_leaf_ops(leaf)->key_size(leaf);
302 }
303
304 static struct iam_ikey *iam_leaf_ikey(const struct iam_leaf *leaf,
305                                       struct iam_ikey *key)
306 {
307         return iam_leaf_ops(leaf)->ikey(leaf, key);
308 }
309
310 static int iam_leaf_keycmp(const struct iam_leaf *leaf,
311                            const struct iam_key *key)
312 {
313         return iam_leaf_ops(leaf)->key_cmp(leaf, key);
314 }
315
316 static int iam_leaf_keyeq(const struct iam_leaf *leaf,
317                           const struct iam_key *key)
318 {
319         return iam_leaf_ops(leaf)->key_eq(leaf, key);
320 }
321
322 #if LDISKFS_INVARIANT_ON
323 static int iam_leaf_check(struct iam_leaf *leaf);
324 extern int dx_node_check(struct iam_path *p, struct iam_frame *f);
325
326 static int iam_path_check(struct iam_path *p)
327 {
328         int i;
329         int result;
330         struct iam_frame *f;
331         struct iam_descr *param;
332
333         result = 1;
334         param = iam_path_descr(p);
335         for (i = 0; result && i < ARRAY_SIZE(p->ip_frames); ++i) {
336                 f = &p->ip_frames[i];
337                 if (f->bh != NULL) {
338                         result = dx_node_check(p, f);
339                         if (result)
340                                 result = !param->id_ops->id_node_check(p, f);
341                 }
342         }
343         if (result && p->ip_leaf.il_bh != NULL)
344                 result = iam_leaf_check(&p->ip_leaf);
345         if (result == 0) {
346                 ldiskfs_std_error(iam_path_obj(p)->i_sb, result);
347         }
348         return result;
349 }
350 #endif
351
352 static int iam_leaf_load(struct iam_path *path)
353 {
354         iam_ptr_t block;
355         int err;
356         struct iam_container *c;
357         struct buffer_head   *bh;
358         struct iam_leaf      *leaf;
359         struct iam_descr     *descr;
360
361         c     = path->ip_container;
362         leaf  = &path->ip_leaf;
363         descr = iam_path_descr(path);
364         block = path->ip_frame->leaf;
365         if (block == 0) {
366                 /* XXX bug 11027 */
367                 printk(CFS_KERN_EMERG "wrong leaf: %lu %d [%p %p %p]\n",
368                        (long unsigned)path->ip_frame->leaf,
369                        dx_get_count(dx_node_get_entries(path, path->ip_frame)),
370                        path->ip_frames[0].bh, path->ip_frames[1].bh,
371                        path->ip_frames[2].bh);
372         }
373         err   = descr->id_ops->id_node_read(c, block, NULL, &bh);
374         if (err == 0) {
375                 leaf->il_bh = bh;
376                 leaf->il_curidx = block;
377                 err = iam_leaf_ops(leaf)->init(leaf);
378                 assert_inv(ergo(err == 0, iam_leaf_check(leaf)));
379         }
380         return err;
381 }
382
383 static void iam_unlock_htree(struct inode *dir, struct dynlock_handle *lh)
384 {
385         if (lh != NULL)
386                 dynlock_unlock(&LDISKFS_I(dir)->i_htree_lock, lh);
387 }
388
389
390 static void iam_leaf_unlock(struct iam_leaf *leaf)
391 {
392         if (leaf->il_lock != NULL) {
393                 iam_unlock_htree(iam_leaf_container(leaf)->ic_object,
394                                 leaf->il_lock);
395                 do_corr(schedule());
396                 leaf->il_lock = NULL;
397         }
398 }
399
400 static void iam_leaf_fini(struct iam_leaf *leaf)
401 {
402         if (leaf->il_path != NULL) {
403                 iam_leaf_unlock(leaf);
404                 assert_inv(ergo(leaf->il_bh != NULL, iam_leaf_check(leaf)));
405                 iam_leaf_ops(leaf)->fini(leaf);
406                 if (leaf->il_bh) {
407                         brelse(leaf->il_bh);
408                         leaf->il_bh = NULL;
409                         leaf->il_curidx = 0;
410                 }
411         }
412 }
413
414 static void iam_leaf_start(struct iam_leaf *folio)
415 {
416         iam_leaf_ops(folio)->start(folio);
417 }
418
419 void iam_leaf_next(struct iam_leaf *folio)
420 {
421         iam_leaf_ops(folio)->next(folio);
422 }
423
424 static void iam_leaf_rec_add(struct iam_leaf *leaf, const struct iam_key *key,
425                              const struct iam_rec *rec)
426 {
427         iam_leaf_ops(leaf)->rec_add(leaf, key, rec);
428 }
429
430 static void iam_rec_del(struct iam_leaf *leaf, int shift)
431 {
432         iam_leaf_ops(leaf)->rec_del(leaf, shift);
433 }
434
435 int iam_leaf_at_end(const struct iam_leaf *leaf)
436 {
437         return iam_leaf_ops(leaf)->at_end(leaf);
438 }
439
440 void iam_leaf_split(struct iam_leaf *l, struct buffer_head **bh, iam_ptr_t nr)
441 {
442         iam_leaf_ops(l)->split(l, bh, nr);
443 }
444
445 int iam_leaf_can_add(const struct iam_leaf *l,
446                      const struct iam_key *k, const struct iam_rec *r)
447 {
448         return iam_leaf_ops(l)->can_add(l, k, r);
449 }
450
451 #if LDISKFS_INVARIANT_ON
452 static int iam_leaf_check(struct iam_leaf *leaf)
453 {
454         return 1;
455 #if 0
456         struct iam_lentry    *orig;
457         struct iam_path      *path;
458         struct iam_container *bag;
459         struct iam_ikey       *k0;
460         struct iam_ikey       *k1;
461         int result;
462         int first;
463
464         orig = leaf->il_at;
465         path = iam_leaf_path(leaf);
466         bag  = iam_leaf_container(leaf);
467
468         result = iam_leaf_ops(leaf)->init(leaf);
469         if (result != 0)
470                 return result;
471
472         first = 1;
473         iam_leaf_start(leaf);
474         k0 = iam_path_ikey(path, 0);
475         k1 = iam_path_ikey(path, 1);
476         while (!iam_leaf_at_end(leaf)) {
477                 iam_ikeycpy(bag, k0, k1);
478                 iam_ikeycpy(bag, k1, iam_leaf_ikey(leaf, k1));
479                 if (!first && iam_ikeycmp(bag, k0, k1) > 0) {
480                         return 0;
481                 }
482                 first = 0;
483                 iam_leaf_next(leaf);
484         }
485         leaf->il_at = orig;
486         return 1;
487 #endif
488 }
489 #endif
490
491 static int iam_txn_dirty(handle_t *handle,
492                          struct iam_path *path, struct buffer_head *bh)
493 {
494         int result;
495
496         result = ldiskfs_journal_dirty_metadata(handle, bh);
497         if (result != 0)
498                 ldiskfs_std_error(iam_path_obj(path)->i_sb, result);
499         return result;
500 }
501
502 static int iam_txn_add(handle_t *handle,
503                        struct iam_path *path, struct buffer_head *bh)
504 {
505         int result;
506
507         result = ldiskfs_journal_get_write_access(handle, bh);
508         if (result != 0)
509                 ldiskfs_std_error(iam_path_obj(path)->i_sb, result);
510         return result;
511 }
512
513 /***********************************************************************/
514 /* iterator interface                                                  */
515 /***********************************************************************/
516
517 static enum iam_it_state it_state(const struct iam_iterator *it)
518 {
519         return it->ii_state;
520 }
521
522 /*
523  * Helper function returning the container the iterator is attached to.
524  */
525 static struct iam_container *iam_it_container(const struct iam_iterator *it)
526 {
527         return it->ii_path.ip_container;
528 }
529
530 static inline int it_keycmp(const struct iam_iterator *it,
531                             const struct iam_key *k)
532 {
533         return iam_leaf_keycmp(&it->ii_path.ip_leaf, k);
534 }
535
536 static inline int it_keyeq(const struct iam_iterator *it,
537                            const struct iam_key *k)
538 {
539         return iam_leaf_keyeq(&it->ii_path.ip_leaf, k);
540 }
541
542 static int it_ikeycmp(const struct iam_iterator *it, const struct iam_ikey *ik)
543 {
544         return iam_ikeycmp(it->ii_path.ip_container,
545                            iam_leaf_ikey(&it->ii_path.ip_leaf,
546                                          iam_path_ikey(&it->ii_path, 0)), ik);
547 }
548
549 static inline int it_at_rec(const struct iam_iterator *it)
550 {
551         return !iam_leaf_at_end(&it->ii_path.ip_leaf);
552 }
553
554 static inline int it_before(const struct iam_iterator *it)
555 {
556         return it_state(it) == IAM_IT_SKEWED && it_at_rec(it);
557 }
558
559 /*
560  * Helper wrapper around iam_it_get(): returns 0 (success) only when record
561  * with exactly the same key as asked is found.
562  */
563 static int iam_it_get_exact(struct iam_iterator *it, const struct iam_key *k)
564 {
565         int result;
566
567         result = iam_it_get(it, k);
568         if (result > 0)
569                 result = 0;
570         else if (result == 0)
571                 /*
572                  * Return -ENOENT if cursor is located above record with a key
573                  * different from one specified, or in the empty leaf.
574                  *
575                  * XXX returning -ENOENT only works if iam_it_get() never
576                  * returns -ENOENT as a legitimate error.
577                  */
578                 result = -ENOENT;
579         return result;
580 }
581
582 void iam_container_write_lock(struct iam_container *ic)
583 {
584         cfs_down_write(&ic->ic_sem);
585 }
586
587 void iam_container_write_unlock(struct iam_container *ic)
588 {
589         cfs_up_write(&ic->ic_sem);
590 }
591
592 void iam_container_read_lock(struct iam_container *ic)
593 {
594         cfs_down_read(&ic->ic_sem);
595 }
596
597 void iam_container_read_unlock(struct iam_container *ic)
598 {
599         cfs_up_read(&ic->ic_sem);
600 }
601
602 /*
603  * Initialize iterator to IAM_IT_DETACHED state.
604  *
605  * postcondition: it_state(it) == IAM_IT_DETACHED
606  */
607 int  iam_it_init(struct iam_iterator *it, struct iam_container *c, __u32 flags,
608                  struct iam_path_descr *pd)
609 {
610         memset(it, 0, sizeof *it);
611         it->ii_flags  = flags;
612         it->ii_state  = IAM_IT_DETACHED;
613         iam_path_init(&it->ii_path, c, pd);
614         return 0;
615 }
616 EXPORT_SYMBOL(iam_it_init);
617
618 /*
619  * Finalize iterator and release all resources.
620  *
621  * precondition: it_state(it) == IAM_IT_DETACHED
622  */
623 void iam_it_fini(struct iam_iterator *it)
624 {
625         assert_corr(it_state(it) == IAM_IT_DETACHED);
626         iam_path_fini(&it->ii_path);
627 }
628 EXPORT_SYMBOL(iam_it_fini);
629
630 /*
631  * These locking primitives are used to protect parts of the directory's
632  * htree. The unit of protection is a block: a leaf or an index node.
633  */
634 struct dynlock_handle *iam_lock_htree(struct inode *dir, unsigned long value,
635                                      enum dynlock_type lt)
636 {
637         return dynlock_lock(&LDISKFS_I(dir)->i_htree_lock, value, lt, GFP_NOFS);
638 }
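/*
 * Usage sketch (illustrative only): the per-block dynlock pattern used
 * throughout this file -- lock a block of the htree, operate on it, unlock.
 * "dir" and "blk" below are hypothetical.
 */
#if 0
        struct dynlock_handle *lh;

        lh = iam_lock_htree(dir, blk, DLT_READ);
        if (lh == NULL)
                return -ENOMEM;
        /* ... read or modify the block ... */
        iam_unlock_htree(dir, lh);
#endif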
639
640
641
642 int iam_index_lock(struct iam_path *path, struct dynlock_handle **lh)
643 {
644         struct iam_frame *f;
645
646         for (f = path->ip_frame; f >= path->ip_frames; --f, ++lh) {
647                 do_corr(schedule());
648                 *lh = iam_lock_htree(iam_path_obj(path), f->curidx, DLT_READ);
649                 if (*lh == NULL)
650                         return -ENOMEM;
651         }
652         return 0;
653 }
654
655 /*
656  * Fast check for frame consistency.
657  */
658 static int iam_check_fast(struct iam_path *path, struct iam_frame *frame)
659 {
660         struct iam_container *bag;
661         struct iam_entry *next;
662         struct iam_entry *last;
663         struct iam_entry *entries;
664         struct iam_entry *at;
665
666         bag     = path->ip_container;
667         at      = frame->at;
668         entries = frame->entries;
669         last    = iam_entry_shift(path, entries, dx_get_count(entries) - 1);
670
671         if (unlikely(at > last))
672                 return -EAGAIN;
673
674         if (unlikely(dx_get_block(path, at) != frame->leaf))
675                 return -EAGAIN;
676
677         if (unlikely(iam_ikeycmp(bag, iam_ikey_at(path, at),
678                                  path->ip_ikey_target) > 0))
679                 return -EAGAIN;
680
681         next = iam_entry_shift(path, at, +1);
682         if (next <= last) {
683                 if (unlikely(iam_ikeycmp(bag, iam_ikey_at(path, next),
684                                          path->ip_ikey_target) <= 0))
685                         return -EAGAIN;
686         }
687         return 0;
688 }
689
690 int dx_index_is_compat(struct iam_path *path)
691 {
692         return iam_path_descr(path) == NULL;
693 }
694
695 /*
696  * iam_find_position
697  *
698  * Search for the position of the specified hash in an index node.
699  *
700  */
701
702 struct iam_entry *iam_find_position(struct iam_path *path,
703                                    struct iam_frame *frame)
704 {
705         int count;
706         struct iam_entry *p;
707         struct iam_entry *q;
708         struct iam_entry *m;
709
710         count = dx_get_count(frame->entries);
711         assert_corr(count && count <= dx_get_limit(frame->entries));
712         p = iam_entry_shift(path, frame->entries,
713                             dx_index_is_compat(path) ? 1 : 2);
714         q = iam_entry_shift(path, frame->entries, count - 1);
715         while (p <= q) {
716                 m = iam_entry_shift(path, p, iam_entry_diff(path, q, p) / 2);
717                 if (iam_ikeycmp(path->ip_container, iam_ikey_at(path, m),
718                                 path->ip_ikey_target) > 0)
719                         q = iam_entry_shift(path, m, -1);
720                 else
721                         p = iam_entry_shift(path, m, +1);
722         }
723         return iam_entry_shift(path, p, -1);
724 }
725
726
727
728 static iam_ptr_t iam_find_ptr(struct iam_path *path, struct iam_frame *frame)
729 {
730         return dx_get_block(path, iam_find_position(path, frame));
731 }
732
733 void iam_insert_key(struct iam_path *path, struct iam_frame *frame,
734                     const struct iam_ikey *key, iam_ptr_t ptr)
735 {
736         struct iam_entry *entries = frame->entries;
737         struct iam_entry *new = iam_entry_shift(path, frame->at, +1);
738         int count = dx_get_count(entries);
739
740         /*
741          * Unfortunately we cannot assert this, as this function is sometimes
742          * called by VFS under i_sem and without pdirops lock.
743          */
744         assert_corr(1 || iam_frame_is_locked(path, frame));
745         assert_corr(count < dx_get_limit(entries));
746         assert_corr(frame->at < iam_entry_shift(path, entries, count));
747         assert_inv(dx_node_check(path, frame));
748
749         memmove(iam_entry_shift(path, new, 1), new,
750                 (char *)iam_entry_shift(path, entries, count) - (char *)new);
751         dx_set_ikey(path, new, key);
752         dx_set_block(path, new, ptr);
753         dx_set_count(entries, count + 1);
754         assert_inv(dx_node_check(path, frame));
755 }
756
757 void iam_insert_key_lock(struct iam_path *path, struct iam_frame *frame,
758                          const struct iam_ikey *key, iam_ptr_t ptr)
759 {
760         iam_lock_bh(frame->bh);
761         iam_insert_key(path, frame, key, ptr);
762         iam_unlock_bh(frame->bh);
763 }
764 /*
765  * returns 0 if path was unchanged, -EAGAIN otherwise.
766  */
767 static int iam_check_path(struct iam_path *path, struct iam_frame *frame)
768 {
769         int equal;
770
771         iam_lock_bh(frame->bh);
772         equal = iam_check_fast(path, frame) == 0 ||
773                 frame->leaf == iam_find_ptr(path, frame);
774         DX_DEVAL(iam_lock_stats.dls_bh_again += !equal);
775         iam_unlock_bh(frame->bh);
776
777         return equal ? 0 : -EAGAIN;
778 }
779
780 static int iam_lookup_try(struct iam_path *path)
781 {
782         u32 ptr;
783         int err = 0;
784         int i;
785
786         struct iam_descr *param;
787         struct iam_frame *frame;
788         struct iam_container *c;
789
790         param = iam_path_descr(path);
791         c = path->ip_container;
792
793         ptr = param->id_ops->id_root_ptr(c);
794         for (frame = path->ip_frames, i = 0; i <= path->ip_indirect;
795              ++frame, ++i) {
796                 err = param->id_ops->id_node_read(c, (iam_ptr_t)ptr, NULL,
797                                                   &frame->bh);
798                 do_corr(schedule());
799
800                 iam_lock_bh(frame->bh);
801                 /*
802                  * node must be initialized under bh lock because concurrent
803                  * creation procedure may change it and iam_lookup_try() will
804                  * see obsolete tree height. -bzzz
805                  */
806                 if (err != 0)
807                         break;
808
809                 if (LDISKFS_INVARIANT_ON) {
810                         err = param->id_ops->id_node_check(path, frame);
811                         if (err != 0)
812                                 break;
813                 }
814
815                 err = param->id_ops->id_node_load(path, frame);
816                 if (err != 0)
817                         break;
818
819                 assert_inv(dx_node_check(path, frame));
820                 /*
821                  * splitting may change the root index block and move the hash
822                  * we're looking for into another index block, so we have to
823                  * check for this situation and repeat from the beginning if
824                  * the path got changed -bzzz
825                  */
826                 if (i > 0) {
827                         err = iam_check_path(path, frame - 1);
828                         if (err != 0)
829                                 break;
830                 }
831
832                 frame->at = iam_find_position(path, frame);
833                 frame->curidx = ptr;
834                 frame->leaf = ptr = dx_get_block(path, frame->at);
835
836                 iam_unlock_bh(frame->bh);
837                 do_corr(schedule());
838         }
839         if (err != 0)
840                 iam_unlock_bh(frame->bh);
841         path->ip_frame = --frame;
842         return err;
843 }
844
845 static int __iam_path_lookup(struct iam_path *path)
846 {
847         int err;
848         int i;
849
850         for (i = 0; i < DX_MAX_TREE_HEIGHT; ++ i)
851                 assert(path->ip_frames[i].bh == NULL);
852
853         do {
854                 err = iam_lookup_try(path);
855                 do_corr(schedule());
856                 if (err != 0)
857                         iam_path_fini(path);
858         } while (err == -EAGAIN);
859
860         return err;
861 }
862
863 /*
864  * returns 0 if path was unchanged, -EAGAIN otherwise.
865  */
866 static int iam_check_full_path(struct iam_path *path, int search)
867 {
868         struct iam_frame *bottom;
869         struct iam_frame *scan;
870         int i;
871         int result;
872
873         do_corr(schedule());
874
875         for (bottom = path->ip_frames, i = 0;
876              i < DX_MAX_TREE_HEIGHT && bottom->bh != NULL; ++bottom, ++i) {
877                 ; /* find last filled in frame */
878         }
879
880         /*
881          * Lock frames, bottom to top.
882          */
883         for (scan = bottom - 1; scan >= path->ip_frames; --scan)
884                 iam_lock_bh(scan->bh);
885         /*
886          * Check them top to bottom.
887          */
888         result = 0;
889         for (scan = path->ip_frames; scan < bottom; ++scan) {
890                 struct iam_entry *pos;
891
892                 if (search) {
893                         if (iam_check_fast(path, scan) == 0)
894                                 continue;
895
896                         pos = iam_find_position(path, scan);
897                         if (scan->leaf != dx_get_block(path, pos)) {
898                                 result = -EAGAIN;
899                                 break;
900                         }
901                         scan->at = pos;
902                 } else {
903                         pos = iam_entry_shift(path, scan->entries,
904                                               dx_get_count(scan->entries) - 1);
905                         if (scan->at > pos ||
906                             scan->leaf != dx_get_block(path, scan->at)) {
907                                 result = -EAGAIN;
908                                 break;
909                         }
910                 }
911         }
912
913         /*
914          * Unlock top to bottom.
915          */
916         for (scan = path->ip_frames; scan < bottom; ++scan)
917                 iam_unlock_bh(scan->bh);
918         DX_DEVAL(iam_lock_stats.dls_bh_full_again += !!result);
919         do_corr(schedule());
920
921         return result;
922 }
923
924
925 /*
926  * Performs path lookup and returns with found leaf (if any) locked by htree
927  * lock.
928  */
929 int iam_lookup_lock(struct iam_path *path,
930                    struct dynlock_handle **dl, enum dynlock_type lt)
931 {
932         int result;
933         struct inode *dir;
934
935         dir = iam_path_obj(path);
936         while ((result = __iam_path_lookup(path)) == 0) {
937                 do_corr(schedule());
938                 *dl = iam_lock_htree(dir, path->ip_frame->leaf, lt);
939                 if (*dl == NULL) {
940                         iam_path_fini(path);
941                         result = -ENOMEM;
942                         break;
943                 }
944                 do_corr(schedule());
945                 /*
946                  * while we are locking it, the leaf we just found may get
947                  * split, so we need to check for this -bzzz
948                  */
949                 if (iam_check_full_path(path, 1) == 0)
950                         break;
951                 iam_unlock_htree(dir, *dl);
952                 *dl = NULL;
953                 iam_path_fini(path);
954         }
955         return result;
956 }
957 /*
958  * Performs tree top-to-bottom traversal starting from root, and loads leaf
959  * node.
960  */
961 static int iam_path_lookup(struct iam_path *path, int index)
962 {
963         struct iam_container *c;
964         struct iam_descr *descr;
965         struct iam_leaf  *leaf;
966         int result;
967
968         c = path->ip_container;
969         leaf = &path->ip_leaf;
970         descr = iam_path_descr(path);
971         result = iam_lookup_lock(path, &leaf->il_lock, DLT_WRITE);
972         assert_inv(iam_path_check(path));
973         do_corr(schedule());
974         if (result == 0) {
975                 result = iam_leaf_load(path);
976                 assert_inv(ergo(result == 0, iam_leaf_check(leaf)));
977                 if (result == 0) {
978                         do_corr(schedule());
979                         if (index)
980                                 result = iam_leaf_ops(leaf)->
981                                         ilookup(leaf, path->ip_ikey_target);
982                         else
983                                 result = iam_leaf_ops(leaf)->
984                                         lookup(leaf, path->ip_key_target);
985                         do_corr(schedule());
986                 }
987                 if (result < 0)
988                         iam_leaf_unlock(leaf);
989         }
990         return result;
991 }
992
993 /*
994  * Common part of iam_it_{i,}get().
995  */
996 static int __iam_it_get(struct iam_iterator *it, int index)
997 {
998         int result;
999         assert_corr(it_state(it) == IAM_IT_DETACHED);
1000
1001         result = iam_path_lookup(&it->ii_path, index);
1002         if (result >= 0) {
1003                 int collision;
1004
1005                 collision = result & IAM_LOOKUP_LAST;
1006                 switch (result & ~IAM_LOOKUP_LAST) {
1007                 case IAM_LOOKUP_EXACT:
1008                         result = +1;
1009                         it->ii_state = IAM_IT_ATTACHED;
1010                         break;
1011                 case IAM_LOOKUP_OK:
1012                         result = 0;
1013                         it->ii_state = IAM_IT_ATTACHED;
1014                         break;
1015                 case IAM_LOOKUP_BEFORE:
1016                 case IAM_LOOKUP_EMPTY:
1017                         result = 0;
1018                         it->ii_state = IAM_IT_SKEWED;
1019                         break;
1020                 default:
1021                         assert(0);
1022                 }
1023                 result |= collision;
1024         }
1025         /*
1026          * See iam_it_get_exact() for explanation.
1027          */
1028         assert_corr(result != -ENOENT);
1029         return result;
1030 }
1031
1032 /*
1033  * The correct hash was found, but not the same key; iterate through the
1034  * hash collision chain, looking for the correct record.
1035  */
1036 static int iam_it_collision(struct iam_iterator *it)
1037 {
1038         int result;
1039
1040         assert(ergo(it_at_rec(it), !it_keyeq(it, it->ii_path.ip_key_target)));
1041
1042         while ((result = iam_it_next(it)) == 0) {
1043                 do_corr(schedule());
1044                 if (it_ikeycmp(it, it->ii_path.ip_ikey_target) != 0)
1045                         return -ENOENT;
1046                 if (it_keyeq(it, it->ii_path.ip_key_target))
1047                         return 0;
1048         }
1049         return result;
1050 }
1051
1052 /*
1053  * Attach iterator. After successful completion, @it points to record with
1054  * least key not larger than @k.
1055  *
1056  * Return value: 0: positioned on existing record,
1057  *             +ve: exact position found,
1058  *             -ve: error.
1059  *
1060  * precondition:  it_state(it) == IAM_IT_DETACHED
1061  * postcondition: ergo(result == 0 && it_state(it) == IAM_IT_ATTACHED,
1062  *                     it_keycmp(it, k) <= 0)
1063  */
1064 int iam_it_get(struct iam_iterator *it, const struct iam_key *k)
1065 {
1066         int result;
1067         assert_corr(it_state(it) == IAM_IT_DETACHED);
1068
1069         it->ii_path.ip_ikey_target = NULL;
1070         it->ii_path.ip_key_target  = k;
1071
1072         result = __iam_it_get(it, 0);
1073
1074         if (result == IAM_LOOKUP_LAST) {
1075                 result = iam_it_collision(it);
1076                 if (result != 0) {
1077                         iam_it_put(it);
1078                         iam_it_fini(it);
1079                         result = __iam_it_get(it, 0);
1080                 } else
1081                         result = +1;
1082         }
1083         if (result > 0)
1084                 result &= ~IAM_LOOKUP_LAST;
1085
1086         assert_corr(ergo(result > 0, it_keycmp(it, k) == 0));
1087         assert_corr(ergo(result == 0 && it_state(it) == IAM_IT_ATTACHED,
1088                          it_keycmp(it, k) <= 0));
1089         return result;
1090 }
1091 EXPORT_SYMBOL(iam_it_get);
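/*
 * Usage sketch (illustrative only): an exact-match lookup through the
 * iterator interface defined in this file.  "c", "pd", "key", "recbuf" and
 * "recsize" are hypothetical and assumed to be set up by the caller (see
 * iam_container_init() and iam_ipd_alloc()).
 */
#if 0
        struct iam_iterator it;
        int rc;

        iam_it_init(&it, c, 0, pd);
        rc = iam_it_get(&it, key);      /* attach; +1 means exact match */
        if (rc > 0)
                memcpy(recbuf, iam_it_rec_get(&it), recsize);
        iam_it_put(&it);                /* detach and unlock the leaf */
        iam_it_fini(&it);               /* release path resources */
#endif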
1092
1093 /*
1094  * Attach iterator by index key.
1095  */
1096 static int iam_it_iget(struct iam_iterator *it, const struct iam_ikey *k)
1097 {
1098         assert_corr(it_state(it) == IAM_IT_DETACHED);
1099
1100         it->ii_path.ip_ikey_target = k;
1101         return __iam_it_get(it, 1) & ~IAM_LOOKUP_LAST;
1102 }
1103
1104 /*
1105  * Attach iterator, and assure it points to the record (not skewed).
1106  *
1107  * Return value: 0: positioned on existing record,
1108  *             +ve: exact position found,
1109  *             -ve: error.
1110  *
1111  * precondition:  it_state(it) == IAM_IT_DETACHED &&
1112  *                !(it->ii_flags&IAM_IT_WRITE)
1113  * postcondition: ergo(result == 0, it_state(it) == IAM_IT_ATTACHED)
1114  */
1115 int iam_it_get_at(struct iam_iterator *it, const struct iam_key *k)
1116 {
1117         int result;
1118         assert_corr(it_state(it) == IAM_IT_DETACHED &&
1119                     !(it->ii_flags&IAM_IT_WRITE));
1120         result = iam_it_get(it, k);
1121         if (result == 0) {
1122                 if (it_state(it) != IAM_IT_ATTACHED) {
1123                         assert_corr(it_state(it) == IAM_IT_SKEWED);
1124                         result = iam_it_next(it);
1125                 }
1126         }
1127         assert_corr(ergo(result >= 0, it_state(it) == IAM_IT_ATTACHED));
1128         return result;
1129 }
1130 EXPORT_SYMBOL(iam_it_get_at);
1131
1132 /*
1133  * Duplicates iterator.
1134  *
1135  * postcondition: it_state(dst) == it_state(src) &&
1136  *                iam_it_container(dst) == iam_it_container(src) &&
1137  *                dst->ii_flags = src->ii_flags &&
1138  *                ergo(it_state(src) == IAM_IT_ATTACHED,
1139  *                     iam_it_rec_get(dst) == iam_it_rec_get(src) &&
1140  *                     iam_it_key_get(dst) == iam_it_key_get(src))
1141  */
1142 void iam_it_dup(struct iam_iterator *dst, const struct iam_iterator *src)
1143 {
1144         dst->ii_flags     = src->ii_flags;
1145         dst->ii_state     = src->ii_state;
1146         /* XXX not yet. iam_path_dup(&dst->ii_path, &src->ii_path); */
1147         /*
1148          * XXX: duplicate lock.
1149          */
1150         assert_corr(it_state(dst) == it_state(src));
1151         assert_corr(iam_it_container(dst) == iam_it_container(src));
1152         assert_corr(dst->ii_flags = src->ii_flags);
1153         assert_corr(ergo(it_state(src) == IAM_IT_ATTACHED,
1154                     iam_it_rec_get(dst) == iam_it_rec_get(src) &&
1155                     iam_it_key_get(dst) == iam_it_key_get(src)));
1156
1157 }
1158
1159 /*
1160  * Detach iterator. Does nothing if already in the detached state.
1161  *
1162  * postcondition: it_state(it) == IAM_IT_DETACHED
1163  */
1164 void iam_it_put(struct iam_iterator *it)
1165 {
1166         if (it->ii_state != IAM_IT_DETACHED) {
1167                 it->ii_state = IAM_IT_DETACHED;
1168                 iam_leaf_fini(&it->ii_path.ip_leaf);
1169         }
1170 }
1171 EXPORT_SYMBOL(iam_it_put);
1172
1173 static struct iam_ikey *iam_it_ikey_get(const struct iam_iterator *it,
1174                                         struct iam_ikey *ikey);
1175
1176
1177 /*
1178  * This function increments the frame pointer to search the next leaf
1179  * block, and reads in the necessary intervening nodes if the search
1180  * should be necessary.  Whether or not the search is necessary is
1181  * controlled by the hash parameter.  If the hash value is even, then
1182  * the search is only continued if the next block starts with that
1183  * hash value.  This is used if we are searching for a specific file.
1184  *
1185  * If the hash value is HASH_NB_ALWAYS, then always go to the next block.
1186  *
1187  * This function returns 1 if the caller should continue to search,
1188  * or 0 if it should not.  If there is an error reading one of the
1189  * index blocks, it will return a negative error code.
1190  *
1191  * If start_hash is non-null, it will be filled in with the starting
1192  * hash of the next page.
1193  */
1194 static int iam_htree_advance(struct inode *dir, __u32 hash,
1195                               struct iam_path *path, __u32 *start_hash,
1196                               int compat)
1197 {
1198         struct iam_frame *p;
1199         struct buffer_head *bh;
1200         int err, num_frames = 0;
1201         __u32 bhash;
1202
1203         p = path->ip_frame;
1204         /*
1205          * Find the next leaf page by incrementing the frame pointer.
1206          * If we run out of entries in the interior node, loop around and
1207          * increment pointer in the parent node.  When we break out of
1208          * this loop, num_frames indicates the number of interior
1209          * nodes need to be read.
1210          */
1211         while (1) {
1212                 do_corr(schedule());
1213                 iam_lock_bh(p->bh);
1214                 p->at = iam_entry_shift(path, p->at, +1);
1215                 if (p->at < iam_entry_shift(path, p->entries,
1216                                             dx_get_count(p->entries))) {
1217                         p->leaf = dx_get_block(path, p->at);
1218                         iam_unlock_bh(p->bh);
1219                         break;
1220                 }
1221                 iam_unlock_bh(p->bh);
1222                 if (p == path->ip_frames)
1223                         return 0;
1224                 num_frames++;
1225                 --p;
1226         }
1227
1228         if (compat) {
1229                 /*
1230                  * Htree hash magic.
1231                  *
1232                  * If the hash is 1, then continue only if the next page has
1233                  * a continuation hash of any value.  This is used for
1234                  * readdir handling.  Otherwise, check to see if the hash
1235                  * matches the desired continuation hash.  If it doesn't,
1236                  * return since there's no point in reading the successive
1237                  * index pages.
1238                  */
1239                 dx_get_ikey(path, p->at, (struct iam_ikey *)&bhash);
1240                 if (start_hash)
1241                         *start_hash = bhash;
1242                 if ((hash & 1) == 0) {
1243                         if ((bhash & ~1) != hash)
1244                                 return 0;
1245                 }
1246         }
1247         /*
1248          * If the hash is HASH_NB_ALWAYS, we always go to the next
1249          * block so no check is necessary
1250          */
1251         while (num_frames--) {
1252                 iam_ptr_t idx;
1253
1254                 do_corr(schedule());
1255                 iam_lock_bh(p->bh);
1256                 idx = p->leaf = dx_get_block(path, p->at);
1257                 iam_unlock_bh(p->bh);
1258                 err = iam_path_descr(path)->id_ops->
1259                         id_node_read(path->ip_container, idx, NULL, &bh);
1260                 if (err != 0)
1261                         return err; /* Failure */
1262                 ++p;
1263                 brelse(p->bh);
1264                 assert_corr(p->bh != bh);
1265                 p->bh = bh;
1266                 p->entries = dx_node_get_entries(path, p);
1267                 p->at = iam_entry_shift(path, p->entries, !compat);
1268                 assert_corr(p->curidx != idx);
1269                 p->curidx = idx;
1270                 iam_lock_bh(p->bh);
1271                 assert_corr(p->leaf != dx_get_block(path, p->at));
1272                 p->leaf = dx_get_block(path, p->at);
1273                 iam_unlock_bh(p->bh);
1274                 assert_inv(dx_node_check(path, p));
1275         }
1276         return 1;
1277 }
1278
1279
1280 static inline int iam_index_advance(struct iam_path *path)
1281 {
1282         return iam_htree_advance(iam_path_obj(path), 0, path, NULL, 0);
1283 }
1284
1285 static void iam_unlock_array(struct inode *dir, struct dynlock_handle **lh)
1286 {
1287         int i;
1288
1289         for (i = 0; i < DX_MAX_TREE_HEIGHT; ++i, ++lh) {
1290                 if (*lh != NULL) {
1291                         iam_unlock_htree(dir, *lh);
1292                         *lh = NULL;
1293                 }
1294         }
1295 }
1296 /*
1297  * Advance index part of @path to point to the next leaf. Returns 1 on
1298  * success, 0 when the end of the container was reached. Leaf node is locked.
1299  */
1300 int iam_index_next(struct iam_container *c, struct iam_path *path)
1301 {
1302         iam_ptr_t cursor;
1303         struct dynlock_handle *lh[DX_MAX_TREE_HEIGHT] = { 0, };
1304         int result;
1305         struct inode *object;
1306
1307         /*
1308          * Locking for iam_index_next()... is to be described.
1309          */
1310
1311         object = c->ic_object;
1312         cursor = path->ip_frame->leaf;
1313
1314         while (1) {
1315                 result = iam_index_lock(path, lh);
1316                 do_corr(schedule());
1317                 if (result < 0)
1318                         break;
1319
1320                 result = iam_check_full_path(path, 0);
1321                 if (result == 0 && cursor == path->ip_frame->leaf) {
1322                         result = iam_index_advance(path);
1323
1324                         assert_corr(result == 0 ||
1325                                     cursor != path->ip_frame->leaf);
1326                         break;
1327                 }
1328                 do {
1329                         iam_unlock_array(object, lh);
1330
1331                         iam_path_release(path);
1332                         do_corr(schedule());
1333
1334                         result = __iam_path_lookup(path);
1335                         if (result < 0)
1336                                 break;
1337
1338                         while (path->ip_frame->leaf != cursor) {
1339                                 do_corr(schedule());
1340
1341                                 result = iam_index_lock(path, lh);
1342                                 do_corr(schedule());
1343                                 if (result < 0)
1344                                         break;
1345
1346                                 result = iam_check_full_path(path, 0);
1347                                 if (result != 0)
1348                                         break;
1349
1350                                 result = iam_index_advance(path);
1351                                 if (result == 0) {
1352                                         CERROR("cannot find cursor : %u\n",
1353                                                 cursor);
1354                                         result = -EIO;
1355                                 }
1356                                 if (result < 0)
1357                                         break;
1358                                 result = iam_check_full_path(path, 0);
1359                                 if (result != 0)
1360                                         break;
1361                                 iam_unlock_array(object, lh);
1362                         }
1363                 } while (result == -EAGAIN);
1364                 if (result < 0)
1365                         break;
1366         }
1367         iam_unlock_array(object, lh);
1368         return result;
1369 }
1370
1371 /*
1372  * Move iterator one record right.
1373  *
1374  * Return value: 0: success,
1375  *              +1: end of container reached
1376  *             -ve: error
1377  *
1378  * precondition:  (it_state(it) == IAM_IT_ATTACHED ||
1379  *                 it_state(it) == IAM_IT_SKEWED) && it->ii_flags&IAM_IT_MOVE
1380  * postcondition: ergo(result == 0, it_state(it) == IAM_IT_ATTACHED) &&
1381  *                ergo(result >  0, it_state(it) == IAM_IT_DETACHED)
1382  */
1383 int iam_it_next(struct iam_iterator *it)
1384 {
1385         int result;
1386         struct iam_path      *path;
1387         struct iam_leaf      *leaf;
1388         struct inode         *obj;
1389         do_corr(struct iam_ikey *ik_orig);
1390
1391         /* assert_corr(it->ii_flags&IAM_IT_MOVE); */
1392         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1393                     it_state(it) == IAM_IT_SKEWED);
1394
1395         path = &it->ii_path;
1396         leaf = &path->ip_leaf;
1397         obj  = iam_path_obj(path);
1398
1399         assert_corr(iam_leaf_is_locked(leaf));
1400
1401         result = 0;
1402         do_corr(ik_orig = it_at_rec(it) ?
1403                 iam_it_ikey_get(it, iam_path_ikey(path, 2)) : NULL);
1404         if (it_before(it)) {
1405                 assert_corr(!iam_leaf_at_end(leaf));
1406                 it->ii_state = IAM_IT_ATTACHED;
1407         } else {
1408                 if (!iam_leaf_at_end(leaf))
1409                         /* advance within leaf node */
1410                         iam_leaf_next(leaf);
1411                 /*
1412                  * multiple iterations may be necessary due to empty leaves.
1413                  */
1414                 while (result == 0 && iam_leaf_at_end(leaf)) {
1415                         do_corr(schedule());
1416                         /* advance index portion of the path */
1417                         result = iam_index_next(iam_it_container(it), path);
1418                         assert_corr(iam_leaf_is_locked(leaf));
1419                         if (result == 1) {
1420                                 struct dynlock_handle *lh;
1421                                 lh = iam_lock_htree(obj, path->ip_frame->leaf,
1422                                                    DLT_WRITE);
1423                                 if (lh != NULL) {
1424                                         iam_leaf_fini(leaf);
1425                                         leaf->il_lock = lh;
1426                                         result = iam_leaf_load(path);
1427                                         if (result == 0)
1428                                                 iam_leaf_start(leaf);
1429                                 } else
1430                                         result = -ENOMEM;
1431                         } else if (result == 0)
1432                                 /* end of container reached */
1433                                 result = +1;
1434                         if (result != 0)
1435                                 iam_it_put(it);
1436                 }
1437                 if (result == 0)
1438                         it->ii_state = IAM_IT_ATTACHED;
1439         }
1440         assert_corr(ergo(result == 0, it_state(it) == IAM_IT_ATTACHED));
1441         assert_corr(ergo(result >  0, it_state(it) == IAM_IT_DETACHED));
1442         assert_corr(ergo(result == 0 && ik_orig != NULL,
1443                          it_ikeycmp(it, ik_orig) >= 0));
1444         return result;
1445 }
1446 EXPORT_SYMBOL(iam_it_next);
1447
1448 /*
1449  * Return pointer to the record under iterator.
1450  *
1451  * precondition:  it_state(it) == IAM_IT_ATTACHED && it_at_rec(it)
1452  * postcondition: it_state(it) == IAM_IT_ATTACHED
1453  */
1454 struct iam_rec *iam_it_rec_get(const struct iam_iterator *it)
1455 {
1456         assert_corr(it_state(it) == IAM_IT_ATTACHED);
1457         assert_corr(it_at_rec(it));
1458         return iam_leaf_rec(&it->ii_path.ip_leaf);
1459 }
1460 EXPORT_SYMBOL(iam_it_rec_get);
1461
1462 static void iam_it_reccpy(struct iam_iterator *it, const struct iam_rec *r)
1463 {
1464         struct iam_leaf *folio;
1465
1466         folio = &it->ii_path.ip_leaf;
1467         iam_leaf_ops(folio)->rec_set(folio, r);
1468 }
1469
1470 /*
1471  * Replace contents of record under iterator.
1472  *
1473  * precondition:  it_state(it) == IAM_IT_ATTACHED &&
1474  *                it->ii_flags&IAM_IT_WRITE
1475  * postcondition: it_state(it) == IAM_IT_ATTACHED &&
1476  *                ergo(result == 0, !memcmp(iam_it_rec_get(it), r, ...))
1477  */
1478 int iam_it_rec_set(handle_t *h,
1479                    struct iam_iterator *it, const struct iam_rec *r)
1480 {
1481         int result;
1482         struct iam_path *path;
1483         struct buffer_head *bh;
1484
1485         assert_corr(it_state(it) == IAM_IT_ATTACHED &&
1486                     it->ii_flags&IAM_IT_WRITE);
1487         assert_corr(it_at_rec(it));
1488
1489         path = &it->ii_path;
1490         bh   = path->ip_leaf.il_bh;
1491         result = iam_txn_add(h, path, bh);
1492         if (result == 0) {
1493                 iam_it_reccpy(it, r);
1494                 result = iam_txn_dirty(h, path, bh);
1495         }
1496         return result;
1497 }
1498 EXPORT_SYMBOL(iam_it_rec_set);
1499
1500 /*
1501  * Return pointer to the index key under iterator.
1502  *
1503  * precondition:  it_state(it) == IAM_IT_ATTACHED ||
1504  *                it_state(it) == IAM_IT_SKEWED
1505  */
1506 static struct iam_ikey *iam_it_ikey_get(const struct iam_iterator *it,
1507                                         struct iam_ikey *ikey)
1508 {
1509         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1510                     it_state(it) == IAM_IT_SKEWED);
1511         assert_corr(it_at_rec(it));
1512         return iam_leaf_ikey(&it->ii_path.ip_leaf, ikey);
1513 }
1514
1515 /*
1516  * Return pointer to the key under iterator.
1517  *
1518  * precondition:  it_state(it) == IAM_IT_ATTACHED ||
1519  *                it_state(it) == IAM_IT_SKEWED
1520  */
1521 struct iam_key *iam_it_key_get(const struct iam_iterator *it)
1522 {
1523         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1524                     it_state(it) == IAM_IT_SKEWED);
1525         assert_corr(it_at_rec(it));
1526         return iam_leaf_key(&it->ii_path.ip_leaf);
1527 }
1528 EXPORT_SYMBOL(iam_it_key_get);
1529
1530 /*
1531  * Return size of key under iterator (in bytes)
1532  *
1533  * precondition:  it_state(it) == IAM_IT_ATTACHED ||
1534  *                it_state(it) == IAM_IT_SKEWED
1535  */
1536 int iam_it_key_size(const struct iam_iterator *it)
1537 {
1538         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1539                     it_state(it) == IAM_IT_SKEWED);
1540         assert_corr(it_at_rec(it));
1541         return iam_leaf_key_size(&it->ii_path.ip_leaf);
1542 }
1543 EXPORT_SYMBOL(iam_it_key_size);
1544
1545 /*
1546  * Insertion of new record. Interaction with jbd during non-trivial case (when
1547  * split happens) is as follows:
1548  *
1549  *  - new leaf node is involved into transaction by ldiskfs_append();
1550  *
1551  *  - old leaf node is involved into transaction by iam_add_rec();
1552  *
1553  *  - the leaf where the insertion point ends up is marked dirty by iam_add_rec();
1554  *
1555  *  - leaf without insertion point is marked dirty (as @new_leaf) by
1556  *  iam_new_leaf();
1557  *
1558  *  - split index nodes are involved into transaction and marked dirty by
1559  *  split_index_node().
1560  *
1561  *  - "safe" index node, which is not split, but where the new pointer is
1562  *  inserted, is involved into transaction and marked dirty by split_index_node().
1563  *
1564  *  - index node where pointer to new leaf is inserted is involved into
1565  *  transaction by split_index_node() and marked dirty by iam_add_rec().
1566  *
1567  *  - inode is marked dirty by iam_add_rec().
1568  *
1569  */
1570
1571 static int iam_new_leaf(handle_t *handle, struct iam_leaf *leaf)
1572 {
1573         int err;
1574         iam_ptr_t blknr;
1575         struct buffer_head   *new_leaf;
1576         struct buffer_head   *old_leaf;
1577         struct iam_container *c;
1578         struct inode         *obj;
1579         struct iam_path      *path;
1580
1581         assert_inv(iam_leaf_check(leaf));
1582
1583         c = iam_leaf_container(leaf);
1584         path = leaf->il_path;
1585
1586         obj = c->ic_object;
1587         new_leaf = ldiskfs_append(handle, obj, (__u32 *)&blknr, &err);
1588         do_corr(schedule());
1589         if (new_leaf != NULL) {
1590                 struct dynlock_handle *lh;
1591
1592                 lh = iam_lock_htree(obj, blknr, DLT_WRITE);
1593                 do_corr(schedule());
1594                 if (lh != NULL) {
1595                         iam_leaf_ops(leaf)->init_new(c, new_leaf);
1596                         do_corr(schedule());
1597                         old_leaf = leaf->il_bh;
1598                         iam_leaf_split(leaf, &new_leaf, blknr);
1599                         if (old_leaf != leaf->il_bh) {
1600                                 /*
1601                                  * Switched to the new leaf.
1602                                  */
1603                                 iam_leaf_unlock(leaf);
1604                                 leaf->il_lock = lh;
1605                                 path->ip_frame->leaf = blknr;
1606                         } else
1607                                 iam_unlock_htree(obj, lh);
1608                         do_corr(schedule());
1609                         err = iam_txn_dirty(handle, path, new_leaf);
1610                         brelse(new_leaf);
1611                         if (err == 0)
1612                                 err = ldiskfs_mark_inode_dirty(handle, obj);
1613                         do_corr(schedule());
1614                 } else
1615                         err = -ENOMEM;
1616         }
1617         assert_inv(iam_leaf_check(leaf));
1618         assert_inv(iam_leaf_check(&iam_leaf_path(leaf)->ip_leaf));
1619         assert_inv(iam_path_check(iam_leaf_path(leaf)));
1620         return err;
1621 }
1622
1623 static inline void dx_set_limit(struct iam_entry *entries, unsigned value)
1624 {
1625         ((struct dx_countlimit *) entries)->limit = cpu_to_le16(value);
1626 }
1627
1628 static int iam_shift_entries(struct iam_path *path,
1629                          struct iam_frame *frame, unsigned count,
1630                          struct iam_entry *entries, struct iam_entry *entries2,
1631                          u32 newblock)
1632 {
1633         unsigned count1;
1634         unsigned count2;
1635         int delta;
1636
1637         struct iam_frame *parent = frame - 1;
1638         struct iam_ikey *pivot = iam_path_ikey(path, 3);
1639
1640         delta = dx_index_is_compat(path) ? 0 : +1;
1641
1642         count1 = count/2 + delta;
1643         count2 = count - count1;
1644         dx_get_ikey(path, iam_entry_shift(path, entries, count1), pivot);
1645
1646         dxtrace(printk("Split index %i/%i\n", count1, count2));
1647
1648         memcpy((char *) iam_entry_shift(path, entries2, delta),
1649                (char *) iam_entry_shift(path, entries, count1),
1650                count2 * iam_entry_size(path));
1651
1652         dx_set_count(entries2, count2 + delta);
1653         dx_set_limit(entries2, dx_node_limit(path));
1654
1655         /*
1656          * NOTE: very subtle. A competing dx_probe() may find this 2nd-level
1657          * index via the root index; we then insert a new key here and update
1658          * this 2nd-level index's count, so dx_probe() may see a 2nd-level
1659          * index without the hash it looks for. The fix is to re-check the
1660          * root index after locking the just-found 2nd-level index -bzzz
1661          */
1662         iam_insert_key_lock(path, parent, pivot, newblock);
1663
1664         /*
1665          * Now both the old and the new 2nd-level index blocks contain all
1666          * pointers, so dx_probe() may find an entry in either.  That's OK -bzzz
1667          */
1668         iam_lock_bh(frame->bh);
1669         dx_set_count(entries, count1);
1670         iam_unlock_bh(frame->bh);
1671
1672         /*
1673          * Now the old 2nd-level index block points to the first half of the
1674          * leaves. It is important that dx_probe() checks the root index block
1675          * for changes under dx_lock_bh(frame->bh) -bzzz
1676          */
1677
1678         return count1;
1679 }
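/*
 * Worked example of the arithmetic above (numbers are illustrative only).
 * For a full node with count == 508 entries:
 *
 *   - ldiskfs-compatible index (delta == 0): count1 = 254, count2 = 254; the
 *     new block receives entries [254..507] and gets count 254, while the old
 *     block keeps entries [0..253] and its count is reduced to 254.
 *
 *   - iam index (delta == +1): count1 = 255, count2 = 253; the copied entries
 *     are shifted by one slot in the new block, whose count is set to 254
 *     (count2 + delta), while the old block keeps count1 == 255 entries.
 *
 * In both cases the pivot is the ikey of entries[count1], and it is inserted
 * into the parent frame by iam_insert_key_lock().
 */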
1680
1681
1682 int split_index_node(handle_t *handle, struct iam_path *path,
1683                      struct dynlock_handle **lh)
1684 {
1685
1686         struct iam_entry *entries;   /* old block contents */
1687         struct iam_entry *entries2;  /* new block contents */
1688         struct iam_frame *frame, *safe;
1689         struct buffer_head *bh_new[DX_MAX_TREE_HEIGHT] = {0};
1690         u32 newblock[DX_MAX_TREE_HEIGHT] = {0};
1691         struct dynlock_handle *lock[DX_MAX_TREE_HEIGHT] = {NULL,};
1692         struct dynlock_handle *new_lock[DX_MAX_TREE_HEIGHT] = {NULL,};
1693         struct inode *dir = iam_path_obj(path);
1694         struct iam_descr *descr;
1695         int nr_splet;
1696         int i, err;
1697
1698         descr = iam_path_descr(path);
1699         /*
1700          * Algorithm below depends on this.
1701          */
1702         assert_corr(dx_root_limit(path) < dx_node_limit(path));
1703
1704         frame = path->ip_frame;
1705         entries = frame->entries;
1706
1707         /*
1708          * Tall-tree handling: we might have to split multiple index blocks
1709          * all the way up to tree root. Tricky point here is error handling:
1710          * to avoid complicated undo/rollback we
1711          *
1712          *   - first allocate all necessary blocks
1713          *
1714          *   - insert pointers into them atomically.
1715          */
1716
1717         /*
1718          * Locking: leaf is already locked. htree-locks are acquired on all
1719          * index nodes that require split bottom-to-top, on the "safe" node,
1720          * and on all new nodes
1721          */
1722
1723         dxtrace(printk("using %u of %u node entries\n",
1724                        dx_get_count(entries), dx_get_limit(entries)));
1725
1726         /* What levels need split? */
1727         for (nr_splet = 0; frame >= path->ip_frames &&
1728              dx_get_count(frame->entries) == dx_get_limit(frame->entries);
1729              --frame, ++nr_splet) {
1730                 do_corr(schedule());
1731                 if (nr_splet == DX_MAX_TREE_HEIGHT) {
1732                         /*
1733                          * CWARN(dir->i_sb, __FUNCTION__,
1734                          *       "Directory index full!\n");
1735                          */
1736                         err = -ENOSPC;
1737                         goto cleanup;
1738                 }
1739         }
1740
1741         safe = frame;
1742
1743         /*
1744          * Lock all nodes, bottom to top.
1745          */
1746         for (frame = path->ip_frame, i = nr_splet; i >= 0; --i, --frame) {
1747                 do_corr(schedule());
1748                 lock[i] = iam_lock_htree(dir, frame->curidx, DLT_WRITE);
1749                 if (lock[i] == NULL) {
1750                         err = -ENOMEM;
1751                         goto cleanup;
1752                 }
1753         }
1754
1755         /*
1756          * Check for concurrent index modification.
1757          */
1758         err = iam_check_full_path(path, 1);
1759         if (err)
1760                 goto cleanup;
1761         /*
1762          * And check that the same number of nodes is to be split.
1763          */
1764         for (i = 0, frame = path->ip_frame; frame >= path->ip_frames &&
1765              dx_get_count(frame->entries) == dx_get_limit(frame->entries);
1766              --frame, ++i) {
1767                 ;
1768         }
1769         if (i != nr_splet) {
1770                 err = -EAGAIN;
1771                 goto cleanup;
1772         }
1773
1774         /* Go back down, allocating blocks, locking them, and adding them
1775          * to the transaction... */
1776         for (frame = safe + 1, i = 0; i < nr_splet; ++i, ++frame) {
1777                 bh_new[i] = ldiskfs_append (handle, dir, &newblock[i], &err);
1778                 do_corr(schedule());
1779                 if (!bh_new[i] ||
1780                     descr->id_ops->id_node_init(path->ip_container,
1781                                                 bh_new[i], 0) != 0)
1782                         goto cleanup;
1783                 new_lock[i] = iam_lock_htree(dir, newblock[i], DLT_WRITE);
1784                 if (new_lock[i] == NULL) {
1785                         err = -ENOMEM;
1786                         goto cleanup;
1787                 }
1788                 do_corr(schedule());
1789                 BUFFER_TRACE(frame->bh, "get_write_access");
1790                 err = ldiskfs_journal_get_write_access(handle, frame->bh);
1791                 if (err)
1792                         goto journal_error;
1793         }
1794         /* Add "safe" node to transaction too */
1795         if (safe + 1 != path->ip_frames) {
1796                 do_corr(schedule());
1797                 err = ldiskfs_journal_get_write_access(handle, safe->bh);
1798                 if (err)
1799                         goto journal_error;
1800         }
1801
1802         /* Go through nodes once more, inserting pointers */
1803         for (frame = safe + 1, i = 0; i < nr_splet; ++i, ++frame) {
1804                 unsigned count;
1805                 int idx;
1806                 struct buffer_head *bh2;
1807                 struct buffer_head *bh;
1808
1809                 entries = frame->entries;
1810                 count = dx_get_count(entries);
1811                 idx = iam_entry_diff(path, frame->at, entries);
1812
1813                 bh2 = bh_new[i];
1814                 entries2 = dx_get_entries(path, bh2->b_data, 0);
1815
1816                 bh = frame->bh;
1817                 if (frame == path->ip_frames) {
1818                         /* splitting root node. Tricky point:
1819                          *
1820                          * In the "normal" B-tree we'd split root *and* add
1821                          * new root to the tree with pointers to the old root
1822                          * and its sibling (thus introducing two new nodes).
1823                          *
1824                          * In htree it's enough to add one node, because
1825                          * capacity of the root node is smaller than that of
1826                          * non-root one.
1827                          */
1828                         struct iam_frame *frames;
1829                         struct iam_entry *next;
1830
1831                         assert_corr(i == 0);
1832
1833                         do_corr(schedule());
1834
1835                         frames = path->ip_frames;
1836                         memcpy((char *) entries2, (char *) entries,
1837                                count * iam_entry_size(path));
1838                         dx_set_limit(entries2, dx_node_limit(path));
1839
1840                         /* Set up root */
1841                         iam_lock_bh(frame->bh);
1842                         next = descr->id_ops->id_root_inc(path->ip_container,
1843                                                           path, frame);
1844                         dx_set_block(path, next, newblock[0]);
1845                         iam_unlock_bh(frame->bh);
1846
1847                         do_corr(schedule());
1848                         /* Shift frames in the path */
1849                         memmove(frames + 2, frames + 1,
1850                                 (sizeof path->ip_frames) - 2 * sizeof frames[0]);
1851                         /* Add new access path frame */
1852                         frames[1].at = iam_entry_shift(path, entries2, idx);
1853                         frames[1].entries = entries = entries2;
1854                         frames[1].bh = bh2;
1855                         assert_inv(dx_node_check(path, frame));
1856                         ++ path->ip_frame;
1857                         ++ frame;
1858                         assert_inv(dx_node_check(path, frame));
1859                         bh_new[0] = NULL; /* buffer head is "consumed" */
1860                         err = ldiskfs_journal_get_write_access(handle, bh2);
1861                         if (err)
1862                                 goto journal_error;
1863                         do_corr(schedule());
1864                 } else {
1865                         /* splitting non-root index node. */
1866                         struct iam_frame *parent = frame - 1;
1867
1868                         do_corr(schedule());
1869                         count = iam_shift_entries(path, frame, count,
1870                                               entries, entries2, newblock[i]);
1871                         /* Which index block gets the new entry? */
1872                         if (idx >= count) {
1873                                 int d = dx_index_is_compat(path) ? 0 : +1;
1874
1875                                 frame->at = iam_entry_shift(path, entries2,
1876                                                             idx - count + d);
1877                                 frame->entries = entries = entries2;
1878                                 frame->curidx = newblock[i];
1879                                 swap(frame->bh, bh2);
1880                                 assert_corr(lock[i + 1] != NULL);
1881                                 assert_corr(new_lock[i] != NULL);
1882                                 swap(lock[i + 1], new_lock[i]);
1883                                 bh_new[i] = bh2;
1884                                 parent->at = iam_entry_shift(path,
1885                                                              parent->at, +1);
1886                         }
1887                         assert_inv(dx_node_check(path, frame));
1888                         assert_inv(dx_node_check(path, parent));
1889                         dxtrace(dx_show_index ("node", frame->entries));
1890                         dxtrace(dx_show_index ("node",
1891                                ((struct dx_node *) bh2->b_data)->entries));
1892                         err = ldiskfs_journal_dirty_metadata(handle, bh2);
1893                         if (err)
1894                                 goto journal_error;
1895                         do_corr(schedule());
1896                         err = ldiskfs_journal_dirty_metadata(handle, parent->bh);
1897                         if (err)
1898                                 goto journal_error;
1899                 }
1900                 do_corr(schedule());
1901                 err = ldiskfs_journal_dirty_metadata(handle, bh);
1902                 if (err)
1903                         goto journal_error;
1904         }
1905         /*
1906          * This function was called to make insertion of new leaf
1907          * possible. Check that it fulfilled its obligations.
1908          */
1909         assert_corr(dx_get_count(path->ip_frame->entries) <
1910                     dx_get_limit(path->ip_frame->entries));
1911         assert_corr(lock[nr_splet] != NULL);
1912         *lh = lock[nr_splet];
1913         lock[nr_splet] = NULL;
1914         if (nr_splet > 0) {
1915                 /*
1916                  * Log ->i_size modification.
1917                  */
1918                 err = ldiskfs_mark_inode_dirty(handle, dir);
1919                 if (err)
1920                         goto journal_error;
1921         }
1922         goto cleanup;
1923 journal_error:
1924         ldiskfs_std_error(dir->i_sb, err);
1925
1926 cleanup:
1927         iam_unlock_array(dir, lock);
1928         iam_unlock_array(dir, new_lock);
1929
1930         assert_corr(err || iam_frame_is_locked(path, path->ip_frame));
1931
1932         do_corr(schedule());
1933         for (i = 0; i < ARRAY_SIZE(bh_new); ++i) {
1934                 if (bh_new[i] != NULL)
1935                         brelse(bh_new[i]);
1936         }
1937         return err;
1938 }
1939
1940 static int iam_add_rec(handle_t *handle, struct iam_iterator *it,
1941                        struct iam_path *path,
1942                        const struct iam_key *k, const struct iam_rec *r)
1943 {
1944         int err;
1945         struct iam_leaf *leaf;
1946
1947         leaf = &path->ip_leaf;
1948         assert_inv(iam_leaf_check(leaf));
1949         assert_inv(iam_path_check(path));
1950         err = iam_txn_add(handle, path, leaf->il_bh);
1951         if (err == 0) {
1952                 do_corr(schedule());
1953                 if (!iam_leaf_can_add(leaf, k, r)) {
1954                         struct dynlock_handle *lh = NULL;
1955
1956                         do {
1957                                 assert_corr(lh == NULL);
1958                                 do_corr(schedule());
1959                                 err = split_index_node(handle, path, &lh);
1960                                 if (err == -EAGAIN) {
1961                                         assert_corr(lh == NULL);
1962
1963                                         iam_path_fini(path);
1964                                         it->ii_state = IAM_IT_DETACHED;
1965
1966                                         do_corr(schedule());
1967                                         err = iam_it_get_exact(it, k);
1968                                         if (err == -ENOENT)
1969                                                 err = +1; /* repeat split */
1970                                         else if (err == 0)
1971                                                 err = -EEXIST;
1972                                 }
1973                         } while (err > 0);
1974                         assert_inv(iam_path_check(path));
1975                         if (err == 0) {
1976                                 assert_corr(lh != NULL);
1977                                 do_corr(schedule());
1978                                 err = iam_new_leaf(handle, leaf);
1979                                 if (err == 0)
1980                                         err = iam_txn_dirty(handle, path,
1981                                                             path->ip_frame->bh);
1982                         }
1983                         iam_unlock_htree(iam_path_obj(path), lh);
1984                         do_corr(schedule());
1985                 }
1986                 if (err == 0) {
1987                         iam_leaf_rec_add(leaf, k, r);
1988                         err = iam_txn_dirty(handle, path, leaf->il_bh);
1989                 }
1990         }
1991         assert_inv(iam_leaf_check(leaf));
1992         assert_inv(iam_leaf_check(&path->ip_leaf));
1993         assert_inv(iam_path_check(path));
1994         return err;
1995 }
1996
1997 /*
1998  * Insert new record with key @k and contents from @r, shifting records to the
1999  * right. On success, iterator is positioned on the newly inserted record.
2000  *
2001  * precondition: it->ii_flags&IAM_IT_WRITE &&
2002  *               (it_state(it) == IAM_IT_ATTACHED ||
2003  *                it_state(it) == IAM_IT_SKEWED) &&
2004  *               ergo(it_state(it) == IAM_IT_ATTACHED,
2005  *                    it_keycmp(it, k) <= 0) &&
2006  *               ergo(it_before(it), it_keycmp(it, k) > 0)
2007  * postcondition: ergo(result == 0,
2008  *                     it_state(it) == IAM_IT_ATTACHED &&
2009  *                     it_keycmp(it, k) == 0 &&
2010  *                     !memcmp(iam_it_rec_get(it), r, ...))
2011  */
2012 int iam_it_rec_insert(handle_t *h, struct iam_iterator *it,
2013                       const struct iam_key *k, const struct iam_rec *r)
2014 {
2015         int result;
2016         struct iam_path *path;
2017
2018         path = &it->ii_path;
2019
2020         assert_corr(it->ii_flags&IAM_IT_WRITE);
2021         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
2022                     it_state(it) == IAM_IT_SKEWED);
2023         assert_corr(ergo(it_state(it) == IAM_IT_ATTACHED,
2024                          it_keycmp(it, k) <= 0));
2025         assert_corr(ergo(it_before(it), it_keycmp(it, k) > 0));
2026         result = iam_add_rec(h, it, path, k, r);
2027         if (result == 0)
2028                 it->ii_state = IAM_IT_ATTACHED;
2029         assert_corr(ergo(result == 0,
2030                          it_state(it) == IAM_IT_ATTACHED &&
2031                          it_keycmp(it, k) == 0));
2032         return result;
2033 }
2034 EXPORT_SYMBOL(iam_it_rec_insert);
2035
2036 /*
2037  * Delete record under iterator.
2038  *
2039  * precondition:  it_state(it) == IAM_IT_ATTACHED &&
2040  *                it->ii_flags&IAM_IT_WRITE &&
2041  *                it_at_rec(it)
2042  * postcondition: it_state(it) == IAM_IT_ATTACHED ||
2043  *                it_state(it) == IAM_IT_DETACHED
2044  */
2045 int iam_it_rec_delete(handle_t *h, struct iam_iterator *it)
2046 {
2047         int result;
2048         struct iam_leaf *leaf;
2049         struct iam_path *path;
2050
2051         assert_corr(it_state(it) == IAM_IT_ATTACHED &&
2052                     it->ii_flags&IAM_IT_WRITE);
2053         assert_corr(it_at_rec(it));
2054
2055         path = &it->ii_path;
2056         leaf = &path->ip_leaf;
2057
2058         assert_inv(iam_leaf_check(leaf));
2059         assert_inv(iam_path_check(path));
2060
2061         result = iam_txn_add(h, path, leaf->il_bh);
2062         /*
2063          * no compaction for now.
2064          */
2065         if (result == 0) {
2066                 iam_rec_del(leaf, it->ii_flags&IAM_IT_MOVE);
2067                 result = iam_txn_dirty(h, path, leaf->il_bh);
2068                 if (result == 0 && iam_leaf_at_end(leaf) &&
2069                     it->ii_flags&IAM_IT_MOVE) {
2070                         result = iam_it_next(it);
2071                         if (result > 0)
2072                                 result = 0;
2073                 }
2074         }
2075         assert_inv(iam_leaf_check(leaf));
2076         assert_inv(iam_path_check(path));
2077         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
2078                     it_state(it) == IAM_IT_DETACHED);
2079         return result;
2080 }
2081 EXPORT_SYMBOL(iam_it_rec_delete);
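/*
 * Sketch of how a scanning caller might use the postcondition above
 * (hypothetical; assumes the iterator was opened with IAM_IT_WRITE|IAM_IT_MOVE
 * and is currently positioned at a record):
 *
 *      result = iam_it_rec_delete(h, it);
 *      if (result == 0 && it_state(it) == IAM_IT_ATTACHED) {
 *              // iterator still points at a record (a successor of the
 *              // deleted one), so the scan can continue
 *      } else {
 *              // error, or the iterator detached at the container end
 *      }
 */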
2082
2083 /*
2084  * Convert iterator to cookie.
2085  *
2086  * precondition:  it_state(it) == IAM_IT_ATTACHED &&
2087  *                iam_it_container(it)->ic_descr->id_ikey_size <= sizeof(iam_pos_t)
2088  * postcondition: it_state(it) == IAM_IT_ATTACHED
2089  */
2090 iam_pos_t iam_it_store(const struct iam_iterator *it)
2091 {
2092         iam_pos_t result;
2093
2094         assert_corr(it_state(it) == IAM_IT_ATTACHED);
2095         assert_corr(it_at_rec(it));
2096         assert_corr(iam_it_container(it)->ic_descr->id_ikey_size <=
2097                     sizeof result);
2098
2099         result = 0;
2100         return *(iam_pos_t *)iam_it_ikey_get(it, (void *)&result);
2101 }
2102 EXPORT_SYMBOL(iam_it_store);
2103
2104 /*
2105  * Restore iterator from cookie.
2106  *
2107  * precondition:  it_state(it) == IAM_IT_DETACHED && it->ii_flags&IAM_IT_MOVE &&
2108  *                iam_it_container(it)->ic_descr->id_ikey_size <= sizeof(iam_pos_t)
2109  * postcondition: ergo(result == 0, it_state(it) == IAM_IT_ATTACHED &&
2110  *                                  iam_it_store(it) == pos)
2111  */
2112 int iam_it_load(struct iam_iterator *it, iam_pos_t pos)
2113 {
2114         assert_corr(it_state(it) == IAM_IT_DETACHED &&
2115                     it->ii_flags&IAM_IT_MOVE);
2116         assert_corr(iam_it_container(it)->ic_descr->id_ikey_size <= sizeof pos);
2117         return iam_it_iget(it, (struct iam_ikey *)&pos);
2118 }
2119 EXPORT_SYMBOL(iam_it_load);
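/*
 * Typical save/restore pattern built on iam_it_store()/iam_it_load() (a
 * sketch; the surrounding code that drops and re-opens the iterator is
 * hypothetical):
 *
 *      iam_pos_t pos;
 *
 *      // remember the current position, e.g. before releasing the iterator
 *      pos = iam_it_store(it);
 *      iam_it_put(it);
 *      iam_it_fini(it);
 *      ...
 *      // later, resume from @pos on an iterator opened with IAM_IT_MOVE
 *      iam_it_init(it, c, IAM_IT_MOVE, pd);
 *      result = iam_it_load(it, pos);
 */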
2120
2121 /***********************************************************************/
2122 /* invariants                                                          */
2123 /***********************************************************************/
2124
2125 static inline int ptr_inside(void *base, size_t size, void *ptr)
2126 {
2127         return (base <= ptr) && (ptr < base + size);
2128 }
2129
2130 int iam_frame_invariant(struct iam_frame *f)
2131 {
2132         return
2133                 (f->bh != NULL &&
2134                 f->bh->b_data != NULL &&
2135                 ptr_inside(f->bh->b_data, f->bh->b_size, f->entries) &&
2136                 ptr_inside(f->bh->b_data, f->bh->b_size, f->at) &&
2137                 f->entries <= f->at);
2138 }
2139 int iam_leaf_invariant(struct iam_leaf *l)
2140 {
2141         return
2142                 l->il_bh != NULL &&
2143                 l->il_bh->b_data != NULL &&
2144                 ptr_inside(l->il_bh->b_data, l->il_bh->b_size, l->il_entries) &&
2145                 ptr_inside(l->il_bh->b_data, l->il_bh->b_size, l->il_at) &&
2146                 l->il_entries <= l->il_at;
2147 }
2148
2149 int iam_path_invariant(struct iam_path *p)
2150 {
2151         int i;
2152
2153         if (p->ip_container == NULL ||
2154             p->ip_indirect < 0 || p->ip_indirect > DX_MAX_TREE_HEIGHT - 1 ||
2155             p->ip_frame != p->ip_frames + p->ip_indirect ||
2156             !iam_leaf_invariant(&p->ip_leaf))
2157                 return 0;
2158         for (i = 0; i < ARRAY_SIZE(p->ip_frames); ++i) {
2159                 if (i <= p->ip_indirect) {
2160                         if (!iam_frame_invariant(&p->ip_frames[i]))
2161                                 return 0;
2162                 }
2163         }
2164         return 1;
2165 }
2166
2167 int iam_it_invariant(struct iam_iterator *it)
2168 {
2169         return
2170                 (it->ii_state == IAM_IT_DETACHED ||
2171                  it->ii_state == IAM_IT_ATTACHED ||
2172                  it->ii_state == IAM_IT_SKEWED) &&
2173                 !(it->ii_flags & ~(IAM_IT_MOVE | IAM_IT_WRITE)) &&
2174                 ergo(it->ii_state == IAM_IT_ATTACHED ||
2175                      it->ii_state == IAM_IT_SKEWED,
2176                      iam_path_invariant(&it->ii_path) &&
2177                      equi(it_at_rec(it), it->ii_state == IAM_IT_SKEWED));
2178 }
2179
2180 /*
2181  * Search container @c for record with key @k. If record is found, its data
2182  * are moved into @r.
2183  *
2184  * Return values: 0: found, -ENOENT: not-found, -ve: error
2185  */
2186 int iam_lookup(struct iam_container *c, const struct iam_key *k,
2187                struct iam_rec *r, struct iam_path_descr *pd)
2188 {
2189         struct iam_iterator it;
2190         int result;
2191
2192         iam_it_init(&it, c, 0, pd);
2193
2194         result = iam_it_get_exact(&it, k);
2195         if (result == 0)
2196                 /*
2197                  * record with required key found, copy it into user buffer
2198                  */
2199                 iam_reccpy(&it.ii_path.ip_leaf, r);
2200         iam_it_put(&it);
2201         iam_it_fini(&it);
2202         return result;
2203 }
2204 EXPORT_SYMBOL(iam_lookup);
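/*
 * A minimal caller sketch (the container @c, key @k, record buffer @r and
 * descriptor @pd are hypothetical):
 *
 *      result = iam_lookup(c, k, r, pd);
 *      if (result == 0) {
 *              // @r now holds the record stored under @k
 *      } else if (result == -ENOENT) {
 *              // no record with key @k exists in the container
 *      } else {
 *              // some other error (I/O failure, corruption, ...)
 *      }
 */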
2205
2206 /*
2207  * Insert new record @r with key @k into container @c (within context of
2208  * transaction @h).
2209  *
2210  * Return values: 0: success, -ve: error, including -EEXIST when record with
2211  * given key is already present.
2212  *
2213  * postcondition: ergo(result == 0 || result == -EEXIST,
2214  *                     iam_lookup(c, k, r2) == 0)
2215  */
2216 int iam_insert(handle_t *h, struct iam_container *c, const struct iam_key *k,
2217                const struct iam_rec *r, struct iam_path_descr *pd)
2218 {
2219         struct iam_iterator it;
2220         int result;
2221
2222         iam_it_init(&it, c, IAM_IT_WRITE, pd);
2223
2224         result = iam_it_get_exact(&it, k);
2225         if (result == -ENOENT)
2226                 result = iam_it_rec_insert(h, &it, k, r);
2227         else if (result == 0)
2228                 result = -EEXIST;
2229         iam_it_put(&it);
2230         iam_it_fini(&it);
2231         return result;
2232 }
2233 EXPORT_SYMBOL(iam_insert);
2234
2235 /*
2236  * Update record with the key @k in container @c (within context of
2237  * transaction @h), new record is given by @r.
2238  *
2239  * Return values: 0: success, -ve: error, including -ENOENT if no record with
2240  * the given key found.
2241  */
2242 int iam_update(handle_t *h, struct iam_container *c, const struct iam_key *k,
2243                const struct iam_rec *r, struct iam_path_descr *pd)
2244 {
2245         struct iam_iterator it;
2246         int result;
2247
2248         iam_it_init(&it, c, IAM_IT_WRITE, pd);
2249
2250         result = iam_it_get_exact(&it, k);
2251         if (result == 0)
2252                 iam_it_rec_set(h, &it, r);
2253         iam_it_put(&it);
2254         iam_it_fini(&it);
2255         return result;
2256 }
2257 EXPORT_SYMBOL(iam_update);
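/*
 * Sketch of an "insert or update" caller built from the two wrappers above
 * (hypothetical usage; a single-lookup variant could be implemented directly
 * on the iterator interface instead):
 *
 *      result = iam_insert(h, c, k, r, pd);
 *      if (result == -EEXIST)
 *              // the key is already present: overwrite the existing record
 *              result = iam_update(h, c, k, r, pd);
 */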
2258
2259 /*
2260  * Delete existing record with key @k.
2261  *
2262  * Return values: 0: success, -ENOENT: not-found, -ve: other error.
2263  *
2264  * postcondition: ergo(result == 0 || result == -ENOENT,
2265  *                     iam_lookup(c, k, *) == -ENOENT)
2266  */
2267 int iam_delete(handle_t *h, struct iam_container *c, const struct iam_key *k,
2268                struct iam_path_descr *pd)
2269 {
2270         struct iam_iterator it;
2271         int result;
2272
2273         iam_it_init(&it, c, IAM_IT_WRITE, pd);
2274
2275         result = iam_it_get_exact(&it, k);
2276         if (result == 0)
2277                 iam_it_rec_delete(h, &it);
2278         iam_it_put(&it);
2279         iam_it_fini(&it);
2280         return result;
2281 }
2282 EXPORT_SYMBOL(iam_delete);
2283
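/*
 * Compute the entry limit of a root index node: the number of @size-byte
 * entries that fit into a root block of @blocksize bytes after @rootgap bytes
 * of header, decremented if necessary so that the root limit stays strictly
 * below the limit of a non-root node (cf. the assert_corr() at the top of
 * split_index_node()).
 *
 * Worked example (numbers are illustrative only): blocksize = 4096,
 * rootgap = 32, size = 16 gives limit = 254 and nlimit = 256, so 254 is
 * returned; with rootgap = 0 both are 256 and 255 is returned instead.
 */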
2284 int iam_root_limit(int rootgap, int blocksize, int size)
2285 {
2286         int limit;
2287         int nlimit;
2288
2289         limit = (blocksize - rootgap) / size;
2290         nlimit = blocksize / size;
2291         if (limit == nlimit)
2292                 limit--;
2293         return limit;
2294 }