1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see [sun.com URL with a
18  * copy of GPLv2].
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2011, 2012, Whamcloud, Inc.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * iam.c
37  * Top-level entry points into iam module
38  *
39  * Author: Wang Di <wangdi@clusterfs.com>
40  * Author: Nikita Danilov <nikita@clusterfs.com>
41  */
42
43 /*
44  * iam: big theory statement.
45  *
46  * iam (Index Access Module) is a module providing abstraction of persistent
47  * transactional container on top of generalized ldiskfs htree.
48  *
49  * iam supports:
50  *
51  *     - key, pointer, and record size specifiable per container.
52  *
53  *     - trees taller than 2 index levels.
54  *
55  *     - read/write to existing ldiskfs htree directories as iam containers.
56  *
57  * iam container is a tree, consisting of leaf nodes containing keys and
58  * records stored in this container, and index nodes, containing keys and
59  * pointers to leaf or index nodes.
60  *
61  * iam does not work with keys directly; instead, it calls a user-supplied key
62  * comparison function (->dpo_keycmp()).
63  *
64  * Pointers are (currently) interpreted as logical offsets (measured in
65  * blocks) within the underlying flat file on top of which the iam tree lives.
66  *
67  * On-disk format:
68  *
69  * iam mostly tries to reuse existing htree formats.
70  *
71  * Format of index node:
72  *
73  * +-----+-------+-------+-------+------+-------+------------+
74  * |     | count |       |       |      |       |            |
75  * | gap |   /   | entry | entry | .... | entry | free space |
76  * |     | limit |       |       |      |       |            |
77  * +-----+-------+-------+-------+------+-------+------------+
78  *
79  *       gap           this part of the node is never accessed by iam code. It
80  *                     exists for binary compatibility with the ldiskfs htree
81  *                     (which, in turn, stores a fake struct ext2_dirent for ext2
82  *                     compatibility), and to keep some unspecified per-node
83  *                     data. The gap can be different for root and non-root index
84  *                     nodes. Gap size can be specified for each container
85  *                     (a gap of 0 is allowed).
86  *
87  *       count/limit   current number of entries in this node, and the maximal
88  *                     number of entries that can fit into the node. count/limit
89  *                     has the same size as an entry, and is itself counted in
90  *                     count.
91  *
92  *       entry         index entry: consists of a key immediately followed by
93  *                     a pointer to a child node. The sizes of the key and the
94  *                     pointer depend on the container. An entry has neither
95  *                     alignment nor padding.
96  *
97  *       free space    the portion of the node where new entries are added
98  *
99  * Entries in an index node are sorted by their key values.
100  *
101  * Format of a leaf node is not specified. Generic iam code accesses leaf
102  * nodes through ->id_leaf methods in struct iam_descr.
103  *
104  */
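/*
 * Illustrative sketch (an addition for clarity, not part of the original
 * code): assuming the htree-compatible layout with a 4-byte hash key and a
 * 4-byte block pointer, an index entry and the count/limit header could be
 * pictured as below.  The example_dx_entry name is hypothetical; generic iam
 * code never casts entries to a fixed structure and instead works through the
 * per-container key/pointer sizes and dx_get_count()/dx_get_limit().  The
 * dx_countlimit layout shown reflects what dx_set_limit() below assumes.
 */
#if 0
struct example_dx_entry {
	__le32 hash;		/* key, immediately followed by ... */
	__le32 block;		/* ... pointer to a child node; no padding */
};

struct dx_countlimit {		/* stored in the slot of the first entry */
	__le16 limit;		/* maximal number of entries fitting the node */
	__le16 count;		/* current number of entries, incl. this header */
};
#endif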
105
106 #include <linux/module.h>
107 #include <linux/fs.h>
108 #include <linux/pagemap.h>
109 #include <linux/time.h>
110 #include <linux/fcntl.h>
111 #include <linux/stat.h>
112 #include <linux/string.h>
113 #include <linux/quotaops.h>
114 #include <linux/buffer_head.h>
115 #include "osd_internal.h"
116
117 #include "xattr.h"
118 #include "acl.h"
119
120 /*
121  * List of all registered formats.
122  *
123  * No locking. Callers synchronize.
124  */
125 static CFS_LIST_HEAD(iam_formats);
126
127 void iam_format_register(struct iam_format *fmt)
128 {
129         cfs_list_add(&fmt->if_linkage, &iam_formats);
130 }
131 EXPORT_SYMBOL(iam_format_register);
132
133 /*
134  * Determine format of given container. This is done by scanning list of
135  * registered formats and calling ->if_guess() method of each in turn.
136  */
137 static int iam_format_guess(struct iam_container *c)
138 {
139         int result;
140         struct iam_format *fmt;
141
142         /*
143          * XXX temporary initialization hook.
144          */
145         {
146                 static int initialized = 0;
147
148                 if (!initialized) {
149                         iam_lvar_format_init();
150                         iam_lfix_format_init();
151                         initialized = 1;
152                 }
153         }
154
155         result = -ENOENT;
156         cfs_list_for_each_entry(fmt, &iam_formats, if_linkage) {
157                 result = fmt->if_guess(c);
158                 if (result == 0)
159                         break;
160         }
161         return result;
162 }
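/*
 * Usage sketch (hypothetical, for illustration only): a format implementation
 * provides an ->if_guess() callback that recognizes its on-disk root block
 * and registers itself once, much as iam_lvar_format_init() and
 * iam_lfix_format_init() above are expected to do.
 */
#if 0
static int example_format_guess(struct iam_container *c)
{
	/* inspect the root block of c->ic_object; return 0 if this format
	 * owns the container, a negative errno otherwise */
	return -ENOENT;
}

static struct iam_format example_format = {
	.if_guess = example_format_guess,
};

static void example_format_init(void)
{
	iam_format_register(&example_format);
}
#endif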
163
164 /*
165  * Initialize container @c.
166  */
167 int iam_container_init(struct iam_container *c,
168                        struct iam_descr *descr, struct inode *inode)
169 {
170         memset(c, 0, sizeof *c);
171         c->ic_descr  = descr;
172         c->ic_object = inode;
173         cfs_init_rwsem(&c->ic_sem);
174         return 0;
175 }
176 EXPORT_SYMBOL(iam_container_init);
177
178 /*
179  * Determine container format.
180  */
181 int iam_container_setup(struct iam_container *c)
182 {
183         return iam_format_guess(c);
184 }
185 EXPORT_SYMBOL(iam_container_setup);
186
187 /*
188  * Finalize container @c, release all resources.
189  */
190 void iam_container_fini(struct iam_container *c)
191 {
192         brelse(c->ic_root_bh);
193         c->ic_root_bh = NULL;
194 }
195 EXPORT_SYMBOL(iam_container_fini);
196
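/*
 * Typical container life cycle (a minimal sketch; it assumes the caller holds
 * a reference on @inode and supplies a matching iam_descr, and omits the
 * error handling of real osd-ldiskfs callers):
 */
#if 0
static int example_container_use(struct inode *inode, struct iam_descr *descr)
{
	struct iam_container c;
	int rc;

	rc = iam_container_init(&c, descr, inode);
	if (rc == 0) {
		rc = iam_container_setup(&c);	/* guess the on-disk format */
		/* ... access records through iterators ... */
		iam_container_fini(&c);		/* drop cached root buffer */
	}
	return rc;
}
#endif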
197 void iam_path_init(struct iam_path *path, struct iam_container *c,
198                    struct iam_path_descr *pd)
199 {
200         memset(path, 0, sizeof *path);
201         path->ip_container = c;
202         path->ip_frame = path->ip_frames;
203         path->ip_data = pd;
204         path->ip_leaf.il_path = path;
205 }
206
207 static void iam_leaf_fini(struct iam_leaf *leaf);
208
209 void iam_path_release(struct iam_path *path)
210 {
211         int i;
212
213         for (i = 0; i < ARRAY_SIZE(path->ip_frames); i++) {
214                 if (path->ip_frames[i].bh != NULL) {
215                         brelse(path->ip_frames[i].bh);
216                         path->ip_frames[i].bh = NULL;
217                 }
218         }
219 }
220
221 void iam_path_fini(struct iam_path *path)
222 {
223         iam_leaf_fini(&path->ip_leaf);
224         iam_path_release(path);
225 }
226
227
228 void iam_path_compat_init(struct iam_path_compat *path, struct inode *inode)
229 {
230         int i;
231
232         path->ipc_hinfo = &path->ipc_hinfo_area;
233         for (i = 0; i < ARRAY_SIZE(path->ipc_scratch); ++i)
234                 path->ipc_descr.ipd_key_scratch[i] =
235                         (struct iam_ikey *)&path->ipc_scratch[i];
236
237         iam_path_init(&path->ipc_path, &path->ipc_container, &path->ipc_descr);
238 }
239
240 void iam_path_compat_fini(struct iam_path_compat *path)
241 {
242         iam_path_fini(&path->ipc_path);
243 }
244
245 /*
246  * Helper function initializing iam_path_descr and its key scratch area.
247  */
248 struct iam_path_descr *iam_ipd_alloc(void *area, int keysize)
249 {
250         struct iam_path_descr *ipd;
251         void *karea;
252         int i;
253
254         ipd = area;
255         karea = ipd + 1;
256         for (i = 0; i < ARRAY_SIZE(ipd->ipd_key_scratch); ++i, karea += keysize)
257                 ipd->ipd_key_scratch[i] = karea;
258         return ipd;
259 }
260 EXPORT_SYMBOL(iam_ipd_alloc);
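/*
 * Note on sizing (illustrative, not enforced by iam_ipd_alloc() itself): the
 * caller must supply an @area of at least
 *
 *	sizeof(struct iam_path_descr) +
 *		ARRAY_SIZE(ipd->ipd_key_scratch) * keysize
 *
 * bytes, since the scratch keys are carved out immediately after the
 * descriptor.
 */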
261
262 void iam_ipd_free(struct iam_path_descr *ipd)
263 {
264 }
265 EXPORT_SYMBOL(iam_ipd_free);
266
267 int iam_node_read(struct iam_container *c, iam_ptr_t ptr,
268                   handle_t *h, struct buffer_head **bh)
269 {
270         int result = 0;
271
272         /* NB: this can be called by iam_lfix_guess(), which runs at a very
273          * early stage when c->ic_root_bh and c->ic_descr->id_ops
274          * haven't been initialized yet.
275          * Also, we don't have this for IAM dir.
276          */
277         if (c->ic_root_bh != NULL &&
278             c->ic_descr->id_ops->id_root_ptr(c) == ptr) {
279                 get_bh(c->ic_root_bh);
280                 *bh = c->ic_root_bh;
281                 return 0;
282         }
283
284         *bh = ldiskfs_bread(h, c->ic_object, (int)ptr, 0, &result);
285         if (*bh == NULL)
286                 result = -EIO;
287         return result;
288 }
289
290 /*
291  * Return pointer to current leaf record. Pointer is valid while corresponding
292  * leaf node is locked and pinned.
293  */
294 static struct iam_rec *iam_leaf_rec(const struct iam_leaf *leaf)
295 {
296         return iam_leaf_ops(leaf)->rec(leaf);
297 }
298
299 /*
300  * Return pointer to the current leaf key. This function returns pointer to
301  * the key stored in node.
302  *
303  * Caller should assume that returned pointer is only valid while leaf node is
304  * pinned and locked.
305  */
306 static struct iam_key *iam_leaf_key(const struct iam_leaf *leaf)
307 {
308         return iam_leaf_ops(leaf)->key(leaf);
309 }
310
311 static int iam_leaf_key_size(const struct iam_leaf *leaf)
312 {
313         return iam_leaf_ops(leaf)->key_size(leaf);
314 }
315
316 static struct iam_ikey *iam_leaf_ikey(const struct iam_leaf *leaf,
317                                       struct iam_ikey *key)
318 {
319         return iam_leaf_ops(leaf)->ikey(leaf, key);
320 }
321
322 static int iam_leaf_keycmp(const struct iam_leaf *leaf,
323                            const struct iam_key *key)
324 {
325         return iam_leaf_ops(leaf)->key_cmp(leaf, key);
326 }
327
328 static int iam_leaf_keyeq(const struct iam_leaf *leaf,
329                           const struct iam_key *key)
330 {
331         return iam_leaf_ops(leaf)->key_eq(leaf, key);
332 }
333
334 #if LDISKFS_INVARIANT_ON
335 static int iam_leaf_check(struct iam_leaf *leaf);
336 extern int dx_node_check(struct iam_path *p, struct iam_frame *f);
337
338 static int iam_path_check(struct iam_path *p)
339 {
340         int i;
341         int result;
342         struct iam_frame *f;
343         struct iam_descr *param;
344
345         result = 1;
346         param = iam_path_descr(p);
347         for (i = 0; result && i < ARRAY_SIZE(p->ip_frames); ++i) {
348                 f = &p->ip_frames[i];
349                 if (f->bh != NULL) {
350                         result = dx_node_check(p, f);
351                         if (result)
352                                 result = !param->id_ops->id_node_check(p, f);
353                 }
354         }
355         if (result && p->ip_leaf.il_bh != NULL)
356                 result = iam_leaf_check(&p->ip_leaf);
357         if (result == 0) {
358                 ldiskfs_std_error(iam_path_obj(p)->i_sb, result);
359         }
360         return result;
361 }
362 #endif
363
364 static int iam_leaf_load(struct iam_path *path)
365 {
366         iam_ptr_t block;
367         int err;
368         struct iam_container *c;
369         struct buffer_head   *bh;
370         struct iam_leaf      *leaf;
371         struct iam_descr     *descr;
372
373         c     = path->ip_container;
374         leaf  = &path->ip_leaf;
375         descr = iam_path_descr(path);
376         block = path->ip_frame->leaf;
377         if (block == 0) {
378                 /* XXX bug 11027 */
379                 printk(CFS_KERN_EMERG "wrong leaf: %lu %d [%p %p %p]\n",
380                        (long unsigned)path->ip_frame->leaf,
381                        dx_get_count(dx_node_get_entries(path, path->ip_frame)),
382                        path->ip_frames[0].bh, path->ip_frames[1].bh,
383                        path->ip_frames[2].bh);
384         }
385         err   = descr->id_ops->id_node_read(c, block, NULL, &bh);
386         if (err == 0) {
387                 leaf->il_bh = bh;
388                 leaf->il_curidx = block;
389                 err = iam_leaf_ops(leaf)->init(leaf);
390                 assert_inv(ergo(err == 0, iam_leaf_check(leaf)));
391         }
392         return err;
393 }
394
395 static void iam_unlock_htree(struct inode *dir, struct dynlock_handle *lh)
396 {
397         if (lh != NULL)
398                 dynlock_unlock(&LDISKFS_I(dir)->i_htree_lock, lh);
399 }
400
401
402 static void iam_leaf_unlock(struct iam_leaf *leaf)
403 {
404         if (leaf->il_lock != NULL) {
405                 iam_unlock_htree(iam_leaf_container(leaf)->ic_object,
406                                 leaf->il_lock);
407                 do_corr(schedule());
408                 leaf->il_lock = NULL;
409         }
410 }
411
412 static void iam_leaf_fini(struct iam_leaf *leaf)
413 {
414         if (leaf->il_path != NULL) {
415                 iam_leaf_unlock(leaf);
416                 assert_inv(ergo(leaf->il_bh != NULL, iam_leaf_check(leaf)));
417                 iam_leaf_ops(leaf)->fini(leaf);
418                 if (leaf->il_bh) {
419                         brelse(leaf->il_bh);
420                         leaf->il_bh = NULL;
421                         leaf->il_curidx = 0;
422                 }
423         }
424 }
425
426 static void iam_leaf_start(struct iam_leaf *folio)
427 {
428         iam_leaf_ops(folio)->start(folio);
429 }
430
431 void iam_leaf_next(struct iam_leaf *folio)
432 {
433         iam_leaf_ops(folio)->next(folio);
434 }
435
436 static void iam_leaf_rec_add(struct iam_leaf *leaf, const struct iam_key *key,
437                              const struct iam_rec *rec)
438 {
439         iam_leaf_ops(leaf)->rec_add(leaf, key, rec);
440 }
441
442 static void iam_rec_del(struct iam_leaf *leaf, int shift)
443 {
444         iam_leaf_ops(leaf)->rec_del(leaf, shift);
445 }
446
447 int iam_leaf_at_end(const struct iam_leaf *leaf)
448 {
449         return iam_leaf_ops(leaf)->at_end(leaf);
450 }
451
452 void iam_leaf_split(struct iam_leaf *l, struct buffer_head **bh, iam_ptr_t nr)
453 {
454         iam_leaf_ops(l)->split(l, bh, nr);
455 }
456
457 int iam_leaf_can_add(const struct iam_leaf *l,
458                      const struct iam_key *k, const struct iam_rec *r)
459 {
460         return iam_leaf_ops(l)->can_add(l, k, r);
461 }
462
463 #if LDISKFS_INVARIANT_ON
464 static int iam_leaf_check(struct iam_leaf *leaf)
465 {
466         return 1;
467 #if 0
468         struct iam_lentry    *orig;
469         struct iam_path      *path;
470         struct iam_container *bag;
471         struct iam_ikey       *k0;
472         struct iam_ikey       *k1;
473         int result;
474         int first;
475
476         orig = leaf->il_at;
477         path = iam_leaf_path(leaf);
478         bag  = iam_leaf_container(leaf);
479
480         result = iam_leaf_ops(leaf)->init(leaf);
481         if (result != 0)
482                 return result;
483
484         first = 1;
485         iam_leaf_start(leaf);
486         k0 = iam_path_ikey(path, 0);
487         k1 = iam_path_ikey(path, 1);
488         while (!iam_leaf_at_end(leaf)) {
489                 iam_ikeycpy(bag, k0, k1);
490                 iam_ikeycpy(bag, k1, iam_leaf_ikey(leaf, k1));
491                 if (!first && iam_ikeycmp(bag, k0, k1) > 0) {
492                         return 0;
493                 }
494                 first = 0;
495                 iam_leaf_next(leaf);
496         }
497         leaf->il_at = orig;
498         return 1;
499 #endif
500 }
501 #endif
502
503 static int iam_txn_dirty(handle_t *handle,
504                          struct iam_path *path, struct buffer_head *bh)
505 {
506         int result;
507
508         result = ldiskfs_journal_dirty_metadata(handle, bh);
509         if (result != 0)
510                 ldiskfs_std_error(iam_path_obj(path)->i_sb, result);
511         return result;
512 }
513
514 static int iam_txn_add(handle_t *handle,
515                        struct iam_path *path, struct buffer_head *bh)
516 {
517         int result;
518
519         result = ldiskfs_journal_get_write_access(handle, bh);
520         if (result != 0)
521                 ldiskfs_std_error(iam_path_obj(path)->i_sb, result);
522         return result;
523 }
524
525 /***********************************************************************/
526 /* iterator interface                                                  */
527 /***********************************************************************/
528
529 static enum iam_it_state it_state(const struct iam_iterator *it)
530 {
531         return it->ii_state;
532 }
533
534 /*
535  * Helper function returning the container an iterator is attached to.
536  */
537 static struct iam_container *iam_it_container(const struct iam_iterator *it)
538 {
539         return it->ii_path.ip_container;
540 }
541
542 static inline int it_keycmp(const struct iam_iterator *it,
543                             const struct iam_key *k)
544 {
545         return iam_leaf_keycmp(&it->ii_path.ip_leaf, k);
546 }
547
548 static inline int it_keyeq(const struct iam_iterator *it,
549                            const struct iam_key *k)
550 {
551         return iam_leaf_keyeq(&it->ii_path.ip_leaf, k);
552 }
553
554 static int it_ikeycmp(const struct iam_iterator *it, const struct iam_ikey *ik)
555 {
556         return iam_ikeycmp(it->ii_path.ip_container,
557                            iam_leaf_ikey(&it->ii_path.ip_leaf,
558                                          iam_path_ikey(&it->ii_path, 0)), ik);
559 }
560
561 static inline int it_at_rec(const struct iam_iterator *it)
562 {
563         return !iam_leaf_at_end(&it->ii_path.ip_leaf);
564 }
565
566 static inline int it_before(const struct iam_iterator *it)
567 {
568         return it_state(it) == IAM_IT_SKEWED && it_at_rec(it);
569 }
570
571 /*
572  * Helper wrapper around iam_it_get(): returns 0 (success) only when a record
573  * with exactly the requested key is found.
574  */
575 static int iam_it_get_exact(struct iam_iterator *it, const struct iam_key *k)
576 {
577         int result;
578
579         result = iam_it_get(it, k);
580         if (result > 0)
581                 result = 0;
582         else if (result == 0)
583                 /*
584                  * Return -ENOENT if cursor is located above record with a key
585                  * different from one specified, or in the empty leaf.
586                  *
587                  * XXX returning -ENOENT only works if iam_it_get() never
588                  * returns -ENOENT as a legitimate error.
589                  */
590                 result = -ENOENT;
591         return result;
592 }
593
594 void iam_container_write_lock(struct iam_container *ic)
595 {
596         cfs_down_write(&ic->ic_sem);
597 }
598
599 void iam_container_write_unlock(struct iam_container *ic)
600 {
601         cfs_up_write(&ic->ic_sem);
602 }
603
604 void iam_container_read_lock(struct iam_container *ic)
605 {
606         cfs_down_read(&ic->ic_sem);
607 }
608
609 void iam_container_read_unlock(struct iam_container *ic)
610 {
611         cfs_up_read(&ic->ic_sem);
612 }
613
614 /*
615  * Initialize iterator to IAM_IT_DETACHED state.
616  *
617  * postcondition: it_state(it) == IAM_IT_DETACHED
618  */
619 int  iam_it_init(struct iam_iterator *it, struct iam_container *c, __u32 flags,
620                  struct iam_path_descr *pd)
621 {
622         memset(it, 0, sizeof *it);
623         it->ii_flags  = flags;
624         it->ii_state  = IAM_IT_DETACHED;
625         iam_path_init(&it->ii_path, c, pd);
626         return 0;
627 }
628 EXPORT_SYMBOL(iam_it_init);
629
630 /*
631  * Finalize iterator and release all resources.
632  *
633  * precondition: it_state(it) == IAM_IT_DETACHED
634  */
635 void iam_it_fini(struct iam_iterator *it)
636 {
637         assert_corr(it_state(it) == IAM_IT_DETACHED);
638         iam_path_fini(&it->ii_path);
639 }
640 EXPORT_SYMBOL(iam_it_fini);
641
642 /*
643  * These locking primitives are used to protect parts of the directory's
644  * htree. The protection unit is a block: a leaf or an index node.
645  */
646 struct dynlock_handle *iam_lock_htree(struct inode *dir, unsigned long value,
647                                      enum dynlock_type lt)
648 {
649         return dynlock_lock(&LDISKFS_I(dir)->i_htree_lock, value, lt, GFP_NOFS);
650 }
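/*
 * A minimal usage sketch (illustrative only; @block_nr stands for the block
 * number of the leaf or index node being protected): pin a single htree block
 * for reading, do the work, drop the lock.  Real callers such as
 * iam_lookup_lock() below combine this with path revalidation.
 */
#if 0
	struct dynlock_handle *lh;

	lh = iam_lock_htree(dir, block_nr, DLT_READ);
	if (lh == NULL)
		return -ENOMEM;
	/* ... read or modify the block ... */
	iam_unlock_htree(dir, lh);
#endif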
651
652
653
654 int iam_index_lock(struct iam_path *path, struct dynlock_handle **lh)
655 {
656         struct iam_frame *f;
657
658         for (f = path->ip_frame; f >= path->ip_frames; --f, ++lh) {
659                 do_corr(schedule());
660                 *lh = iam_lock_htree(iam_path_obj(path), f->curidx, DLT_READ);
661                 if (*lh == NULL)
662                         return -ENOMEM;
663         }
664         return 0;
665 }
666
667 /*
668  * Fast check for frame consistency.
669  */
670 static int iam_check_fast(struct iam_path *path, struct iam_frame *frame)
671 {
672         struct iam_container *bag;
673         struct iam_entry *next;
674         struct iam_entry *last;
675         struct iam_entry *entries;
676         struct iam_entry *at;
677
678         bag     = path->ip_container;
679         at      = frame->at;
680         entries = frame->entries;
681         last    = iam_entry_shift(path, entries, dx_get_count(entries) - 1);
682
683         if (unlikely(at > last))
684                 return -EAGAIN;
685
686         if (unlikely(dx_get_block(path, at) != frame->leaf))
687                 return -EAGAIN;
688
689         if (unlikely(iam_ikeycmp(bag, iam_ikey_at(path, at),
690                                  path->ip_ikey_target) > 0))
691                 return -EAGAIN;
692
693         next = iam_entry_shift(path, at, +1);
694         if (next <= last) {
695                 if (unlikely(iam_ikeycmp(bag, iam_ikey_at(path, next),
696                                          path->ip_ikey_target) <= 0))
697                         return -EAGAIN;
698         }
699         return 0;
700 }
701
702 int dx_index_is_compat(struct iam_path *path)
703 {
704         return iam_path_descr(path) == NULL;
705 }
706
707 /*
708  * iam_find_position
709  *
710  * Search for the position of the specified hash in the index node.
711  *
712  */
713
714 struct iam_entry *iam_find_position(struct iam_path *path,
715                                    struct iam_frame *frame)
716 {
717         int count;
718         struct iam_entry *p;
719         struct iam_entry *q;
720         struct iam_entry *m;
721
722         count = dx_get_count(frame->entries);
723         assert_corr(count && count <= dx_get_limit(frame->entries));
724         p = iam_entry_shift(path, frame->entries,
725                             dx_index_is_compat(path) ? 1 : 2);
726         q = iam_entry_shift(path, frame->entries, count - 1);
727         while (p <= q) {
728                 m = iam_entry_shift(path, p, iam_entry_diff(path, q, p) / 2);
729                 if (iam_ikeycmp(path->ip_container, iam_ikey_at(path, m),
730                                 path->ip_ikey_target) > 0)
731                         q = iam_entry_shift(path, m, -1);
732                 else
733                         p = iam_entry_shift(path, m, +1);
734         }
735         return iam_entry_shift(path, p, -1);
736 }
737
738
739
740 static iam_ptr_t iam_find_ptr(struct iam_path *path, struct iam_frame *frame)
741 {
742         return dx_get_block(path, iam_find_position(path, frame));
743 }
744
745 void iam_insert_key(struct iam_path *path, struct iam_frame *frame,
746                     const struct iam_ikey *key, iam_ptr_t ptr)
747 {
748         struct iam_entry *entries = frame->entries;
749         struct iam_entry *new = iam_entry_shift(path, frame->at, +1);
750         int count = dx_get_count(entries);
751
752         /*
753          * Unfortunately we cannot assert this, as this function is sometimes
754          * called by VFS under i_sem and without pdirops lock.
755          */
756         assert_corr(1 || iam_frame_is_locked(path, frame));
757         assert_corr(count < dx_get_limit(entries));
758         assert_corr(frame->at < iam_entry_shift(path, entries, count));
759         assert_inv(dx_node_check(path, frame));
760
761         memmove(iam_entry_shift(path, new, 1), new,
762                 (char *)iam_entry_shift(path, entries, count) - (char *)new);
763         dx_set_ikey(path, new, key);
764         dx_set_block(path, new, ptr);
765         dx_set_count(entries, count + 1);
766         assert_inv(dx_node_check(path, frame));
767 }
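/*
 * Illustration (hypothetical node with three used entries, inserting K3/P3
 * after frame->at):
 *
 *	before:  [count/limit][K1 P1][K2 P2][K4 P4][ free ]
 *	                              ^frame->at
 *	after:   [count/limit][K1 P1][K2 P2][K3 P3][K4 P4]
 *
 * The memmove() above shifts everything past the insertion point one entry to
 * the right, the new key/pointer pair is written into the hole, and the count
 * is incremented.
 */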
768
769 void iam_insert_key_lock(struct iam_path *path, struct iam_frame *frame,
770                          const struct iam_ikey *key, iam_ptr_t ptr)
771 {
772         iam_lock_bh(frame->bh);
773         iam_insert_key(path, frame, key, ptr);
774         iam_unlock_bh(frame->bh);
775 }
776 /*
777  * returns 0 if path was unchanged, -EAGAIN otherwise.
778  */
779 static int iam_check_path(struct iam_path *path, struct iam_frame *frame)
780 {
781         int equal;
782
783         iam_lock_bh(frame->bh);
784         equal = iam_check_fast(path, frame) == 0 ||
785                 frame->leaf == iam_find_ptr(path, frame);
786         DX_DEVAL(iam_lock_stats.dls_bh_again += !equal);
787         iam_unlock_bh(frame->bh);
788
789         return equal ? 0 : -EAGAIN;
790 }
791
792 static int iam_lookup_try(struct iam_path *path)
793 {
794         u32 ptr;
795         int err = 0;
796         int i;
797
798         struct iam_descr *param;
799         struct iam_frame *frame;
800         struct iam_container *c;
801
802         param = iam_path_descr(path);
803         c = path->ip_container;
804
805         ptr = param->id_ops->id_root_ptr(c);
806         for (frame = path->ip_frames, i = 0; i <= path->ip_indirect;
807              ++frame, ++i) {
808                 err = param->id_ops->id_node_read(c, (iam_ptr_t)ptr, NULL,
809                                                   &frame->bh);
810                 do_corr(schedule());
811
812                 iam_lock_bh(frame->bh);
813                 /*
814                  * node must be initialized under bh lock because concurrent
815                  * creation procedure may change it and iam_lookup_try() will
816                  * see obsolete tree height. -bzzz
817                  */
818                 if (err != 0)
819                         break;
820
821                 if (LDISKFS_INVARIANT_ON) {
822                         err = param->id_ops->id_node_check(path, frame);
823                         if (err != 0)
824                                 break;
825                 }
826
827                 err = param->id_ops->id_node_load(path, frame);
828                 if (err != 0)
829                         break;
830
831                 assert_inv(dx_node_check(path, frame));
832                 /*
833                  * splitting may change the root index block and move the hash
834                  * we're looking for into another index block, so we have to check
835                  * for this and repeat from the beginning if the path got changed
836                  * -bzzz
837                  */
838                 if (i > 0) {
839                         err = iam_check_path(path, frame - 1);
840                         if (err != 0)
841                                 break;
842                 }
843
844                 frame->at = iam_find_position(path, frame);
845                 frame->curidx = ptr;
846                 frame->leaf = ptr = dx_get_block(path, frame->at);
847
848                 iam_unlock_bh(frame->bh);
849                 do_corr(schedule());
850         }
851         if (err != 0)
852                 iam_unlock_bh(frame->bh);
853         path->ip_frame = --frame;
854         return err;
855 }
856
857 static int __iam_path_lookup(struct iam_path *path)
858 {
859         int err;
860         int i;
861
862         for (i = 0; i < DX_MAX_TREE_HEIGHT; ++ i)
863                 assert(path->ip_frames[i].bh == NULL);
864
865         do {
866                 err = iam_lookup_try(path);
867                 do_corr(schedule());
868                 if (err != 0)
869                         iam_path_fini(path);
870         } while (err == -EAGAIN);
871
872         return err;
873 }
874
875 /*
876  * returns 0 if path was unchanged, -EAGAIN otherwise.
877  */
878 static int iam_check_full_path(struct iam_path *path, int search)
879 {
880         struct iam_frame *bottom;
881         struct iam_frame *scan;
882         int i;
883         int result;
884
885         do_corr(schedule());
886
887         for (bottom = path->ip_frames, i = 0;
888              i < DX_MAX_TREE_HEIGHT && bottom->bh != NULL; ++bottom, ++i) {
889                 ; /* find last filled in frame */
890         }
891
892         /*
893          * Lock frames, bottom to top.
894          */
895         for (scan = bottom - 1; scan >= path->ip_frames; --scan)
896                 iam_lock_bh(scan->bh);
897         /*
898          * Check them top to bottom.
899          */
900         result = 0;
901         for (scan = path->ip_frames; scan < bottom; ++scan) {
902                 struct iam_entry *pos;
903
904                 if (search) {
905                         if (iam_check_fast(path, scan) == 0)
906                                 continue;
907
908                         pos = iam_find_position(path, scan);
909                         if (scan->leaf != dx_get_block(path, pos)) {
910                                 result = -EAGAIN;
911                                 break;
912                         }
913                         scan->at = pos;
914                 } else {
915                         pos = iam_entry_shift(path, scan->entries,
916                                               dx_get_count(scan->entries) - 1);
917                         if (scan->at > pos ||
918                             scan->leaf != dx_get_block(path, scan->at)) {
919                                 result = -EAGAIN;
920                                 break;
921                         }
922                 }
923         }
924
925         /*
926          * Unlock top to bottom.
927          */
928         for (scan = path->ip_frames; scan < bottom; ++scan)
929                 iam_unlock_bh(scan->bh);
930         DX_DEVAL(iam_lock_stats.dls_bh_full_again += !!result);
931         do_corr(schedule());
932
933         return result;
934 }
935
936
937 /*
938  * Performs path lookup and returns with found leaf (if any) locked by htree
939  * lock.
940  */
941 int iam_lookup_lock(struct iam_path *path,
942                    struct dynlock_handle **dl, enum dynlock_type lt)
943 {
944         int result;
945         struct inode *dir;
946
947         dir = iam_path_obj(path);
948         while ((result = __iam_path_lookup(path)) == 0) {
949                 do_corr(schedule());
950                 *dl = iam_lock_htree(dir, path->ip_frame->leaf, lt);
951                 if (*dl == NULL) {
952                         iam_path_fini(path);
953                         result = -ENOMEM;
954                         break;
955                 }
956                 do_corr(schedule());
957                 /*
958                  * while locking leaf we just found may get split so we need
959                  * to check this -bzzz
960                  */
961                 if (iam_check_full_path(path, 1) == 0)
962                         break;
963                 iam_unlock_htree(dir, *dl);
964                 *dl = NULL;
965                 iam_path_fini(path);
966         }
967         return result;
968 }
969 /*
970  * Performs tree top-to-bottom traversal starting from root, and loads leaf
971  * node.
972  */
973 static int iam_path_lookup(struct iam_path *path, int index)
974 {
975         struct iam_container *c;
976         struct iam_descr *descr;
977         struct iam_leaf  *leaf;
978         int result;
979
980         c = path->ip_container;
981         leaf = &path->ip_leaf;
982         descr = iam_path_descr(path);
983         result = iam_lookup_lock(path, &leaf->il_lock, DLT_WRITE);
984         assert_inv(iam_path_check(path));
985         do_corr(schedule());
986         if (result == 0) {
987                 result = iam_leaf_load(path);
988                 assert_inv(ergo(result == 0, iam_leaf_check(leaf)));
989                 if (result == 0) {
990                         do_corr(schedule());
991                         if (index)
992                                 result = iam_leaf_ops(leaf)->
993                                         ilookup(leaf, path->ip_ikey_target);
994                         else
995                                 result = iam_leaf_ops(leaf)->
996                                         lookup(leaf, path->ip_key_target);
997                         do_corr(schedule());
998                 }
999                 if (result < 0)
1000                         iam_leaf_unlock(leaf);
1001         }
1002         return result;
1003 }
1004
1005 /*
1006  * Common part of iam_it_{i,}get().
1007  */
1008 static int __iam_it_get(struct iam_iterator *it, int index)
1009 {
1010         int result;
1011         assert_corr(it_state(it) == IAM_IT_DETACHED);
1012
1013         result = iam_path_lookup(&it->ii_path, index);
1014         if (result >= 0) {
1015                 int collision;
1016
1017                 collision = result & IAM_LOOKUP_LAST;
1018                 switch (result & ~IAM_LOOKUP_LAST) {
1019                 case IAM_LOOKUP_EXACT:
1020                         result = +1;
1021                         it->ii_state = IAM_IT_ATTACHED;
1022                         break;
1023                 case IAM_LOOKUP_OK:
1024                         result = 0;
1025                         it->ii_state = IAM_IT_ATTACHED;
1026                         break;
1027                 case IAM_LOOKUP_BEFORE:
1028                 case IAM_LOOKUP_EMPTY:
1029                         result = 0;
1030                         it->ii_state = IAM_IT_SKEWED;
1031                         break;
1032                 default:
1033                         assert(0);
1034                 }
1035                 result |= collision;
1036         }
1037         /*
1038          * See iam_it_get_exact() for explanation.
1039          */
1040         assert_corr(result != -ENOENT);
1041         return result;
1042 }
1043
1044 /*
1045  * The correct hash, but not the same key, was found; iterate through the hash
1046  * collision chain, looking for the correct record.
1047  */
1048 static int iam_it_collision(struct iam_iterator *it)
1049 {
1050         int result;
1051
1052         assert(ergo(it_at_rec(it), !it_keyeq(it, it->ii_path.ip_key_target)));
1053
1054         while ((result = iam_it_next(it)) == 0) {
1055                 do_corr(schedule());
1056                 if (it_ikeycmp(it, it->ii_path.ip_ikey_target) != 0)
1057                         return -ENOENT;
1058                 if (it_keyeq(it, it->ii_path.ip_key_target))
1059                         return 0;
1060         }
1061         return result;
1062 }
1063
1064 /*
1065  * Attach iterator. After successful completion, @it points to record with
1066  * least key not larger than @k.
1067  *
1068  * Return value: 0: positioned on existing record,
1069  *             +ve: exact position found,
1070  *             -ve: error.
1071  *
1072  * precondition:  it_state(it) == IAM_IT_DETACHED
1073  * postcondition: ergo(result == 0 && it_state(it) == IAM_IT_ATTACHED,
1074  *                     it_keycmp(it, k) <= 0)
1075  */
1076 int iam_it_get(struct iam_iterator *it, const struct iam_key *k)
1077 {
1078         int result;
1079         assert_corr(it_state(it) == IAM_IT_DETACHED);
1080
1081         it->ii_path.ip_ikey_target = NULL;
1082         it->ii_path.ip_key_target  = k;
1083
1084         result = __iam_it_get(it, 0);
1085
1086         if (result == IAM_LOOKUP_LAST) {
1087                 result = iam_it_collision(it);
1088                 if (result != 0) {
1089                         iam_it_put(it);
1090                         iam_it_fini(it);
1091                         result = __iam_it_get(it, 0);
1092                 } else
1093                         result = +1;
1094         }
1095         if (result > 0)
1096                 result &= ~IAM_LOOKUP_LAST;
1097
1098         assert_corr(ergo(result > 0, it_keycmp(it, k) == 0));
1099         assert_corr(ergo(result == 0 && it_state(it) == IAM_IT_ATTACHED,
1100                          it_keycmp(it, k) <= 0));
1101         return result;
1102 }
1103 EXPORT_SYMBOL(iam_it_get);
1104
1105 /*
1106  * Attach iterator by index key.
1107  */
1108 static int iam_it_iget(struct iam_iterator *it, const struct iam_ikey *k)
1109 {
1110         assert_corr(it_state(it) == IAM_IT_DETACHED);
1111
1112         it->ii_path.ip_ikey_target = k;
1113         return __iam_it_get(it, 1) & ~IAM_LOOKUP_LAST;
1114 }
1115
1116 /*
1117  * Attach iterator, and assure it points to the record (not skewed).
1118  *
1119  * Return value: 0: positioned on existing record,
1120  *             +ve: exact position found,
1121  *             -ve: error.
1122  *
1123  * precondition:  it_state(it) == IAM_IT_DETACHED &&
1124  *                !(it->ii_flags&IAM_IT_WRITE)
1125  * postcondition: ergo(result == 0, it_state(it) == IAM_IT_ATTACHED)
1126  */
1127 int iam_it_get_at(struct iam_iterator *it, const struct iam_key *k)
1128 {
1129         int result;
1130         assert_corr(it_state(it) == IAM_IT_DETACHED &&
1131                     !(it->ii_flags&IAM_IT_WRITE));
1132         result = iam_it_get(it, k);
1133         if (result == 0) {
1134                 if (it_state(it) != IAM_IT_ATTACHED) {
1135                         assert_corr(it_state(it) == IAM_IT_SKEWED);
1136                         result = iam_it_next(it);
1137                 }
1138         }
1139         assert_corr(ergo(result >= 0, it_state(it) == IAM_IT_ATTACHED));
1140         return result;
1141 }
1142 EXPORT_SYMBOL(iam_it_get_at);
1143
1144 /*
1145  * Duplicates iterator.
1146  *
1147  * postcondition: it_state(dst) == it_state(src) &&
1148  *                iam_it_container(dst) == iam_it_container(src) &&
1149  *                dst->ii_flags == src->ii_flags &&
1150  *                ergo(it_state(src) == IAM_IT_ATTACHED,
1151  *                     iam_it_rec_get(dst) == iam_it_rec_get(src) &&
1152  *                     iam_it_key_get(dst) == iam_it_key_get(src))
1153  */
1154 void iam_it_dup(struct iam_iterator *dst, const struct iam_iterator *src)
1155 {
1156         dst->ii_flags     = src->ii_flags;
1157         dst->ii_state     = src->ii_state;
1158         /* XXX not yet. iam_path_dup(&dst->ii_path, &src->ii_path); */
1159         /*
1160          * XXX: duplicate lock.
1161          */
1162         assert_corr(it_state(dst) == it_state(src));
1163         assert_corr(iam_it_container(dst) == iam_it_container(src));
1164         assert_corr(dst->ii_flags == src->ii_flags);
1165         assert_corr(ergo(it_state(src) == IAM_IT_ATTACHED,
1166                     iam_it_rec_get(dst) == iam_it_rec_get(src) &&
1167                     iam_it_key_get(dst) == iam_it_key_get(src)));
1168
1169 }
1170
1171 /*
1172  * Detach iterator. Does nothing if already in the detached state.
1173  *
1174  * postcondition: it_state(it) == IAM_IT_DETACHED
1175  */
1176 void iam_it_put(struct iam_iterator *it)
1177 {
1178         if (it->ii_state != IAM_IT_DETACHED) {
1179                 it->ii_state = IAM_IT_DETACHED;
1180                 iam_leaf_fini(&it->ii_path.ip_leaf);
1181         }
1182 }
1183 EXPORT_SYMBOL(iam_it_put);
1184
1185 static struct iam_ikey *iam_it_ikey_get(const struct iam_iterator *it,
1186                                         struct iam_ikey *ikey);
1187
1188
1189 /*
1190  * This function increments the frame pointer to search the next leaf
1191  * block, and reads in the necessary intervening nodes if the search
1192  * should be necessary.  Whether or not the search is necessary is
1193  * controlled by the hash parameter.  If the hash value is even, then
1194  * the search is only continued if the next block starts with that
1195  * hash value.  This is used if we are searching for a specific file.
1196  *
1197  * If the hash value is HASH_NB_ALWAYS, then always go to the next block.
1198  *
1199  * This function returns 1 if the caller should continue to search,
1200  * or 0 if it should not.  If there is an error reading one of the
1201  * index blocks, it will a negative error code.
1202  *
1203  * If start_hash is non-null, it will be filled in with the starting
1204  * hash of the next page.
1205  */
1206 static int iam_htree_advance(struct inode *dir, __u32 hash,
1207                               struct iam_path *path, __u32 *start_hash,
1208                               int compat)
1209 {
1210         struct iam_frame *p;
1211         struct buffer_head *bh;
1212         int err, num_frames = 0;
1213         __u32 bhash;
1214
1215         p = path->ip_frame;
1216         /*
1217          * Find the next leaf page by incrementing the frame pointer.
1218          * If we run out of entries in the interior node, loop around and
1219          * increment pointer in the parent node.  When we break out of
1220          * this loop, num_frames indicates the number of interior
1221          * nodes need to be read.
1222          */
1223         while (1) {
1224                 do_corr(schedule());
1225                 iam_lock_bh(p->bh);
1226                 p->at = iam_entry_shift(path, p->at, +1);
1227                 if (p->at < iam_entry_shift(path, p->entries,
1228                                             dx_get_count(p->entries))) {
1229                         p->leaf = dx_get_block(path, p->at);
1230                         iam_unlock_bh(p->bh);
1231                         break;
1232                 }
1233                 iam_unlock_bh(p->bh);
1234                 if (p == path->ip_frames)
1235                         return 0;
1236                 num_frames++;
1237                 --p;
1238         }
1239
1240         if (compat) {
1241                 /*
1242                  * Htree hash magic.
1243                  */
1244                 /*
1245                  * If the hash is 1, then continue only if the next page has a
1246                  * continuation hash of any value.  This is used for readdir
1247                  * handling.  Otherwise, check to see if the hash matches the
1248                  * desired continuation hash.  If it doesn't, return since
1249                  * there's no point to read in the successive index pages.
1250                  */
1251                 dx_get_ikey(path, p->at, (struct iam_ikey *)&bhash);
1252                 if (start_hash)
1253                         *start_hash = bhash;
1254                 if ((hash & 1) == 0) {
1255                         if ((bhash & ~1) != hash)
1256                                 return 0;
1257                 }
1258         }
1259         /*
1260          * If the hash is HASH_NB_ALWAYS, we always go to the next
1261          * block so no check is necessary
1262          */
1263         while (num_frames--) {
1264                 iam_ptr_t idx;
1265
1266                 do_corr(schedule());
1267                 iam_lock_bh(p->bh);
1268                 idx = p->leaf = dx_get_block(path, p->at);
1269                 iam_unlock_bh(p->bh);
1270                 err = iam_path_descr(path)->id_ops->
1271                         id_node_read(path->ip_container, idx, NULL, &bh);
1272                 if (err != 0)
1273                         return err; /* Failure */
1274                 ++p;
1275                 brelse(p->bh);
1276                 assert_corr(p->bh != bh);
1277                 p->bh = bh;
1278                 p->entries = dx_node_get_entries(path, p);
1279                 p->at = iam_entry_shift(path, p->entries, !compat);
1280                 assert_corr(p->curidx != idx);
1281                 p->curidx = idx;
1282                 iam_lock_bh(p->bh);
1283                 assert_corr(p->leaf != dx_get_block(path, p->at));
1284                 p->leaf = dx_get_block(path, p->at);
1285                 iam_unlock_bh(p->bh);
1286                 assert_inv(dx_node_check(path, p));
1287         }
1288         return 1;
1289 }
1290
1291
1292 static inline int iam_index_advance(struct iam_path *path)
1293 {
1294         return iam_htree_advance(iam_path_obj(path), 0, path, NULL, 0);
1295 }
1296
1297 static void iam_unlock_array(struct inode *dir, struct dynlock_handle **lh)
1298 {
1299         int i;
1300
1301         for (i = 0; i < DX_MAX_TREE_HEIGHT; ++i, ++lh) {
1302                 if (*lh != NULL) {
1303                         iam_unlock_htree(dir, *lh);
1304                         *lh = NULL;
1305                 }
1306         }
1307 }
1308 /*
1309  * Advance the index part of @path to point to the next leaf. Returns 1 on
1310  * success, 0 when the end of the container was reached. The leaf node is locked.
1311  */
1312 int iam_index_next(struct iam_container *c, struct iam_path *path)
1313 {
1314         iam_ptr_t cursor;
1315         struct dynlock_handle *lh[DX_MAX_TREE_HEIGHT] = { 0, };
1316         int result;
1317         struct inode *object;
1318
1319         /*
1320          * Locking for iam_index_next()... is to be described.
1321          */
1322
1323         object = c->ic_object;
1324         cursor = path->ip_frame->leaf;
1325
1326         while (1) {
1327                 result = iam_index_lock(path, lh);
1328                 do_corr(schedule());
1329                 if (result < 0)
1330                         break;
1331
1332                 result = iam_check_full_path(path, 0);
1333                 if (result == 0 && cursor == path->ip_frame->leaf) {
1334                         result = iam_index_advance(path);
1335
1336                         assert_corr(result == 0 ||
1337                                     cursor != path->ip_frame->leaf);
1338                         break;
1339                 }
1340                 do {
1341                         iam_unlock_array(object, lh);
1342
1343                         iam_path_release(path);
1344                         do_corr(schedule());
1345
1346                         result = __iam_path_lookup(path);
1347                         if (result < 0)
1348                                 break;
1349
1350                         while (path->ip_frame->leaf != cursor) {
1351                                 do_corr(schedule());
1352
1353                                 result = iam_index_lock(path, lh);
1354                                 do_corr(schedule());
1355                                 if (result < 0)
1356                                         break;
1357
1358                                 result = iam_check_full_path(path, 0);
1359                                 if (result != 0)
1360                                         break;
1361
1362                                 result = iam_index_advance(path);
1363                                 if (result == 0) {
1364                                         CERROR("cannot find cursor : %u\n",
1365                                                 cursor);
1366                                         result = -EIO;
1367                                 }
1368                                 if (result < 0)
1369                                         break;
1370                                 result = iam_check_full_path(path, 0);
1371                                 if (result != 0)
1372                                         break;
1373                                 iam_unlock_array(object, lh);
1374                         }
1375                 } while (result == -EAGAIN);
1376                 if (result < 0)
1377                         break;
1378         }
1379         iam_unlock_array(object, lh);
1380         return result;
1381 }
1382
1383 /*
1384  * Move iterator one record right.
1385  *
1386  * Return value: 0: success,
1387  *              +1: end of container reached
1388  *             -ve: error
1389  *
1390  * precondition:  (it_state(it) == IAM_IT_ATTACHED ||
1391  *                 it_state(it) == IAM_IT_SKEWED) && it->ii_flags&IAM_IT_MOVE
1392  * postcondition: ergo(result == 0, it_state(it) == IAM_IT_ATTACHED) &&
1393  *                ergo(result >  0, it_state(it) == IAM_IT_DETACHED)
1394  */
1395 int iam_it_next(struct iam_iterator *it)
1396 {
1397         int result;
1398         struct iam_path      *path;
1399         struct iam_leaf      *leaf;
1400         struct inode         *obj;
1401         do_corr(struct iam_ikey *ik_orig);
1402
1403         /* assert_corr(it->ii_flags&IAM_IT_MOVE); */
1404         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1405                     it_state(it) == IAM_IT_SKEWED);
1406
1407         path = &it->ii_path;
1408         leaf = &path->ip_leaf;
1409         obj  = iam_path_obj(path);
1410
1411         assert_corr(iam_leaf_is_locked(leaf));
1412
1413         result = 0;
1414         do_corr(ik_orig = it_at_rec(it) ?
1415                 iam_it_ikey_get(it, iam_path_ikey(path, 2)) : NULL);
1416         if (it_before(it)) {
1417                 assert_corr(!iam_leaf_at_end(leaf));
1418                 it->ii_state = IAM_IT_ATTACHED;
1419         } else {
1420                 if (!iam_leaf_at_end(leaf))
1421                         /* advance within leaf node */
1422                         iam_leaf_next(leaf);
1423                 /*
1424                  * multiple iterations may be necessary due to empty leaves.
1425                  */
1426                 while (result == 0 && iam_leaf_at_end(leaf)) {
1427                         do_corr(schedule());
1428                         /* advance index portion of the path */
1429                         result = iam_index_next(iam_it_container(it), path);
1430                         assert_corr(iam_leaf_is_locked(leaf));
1431                         if (result == 1) {
1432                                 struct dynlock_handle *lh;
1433                                 lh = iam_lock_htree(obj, path->ip_frame->leaf,
1434                                                    DLT_WRITE);
1435                                 if (lh != NULL) {
1436                                         iam_leaf_fini(leaf);
1437                                         leaf->il_lock = lh;
1438                                         result = iam_leaf_load(path);
1439                                         if (result == 0)
1440                                                 iam_leaf_start(leaf);
1441                                 } else
1442                                         result = -ENOMEM;
1443                         } else if (result == 0)
1444                                 /* end of container reached */
1445                                 result = +1;
1446                         if (result != 0)
1447                                 iam_it_put(it);
1448                 }
1449                 if (result == 0)
1450                         it->ii_state = IAM_IT_ATTACHED;
1451         }
1452         assert_corr(ergo(result == 0, it_state(it) == IAM_IT_ATTACHED));
1453         assert_corr(ergo(result >  0, it_state(it) == IAM_IT_DETACHED));
1454         assert_corr(ergo(result == 0 && ik_orig != NULL,
1455                          it_ikeycmp(it, ik_orig) >= 0));
1456         return result;
1457 }
1458 EXPORT_SYMBOL(iam_it_next);
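/*
 * A sketch of typical read-only iteration (illustrative; @c, @pd and
 * @start_key are assumed to be a set-up container, path descriptor and start
 * key, and the error handling of real callers is omitted):
 */
#if 0
	struct iam_iterator it;
	int rc;

	iam_it_init(&it, c, IAM_IT_MOVE, pd);
	rc = iam_it_get_at(&it, start_key);
	while (rc == 0 || rc == +1) {
		/* iam_it_rec_get() / iam_it_key_get() are valid here */
		rc = iam_it_next(&it);	/* 0: next record, +1: end, -ve: error */
	}
	iam_it_put(&it);
	iam_it_fini(&it);
#endif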
1459
1460 /*
1461  * Return pointer to the record under iterator.
1462  *
1463  * precondition:  it_state(it) == IAM_IT_ATTACHED && it_at_rec(it)
1464  * postcondition: it_state(it) == IAM_IT_ATTACHED
1465  */
1466 struct iam_rec *iam_it_rec_get(const struct iam_iterator *it)
1467 {
1468         assert_corr(it_state(it) == IAM_IT_ATTACHED);
1469         assert_corr(it_at_rec(it));
1470         return iam_leaf_rec(&it->ii_path.ip_leaf);
1471 }
1472 EXPORT_SYMBOL(iam_it_rec_get);
1473
1474 static void iam_it_reccpy(struct iam_iterator *it, const struct iam_rec *r)
1475 {
1476         struct iam_leaf *folio;
1477
1478         folio = &it->ii_path.ip_leaf;
1479         iam_leaf_ops(folio)->rec_set(folio, r);
1480 }
1481
1482 /*
1483  * Replace contents of record under iterator.
1484  *
1485  * precondition:  it_state(it) == IAM_IT_ATTACHED &&
1486  *                it->ii_flags&IAM_IT_WRITE
1487  * postcondition: it_state(it) == IAM_IT_ATTACHED &&
1488  *                ergo(result == 0, !memcmp(iam_it_rec_get(it), r, ...))
1489  */
1490 int iam_it_rec_set(handle_t *h,
1491                    struct iam_iterator *it, const struct iam_rec *r)
1492 {
1493         int result;
1494         struct iam_path *path;
1495         struct buffer_head *bh;
1496
1497         assert_corr(it_state(it) == IAM_IT_ATTACHED &&
1498                     it->ii_flags&IAM_IT_WRITE);
1499         assert_corr(it_at_rec(it));
1500
1501         path = &it->ii_path;
1502         bh   = path->ip_leaf.il_bh;
1503         result = iam_txn_add(h, path, bh);
1504         if (result == 0) {
1505                 iam_it_reccpy(it, r);
1506                 result = iam_txn_dirty(h, path, bh);
1507         }
1508         return result;
1509 }
1510 EXPORT_SYMBOL(iam_it_rec_set);
1511
1512 /*
1513  * Return pointer to the index key under iterator.
1514  *
1515  * precondition:  it_state(it) == IAM_IT_ATTACHED ||
1516  *                it_state(it) == IAM_IT_SKEWED
1517  */
1518 static struct iam_ikey *iam_it_ikey_get(const struct iam_iterator *it,
1519                                         struct iam_ikey *ikey)
1520 {
1521         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1522                     it_state(it) == IAM_IT_SKEWED);
1523         assert_corr(it_at_rec(it));
1524         return iam_leaf_ikey(&it->ii_path.ip_leaf, ikey);
1525 }
1526
1527 /*
1528  * Return pointer to the key under iterator.
1529  *
1530  * precondition:  it_state(it) == IAM_IT_ATTACHED ||
1531  *                it_state(it) == IAM_IT_SKEWED
1532  */
1533 struct iam_key *iam_it_key_get(const struct iam_iterator *it)
1534 {
1535         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1536                     it_state(it) == IAM_IT_SKEWED);
1537         assert_corr(it_at_rec(it));
1538         return iam_leaf_key(&it->ii_path.ip_leaf);
1539 }
1540 EXPORT_SYMBOL(iam_it_key_get);
1541
1542 /*
1543  * Return size of key under iterator (in bytes)
1544  *
1545  * precondition:  it_state(it) == IAM_IT_ATTACHED ||
1546  *                it_state(it) == IAM_IT_SKEWED
1547  */
1548 int iam_it_key_size(const struct iam_iterator *it)
1549 {
1550         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1551                     it_state(it) == IAM_IT_SKEWED);
1552         assert_corr(it_at_rec(it));
1553         return iam_leaf_key_size(&it->ii_path.ip_leaf);
1554 }
1555 EXPORT_SYMBOL(iam_it_key_size);
1556
1557 /*
1558  * Insertion of new record. Interaction with jbd during non-trivial case (when
1559  * split happens) is as following:
1560  *
1561  *  - new leaf node is involved into transaction by ldiskfs_append();
1562  *
1563  *  - old leaf node is involved into transaction by iam_add_rec();
1564  *
1565  *  - leaf where insertion point ends in, is marked dirty by iam_add_rec();
1566  *
1567  *  - leaf without insertion point is marked dirty (as @new_leaf) by
1568  *  iam_new_leaf();
1569  *
1570  *  - split index nodes are involved into transaction and marked dirty by
1571  *  split_index_node().
1572  *
1573  *  - "safe" index node, which is no split, but where new pointer is inserted
1574  *  is involved into transaction and marked dirty by split_index_node().
1575  *
1576  *  - index node where pointer to new leaf is inserted is involved into
1577  *  transaction by split_index_node() and marked dirty by iam_add_rec().
1578  *
1579  *  - inode is marked dirty by iam_add_rec().
1580  *
1581  */
1582
1583 static int iam_new_leaf(handle_t *handle, struct iam_leaf *leaf)
1584 {
1585         int err;
1586         iam_ptr_t blknr;
1587         struct buffer_head   *new_leaf;
1588         struct buffer_head   *old_leaf;
1589         struct iam_container *c;
1590         struct inode         *obj;
1591         struct iam_path      *path;
1592
1593         assert_inv(iam_leaf_check(leaf));
1594
1595         c = iam_leaf_container(leaf);
1596         path = leaf->il_path;
1597
1598         obj = c->ic_object;
1599         new_leaf = ldiskfs_append(handle, obj, (__u32 *)&blknr, &err);
1600         do_corr(schedule());
1601         if (new_leaf != NULL) {
1602                 struct dynlock_handle *lh;
1603
1604                 lh = iam_lock_htree(obj, blknr, DLT_WRITE);
1605                 do_corr(schedule());
1606                 if (lh != NULL) {
1607                         iam_leaf_ops(leaf)->init_new(c, new_leaf);
1608                         do_corr(schedule());
1609                         old_leaf = leaf->il_bh;
1610                         iam_leaf_split(leaf, &new_leaf, blknr);
1611                         if (old_leaf != leaf->il_bh) {
1612                                 /*
1613                                  * Switched to the new leaf.
1614                                  */
1615                                 iam_leaf_unlock(leaf);
1616                                 leaf->il_lock = lh;
1617                                 path->ip_frame->leaf = blknr;
1618                         } else
1619                                 iam_unlock_htree(obj, lh);
1620                         do_corr(schedule());
1621                         err = iam_txn_dirty(handle, path, new_leaf);
1622                         brelse(new_leaf);
1623                         if (err == 0)
1624                                 err = ldiskfs_mark_inode_dirty(handle, obj);
1625                         do_corr(schedule());
1626                 } else
1627                         err = -ENOMEM;
1628         }
1629         assert_inv(iam_leaf_check(leaf));
1630         assert_inv(iam_leaf_check(&iam_leaf_path(leaf)->ip_leaf));
1631         assert_inv(iam_path_check(iam_leaf_path(leaf)));
1632         return err;
1633 }
1634
1635 static inline void dx_set_limit(struct iam_entry *entries, unsigned value)
1636 {
1637         ((struct dx_countlimit *) entries)->limit = cpu_to_le16(value);
1638 }
1639
1640 static int iam_shift_entries(struct iam_path *path,
1641                          struct iam_frame *frame, unsigned count,
1642                          struct iam_entry *entries, struct iam_entry *entries2,
1643                          u32 newblock)
1644 {
1645         unsigned count1;
1646         unsigned count2;
1647         int delta;
1648
1649         struct iam_frame *parent = frame - 1;
1650         struct iam_ikey *pivot = iam_path_ikey(path, 3);
1651
1652         delta = dx_index_is_compat(path) ? 0 : +1;
1653
1654         count1 = count/2 + delta;
1655         count2 = count - count1;
1656         dx_get_ikey(path, iam_entry_shift(path, entries, count1), pivot);
1657
1658         dxtrace(printk("Split index %d/%d\n", count1, count2));
1659
1660         memcpy((char *) iam_entry_shift(path, entries2, delta),
1661                (char *) iam_entry_shift(path, entries, count1),
1662                count2 * iam_entry_size(path));
1663
1664         dx_set_count(entries2, count2 + delta);
1665         dx_set_limit(entries2, dx_node_limit(path));
1666
1667         /*
1668          * NOTE: very subtle piece of code. A competing dx_probe() may find the
1669          * 2nd-level index via the root index; then we insert a new entry here and
1670          * set the new count in that 2nd-level index, so dx_probe() may see a 2nd-
1671          * level index without the hash it looks for. The solution is to check the
1672          * root index after locking the just-found 2nd-level index -bzzz
1673          */
1674         iam_insert_key_lock(path, parent, pivot, newblock);
1675
1676         /*
1677          * Now both the old and the new 2nd-level index blocks contain all the
1678          * pointers, so dx_probe() may find a pointer in either.  That's OK -bzzz
1679          */
1680         iam_lock_bh(frame->bh);
1681         dx_set_count(entries, count1);
1682         iam_unlock_bh(frame->bh);
1683
1684         /*
1685          * Now the old 2nd-level index block points to the first half of the
1686          * leaves. It is important that dx_probe() checks the root index block
1687          * for changes under dx_lock_bh(frame->bh) -bzzz
1688          */
1689
1690         return count1;
1691 }
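
/*
 * Worked example for iam_shift_entries() (purely illustrative numbers):
 * with count == 508 entries in the overflowing index block and delta == +1
 * (the non-htree-compat case), count1 = 508/2 + 1 = 255 entries stay in the
 * old block and count2 = 508 - 255 = 253 entries are copied into the new
 * block at offset delta, whose count is set to count2 + delta = 254.  The
 * ikey of entry number count1 (the first entry that moved) becomes the
 * pivot inserted into the parent for the new block.
 */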
1692
1693
1694 int split_index_node(handle_t *handle, struct iam_path *path,
1695                      struct dynlock_handle **lh)
1696 {
1697
1698         struct iam_entry *entries;   /* old block contents */
1699         struct iam_entry *entries2;  /* new block contents */
1700         struct iam_frame *frame, *safe;
1701         struct buffer_head *bh_new[DX_MAX_TREE_HEIGHT] = {0};
1702         u32 newblock[DX_MAX_TREE_HEIGHT] = {0};
1703         struct dynlock_handle *lock[DX_MAX_TREE_HEIGHT] = {NULL,};
1704         struct dynlock_handle *new_lock[DX_MAX_TREE_HEIGHT] = {NULL,};
1705         struct inode *dir = iam_path_obj(path);
1706         struct iam_descr *descr;
1707         int nr_splet;
1708         int i, err;
1709
1710         descr = iam_path_descr(path);
1711         /*
1712          * Algorithm below depends on this.
1713          */
1714         assert_corr(dx_root_limit(path) < dx_node_limit(path));
1715
1716         frame = path->ip_frame;
1717         entries = frame->entries;
1718
1719         /*
1720          * Tall-tree handling: we might have to split multiple index blocks
1721          * all the way up to the tree root. The tricky point here is error
1722          * handling: to avoid complicated undo/rollback we
1723          *
1724          *   - first allocate all the necessary blocks;
1725          *
1726          *   - then insert pointers into them atomically.
1727          */
1728
1729         /*
1730          * Locking: the leaf is already locked. htree-locks are acquired on all
1731          * index nodes that require a split, bottom-to-top, on the "safe" node,
1732          * and on all new nodes.
1733          */
1734
1735         dxtrace(printk("using %u of %u node entries\n",
1736                        dx_get_count(entries), dx_get_limit(entries)));
1737
1738         /* What levels need split? */
1739         for (nr_splet = 0; frame >= path->ip_frames &&
1740              dx_get_count(frame->entries) == dx_get_limit(frame->entries);
1741              --frame, ++nr_splet) {
1742                 do_corr(schedule());
1743                 if (nr_splet == DX_MAX_TREE_HEIGHT) {
1744                         /*
1745                         CWARN(dir->i_sb, __FUNCTION__,
1746                                      "Directory index full!\n");
1747                                      */
1748                         err = -ENOSPC;
1749                         goto cleanup;
1750                 }
1751         }
1752
1753         safe = frame;
1754
1755         /*
1756          * Lock all nodes, bottom to top.
1757          */
1758         for (frame = path->ip_frame, i = nr_splet; i >= 0; --i, --frame) {
1759                 do_corr(schedule());
1760                 lock[i] = iam_lock_htree(dir, frame->curidx, DLT_WRITE);
1761                 if (lock[i] == NULL) {
1762                         err = -ENOMEM;
1763                         goto cleanup;
1764                 }
1765         }
1766
1767         /*
1768          * Check for concurrent index modification.
1769          */
1770         err = iam_check_full_path(path, 1);
1771         if (err)
1772                 goto cleanup;
1773         /*
1774          * And check that the same number of nodes is to be split.
1775          */
1776         for (i = 0, frame = path->ip_frame; frame >= path->ip_frames &&
1777              dx_get_count(frame->entries) == dx_get_limit(frame->entries);
1778              --frame, ++i) {
1779                 ;
1780         }
1781         if (i != nr_splet) {
1782                 err = -EAGAIN;
1783                 goto cleanup;
1784         }
1785
1786         /* Go back down, allocating blocks, locking them, and adding them to
1787          * the transaction... */
1788         for (frame = safe + 1, i = 0; i < nr_splet; ++i, ++frame) {
1789                 bh_new[i] = ldiskfs_append (handle, dir, &newblock[i], &err);
1790                 do_corr(schedule());
1791                 if (!bh_new[i] ||
1792                     descr->id_ops->id_node_init(path->ip_container,
1793                                                 bh_new[i], 0) != 0)
1794                         goto cleanup;
1795                 new_lock[i] = iam_lock_htree(dir, newblock[i], DLT_WRITE);
1796                 if (new_lock[i] == NULL) {
1797                         err = -ENOMEM;
1798                         goto cleanup;
1799                 }
1800                 do_corr(schedule());
1801                 BUFFER_TRACE(frame->bh, "get_write_access");
1802                 err = ldiskfs_journal_get_write_access(handle, frame->bh);
1803                 if (err)
1804                         goto journal_error;
1805         }
1806         /* Add "safe" node to transaction too */
1807         if (safe + 1 != path->ip_frames) {
1808                 do_corr(schedule());
1809                 err = ldiskfs_journal_get_write_access(handle, safe->bh);
1810                 if (err)
1811                         goto journal_error;
1812         }
1813
1814         /* Go through nodes once more, inserting pointers */
1815         for (frame = safe + 1, i = 0; i < nr_splet; ++i, ++frame) {
1816                 unsigned count;
1817                 int idx;
1818                 struct buffer_head *bh2;
1819                 struct buffer_head *bh;
1820
1821                 entries = frame->entries;
1822                 count = dx_get_count(entries);
1823                 idx = iam_entry_diff(path, frame->at, entries);
1824
1825                 bh2 = bh_new[i];
1826                 entries2 = dx_get_entries(path, bh2->b_data, 0);
1827
1828                 bh = frame->bh;
1829                 if (frame == path->ip_frames) {
1830                         /* splitting root node. Tricky point:
1831                          *
1832                          * In the "normal" B-tree we'd split root *and* add
1833                          * new root to the tree with pointers to the old root
1834                          * and its sibling (thus introducing two new nodes).
1835                          *
1836                          * In htree it's enough to add one node, because
1837                          * capacity of the root node is smaller than that of
1838                          * a non-root one.
1839                          */
1840                         struct iam_frame *frames;
1841                         struct iam_entry *next;
1842
1843                         assert_corr(i == 0);
1844
1845                         do_corr(schedule());
1846
1847                         frames = path->ip_frames;
1848                         memcpy((char *) entries2, (char *) entries,
1849                                count * iam_entry_size(path));
1850                         dx_set_limit(entries2, dx_node_limit(path));
1851
1852                         /* Set up root */
1853                         iam_lock_bh(frame->bh);
1854                         next = descr->id_ops->id_root_inc(path->ip_container,
1855                                                           path, frame);
1856                         dx_set_block(path, next, newblock[0]);
1857                         iam_unlock_bh(frame->bh);
1858
1859                         do_corr(schedule());
1860                         /* Shift frames in the path */
1861                         memmove(frames + 2, frames + 1,
1862                                 (sizeof path->ip_frames) - 2 * sizeof frames[0]);
1863                         /* Add new access path frame */
1864                         frames[1].at = iam_entry_shift(path, entries2, idx);
1865                         frames[1].entries = entries = entries2;
1866                         frames[1].bh = bh2;
1867                         assert_inv(dx_node_check(path, frame));
1868                         ++ path->ip_frame;
1869                         ++ frame;
1870                         assert_inv(dx_node_check(path, frame));
1871                         bh_new[0] = NULL; /* buffer head is "consumed" */
1872                         err = ldiskfs_journal_get_write_access(handle, bh2);
1873                         if (err)
1874                                 goto journal_error;
1875                         do_corr(schedule());
1876                 } else {
1877                         /* splitting non-root index node. */
1878                         struct iam_frame *parent = frame - 1;
1879
1880                         do_corr(schedule());
1881                         count = iam_shift_entries(path, frame, count,
1882                                               entries, entries2, newblock[i]);
1883                         /* Which index block gets the new entry? */
1884                         if (idx >= count) {
1885                                 int d = dx_index_is_compat(path) ? 0 : +1;
1886
1887                                 frame->at = iam_entry_shift(path, entries2,
1888                                                             idx - count + d);
1889                                 frame->entries = entries = entries2;
1890                                 frame->curidx = newblock[i];
1891                                 swap(frame->bh, bh2);
1892                                 assert_corr(lock[i + 1] != NULL);
1893                                 assert_corr(new_lock[i] != NULL);
1894                                 swap(lock[i + 1], new_lock[i]);
1895                                 bh_new[i] = bh2;
1896                                 parent->at = iam_entry_shift(path,
1897                                                              parent->at, +1);
1898                         }
1899                         assert_inv(dx_node_check(path, frame));
1900                         assert_inv(dx_node_check(path, parent));
1901                         dxtrace(dx_show_index ("node", frame->entries));
1902                         dxtrace(dx_show_index ("node",
1903                                ((struct dx_node *) bh2->b_data)->entries));
1904                         err = ldiskfs_journal_dirty_metadata(handle, bh2);
1905                         if (err)
1906                                 goto journal_error;
1907                         do_corr(schedule());
1908                         err = ldiskfs_journal_dirty_metadata(handle, parent->bh);
1909                         if (err)
1910                                 goto journal_error;
1911                 }
1912                 do_corr(schedule());
1913                 err = ldiskfs_journal_dirty_metadata(handle, bh);
1914                 if (err)
1915                         goto journal_error;
1916         }
1917         /*
1918          * This function was called to make insertion of new leaf
1919          * possible. Check that it fulfilled its obligations.
1920          */
1921         assert_corr(dx_get_count(path->ip_frame->entries) <
1922                     dx_get_limit(path->ip_frame->entries));
1923         assert_corr(lock[nr_splet] != NULL);
1924         *lh = lock[nr_splet];
1925         lock[nr_splet] = NULL;
1926         if (nr_splet > 0) {
1927                 /*
1928                  * Log ->i_size modification.
1929                  */
1930                 err = ldiskfs_mark_inode_dirty(handle, dir);
1931                 if (err)
1932                         goto journal_error;
1933         }
1934         goto cleanup;
1935 journal_error:
1936         ldiskfs_std_error(dir->i_sb, err);
1937
1938 cleanup:
1939         iam_unlock_array(dir, lock);
1940         iam_unlock_array(dir, new_lock);
1941
1942         assert_corr(err || iam_frame_is_locked(path, path->ip_frame));
1943
1944         do_corr(schedule());
1945         for (i = 0; i < ARRAY_SIZE(bh_new); ++i) {
1946                 if (bh_new[i] != NULL)
1947                         brelse(bh_new[i]);
1948         }
1949         return err;
1950 }
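
/*
 * Worked example for split_index_node() (hypothetical tree shape): suppose
 * the leaf's parent and grandparent index blocks are both full while the
 * root still has room.  Then nr_splet == 2 and "safe" ends up pointing at
 * the root frame; two new index blocks are allocated, the grandparent and
 * the parent are each split, pivot keys are inserted into the level above
 * each split (one of them into the root), and the htree lock on whichever
 * deepest-level block now holds the insertion point is handed back to the
 * caller through @lh.
 */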
1951
1952 static int iam_add_rec(handle_t *handle, struct iam_iterator *it,
1953                        struct iam_path *path,
1954                        const struct iam_key *k, const struct iam_rec *r)
1955 {
1956         int err;
1957         struct iam_leaf *leaf;
1958
1959         leaf = &path->ip_leaf;
1960         assert_inv(iam_leaf_check(leaf));
1961         assert_inv(iam_path_check(path));
1962         err = iam_txn_add(handle, path, leaf->il_bh);
1963         if (err == 0) {
1964                 do_corr(schedule());
1965                 if (!iam_leaf_can_add(leaf, k, r)) {
1966                         struct dynlock_handle *lh = NULL;
1967
1968                         do {
1969                                 assert_corr(lh == NULL);
1970                                 do_corr(schedule());
1971                                 err = split_index_node(handle, path, &lh);
1972                                 if (err == -EAGAIN) {
1973                                         assert_corr(lh == NULL);
1974
1975                                         iam_path_fini(path);
1976                                         it->ii_state = IAM_IT_DETACHED;
1977
1978                                         do_corr(schedule());
1979                                         err = iam_it_get_exact(it, k);
1980                                         if (err == -ENOENT)
1981                                                 err = +1; /* repeat split */
1982                                         else if (err == 0)
1983                                                 err = -EEXIST;
1984                                 }
1985                         } while (err > 0);
1986                         assert_inv(iam_path_check(path));
1987                         if (err == 0) {
1988                                 assert_corr(lh != NULL);
1989                                 do_corr(schedule());
1990                                 err = iam_new_leaf(handle, leaf);
1991                                 if (err == 0)
1992                                         err = iam_txn_dirty(handle, path,
1993                                                             path->ip_frame->bh);
1994                         }
1995                         iam_unlock_htree(iam_path_obj(path), lh);
1996                         do_corr(schedule());
1997                 }
1998                 if (err == 0) {
1999                         iam_leaf_rec_add(leaf, k, r);
2000                         err = iam_txn_dirty(handle, path, leaf->il_bh);
2001                 }
2002         }
2003         assert_inv(iam_leaf_check(leaf));
2004         assert_inv(iam_leaf_check(&path->ip_leaf));
2005         assert_inv(iam_path_check(path));
2006         return err;
2007 }
2008
2009 /*
2010  * Insert new record with key @k and contents from @r, shifting records to the
2011  * right. On success, iterator is positioned on the newly inserted record.
2012  *
2013  * precondition: it->ii_flags&IAM_IT_WRITE &&
2014  *               (it_state(it) == IAM_IT_ATTACHED ||
2015  *                it_state(it) == IAM_IT_SKEWED) &&
2016  *               ergo(it_state(it) == IAM_IT_ATTACHED,
2017  *                    it_keycmp(it, k) <= 0) &&
2018  *               ergo(it_before(it), it_keycmp(it, k) > 0);
2019  * postcondition: ergo(result == 0,
2020  *                     it_state(it) == IAM_IT_ATTACHED &&
2021  *                     it_keycmp(it, k) == 0 &&
2022  *                     !memcmp(iam_it_rec_get(it), r, ...))
2023  */
2024 int iam_it_rec_insert(handle_t *h, struct iam_iterator *it,
2025                       const struct iam_key *k, const struct iam_rec *r)
2026 {
2027         int result;
2028         struct iam_path *path;
2029
2030         path = &it->ii_path;
2031
2032         assert_corr(it->ii_flags&IAM_IT_WRITE);
2033         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
2034                     it_state(it) == IAM_IT_SKEWED);
2035         assert_corr(ergo(it_state(it) == IAM_IT_ATTACHED,
2036                          it_keycmp(it, k) <= 0));
2037         assert_corr(ergo(it_before(it), it_keycmp(it, k) > 0));
2038         result = iam_add_rec(h, it, path, k, r);
2039         if (result == 0)
2040                 it->ii_state = IAM_IT_ATTACHED;
2041         assert_corr(ergo(result == 0,
2042                          it_state(it) == IAM_IT_ATTACHED &&
2043                          it_keycmp(it, k) == 0));
2044         return result;
2045 }
2046 EXPORT_SYMBOL(iam_it_rec_insert);
2047
2048 /*
2049  * Delete record under iterator.
2050  *
2051  * precondition:  it_state(it) == IAM_IT_ATTACHED &&
2052  *                it->ii_flags&IAM_IT_WRITE &&
2053  *                it_at_rec(it)
2054  * postcondition: it_state(it) == IAM_IT_ATTACHED ||
2055  *                it_state(it) == IAM_IT_DETACHED
2056  */
2057 int iam_it_rec_delete(handle_t *h, struct iam_iterator *it)
2058 {
2059         int result;
2060         struct iam_leaf *leaf;
2061         struct iam_path *path;
2062
2063         assert_corr(it_state(it) == IAM_IT_ATTACHED &&
2064                     it->ii_flags&IAM_IT_WRITE);
2065         assert_corr(it_at_rec(it));
2066
2067         path = &it->ii_path;
2068         leaf = &path->ip_leaf;
2069
2070         assert_inv(iam_leaf_check(leaf));
2071         assert_inv(iam_path_check(path));
2072
2073         result = iam_txn_add(h, path, leaf->il_bh);
2074         /*
2075          * no compaction for now.
2076          */
2077         if (result == 0) {
2078                 iam_rec_del(leaf, it->ii_flags&IAM_IT_MOVE);
2079                 result = iam_txn_dirty(h, path, leaf->il_bh);
2080                 if (result == 0 && iam_leaf_at_end(leaf) &&
2081                     it->ii_flags&IAM_IT_MOVE) {
2082                         result = iam_it_next(it);
2083                         if (result > 0)
2084                                 result = 0;
2085                 }
2086         }
2087         assert_inv(iam_leaf_check(leaf));
2088         assert_inv(iam_path_check(path));
2089         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
2090                     it_state(it) == IAM_IT_DETACHED);
2091         return result;
2092 }
2093 EXPORT_SYMBOL(iam_it_rec_delete);
2094
2095 /*
2096  * Convert iterator to cookie.
2097  *
2098  * precondition:  it_state(it) == IAM_IT_ATTACHED &&
2099  *                iam_path_descr(it->ii_path)->id_key_size <= sizeof(iam_pos_t)
2100  * postcondition: it_state(it) == IAM_IT_ATTACHED
2101  */
2102 iam_pos_t iam_it_store(const struct iam_iterator *it)
2103 {
2104         iam_pos_t result;
2105
2106         assert_corr(it_state(it) == IAM_IT_ATTACHED);
2107         assert_corr(it_at_rec(it));
2108         assert_corr(iam_it_container(it)->ic_descr->id_ikey_size <=
2109                     sizeof result);
2110
2111         result = 0;
2112         return *(iam_pos_t *)iam_it_ikey_get(it, (void *)&result);
2113 }
2114 EXPORT_SYMBOL(iam_it_store);
2115
2116 /*
2117  * Restore iterator from cookie.
2118  *
2119  * precondition:  it_state(it) == IAM_IT_DETACHED && it->ii_flags&IAM_IT_MOVE &&
2120  *                iam_path_descr(it->ii_path)->id_key_size <= sizeof(iam_pos_t)
2121  * postcondition: ergo(result == 0, it_state(it) == IAM_IT_ATTACHED &&
2122  *                                  iam_it_store(it) == pos)
2123  */
2124 int iam_it_load(struct iam_iterator *it, iam_pos_t pos)
2125 {
2126         assert_corr(it_state(it) == IAM_IT_DETACHED &&
2127                     it->ii_flags&IAM_IT_MOVE);
2128         assert_corr(iam_it_container(it)->ic_descr->id_ikey_size <= sizeof pos);
2129         return iam_it_iget(it, (struct iam_ikey *)&pos);
2130 }
2131 EXPORT_SYMBOL(iam_it_load);
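
/*
 * Hypothetical sketch of the store/load cookie round-trip (assuming a
 * container @c and a path descriptor @pd set up elsewhere, and a cookie
 * @pos previously obtained from iam_it_store() on an attached iterator):
 *
 *     struct iam_iterator it;
 *     int result;
 *
 *     iam_it_init(&it, c, IAM_IT_MOVE, pd);
 *     result = iam_it_load(&it, pos);
 *     if (result == 0) {
 *             ... read records, possibly advancing with iam_it_next(&it) ...
 *     }
 *     iam_it_put(&it);
 *     iam_it_fini(&it);
 */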
2132
2133 /***********************************************************************/
2134 /* invariants                                                          */
2135 /***********************************************************************/
2136
2137 static inline int ptr_inside(void *base, size_t size, void *ptr)
2138 {
2139         return (base <= ptr) && (ptr < base + size);
2140 }
2141
2142 int iam_frame_invariant(struct iam_frame *f)
2143 {
2144         return
2145                 (f->bh != NULL &&
2146                 f->bh->b_data != NULL &&
2147                 ptr_inside(f->bh->b_data, f->bh->b_size, f->entries) &&
2148                 ptr_inside(f->bh->b_data, f->bh->b_size, f->at) &&
2149                 f->entries <= f->at);
2150 }
2151 int iam_leaf_invariant(struct iam_leaf *l)
2152 {
2153         return
2154                 l->il_bh != NULL &&
2155                 l->il_bh->b_data != NULL &&
2156                 ptr_inside(l->il_bh->b_data, l->il_bh->b_size, l->il_entries) &&
2157                 ptr_inside(l->il_bh->b_data, l->il_bh->b_size, l->il_at) &&
2158                 l->il_entries <= l->il_at;
2159 }
2160
2161 int iam_path_invariant(struct iam_path *p)
2162 {
2163         int i;
2164
2165         if (p->ip_container == NULL ||
2166             p->ip_indirect < 0 || p->ip_indirect > DX_MAX_TREE_HEIGHT - 1 ||
2167             p->ip_frame != p->ip_frames + p->ip_indirect ||
2168             !iam_leaf_invariant(&p->ip_leaf))
2169                 return 0;
2170         for (i = 0; i < ARRAY_SIZE(p->ip_frames); ++i) {
2171                 if (i <= p->ip_indirect) {
2172                         if (!iam_frame_invariant(&p->ip_frames[i]))
2173                                 return 0;
2174                 }
2175         }
2176         return 1;
2177 }
2178
2179 int iam_it_invariant(struct iam_iterator *it)
2180 {
2181         return
2182                 (it->ii_state == IAM_IT_DETACHED ||
2183                  it->ii_state == IAM_IT_ATTACHED ||
2184                  it->ii_state == IAM_IT_SKEWED) &&
2185                 !(it->ii_flags & ~(IAM_IT_MOVE | IAM_IT_WRITE)) &&
2186                 ergo(it->ii_state == IAM_IT_ATTACHED ||
2187                      it->ii_state == IAM_IT_SKEWED,
2188                      iam_path_invariant(&it->ii_path) &&
2189                      equi(it_at_rec(it), it->ii_state == IAM_IT_SKEWED));
2190 }
2191
2192 /*
2193  * Search container @c for record with key @k. If record is found, its data
2194  * are moved into @r.
2195  *
2196  * Return values: 0: found, -ENOENT: not-found, -ve: error
2197  */
2198 int iam_lookup(struct iam_container *c, const struct iam_key *k,
2199                struct iam_rec *r, struct iam_path_descr *pd)
2200 {
2201         struct iam_iterator it;
2202         int result;
2203
2204         iam_it_init(&it, c, 0, pd);
2205
2206         result = iam_it_get_exact(&it, k);
2207         if (result == 0)
2208                 /*
2209                  * record with required key found, copy it into user buffer
2210                  */
2211                 iam_reccpy(&it.ii_path.ip_leaf, r);
2212         iam_it_put(&it);
2213         iam_it_fini(&it);
2214         return result;
2215 }
2216 EXPORT_SYMBOL(iam_lookup);
2217
2218 /*
2219  * Insert new record @r with key @k into container @c (within context of
2220  * transaction @h).
2221  *
2222  * Return values: 0: success, -ve: error, including -EEXIST when record with
2223  * given key is already present.
2224  *
2225  * postcondition: ergo(result == 0 || result == -EEXIST,
2226  *                                  iam_lookup(c, k, r2) == 0);
2227  */
2228 int iam_insert(handle_t *h, struct iam_container *c, const struct iam_key *k,
2229                const struct iam_rec *r, struct iam_path_descr *pd)
2230 {
2231         struct iam_iterator it;
2232         int result;
2233
2234         iam_it_init(&it, c, IAM_IT_WRITE, pd);
2235
2236         result = iam_it_get_exact(&it, k);
2237         if (result == -ENOENT)
2238                 result = iam_it_rec_insert(h, &it, k, r);
2239         else if (result == 0)
2240                 result = -EEXIST;
2241         iam_it_put(&it);
2242         iam_it_fini(&it);
2243         return result;
2244 }
2245 EXPORT_SYMBOL(iam_insert);
2246
2247 /*
2248  * Update record with the key @k in container @c (within context of
2249  * transaction @h); the new record is given by @r.
2250  *
2251  * Return values: +1: update skipped because the record value is unchanged,
2252  * 0: success, -ve: error, including -ENOENT if no such record is found.
2253  */
2254 int iam_update(handle_t *h, struct iam_container *c, const struct iam_key *k,
2255                const struct iam_rec *r, struct iam_path_descr *pd)
2256 {
2257         struct iam_iterator it;
2258         struct iam_leaf *folio;
2259         int result;
2260
2261         iam_it_init(&it, c, IAM_IT_WRITE, pd);
2262
2263         result = iam_it_get_exact(&it, k);
2264         if (result == 0) {
2265                 folio = &it.ii_path.ip_leaf;
2266                 result = iam_leaf_ops(folio)->rec_eq(folio, r);
2267                 if (result == 0)
2268                         result = iam_it_rec_set(h, &it, r);
2269                 else
2270                         result = 1;
2271         }
2272         iam_it_put(&it);
2273         iam_it_fini(&it);
2274         return result;
2275 }
2276 EXPORT_SYMBOL(iam_update);
2277
2278 /*
2279  * Delete existing record with key @k.
2280  *
2281  * Return values: 0: success, -ENOENT: not-found, -ve: other error.
2282  *
2283  * postcondition: ergo(result == 0 || result == -ENOENT,
2284  *                                 !iam_lookup(c, k, *));
2285  */
2286 int iam_delete(handle_t *h, struct iam_container *c, const struct iam_key *k,
2287                struct iam_path_descr *pd)
2288 {
2289         struct iam_iterator it;
2290         int result;
2291
2292         iam_it_init(&it, c, IAM_IT_WRITE, pd);
2293
2294         result = iam_it_get_exact(&it, k);
2295         if (result == 0)
2296                 iam_it_rec_delete(h, &it);
2297         iam_it_put(&it);
2298         iam_it_fini(&it);
2299         return result;
2300 }
2301 EXPORT_SYMBOL(iam_delete);
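
/*
 * Hypothetical usage sketch for the four wrappers above (assuming a set-up
 * container @c, path descriptor @pd, journal handle @h supplied by the
 * caller, key @k, and record buffers @rec and @out sized for @c):
 *
 *     rc = iam_insert(h, c, k, rec, pd);   0, -EEXIST if @k exists, or -ve
 *     rc = iam_update(h, c, k, rec, pd);   0, +1 if unchanged, or -ve
 *     rc = iam_lookup(c, k, out, pd);      0, -ENOENT, or -ve; no handle
 *     rc = iam_delete(h, c, k, pd);        0, -ENOENT, or -ve
 *
 * None of the wrappers starts or stops a journal transaction; the handle
 * always comes from the caller.
 */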
2302
2303 int iam_root_limit(int rootgap, int blocksize, int size)
2304 {
2305         int limit;
2306         int nlimit;
2307
2308         limit = (blocksize - rootgap) / size;
2309         nlimit = blocksize / size;
2310         if (limit == nlimit)
2311                 limit--;
2312         return limit;
2313 }
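
/*
 * Worked example for iam_root_limit() (illustrative numbers only): with
 * blocksize = 4096, rootgap = 32 and size = 16, limit = (4096 - 32)/16 = 254
 * while nlimit = 4096/16 = 256, so 254 is returned.  With size = 24 and
 * rootgap = 8, limit = 4088/24 = 170 equals nlimit = 4096/24 = 170, so one
 * slot is given up and 169 is returned, presumably to keep the root limit
 * strictly below that of a full non-root node (cf. the
 * assert_corr(dx_root_limit(path) < dx_node_limit(path)) in
 * split_index_node()).
 */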