LU-1548 osd: move i_htree_lock to iam container
[fs/lustre-release.git] / lustre / osd-ldiskfs / osd_iam.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see [sun.com URL with a
18  * copy of GPLv2].
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2011, 2012, Whamcloud, Inc.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * iam.c
37  * Top-level entry points into iam module
38  *
39  * Author: Wang Di <wangdi@clusterfs.com>
40  * Author: Nikita Danilov <nikita@clusterfs.com>
41  */
42
43 /*
44  * iam: big theory statement.
45  *
46  * iam (Index Access Module) is a module providing an abstraction of a
47  * persistent transactional container on top of a generalized ldiskfs htree.
48  *
49  * iam supports:
50  *
51  *     - key, pointer, and record size specifiable per container.
52  *
53  *     - trees taller than 2 index levels.
54  *
55  *     - read/write to existing ldiskfs htree directories as iam containers.
56  *
57  * iam container is a tree, consisting of leaf nodes containing keys and
58  * records stored in this container, and index nodes, containing keys and
59  * pointers to leaf or index nodes.
60  *
61  * iam does not work with keys directly; instead it calls a user-supplied key
62  * comparison function (->dpo_keycmp()).
63  *
64  * Pointers are (currently) interpreted as logical offsets (measured in
65  * blocks) within the underlying flat file on top of which the iam tree lives.
66  *
67  * On-disk format:
68  *
69  * iam mostly tries to reuse existing htree formats.
70  *
71  * Format of index node:
72  *
73  * +-----+-------+-------+-------+------+-------+------------+
74  * |     | count |       |       |      |       |            |
75  * | gap |   /   | entry | entry | .... | entry | free space |
76  * |     | limit |       |       |      |       |            |
77  * +-----+-------+-------+-------+------+-------+------------+
78  *
79  *       gap           this part of the node is never accessed by iam code. It
80  *                     exists for binary compatibility with the ldiskfs htree
81  *                     (which, in turn, stores a fake struct ext2_dirent for ext2
82  *                     compatibility), and to keep some unspecified per-node
83  *                     data. Gap can be different for root and non-root index
84  *                     nodes. Gap size can be specified for each container
85  *                     (gap of 0 is allowed).
86  *
87  *       count/limit   current number of entries in this node, and the maximal
88  *                     number of entries that can fit into node. count/limit
89  *                     has the same size as entry, and is itself counted in
90  *                     count.
91  *
92  *       entry         index entry: consists of a key immediately followed by
93  *                     a pointer to a child node. The sizes of the key and the
94  *                     pointer depend on the container. Entry has neither
95  *                     alignment nor padding.
96  *
97  *       free space    portion of the node where new entries are added
98  *
99  * Entries in index node are sorted by their key value.
100  *
101  * Format of a leaf node is not specified. Generic iam code accesses leaf
102  * nodes through ->id_leaf methods in struct iam_descr.
103  *
104  */
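/*
 * Illustration only (not compiled): a sketch of reading the count/limit
 * header described above, assuming the ldiskfs-style struct dx_countlimit
 * layout (two little-endian 16-bit fields) that dx_set_limit() further down
 * also relies on.  The real code goes through the dx_get_count() and
 * dx_get_limit() accessors.
 */
#if 0
static inline unsigned sketch_node_count(const struct iam_entry *entries)
{
        /* the count/limit pair occupies the first entry slot of an index node */
        return le16_to_cpu(((const struct dx_countlimit *)entries)->count);
}

static inline unsigned sketch_node_limit(const struct iam_entry *entries)
{
        return le16_to_cpu(((const struct dx_countlimit *)entries)->limit);
}
#endif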
105
106 #include <linux/module.h>
107 #include <linux/fs.h>
108 #include <linux/pagemap.h>
109 #include <linux/time.h>
110 #include <linux/fcntl.h>
111 #include <linux/stat.h>
112 #include <linux/string.h>
113 #include <linux/quotaops.h>
114 #include <linux/buffer_head.h>
115 #include "osd_internal.h"
116
117 #include "xattr.h"
118 #include "acl.h"
119
120 /*
121  * List of all registered formats.
122  *
123  * No locking. Callers synchronize.
124  */
125 static CFS_LIST_HEAD(iam_formats);
126
127 void iam_format_register(struct iam_format *fmt)
128 {
129         cfs_list_add(&fmt->if_linkage, &iam_formats);
130 }
131 EXPORT_SYMBOL(iam_format_register);
132
133 /*
134  * Determine format of given container. This is done by scanning list of
135  * registered formats and calling ->if_guess() method of each in turn.
136  */
137 static int iam_format_guess(struct iam_container *c)
138 {
139         int result;
140         struct iam_format *fmt;
141
142         /*
143          * XXX temporary initialization hook.
144          */
145         {
146                 static int initialized = 0;
147
148                 if (!initialized) {
149                         iam_lvar_format_init();
150                         iam_lfix_format_init();
151                         initialized = 1;
152                 }
153         }
154
155         result = -ENOENT;
156         cfs_list_for_each_entry(fmt, &iam_formats, if_linkage) {
157                 result = fmt->if_guess(c);
158                 if (result == 0)
159                         break;
160         }
161         return result;
162 }
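/*
 * Usage sketch (illustration only, not compiled): a hypothetical format makes
 * itself known by filling in an ->if_guess() callback and passing its
 * descriptor to iam_format_register(); iam_format_guess() above then probes
 * every registered format in turn.  No struct iam_format fields beyond
 * if_guess and if_linkage are assumed here.
 */
#if 0
static int sketch_guess(struct iam_container *c)
{
        /* inspect c->ic_object and/or its root block;
         * return 0 if the format is recognized, a negative errno otherwise */
        return -ENOENT;
}

static struct iam_format sketch_format = {
        .if_guess = sketch_guess,
};

static void sketch_format_init(void)
{
        iam_format_register(&sketch_format);
}
#endif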
163
164 /*
165  * Initialize container @c.
166  */
167 int iam_container_init(struct iam_container *c,
168                        struct iam_descr *descr, struct inode *inode)
169 {
170         memset(c, 0, sizeof *c);
171         c->ic_descr  = descr;
172         c->ic_object = inode;
173         cfs_init_rwsem(&c->ic_sem);
174         dynlock_init(&c->ic_tree_lock);
175         return 0;
176 }
177 EXPORT_SYMBOL(iam_container_init);
178
179 /*
180  * Determine container format.
181  */
182 int iam_container_setup(struct iam_container *c)
183 {
184         return iam_format_guess(c);
185 }
186 EXPORT_SYMBOL(iam_container_setup);
187
188 /*
189  * Finalize container @c, release all resources.
190  */
191 void iam_container_fini(struct iam_container *c)
192 {
193         brelse(c->ic_root_bh);
194         c->ic_root_bh = NULL;
195 }
196 EXPORT_SYMBOL(iam_container_fini);
197
198 void iam_path_init(struct iam_path *path, struct iam_container *c,
199                    struct iam_path_descr *pd)
200 {
201         memset(path, 0, sizeof *path);
202         path->ip_container = c;
203         path->ip_frame = path->ip_frames;
204         path->ip_data = pd;
205         path->ip_leaf.il_path = path;
206 }
207
208 static void iam_leaf_fini(struct iam_leaf *leaf);
209
210 void iam_path_release(struct iam_path *path)
211 {
212         int i;
213
214         for (i = 0; i < ARRAY_SIZE(path->ip_frames); i++) {
215                 if (path->ip_frames[i].bh != NULL) {
216                         brelse(path->ip_frames[i].bh);
217                         path->ip_frames[i].bh = NULL;
218                 }
219         }
220 }
221
222 void iam_path_fini(struct iam_path *path)
223 {
224         iam_leaf_fini(&path->ip_leaf);
225         iam_path_release(path);
226 }
227
228
229 void iam_path_compat_init(struct iam_path_compat *path, struct inode *inode)
230 {
231         int i;
232
233         path->ipc_hinfo = &path->ipc_hinfo_area;
234         for (i = 0; i < ARRAY_SIZE(path->ipc_scratch); ++i)
235                 path->ipc_descr.ipd_key_scratch[i] =
236                         (struct iam_ikey *)&path->ipc_scratch[i];
237
238         iam_path_init(&path->ipc_path, &path->ipc_container, &path->ipc_descr);
239 }
240
241 void iam_path_compat_fini(struct iam_path_compat *path)
242 {
243         iam_path_fini(&path->ipc_path);
244 }
245
246 /*
247  * Helper function initializing iam_path_descr and its key scratch area.
248  */
249 struct iam_path_descr *iam_ipd_alloc(void *area, int keysize)
250 {
251         struct iam_path_descr *ipd;
252         void *karea;
253         int i;
254
255         ipd = area;
256         karea = ipd + 1;
257         for (i = 0; i < ARRAY_SIZE(ipd->ipd_key_scratch); ++i, karea += keysize)
258                 ipd->ipd_key_scratch[i] = karea;
259         return ipd;
260 }
261 EXPORT_SYMBOL(iam_ipd_alloc);
262
263 void iam_ipd_free(struct iam_path_descr *ipd)
264 {
265 }
266 EXPORT_SYMBOL(iam_ipd_free);
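/*
 * Usage sketch (illustration only, not compiled): the caller supplies one flat
 * memory area holding the descriptor followed by one scratch key per
 * ipd_key_scratch[] slot, which is exactly the layout iam_ipd_alloc() carves
 * up above.  SKETCH_KEYSIZE and the on-stack buffer are hypothetical.
 */
#if 0
#define SKETCH_KEYSIZE 32
static void sketch_ipd_usage(void)
{
        struct iam_path_descr *ipd;
        char area[sizeof(struct iam_path_descr) +
                  ARRAY_SIZE(((struct iam_path_descr *)NULL)->ipd_key_scratch) *
                  SKETCH_KEYSIZE];

        ipd = iam_ipd_alloc(area, SKETCH_KEYSIZE);
        /* ... pass ipd to iam_path_init() or iam_it_init() ... */
        iam_ipd_free(ipd);
}
#endif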
267
268 int iam_node_read(struct iam_container *c, iam_ptr_t ptr,
269                   handle_t *h, struct buffer_head **bh)
270 {
271         int result = 0;
272
273         /* NB: it can be called by iam_lfix_guess() which is still at a
274          * very early stage, when c->ic_root_bh and c->ic_descr->id_ops
275          * haven't been initialized yet.
276          * Also, we don't have this for IAM dir.
277          */
278         if (c->ic_root_bh != NULL &&
279             c->ic_descr->id_ops->id_root_ptr(c) == ptr) {
280                 get_bh(c->ic_root_bh);
281                 *bh = c->ic_root_bh;
282                 return 0;
283         }
284
285         *bh = ldiskfs_bread(h, c->ic_object, (int)ptr, 0, &result);
286         if (*bh == NULL)
287                 result = -EIO;
288         return result;
289 }
290
291 /*
292  * Return pointer to current leaf record. Pointer is valid while corresponding
293  * leaf node is locked and pinned.
294  */
295 static struct iam_rec *iam_leaf_rec(const struct iam_leaf *leaf)
296 {
297         return iam_leaf_ops(leaf)->rec(leaf);
298 }
299
300 /*
301  * Return pointer to the current leaf key. This function returns pointer to
302  * the key stored in node.
303  *
304  * Caller should assume that returned pointer is only valid while leaf node is
305  * pinned and locked.
306  */
307 static struct iam_key *iam_leaf_key(const struct iam_leaf *leaf)
308 {
309         return iam_leaf_ops(leaf)->key(leaf);
310 }
311
312 static int iam_leaf_key_size(const struct iam_leaf *leaf)
313 {
314         return iam_leaf_ops(leaf)->key_size(leaf);
315 }
316
317 static struct iam_ikey *iam_leaf_ikey(const struct iam_leaf *leaf,
318                                       struct iam_ikey *key)
319 {
320         return iam_leaf_ops(leaf)->ikey(leaf, key);
321 }
322
323 static int iam_leaf_keycmp(const struct iam_leaf *leaf,
324                            const struct iam_key *key)
325 {
326         return iam_leaf_ops(leaf)->key_cmp(leaf, key);
327 }
328
329 static int iam_leaf_keyeq(const struct iam_leaf *leaf,
330                           const struct iam_key *key)
331 {
332         return iam_leaf_ops(leaf)->key_eq(leaf, key);
333 }
334
335 #if LDISKFS_INVARIANT_ON
336 static int iam_leaf_check(struct iam_leaf *leaf);
337 extern int dx_node_check(struct iam_path *p, struct iam_frame *f);
338
339 static int iam_path_check(struct iam_path *p)
340 {
341         int i;
342         int result;
343         struct iam_frame *f;
344         struct iam_descr *param;
345
346         result = 1;
347         param = iam_path_descr(p);
348         for (i = 0; result && i < ARRAY_SIZE(p->ip_frames); ++i) {
349                 f = &p->ip_frames[i];
350                 if (f->bh != NULL) {
351                         result = dx_node_check(p, f);
352                         if (result)
353                                 result = !param->id_ops->id_node_check(p, f);
354                 }
355         }
356         if (result && p->ip_leaf.il_bh != NULL)
357                 result = iam_leaf_check(&p->ip_leaf);
358         if (result == 0) {
359                 ldiskfs_std_error(iam_path_obj(p)->i_sb, result);
360         }
361         return result;
362 }
363 #endif
364
365 static int iam_leaf_load(struct iam_path *path)
366 {
367         iam_ptr_t block;
368         int err;
369         struct iam_container *c;
370         struct buffer_head   *bh;
371         struct iam_leaf      *leaf;
372         struct iam_descr     *descr;
373
374         c     = path->ip_container;
375         leaf  = &path->ip_leaf;
376         descr = iam_path_descr(path);
377         block = path->ip_frame->leaf;
378         if (block == 0) {
379                 /* XXX bug 11027 */
380                 printk(CFS_KERN_EMERG "wrong leaf: %lu %d [%p %p %p]\n",
381                        (long unsigned)path->ip_frame->leaf,
382                        dx_get_count(dx_node_get_entries(path, path->ip_frame)),
383                        path->ip_frames[0].bh, path->ip_frames[1].bh,
384                        path->ip_frames[2].bh);
385         }
386         err   = descr->id_ops->id_node_read(c, block, NULL, &bh);
387         if (err == 0) {
388                 leaf->il_bh = bh;
389                 leaf->il_curidx = block;
390                 err = iam_leaf_ops(leaf)->init(leaf);
391                 assert_inv(ergo(err == 0, iam_leaf_check(leaf)));
392         }
393         return err;
394 }
395
396 static void iam_unlock_htree(struct iam_container *ic,
397                              struct dynlock_handle *lh)
398 {
399         if (lh != NULL)
400                 dynlock_unlock(&ic->ic_tree_lock, lh);
401 }
402
403
404 static void iam_leaf_unlock(struct iam_leaf *leaf)
405 {
406         if (leaf->il_lock != NULL) {
407                 iam_unlock_htree(iam_leaf_container(leaf),
408                                  leaf->il_lock);
409                 do_corr(schedule());
410                 leaf->il_lock = NULL;
411         }
412 }
413
414 static void iam_leaf_fini(struct iam_leaf *leaf)
415 {
416         if (leaf->il_path != NULL) {
417                 iam_leaf_unlock(leaf);
418                 assert_inv(ergo(leaf->il_bh != NULL, iam_leaf_check(leaf)));
419                 iam_leaf_ops(leaf)->fini(leaf);
420                 if (leaf->il_bh) {
421                         brelse(leaf->il_bh);
422                         leaf->il_bh = NULL;
423                         leaf->il_curidx = 0;
424                 }
425         }
426 }
427
428 static void iam_leaf_start(struct iam_leaf *folio)
429 {
430         iam_leaf_ops(folio)->start(folio);
431 }
432
433 void iam_leaf_next(struct iam_leaf *folio)
434 {
435         iam_leaf_ops(folio)->next(folio);
436 }
437
438 static void iam_leaf_rec_add(struct iam_leaf *leaf, const struct iam_key *key,
439                              const struct iam_rec *rec)
440 {
441         iam_leaf_ops(leaf)->rec_add(leaf, key, rec);
442 }
443
444 static void iam_rec_del(struct iam_leaf *leaf, int shift)
445 {
446         iam_leaf_ops(leaf)->rec_del(leaf, shift);
447 }
448
449 int iam_leaf_at_end(const struct iam_leaf *leaf)
450 {
451         return iam_leaf_ops(leaf)->at_end(leaf);
452 }
453
454 void iam_leaf_split(struct iam_leaf *l, struct buffer_head **bh, iam_ptr_t nr)
455 {
456         iam_leaf_ops(l)->split(l, bh, nr);
457 }
458
459 int iam_leaf_can_add(const struct iam_leaf *l,
460                      const struct iam_key *k, const struct iam_rec *r)
461 {
462         return iam_leaf_ops(l)->can_add(l, k, r);
463 }
464
465 #if LDISKFS_INVARIANT_ON
466 static int iam_leaf_check(struct iam_leaf *leaf)
467 {
468         return 1;
469 #if 0
470         struct iam_lentry    *orig;
471         struct iam_path      *path;
472         struct iam_container *bag;
473         struct iam_ikey       *k0;
474         struct iam_ikey       *k1;
475         int result;
476         int first;
477
478         orig = leaf->il_at;
479         path = iam_leaf_path(leaf);
480         bag  = iam_leaf_container(leaf);
481
482         result = iam_leaf_ops(leaf)->init(leaf);
483         if (result != 0)
484                 return result;
485
486         first = 1;
487         iam_leaf_start(leaf);
488         k0 = iam_path_ikey(path, 0);
489         k1 = iam_path_ikey(path, 1);
490         while (!iam_leaf_at_end(leaf)) {
491                 iam_ikeycpy(bag, k0, k1);
492                 iam_ikeycpy(bag, k1, iam_leaf_ikey(leaf, k1));
493                 if (!first && iam_ikeycmp(bag, k0, k1) > 0) {
494                         return 0;
495                 }
496                 first = 0;
497                 iam_leaf_next(leaf);
498         }
499         leaf->il_at = orig;
500         return 1;
501 #endif
502 }
503 #endif
504
505 static int iam_txn_dirty(handle_t *handle,
506                          struct iam_path *path, struct buffer_head *bh)
507 {
508         int result;
509
510         result = ldiskfs_journal_dirty_metadata(handle, bh);
511         if (result != 0)
512                 ldiskfs_std_error(iam_path_obj(path)->i_sb, result);
513         return result;
514 }
515
516 static int iam_txn_add(handle_t *handle,
517                        struct iam_path *path, struct buffer_head *bh)
518 {
519         int result;
520
521         result = ldiskfs_journal_get_write_access(handle, bh);
522         if (result != 0)
523                 ldiskfs_std_error(iam_path_obj(path)->i_sb, result);
524         return result;
525 }
526
527 /***********************************************************************/
528 /* iterator interface                                                  */
529 /***********************************************************************/
530
531 static enum iam_it_state it_state(const struct iam_iterator *it)
532 {
533         return it->ii_state;
534 }
535
536 /*
537  * Helper function returning the container an iterator is attached to.
538  */
539 static struct iam_container *iam_it_container(const struct iam_iterator *it)
540 {
541         return it->ii_path.ip_container;
542 }
543
544 static inline int it_keycmp(const struct iam_iterator *it,
545                             const struct iam_key *k)
546 {
547         return iam_leaf_keycmp(&it->ii_path.ip_leaf, k);
548 }
549
550 static inline int it_keyeq(const struct iam_iterator *it,
551                            const struct iam_key *k)
552 {
553         return iam_leaf_keyeq(&it->ii_path.ip_leaf, k);
554 }
555
556 static int it_ikeycmp(const struct iam_iterator *it, const struct iam_ikey *ik)
557 {
558         return iam_ikeycmp(it->ii_path.ip_container,
559                            iam_leaf_ikey(&it->ii_path.ip_leaf,
560                                          iam_path_ikey(&it->ii_path, 0)), ik);
561 }
562
563 static inline int it_at_rec(const struct iam_iterator *it)
564 {
565         return !iam_leaf_at_end(&it->ii_path.ip_leaf);
566 }
567
568 static inline int it_before(const struct iam_iterator *it)
569 {
570         return it_state(it) == IAM_IT_SKEWED && it_at_rec(it);
571 }
572
573 /*
574  * Helper wrapper around iam_it_get(): returns 0 (success) only when a record
575  * with exactly the key asked for is found.
576  */
577 static int iam_it_get_exact(struct iam_iterator *it, const struct iam_key *k)
578 {
579         int result;
580
581         result = iam_it_get(it, k);
582         if (result > 0)
583                 result = 0;
584         else if (result == 0)
585                 /*
586                  * Return -ENOENT if cursor is located above record with a key
587                  * different from one specified, or in the empty leaf.
588                  *
589                  * XXX returning -ENOENT only works if iam_it_get() never
590                  * returns -ENOENT as a legitimate error.
591                  */
592                 result = -ENOENT;
593         return result;
594 }
595
596 void iam_container_write_lock(struct iam_container *ic)
597 {
598         cfs_down_write(&ic->ic_sem);
599 }
600
601 void iam_container_write_unlock(struct iam_container *ic)
602 {
603         cfs_up_write(&ic->ic_sem);
604 }
605
606 void iam_container_read_lock(struct iam_container *ic)
607 {
608         cfs_down_read(&ic->ic_sem);
609 }
610
611 void iam_container_read_unlock(struct iam_container *ic)
612 {
613         cfs_up_read(&ic->ic_sem);
614 }
615
616 /*
617  * Initialize iterator to IAM_IT_DETACHED state.
618  *
619  * postcondition: it_state(it) == IAM_IT_DETACHED
620  */
621 int  iam_it_init(struct iam_iterator *it, struct iam_container *c, __u32 flags,
622                  struct iam_path_descr *pd)
623 {
624         memset(it, 0, sizeof *it);
625         it->ii_flags  = flags;
626         it->ii_state  = IAM_IT_DETACHED;
627         iam_path_init(&it->ii_path, c, pd);
628         return 0;
629 }
630 EXPORT_SYMBOL(iam_it_init);
631
632 /*
633  * Finalize iterator and release all resources.
634  *
635  * precondition: it_state(it) == IAM_IT_DETACHED
636  */
637 void iam_it_fini(struct iam_iterator *it)
638 {
639         assert_corr(it_state(it) == IAM_IT_DETACHED);
640         iam_path_fini(&it->ii_path);
641 }
642 EXPORT_SYMBOL(iam_it_fini);
643
644 /*
645  * These locking primitives are used to protect parts of the directory's
646  * htree. The unit of protection is a block: a leaf or an index node.
647  */
648 static struct dynlock_handle *iam_lock_htree(struct iam_container *ic,
649                                              unsigned long value,
650                                              enum dynlock_type lt)
651 {
652         return dynlock_lock(&ic->ic_tree_lock, value, lt, GFP_NOFS);
653 }
654
655 int iam_index_lock(struct iam_path *path, struct dynlock_handle **lh)
656 {
657         struct iam_frame *f;
658
659         for (f = path->ip_frame; f >= path->ip_frames; --f, ++lh) {
660                 do_corr(schedule());
661                 *lh = iam_lock_htree(path->ip_container, f->curidx, DLT_READ);
662                 if (*lh == NULL)
663                         return -ENOMEM;
664         }
665         return 0;
666 }
667
668 /*
669  * Fast check for frame consistency.
670  */
671 static int iam_check_fast(struct iam_path *path, struct iam_frame *frame)
672 {
673         struct iam_container *bag;
674         struct iam_entry *next;
675         struct iam_entry *last;
676         struct iam_entry *entries;
677         struct iam_entry *at;
678
679         bag     = path->ip_container;
680         at      = frame->at;
681         entries = frame->entries;
682         last    = iam_entry_shift(path, entries, dx_get_count(entries) - 1);
683
684         if (unlikely(at > last))
685                 return -EAGAIN;
686
687         if (unlikely(dx_get_block(path, at) != frame->leaf))
688                 return -EAGAIN;
689
690         if (unlikely(iam_ikeycmp(bag, iam_ikey_at(path, at),
691                                  path->ip_ikey_target) > 0))
692                 return -EAGAIN;
693
694         next = iam_entry_shift(path, at, +1);
695         if (next <= last) {
696                 if (unlikely(iam_ikeycmp(bag, iam_ikey_at(path, next),
697                                          path->ip_ikey_target) <= 0))
698                         return -EAGAIN;
699         }
700         return 0;
701 }
702
703 int dx_index_is_compat(struct iam_path *path)
704 {
705         return iam_path_descr(path) == NULL;
706 }
707
708 /*
709  * iam_find_position
710  *
711  * Search for the position of the specified hash in the index node.
712  *
713  */
714
715 struct iam_entry *iam_find_position(struct iam_path *path,
716                                    struct iam_frame *frame)
717 {
718         int count;
719         struct iam_entry *p;
720         struct iam_entry *q;
721         struct iam_entry *m;
722
723         count = dx_get_count(frame->entries);
724         assert_corr(count && count <= dx_get_limit(frame->entries));
725         p = iam_entry_shift(path, frame->entries,
726                             dx_index_is_compat(path) ? 1 : 2);
727         q = iam_entry_shift(path, frame->entries, count - 1);
728         while (p <= q) {
729                 m = iam_entry_shift(path, p, iam_entry_diff(path, q, p) / 2);
730                 if (iam_ikeycmp(path->ip_container, iam_ikey_at(path, m),
731                                 path->ip_ikey_target) > 0)
732                         q = iam_entry_shift(path, m, -1);
733                 else
734                         p = iam_entry_shift(path, m, +1);
735         }
736         return iam_entry_shift(path, p, -1);
737 }
738
739
740
741 static iam_ptr_t iam_find_ptr(struct iam_path *path, struct iam_frame *frame)
742 {
743         return dx_get_block(path, iam_find_position(path, frame));
744 }
745
746 void iam_insert_key(struct iam_path *path, struct iam_frame *frame,
747                     const struct iam_ikey *key, iam_ptr_t ptr)
748 {
749         struct iam_entry *entries = frame->entries;
750         struct iam_entry *new = iam_entry_shift(path, frame->at, +1);
751         int count = dx_get_count(entries);
752
753         /*
754          * Unfortunately we cannot assert this, as this function is sometimes
755          * called by VFS under i_sem and without pdirops lock.
756          */
757         assert_corr(1 || iam_frame_is_locked(path, frame));
758         assert_corr(count < dx_get_limit(entries));
759         assert_corr(frame->at < iam_entry_shift(path, entries, count));
760         assert_inv(dx_node_check(path, frame));
761
762         memmove(iam_entry_shift(path, new, 1), new,
763                 (char *)iam_entry_shift(path, entries, count) - (char *)new);
764         dx_set_ikey(path, new, key);
765         dx_set_block(path, new, ptr);
766         dx_set_count(entries, count + 1);
767         assert_inv(dx_node_check(path, frame));
768 }
769
770 void iam_insert_key_lock(struct iam_path *path, struct iam_frame *frame,
771                          const struct iam_ikey *key, iam_ptr_t ptr)
772 {
773         iam_lock_bh(frame->bh);
774         iam_insert_key(path, frame, key, ptr);
775         iam_unlock_bh(frame->bh);
776 }
777 /*
778  * returns 0 if path was unchanged, -EAGAIN otherwise.
779  */
780 static int iam_check_path(struct iam_path *path, struct iam_frame *frame)
781 {
782         int equal;
783
784         iam_lock_bh(frame->bh);
785         equal = iam_check_fast(path, frame) == 0 ||
786                 frame->leaf == iam_find_ptr(path, frame);
787         DX_DEVAL(iam_lock_stats.dls_bh_again += !equal);
788         iam_unlock_bh(frame->bh);
789
790         return equal ? 0 : -EAGAIN;
791 }
792
793 static int iam_lookup_try(struct iam_path *path)
794 {
795         u32 ptr;
796         int err = 0;
797         int i;
798
799         struct iam_descr *param;
800         struct iam_frame *frame;
801         struct iam_container *c;
802
803         param = iam_path_descr(path);
804         c = path->ip_container;
805
806         ptr = param->id_ops->id_root_ptr(c);
807         for (frame = path->ip_frames, i = 0; i <= path->ip_indirect;
808              ++frame, ++i) {
809                 err = param->id_ops->id_node_read(c, (iam_ptr_t)ptr, NULL,
810                                                   &frame->bh);
811                 do_corr(schedule());
812
813                 iam_lock_bh(frame->bh);
814                 /*
815                  * node must be initialized under bh lock because concurrent
816                  * creation procedure may change it and iam_lookup_try() will
817                  * see obsolete tree height. -bzzz
818                  */
819                 if (err != 0)
820                         break;
821
822                 if (LDISKFS_INVARIANT_ON) {
823                         err = param->id_ops->id_node_check(path, frame);
824                         if (err != 0)
825                                 break;
826                 }
827
828                 err = param->id_ops->id_node_load(path, frame);
829                 if (err != 0)
830                         break;
831
832                 assert_inv(dx_node_check(path, frame));
833                 /*
834                  * splitting may change the root index block and move the hash
835                  * we're looking for into another index block, so we have to check
836                  * for this situation and repeat from the beginning if the path
837                  * got changed -bzzz
838                  */
839                 if (i > 0) {
840                         err = iam_check_path(path, frame - 1);
841                         if (err != 0)
842                                 break;
843                 }
844
845                 frame->at = iam_find_position(path, frame);
846                 frame->curidx = ptr;
847                 frame->leaf = ptr = dx_get_block(path, frame->at);
848
849                 iam_unlock_bh(frame->bh);
850                 do_corr(schedule());
851         }
852         if (err != 0)
853                 iam_unlock_bh(frame->bh);
854         path->ip_frame = --frame;
855         return err;
856 }
857
858 static int __iam_path_lookup(struct iam_path *path)
859 {
860         int err;
861         int i;
862
863         for (i = 0; i < DX_MAX_TREE_HEIGHT; ++ i)
864                 assert(path->ip_frames[i].bh == NULL);
865
866         do {
867                 err = iam_lookup_try(path);
868                 do_corr(schedule());
869                 if (err != 0)
870                         iam_path_fini(path);
871         } while (err == -EAGAIN);
872
873         return err;
874 }
875
876 /*
877  * returns 0 if path was unchanged, -EAGAIN otherwise.
878  */
879 static int iam_check_full_path(struct iam_path *path, int search)
880 {
881         struct iam_frame *bottom;
882         struct iam_frame *scan;
883         int i;
884         int result;
885
886         do_corr(schedule());
887
888         for (bottom = path->ip_frames, i = 0;
889              i < DX_MAX_TREE_HEIGHT && bottom->bh != NULL; ++bottom, ++i) {
890                 ; /* find last filled in frame */
891         }
892
893         /*
894          * Lock frames, bottom to top.
895          */
896         for (scan = bottom - 1; scan >= path->ip_frames; --scan)
897                 iam_lock_bh(scan->bh);
898         /*
899          * Check them top to bottom.
900          */
901         result = 0;
902         for (scan = path->ip_frames; scan < bottom; ++scan) {
903                 struct iam_entry *pos;
904
905                 if (search) {
906                         if (iam_check_fast(path, scan) == 0)
907                                 continue;
908
909                         pos = iam_find_position(path, scan);
910                         if (scan->leaf != dx_get_block(path, pos)) {
911                                 result = -EAGAIN;
912                                 break;
913                         }
914                         scan->at = pos;
915                 } else {
916                         pos = iam_entry_shift(path, scan->entries,
917                                               dx_get_count(scan->entries) - 1);
918                         if (scan->at > pos ||
919                             scan->leaf != dx_get_block(path, scan->at)) {
920                                 result = -EAGAIN;
921                                 break;
922                         }
923                 }
924         }
925
926         /*
927          * Unlock top to bottom.
928          */
929         for (scan = path->ip_frames; scan < bottom; ++scan)
930                 iam_unlock_bh(scan->bh);
931         DX_DEVAL(iam_lock_stats.dls_bh_full_again += !!result);
932         do_corr(schedule());
933
934         return result;
935 }
936
937
938 /*
939  * Performs path lookup and returns with found leaf (if any) locked by htree
940  * lock.
941  */
942 int iam_lookup_lock(struct iam_path *path,
943                    struct dynlock_handle **dl, enum dynlock_type lt)
944 {
945         int result;
946         struct inode *dir;
947
948         dir = iam_path_obj(path);
949         while ((result = __iam_path_lookup(path)) == 0) {
950                 do_corr(schedule());
951                 *dl = iam_lock_htree(path->ip_container, path->ip_frame->leaf,
952                                      lt);
953                 if (*dl == NULL) {
954                         iam_path_fini(path);
955                         result = -ENOMEM;
956                         break;
957                 }
958                 do_corr(schedule());
959                 /*
960                  * the leaf we just found may get split while we are locking it,
961                  * so we need to check for this -bzzz
962                  */
963                 if (iam_check_full_path(path, 1) == 0)
964                         break;
965                 iam_unlock_htree(path->ip_container, *dl);
966                 *dl = NULL;
967                 iam_path_fini(path);
968         }
969         return result;
970 }
971 /*
972  * Performs tree top-to-bottom traversal starting from root, and loads leaf
973  * node.
974  */
975 static int iam_path_lookup(struct iam_path *path, int index)
976 {
977         struct iam_container *c;
978         struct iam_descr *descr;
979         struct iam_leaf  *leaf;
980         int result;
981
982         c = path->ip_container;
983         leaf = &path->ip_leaf;
984         descr = iam_path_descr(path);
985         result = iam_lookup_lock(path, &leaf->il_lock, DLT_WRITE);
986         assert_inv(iam_path_check(path));
987         do_corr(schedule());
988         if (result == 0) {
989                 result = iam_leaf_load(path);
990                 assert_inv(ergo(result == 0, iam_leaf_check(leaf)));
991                 if (result == 0) {
992                         do_corr(schedule());
993                         if (index)
994                                 result = iam_leaf_ops(leaf)->
995                                         ilookup(leaf, path->ip_ikey_target);
996                         else
997                                 result = iam_leaf_ops(leaf)->
998                                         lookup(leaf, path->ip_key_target);
999                         do_corr(schedule());
1000                 }
1001                 if (result < 0)
1002                         iam_leaf_unlock(leaf);
1003         }
1004         return result;
1005 }
1006
1007 /*
1008  * Common part of iam_it_{i,}get().
1009  */
1010 static int __iam_it_get(struct iam_iterator *it, int index)
1011 {
1012         int result;
1013         assert_corr(it_state(it) == IAM_IT_DETACHED);
1014
1015         result = iam_path_lookup(&it->ii_path, index);
1016         if (result >= 0) {
1017                 int collision;
1018
1019                 collision = result & IAM_LOOKUP_LAST;
1020                 switch (result & ~IAM_LOOKUP_LAST) {
1021                 case IAM_LOOKUP_EXACT:
1022                         result = +1;
1023                         it->ii_state = IAM_IT_ATTACHED;
1024                         break;
1025                 case IAM_LOOKUP_OK:
1026                         result = 0;
1027                         it->ii_state = IAM_IT_ATTACHED;
1028                         break;
1029                 case IAM_LOOKUP_BEFORE:
1030                 case IAM_LOOKUP_EMPTY:
1031                         result = 0;
1032                         it->ii_state = IAM_IT_SKEWED;
1033                         break;
1034                 default:
1035                         assert(0);
1036                 }
1037                 result |= collision;
1038         }
1039         /*
1040          * See iam_it_get_exact() for explanation.
1041          */
1042         assert_corr(result != -ENOENT);
1043         return result;
1044 }
1045
1046 /*
1047  * A record with the correct hash but a different key was found; iterate
1048  * through the hash collision chain, looking for the correct record.
1049  */
1050 static int iam_it_collision(struct iam_iterator *it)
1051 {
1052         int result;
1053
1054         assert(ergo(it_at_rec(it), !it_keyeq(it, it->ii_path.ip_key_target)));
1055
1056         while ((result = iam_it_next(it)) == 0) {
1057                 do_corr(schedule());
1058                 if (it_ikeycmp(it, it->ii_path.ip_ikey_target) != 0)
1059                         return -ENOENT;
1060                 if (it_keyeq(it, it->ii_path.ip_key_target))
1061                         return 0;
1062         }
1063         return result;
1064 }
1065
1066 /*
1067  * Attach iterator. After successful completion, @it points to record with
1068  * least key not larger than @k.
1069  *
1070  * Return value: 0: positioned on existing record,
1071  *             +ve: exact position found,
1072  *             -ve: error.
1073  *
1074  * precondition:  it_state(it) == IAM_IT_DETACHED
1075  * postcondition: ergo(result == 0 && it_state(it) == IAM_IT_ATTACHED,
1076  *                     it_keycmp(it, k) <= 0)
1077  */
1078 int iam_it_get(struct iam_iterator *it, const struct iam_key *k)
1079 {
1080         int result;
1081         assert_corr(it_state(it) == IAM_IT_DETACHED);
1082
1083         it->ii_path.ip_ikey_target = NULL;
1084         it->ii_path.ip_key_target  = k;
1085
1086         result = __iam_it_get(it, 0);
1087
1088         if (result == IAM_LOOKUP_LAST) {
1089                 result = iam_it_collision(it);
1090                 if (result != 0) {
1091                         iam_it_put(it);
1092                         iam_it_fini(it);
1093                         result = __iam_it_get(it, 0);
1094                 } else
1095                         result = +1;
1096         }
1097         if (result > 0)
1098                 result &= ~IAM_LOOKUP_LAST;
1099
1100         assert_corr(ergo(result > 0, it_keycmp(it, k) == 0));
1101         assert_corr(ergo(result == 0 && it_state(it) == IAM_IT_ATTACHED,
1102                          it_keycmp(it, k) <= 0));
1103         return result;
1104 }
1105 EXPORT_SYMBOL(iam_it_get);
1106
1107 /*
1108  * Attach iterator by index key.
1109  */
1110 static int iam_it_iget(struct iam_iterator *it, const struct iam_ikey *k)
1111 {
1112         assert_corr(it_state(it) == IAM_IT_DETACHED);
1113
1114         it->ii_path.ip_ikey_target = k;
1115         return __iam_it_get(it, 1) & ~IAM_LOOKUP_LAST;
1116 }
1117
1118 /*
1119  * Attach iterator, and ensure it points to the record (not skewed).
1120  *
1121  * Return value: 0: positioned on existing record,
1122  *             +ve: exact position found,
1123  *             -ve: error.
1124  *
1125  * precondition:  it_state(it) == IAM_IT_DETACHED &&
1126  *                !(it->ii_flags&IAM_IT_WRITE)
1127  * postcondition: ergo(result == 0, it_state(it) == IAM_IT_ATTACHED)
1128  */
1129 int iam_it_get_at(struct iam_iterator *it, const struct iam_key *k)
1130 {
1131         int result;
1132         assert_corr(it_state(it) == IAM_IT_DETACHED &&
1133                     !(it->ii_flags&IAM_IT_WRITE));
1134         result = iam_it_get(it, k);
1135         if (result == 0) {
1136                 if (it_state(it) != IAM_IT_ATTACHED) {
1137                         assert_corr(it_state(it) == IAM_IT_SKEWED);
1138                         result = iam_it_next(it);
1139                 }
1140         }
1141         assert_corr(ergo(result >= 0, it_state(it) == IAM_IT_ATTACHED));
1142         return result;
1143 }
1144 EXPORT_SYMBOL(iam_it_get_at);
1145
1146 /*
1147  * Duplicates iterator.
1148  *
1149  * postcondition: it_state(dst) == it_state(src) &&
1150  *                iam_it_container(dst) == iam_it_container(src) &&
1151  *                dst->ii_flags == src->ii_flags &&
1152  *                ergo(it_state(src) == IAM_IT_ATTACHED,
1153  *                     iam_it_rec_get(dst) == iam_it_rec_get(src) &&
1154  *                     iam_it_key_get(dst) == iam_it_key_get(src))
1155  */
1156 void iam_it_dup(struct iam_iterator *dst, const struct iam_iterator *src)
1157 {
1158         dst->ii_flags     = src->ii_flags;
1159         dst->ii_state     = src->ii_state;
1160         /* XXX not yet. iam_path_dup(&dst->ii_path, &src->ii_path); */
1161         /*
1162          * XXX: duplicate lock.
1163          */
1164         assert_corr(it_state(dst) == it_state(src));
1165         assert_corr(iam_it_container(dst) == iam_it_container(src));
1166         assert_corr(dst->ii_flags == src->ii_flags);
1167         assert_corr(ergo(it_state(src) == IAM_IT_ATTACHED,
1168                     iam_it_rec_get(dst) == iam_it_rec_get(src) &&
1169                     iam_it_key_get(dst) == iam_it_key_get(src)));
1170
1171 }
1172
1173 /*
1174  * Detach iterator. Does nothing in detached state.
1175  *
1176  * postcondition: it_state(it) == IAM_IT_DETACHED
1177  */
1178 void iam_it_put(struct iam_iterator *it)
1179 {
1180         if (it->ii_state != IAM_IT_DETACHED) {
1181                 it->ii_state = IAM_IT_DETACHED;
1182                 iam_leaf_fini(&it->ii_path.ip_leaf);
1183         }
1184 }
1185 EXPORT_SYMBOL(iam_it_put);
1186
1187 static struct iam_ikey *iam_it_ikey_get(const struct iam_iterator *it,
1188                                         struct iam_ikey *ikey);
1189
1190
1191 /*
1192  * This function increments the frame pointer to search the next leaf
1193  * block, and reads in the necessary intervening nodes if the search
1194  * should be necessary.  Whether or not the search is necessary is
1195  * controlled by the hash parameter.  If the hash value is even, then
1196  * the search is only continued if the next block starts with that
1197  * hash value.  This is used if we are searching for a specific file.
1198  *
1199  * If the hash value is HASH_NB_ALWAYS, then always go to the next block.
1200  *
1201  * This function returns 1 if the caller should continue to search,
1202  * or 0 if it should not.  If there is an error reading one of the
1203  * index blocks, it will return a negative error code.
1204  *
1205  * If start_hash is non-null, it will be filled in with the starting
1206  * hash of the next page.
1207  */
1208 static int iam_htree_advance(struct inode *dir, __u32 hash,
1209                               struct iam_path *path, __u32 *start_hash,
1210                               int compat)
1211 {
1212         struct iam_frame *p;
1213         struct buffer_head *bh;
1214         int err, num_frames = 0;
1215         __u32 bhash;
1216
1217         p = path->ip_frame;
1218         /*
1219          * Find the next leaf page by incrementing the frame pointer.
1220          * If we run out of entries in the interior node, loop around and
1221          * increment pointer in the parent node.  When we break out of
1222          * this loop, num_frames indicates the number of interior
1223  * nodes that need to be read.
1224          */
1225         while (1) {
1226                 do_corr(schedule());
1227                 iam_lock_bh(p->bh);
1228                 p->at = iam_entry_shift(path, p->at, +1);
1229                 if (p->at < iam_entry_shift(path, p->entries,
1230                                             dx_get_count(p->entries))) {
1231                         p->leaf = dx_get_block(path, p->at);
1232                         iam_unlock_bh(p->bh);
1233                         break;
1234                 }
1235                 iam_unlock_bh(p->bh);
1236                 if (p == path->ip_frames)
1237                         return 0;
1238                 num_frames++;
1239                 --p;
1240         }
1241
1242         if (compat) {
1243                 /*
1244                  * Htree hash magic.
1245                  */
1246                 /*
1247                  * If the hash is 1, then continue only if the next page has a
1248                  * continuation hash of any value.  This is used for readdir
1249                  * handling.  Otherwise, check to see if the hash matches the
1250                  * desired continuation hash.  If it doesn't, return since
1251                  * there's no point to read in the successive index pages.
1252                  */
1253                 dx_get_ikey(path, p->at, (struct iam_ikey *)&bhash);
1254                 if (start_hash)
1255                         *start_hash = bhash;
1256                 if ((hash & 1) == 0) {
1257                         if ((bhash & ~1) != hash)
1258                                 return 0;
1259                 }
1260         }
1261         /*
1262          * If the hash is HASH_NB_ALWAYS, we always go to the next
1263          * block so no check is necessary
1264          */
1265         while (num_frames--) {
1266                 iam_ptr_t idx;
1267
1268                 do_corr(schedule());
1269                 iam_lock_bh(p->bh);
1270                 idx = p->leaf = dx_get_block(path, p->at);
1271                 iam_unlock_bh(p->bh);
1272                 err = iam_path_descr(path)->id_ops->
1273                         id_node_read(path->ip_container, idx, NULL, &bh);
1274                 if (err != 0)
1275                         return err; /* Failure */
1276                 ++p;
1277                 brelse(p->bh);
1278                 assert_corr(p->bh != bh);
1279                 p->bh = bh;
1280                 p->entries = dx_node_get_entries(path, p);
1281                 p->at = iam_entry_shift(path, p->entries, !compat);
1282                 assert_corr(p->curidx != idx);
1283                 p->curidx = idx;
1284                 iam_lock_bh(p->bh);
1285                 assert_corr(p->leaf != dx_get_block(path, p->at));
1286                 p->leaf = dx_get_block(path, p->at);
1287                 iam_unlock_bh(p->bh);
1288                 assert_inv(dx_node_check(path, p));
1289         }
1290         return 1;
1291 }
1292
1293
1294 static inline int iam_index_advance(struct iam_path *path)
1295 {
1296         return iam_htree_advance(iam_path_obj(path), 0, path, NULL, 0);
1297 }
1298
1299 static void iam_unlock_array(struct iam_container *ic,
1300                              struct dynlock_handle **lh)
1301 {
1302         int i;
1303
1304         for (i = 0; i < DX_MAX_TREE_HEIGHT; ++i, ++lh) {
1305                 if (*lh != NULL) {
1306                         iam_unlock_htree(ic, *lh);
1307                         *lh = NULL;
1308                 }
1309         }
1310 }
1311 /*
1312  * Advance index part of @path to point to the next leaf. Returns 1 on
1313  * success, 0 when the end of the container was reached. Leaf node is locked.
1314  */
1315 int iam_index_next(struct iam_container *c, struct iam_path *path)
1316 {
1317         iam_ptr_t cursor;
1318         struct dynlock_handle *lh[DX_MAX_TREE_HEIGHT] = { 0, };
1319         int result;
1320         struct inode *object;
1321
1322         /*
1323          * Locking for iam_index_next()... is to be described.
1324          */
1325
1326         object = c->ic_object;
1327         cursor = path->ip_frame->leaf;
1328
1329         while (1) {
1330                 result = iam_index_lock(path, lh);
1331                 do_corr(schedule());
1332                 if (result < 0)
1333                         break;
1334
1335                 result = iam_check_full_path(path, 0);
1336                 if (result == 0 && cursor == path->ip_frame->leaf) {
1337                         result = iam_index_advance(path);
1338
1339                         assert_corr(result == 0 ||
1340                                     cursor != path->ip_frame->leaf);
1341                         break;
1342                 }
1343                 do {
1344                         iam_unlock_array(c, lh);
1345
1346                         iam_path_release(path);
1347                         do_corr(schedule());
1348
1349                         result = __iam_path_lookup(path);
1350                         if (result < 0)
1351                                 break;
1352
1353                         while (path->ip_frame->leaf != cursor) {
1354                                 do_corr(schedule());
1355
1356                                 result = iam_index_lock(path, lh);
1357                                 do_corr(schedule());
1358                                 if (result < 0)
1359                                         break;
1360
1361                                 result = iam_check_full_path(path, 0);
1362                                 if (result != 0)
1363                                         break;
1364
1365                                 result = iam_index_advance(path);
1366                                 if (result == 0) {
1367                                         CERROR("cannot find cursor : %u\n",
1368                                                 cursor);
1369                                         result = -EIO;
1370                                 }
1371                                 if (result < 0)
1372                                         break;
1373                                 result = iam_check_full_path(path, 0);
1374                                 if (result != 0)
1375                                         break;
1376                                 iam_unlock_array(c, lh);
1377                         }
1378                 } while (result == -EAGAIN);
1379                 if (result < 0)
1380                         break;
1381         }
1382         iam_unlock_array(c, lh);
1383         return result;
1384 }
1385
1386 /*
1387  * Move iterator one record right.
1388  *
1389  * Return value: 0: success,
1390  *              +1: end of container reached
1391  *             -ve: error
1392  *
1393  * precondition:  (it_state(it) == IAM_IT_ATTACHED ||
1394  *                 it_state(it) == IAM_IT_SKEWED) && it->ii_flags&IAM_IT_MOVE
1395  * postcondition: ergo(result == 0, it_state(it) == IAM_IT_ATTACHED) &&
1396  *                ergo(result >  0, it_state(it) == IAM_IT_DETACHED)
1397  */
1398 int iam_it_next(struct iam_iterator *it)
1399 {
1400         int result;
1401         struct iam_path      *path;
1402         struct iam_leaf      *leaf;
1403         struct inode         *obj;
1404         do_corr(struct iam_ikey *ik_orig);
1405
1406         /* assert_corr(it->ii_flags&IAM_IT_MOVE); */
1407         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1408                     it_state(it) == IAM_IT_SKEWED);
1409
1410         path = &it->ii_path;
1411         leaf = &path->ip_leaf;
1412         obj  = iam_path_obj(path);
1413
1414         assert_corr(iam_leaf_is_locked(leaf));
1415
1416         result = 0;
1417         do_corr(ik_orig = it_at_rec(it) ?
1418                 iam_it_ikey_get(it, iam_path_ikey(path, 2)) : NULL);
1419         if (it_before(it)) {
1420                 assert_corr(!iam_leaf_at_end(leaf));
1421                 it->ii_state = IAM_IT_ATTACHED;
1422         } else {
1423                 if (!iam_leaf_at_end(leaf))
1424                         /* advance within leaf node */
1425                         iam_leaf_next(leaf);
1426                 /*
1427                  * multiple iterations may be necessary due to empty leaves.
1428                  */
1429                 while (result == 0 && iam_leaf_at_end(leaf)) {
1430                         do_corr(schedule());
1431                         /* advance index portion of the path */
1432                         result = iam_index_next(iam_it_container(it), path);
1433                         assert_corr(iam_leaf_is_locked(leaf));
1434                         if (result == 1) {
1435                                 struct dynlock_handle *lh;
1436                                 lh = iam_lock_htree(iam_it_container(it),
1437                                                     path->ip_frame->leaf,
1438                                                     DLT_WRITE);
1439                                 if (lh != NULL) {
1440                                         iam_leaf_fini(leaf);
1441                                         leaf->il_lock = lh;
1442                                         result = iam_leaf_load(path);
1443                                         if (result == 0)
1444                                                 iam_leaf_start(leaf);
1445                                 } else
1446                                         result = -ENOMEM;
1447                         } else if (result == 0)
1448                                 /* end of container reached */
1449                                 result = +1;
1450                         if (result != 0)
1451                                 iam_it_put(it);
1452                 }
1453                 if (result == 0)
1454                         it->ii_state = IAM_IT_ATTACHED;
1455         }
1456         assert_corr(ergo(result == 0, it_state(it) == IAM_IT_ATTACHED));
1457         assert_corr(ergo(result >  0, it_state(it) == IAM_IT_DETACHED));
1458         assert_corr(ergo(result == 0 && ik_orig != NULL,
1459                          it_ikeycmp(it, ik_orig) >= 0));
1460         return result;
1461 }
1462 EXPORT_SYMBOL(iam_it_next);
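/*
 * Usage sketch (illustration only, not compiled): a minimal forward scan over
 * the records whose keys are not less than @start, built from the iterator
 * calls defined in this file.  The container, path descriptor and the
 * consume() callback are assumed to be supplied by the caller; consume() is a
 * hypothetical stand-in for whatever is done with each record.
 */
#if 0
static int sketch_iam_scan(struct iam_container *c, struct iam_path_descr *pd,
                           const struct iam_key *start,
                           void (*consume)(const struct iam_key *k,
                                           const struct iam_rec *r))
{
        struct iam_iterator it;
        int result;

        iam_container_read_lock(c);
        iam_it_init(&it, c, IAM_IT_MOVE, pd);

        result = iam_it_get_at(&it, start);
        while (result >= 0 && it_state(&it) == IAM_IT_ATTACHED) {
                /* pointers are valid only while the leaf stays pinned/locked */
                consume(iam_it_key_get(&it), iam_it_rec_get(&it));
                /* 0: moved to the next record, +1: end of container, -ve: error */
                result = iam_it_next(&it);
        }
        iam_it_put(&it);
        iam_it_fini(&it);
        iam_container_read_unlock(c);
        return result < 0 ? result : 0;
}
#endif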
1463
1464 /*
1465  * Return pointer to the record under iterator.
1466  *
1467  * precondition:  it_state(it) == IAM_IT_ATTACHED && it_at_rec(it)
1468  * postcondition: it_state(it) == IAM_IT_ATTACHED
1469  */
1470 struct iam_rec *iam_it_rec_get(const struct iam_iterator *it)
1471 {
1472         assert_corr(it_state(it) == IAM_IT_ATTACHED);
1473         assert_corr(it_at_rec(it));
1474         return iam_leaf_rec(&it->ii_path.ip_leaf);
1475 }
1476 EXPORT_SYMBOL(iam_it_rec_get);
1477
1478 static void iam_it_reccpy(struct iam_iterator *it, const struct iam_rec *r)
1479 {
1480         struct iam_leaf *folio;
1481
1482         folio = &it->ii_path.ip_leaf;
1483         iam_leaf_ops(folio)->rec_set(folio, r);
1484 }
1485
1486 /*
1487  * Replace contents of record under iterator.
1488  *
1489  * precondition:  it_state(it) == IAM_IT_ATTACHED &&
1490  *                it->ii_flags&IAM_IT_WRITE
1491  * postcondition: it_state(it) == IAM_IT_ATTACHED &&
1492  *                ergo(result == 0, !memcmp(iam_it_rec_get(it), r, ...))
1493  */
1494 int iam_it_rec_set(handle_t *h,
1495                    struct iam_iterator *it, const struct iam_rec *r)
1496 {
1497         int result;
1498         struct iam_path *path;
1499         struct buffer_head *bh;
1500
1501         assert_corr(it_state(it) == IAM_IT_ATTACHED &&
1502                     it->ii_flags&IAM_IT_WRITE);
1503         assert_corr(it_at_rec(it));
1504
1505         path = &it->ii_path;
1506         bh   = path->ip_leaf.il_bh;
1507         result = iam_txn_add(h, path, bh);
1508         if (result == 0) {
1509                 iam_it_reccpy(it, r);
1510                 result = iam_txn_dirty(h, path, bh);
1511         }
1512         return result;
1513 }
1514 EXPORT_SYMBOL(iam_it_rec_set);
1515
1516 /*
1517  * Return pointer to the index key under iterator.
1518  *
1519  * precondition:  it_state(it) == IAM_IT_ATTACHED ||
1520  *                it_state(it) == IAM_IT_SKEWED
1521  */
1522 static struct iam_ikey *iam_it_ikey_get(const struct iam_iterator *it,
1523                                         struct iam_ikey *ikey)
1524 {
1525         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1526                     it_state(it) == IAM_IT_SKEWED);
1527         assert_corr(it_at_rec(it));
1528         return iam_leaf_ikey(&it->ii_path.ip_leaf, ikey);
1529 }
1530
1531 /*
1532  * Return pointer to the key under iterator.
1533  *
1534  * precondition:  it_state(it) == IAM_IT_ATTACHED ||
1535  *                it_state(it) == IAM_IT_SKEWED
1536  */
1537 struct iam_key *iam_it_key_get(const struct iam_iterator *it)
1538 {
1539         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1540                     it_state(it) == IAM_IT_SKEWED);
1541         assert_corr(it_at_rec(it));
1542         return iam_leaf_key(&it->ii_path.ip_leaf);
1543 }
1544 EXPORT_SYMBOL(iam_it_key_get);
1545
1546 /*
1547  * Return size of key under iterator (in bytes)
1548  *
1549  * precondition:  it_state(it) == IAM_IT_ATTACHED ||
1550  *                it_state(it) == IAM_IT_SKEWED
1551  */
1552 int iam_it_key_size(const struct iam_iterator *it)
1553 {
1554         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1555                     it_state(it) == IAM_IT_SKEWED);
1556         assert_corr(it_at_rec(it));
1557         return iam_leaf_key_size(&it->ii_path.ip_leaf);
1558 }
1559 EXPORT_SYMBOL(iam_it_key_size);
1560
1561 /*
1562  * Insertion of a new record. Interaction with jbd in the non-trivial case
1563  * (when a split happens) is as follows:
1564  *
1565  *  - new leaf node is involved into transaction by ldiskfs_append();
1566  *
1567  *  - old leaf node is involved into transaction by iam_add_rec();
1568  *
1569  *  - leaf where insertion point ends in, is marked dirty by iam_add_rec();
1570  *
1571  *  - leaf without insertion point is marked dirty (as @new_leaf) by
1572  *  iam_new_leaf();
1573  *
1574  *  - split index nodes are involved into transaction and marked dirty by
1575  *  split_index_node().
1576  *
1577  *  - "safe" index node, which is not split, but where the new pointer is
1578  *  inserted, is involved into transaction and marked dirty by split_index_node().
1579  *
1580  *  - index node where pointer to new leaf is inserted is involved into
1581  *  transaction by split_index_node() and marked dirty by iam_add_rec().
1582  *
1583  *  - inode is marked dirty by iam_add_rec().
1584  *
1585  */
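/*
 * The per-buffer journaling pattern behind the steps above is the usual jbd
 * one (sketch only; iam_txn_add() and iam_txn_dirty() essentially wrap these
 * calls):
 *
 *      err = ldiskfs_journal_get_write_access(handle, bh);
 *      if (err == 0) {
 *              modify bh->b_data;
 *              err = ldiskfs_journal_dirty_metadata(handle, bh);
 *      }
 *
 * i.e. a buffer is first attached to the running transaction, then modified,
 * then marked dirty so that jbd logs the change.
 */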
1586
1587 static int iam_new_leaf(handle_t *handle, struct iam_leaf *leaf)
1588 {
1589         int err;
1590         iam_ptr_t blknr;
1591         struct buffer_head   *new_leaf;
1592         struct buffer_head   *old_leaf;
1593         struct iam_container *c;
1594         struct inode         *obj;
1595         struct iam_path      *path;
1596
1597         assert_inv(iam_leaf_check(leaf));
1598
1599         c = iam_leaf_container(leaf);
1600         path = leaf->il_path;
1601
1602         obj = c->ic_object;
1603         new_leaf = ldiskfs_append(handle, obj, (__u32 *)&blknr, &err);
1604         do_corr(schedule());
1605         if (new_leaf != NULL) {
1606                 struct dynlock_handle *lh;
1607
1608                 lh = iam_lock_htree(c, blknr, DLT_WRITE);
1609                 do_corr(schedule());
1610                 if (lh != NULL) {
1611                         iam_leaf_ops(leaf)->init_new(c, new_leaf);
1612                         do_corr(schedule());
1613                         old_leaf = leaf->il_bh;
1614                         iam_leaf_split(leaf, &new_leaf, blknr);
1615                         if (old_leaf != leaf->il_bh) {
1616                                 /*
1617                                  * Switched to the new leaf.
1618                                  */
1619                                 iam_leaf_unlock(leaf);
1620                                 leaf->il_lock = lh;
1621                                 path->ip_frame->leaf = blknr;
1622                         } else
1623                                 iam_unlock_htree(path->ip_container, lh);
1624                         do_corr(schedule());
1625                         err = iam_txn_dirty(handle, path, new_leaf);
1626                         brelse(new_leaf);
1627                         if (err == 0)
1628                                 err = ldiskfs_mark_inode_dirty(handle, obj);
1629                         do_corr(schedule());
1630                 } else
1631                         err = -ENOMEM;
1632         }
1633         assert_inv(iam_leaf_check(leaf));
1634         assert_inv(iam_leaf_check(&iam_leaf_path(leaf)->ip_leaf));
1635         assert_inv(iam_path_check(iam_leaf_path(leaf)));
1636         return err;
1637 }
1638
1639 static inline void dx_set_limit(struct iam_entry *entries, unsigned value)
1640 {
1641         ((struct dx_countlimit *) entries)->limit = cpu_to_le16(value);
1642 }
1643
1644 static int iam_shift_entries(struct iam_path *path,
1645                          struct iam_frame *frame, unsigned count,
1646                          struct iam_entry *entries, struct iam_entry *entries2,
1647                          u32 newblock)
1648 {
1649         unsigned count1;
1650         unsigned count2;
1651         int delta;
1652
1653         struct iam_frame *parent = frame - 1;
1654         struct iam_ikey *pivot = iam_path_ikey(path, 3);
1655
1656         delta = dx_index_is_compat(path) ? 0 : +1;
1657
1658         count1 = count/2 + delta;
1659         count2 = count - count1;
1660         dx_get_ikey(path, iam_entry_shift(path, entries, count1), pivot);
1661
1662         dxtrace(printk("Split index %d/%d\n", count1, count2));
1663
1664         memcpy((char *) iam_entry_shift(path, entries2, delta),
1665                (char *) iam_entry_shift(path, entries, count1),
1666                count2 * iam_entry_size(path));
1667
1668         dx_set_count(entries2, count2 + delta);
1669         dx_set_limit(entries2, dx_node_limit(path));
1670
1671         /*
1672          * NOTE: very subtle piece of code. A competing dx_probe() may find the
1673          * 2nd level index in the root index; we then insert the new index here
1674          * and set the new count in that 2nd level index, so dx_probe() may see
1675          * a 2nd level index without the hash it looks for. The fix is to check
1676          * the root index again after locking the just-found 2nd level index -bzzz
1677          */
1678         iam_insert_key_lock(path, parent, pivot, newblock);
1679
1680         /*
1681          * now the old and new 2nd level index blocks contain all pointers, so
1682          * dx_probe() may find it in either of them.  That is OK -bzzz
1683          */
1684         iam_lock_bh(frame->bh);
1685         dx_set_count(entries, count1);
1686         iam_unlock_bh(frame->bh);
1687
1688         /*
1689          * now the old 2nd level index block points to the first half of the
1690          * leaves. It is important that dx_probe() checks the root index block
1691          * for changes under dx_lock_bh(frame->bh) -bzzz
1692          */
1693
1694         return count1;
1695 }
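/*
 * Worked example of the split arithmetic above (illustration only): with
 * count == 10 entries in a non-compat (iam) node, delta == +1, so
 * count1 == 10/2 + 1 == 6 entries stay in the old block and count2 == 4 move
 * to the new one; the pivot ikey is taken from entry number count1.  For an
 * ldiskfs-compatible node delta == 0 and the split is an even 5/5.
 */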
1696
1697
1698 int split_index_node(handle_t *handle, struct iam_path *path,
1699                      struct dynlock_handle **lh)
1700 {
1701
1702         struct iam_entry *entries;   /* old block contents */
1703         struct iam_entry *entries2;  /* new block contents */
1704         struct iam_frame *frame, *safe;
1705         struct buffer_head *bh_new[DX_MAX_TREE_HEIGHT] = {0};
1706         u32 newblock[DX_MAX_TREE_HEIGHT] = {0};
1707         struct dynlock_handle *lock[DX_MAX_TREE_HEIGHT] = {NULL,};
1708         struct dynlock_handle *new_lock[DX_MAX_TREE_HEIGHT] = {NULL,};
1709         struct inode *dir = iam_path_obj(path);
1710         struct iam_descr *descr;
1711         int nr_splet;
1712         int i, err;
1713
1714         descr = iam_path_descr(path);
1715         /*
1716          * Algorithm below depends on this.
1717          */
1718         assert_corr(dx_root_limit(path) < dx_node_limit(path));
1719
1720         frame = path->ip_frame;
1721         entries = frame->entries;
1722
1723         /*
1724          * Tall-tree handling: we might have to split multiple index blocks
1725          * all the way up to tree root. Tricky point here is error handling:
1726          * to avoid complicated undo/rollback we
1727          *
1728          *   - first allocate all necessary blocks
1729          *
1730          *   - insert pointers into them atomically.
1731          */
1732
1733         /*
1734          * Locking: leaf is already locked. htree-locks are acquired on all
1735          * index nodes that require split bottom-to-top, on the "safe" node,
1736          * and on all new nodes
1737          */
1738
1739         dxtrace(printk("using %u of %u node entries\n",
1740                        dx_get_count(entries), dx_get_limit(entries)));
1741
1742         /* What levels need split? */
1743         for (nr_splet = 0; frame >= path->ip_frames &&
1744              dx_get_count(frame->entries) == dx_get_limit(frame->entries);
1745              --frame, ++nr_splet) {
1746                 do_corr(schedule());
1747                 if (nr_splet == DX_MAX_TREE_HEIGHT) {
1748                         /*
1749                         CWARN(dir->i_sb, __FUNCTION__,
1750                                      "Directory index full!\n");
1751                                      */
1752                         err = -ENOSPC;
1753                         goto cleanup;
1754                 }
1755         }
1756
1757         safe = frame;
1758
1759         /*
1760          * Lock all nodes, bottom to top.
1761          */
1762         for (frame = path->ip_frame, i = nr_splet; i >= 0; --i, --frame) {
1763                 do_corr(schedule());
1764                 lock[i] = iam_lock_htree(path->ip_container, frame->curidx,
1765                                          DLT_WRITE);
1766                 if (lock[i] == NULL) {
1767                         err = -ENOMEM;
1768                         goto cleanup;
1769                 }
1770         }
1771
1772         /*
1773          * Check for concurrent index modification.
1774          */
1775         err = iam_check_full_path(path, 1);
1776         if (err)
1777                 goto cleanup;
1778         /*
1779          * And check that the same number of nodes is to be split.
1780          */
1781         for (i = 0, frame = path->ip_frame; frame >= path->ip_frames &&
1782              dx_get_count(frame->entries) == dx_get_limit(frame->entries);
1783              --frame, ++i) {
1784                 ;
1785         }
1786         if (i != nr_splet) {
1787                 err = -EAGAIN;
1788                 goto cleanup;
1789         }
1790
1791         /* Go back down, allocating blocks, locking them, and adding them to
1792          * the transaction... */
1793         for (frame = safe + 1, i = 0; i < nr_splet; ++i, ++frame) {
1794                 bh_new[i] = ldiskfs_append(handle, dir, &newblock[i], &err);
1795                 do_corr(schedule());
1796                 if (!bh_new[i] ||
1797                     descr->id_ops->id_node_init(path->ip_container,
1798                                                 bh_new[i], 0) != 0)
1799                         goto cleanup;
1800                 new_lock[i] = iam_lock_htree(path->ip_container, newblock[i],
1801                                              DLT_WRITE);
1802                 if (new_lock[i] == NULL) {
1803                         err = -ENOMEM;
1804                         goto cleanup;
1805                 }
1806                 do_corr(schedule());
1807                 BUFFER_TRACE(frame->bh, "get_write_access");
1808                 err = ldiskfs_journal_get_write_access(handle, frame->bh);
1809                 if (err)
1810                         goto journal_error;
1811         }
1812         /* Add "safe" node to transaction too */
1813         if (safe + 1 != path->ip_frames) {
1814                 do_corr(schedule());
1815                 err = ldiskfs_journal_get_write_access(handle, safe->bh);
1816                 if (err)
1817                         goto journal_error;
1818         }
1819
1820         /* Go through nodes once more, inserting pointers */
1821         for (frame = safe + 1, i = 0; i < nr_splet; ++i, ++frame) {
1822                 unsigned count;
1823                 int idx;
1824                 struct buffer_head *bh2;
1825                 struct buffer_head *bh;
1826
1827                 entries = frame->entries;
1828                 count = dx_get_count(entries);
1829                 idx = iam_entry_diff(path, frame->at, entries);
1830
1831                 bh2 = bh_new[i];
1832                 entries2 = dx_get_entries(path, bh2->b_data, 0);
1833
1834                 bh = frame->bh;
1835                 if (frame == path->ip_frames) {
1836                         /* splitting root node. Tricky point:
1837                          *
1838                          * In the "normal" B-tree we'd split root *and* add
1839                          * new root to the tree with pointers to the old root
1840                          * and its sibling (thus introducing two new nodes).
1841                          *
1842                          * In htree it's enough to add one node, because
1843                          * capacity of the root node is smaller than that of
1844                          * non-root one.
1845                          */
1846                         struct iam_frame *frames;
1847                         struct iam_entry *next;
1848
1849                         assert_corr(i == 0);
1850
1851                         do_corr(schedule());
1852
1853                         frames = path->ip_frames;
1854                         memcpy((char *) entries2, (char *) entries,
1855                                count * iam_entry_size(path));
1856                         dx_set_limit(entries2, dx_node_limit(path));
1857
1858                         /* Set up root */
1859                         iam_lock_bh(frame->bh);
1860                         next = descr->id_ops->id_root_inc(path->ip_container,
1861                                                           path, frame);
1862                         dx_set_block(path, next, newblock[0]);
1863                         iam_unlock_bh(frame->bh);
1864
1865                         do_corr(schedule());
1866                         /* Shift frames in the path */
1867                         memmove(frames + 2, frames + 1,
1868                                 (sizeof path->ip_frames) - 2 * sizeof frames[0]);
1869                         /* Add new access path frame */
1870                         frames[1].at = iam_entry_shift(path, entries2, idx);
1871                         frames[1].entries = entries = entries2;
1872                         frames[1].bh = bh2;
1873                         assert_inv(dx_node_check(path, frame));
1874                         ++path->ip_frame;
1875                         ++frame;
1876                         assert_inv(dx_node_check(path, frame));
1877                         bh_new[0] = NULL; /* buffer head is "consumed" */
1878                         err = ldiskfs_journal_get_write_access(handle, bh2);
1879                         if (err)
1880                                 goto journal_error;
1881                         do_corr(schedule());
1882                 } else {
1883                         /* splitting non-root index node. */
1884                         struct iam_frame *parent = frame - 1;
1885
1886                         do_corr(schedule());
1887                         count = iam_shift_entries(path, frame, count,
1888                                               entries, entries2, newblock[i]);
1889                         /* Which index block gets the new entry? */
1890                         if (idx >= count) {
1891                                 int d = dx_index_is_compat(path) ? 0 : +1;
1892
1893                                 frame->at = iam_entry_shift(path, entries2,
1894                                                             idx - count + d);
1895                                 frame->entries = entries = entries2;
1896                                 frame->curidx = newblock[i];
1897                                 swap(frame->bh, bh2);
1898                                 assert_corr(lock[i + 1] != NULL);
1899                                 assert_corr(new_lock[i] != NULL);
1900                                 swap(lock[i + 1], new_lock[i]);
1901                                 bh_new[i] = bh2;
1902                                 parent->at = iam_entry_shift(path,
1903                                                              parent->at, +1);
1904                         }
1905                         assert_inv(dx_node_check(path, frame));
1906                         assert_inv(dx_node_check(path, parent));
1907                         dxtrace(dx_show_index ("node", frame->entries));
1908                         dxtrace(dx_show_index ("node",
1909                                ((struct dx_node *) bh2->b_data)->entries));
1910                         err = ldiskfs_journal_dirty_metadata(handle, bh2);
1911                         if (err)
1912                                 goto journal_error;
1913                         do_corr(schedule());
1914                         err = ldiskfs_journal_dirty_metadata(handle, parent->bh);
1915                         if (err)
1916                                 goto journal_error;
1917                 }
1918                 do_corr(schedule());
1919                 err = ldiskfs_journal_dirty_metadata(handle, bh);
1920                 if (err)
1921                         goto journal_error;
1922         }
1923         /*
1924          * This function was called to make insertion of the new leaf
1925          * possible. Check that it fulfilled its obligations.
1926          */
1927         assert_corr(dx_get_count(path->ip_frame->entries) <
1928                     dx_get_limit(path->ip_frame->entries));
1929         assert_corr(lock[nr_splet] != NULL);
1930         *lh = lock[nr_splet];
1931         lock[nr_splet] = NULL;
1932         if (nr_splet > 0) {
1933                 /*
1934                  * Log ->i_size modification.
1935                  */
1936                 err = ldiskfs_mark_inode_dirty(handle, dir);
1937                 if (err)
1938                         goto journal_error;
1939         }
1940         goto cleanup;
1941 journal_error:
1942         ldiskfs_std_error(dir->i_sb, err);
1943
1944 cleanup:
1945         iam_unlock_array(path->ip_container, lock);
1946         iam_unlock_array(path->ip_container, new_lock);
1947
1948         assert_corr(err || iam_frame_is_locked(path, path->ip_frame));
1949
1950         do_corr(schedule());
1951         for (i = 0; i < ARRAY_SIZE(bh_new); ++i) {
1952                 if (bh_new[i] != NULL)
1953                         brelse(bh_new[i]);
1954         }
1955         return err;
1956 }
1957
1958 static int iam_add_rec(handle_t *handle, struct iam_iterator *it,
1959                        struct iam_path *path,
1960                        const struct iam_key *k, const struct iam_rec *r)
1961 {
1962         int err;
1963         struct iam_leaf *leaf;
1964
1965         leaf = &path->ip_leaf;
1966         assert_inv(iam_leaf_check(leaf));
1967         assert_inv(iam_path_check(path));
1968         err = iam_txn_add(handle, path, leaf->il_bh);
1969         if (err == 0) {
1970                 do_corr(schedule());
1971                 if (!iam_leaf_can_add(leaf, k, r)) {
1972                         struct dynlock_handle *lh = NULL;
1973
1974                         do {
1975                                 assert_corr(lh == NULL);
1976                                 do_corr(schedule());
1977                                 err = split_index_node(handle, path, &lh);
1978                                 if (err == -EAGAIN) {
1979                                         assert_corr(lh == NULL);
1980
1981                                         iam_path_fini(path);
1982                                         it->ii_state = IAM_IT_DETACHED;
1983
1984                                         do_corr(schedule());
1985                                         err = iam_it_get_exact(it, k);
1986                                         if (err == -ENOENT)
1987                                                 err = +1; /* repeat split */
1988                                         else if (err == 0)
1989                                                 err = -EEXIST;
1990                                 }
1991                         } while (err > 0);
1992                         assert_inv(iam_path_check(path));
1993                         if (err == 0) {
1994                                 assert_corr(lh != NULL);
1995                                 do_corr(schedule());
1996                                 err = iam_new_leaf(handle, leaf);
1997                                 if (err == 0)
1998                                         err = iam_txn_dirty(handle, path,
1999                                                             path->ip_frame->bh);
2000                         }
2001                         iam_unlock_htree(path->ip_container, lh);
2002                         do_corr(schedule());
2003                 }
2004                 if (err == 0) {
2005                         iam_leaf_rec_add(leaf, k, r);
2006                         err = iam_txn_dirty(handle, path, leaf->il_bh);
2007                 }
2008         }
2009         assert_inv(iam_leaf_check(leaf));
2010         assert_inv(iam_leaf_check(&path->ip_leaf));
2011         assert_inv(iam_path_check(path));
2012         return err;
2013 }
2014
2015 /*
2016  * Insert new record with key @k and contents from @r, shifting records to the
2017  * right. On success, iterator is positioned on the newly inserted record.
2018  *
2019  * precondition: it->ii_flags&IAM_IT_WRITE &&
2020  *               (it_state(it) == IAM_IT_ATTACHED ||
2021  *                it_state(it) == IAM_IT_SKEWED) &&
2022  *               ergo(it_state(it) == IAM_IT_ATTACHED,
2023  *                    it_keycmp(it, k) <= 0) &&
2024  *               ergo(it_before(it), it_keycmp(it, k) > 0));
2025  * postcondition: ergo(result == 0,
2026  *                     it_state(it) == IAM_IT_ATTACHED &&
2027  *                     it_keycmp(it, k) == 0 &&
2028  *                     !memcmp(iam_it_rec_get(it), r, ...))
2029  */
2030 int iam_it_rec_insert(handle_t *h, struct iam_iterator *it,
2031                       const struct iam_key *k, const struct iam_rec *r)
2032 {
2033         int result;
2034         struct iam_path *path;
2035
2036         path = &it->ii_path;
2037
2038         assert_corr(it->ii_flags&IAM_IT_WRITE);
2039         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
2040                     it_state(it) == IAM_IT_SKEWED);
2041         assert_corr(ergo(it_state(it) == IAM_IT_ATTACHED,
2042                          it_keycmp(it, k) <= 0));
2043         assert_corr(ergo(it_before(it), it_keycmp(it, k) > 0));
2044         result = iam_add_rec(h, it, path, k, r);
2045         if (result == 0)
2046                 it->ii_state = IAM_IT_ATTACHED;
2047         assert_corr(ergo(result == 0,
2048                          it_state(it) == IAM_IT_ATTACHED &&
2049                          it_keycmp(it, k) == 0));
2050         return result;
2051 }
2052 EXPORT_SYMBOL(iam_it_rec_insert);
2053
2054 /*
2055  * Delete record under iterator.
2056  *
2057  * precondition:  it_state(it) == IAM_IT_ATTACHED &&
2058  *                it->ii_flags&IAM_IT_WRITE &&
2059  *                it_at_rec(it)
2060  * postcondition: it_state(it) == IAM_IT_ATTACHED ||
2061  *                it_state(it) == IAM_IT_DETACHED
2062  */
2063 int iam_it_rec_delete(handle_t *h, struct iam_iterator *it)
2064 {
2065         int result;
2066         struct iam_leaf *leaf;
2067         struct iam_path *path;
2068
2069         assert_corr(it_state(it) == IAM_IT_ATTACHED &&
2070                     it->ii_flags&IAM_IT_WRITE);
2071         assert_corr(it_at_rec(it));
2072
2073         path = &it->ii_path;
2074         leaf = &path->ip_leaf;
2075
2076         assert_inv(iam_leaf_check(leaf));
2077         assert_inv(iam_path_check(path));
2078
2079         result = iam_txn_add(h, path, leaf->il_bh);
2080         /*
2081          * no compaction for now.
2082          */
2083         if (result == 0) {
2084                 iam_rec_del(leaf, it->ii_flags&IAM_IT_MOVE);
2085                 result = iam_txn_dirty(h, path, leaf->il_bh);
2086                 if (result == 0 && iam_leaf_at_end(leaf) &&
2087                     it->ii_flags&IAM_IT_MOVE) {
2088                         result = iam_it_next(it);
2089                         if (result > 0)
2090                                 result = 0;
2091                 }
2092         }
2093         assert_inv(iam_leaf_check(leaf));
2094         assert_inv(iam_path_check(path));
2095         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
2096                     it_state(it) == IAM_IT_DETACHED);
2097         return result;
2098 }
2099 EXPORT_SYMBOL(iam_it_rec_delete);
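/*
 * Usage sketch (illustration only): deleting the record for key @k through
 * an iterator opened with IAM_IT_WRITE|IAM_IT_MOVE, assuming @c, @pd and @h
 * were prepared by the caller:
 *
 *      struct iam_iterator it;
 *      int rc;
 *
 *      iam_it_init(&it, c, IAM_IT_WRITE | IAM_IT_MOVE, pd);
 *      rc = iam_it_get_exact(&it, k);
 *      if (rc == 0)
 *              rc = iam_it_rec_delete(h, &it);
 *      iam_it_put(&it);
 *      iam_it_fini(&it);
 *
 * With IAM_IT_MOVE set, the iterator is advanced past the deleted record when
 * it was the last one in its leaf, which is what makes delete-while-scanning
 * loops possible.
 */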
2100
2101 /*
2102  * Convert iterator to cookie.
2103  *
2104  * precondition:  it_state(it) == IAM_IT_ATTACHED &&
2105  *                iam_path_descr(it->ii_path)->id_ikey_size <= sizeof(iam_pos_t)
2106  * postcondition: it_state(it) == IAM_IT_ATTACHED
2107  */
2108 iam_pos_t iam_it_store(const struct iam_iterator *it)
2109 {
2110         iam_pos_t result;
2111
2112         assert_corr(it_state(it) == IAM_IT_ATTACHED);
2113         assert_corr(it_at_rec(it));
2114         assert_corr(iam_it_container(it)->ic_descr->id_ikey_size <=
2115                     sizeof result);
2116
2117         result = 0;
2118         return *(iam_pos_t *)iam_it_ikey_get(it, (void *)&result);
2119 }
2120 EXPORT_SYMBOL(iam_it_store);
2121
2122 /*
2123  * Restore iterator from cookie.
2124  *
2125  * precondition:  it_state(it) == IAM_IT_DETACHED && it->ii_flags&IAM_IT_MOVE &&
2126  *                iam_path_descr(it->ii_path)->id_ikey_size <= sizeof(iam_pos_t)
2127  * postcondition: ergo(result == 0, it_state(it) == IAM_IT_ATTACHED &&
2128  *                                  iam_it_store(it) == pos)
2129  */
2130 int iam_it_load(struct iam_iterator *it, iam_pos_t pos)
2131 {
2132         assert_corr(it_state(it) == IAM_IT_DETACHED &&
2133                     it->ii_flags&IAM_IT_MOVE);
2134         assert_corr(iam_it_container(it)->ic_descr->id_ikey_size <= sizeof pos);
2135         return iam_it_iget(it, (struct iam_ikey *)&pos);
2136 }
2137 EXPORT_SYMBOL(iam_it_load);
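/*
 * Sketch of the cookie round trip used for resumable scans (illustration
 * only; error handling elided):
 *
 *      iam_pos_t pos;
 *
 *      pos = iam_it_store(it);
 *      iam_it_put(it);
 *      ...
 *      rc = iam_it_load(it, pos);
 *
 * The cookie is simply the index key of the current record, so this only
 * works when ->id_ikey_size fits into an iam_pos_t, as the assertions above
 * require; iam_it_load() additionally needs a detached iterator that was
 * initialized with IAM_IT_MOVE.
 */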
2138
2139 /***********************************************************************/
2140 /* invariants                                                          */
2141 /***********************************************************************/
2142
2143 static inline int ptr_inside(void *base, size_t size, void *ptr)
2144 {
2145         return (base <= ptr) && (ptr < base + size);
2146 }
2147
2148 int iam_frame_invariant(struct iam_frame *f)
2149 {
2150         return
2151                 (f->bh != NULL &&
2152                 f->bh->b_data != NULL &&
2153                 ptr_inside(f->bh->b_data, f->bh->b_size, f->entries) &&
2154                 ptr_inside(f->bh->b_data, f->bh->b_size, f->at) &&
2155                 f->entries <= f->at);
2156 }
2157 int iam_leaf_invariant(struct iam_leaf *l)
2158 {
2159         return
2160                 l->il_bh != NULL &&
2161                 l->il_bh->b_data != NULL &&
2162                 ptr_inside(l->il_bh->b_data, l->il_bh->b_size, l->il_entries) &&
2163                 ptr_inside(l->il_bh->b_data, l->il_bh->b_size, l->il_at) &&
2164                 l->il_entries <= l->il_at;
2165 }
2166
2167 int iam_path_invariant(struct iam_path *p)
2168 {
2169         int i;
2170
2171         if (p->ip_container == NULL ||
2172             p->ip_indirect < 0 || p->ip_indirect > DX_MAX_TREE_HEIGHT - 1 ||
2173             p->ip_frame != p->ip_frames + p->ip_indirect ||
2174             !iam_leaf_invariant(&p->ip_leaf))
2175                 return 0;
2176         for (i = 0; i < ARRAY_SIZE(p->ip_frames); ++i) {
2177                 if (i <= p->ip_indirect) {
2178                         if (!iam_frame_invariant(&p->ip_frames[i]))
2179                                 return 0;
2180                 }
2181         }
2182         return 1;
2183 }
2184
2185 int iam_it_invariant(struct iam_iterator *it)
2186 {
2187         return
2188                 (it->ii_state == IAM_IT_DETACHED ||
2189                  it->ii_state == IAM_IT_ATTACHED ||
2190                  it->ii_state == IAM_IT_SKEWED) &&
2191                 !(it->ii_flags & ~(IAM_IT_MOVE | IAM_IT_WRITE)) &&
2192                 ergo(it->ii_state == IAM_IT_ATTACHED ||
2193                      it->ii_state == IAM_IT_SKEWED,
2194                      iam_path_invariant(&it->ii_path) &&
2195                      equi(it_at_rec(it), it->ii_state == IAM_IT_SKEWED));
2196 }
2197
2198 /*
2199  * Search container @c for record with key @k. If record is found, its data
2200  * are moved into @r.
2201  *
2202  * Return values: 0: found, -ENOENT: not-found, -ve: error
2203  */
2204 int iam_lookup(struct iam_container *c, const struct iam_key *k,
2205                struct iam_rec *r, struct iam_path_descr *pd)
2206 {
2207         struct iam_iterator it;
2208         int result;
2209
2210         iam_it_init(&it, c, 0, pd);
2211
2212         result = iam_it_get_exact(&it, k);
2213         if (result == 0)
2214                 /*
2215                  * record with required key found, copy it into user buffer
2216                  */
2217                 iam_reccpy(&it.ii_path.ip_leaf, r);
2218         iam_it_put(&it);
2219         iam_it_fini(&it);
2220         return result;
2221 }
2222 EXPORT_SYMBOL(iam_lookup);
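/*
 * Caller sketch (illustration only): point lookup of key @k into a record
 * buffer @rec, assuming the container @c and path descriptor @pd were
 * initialized elsewhere:
 *
 *      rc = iam_lookup(c, k, rec, pd);
 *      if (rc == -ENOENT)
 *              the key is not present;
 *      else if (rc < 0)
 *              an I/O or other error occurred;
 *      else
 *              @rec now holds a copy of the record;
 */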
2223
2224 /*
2225  * Insert new record @r with key @k into container @c (within context of
2226  * transaction @h).
2227  *
2228  * Return values: 0: success, -ve: error, including -EEXIST when record with
2229  * given key is already present.
2230  *
2231  * postcondition: ergo(result == 0 || result == -EEXIST,
2232  *                     iam_lookup(c, k, r2) == 0)
2233  */
2234 int iam_insert(handle_t *h, struct iam_container *c, const struct iam_key *k,
2235                const struct iam_rec *r, struct iam_path_descr *pd)
2236 {
2237         struct iam_iterator it;
2238         int result;
2239
2240         iam_it_init(&it, c, IAM_IT_WRITE, pd);
2241
2242         result = iam_it_get_exact(&it, k);
2243         if (result == -ENOENT)
2244                 result = iam_it_rec_insert(h, &it, k, r);
2245         else if (result == 0)
2246                 result = -EEXIST;
2247         iam_it_put(&it);
2248         iam_it_fini(&it);
2249         return result;
2250 }
2251 EXPORT_SYMBOL(iam_insert);
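/*
 * Caller sketch (illustration only): inserting within an open transaction
 * @h; -EEXIST is the expected "already there" case rather than a hard error:
 *
 *      rc = iam_insert(h, c, k, r, pd);
 *      if (rc == -EEXIST)
 *              a record with key @k already exists;
 *      else if (rc < 0)
 *              failure, e.g. -ENOSPC from a failed index split;
 */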
2252
2253 /*
2254  * Update record with the key @k in container @c (within context of
2255  * transaction @h), new record is given by @r.
2256  *
2257  * Return values: +1: skipped because the record already has the same value,
2258  * 0: success, -ve: error, including -ENOENT if the key is not found.
2259  */
2260 int iam_update(handle_t *h, struct iam_container *c, const struct iam_key *k,
2261                const struct iam_rec *r, struct iam_path_descr *pd)
2262 {
2263         struct iam_iterator it;
2264         struct iam_leaf *folio;
2265         int result;
2266
2267         iam_it_init(&it, c, IAM_IT_WRITE, pd);
2268
2269         result = iam_it_get_exact(&it, k);
2270         if (result == 0) {
2271                 folio = &it.ii_path.ip_leaf;
2272                 result = iam_leaf_ops(folio)->rec_eq(folio, r);
2273                 if (result == 0)
2274                         iam_it_rec_set(h, &it, r);
2275                 else
2276                         result = 1;
2277         }
2278         iam_it_put(&it);
2279         iam_it_fini(&it);
2280         return result;
2281 }
2282 EXPORT_SYMBOL(iam_update);
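/*
 * Caller sketch (illustration only): note the +1 return, which means the
 * stored record already matched @r and no journal write was issued:
 *
 *      rc = iam_update(h, c, k, r, pd);
 *      if (rc == 1)
 *              rc = 0;
 *      else if (rc == -ENOENT)
 *              no record with key @k exists;
 */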
2283
2284 /*
2285  * Delete existing record with key @k.
2286  *
2287  * Return values: 0: success, -ENOENT: not-found, -ve: other error.
2288  *
2289  * postcondition: ergo(result == 0 || result == -ENOENT,
2290  *                     iam_lookup(c, k, *) == -ENOENT)
2291  */
2292 int iam_delete(handle_t *h, struct iam_container *c, const struct iam_key *k,
2293                struct iam_path_descr *pd)
2294 {
2295         struct iam_iterator it;
2296         int result;
2297
2298         iam_it_init(&it, c, IAM_IT_WRITE, pd);
2299
2300         result = iam_it_get_exact(&it, k);
2301         if (result == 0)
2302                 iam_it_rec_delete(h, &it);
2303         iam_it_put(&it);
2304         iam_it_fini(&it);
2305         return result;
2306 }
2307 EXPORT_SYMBOL(iam_delete);
2308
2309 int iam_root_limit(int rootgap, int blocksize, int size)
2310 {
2311         int limit;
2312         int nlimit;
2313
2314         limit = (blocksize - rootgap) / size;
2315         nlimit = blocksize / size;
2316         if (limit == nlimit)
2317                 limit--;
2318         return limit;
2319 }
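/*
 * Worked examples (illustration only): with a 4096-byte block, a 32-byte
 * root gap and 16-byte entries, limit = (4096 - 32) / 16 = 254 while
 * nlimit = 4096 / 16 = 256, so 254 is returned unchanged.  With 24-byte
 * entries and a 16-byte gap, both (4096 - 16) / 24 and 4096 / 24 truncate to
 * 170, so the limit is decremented to 169.  This keeps the root limit
 * strictly below the limit of a gap-less node, an assumption that
 * split_index_node() asserts.
 */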