1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * GPL HEADER START
5  *
6  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 only,
10  * as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License version 2 for more details (a copy is included
16  * in the LICENSE file that accompanied this code).
17  *
18  * You should have received a copy of the GNU General Public License
19  * version 2 along with this program; If not, see [sun.com URL with a
20  * copy of GPLv2].
21  *
22  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23  * CA 95054 USA or visit www.sun.com if you need additional information or
24  * have any questions.
25  *
26  * GPL HEADER END
27  */
28 /*
29  * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
30  * Use is subject to license terms.
31  *
32  * Copyright (c) 2011, 2012, Whamcloud, Inc.
33  */
34 /*
35  * This file is part of Lustre, http://www.lustre.org/
36  * Lustre is a trademark of Sun Microsystems, Inc.
37  *
38  * iam.c
39  * Top-level entry points into iam module
40  *
41  * Author: Wang Di <wangdi@clusterfs.com>
42  * Author: Nikita Danilov <nikita@clusterfs.com>
43  */
44
45 /*
46  * iam: big theory statement.
47  *
48  * iam (Index Access Module) is a module providing abstraction of persistent
49  * transactional container on top of generalized ldiskfs htree.
50  *
51  * iam supports:
52  *
53  *     - key, pointer, and record size specifiable per container.
54  *
55  *     - trees taller than 2 index levels.
56  *
57  *     - read/write to existing ldiskfs htree directories as iam containers.
58  *
59  * iam container is a tree, consisting of leaf nodes containing keys and
60  * records stored in this container, and index nodes, containing keys and
61  * pointers to leaf or index nodes.
62  *
63  * iam does not work with keys directly; instead, it calls a user-supplied
64  * key comparison function (->dpo_keycmp()).
65  *
66  * Pointers are (currently) interpreted as logical offsets (measured in
67  * blocks) within the underlying flat file on top of which the iam tree lives.
68  *
69  * On-disk format:
70  *
71  * iam mostly tries to reuse existing htree formats.
72  *
73  * Format of index node:
74  *
75  * +-----+-------+-------+-------+------+-------+------------+
76  * |     | count |       |       |      |       |            |
77  * | gap |   /   | entry | entry | .... | entry | free space |
78  * |     | limit |       |       |      |       |            |
79  * +-----+-------+-------+-------+------+-------+------------+
80  *
81  *       gap           this part of node is never accessed by iam code. It
82  *                     exists for binary compatibility with ldiskfs htree (that,
83  *                     in turn, stores fake struct ext2_dirent for ext2
84  *                     compatibility), and to keep some unspecified per-node
85  *                     data. Gap can be different for root and non-root index
86  *                     nodes. Gap size can be specified for each container
87  *                     (gap of 0 is allowed).
88  *
89  *       count/limit   current number of entries in this node, and the maximal
90  *                     number of entries that can fit into node. count/limit
91  *                     has the same size as entry, and is itself counted in
92  *                     count.
93  *
94  *       entry         index entry: consists of a key immediately followed by
95  *                     a pointer to a child node. Size of a key and size of a
96  *                     pointer depends on container. Entry has neither
97  *                     alignment nor padding.
98  *
99  *       free space    portion of node new entries are added to
100  *
101  * Entries in index node are sorted by their key value.
102  *
103  * Format of a leaf node is not specified. Generic iam code accesses leaf
104  * nodes through ->id_leaf methods in struct iam_descr.
105  *
106  */
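/*
 * Purely illustrative sketch of the index node layout described above
 * (not part of this file, hence kept under "#if 0"): key and pointer sizes
 * are per-container, so the 8-byte key and 4-byte pointer below, as well as
 * the struct names, are assumptions made only for the example. Real code
 * accesses entries as unaligned byte ranges via iam_entry_shift() and the
 * dx_get_*()/dx_set_*() helpers.
 */
#if 0
/* the count/limit header occupies the first entry slot, padded to the
 * entry size, and is itself counted in "count" */
struct iam_example_countlimit {
        __le16 count;           /* current number of entries in this node */
        __le16 limit;           /* maximal number of entries that can fit */
        __u8   pad[8];          /* pad up to sizeof(struct iam_example_entry) */
};

struct iam_example_entry {
        __u8   key[8];          /* key, immediately followed by ... */
        __le32 ptr;             /* ... pointer (block number) of a child node */
} __attribute__((packed));
#endif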
107
108 #include <linux/module.h>
109 #include <linux/fs.h>
110 #include <linux/pagemap.h>
111 #include <linux/time.h>
112 #include <linux/fcntl.h>
113 #include <linux/stat.h>
114 #include <linux/string.h>
115 #include <linux/quotaops.h>
116 #include <linux/buffer_head.h>
117 #include <linux/smp_lock.h>
118 #include "osd_internal.h"
119
120 #include "xattr.h"
121 #include "acl.h"
122
123 /*
124  * List of all registered formats.
125  *
126  * No locking. Callers synchronize.
127  */
128 static CFS_LIST_HEAD(iam_formats);
129
130 void iam_format_register(struct iam_format *fmt)
131 {
132         cfs_list_add(&fmt->if_linkage, &iam_formats);
133 }
134 EXPORT_SYMBOL(iam_format_register);
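/*
 * A minimal sketch (not part of this file, hence "#if 0") of how a format
 * module is expected to plug into the list above, modelled on the
 * iam_lvar/iam_lfix initializers called from iam_format_guess() below.
 * The names my_format_guess and iam_myfmt_format are made up for
 * illustration only.
 */
#if 0
static int my_format_guess(struct iam_container *c)
{
        /*
         * Inspect the root block of @c; if it matches this format, set up
         * c->ic_descr operations and return 0, otherwise return a negative
         * errno so that the next registered format is tried.
         */
        return -EINVAL;
}

static struct iam_format iam_myfmt_format = {
        .if_guess = my_format_guess
};

void iam_myfmt_format_init(void)
{
        iam_format_register(&iam_myfmt_format);
}
#endif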
135
136 /*
137  * Determine format of given container. This is done by scanning list of
138  * registered formats and calling ->if_guess() method of each in turn.
139  */
140 static int iam_format_guess(struct iam_container *c)
141 {
142         int result;
143         struct iam_format *fmt;
144
145         /*
146          * XXX temporary initialization hook.
147          */
148         {
149                 static int initialized = 0;
150
151                 if (!initialized) {
152                         iam_lvar_format_init();
153                         iam_lfix_format_init();
154                         initialized = 1;
155                 }
156         }
157
158         result = -ENOENT;
159         cfs_list_for_each_entry(fmt, &iam_formats, if_linkage) {
160                 result = fmt->if_guess(c);
161                 if (result == 0)
162                         break;
163         }
164         return result;
165 }
166
167 /*
168  * Initialize container @c.
169  */
170 int iam_container_init(struct iam_container *c,
171                        struct iam_descr *descr, struct inode *inode)
172 {
173         memset(c, 0, sizeof *c);
174         c->ic_descr  = descr;
175         c->ic_object = inode;
176         cfs_init_rwsem(&c->ic_sem);
177         return 0;
178 }
179 EXPORT_SYMBOL(iam_container_init);
180
181 /*
182  * Determine container format.
183  */
184 int iam_container_setup(struct iam_container *c)
185 {
186         return iam_format_guess(c);
187 }
188 EXPORT_SYMBOL(iam_container_setup);
189
190 /*
191  * Finalize container @c, release all resources.
192  */
193 void iam_container_fini(struct iam_container *c)
194 {
195         brelse(c->ic_root_bh);
196         c->ic_root_bh = NULL;
197 }
198 EXPORT_SYMBOL(iam_container_fini);
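/*
 * Typical container life cycle, as a sketch (error handling trimmed and
 * kept under "#if 0"; @descr and @inode are assumed to be provided by the
 * embedding layer, e.g. the osd code that owns the container):
 */
#if 0
static int example_container_use(struct iam_container *c,
                                 struct iam_descr *descr,
                                 struct inode *inode)
{
        int rc;

        rc = iam_container_init(c, descr, inode);
        if (rc == 0)
                rc = iam_container_setup(c);    /* guess on-disk format */
        if (rc == 0) {
                /* ... access records through the iterator interface ... */
        }
        iam_container_fini(c);
        return rc;
}
#endif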
199
200 void iam_path_init(struct iam_path *path, struct iam_container *c,
201                    struct iam_path_descr *pd)
202 {
203         memset(path, 0, sizeof *path);
204         path->ip_container = c;
205         path->ip_frame = path->ip_frames;
206         path->ip_data = pd;
207         path->ip_leaf.il_path = path;
208 }
209
210 static void iam_leaf_fini(struct iam_leaf *leaf);
211
212 void iam_path_release(struct iam_path *path)
213 {
214         int i;
215
216         for (i = 0; i < ARRAY_SIZE(path->ip_frames); i++) {
217                 if (path->ip_frames[i].bh != NULL) {
218                         brelse(path->ip_frames[i].bh);
219                         path->ip_frames[i].bh = NULL;
220                 }
221         }
222 }
223
224 void iam_path_fini(struct iam_path *path)
225 {
226         iam_leaf_fini(&path->ip_leaf);
227         iam_path_release(path);
228 }
229
230
231 void iam_path_compat_init(struct iam_path_compat *path, struct inode *inode)
232 {
233         int i;
234
235         path->ipc_hinfo = &path->ipc_hinfo_area;
236         for (i = 0; i < ARRAY_SIZE(path->ipc_scratch); ++i)
237                 path->ipc_descr.ipd_key_scratch[i] =
238                         (struct iam_ikey *)&path->ipc_scratch[i];
239
240         iam_path_init(&path->ipc_path, &path->ipc_container, &path->ipc_descr);
241 }
242
243 void iam_path_compat_fini(struct iam_path_compat *path)
244 {
245         iam_path_fini(&path->ipc_path);
246 }
247
248 /*
249  * Helper function initializing iam_path_descr and its key scratch area.
250  */
251 struct iam_path_descr *iam_ipd_alloc(void *area, int keysize)
252 {
253         struct iam_path_descr *ipd;
254         void *karea;
255         int i;
256
257         ipd = area;
258         karea = ipd + 1;
259         for (i = 0; i < ARRAY_SIZE(ipd->ipd_key_scratch); ++i, karea += keysize)
260                 ipd->ipd_key_scratch[i] = karea;
261         return ipd;
262 }
263 EXPORT_SYMBOL(iam_ipd_alloc);
264
265 void iam_ipd_free(struct iam_path_descr *ipd)
266 {
267 }
268 EXPORT_SYMBOL(iam_ipd_free);
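/*
 * Sketch of the sizing contract implied by iam_ipd_alloc(): @area has to
 * hold the descriptor itself plus one key of @keysize bytes per
 * ipd_key_scratch[] slot. The helper below is illustrative only and is
 * not part of this file.
 */
#if 0
static inline size_t example_ipd_area_size(int keysize)
{
        struct iam_path_descr ipd;

        return sizeof ipd + ARRAY_SIZE(ipd.ipd_key_scratch) * keysize;
}
#endif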
269
270 int iam_node_read(struct iam_container *c, iam_ptr_t ptr,
271                   handle_t *h, struct buffer_head **bh)
272 {
273         int result = 0;
274
275         /* NB: it can be called by iam_lfix_guess() which is still at a
276          * very early stage, when c->ic_root_bh and c->ic_descr->id_ops
277          * haven't been initialized yet.
278          * Also, we don't have this for IAM dir.
279          */
280         if (c->ic_root_bh != NULL &&
281             c->ic_descr->id_ops->id_root_ptr(c) == ptr) {
282                 get_bh(c->ic_root_bh);
283                 *bh = c->ic_root_bh;
284                 return 0;
285         }
286
287         *bh = ldiskfs_bread(h, c->ic_object, (int)ptr, 0, &result);
288         if (*bh == NULL)
289                 result = -EIO;
290         return result;
291 }
292
293 /*
294  * Return pointer to current leaf record. Pointer is valid while corresponding
295  * leaf node is locked and pinned.
296  */
297 static struct iam_rec *iam_leaf_rec(const struct iam_leaf *leaf)
298 {
299         return iam_leaf_ops(leaf)->rec(leaf);
300 }
301
302 /*
303  * Return pointer to the current leaf key. This function returns pointer to
304  * the key stored in node.
305  *
306  * Caller should assume that returned pointer is only valid while leaf node is
307  * pinned and locked.
308  */
309 static struct iam_key *iam_leaf_key(const struct iam_leaf *leaf)
310 {
311         return iam_leaf_ops(leaf)->key(leaf);
312 }
313
314 static int iam_leaf_key_size(const struct iam_leaf *leaf)
315 {
316         return iam_leaf_ops(leaf)->key_size(leaf);
317 }
318
319 static struct iam_ikey *iam_leaf_ikey(const struct iam_leaf *leaf,
320                                       struct iam_ikey *key)
321 {
322         return iam_leaf_ops(leaf)->ikey(leaf, key);
323 }
324
325 static int iam_leaf_keycmp(const struct iam_leaf *leaf,
326                            const struct iam_key *key)
327 {
328         return iam_leaf_ops(leaf)->key_cmp(leaf, key);
329 }
330
331 static int iam_leaf_keyeq(const struct iam_leaf *leaf,
332                           const struct iam_key *key)
333 {
334         return iam_leaf_ops(leaf)->key_eq(leaf, key);
335 }
336
337 #if LDISKFS_INVARIANT_ON
338 static int iam_leaf_check(struct iam_leaf *leaf);
339 extern int dx_node_check(struct iam_path *p, struct iam_frame *f);
340
341 static int iam_path_check(struct iam_path *p)
342 {
343         int i;
344         int result;
345         struct iam_frame *f;
346         struct iam_descr *param;
347
348         result = 1;
349         param = iam_path_descr(p);
350         for (i = 0; result && i < ARRAY_SIZE(p->ip_frames); ++i) {
351                 f = &p->ip_frames[i];
352                 if (f->bh != NULL) {
353                         result = dx_node_check(p, f);
354                         if (result)
355                                 result = !param->id_ops->id_node_check(p, f);
356                 }
357         }
358         if (result && p->ip_leaf.il_bh != NULL)
359                 result = iam_leaf_check(&p->ip_leaf);
360         if (result == 0) {
361                 ldiskfs_std_error(iam_path_obj(p)->i_sb, result);
362         }
363         return result;
364 }
365 #endif
366
367 static int iam_leaf_load(struct iam_path *path)
368 {
369         iam_ptr_t block;
370         int err;
371         struct iam_container *c;
372         struct buffer_head   *bh;
373         struct iam_leaf      *leaf;
374         struct iam_descr     *descr;
375
376         c     = path->ip_container;
377         leaf  = &path->ip_leaf;
378         descr = iam_path_descr(path);
379         block = path->ip_frame->leaf;
380         if (block == 0) {
381                 /* XXX bug 11027 */
382                 printk(CFS_KERN_EMERG "wrong leaf: %lu %d [%p %p %p]\n",
383                        (long unsigned)path->ip_frame->leaf,
384                        dx_get_count(dx_node_get_entries(path, path->ip_frame)),
385                        path->ip_frames[0].bh, path->ip_frames[1].bh,
386                        path->ip_frames[2].bh);
387         }
388         err   = descr->id_ops->id_node_read(c, block, NULL, &bh);
389         if (err == 0) {
390                 leaf->il_bh = bh;
391                 leaf->il_curidx = block;
392                 err = iam_leaf_ops(leaf)->init(leaf);
393                 assert_inv(ergo(err == 0, iam_leaf_check(leaf)));
394         }
395         return err;
396 }
397
398 static void iam_unlock_htree(struct inode *dir, struct dynlock_handle *lh)
399 {
400         if (lh != NULL)
401                 dynlock_unlock(&LDISKFS_I(dir)->i_htree_lock, lh);
402 }
403
404
405 static void iam_leaf_unlock(struct iam_leaf *leaf)
406 {
407         if (leaf->il_lock != NULL) {
408                 iam_unlock_htree(iam_leaf_container(leaf)->ic_object,
409                                 leaf->il_lock);
410                 do_corr(schedule());
411                 leaf->il_lock = NULL;
412         }
413 }
414
415 static void iam_leaf_fini(struct iam_leaf *leaf)
416 {
417         if (leaf->il_path != NULL) {
418                 iam_leaf_unlock(leaf);
419                 assert_inv(ergo(leaf->il_bh != NULL, iam_leaf_check(leaf)));
420                 iam_leaf_ops(leaf)->fini(leaf);
421                 if (leaf->il_bh) {
422                         brelse(leaf->il_bh);
423                         leaf->il_bh = NULL;
424                         leaf->il_curidx = 0;
425                 }
426         }
427 }
428
429 static void iam_leaf_start(struct iam_leaf *folio)
430 {
431         iam_leaf_ops(folio)->start(folio);
432 }
433
434 void iam_leaf_next(struct iam_leaf *folio)
435 {
436         iam_leaf_ops(folio)->next(folio);
437 }
438
439 static void iam_leaf_rec_add(struct iam_leaf *leaf, const struct iam_key *key,
440                              const struct iam_rec *rec)
441 {
442         iam_leaf_ops(leaf)->rec_add(leaf, key, rec);
443 }
444
445 static void iam_rec_del(struct iam_leaf *leaf, int shift)
446 {
447         iam_leaf_ops(leaf)->rec_del(leaf, shift);
448 }
449
450 int iam_leaf_at_end(const struct iam_leaf *leaf)
451 {
452         return iam_leaf_ops(leaf)->at_end(leaf);
453 }
454
455 void iam_leaf_split(struct iam_leaf *l, struct buffer_head **bh, iam_ptr_t nr)
456 {
457         iam_leaf_ops(l)->split(l, bh, nr);
458 }
459
460 int iam_leaf_can_add(const struct iam_leaf *l,
461                      const struct iam_key *k, const struct iam_rec *r)
462 {
463         return iam_leaf_ops(l)->can_add(l, k, r);
464 }
465
466 #if LDISKFS_INVARIANT_ON
467 static int iam_leaf_check(struct iam_leaf *leaf)
468 {
469         return 1;
470 #if 0
471         struct iam_lentry    *orig;
472         struct iam_path      *path;
473         struct iam_container *bag;
474         struct iam_ikey       *k0;
475         struct iam_ikey       *k1;
476         int result;
477         int first;
478
479         orig = leaf->il_at;
480         path = iam_leaf_path(leaf);
481         bag  = iam_leaf_container(leaf);
482
483         result = iam_leaf_ops(leaf)->init(leaf);
484         if (result != 0)
485                 return result;
486
487         first = 1;
488         iam_leaf_start(leaf);
489         k0 = iam_path_ikey(path, 0);
490         k1 = iam_path_ikey(path, 1);
491         while (!iam_leaf_at_end(leaf)) {
492                 iam_ikeycpy(bag, k0, k1);
493                 iam_ikeycpy(bag, k1, iam_leaf_ikey(leaf, k1));
494                 if (!first && iam_ikeycmp(bag, k0, k1) > 0) {
495                         return 0;
496                 }
497                 first = 0;
498                 iam_leaf_next(leaf);
499         }
500         leaf->il_at = orig;
501         return 1;
502 #endif
503 }
504 #endif
505
506 static int iam_txn_dirty(handle_t *handle,
507                          struct iam_path *path, struct buffer_head *bh)
508 {
509         int result;
510
511         result = ldiskfs_journal_dirty_metadata(handle, bh);
512         if (result != 0)
513                 ldiskfs_std_error(iam_path_obj(path)->i_sb, result);
514         return result;
515 }
516
517 static int iam_txn_add(handle_t *handle,
518                        struct iam_path *path, struct buffer_head *bh)
519 {
520         int result;
521
522         result = ldiskfs_journal_get_write_access(handle, bh);
523         if (result != 0)
524                 ldiskfs_std_error(iam_path_obj(path)->i_sb, result);
525         return result;
526 }
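/*
 * The two helpers above follow the usual jbd discipline: iam_txn_add()
 * (journal_get_write_access) must precede any modification of @bh, and
 * iam_txn_dirty() (journal_dirty_metadata) must follow it. A minimal
 * sketch of a caller, mirroring iam_it_rec_set() below (hypothetical
 * function, kept under "#if 0"):
 */
#if 0
static int example_update_block(handle_t *h, struct iam_path *path,
                                struct buffer_head *bh)
{
        int rc;

        rc = iam_txn_add(h, path, bh);           /* declare intent to modify */
        if (rc == 0) {
                /* ... modify bh->b_data here ... */
                rc = iam_txn_dirty(h, path, bh); /* hand the buffer to jbd */
        }
        return rc;
}
#endif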
527
528 /***********************************************************************/
529 /* iterator interface                                                  */
530 /***********************************************************************/
531
532 static enum iam_it_state it_state(const struct iam_iterator *it)
533 {
534         return it->ii_state;
535 }
536
537 /*
538  * Helper function returning the container an iterator is attached to.
539  */
540 static struct iam_container *iam_it_container(const struct iam_iterator *it)
541 {
542         return it->ii_path.ip_container;
543 }
544
545 static inline int it_keycmp(const struct iam_iterator *it,
546                             const struct iam_key *k)
547 {
548         return iam_leaf_keycmp(&it->ii_path.ip_leaf, k);
549 }
550
551 static inline int it_keyeq(const struct iam_iterator *it,
552                            const struct iam_key *k)
553 {
554         return iam_leaf_keyeq(&it->ii_path.ip_leaf, k);
555 }
556
557 static int it_ikeycmp(const struct iam_iterator *it, const struct iam_ikey *ik)
558 {
559         return iam_ikeycmp(it->ii_path.ip_container,
560                            iam_leaf_ikey(&it->ii_path.ip_leaf,
561                                          iam_path_ikey(&it->ii_path, 0)), ik);
562 }
563
564 static inline int it_at_rec(const struct iam_iterator *it)
565 {
566         return !iam_leaf_at_end(&it->ii_path.ip_leaf);
567 }
568
569 static inline int it_before(const struct iam_iterator *it)
570 {
571         return it_state(it) == IAM_IT_SKEWED && it_at_rec(it);
572 }
573
574 /*
575  * Helper wrapper around iam_it_get(): returns 0 (success) only when record
576  * with exactly the same key as requested is found.
577  */
578 static int iam_it_get_exact(struct iam_iterator *it, const struct iam_key *k)
579 {
580         int result;
581
582         result = iam_it_get(it, k);
583         if (result > 0)
584                 result = 0;
585         else if (result == 0)
586                 /*
587                  * Return -ENOENT if the cursor is located above a record with
588                  * a key different from the one specified, or in an empty leaf.
589                  *
590                  * XXX returning -ENOENT only works if iam_it_get() never
591                  * returns -ENOENT as a legitimate error.
592                  */
593                 result = -ENOENT;
594         return result;
595 }
596
597 void iam_container_write_lock(struct iam_container *ic)
598 {
599         cfs_down_write(&ic->ic_sem);
600 }
601
602 void iam_container_write_unlock(struct iam_container *ic)
603 {
604         cfs_up_write(&ic->ic_sem);
605 }
606
607 void iam_container_read_lock(struct iam_container *ic)
608 {
609         cfs_down_read(&ic->ic_sem);
610 }
611
612 void iam_container_read_unlock(struct iam_container *ic)
613 {
614         cfs_up_read(&ic->ic_sem);
615 }
616
617 /*
618  * Initialize iterator to IAM_IT_DETACHED state.
619  *
620  * postcondition: it_state(it) == IAM_IT_DETACHED
621  */
622 int  iam_it_init(struct iam_iterator *it, struct iam_container *c, __u32 flags,
623                  struct iam_path_descr *pd)
624 {
625         memset(it, 0, sizeof *it);
626         it->ii_flags  = flags;
627         it->ii_state  = IAM_IT_DETACHED;
628         iam_path_init(&it->ii_path, c, pd);
629         return 0;
630 }
631 EXPORT_SYMBOL(iam_it_init);
632
633 /*
634  * Finalize iterator and release all resources.
635  *
636  * precondition: it_state(it) == IAM_IT_DETACHED
637  */
638 void iam_it_fini(struct iam_iterator *it)
639 {
640         assert_corr(it_state(it) == IAM_IT_DETACHED);
641         iam_path_fini(&it->ii_path);
642 }
643 EXPORT_SYMBOL(iam_it_fini);
644
645 /*
646  * These locking primitives are used to protect parts of a directory's
647  * htree. The protection unit is a block: either a leaf or an index node.
648  */
649 struct dynlock_handle *iam_lock_htree(struct inode *dir, unsigned long value,
650                                      enum dynlock_type lt)
651 {
652         return dynlock_lock(&LDISKFS_I(dir)->i_htree_lock, value, lt, GFP_NOFS);
653 }
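/*
 * Illustrative pairing of these per-block locks (hypothetical caller,
 * kept under "#if 0"): take the lock for the leaf block the path points
 * to, work under it, drop it.
 */
#if 0
static int example_lock_leaf_block(struct iam_path *path)
{
        struct inode *dir = iam_path_obj(path);
        struct dynlock_handle *lh;

        lh = iam_lock_htree(dir, path->ip_frame->leaf, DLT_READ);
        if (lh == NULL)
                return -ENOMEM;
        /* ... read the leaf block while the lock is held ... */
        iam_unlock_htree(dir, lh);
        return 0;
}
#endif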
654
655
656
657 int iam_index_lock(struct iam_path *path, struct dynlock_handle **lh)
658 {
659         struct iam_frame *f;
660
661         for (f = path->ip_frame; f >= path->ip_frames; --f, ++lh) {
662                 do_corr(schedule());
663                 *lh = iam_lock_htree(iam_path_obj(path), f->curidx, DLT_READ);
664                 if (*lh == NULL)
665                         return -ENOMEM;
666         }
667         return 0;
668 }
669
670 /*
671  * Fast check for frame consistency.
672  */
673 static int iam_check_fast(struct iam_path *path, struct iam_frame *frame)
674 {
675         struct iam_container *bag;
676         struct iam_entry *next;
677         struct iam_entry *last;
678         struct iam_entry *entries;
679         struct iam_entry *at;
680
681         bag     = path->ip_container;
682         at      = frame->at;
683         entries = frame->entries;
684         last    = iam_entry_shift(path, entries, dx_get_count(entries) - 1);
685
686         if (unlikely(at > last))
687                 return -EAGAIN;
688
689         if (unlikely(dx_get_block(path, at) != frame->leaf))
690                 return -EAGAIN;
691
692         if (unlikely(iam_ikeycmp(bag, iam_ikey_at(path, at),
693                                  path->ip_ikey_target) > 0))
694                 return -EAGAIN;
695
696         next = iam_entry_shift(path, at, +1);
697         if (next <= last) {
698                 if (unlikely(iam_ikeycmp(bag, iam_ikey_at(path, next),
699                                          path->ip_ikey_target) <= 0))
700                         return -EAGAIN;
701         }
702         return 0;
703 }
704
705 int dx_index_is_compat(struct iam_path *path)
706 {
707         return iam_path_descr(path) == NULL;
708 }
709
710 /*
711  * iam_find_position
712  *
713  * Search for the position of the specified index key (hash) in an index node.
714  *
715  */
716
717 struct iam_entry *iam_find_position(struct iam_path *path,
718                                    struct iam_frame *frame)
719 {
720         int count;
721         struct iam_entry *p;
722         struct iam_entry *q;
723         struct iam_entry *m;
724
725         count = dx_get_count(frame->entries);
726         assert_corr(count && count <= dx_get_limit(frame->entries));
727         p = iam_entry_shift(path, frame->entries,
728                             dx_index_is_compat(path) ? 1 : 2);
729         q = iam_entry_shift(path, frame->entries, count - 1);
730         while (p <= q) {
731                 m = iam_entry_shift(path, p, iam_entry_diff(path, q, p) / 2);
732                 if (iam_ikeycmp(path->ip_container, iam_ikey_at(path, m),
733                                 path->ip_ikey_target) > 0)
734                         q = iam_entry_shift(path, m, -1);
735                 else
736                         p = iam_entry_shift(path, m, +1);
737         }
738         return iam_entry_shift(path, p, -1);
739 }
740
741
742
743 static iam_ptr_t iam_find_ptr(struct iam_path *path, struct iam_frame *frame)
744 {
745         return dx_get_block(path, iam_find_position(path, frame));
746 }
747
748 void iam_insert_key(struct iam_path *path, struct iam_frame *frame,
749                     const struct iam_ikey *key, iam_ptr_t ptr)
750 {
751         struct iam_entry *entries = frame->entries;
752         struct iam_entry *new = iam_entry_shift(path, frame->at, +1);
753         int count = dx_get_count(entries);
754
755         /*
756          * Unfortunately we cannot assert this, as this function is sometimes
757          * called by VFS under i_sem and without pdirops lock.
758          */
759         assert_corr(1 || iam_frame_is_locked(path, frame));
760         assert_corr(count < dx_get_limit(entries));
761         assert_corr(frame->at < iam_entry_shift(path, entries, count));
762         assert_inv(dx_node_check(path, frame));
763
764         memmove(iam_entry_shift(path, new, 1), new,
765                 (char *)iam_entry_shift(path, entries, count) - (char *)new);
766         dx_set_ikey(path, new, key);
767         dx_set_block(path, new, ptr);
768         dx_set_count(entries, count + 1);
769         assert_inv(dx_node_check(path, frame));
770 }
771
772 void iam_insert_key_lock(struct iam_path *path, struct iam_frame *frame,
773                          const struct iam_ikey *key, iam_ptr_t ptr)
774 {
775         iam_lock_bh(frame->bh);
776         iam_insert_key(path, frame, key, ptr);
777         iam_unlock_bh(frame->bh);
778 }
779 /*
780  * returns 0 if path was unchanged, -EAGAIN otherwise.
781  */
782 static int iam_check_path(struct iam_path *path, struct iam_frame *frame)
783 {
784         int equal;
785
786         iam_lock_bh(frame->bh);
787         equal = iam_check_fast(path, frame) == 0 ||
788                 frame->leaf == iam_find_ptr(path, frame);
789         DX_DEVAL(iam_lock_stats.dls_bh_again += !equal);
790         iam_unlock_bh(frame->bh);
791
792         return equal ? 0 : -EAGAIN;
793 }
794
795 static int iam_lookup_try(struct iam_path *path)
796 {
797         u32 ptr;
798         int err = 0;
799         int i;
800
801         struct iam_descr *param;
802         struct iam_frame *frame;
803         struct iam_container *c;
804
805         param = iam_path_descr(path);
806         c = path->ip_container;
807
808         ptr = param->id_ops->id_root_ptr(c);
809         for (frame = path->ip_frames, i = 0; i <= path->ip_indirect;
810              ++frame, ++i) {
811                 err = param->id_ops->id_node_read(c, (iam_ptr_t)ptr, NULL,
812                                                   &frame->bh);
813                 do_corr(schedule());
814
815                 iam_lock_bh(frame->bh);
816                 /*
817                  * node must be initialized under bh lock because concurrent
818                  * creation procedure may change it and iam_lookup_try() will
819                  * see obsolete tree height. -bzzz
820                  */
821                 if (err != 0)
822                         break;
823
824                 if (LDISKFS_INVARIANT_ON) {
825                         err = param->id_ops->id_node_check(path, frame);
826                         if (err != 0)
827                                 break;
828                 }
829
830                 err = param->id_ops->id_node_load(path, frame);
831                 if (err != 0)
832                         break;
833
834                 assert_inv(dx_node_check(path, frame));
835                 /*
836                  * splitting may change the root index block and move the hash
837                  * we're looking for into another index block, so we have to
838                  * check this situation and repeat from the beginning if the
839                  * path got changed -bzzz
840                  */
841                 if (i > 0) {
842                         err = iam_check_path(path, frame - 1);
843                         if (err != 0)
844                                 break;
845                 }
846
847                 frame->at = iam_find_position(path, frame);
848                 frame->curidx = ptr;
849                 frame->leaf = ptr = dx_get_block(path, frame->at);
850
851                 iam_unlock_bh(frame->bh);
852                 do_corr(schedule());
853         }
854         if (err != 0)
855                 iam_unlock_bh(frame->bh);
856         path->ip_frame = --frame;
857         return err;
858 }
859
860 static int __iam_path_lookup(struct iam_path *path)
861 {
862         int err;
863         int i;
864
865         for (i = 0; i < DX_MAX_TREE_HEIGHT; ++ i)
866                 assert(path->ip_frames[i].bh == NULL);
867
868         do {
869                 err = iam_lookup_try(path);
870                 do_corr(schedule());
871                 if (err != 0)
872                         iam_path_fini(path);
873         } while (err == -EAGAIN);
874
875         return err;
876 }
877
878 /*
879  * returns 0 if path was unchanged, -EAGAIN otherwise.
880  */
881 static int iam_check_full_path(struct iam_path *path, int search)
882 {
883         struct iam_frame *bottom;
884         struct iam_frame *scan;
885         int i;
886         int result;
887
888         do_corr(schedule());
889
890         for (bottom = path->ip_frames, i = 0;
891              i < DX_MAX_TREE_HEIGHT && bottom->bh != NULL; ++bottom, ++i) {
892                 ; /* find last filled in frame */
893         }
894
895         /*
896          * Lock frames, bottom to top.
897          */
898         for (scan = bottom - 1; scan >= path->ip_frames; --scan)
899                 iam_lock_bh(scan->bh);
900         /*
901          * Check them top to bottom.
902          */
903         result = 0;
904         for (scan = path->ip_frames; scan < bottom; ++scan) {
905                 struct iam_entry *pos;
906
907                 if (search) {
908                         if (iam_check_fast(path, scan) == 0)
909                                 continue;
910
911                         pos = iam_find_position(path, scan);
912                         if (scan->leaf != dx_get_block(path, pos)) {
913                                 result = -EAGAIN;
914                                 break;
915                         }
916                         scan->at = pos;
917                 } else {
918                         pos = iam_entry_shift(path, scan->entries,
919                                               dx_get_count(scan->entries) - 1);
920                         if (scan->at > pos ||
921                             scan->leaf != dx_get_block(path, scan->at)) {
922                                 result = -EAGAIN;
923                                 break;
924                         }
925                 }
926         }
927
928         /*
929          * Unlock top to bottom.
930          */
931         for (scan = path->ip_frames; scan < bottom; ++scan)
932                 iam_unlock_bh(scan->bh);
933         DX_DEVAL(iam_lock_stats.dls_bh_full_again += !!result);
934         do_corr(schedule());
935
936         return result;
937 }
938
939
940 /*
941  * Performs path lookup and returns with found leaf (if any) locked by htree
942  * lock.
943  */
944 int iam_lookup_lock(struct iam_path *path,
945                    struct dynlock_handle **dl, enum dynlock_type lt)
946 {
947         int result;
948         struct inode *dir;
949
950         dir = iam_path_obj(path);
951         while ((result = __iam_path_lookup(path)) == 0) {
952                 do_corr(schedule());
953                 *dl = iam_lock_htree(dir, path->ip_frame->leaf, lt);
954                 if (*dl == NULL) {
955                         iam_path_fini(path);
956                         result = -ENOMEM;
957                         break;
958                 }
959                 do_corr(schedule());
960                 /*
961                  * while we were locking it, the leaf we just found may have
962                  * been split, so we need to check this -bzzz
963                  */
964                 if (iam_check_full_path(path, 1) == 0)
965                         break;
966                 iam_unlock_htree(dir, *dl);
967                 *dl = NULL;
968                 iam_path_fini(path);
969         }
970         return result;
971 }
972 /*
973  * Performs tree top-to-bottom traversal starting from root, and loads leaf
974  * node.
975  */
976 static int iam_path_lookup(struct iam_path *path, int index)
977 {
978         struct iam_container *c;
979         struct iam_descr *descr;
980         struct iam_leaf  *leaf;
981         int result;
982
983         c = path->ip_container;
984         leaf = &path->ip_leaf;
985         descr = iam_path_descr(path);
986         result = iam_lookup_lock(path, &leaf->il_lock, DLT_WRITE);
987         assert_inv(iam_path_check(path));
988         do_corr(schedule());
989         if (result == 0) {
990                 result = iam_leaf_load(path);
991                 assert_inv(ergo(result == 0, iam_leaf_check(leaf)));
992                 if (result == 0) {
993                         do_corr(schedule());
994                         if (index)
995                                 result = iam_leaf_ops(leaf)->
996                                         ilookup(leaf, path->ip_ikey_target);
997                         else
998                                 result = iam_leaf_ops(leaf)->
999                                         lookup(leaf, path->ip_key_target);
1000                         do_corr(schedule());
1001                 }
1002                 if (result < 0)
1003                         iam_leaf_unlock(leaf);
1004         }
1005         return result;
1006 }
1007
1008 /*
1009  * Common part of iam_it_{i,}get().
1010  */
1011 static int __iam_it_get(struct iam_iterator *it, int index)
1012 {
1013         int result;
1014         assert_corr(it_state(it) == IAM_IT_DETACHED);
1015
1016         result = iam_path_lookup(&it->ii_path, index);
1017         if (result >= 0) {
1018                 int collision;
1019
1020                 collision = result & IAM_LOOKUP_LAST;
1021                 switch (result & ~IAM_LOOKUP_LAST) {
1022                 case IAM_LOOKUP_EXACT:
1023                         result = +1;
1024                         it->ii_state = IAM_IT_ATTACHED;
1025                         break;
1026                 case IAM_LOOKUP_OK:
1027                         result = 0;
1028                         it->ii_state = IAM_IT_ATTACHED;
1029                         break;
1030                 case IAM_LOOKUP_BEFORE:
1031                 case IAM_LOOKUP_EMPTY:
1032                         result = 0;
1033                         it->ii_state = IAM_IT_SKEWED;
1034                         break;
1035                 default:
1036                         assert(0);
1037                 }
1038                 result |= collision;
1039         }
1040         /*
1041          * See iam_it_get_exact() for explanation.
1042          */
1043         assert_corr(result != -ENOENT);
1044         return result;
1045 }
1046
1047 /*
1048  * The correct hash was found, but not the same key; iterate through the
1049  * hash collision chain, looking for the correct record.
1050  */
1051 static int iam_it_collision(struct iam_iterator *it)
1052 {
1053         int result;
1054
1055         assert(ergo(it_at_rec(it), !it_keyeq(it, it->ii_path.ip_key_target)));
1056
1057         while ((result = iam_it_next(it)) == 0) {
1058                 do_corr(schedule());
1059                 if (it_ikeycmp(it, it->ii_path.ip_ikey_target) != 0)
1060                         return -ENOENT;
1061                 if (it_keyeq(it, it->ii_path.ip_key_target))
1062                         return 0;
1063         }
1064         return result;
1065 }
1066
1067 /*
1068  * Attach iterator. After successful completion, @it points to record with
1069  * largest key not larger than @k.
1070  *
1071  * Return value: 0: positioned on existing record,
1072  *             +ve: exact position found,
1073  *             -ve: error.
1074  *
1075  * precondition:  it_state(it) == IAM_IT_DETACHED
1076  * postcondition: ergo(result == 0 && it_state(it) == IAM_IT_ATTACHED,
1077  *                     it_keycmp(it, k) <= 0)
1078  */
1079 int iam_it_get(struct iam_iterator *it, const struct iam_key *k)
1080 {
1081         int result;
1082         assert_corr(it_state(it) == IAM_IT_DETACHED);
1083
1084         it->ii_path.ip_ikey_target = NULL;
1085         it->ii_path.ip_key_target  = k;
1086
1087         result = __iam_it_get(it, 0);
1088
1089         if (result == IAM_LOOKUP_LAST) {
1090                 result = iam_it_collision(it);
1091                 if (result != 0) {
1092                         iam_it_put(it);
1093                         iam_it_fini(it);
1094                         result = __iam_it_get(it, 0);
1095                 } else
1096                         result = +1;
1097         }
1098         if (result > 0)
1099                 result &= ~IAM_LOOKUP_LAST;
1100
1101         assert_corr(ergo(result > 0, it_keycmp(it, k) == 0));
1102         assert_corr(ergo(result == 0 && it_state(it) == IAM_IT_ATTACHED,
1103                          it_keycmp(it, k) <= 0));
1104         return result;
1105 }
1106 EXPORT_SYMBOL(iam_it_get);
1107
1108 /*
1109  * Attach iterator by index key.
1110  */
1111 static int iam_it_iget(struct iam_iterator *it, const struct iam_ikey *k)
1112 {
1113         assert_corr(it_state(it) == IAM_IT_DETACHED);
1114
1115         it->ii_path.ip_ikey_target = k;
1116         return __iam_it_get(it, 1) & ~IAM_LOOKUP_LAST;
1117 }
1118
1119 /*
1120  * Attach iterator, and ensure it points to a record (not skewed).
1121  *
1122  * Return value: 0: positioned on existing record,
1123  *             +ve: exact position found,
1124  *             -ve: error.
1125  *
1126  * precondition:  it_state(it) == IAM_IT_DETACHED &&
1127  *                !(it->ii_flags&IAM_IT_WRITE)
1128  * postcondition: ergo(result == 0, it_state(it) == IAM_IT_ATTACHED)
1129  */
1130 int iam_it_get_at(struct iam_iterator *it, const struct iam_key *k)
1131 {
1132         int result;
1133         assert_corr(it_state(it) == IAM_IT_DETACHED &&
1134                     !(it->ii_flags&IAM_IT_WRITE));
1135         result = iam_it_get(it, k);
1136         if (result == 0) {
1137                 if (it_state(it) != IAM_IT_ATTACHED) {
1138                         assert_corr(it_state(it) == IAM_IT_SKEWED);
1139                         result = iam_it_next(it);
1140                 }
1141         }
1142         assert_corr(ergo(result >= 0, it_state(it) == IAM_IT_ATTACHED));
1143         return result;
1144 }
1145 EXPORT_SYMBOL(iam_it_get_at);
1146
1147 /*
1148  * Duplicates iterator.
1149  *
1150  * postcondition: it_state(dst) == it_state(src) &&
1151  *                iam_it_container(dst) == iam_it_container(src) &&
1152  *                dst->ii_flags == src->ii_flags &&
1153  *                ergo(it_state(src) == IAM_IT_ATTACHED,
1154  *                     iam_it_rec_get(dst) == iam_it_rec_get(src) &&
1155  *                     iam_it_key_get(dst) == iam_it_key_get(src))
1156  */
1157 void iam_it_dup(struct iam_iterator *dst, const struct iam_iterator *src)
1158 {
1159         dst->ii_flags     = src->ii_flags;
1160         dst->ii_state     = src->ii_state;
1161         /* XXX not yet. iam_path_dup(&dst->ii_path, &src->ii_path); */
1162         /*
1163          * XXX: duplicate lock.
1164          */
1165         assert_corr(it_state(dst) == it_state(src));
1166         assert_corr(iam_it_container(dst) == iam_it_container(src));
1167         assert_corr(dst->ii_flags == src->ii_flags);
1168         assert_corr(ergo(it_state(src) == IAM_IT_ATTACHED,
1169                     iam_it_rec_get(dst) == iam_it_rec_get(src) &&
1170                     iam_it_key_get(dst) == iam_it_key_get(src)));
1171
1172 }
1173
1174 /*
1175  * Detach iterator. Does nothing if already in detached state.
1176  *
1177  * postcondition: it_state(it) == IAM_IT_DETACHED
1178  */
1179 void iam_it_put(struct iam_iterator *it)
1180 {
1181         if (it->ii_state != IAM_IT_DETACHED) {
1182                 it->ii_state = IAM_IT_DETACHED;
1183                 iam_leaf_fini(&it->ii_path.ip_leaf);
1184         }
1185 }
1186 EXPORT_SYMBOL(iam_it_put);
1187
1188 static struct iam_ikey *iam_it_ikey_get(const struct iam_iterator *it,
1189                                         struct iam_ikey *ikey);
1190
1191
1192 /*
1193  * This function increments the frame pointer to search the next leaf
1194  * block, and reads in the necessary intervening nodes if the search
1195  * should be necessary.  Whether or not the search is necessary is
1196  * controlled by the hash parameter.  If the hash value is even, then
1197  * the search is only continued if the next block starts with that
1198  * hash value.  This is used if we are searching for a specific file.
1199  *
1200  * If the hash value is HASH_NB_ALWAYS, then always go to the next block.
1201  *
1202  * This function returns 1 if the caller should continue to search,
1203  * or 0 if it should not.  If there is an error reading one of the
1204  * index blocks, it will return a negative error code.
1205  *
1206  * If start_hash is non-null, it will be filled in with the starting
1207  * hash of the next page.
1208  */
1209 static int iam_htree_advance(struct inode *dir, __u32 hash,
1210                               struct iam_path *path, __u32 *start_hash,
1211                               int compat)
1212 {
1213         struct iam_frame *p;
1214         struct buffer_head *bh;
1215         int err, num_frames = 0;
1216         __u32 bhash;
1217
1218         p = path->ip_frame;
1219         /*
1220          * Find the next leaf page by incrementing the frame pointer.
1221          * If we run out of entries in the interior node, loop around and
1222          * increment pointer in the parent node.  When we break out of
1223          * this loop, num_frames indicates the number of interior
1224  * nodes that need to be read.
1225          */
1226         while (1) {
1227                 do_corr(schedule());
1228                 iam_lock_bh(p->bh);
1229                 p->at = iam_entry_shift(path, p->at, +1);
1230                 if (p->at < iam_entry_shift(path, p->entries,
1231                                             dx_get_count(p->entries))) {
1232                         p->leaf = dx_get_block(path, p->at);
1233                         iam_unlock_bh(p->bh);
1234                         break;
1235                 }
1236                 iam_unlock_bh(p->bh);
1237                 if (p == path->ip_frames)
1238                         return 0;
1239                 num_frames++;
1240                 --p;
1241         }
1242
1243         if (compat) {
1244                 /*
1245                  * Htree hash magic.
1246                  *
1247                  * If the hash is 1, then continue only if the next page has
1248                  * a continuation hash of any value.  This is used for
1249                  * readdir handling.  Otherwise, check to see if the hash
1250                  * matches the desired continuation hash.  If it doesn't,
1251                  * return since there's no point in reading the successive
1252                  * index pages.
1253                  */
1254                 dx_get_ikey(path, p->at, (struct iam_ikey *)&bhash);
1255                 if (start_hash)
1256                         *start_hash = bhash;
1257                 if ((hash & 1) == 0) {
1258                         if ((bhash & ~1) != hash)
1259                                 return 0;
1260                 }
1261         }
1262         /*
1263          * If the hash is HASH_NB_ALWAYS, we always go to the next
1264          * block so no check is necessary
1265          */
1266         while (num_frames--) {
1267                 iam_ptr_t idx;
1268
1269                 do_corr(schedule());
1270                 iam_lock_bh(p->bh);
1271                 idx = p->leaf = dx_get_block(path, p->at);
1272                 iam_unlock_bh(p->bh);
1273                 err = iam_path_descr(path)->id_ops->
1274                         id_node_read(path->ip_container, idx, NULL, &bh);
1275                 if (err != 0)
1276                         return err; /* Failure */
1277                 ++p;
1278                 brelse(p->bh);
1279                 assert_corr(p->bh != bh);
1280                 p->bh = bh;
1281                 p->entries = dx_node_get_entries(path, p);
1282                 p->at = iam_entry_shift(path, p->entries, !compat);
1283                 assert_corr(p->curidx != idx);
1284                 p->curidx = idx;
1285                 iam_lock_bh(p->bh);
1286                 assert_corr(p->leaf != dx_get_block(path, p->at));
1287                 p->leaf = dx_get_block(path, p->at);
1288                 iam_unlock_bh(p->bh);
1289                 assert_inv(dx_node_check(path, p));
1290         }
1291         return 1;
1292 }
1293
1294
1295 static inline int iam_index_advance(struct iam_path *path)
1296 {
1297         return iam_htree_advance(iam_path_obj(path), 0, path, NULL, 0);
1298 }
1299
1300 static void iam_unlock_array(struct inode *dir, struct dynlock_handle **lh)
1301 {
1302         int i;
1303
1304         for (i = 0; i < DX_MAX_TREE_HEIGHT; ++i, ++lh) {
1305                 if (*lh != NULL) {
1306                         iam_unlock_htree(dir, *lh);
1307                         *lh = NULL;
1308                 }
1309         }
1310 }
1311 /*
1312  * Advance index part of @path to point to the next leaf. Returns 1 on
1313  * success, 0 when the end of the container was reached. Leaf node is locked.
1314  */
1315 int iam_index_next(struct iam_container *c, struct iam_path *path)
1316 {
1317         iam_ptr_t cursor;
1318         struct dynlock_handle *lh[DX_MAX_TREE_HEIGHT] = { 0, };
1319         int result;
1320         struct inode *object;
1321
1322         /*
1323          * Locking for iam_index_next()... is to be described.
1324          */
1325
1326         object = c->ic_object;
1327         cursor = path->ip_frame->leaf;
1328
1329         while (1) {
1330                 result = iam_index_lock(path, lh);
1331                 do_corr(schedule());
1332                 if (result < 0)
1333                         break;
1334
1335                 result = iam_check_full_path(path, 0);
1336                 if (result == 0 && cursor == path->ip_frame->leaf) {
1337                         result = iam_index_advance(path);
1338
1339                         assert_corr(result == 0 ||
1340                                     cursor != path->ip_frame->leaf);
1341                         break;
1342                 }
1343                 do {
1344                         iam_unlock_array(object, lh);
1345
1346                         iam_path_release(path);
1347                         do_corr(schedule());
1348
1349                         result = __iam_path_lookup(path);
1350                         if (result < 0)
1351                                 break;
1352
1353                         while (path->ip_frame->leaf != cursor) {
1354                                 do_corr(schedule());
1355
1356                                 result = iam_index_lock(path, lh);
1357                                 do_corr(schedule());
1358                                 if (result < 0)
1359                                         break;
1360
1361                                 result = iam_check_full_path(path, 0);
1362                                 if (result != 0)
1363                                         break;
1364
1365                                 result = iam_index_advance(path);
1366                                 if (result == 0) {
1367                                         CERROR("cannot find cursor : %u\n",
1368                                                 cursor);
1369                                         result = -EIO;
1370                                 }
1371                                 if (result < 0)
1372                                         break;
1373                                 result = iam_check_full_path(path, 0);
1374                                 if (result != 0)
1375                                         break;
1376                                 iam_unlock_array(object, lh);
1377                         }
1378                 } while (result == -EAGAIN);
1379                 if (result < 0)
1380                         break;
1381         }
1382         iam_unlock_array(object, lh);
1383         return result;
1384 }
1385
1386 /*
1387  * Move iterator one record right.
1388  *
1389  * Return value: 0: success,
1390  *              +1: end of container reached
1391  *             -ve: error
1392  *
1393  * precondition:  (it_state(it) == IAM_IT_ATTACHED ||
1394  *                 it_state(it) == IAM_IT_SKEWED) && it->ii_flags&IAM_IT_MOVE
1395  * postcondition: ergo(result == 0, it_state(it) == IAM_IT_ATTACHED) &&
1396  *                ergo(result >  0, it_state(it) == IAM_IT_DETACHED)
1397  */
1398 int iam_it_next(struct iam_iterator *it)
1399 {
1400         int result;
1401         struct iam_path      *path;
1402         struct iam_leaf      *leaf;
1403         struct inode         *obj;
1404         do_corr(struct iam_ikey *ik_orig);
1405
1406         /* assert_corr(it->ii_flags&IAM_IT_MOVE); */
1407         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1408                     it_state(it) == IAM_IT_SKEWED);
1409
1410         path = &it->ii_path;
1411         leaf = &path->ip_leaf;
1412         obj  = iam_path_obj(path);
1413
1414         assert_corr(iam_leaf_is_locked(leaf));
1415
1416         result = 0;
1417         do_corr(ik_orig = it_at_rec(it) ?
1418                 iam_it_ikey_get(it, iam_path_ikey(path, 2)) : NULL);
1419         if (it_before(it)) {
1420                 assert_corr(!iam_leaf_at_end(leaf));
1421                 it->ii_state = IAM_IT_ATTACHED;
1422         } else {
1423                 if (!iam_leaf_at_end(leaf))
1424                         /* advance within leaf node */
1425                         iam_leaf_next(leaf);
1426                 /*
1427                  * multiple iterations may be necessary due to empty leaves.
1428                  */
1429                 while (result == 0 && iam_leaf_at_end(leaf)) {
1430                         do_corr(schedule());
1431                         /* advance index portion of the path */
1432                         result = iam_index_next(iam_it_container(it), path);
1433                         assert_corr(iam_leaf_is_locked(leaf));
1434                         if (result == 1) {
1435                                 struct dynlock_handle *lh;
1436                                 lh = iam_lock_htree(obj, path->ip_frame->leaf,
1437                                                    DLT_WRITE);
1438                                 if (lh != NULL) {
1439                                         iam_leaf_fini(leaf);
1440                                         leaf->il_lock = lh;
1441                                         result = iam_leaf_load(path);
1442                                         if (result == 0)
1443                                                 iam_leaf_start(leaf);
1444                                 } else
1445                                         result = -ENOMEM;
1446                         } else if (result == 0)
1447                                 /* end of container reached */
1448                                 result = +1;
1449                         if (result != 0)
1450                                 iam_it_put(it);
1451                 }
1452                 if (result == 0)
1453                         it->ii_state = IAM_IT_ATTACHED;
1454         }
1455         assert_corr(ergo(result == 0, it_state(it) == IAM_IT_ATTACHED));
1456         assert_corr(ergo(result >  0, it_state(it) == IAM_IT_DETACHED));
1457         assert_corr(ergo(result == 0 && ik_orig != NULL,
1458                          it_ikeycmp(it, ik_orig) >= 0));
1459         return result;
1460 }
1461 EXPORT_SYMBOL(iam_it_next);
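/*
 * Sketch of a read-only scan with the iterator calls above (hypothetical
 * caller, kept under "#if 0"; container locking and error handling are
 * reduced to the bare minimum):
 */
#if 0
static int example_scan(struct iam_container *c, struct iam_path_descr *ipd,
                        const struct iam_key *start)
{
        struct iam_iterator it;
        int rc;

        iam_it_init(&it, c, IAM_IT_MOVE, ipd);

        iam_container_read_lock(c);
        rc = iam_it_get_at(&it, start);
        if (rc >= 0) {
                do {
                        /* iam_it_key_get()/iam_it_rec_get() are valid here */
                        rc = iam_it_next(&it);
                } while (rc == 0);
        }
        iam_it_put(&it);
        iam_container_read_unlock(c);

        iam_it_fini(&it);
        return rc < 0 ? rc : 0;
}
#endif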
1462
1463 /*
1464  * Return pointer to the record under iterator.
1465  *
1466  * precondition:  it_state(it) == IAM_IT_ATTACHED && it_at_rec(it)
1467  * postcondition: it_state(it) == IAM_IT_ATTACHED
1468  */
1469 struct iam_rec *iam_it_rec_get(const struct iam_iterator *it)
1470 {
1471         assert_corr(it_state(it) == IAM_IT_ATTACHED);
1472         assert_corr(it_at_rec(it));
1473         return iam_leaf_rec(&it->ii_path.ip_leaf);
1474 }
1475 EXPORT_SYMBOL(iam_it_rec_get);
1476
1477 static void iam_it_reccpy(struct iam_iterator *it, const struct iam_rec *r)
1478 {
1479         struct iam_leaf *folio;
1480
1481         folio = &it->ii_path.ip_leaf;
1482         iam_leaf_ops(folio)->rec_set(folio, r);
1483 }
1484
1485 /*
1486  * Replace contents of record under iterator.
1487  *
1488  * precondition:  it_state(it) == IAM_IT_ATTACHED &&
1489  *                it->ii_flags&IAM_IT_WRITE
1490  * postcondition: it_state(it) == IAM_IT_ATTACHED &&
1491  *                ergo(result == 0, !memcmp(iam_it_rec_get(it), r, ...))
1492  */
1493 int iam_it_rec_set(handle_t *h,
1494                    struct iam_iterator *it, const struct iam_rec *r)
1495 {
1496         int result;
1497         struct iam_path *path;
1498         struct buffer_head *bh;
1499
1500         assert_corr(it_state(it) == IAM_IT_ATTACHED &&
1501                     it->ii_flags&IAM_IT_WRITE);
1502         assert_corr(it_at_rec(it));
1503
1504         path = &it->ii_path;
1505         bh   = path->ip_leaf.il_bh;
1506         result = iam_txn_add(h, path, bh);
1507         if (result == 0) {
1508                 iam_it_reccpy(it, r);
1509                 result = iam_txn_dirty(h, path, bh);
1510         }
1511         return result;
1512 }
1513 EXPORT_SYMBOL(iam_it_rec_set);
1514
1515 /*
1516  * Return pointer to the index key under iterator.
1517  *
1518  * precondition:  it_state(it) == IAM_IT_ATTACHED ||
1519  *                it_state(it) == IAM_IT_SKEWED
1520  */
1521 static struct iam_ikey *iam_it_ikey_get(const struct iam_iterator *it,
1522                                         struct iam_ikey *ikey)
1523 {
1524         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1525                     it_state(it) == IAM_IT_SKEWED);
1526         assert_corr(it_at_rec(it));
1527         return iam_leaf_ikey(&it->ii_path.ip_leaf, ikey);
1528 }
1529
1530 /*
1531  * Return pointer to the key under iterator.
1532  *
1533  * precondition:  it_state(it) == IAM_IT_ATTACHED ||
1534  *                it_state(it) == IAM_IT_SKEWED
1535  */
1536 struct iam_key *iam_it_key_get(const struct iam_iterator *it)
1537 {
1538         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1539                     it_state(it) == IAM_IT_SKEWED);
1540         assert_corr(it_at_rec(it));
1541         return iam_leaf_key(&it->ii_path.ip_leaf);
1542 }
1543 EXPORT_SYMBOL(iam_it_key_get);
1544
1545 /*
1546  * Return size of key under iterator (in bytes)
1547  *
1548  * precondition:  it_state(it) == IAM_IT_ATTACHED ||
1549  *                it_state(it) == IAM_IT_SKEWED
1550  */
1551 int iam_it_key_size(const struct iam_iterator *it)
1552 {
1553         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1554                     it_state(it) == IAM_IT_SKEWED);
1555         assert_corr(it_at_rec(it));
1556         return iam_leaf_key_size(&it->ii_path.ip_leaf);
1557 }
1558 EXPORT_SYMBOL(iam_it_key_size);
1559
1560 /*
1561  * Insertion of new record. Interaction with jbd during non-trivial case (when
1562  * split happens) is as follows:
1563  *
1564  *  - new leaf node is involved into transaction by ldiskfs_append();
1565  *
1566  *  - old leaf node is involved into transaction by iam_add_rec();
1567  *
1568  *  - leaf where insertion point ends in, is marked dirty by iam_add_rec();
1569  *
1570  *  - leaf without insertion point is marked dirty (as @new_leaf) by
1571  *  iam_new_leaf();
1572  *
1573  *  - split index nodes are involved into transaction and marked dirty by
1574  *  split_index_node().
1575  *
1576  *  - "safe" index node (not split, but where the new pointer is inserted)
1577  *  is involved into transaction and marked dirty by split_index_node().
1578  *
1579  *  - index node where pointer to new leaf is inserted is involved into
1580  *  transaction by split_index_node() and marked dirty by iam_add_rec().
1581  *
1582  *  - inode is marked dirty by iam_add_rec().
1583  *
1584  */
1585
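/*
 * A condensed, illustrative outline of the above (not part of the original
 * code; the real flow lives in iam_add_rec() and split_index_node() below):
 */
#if 0
        /* hypothetical sketch of one split-and-insert, for orientation only */
        err = iam_txn_add(handle, path, leaf->il_bh);   /* old leaf joins txn */
        err = split_index_node(handle, path, &lh);      /* index nodes join txn,
                                                         * are split and dirtied */
        err = iam_new_leaf(handle, leaf);               /* ldiskfs_append() adds the
                                                         * new leaf; it and the inode
                                                         * are marked dirty */
        iam_leaf_rec_add(leaf, k, r);                   /* insert the record */
        err = iam_txn_dirty(handle, path, leaf->il_bh); /* leaf with insertion
                                                         * point marked dirty */
#endif
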
1586 static int iam_new_leaf(handle_t *handle, struct iam_leaf *leaf)
1587 {
1588         int err;
1589         iam_ptr_t blknr;
1590         struct buffer_head   *new_leaf;
1591         struct buffer_head   *old_leaf;
1592         struct iam_container *c;
1593         struct inode         *obj;
1594         struct iam_path      *path;
1595
1596         assert_inv(iam_leaf_check(leaf));
1597
1598         c = iam_leaf_container(leaf);
1599         path = leaf->il_path;
1600
1601         obj = c->ic_object;
1602         new_leaf = ldiskfs_append(handle, obj, (__u32 *)&blknr, &err);
1603         do_corr(schedule());
1604         if (new_leaf != NULL) {
1605                 struct dynlock_handle *lh;
1606
1607                 lh = iam_lock_htree(obj, blknr, DLT_WRITE);
1608                 do_corr(schedule());
1609                 if (lh != NULL) {
1610                         iam_leaf_ops(leaf)->init_new(c, new_leaf);
1611                         do_corr(schedule());
1612                         old_leaf = leaf->il_bh;
1613                         iam_leaf_split(leaf, &new_leaf, blknr);
1614                         if (old_leaf != leaf->il_bh) {
1615                                 /*
1616                                  * Switched to the new leaf.
1617                                  */
1618                                 iam_leaf_unlock(leaf);
1619                                 leaf->il_lock = lh;
1620                                 path->ip_frame->leaf = blknr;
1621                         } else
1622                                 iam_unlock_htree(obj, lh);
1623                         do_corr(schedule());
1624                         err = iam_txn_dirty(handle, path, new_leaf);
1625                         brelse(new_leaf);
1626                         if (err == 0)
1627                                 err = ldiskfs_mark_inode_dirty(handle, obj);
1628                         do_corr(schedule());
1629                 } else
1630                         err = -ENOMEM;
1631         }
1632         assert_inv(iam_leaf_check(leaf));
1633         assert_inv(iam_leaf_check(&iam_leaf_path(leaf)->ip_leaf));
1634         assert_inv(iam_path_check(iam_leaf_path(leaf)));
1635         return err;
1636 }
1637
1638 static inline void dx_set_limit(struct iam_entry *entries, unsigned value)
1639 {
1640         ((struct dx_countlimit *) entries)->limit = cpu_to_le16(value);
1641 }
1642
1643 static int iam_shift_entries(struct iam_path *path,
1644                          struct iam_frame *frame, unsigned count,
1645                          struct iam_entry *entries, struct iam_entry *entries2,
1646                          u32 newblock)
1647 {
1648         unsigned count1;
1649         unsigned count2;
1650         int delta;
1651
1652         struct iam_frame *parent = frame - 1;
1653         struct iam_ikey *pivot = iam_path_ikey(path, 3);
1654
1655         delta = dx_index_is_compat(path) ? 0 : +1;
1656
1657         count1 = count/2 + delta;
1658         count2 = count - count1;
1659         dx_get_ikey(path, iam_entry_shift(path, entries, count1), pivot);
1660
1661         dxtrace(printk("Split index %d/%d\n", count1, count2));
1662
1663         memcpy((char *) iam_entry_shift(path, entries2, delta),
1664                (char *) iam_entry_shift(path, entries, count1),
1665                count2 * iam_entry_size(path));
1666
1667         dx_set_count(entries2, count2 + delta);
1668         dx_set_limit(entries2, dx_node_limit(path));
1669
1670         /*
1671          * NOTE: very subtle piece of code. A competing dx_probe() may find the
1672          * 2nd level index in the root index; then we insert the new index here
1673          * and set the new count in that 2nd level index, so dx_probe() may see
1674          * a 2nd level index without the hash it looks for. The solution is to
1675          * re-check the root index after locking the just-found 2nd level index -bzzz
1676          */
1677         iam_insert_key_lock(path, parent, pivot, newblock);
1678
1679         /*
1680          * now old and new 2nd level index blocks contain all pointers, so
1681          * dx_probe() may find it in either of them.  That is OK -bzzz
1682          */
1683         iam_lock_bh(frame->bh);
1684         dx_set_count(entries, count1);
1685         iam_unlock_bh(frame->bh);
1686
1687         /*
1688          * now the old 2nd level index block points to the first half of the
1689          * leaves. it is important that dx_probe() checks the root index block
1690          * for changes under dx_lock_bh(frame->bh) -bzzz
1691          */
1692
1693         return count1;
1694 }
1695
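/*
 * A worked example of the split arithmetic above (hypothetical numbers, not
 * taken from the original code): with count == 100 entries and a non-compat
 * index (delta == +1), count1 == 100/2 + 1 == 51 entries stay in the old
 * block and the remaining count2 == 49 entries are copied into the new one;
 * the ikey of the first moved entry becomes the pivot inserted into the
 * parent, so that it and larger keys are routed to the new block.
 */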
1696
1697 int split_index_node(handle_t *handle, struct iam_path *path,
1698                      struct dynlock_handle **lh)
1699 {
1700
1701         struct iam_entry *entries;   /* old block contents */
1702         struct iam_entry *entries2;  /* new block contents */
1703         struct iam_frame *frame, *safe;
1704         struct buffer_head *bh_new[DX_MAX_TREE_HEIGHT] = {0};
1705         u32 newblock[DX_MAX_TREE_HEIGHT] = {0};
1706         struct dynlock_handle *lock[DX_MAX_TREE_HEIGHT] = {NULL,};
1707         struct dynlock_handle *new_lock[DX_MAX_TREE_HEIGHT] = {NULL,};
1708         struct inode *dir = iam_path_obj(path);
1709         struct iam_descr *descr;
1710         int nr_splet;
1711         int i, err;
1712
1713         descr = iam_path_descr(path);
1714         /*
1715          * Algorithm below depends on this.
1716          */
1717         assert_corr(dx_root_limit(path) < dx_node_limit(path));
1718
1719         frame = path->ip_frame;
1720         entries = frame->entries;
1721
1722         /*
1723          * Tall-tree handling: we might have to split multiple index blocks
1724          * all the way up to tree root. Tricky point here is error handling:
1725          * to avoid complicated undo/rollback we
1726          *
1727          *   - first allocate all necessary blocks
1728          *
1729          *   - insert pointers into them atomically.
1730          */
1731
1732         /*
1733          * Locking: leaf is already locked. htree-locks are acquired on all
1734          * index nodes that require split bottom-to-top, on the "safe" node,
1735          * and on all new nodes
1736          */
1737
1738         dxtrace(printk("using %u of %u node entries\n",
1739                        dx_get_count(entries), dx_get_limit(entries)));
1740
1741         /* What levels need split? */
1742         for (nr_splet = 0; frame >= path->ip_frames &&
1743              dx_get_count(frame->entries) == dx_get_limit(frame->entries);
1744              --frame, ++nr_splet) {
1745                 do_corr(schedule());
1746                 if (nr_splet == DX_MAX_TREE_HEIGHT) {
1747                         /*
1748                          * CWARN(dir->i_sb, __FUNCTION__,
1749                          *       "Directory index full!\n");
1750                          */
1751                         err = -ENOSPC;
1752                         goto cleanup;
1753                 }
1754         }
1755
1756         safe = frame;
1757
1758         /*
1759          * Lock all nodes, bottom to top.
1760          */
1761         for (frame = path->ip_frame, i = nr_splet; i >= 0; --i, --frame) {
1762                 do_corr(schedule());
1763                 lock[i] = iam_lock_htree(dir, frame->curidx, DLT_WRITE);
1764                 if (lock[i] == NULL) {
1765                         err = -ENOMEM;
1766                         goto cleanup;
1767                 }
1768         }
1769
1770         /*
1771          * Check for concurrent index modification.
1772          */
1773         err = iam_check_full_path(path, 1);
1774         if (err)
1775                 goto cleanup;
1776         /*
1777          * And check that the same number of nodes is to be split.
1778          */
1779         for (i = 0, frame = path->ip_frame; frame >= path->ip_frames &&
1780              dx_get_count(frame->entries) == dx_get_limit(frame->entries);
1781              --frame, ++i) {
1782                 ;
1783         }
1784         if (i != nr_splet) {
1785                 err = -EAGAIN;
1786                 goto cleanup;
1787         }
1788
1789         /* Go back down, allocating blocks, locking them, and adding into
1790          * transaction... */
1791         for (frame = safe + 1, i = 0; i < nr_splet; ++i, ++frame) {
1792                 bh_new[i] = ldiskfs_append (handle, dir, &newblock[i], &err);
1793                 do_corr(schedule());
1794                 if (!bh_new[i] ||
1795                     descr->id_ops->id_node_init(path->ip_container,
1796                                                 bh_new[i], 0) != 0)
1797                         goto cleanup;
1798                 new_lock[i] = iam_lock_htree(dir, newblock[i], DLT_WRITE);
1799                 if (new_lock[i] == NULL) {
1800                         err = -ENOMEM;
1801                         goto cleanup;
1802                 }
1803                 do_corr(schedule());
1804                 BUFFER_TRACE(frame->bh, "get_write_access");
1805                 err = ldiskfs_journal_get_write_access(handle, frame->bh);
1806                 if (err)
1807                         goto journal_error;
1808         }
1809         /* Add "safe" node to transaction too */
1810         if (safe + 1 != path->ip_frames) {
1811                 do_corr(schedule());
1812                 err = ldiskfs_journal_get_write_access(handle, safe->bh);
1813                 if (err)
1814                         goto journal_error;
1815         }
1816
1817         /* Go through nodes once more, inserting pointers */
1818         for (frame = safe + 1, i = 0; i < nr_splet; ++i, ++frame) {
1819                 unsigned count;
1820                 int idx;
1821                 struct buffer_head *bh2;
1822                 struct buffer_head *bh;
1823
1824                 entries = frame->entries;
1825                 count = dx_get_count(entries);
1826                 idx = iam_entry_diff(path, frame->at, entries);
1827
1828                 bh2 = bh_new[i];
1829                 entries2 = dx_get_entries(path, bh2->b_data, 0);
1830
1831                 bh = frame->bh;
1832                 if (frame == path->ip_frames) {
1833                         /* splitting root node. Tricky point:
1834                          *
1835                          * In the "normal" B-tree we'd split root *and* add
1836                          * new root to the tree with pointers to the old root
1837                          * and its sibling (thus introducing two new nodes).
1838                          *
1839                          * In htree it's enough to add one node, because
1840                          * capacity of the root node is smaller than that of
1841                          * non-root one.
1842                          */
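                        /*
                         * (This relies on dx_root_limit() < dx_node_limit(),
                         * asserted at the top of this function: all entries of
                         * a full root always fit into one freshly allocated
                         * non-root block.)
                         */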
1843                         struct iam_frame *frames;
1844                         struct iam_entry *next;
1845
1846                         assert_corr(i == 0);
1847
1848                         do_corr(schedule());
1849
1850                         frames = path->ip_frames;
1851                         memcpy((char *) entries2, (char *) entries,
1852                                count * iam_entry_size(path));
1853                         dx_set_limit(entries2, dx_node_limit(path));
1854
1855                         /* Set up root */
1856                         iam_lock_bh(frame->bh);
1857                         next = descr->id_ops->id_root_inc(path->ip_container,
1858                                                           path, frame);
1859                         dx_set_block(path, next, newblock[0]);
1860                         iam_unlock_bh(frame->bh);
1861
1862                         do_corr(schedule());
1863                         /* Shift frames in the path */
1864                         memmove(frames + 2, frames + 1,
1865                                 (sizeof path->ip_frames) - 2 * sizeof frames[0]);
1866                         /* Add new access path frame */
1867                         frames[1].at = iam_entry_shift(path, entries2, idx);
1868                         frames[1].entries = entries = entries2;
1869                         frames[1].bh = bh2;
1870                         assert_inv(dx_node_check(path, frame));
1871                         ++ path->ip_frame;
1872                         ++ frame;
1873                         assert_inv(dx_node_check(path, frame));
1874                         bh_new[0] = NULL; /* buffer head is "consumed" */
1875                         err = ldiskfs_journal_get_write_access(handle, bh2);
1876                         if (err)
1877                                 goto journal_error;
1878                         do_corr(schedule());
1879                 } else {
1880                         /* splitting non-root index node. */
1881                         struct iam_frame *parent = frame - 1;
1882
1883                         do_corr(schedule());
1884                         count = iam_shift_entries(path, frame, count,
1885                                               entries, entries2, newblock[i]);
1886                         /* Which index block gets the new entry? */
1887                         if (idx >= count) {
1888                                 int d = dx_index_is_compat(path) ? 0 : +1;
1889
1890                                 frame->at = iam_entry_shift(path, entries2,
1891                                                             idx - count + d);
1892                                 frame->entries = entries = entries2;
1893                                 frame->curidx = newblock[i];
1894                                 swap(frame->bh, bh2);
1895                                 assert_corr(lock[i + 1] != NULL);
1896                                 assert_corr(new_lock[i] != NULL);
1897                                 swap(lock[i + 1], new_lock[i]);
1898                                 bh_new[i] = bh2;
1899                                 parent->at = iam_entry_shift(path,
1900                                                              parent->at, +1);
1901                         }
1902                         assert_inv(dx_node_check(path, frame));
1903                         assert_inv(dx_node_check(path, parent));
1904                         dxtrace(dx_show_index ("node", frame->entries));
1905                         dxtrace(dx_show_index ("node",
1906                                ((struct dx_node *) bh2->b_data)->entries));
1907                         err = ldiskfs_journal_dirty_metadata(handle, bh2);
1908                         if (err)
1909                                 goto journal_error;
1910                         do_corr(schedule());
1911                         err = ldiskfs_journal_dirty_metadata(handle, parent->bh);
1912                         if (err)
1913                                 goto journal_error;
1914                 }
1915                 do_corr(schedule());
1916                 err = ldiskfs_journal_dirty_metadata(handle, bh);
1917                 if (err)
1918                         goto journal_error;
1919         }
1920         /*
1921          * This function was called to make insertion of new leaf
1922          * possible. Check that it fulfilled its obligations.
1923          */
1924         assert_corr(dx_get_count(path->ip_frame->entries) <
1925                     dx_get_limit(path->ip_frame->entries));
1926         assert_corr(lock[nr_splet] != NULL);
1927         *lh = lock[nr_splet];
1928         lock[nr_splet] = NULL;
1929         if (nr_splet > 0) {
1930                 /*
1931                  * Log ->i_size modification.
1932                  */
1933                 err = ldiskfs_mark_inode_dirty(handle, dir);
1934                 if (err)
1935                         goto journal_error;
1936         }
1937         goto cleanup;
1938 journal_error:
1939         ldiskfs_std_error(dir->i_sb, err);
1940
1941 cleanup:
1942         iam_unlock_array(dir, lock);
1943         iam_unlock_array(dir, new_lock);
1944
1945         assert_corr(err || iam_frame_is_locked(path, path->ip_frame));
1946
1947         do_corr(schedule());
1948         for (i = 0; i < ARRAY_SIZE(bh_new); ++i) {
1949                 if (bh_new[i] != NULL)
1950                         brelse(bh_new[i]);
1951         }
1952         return err;
1953 }
1954
1955 static int iam_add_rec(handle_t *handle, struct iam_iterator *it,
1956                        struct iam_path *path,
1957                        const struct iam_key *k, const struct iam_rec *r)
1958 {
1959         int err;
1960         struct iam_leaf *leaf;
1961
1962         leaf = &path->ip_leaf;
1963         assert_inv(iam_leaf_check(leaf));
1964         assert_inv(iam_path_check(path));
1965         err = iam_txn_add(handle, path, leaf->il_bh);
1966         if (err == 0) {
1967                 do_corr(schedule());
1968                 if (!iam_leaf_can_add(leaf, k, r)) {
1969                         struct dynlock_handle *lh = NULL;
1970
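                        /*
                         * The split below can race with other threads
                         * modifying the index: split_index_node() returns
                         * -EAGAIN when it detects a concurrent modification.
                         * In that case the path is dropped, the iterator is
                         * re-attached from scratch and the split is retried
                         * (err == +1), unless the key has meanwhile been
                         * inserted by somebody else (-EEXIST).
                         */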
1971                         do {
1972                                 assert_corr(lh == NULL);
1973                                 do_corr(schedule());
1974                                 err = split_index_node(handle, path, &lh);
1975                                 if (err == -EAGAIN) {
1976                                         assert_corr(lh == NULL);
1977
1978                                         iam_path_fini(path);
1979                                         it->ii_state = IAM_IT_DETACHED;
1980
1981                                         do_corr(schedule());
1982                                         err = iam_it_get_exact(it, k);
1983                                         if (err == -ENOENT)
1984                                                 err = +1; /* repeat split */
1985                                         else if (err == 0)
1986                                                 err = -EEXIST;
1987                                 }
1988                         } while (err > 0);
1989                         assert_inv(iam_path_check(path));
1990                         if (err == 0) {
1991                                 assert_corr(lh != NULL);
1992                                 do_corr(schedule());
1993                                 err = iam_new_leaf(handle, leaf);
1994                                 if (err == 0)
1995                                         err = iam_txn_dirty(handle, path,
1996                                                             path->ip_frame->bh);
1997                         }
1998                         iam_unlock_htree(iam_path_obj(path), lh);
1999                         do_corr(schedule());
2000                 }
2001                 if (err == 0) {
2002                         iam_leaf_rec_add(leaf, k, r);
2003                         err = iam_txn_dirty(handle, path, leaf->il_bh);
2004                 }
2005         }
2006         assert_inv(iam_leaf_check(leaf));
2007         assert_inv(iam_leaf_check(&path->ip_leaf));
2008         assert_inv(iam_path_check(path));
2009         return err;
2010 }
2011
2012 /*
2013  * Insert new record with key @k and contents from @r, shifting records to the
2014  * right. On success, iterator is positioned on the newly inserted record.
2015  *
2016  * precondition: it->ii_flags&IAM_IT_WRITE &&
2017  *               (it_state(it) == IAM_IT_ATTACHED ||
2018  *                it_state(it) == IAM_IT_SKEWED) &&
2019  *               ergo(it_state(it) == IAM_IT_ATTACHED,
2020  *                    it_keycmp(it, k) <= 0) &&
2021  *               ergo(it_before(it), it_keycmp(it, k) > 0);
2022  * postcondition: ergo(result == 0,
2023  *                     it_state(it) == IAM_IT_ATTACHED &&
2024  *                     it_keycmp(it, k) == 0 &&
2025  *                     !memcmp(iam_it_rec_get(it), r, ...))
2026  */
2027 int iam_it_rec_insert(handle_t *h, struct iam_iterator *it,
2028                       const struct iam_key *k, const struct iam_rec *r)
2029 {
2030         int result;
2031         struct iam_path *path;
2032
2033         path = &it->ii_path;
2034
2035         assert_corr(it->ii_flags&IAM_IT_WRITE);
2036         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
2037                     it_state(it) == IAM_IT_SKEWED);
2038         assert_corr(ergo(it_state(it) == IAM_IT_ATTACHED,
2039                          it_keycmp(it, k) <= 0));
2040         assert_corr(ergo(it_before(it), it_keycmp(it, k) > 0));
2041         result = iam_add_rec(h, it, path, k, r);
2042         if (result == 0)
2043                 it->ii_state = IAM_IT_ATTACHED;
2044         assert_corr(ergo(result == 0,
2045                          it_state(it) == IAM_IT_ATTACHED &&
2046                          it_keycmp(it, k) == 0));
2047         return result;
2048 }
2049 EXPORT_SYMBOL(iam_it_rec_insert);
2050
2051 /*
2052  * Delete record under iterator.
2053  *
2054  * precondition:  it_state(it) == IAM_IT_ATTACHED &&
2055  *                it->ii_flags&IAM_IT_WRITE &&
2056  *                it_at_rec(it)
2057  * postcondition: it_state(it) == IAM_IT_ATTACHED ||
2058  *                it_state(it) == IAM_IT_DETACHED
2059  */
2060 int iam_it_rec_delete(handle_t *h, struct iam_iterator *it)
2061 {
2062         int result;
2063         struct iam_leaf *leaf;
2064         struct iam_path *path;
2065
2066         assert_corr(it_state(it) == IAM_IT_ATTACHED &&
2067                     it->ii_flags&IAM_IT_WRITE);
2068         assert_corr(it_at_rec(it));
2069
2070         path = &it->ii_path;
2071         leaf = &path->ip_leaf;
2072
2073         assert_inv(iam_leaf_check(leaf));
2074         assert_inv(iam_path_check(path));
2075
2076         result = iam_txn_add(h, path, leaf->il_bh);
2077         /*
2078          * no compaction for now.
2079          */
2080         if (result == 0) {
2081                 iam_rec_del(leaf, it->ii_flags&IAM_IT_MOVE);
2082                 result = iam_txn_dirty(h, path, leaf->il_bh);
2083                 if (result == 0 && iam_leaf_at_end(leaf) &&
2084                     it->ii_flags&IAM_IT_MOVE) {
2085                         result = iam_it_next(it);
2086                         if (result > 0)
2087                                 result = 0;
2088                 }
2089         }
2090         assert_inv(iam_leaf_check(leaf));
2091         assert_inv(iam_path_check(path));
2092         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
2093                     it_state(it) == IAM_IT_DETACHED);
2094         return result;
2095 }
2096 EXPORT_SYMBOL(iam_it_rec_delete);
2097
2098 /*
2099  * Convert iterator to cookie.
2100  *
2101  * precondition:  it_state(it) == IAM_IT_ATTACHED &&
2102  *                iam_path_descr(it->ii_path)->id_ikey_size <= sizeof(iam_pos_t)
2103  * postcondition: it_state(it) == IAM_IT_ATTACHED
2104  */
2105 iam_pos_t iam_it_store(const struct iam_iterator *it)
2106 {
2107         iam_pos_t result;
2108
2109         assert_corr(it_state(it) == IAM_IT_ATTACHED);
2110         assert_corr(it_at_rec(it));
2111         assert_corr(iam_it_container(it)->ic_descr->id_ikey_size <=
2112                     sizeof result);
2113
2114         result = 0;
2115         return *(iam_pos_t *)iam_it_ikey_get(it, (void *)&result);
2116 }
2117 EXPORT_SYMBOL(iam_it_store);
2118
2119 /*
2120  * Restore iterator from cookie.
2121  *
2122  * precondition:  it_state(it) == IAM_IT_DETACHED && it->ii_flags&IAM_IT_MOVE &&
2123  *                iam_path_descr(it->ii_path)->id_ikey_size <= sizeof(iam_pos_t)
2124  * postcondition: ergo(result == 0, it_state(it) == IAM_IT_ATTACHED &&
2125  *                                  iam_it_store(it) == pos)
2126  */
2127 int iam_it_load(struct iam_iterator *it, iam_pos_t pos)
2128 {
2129         assert_corr(it_state(it) == IAM_IT_DETACHED &&
2130                     it->ii_flags&IAM_IT_MOVE);
2131         assert_corr(iam_it_container(it)->ic_descr->id_ikey_size <= sizeof pos);
2132         return iam_it_iget(it, (struct iam_ikey *)&pos);
2133 }
2134 EXPORT_SYMBOL(iam_it_load);
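/*
 * Illustrative sketch (not part of the original code) of carrying a scan
 * position across detach/re-attach as an iam_pos_t cookie; it assumes the
 * caller owns an initialized container @c and path descriptor @pd.
 */
#if 0
        struct iam_iterator it;
        iam_pos_t cookie;       /* obtained from an earlier iam_it_store() */

        iam_it_init(&it, c, IAM_IT_MOVE, pd);
        if (iam_it_load(&it, cookie) == 0) {
                /* ... consume iam_it_rec_get(&it) ... */
                if (iam_it_next(&it) == 0)
                        cookie = iam_it_store(&it); /* remember new position */
        }
        iam_it_put(&it);
        iam_it_fini(&it);
#endif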
2135
2136 /***********************************************************************/
2137 /* invariants                                                          */
2138 /***********************************************************************/
2139
2140 static inline int ptr_inside(void *base, size_t size, void *ptr)
2141 {
2142         return (base <= ptr) && (ptr < base + size);
2143 }
2144
2145 int iam_frame_invariant(struct iam_frame *f)
2146 {
2147         return
2148                 (f->bh != NULL &&
2149                 f->bh->b_data != NULL &&
2150                 ptr_inside(f->bh->b_data, f->bh->b_size, f->entries) &&
2151                 ptr_inside(f->bh->b_data, f->bh->b_size, f->at) &&
2152                 f->entries <= f->at);
2153 }
2154 int iam_leaf_invariant(struct iam_leaf *l)
2155 {
2156         return
2157                 l->il_bh != NULL &&
2158                 l->il_bh->b_data != NULL &&
2159                 ptr_inside(l->il_bh->b_data, l->il_bh->b_size, l->il_entries) &&
2160                 ptr_inside(l->il_bh->b_data, l->il_bh->b_size, l->il_at) &&
2161                 l->il_entries <= l->il_at;
2162 }
2163
2164 int iam_path_invariant(struct iam_path *p)
2165 {
2166         int i;
2167
2168         if (p->ip_container == NULL ||
2169             p->ip_indirect < 0 || p->ip_indirect > DX_MAX_TREE_HEIGHT - 1 ||
2170             p->ip_frame != p->ip_frames + p->ip_indirect ||
2171             !iam_leaf_invariant(&p->ip_leaf))
2172                 return 0;
2173         for (i = 0; i < ARRAY_SIZE(p->ip_frames); ++i) {
2174                 if (i <= p->ip_indirect) {
2175                         if (!iam_frame_invariant(&p->ip_frames[i]))
2176                                 return 0;
2177                 }
2178         }
2179         return 1;
2180 }
2181
2182 int iam_it_invariant(struct iam_iterator *it)
2183 {
2184         return
2185                 (it->ii_state == IAM_IT_DETACHED ||
2186                  it->ii_state == IAM_IT_ATTACHED ||
2187                  it->ii_state == IAM_IT_SKEWED) &&
2188                 !(it->ii_flags & ~(IAM_IT_MOVE | IAM_IT_WRITE)) &&
2189                 ergo(it->ii_state == IAM_IT_ATTACHED ||
2190                      it->ii_state == IAM_IT_SKEWED,
2191                      iam_path_invariant(&it->ii_path) &&
2192                      equi(it_at_rec(it), it->ii_state == IAM_IT_SKEWED));
2193 }
2194
2195 /*
2196  * Search container @c for record with key @k. If record is found, its data
2197  * are moved into @r.
2198  *
2199  * Return values: 0: found, -ENOENT: not-found, -ve: error
2200  */
2201 int iam_lookup(struct iam_container *c, const struct iam_key *k,
2202                struct iam_rec *r, struct iam_path_descr *pd)
2203 {
2204         struct iam_iterator it;
2205         int result;
2206
2207         iam_it_init(&it, c, 0, pd);
2208
2209         result = iam_it_get_exact(&it, k);
2210         if (result == 0)
2211                 /*
2212                  * record with required key found, copy it into user buffer
2213                  */
2214                 iam_reccpy(&it.ii_path.ip_leaf, r);
2215         iam_it_put(&it);
2216         iam_it_fini(&it);
2217         return result;
2218 }
2219 EXPORT_SYMBOL(iam_lookup);
2220
2221 /*
2222  * Insert new record @r with key @k into container @c (within context of
2223  * transaction @h).
2224  *
2225  * Return values: 0: success, -ve: error, including -EEXIST when record with
2226  * given key is already present.
2227  *
2228  * postcondition: ergo(result == 0 || result == -EEXIST,
2229  *                                  iam_lookup(c, k, r2) == 0);
2230  */
2231 int iam_insert(handle_t *h, struct iam_container *c, const struct iam_key *k,
2232                const struct iam_rec *r, struct iam_path_descr *pd)
2233 {
2234         struct iam_iterator it;
2235         int result;
2236
2237         iam_it_init(&it, c, IAM_IT_WRITE, pd);
2238
2239         result = iam_it_get_exact(&it, k);
2240         if (result == -ENOENT)
2241                 result = iam_it_rec_insert(h, &it, k, r);
2242         else if (result == 0)
2243                 result = -EEXIST;
2244         iam_it_put(&it);
2245         iam_it_fini(&it);
2246         return result;
2247 }
2248 EXPORT_SYMBOL(iam_insert);
2249
2250 /*
2251  * Update record with the key @k in container @c (within context of
2252  * transaction @h), new record is given by @r.
2253  *
2254  * Return values: 0: success, -ve: error, including -ENOENT if no record with
2255  * the given key found.
2256  */
2257 int iam_update(handle_t *h, struct iam_container *c, const struct iam_key *k,
2258                const struct iam_rec *r, struct iam_path_descr *pd)
2259 {
2260         struct iam_iterator it;
2261         int result;
2262
2263         iam_it_init(&it, c, IAM_IT_WRITE, pd);
2264
2265         result = iam_it_get_exact(&it, k);
2266         if (result == 0)
2267                 iam_it_rec_set(h, &it, r);
2268         iam_it_put(&it);
2269         iam_it_fini(&it);
2270         return result;
2271 }
2272 EXPORT_SYMBOL(iam_update);
2273
2274 /*
2275  * Delete existing record with key @k.
2276  *
2277  * Return values: 0: success, -ENOENT: not-found, -ve: other error.
2278  *
2279  * postcondition: ergo(result == 0 || result == -ENOENT,
2280  *                                 !iam_lookup(c, k, *));
2281  */
2282 int iam_delete(handle_t *h, struct iam_container *c, const struct iam_key *k,
2283                struct iam_path_descr *pd)
2284 {
2285         struct iam_iterator it;
2286         int result;
2287
2288         iam_it_init(&it, c, IAM_IT_WRITE, pd);
2289
2290         result = iam_it_get_exact(&it, k);
2291         if (result == 0)
2292                 iam_it_rec_delete(h, &it);
2293         iam_it_put(&it);
2294         iam_it_fini(&it);
2295         return result;
2296 }
2297 EXPORT_SYMBOL(iam_delete);
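/*
 * Illustrative sketch (not part of the original code) tying the high-level
 * calls above together; the handle @h, container @c, path descriptor @pd and
 * the key/record buffers @k, @r, @r2 are assumed to be set up by the caller.
 */
#if 0
        err = iam_insert(h, c, k, r, pd);
        if (err == -EEXIST)                       /* key already present ...   */
                err = iam_update(h, c, k, r, pd); /* ... so overwrite in place */

        err = iam_lookup(c, k, r2, pd);           /* 0: found, -ENOENT: absent */
        if (err == 0)
                err = iam_delete(h, c, k, pd);
#endif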
2298
2299 int iam_root_limit(int rootgap, int blocksize, int size)
2300 {
2301         int limit;
2302         int nlimit;
2303
2304         limit = (blocksize - rootgap) / size;
2305         nlimit = blocksize / size;
2306         if (limit == nlimit)
2307                 limit--;
2308         return limit;
2309 }
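
/*
 * For instance (hypothetical numbers, not from the original code): with a
 * 4096-byte block, 24-byte entries and an 8-byte root gap, both
 * (4096 - 8) / 24 and 4096 / 24 evaluate to 170, so the limit is decremented
 * to 169, keeping the root limit strictly below the full-block limit, which
 * is what the assert_corr(dx_root_limit(path) < dx_node_limit(path)) in
 * split_index_node() above depends on.
 */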