1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see [sun.com URL with a
18  * copy of GPLv2].
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2011, 2014, Intel Corporation.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * iam.c
37  * Top-level entry points into iam module
38  *
39  * Author: Wang Di <wangdi@clusterfs.com>
40  * Author: Nikita Danilov <nikita@clusterfs.com>
41  */
42
43 /*
44  * iam: big theory statement.
45  *
46  * iam (Index Access Module) is a module providing abstraction of persistent
47  * transactional container on top of generalized ldiskfs htree.
48  *
49  * iam supports:
50  *
51  *     - key, pointer, and record size specifiable per container.
52  *
53  *     - trees taller than 2 index levels.
54  *
55  *     - read/write to existing ldiskfs htree directories as iam containers.
56  *
57  * iam container is a tree, consisting of leaf nodes containing keys and
58  * records stored in this container, and index nodes, containing keys and
59  * pointers to leaf or index nodes.
60  *
61  * iam does not work with keys directly; instead it calls a user-supplied key
62  * comparison function (->dpo_keycmp()).
63  *
64  * Pointers are (currently) interpreted as logical offsets (measured in
65  * blocks) within the underlying flat file on top of which the iam tree lives.
66  *
67  * On-disk format:
68  *
69  * iam mostly tries to reuse existing htree formats.
70  *
71  * Format of index node:
72  *
73  * +-----+-------+-------+-------+------+-------+------------+
74  * |     | count |       |       |      |       |            |
75  * | gap |   /   | entry | entry | .... | entry | free space |
76  * |     | limit |       |       |      |       |            |
77  * +-----+-------+-------+-------+------+-------+------------+
78  *
79  *       gap           this part of node is never accessed by iam code. It
80  *                     exists for binary compatibility with ldiskfs htree (that,
81  *                     in turn, stores fake struct ext2_dirent for ext2
82  *                     compatibility), and to keep some unspecified per-node
83  *                     data. Gap can be different for root and non-root index
84  *                     nodes. Gap size can be specified for each container
85  *                     (gap of 0 is allowed).
86  *
87  *       count/limit   current number of entries in this node, and the maximal
88  *                     number of entries that can fit into node. count/limit
89  *                     has the same size as entry, and is itself counted in
90  *                     count.
91  *
92  *       entry         index entry: consists of a key immediately followed by
93  *                     a pointer to a child node. Size of a key and size of a
94  *                     pointer depends on container. Entry has neither
95  *                     alignment nor padding.
96  *
97  *       free space    portion of the node to which new entries are added
98  *
99  * Entries in index node are sorted by their key value.
100  *
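 * For example (hypothetical sizes, not mandated by the format): with 8-byte
 * keys and 4-byte pointers every entry occupies 12 bytes, so an index node
 * with a 32-byte gap in a 4096-byte block can hold up to
 * (4096 - 32) / 12 = 338 entries, one of which is the count/limit slot.
 *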
101  * Format of a leaf node is not specified. Generic iam code accesses leaf
102  * nodes through ->id_leaf methods in struct iam_descr.
103  *
104  * The IAM root block is a special node, which contains the IAM descriptor.
105  * Its on-disk format is:
106  *
107  * +---------+-------+--------+---------+-------+------+-------+------------+
108  * |IAM desc | count |  idle  |         |       |      |       |            |
109  * |(fix/var)|   /   | blocks | padding | entry | .... | entry | free space |
110  * |         | limit |        |         |       |      |       |            |
111  * +---------+-------+--------+---------+-------+------+-------+------------+
112  *
113  * The padding length is calculated with the parameters in the IAM descriptor.
114  *
115  * The field "idle_blocks" is used to record empty leaf nodes, which have not
116  * been released even though all entries in them have been removed. Ideally,
117  * such idle blocks would be reused whenever new leaf nodes are needed for
118  * new entries, but that requires the IAM hash functions to map the new
119  * entries onto exactly these idle blocks. Unfortunately, it is hard to design
120  * hash functions for such a clever mapping, especially without hurting the
121  * insert/lookup performance.
122  *
123  * Instead, the IAM recycles empty leaf nodes into a per-file pool of idle
124  * blocks. When a new leaf node is needed, a block is taken from this pool
125  * first, regardless of where the IAM hash functions would map the new
126  * entry.
127  *
128  * The idle blocks pool is organized as a series of tables, and each table
129  * can be described as following (on-disk format):
130  *
131  * +---------+---------+---------+---------+------+---------+-------+
132  * |  magic  |  count  |  next   |  logic  |      |  logic  | free  |
133  * |(16 bits)|(16 bits)|  table  |  blk #  | .... |  blk #  | space |
134  * |         |         |(32 bits)|(32 bits)|      |(32 bits)|       |
135  * +---------+---------+---------+---------+------+---------+-------+
136  *
137  * The logic blk# of the first table is stored in the "idle_blocks" field of
138  * the root node.
138  *
139  */
140
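/*
 * Illustrative sketch only: the idle-blocks table header described above maps
 * naturally onto a structure like the one below.  The canonical definition is
 * struct iam_idle_head (used by iam_load_idle_blocks() below); field names
 * other than iih_magic are assumptions here, shown only to visualize the
 * layout:
 *
 *     struct iam_idle_head {
 *             __le16 iih_magic;     magic, IAM_IDLE_HEADER_MAGIC
 *             __le16 iih_count;     how many idle blocks this table holds
 *             __le32 iih_next;      logic blk# of the next table, 0 if none
 *             __le32 iih_blks[0];   idle block numbers, filling the free space
 *     };
 */
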
141 #include <linux/module.h>
142 #include <linux/fs.h>
143 #include <linux/pagemap.h>
144 #include <linux/time.h>
145 #include <linux/fcntl.h>
146 #include <linux/stat.h>
147 #include <linux/string.h>
148 #include <linux/quotaops.h>
149 #include <linux/buffer_head.h>
150
151 #include <ldiskfs/ldiskfs.h>
152 #include <ldiskfs/xattr.h>
153 #undef ENTRY
154
155 #include "osd_internal.h"
156
157 #include <ldiskfs/acl.h>
158
159 /*
160  * List of all registered formats.
161  *
162  * No locking. Callers synchronize.
163  */
164 static struct list_head iam_formats = LIST_HEAD_INIT(iam_formats);
165
166 void iam_format_register(struct iam_format *fmt)
167 {
168         list_add(&fmt->if_linkage, &iam_formats);
169 }
170
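/*
 * A format implementation registers itself once at initialization time.
 * Illustrative sketch with a hypothetical format "my_format" (the real
 * examples are iam_lvar_format_init() and iam_lfix_format_init(), invoked
 * from iam_format_guess() below):
 *
 *     static struct iam_format my_format = {
 *             .if_guess = my_format_guess,
 *     };
 *
 *     void my_format_init(void)
 *     {
 *             iam_format_register(&my_format);
 *     }
 */
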
171 static struct buffer_head *
172 iam_load_idle_blocks(struct iam_container *c, iam_ptr_t blk)
173 {
174         struct inode *inode = c->ic_object;
175         struct iam_idle_head *head;
176         struct buffer_head *bh;
177         int err;
178
179         LASSERT(mutex_is_locked(&c->ic_idle_mutex));
180
181         if (blk == 0)
182                 return NULL;
183
184         bh = ldiskfs_bread(NULL, inode, blk, 0, &err);
185         if (bh == NULL) {
186                 CERROR("%.16s: cannot load idle blocks, blk = %u, err = %d\n",
187                        LDISKFS_SB(inode->i_sb)->s_es->s_volume_name, blk, err);
188                 c->ic_idle_failed = 1;
189                 return ERR_PTR(err);
190         }
191
192         head = (struct iam_idle_head *)(bh->b_data);
193         if (le16_to_cpu(head->iih_magic) != IAM_IDLE_HEADER_MAGIC) {
194                 CERROR("%.16s: invalid idle block head, blk = %u, magic = %d\n",
195                        LDISKFS_SB(inode->i_sb)->s_es->s_volume_name, blk,
196                        le16_to_cpu(head->iih_magic));
197                 brelse(bh);
198                 c->ic_idle_failed = 1;
199                 return ERR_PTR(-EBADF);
200         }
201
202         return bh;
203 }
204
205 /*
206  * Determine format of given container. This is done by scanning list of
207  * registered formats and calling ->if_guess() method of each in turn.
208  */
209 static int iam_format_guess(struct iam_container *c)
210 {
211         int result;
212         struct iam_format *fmt;
213
214         /*
215          * XXX temporary initialization hook.
216          */
217         {
218                 static int initialized = 0;
219
220                 if (!initialized) {
221                         iam_lvar_format_init();
222                         iam_lfix_format_init();
223                         initialized = 1;
224                 }
225         }
226
227         result = -ENOENT;
228         list_for_each_entry(fmt, &iam_formats, if_linkage) {
229                 result = fmt->if_guess(c);
230                 if (result == 0)
231                         break;
232         }
233
234         if (result == 0) {
235                 struct buffer_head *bh;
236                 __u32 *idle_blocks;
237
238                 LASSERT(c->ic_root_bh != NULL);
239
240                 idle_blocks = (__u32 *)(c->ic_root_bh->b_data +
241                                         c->ic_descr->id_root_gap +
242                                         sizeof(struct dx_countlimit));
243                 mutex_lock(&c->ic_idle_mutex);
244                 bh = iam_load_idle_blocks(c, le32_to_cpu(*idle_blocks));
245                 if (bh != NULL && IS_ERR(bh))
246                         result = PTR_ERR(bh);
247                 else
248                         c->ic_idle_bh = bh;
249                 mutex_unlock(&c->ic_idle_mutex);
250         }
251
252         return result;
253 }
254
255 /*
256  * Initialize container @c.
257  */
258 int iam_container_init(struct iam_container *c,
259                        struct iam_descr *descr, struct inode *inode)
260 {
261         memset(c, 0, sizeof *c);
262         c->ic_descr  = descr;
263         c->ic_object = inode;
264         init_rwsem(&c->ic_sem);
265         dynlock_init(&c->ic_tree_lock);
266         mutex_init(&c->ic_idle_mutex);
267         return 0;
268 }
269
270 /*
271  * Determine container format.
272  */
273 int iam_container_setup(struct iam_container *c)
274 {
275         return iam_format_guess(c);
276 }
277
278 /*
279  * Finalize container @c, release all resources.
280  */
281 void iam_container_fini(struct iam_container *c)
282 {
283         brelse(c->ic_idle_bh);
284         c->ic_idle_bh = NULL;
285         brelse(c->ic_root_bh);
286         c->ic_root_bh = NULL;
287 }
288
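/*
 * Typical container life cycle, as an illustrative sketch (error handling
 * elided; @descr, @inode and the index operations behind them are set up by
 * the caller):
 *
 *     struct iam_container c;
 *
 *     iam_container_init(&c, descr, inode);    bind descriptor and object
 *     iam_container_setup(&c);                 detect the on-disk format
 *     ... lookups and updates through iterators ...
 *     iam_container_fini(&c);                  drop root and idle buffers
 */
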
289 void iam_path_init(struct iam_path *path, struct iam_container *c,
290                    struct iam_path_descr *pd)
291 {
292         memset(path, 0, sizeof *path);
293         path->ip_container = c;
294         path->ip_frame = path->ip_frames;
295         path->ip_data = pd;
296         path->ip_leaf.il_path = path;
297 }
298
299 static void iam_leaf_fini(struct iam_leaf *leaf);
300
301 void iam_path_release(struct iam_path *path)
302 {
303         int i;
304
305         for (i = 0; i < ARRAY_SIZE(path->ip_frames); i++) {
306                 if (path->ip_frames[i].bh != NULL) {
307                         path->ip_frames[i].at_shifted = 0;
308                         brelse(path->ip_frames[i].bh);
309                         path->ip_frames[i].bh = NULL;
310                 }
311         }
312 }
313
314 void iam_path_fini(struct iam_path *path)
315 {
316         iam_leaf_fini(&path->ip_leaf);
317         iam_path_release(path);
318 }
319
320
321 void iam_path_compat_init(struct iam_path_compat *path, struct inode *inode)
322 {
323         int i;
324
325         path->ipc_hinfo = &path->ipc_hinfo_area;
326         for (i = 0; i < ARRAY_SIZE(path->ipc_scratch); ++i)
327                 path->ipc_descr.ipd_key_scratch[i] =
328                         (struct iam_ikey *)&path->ipc_scratch[i];
329
330         iam_path_init(&path->ipc_path, &path->ipc_container, &path->ipc_descr);
331 }
332
333 void iam_path_compat_fini(struct iam_path_compat *path)
334 {
335         iam_path_fini(&path->ipc_path);
336 }
337
338 /*
339  * Helper function initializing iam_path_descr and its key scratch area.
340  */
341 struct iam_path_descr *iam_ipd_alloc(void *area, int keysize)
342 {
343         struct iam_path_descr *ipd;
344         void *karea;
345         int i;
346
347         ipd = area;
348         karea = ipd + 1;
349         for (i = 0; i < ARRAY_SIZE(ipd->ipd_key_scratch); ++i, karea += keysize)
350                 ipd->ipd_key_scratch[i] = karea;
351         return ipd;
352 }
353
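/*
 * Illustrative usage sketch (hypothetical caller, not taken from this file):
 * @area must hold the descriptor itself immediately followed by one scratch
 * key of @keysize bytes per slot of ->ipd_key_scratch[]:
 *
 *     nkeys = ARRAY_SIZE(ipd->ipd_key_scratch);
 *     area  = kmalloc(sizeof(*ipd) + nkeys * keysize, GFP_NOFS);
 *     if (area != NULL)
 *             ipd = iam_ipd_alloc(area, keysize);
 */
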
354 void iam_ipd_free(struct iam_path_descr *ipd)
355 {
356 }
357
358 int iam_node_read(struct iam_container *c, iam_ptr_t ptr,
359                   handle_t *h, struct buffer_head **bh)
360 {
361         int result = 0;
362
363         /* NB: it can be called by iam_lfix_guess() which is still at a
364          * very early stage, when c->ic_root_bh and c->ic_descr->id_ops
365          * haven't been initialized yet.
366          * Also, we don't have this for IAM dir.
367          */
368         if (c->ic_root_bh != NULL &&
369             c->ic_descr->id_ops->id_root_ptr(c) == ptr) {
370                 get_bh(c->ic_root_bh);
371                 *bh = c->ic_root_bh;
372                 return 0;
373         }
374
375         *bh = ldiskfs_bread(h, c->ic_object, (int)ptr, 0, &result);
376         if (*bh == NULL)
377                 result = -EIO;
378         return result;
379 }
380
381 /*
382  * Return pointer to current leaf record. Pointer is valid while corresponding
383  * leaf node is locked and pinned.
384  */
385 static struct iam_rec *iam_leaf_rec(const struct iam_leaf *leaf)
386 {
387         return iam_leaf_ops(leaf)->rec(leaf);
388 }
389
390 /*
391  * Return pointer to the current leaf key. This function returns pointer to
392  * the key stored in node.
393  *
394  * Caller should assume that returned pointer is only valid while leaf node is
395  * pinned and locked.
396  */
397 static struct iam_key *iam_leaf_key(const struct iam_leaf *leaf)
398 {
399         return iam_leaf_ops(leaf)->key(leaf);
400 }
401
402 static int iam_leaf_key_size(const struct iam_leaf *leaf)
403 {
404         return iam_leaf_ops(leaf)->key_size(leaf);
405 }
406
407 static struct iam_ikey *iam_leaf_ikey(const struct iam_leaf *leaf,
408                                       struct iam_ikey *key)
409 {
410         return iam_leaf_ops(leaf)->ikey(leaf, key);
411 }
412
413 static int iam_leaf_keycmp(const struct iam_leaf *leaf,
414                            const struct iam_key *key)
415 {
416         return iam_leaf_ops(leaf)->key_cmp(leaf, key);
417 }
418
419 static int iam_leaf_keyeq(const struct iam_leaf *leaf,
420                           const struct iam_key *key)
421 {
422         return iam_leaf_ops(leaf)->key_eq(leaf, key);
423 }
424
425 #if LDISKFS_INVARIANT_ON
426 static int iam_leaf_check(struct iam_leaf *leaf);
427 extern int dx_node_check(struct iam_path *p, struct iam_frame *f);
428
429 static int iam_path_check(struct iam_path *p)
430 {
431         int i;
432         int result;
433         struct iam_frame *f;
434         struct iam_descr *param;
435
436         result = 1;
437         param = iam_path_descr(p);
438         for (i = 0; result && i < ARRAY_SIZE(p->ip_frames); ++i) {
439                 f = &p->ip_frames[i];
440                 if (f->bh != NULL) {
441                         result = dx_node_check(p, f);
442                         if (result)
443                                 result = !param->id_ops->id_node_check(p, f);
444                 }
445         }
446         if (result && p->ip_leaf.il_bh != NULL)
447                 result = iam_leaf_check(&p->ip_leaf);
448         if (result == 0) {
449                 ldiskfs_std_error(iam_path_obj(p)->i_sb, result);
450         }
451         return result;
452 }
453 #endif
454
455 static int iam_leaf_load(struct iam_path *path)
456 {
457         iam_ptr_t block;
458         int err;
459         struct iam_container *c;
460         struct buffer_head   *bh;
461         struct iam_leaf      *leaf;
462         struct iam_descr     *descr;
463
464         c     = path->ip_container;
465         leaf  = &path->ip_leaf;
466         descr = iam_path_descr(path);
467         block = path->ip_frame->leaf;
468         if (block == 0) {
469                 /* XXX bug 11027 */
470                 printk(KERN_EMERG "wrong leaf: %lu %d [%p %p %p]\n",
471                        (long unsigned)path->ip_frame->leaf,
472                        dx_get_count(dx_node_get_entries(path, path->ip_frame)),
473                        path->ip_frames[0].bh, path->ip_frames[1].bh,
474                        path->ip_frames[2].bh);
475         }
476         err = descr->id_ops->id_node_read(c, block, NULL, &bh);
477         if (err == 0) {
478                 leaf->il_bh = bh;
479                 leaf->il_curidx = block;
480                 err = iam_leaf_ops(leaf)->init(leaf);
481                 assert_inv(ergo(err == 0, iam_leaf_check(leaf)));
482         }
483         return err;
484 }
485
486 static void iam_unlock_htree(struct iam_container *ic,
487                              struct dynlock_handle *lh)
488 {
489         if (lh != NULL)
490                 dynlock_unlock(&ic->ic_tree_lock, lh);
491 }
492
493
494 static void iam_leaf_unlock(struct iam_leaf *leaf)
495 {
496         if (leaf->il_lock != NULL) {
497                 iam_unlock_htree(iam_leaf_container(leaf),
498                                  leaf->il_lock);
499                 do_corr(schedule());
500                 leaf->il_lock = NULL;
501         }
502 }
503
504 static void iam_leaf_fini(struct iam_leaf *leaf)
505 {
506         if (leaf->il_path != NULL) {
507                 iam_leaf_unlock(leaf);
508                 assert_inv(ergo(leaf->il_bh != NULL, iam_leaf_check(leaf)));
509                 iam_leaf_ops(leaf)->fini(leaf);
510                 if (leaf->il_bh) {
511                         brelse(leaf->il_bh);
512                         leaf->il_bh = NULL;
513                         leaf->il_curidx = 0;
514                 }
515         }
516 }
517
518 static void iam_leaf_start(struct iam_leaf *folio)
519 {
520         iam_leaf_ops(folio)->start(folio);
521 }
522
523 void iam_leaf_next(struct iam_leaf *folio)
524 {
525         iam_leaf_ops(folio)->next(folio);
526 }
527
528 static void iam_leaf_rec_add(struct iam_leaf *leaf, const struct iam_key *key,
529                              const struct iam_rec *rec)
530 {
531         iam_leaf_ops(leaf)->rec_add(leaf, key, rec);
532 }
533
534 static void iam_rec_del(struct iam_leaf *leaf, int shift)
535 {
536         iam_leaf_ops(leaf)->rec_del(leaf, shift);
537 }
538
539 int iam_leaf_at_end(const struct iam_leaf *leaf)
540 {
541         return iam_leaf_ops(leaf)->at_end(leaf);
542 }
543
544 static void iam_leaf_split(struct iam_leaf *l, struct buffer_head **bh,
545                            iam_ptr_t nr)
546 {
547         iam_leaf_ops(l)->split(l, bh, nr);
548 }
549
550 static inline int iam_leaf_empty(struct iam_leaf *l)
551 {
552         return iam_leaf_ops(l)->leaf_empty(l);
553 }
554
555 int iam_leaf_can_add(const struct iam_leaf *l,
556                      const struct iam_key *k, const struct iam_rec *r)
557 {
558         return iam_leaf_ops(l)->can_add(l, k, r);
559 }
560
561 #if LDISKFS_INVARIANT_ON
562 static int iam_leaf_check(struct iam_leaf *leaf)
563 {
564         return 1;
565 #if 0
566         struct iam_lentry    *orig;
567         struct iam_path      *path;
568         struct iam_container *bag;
569         struct iam_ikey       *k0;
570         struct iam_ikey       *k1;
571         int result;
572         int first;
573
574         orig = leaf->il_at;
575         path = iam_leaf_path(leaf);
576         bag  = iam_leaf_container(leaf);
577
578         result = iam_leaf_ops(leaf)->init(leaf);
579         if (result != 0)
580                 return result;
581
582         first = 1;
583         iam_leaf_start(leaf);
584         k0 = iam_path_ikey(path, 0);
585         k1 = iam_path_ikey(path, 1);
586         while (!iam_leaf_at_end(leaf)) {
587                 iam_ikeycpy(bag, k0, k1);
588                 iam_ikeycpy(bag, k1, iam_leaf_ikey(leaf, k1));
589                 if (!first && iam_ikeycmp(bag, k0, k1) > 0) {
590                         return 0;
591                 }
592                 first = 0;
593                 iam_leaf_next(leaf);
594         }
595         leaf->il_at = orig;
596         return 1;
597 #endif
598 }
599 #endif
600
601 static int iam_txn_dirty(handle_t *handle,
602                          struct iam_path *path, struct buffer_head *bh)
603 {
604         int result;
605
606         result = ldiskfs_handle_dirty_metadata(handle, NULL, bh);
607         if (result != 0)
608                 ldiskfs_std_error(iam_path_obj(path)->i_sb, result);
609         return result;
610 }
611
612 static int iam_txn_add(handle_t *handle,
613                        struct iam_path *path, struct buffer_head *bh)
614 {
615         int result;
616
617         result = ldiskfs_journal_get_write_access(handle, bh);
618         if (result != 0)
619                 ldiskfs_std_error(iam_path_obj(path)->i_sb, result);
620         return result;
621 }
622
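/*
 * These two helpers wrap the usual journaling idiom: get write access to a
 * buffer before modifying it, then mark it dirty afterwards.  Illustrative
 * sketch, mirroring how e.g. iam_it_rec_set() below uses them:
 *
 *     result = iam_txn_add(handle, path, bh);
 *     if (result == 0) {
 *             ... modify bh->b_data ...
 *             result = iam_txn_dirty(handle, path, bh);
 *     }
 */
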
623 /***********************************************************************/
624 /* iterator interface                                                  */
625 /***********************************************************************/
626
627 static enum iam_it_state it_state(const struct iam_iterator *it)
628 {
629         return it->ii_state;
630 }
631
632 /*
633  * Helper function returning the container an iterator is attached to.
634  */
635 static struct iam_container *iam_it_container(const struct iam_iterator *it)
636 {
637         return it->ii_path.ip_container;
638 }
639
640 static inline int it_keycmp(const struct iam_iterator *it,
641                             const struct iam_key *k)
642 {
643         return iam_leaf_keycmp(&it->ii_path.ip_leaf, k);
644 }
645
646 static inline int it_keyeq(const struct iam_iterator *it,
647                            const struct iam_key *k)
648 {
649         return iam_leaf_keyeq(&it->ii_path.ip_leaf, k);
650 }
651
652 static int it_ikeycmp(const struct iam_iterator *it, const struct iam_ikey *ik)
653 {
654         return iam_ikeycmp(it->ii_path.ip_container,
655                            iam_leaf_ikey(&it->ii_path.ip_leaf,
656                                          iam_path_ikey(&it->ii_path, 0)), ik);
657 }
658
659 static inline int it_at_rec(const struct iam_iterator *it)
660 {
661         return !iam_leaf_at_end(&it->ii_path.ip_leaf);
662 }
663
664 static inline int it_before(const struct iam_iterator *it)
665 {
666         return it_state(it) == IAM_IT_SKEWED && it_at_rec(it);
667 }
668
669 /*
670  * Helper wrapper around iam_it_get(): returns 0 (success) only when record
671  * with exactly the same key as asked is found.
672  */
673 static int iam_it_get_exact(struct iam_iterator *it, const struct iam_key *k)
674 {
675         int result;
676
677         result = iam_it_get(it, k);
678         if (result > 0)
679                 result = 0;
680         else if (result == 0)
681                 /*
682                  * Return -ENOENT if cursor is located above record with a key
683                  * different from one specified, or in the empty leaf.
684                  *
685                  * XXX returning -ENOENT only works if iam_it_get() never
686                  * returns -ENOENT as a legitimate error.
687                  */
688                 result = -ENOENT;
689         return result;
690 }
691
692 void iam_container_write_lock(struct iam_container *ic)
693 {
694         down_write(&ic->ic_sem);
695 }
696
697 void iam_container_write_unlock(struct iam_container *ic)
698 {
699         up_write(&ic->ic_sem);
700 }
701
702 void iam_container_read_lock(struct iam_container *ic)
703 {
704         down_read(&ic->ic_sem);
705 }
706
707 void iam_container_read_unlock(struct iam_container *ic)
708 {
709         up_read(&ic->ic_sem);
710 }
711
712 /*
713  * Initialize iterator to IAM_IT_DETACHED state.
714  *
715  * postcondition: it_state(it) == IAM_IT_DETACHED
716  */
717 int  iam_it_init(struct iam_iterator *it, struct iam_container *c, __u32 flags,
718                  struct iam_path_descr *pd)
719 {
720         memset(it, 0, sizeof *it);
721         it->ii_flags  = flags;
722         it->ii_state  = IAM_IT_DETACHED;
723         iam_path_init(&it->ii_path, c, pd);
724         return 0;
725 }
726
727 /*
728  * Finalize iterator and release all resources.
729  *
730  * precondition: it_state(it) == IAM_IT_DETACHED
731  */
732 void iam_it_fini(struct iam_iterator *it)
733 {
734         assert_corr(it_state(it) == IAM_IT_DETACHED);
735         iam_path_fini(&it->ii_path);
736 }
737
738 /*
739  * These locking primitives are used to protect parts of the directory's
740  * htree. The protection unit is a block: either a leaf or an index node.
741  */
742 static struct dynlock_handle *iam_lock_htree(struct iam_container *ic,
743                                              unsigned long value,
744                                              enum dynlock_type lt)
745 {
746         return dynlock_lock(&ic->ic_tree_lock, value, lt, GFP_NOFS);
747 }
748
749 static int iam_index_lock(struct iam_path *path, struct dynlock_handle **lh)
750 {
751         struct iam_frame *f;
752
753         for (f = path->ip_frame; f >= path->ip_frames; --f, ++lh) {
754                 do_corr(schedule());
755                 *lh = iam_lock_htree(path->ip_container, f->curidx, DLT_READ);
756                 if (*lh == NULL)
757                         return -ENOMEM;
758         }
759         return 0;
760 }
761
762 /*
763  * Fast check for frame consistency.
764  */
765 static int iam_check_fast(struct iam_path *path, struct iam_frame *frame)
766 {
767         struct iam_container *bag;
768         struct iam_entry *next;
769         struct iam_entry *last;
770         struct iam_entry *entries;
771         struct iam_entry *at;
772
773         bag     = path->ip_container;
774         at      = frame->at;
775         entries = frame->entries;
776         last    = iam_entry_shift(path, entries, dx_get_count(entries) - 1);
777
778         if (unlikely(at > last))
779                 return -EAGAIN;
780
781         if (unlikely(dx_get_block(path, at) != frame->leaf))
782                 return -EAGAIN;
783
784         if (unlikely(iam_ikeycmp(bag, iam_ikey_at(path, at),
785                                  path->ip_ikey_target) > 0))
786                 return -EAGAIN;
787
788         next = iam_entry_shift(path, at, +1);
789         if (next <= last) {
790                 if (unlikely(iam_ikeycmp(bag, iam_ikey_at(path, next),
791                                          path->ip_ikey_target) <= 0))
792                         return -EAGAIN;
793         }
794         return 0;
795 }
796
797 int dx_index_is_compat(struct iam_path *path)
798 {
799         return iam_path_descr(path) == NULL;
800 }
801
802 /*
803  * iam_find_position
804  *
805  * search position of specified hash in index
806  *
807  */
808
809 static struct iam_entry *iam_find_position(struct iam_path *path,
810                                            struct iam_frame *frame)
811 {
812         int count;
813         struct iam_entry *p;
814         struct iam_entry *q;
815         struct iam_entry *m;
816
817         count = dx_get_count(frame->entries);
818         assert_corr(count && count <= dx_get_limit(frame->entries));
819         p = iam_entry_shift(path, frame->entries,
820                             dx_index_is_compat(path) ? 1 : 2);
821         q = iam_entry_shift(path, frame->entries, count - 1);
822         while (p <= q) {
823                 m = iam_entry_shift(path, p, iam_entry_diff(path, q, p) / 2);
824                 if (iam_ikeycmp(path->ip_container, iam_ikey_at(path, m),
825                                 path->ip_ikey_target) > 0)
826                         q = iam_entry_shift(path, m, -1);
827                 else
828                         p = iam_entry_shift(path, m, +1);
829         }
830         return iam_entry_shift(path, p, -1);
831 }
832
833
834
835 static iam_ptr_t iam_find_ptr(struct iam_path *path, struct iam_frame *frame)
836 {
837         return dx_get_block(path, iam_find_position(path, frame));
838 }
839
840 void iam_insert_key(struct iam_path *path, struct iam_frame *frame,
841                     const struct iam_ikey *key, iam_ptr_t ptr)
842 {
843         struct iam_entry *entries = frame->entries;
844         struct iam_entry *new = iam_entry_shift(path, frame->at, +1);
845         int count = dx_get_count(entries);
846
847         /*
848          * Unfortunately we cannot assert this, as this function is sometimes
849          * called by VFS under i_sem and without pdirops lock.
850          */
851         assert_corr(1 || iam_frame_is_locked(path, frame));
852         assert_corr(count < dx_get_limit(entries));
853         assert_corr(frame->at < iam_entry_shift(path, entries, count));
854         assert_inv(dx_node_check(path, frame));
855
856         memmove(iam_entry_shift(path, new, 1), new,
857                 (char *)iam_entry_shift(path, entries, count) - (char *)new);
858         dx_set_ikey(path, new, key);
859         dx_set_block(path, new, ptr);
860         dx_set_count(entries, count + 1);
861         assert_inv(dx_node_check(path, frame));
862 }
863
864 void iam_insert_key_lock(struct iam_path *path, struct iam_frame *frame,
865                          const struct iam_ikey *key, iam_ptr_t ptr)
866 {
867         iam_lock_bh(frame->bh);
868         iam_insert_key(path, frame, key, ptr);
869         iam_unlock_bh(frame->bh);
870 }
871 /*
872  * returns 0 if path was unchanged, -EAGAIN otherwise.
873  */
874 static int iam_check_path(struct iam_path *path, struct iam_frame *frame)
875 {
876         int equal;
877
878         iam_lock_bh(frame->bh);
879         equal = iam_check_fast(path, frame) == 0 ||
880                 frame->leaf == iam_find_ptr(path, frame);
881         DX_DEVAL(iam_lock_stats.dls_bh_again += !equal);
882         iam_unlock_bh(frame->bh);
883
884         return equal ? 0 : -EAGAIN;
885 }
886
887 static int iam_lookup_try(struct iam_path *path)
888 {
889         u32 ptr;
890         int err = 0;
891         int i;
892
893         struct iam_descr *param;
894         struct iam_frame *frame;
895         struct iam_container *c;
896
897         param = iam_path_descr(path);
898         c = path->ip_container;
899
900         ptr = param->id_ops->id_root_ptr(c);
901         for (frame = path->ip_frames, i = 0; i <= path->ip_indirect;
902              ++frame, ++i) {
903                 err = param->id_ops->id_node_read(c, (iam_ptr_t)ptr, NULL,
904                                                   &frame->bh);
905                 do_corr(schedule());
906
907                 iam_lock_bh(frame->bh);
908                 /*
909                  * node must be initialized under bh lock because concurrent
910                  * creation procedure may change it and iam_lookup_try() will
911                  * see obsolete tree height. -bzzz
912                  */
913                 if (err != 0)
914                         break;
915
916                 if (LDISKFS_INVARIANT_ON) {
917                         err = param->id_ops->id_node_check(path, frame);
918                         if (err != 0)
919                                 break;
920                 }
921
922                 err = param->id_ops->id_node_load(path, frame);
923                 if (err != 0)
924                         break;
925
926                 assert_inv(dx_node_check(path, frame));
927                  * splitting may change the root index block and move the hash
928                  * we're looking for into another index block, so we have to
929                  * check for this situation and repeat from the beginning if the
930                  * path got changed -bzzz
931                  * -bzzz
932                  */
933                 if (i > 0) {
934                         err = iam_check_path(path, frame - 1);
935                         if (err != 0)
936                                 break;
937                 }
938
939                 frame->at = iam_find_position(path, frame);
940                 frame->curidx = ptr;
941                 frame->leaf = ptr = dx_get_block(path, frame->at);
942
943                 iam_unlock_bh(frame->bh);
944                 do_corr(schedule());
945         }
946         if (err != 0)
947                 iam_unlock_bh(frame->bh);
948         path->ip_frame = --frame;
949         return err;
950 }
951
952 static int __iam_path_lookup(struct iam_path *path)
953 {
954         int err;
955         int i;
956
957         for (i = 0; i < DX_MAX_TREE_HEIGHT; ++ i)
958                 assert(path->ip_frames[i].bh == NULL);
959
960         do {
961                 err = iam_lookup_try(path);
962                 do_corr(schedule());
963                 if (err != 0)
964                         iam_path_fini(path);
965         } while (err == -EAGAIN);
966
967         return err;
968 }
969
970 /*
971  * returns 0 if path was unchanged, -EAGAIN otherwise.
972  */
973 static int iam_check_full_path(struct iam_path *path, int search)
974 {
975         struct iam_frame *bottom;
976         struct iam_frame *scan;
977         int i;
978         int result;
979
980         do_corr(schedule());
981
982         for (bottom = path->ip_frames, i = 0;
983              i < DX_MAX_TREE_HEIGHT && bottom->bh != NULL; ++bottom, ++i) {
984                 ; /* find last filled in frame */
985         }
986
987         /*
988          * Lock frames, bottom to top.
989          */
990         for (scan = bottom - 1; scan >= path->ip_frames; --scan)
991                 iam_lock_bh(scan->bh);
992         /*
993          * Check them top to bottom.
994          */
995         result = 0;
996         for (scan = path->ip_frames; scan < bottom; ++scan) {
997                 struct iam_entry *pos;
998
999                 if (search) {
1000                         if (iam_check_fast(path, scan) == 0)
1001                                 continue;
1002
1003                         pos = iam_find_position(path, scan);
1004                         if (scan->leaf != dx_get_block(path, pos)) {
1005                                 result = -EAGAIN;
1006                                 break;
1007                         }
1008                         scan->at = pos;
1009                 } else {
1010                         pos = iam_entry_shift(path, scan->entries,
1011                                               dx_get_count(scan->entries) - 1);
1012                         if (scan->at > pos ||
1013                             scan->leaf != dx_get_block(path, scan->at)) {
1014                                 result = -EAGAIN;
1015                                 break;
1016                         }
1017                 }
1018         }
1019
1020         /*
1021          * Unlock top to bottom.
1022          */
1023         for (scan = path->ip_frames; scan < bottom; ++scan)
1024                 iam_unlock_bh(scan->bh);
1025         DX_DEVAL(iam_lock_stats.dls_bh_full_again += !!result);
1026         do_corr(schedule());
1027
1028         return result;
1029 }
1030
1031
1032 /*
1033  * Performs path lookup and returns with found leaf (if any) locked by htree
1034  * lock.
1035  */
1036 static int iam_lookup_lock(struct iam_path *path,
1037                            struct dynlock_handle **dl, enum dynlock_type lt)
1038 {
1039         int result;
1040
1041         while ((result = __iam_path_lookup(path)) == 0) {
1042                 do_corr(schedule());
1043                 *dl = iam_lock_htree(path->ip_container, path->ip_frame->leaf,
1044                                      lt);
1045                 if (*dl == NULL) {
1046                         iam_path_fini(path);
1047                         result = -ENOMEM;
1048                         break;
1049                 }
1050                 do_corr(schedule());
1051                 /*
1052                  * while we were taking the lock, the leaf we just found may
1053                  * have been split, so we need to check for this -bzzz
1054                  */
1055                 if (iam_check_full_path(path, 1) == 0)
1056                         break;
1057                 iam_unlock_htree(path->ip_container, *dl);
1058                 *dl = NULL;
1059                 iam_path_fini(path);
1060         }
1061         return result;
1062 }
1063 /*
1064  * Performs tree top-to-bottom traversal starting from root, and loads leaf
1065  * node.
1066  */
1067 static int iam_path_lookup(struct iam_path *path, int index)
1068 {
1069         struct iam_container *c;
1070         struct iam_leaf  *leaf;
1071         int result;
1072
1073         c = path->ip_container;
1074         leaf = &path->ip_leaf;
1075         result = iam_lookup_lock(path, &leaf->il_lock, DLT_WRITE);
1076         assert_inv(iam_path_check(path));
1077         do_corr(schedule());
1078         if (result == 0) {
1079                 result = iam_leaf_load(path);
1080                 assert_inv(ergo(result == 0, iam_leaf_check(leaf)));
1081                 if (result == 0) {
1082                         do_corr(schedule());
1083                         if (index)
1084                                 result = iam_leaf_ops(leaf)->
1085                                         ilookup(leaf, path->ip_ikey_target);
1086                         else
1087                                 result = iam_leaf_ops(leaf)->
1088                                         lookup(leaf, path->ip_key_target);
1089                         do_corr(schedule());
1090                 }
1091                 if (result < 0)
1092                         iam_leaf_unlock(leaf);
1093         }
1094         return result;
1095 }
1096
1097 /*
1098  * Common part of iam_it_{i,}get().
1099  */
1100 static int __iam_it_get(struct iam_iterator *it, int index)
1101 {
1102         int result;
1103         assert_corr(it_state(it) == IAM_IT_DETACHED);
1104
1105         result = iam_path_lookup(&it->ii_path, index);
1106         if (result >= 0) {
1107                 int collision;
1108
1109                 collision = result & IAM_LOOKUP_LAST;
1110                 switch (result & ~IAM_LOOKUP_LAST) {
1111                 case IAM_LOOKUP_EXACT:
1112                         result = +1;
1113                         it->ii_state = IAM_IT_ATTACHED;
1114                         break;
1115                 case IAM_LOOKUP_OK:
1116                         result = 0;
1117                         it->ii_state = IAM_IT_ATTACHED;
1118                         break;
1119                 case IAM_LOOKUP_BEFORE:
1120                 case IAM_LOOKUP_EMPTY:
1121                         result = 0;
1122                         it->ii_state = IAM_IT_SKEWED;
1123                         break;
1124                 default:
1125                         assert(0);
1126                 }
1127                 result |= collision;
1128         }
1129         /*
1130          * See iam_it_get_exact() for explanation.
1131          */
1132         assert_corr(result != -ENOENT);
1133         return result;
1134 }
1135
1136 /*
1137  * The correct hash was found, but not the same key; iterate through the
1138  * hash collision chain, looking for the correct record.
1139  */
1140 static int iam_it_collision(struct iam_iterator *it)
1141 {
1142         int result;
1143
1144         assert(ergo(it_at_rec(it), !it_keyeq(it, it->ii_path.ip_key_target)));
1145
1146         while ((result = iam_it_next(it)) == 0) {
1147                 do_corr(schedule());
1148                 if (it_ikeycmp(it, it->ii_path.ip_ikey_target) != 0)
1149                         return -ENOENT;
1150                 if (it_keyeq(it, it->ii_path.ip_key_target))
1151                         return 0;
1152         }
1153         return result;
1154 }
1155
1156 /*
1157  * Attach iterator. After successful completion, @it points to record with
1158  * least key not larger than @k.
1159  *
1160  * Return value: 0: positioned on existing record,
1161  *             +ve: exact position found,
1162  *             -ve: error.
1163  *
1164  * precondition:  it_state(it) == IAM_IT_DETACHED
1165  * postcondition: ergo(result == 0 && it_state(it) == IAM_IT_ATTACHED,
1166  *                     it_keycmp(it, k) <= 0)
1167  */
1168 int iam_it_get(struct iam_iterator *it, const struct iam_key *k)
1169 {
1170         int result;
1171         assert_corr(it_state(it) == IAM_IT_DETACHED);
1172
1173         it->ii_path.ip_ikey_target = NULL;
1174         it->ii_path.ip_key_target  = k;
1175
1176         result = __iam_it_get(it, 0);
1177
1178         if (result == IAM_LOOKUP_LAST) {
1179                 result = iam_it_collision(it);
1180                 if (result != 0) {
1181                         iam_it_put(it);
1182                         iam_it_fini(it);
1183                         result = __iam_it_get(it, 0);
1184                 } else
1185                         result = +1;
1186         }
1187         if (result > 0)
1188                 result &= ~IAM_LOOKUP_LAST;
1189
1190         assert_corr(ergo(result > 0, it_keycmp(it, k) == 0));
1191         assert_corr(ergo(result == 0 && it_state(it) == IAM_IT_ATTACHED,
1192                          it_keycmp(it, k) <= 0));
1193         return result;
1194 }
1195
1196 /*
1197  * Attach iterator by index key.
1198  */
1199 static int iam_it_iget(struct iam_iterator *it, const struct iam_ikey *k)
1200 {
1201         assert_corr(it_state(it) == IAM_IT_DETACHED);
1202
1203         it->ii_path.ip_ikey_target = k;
1204         return __iam_it_get(it, 1) & ~IAM_LOOKUP_LAST;
1205 }
1206
1207 /*
1208  * Attach iterator, and assure it points to the record (not skewed).
1209  *
1210  * Return value: 0: positioned on existing record,
1211  *             +ve: exact position found,
1212  *             -ve: error.
1213  *
1214  * precondition:  it_state(it) == IAM_IT_DETACHED &&
1215  *                !(it->ii_flags&IAM_IT_WRITE)
1216  * postcondition: ergo(result == 0, it_state(it) == IAM_IT_ATTACHED)
1217  */
1218 int iam_it_get_at(struct iam_iterator *it, const struct iam_key *k)
1219 {
1220         int result;
1221         assert_corr(it_state(it) == IAM_IT_DETACHED &&
1222                     !(it->ii_flags&IAM_IT_WRITE));
1223         result = iam_it_get(it, k);
1224         if (result == 0) {
1225                 if (it_state(it) != IAM_IT_ATTACHED) {
1226                         assert_corr(it_state(it) == IAM_IT_SKEWED);
1227                         result = iam_it_next(it);
1228                 }
1229         }
1230         assert_corr(ergo(result >= 0, it_state(it) == IAM_IT_ATTACHED));
1231         return result;
1232 }
1233
1234 /*
1235  * Duplicates iterator.
1236  *
1237  * postcondition: it_state(dst) == it_state(src) &&
1238  *                iam_it_container(dst) == iam_it_container(src) &&
1239  *                dst->ii_flags = src->ii_flags &&
1240  *                ergo(it_state(src) == IAM_IT_ATTACHED,
1241  *                     iam_it_rec_get(dst) == iam_it_rec_get(src) &&
1242  *                     iam_it_key_get(dst) == iam_it_key_get(src))
1243  */
1244 void iam_it_dup(struct iam_iterator *dst, const struct iam_iterator *src)
1245 {
1246         dst->ii_flags     = src->ii_flags;
1247         dst->ii_state     = src->ii_state;
1248         /* XXX not yet. iam_path_dup(&dst->ii_path, &src->ii_path); */
1249         /*
1250          * XXX: duplicate lock.
1251          */
1252         assert_corr(it_state(dst) == it_state(src));
1253         assert_corr(iam_it_container(dst) == iam_it_container(src));
1254         assert_corr(dst->ii_flags = src->ii_flags);
1255         assert_corr(ergo(it_state(src) == IAM_IT_ATTACHED,
1256                     iam_it_rec_get(dst) == iam_it_rec_get(src) &&
1257                     iam_it_key_get(dst) == iam_it_key_get(src)));
1258
1259 }
1260
1261 /*
1262  * Detach iterator. Does nothing in detached state.
1263  *
1264  * postcondition: it_state(it) == IAM_IT_DETACHED
1265  */
1266 void iam_it_put(struct iam_iterator *it)
1267 {
1268         if (it->ii_state != IAM_IT_DETACHED) {
1269                 it->ii_state = IAM_IT_DETACHED;
1270                 iam_leaf_fini(&it->ii_path.ip_leaf);
1271         }
1272 }
1273
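/*
 * Typical exact-match lookup cycle with the iterator interface, as an
 * illustrative sketch (error handling elided; @c, @pd and @key are prepared
 * by the caller):
 *
 *     struct iam_iterator it;
 *
 *     iam_it_init(&it, c, 0, pd);
 *     result = iam_it_get_at(&it, key);
 *     if (result > 0)
 *             rec = iam_it_rec_get(&it);    exact match, record is pinned
 *     iam_it_put(&it);
 *     iam_it_fini(&it);
 */
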
1274 static struct iam_ikey *iam_it_ikey_get(const struct iam_iterator *it,
1275                                         struct iam_ikey *ikey);
1276
1277
1278 /*
1279  * This function increments the frame pointer to search the next leaf
1280  * block, and reads in the necessary intervening nodes if the search
1281  * should be necessary.  Whether or not the search is necessary is
1282  * controlled by the hash parameter.  If the hash value is even, then
1283  * the search is only continued if the next block starts with that
1284  * hash value.  This is used if we are searching for a specific file.
1285  *
1286  * If the hash value is HASH_NB_ALWAYS, then always go to the next block.
1287  *
1288  * This function returns 1 if the caller should continue to search,
1289  * or 0 if it should not.  If there is an error reading one of the
1290  * index blocks, it will a negative error code.
1291  *
1292  * If start_hash is non-null, it will be filled in with the starting
1293  * hash of the next page.
1294  */
1295 static int iam_htree_advance(struct inode *dir, __u32 hash,
1296                               struct iam_path *path, __u32 *start_hash,
1297                               int compat)
1298 {
1299         struct iam_frame *p;
1300         struct buffer_head *bh;
1301         int err, num_frames = 0;
1302         __u32 bhash;
1303
1304         p = path->ip_frame;
1305         /*
1306          * Find the next leaf page by incrementing the frame pointer.
1307          * If we run out of entries in the interior node, loop around and
1308          * increment pointer in the parent node.  When we break out of
1309          * this loop, num_frames indicates the number of interior
1310  * nodes that need to be read.
1311          */
1312         while (1) {
1313                 do_corr(schedule());
1314                 iam_lock_bh(p->bh);
1315                 if (p->at_shifted)
1316                         p->at_shifted = 0;
1317                 else
1318                         p->at = iam_entry_shift(path, p->at, +1);
1319                 if (p->at < iam_entry_shift(path, p->entries,
1320                                             dx_get_count(p->entries))) {
1321                         p->leaf = dx_get_block(path, p->at);
1322                         iam_unlock_bh(p->bh);
1323                         break;
1324                 }
1325                 iam_unlock_bh(p->bh);
1326                 if (p == path->ip_frames)
1327                         return 0;
1328                 num_frames++;
1329                 --p;
1330         }
1331
1332         if (compat) {
1333                 /*
1334                  * Htree hash magic.
1335                  */
1336                 /*
1337                  * If the hash is 1, then continue only if the next page has a
1338                  * continuation hash of any value.  This is used for readdir
1339                  * handling.  Otherwise, check to see if the hash matches the
1340                  * desired continuation hash.  If it doesn't, return since
1341                  * there's no point to read in the successive index pages.
1342                  */
1343                 dx_get_ikey(path, p->at, (struct iam_ikey *)&bhash);
1344                 if (start_hash)
1345                         *start_hash = bhash;
1346                 if ((hash & 1) == 0) {
1347                         if ((bhash & ~1) != hash)
1348                                 return 0;
1349                 }
1350         }
1351         /*
1352          * If the hash is HASH_NB_ALWAYS, we always go to the next
1353          * block so no check is necessary
1354          */
1355         while (num_frames--) {
1356                 iam_ptr_t idx;
1357
1358                 do_corr(schedule());
1359                 iam_lock_bh(p->bh);
1360                 idx = p->leaf = dx_get_block(path, p->at);
1361                 iam_unlock_bh(p->bh);
1362                 err = iam_path_descr(path)->id_ops->
1363                         id_node_read(path->ip_container, idx, NULL, &bh);
1364                 if (err != 0)
1365                         return err; /* Failure */
1366                 ++p;
1367                 brelse(p->bh);
1368                 assert_corr(p->bh != bh);
1369                 p->bh = bh;
1370                 p->entries = dx_node_get_entries(path, p);
1371                 p->at = iam_entry_shift(path, p->entries, !compat);
1372                 assert_corr(p->curidx != idx);
1373                 p->curidx = idx;
1374                 iam_lock_bh(p->bh);
1375                 assert_corr(p->leaf != dx_get_block(path, p->at));
1376                 p->leaf = dx_get_block(path, p->at);
1377                 iam_unlock_bh(p->bh);
1378                 assert_inv(dx_node_check(path, p));
1379         }
1380         return 1;
1381 }
1382
1383
1384 static inline int iam_index_advance(struct iam_path *path)
1385 {
1386         return iam_htree_advance(iam_path_obj(path), 0, path, NULL, 0);
1387 }
1388
1389 static void iam_unlock_array(struct iam_container *ic,
1390                              struct dynlock_handle **lh)
1391 {
1392         int i;
1393
1394         for (i = 0; i < DX_MAX_TREE_HEIGHT; ++i, ++lh) {
1395                 if (*lh != NULL) {
1396                         iam_unlock_htree(ic, *lh);
1397                         *lh = NULL;
1398                 }
1399         }
1400 }
1401 /*
1402  * Advance index part of @path to point to the next leaf. Returns 1 on
1403  * success, 0 when the end of the container was reached. Leaf node is locked.
1404  */
1405 int iam_index_next(struct iam_container *c, struct iam_path *path)
1406 {
1407         iam_ptr_t cursor;
1408         struct dynlock_handle *lh[DX_MAX_TREE_HEIGHT] = { NULL, };
1409         int result;
1410         struct inode *object;
1411
1412         /*
1413          * Locking for iam_index_next()... is to be described.
1414          */
1415
1416         object = c->ic_object;
1417         cursor = path->ip_frame->leaf;
1418
1419         while (1) {
1420                 result = iam_index_lock(path, lh);
1421                 do_corr(schedule());
1422                 if (result < 0)
1423                         break;
1424
1425                 result = iam_check_full_path(path, 0);
1426                 if (result == 0 && cursor == path->ip_frame->leaf) {
1427                         result = iam_index_advance(path);
1428
1429                         assert_corr(result == 0 ||
1430                                     cursor != path->ip_frame->leaf);
1431                         break;
1432                 }
1433                 do {
1434                         iam_unlock_array(c, lh);
1435
1436                         iam_path_release(path);
1437                         do_corr(schedule());
1438
1439                         result = __iam_path_lookup(path);
1440                         if (result < 0)
1441                                 break;
1442
1443                         while (path->ip_frame->leaf != cursor) {
1444                                 do_corr(schedule());
1445
1446                                 result = iam_index_lock(path, lh);
1447                                 do_corr(schedule());
1448                                 if (result < 0)
1449                                         break;
1450
1451                                 result = iam_check_full_path(path, 0);
1452                                 if (result != 0)
1453                                         break;
1454
1455                                 result = iam_index_advance(path);
1456                                 if (result == 0) {
1457                                         CERROR("cannot find cursor : %u\n",
1458                                                 cursor);
1459                                         result = -EIO;
1460                                 }
1461                                 if (result < 0)
1462                                         break;
1463                                 result = iam_check_full_path(path, 0);
1464                                 if (result != 0)
1465                                         break;
1466                                 iam_unlock_array(c, lh);
1467                         }
1468                 } while (result == -EAGAIN);
1469                 if (result < 0)
1470                         break;
1471         }
1472         iam_unlock_array(c, lh);
1473         return result;
1474 }
1475
1476 /*
1477  * Move iterator one record right.
1478  *
1479  * Return value: 0: success,
1480  *              +1: end of container reached
1481  *             -ve: error
1482  *
1483  * precondition:  (it_state(it) == IAM_IT_ATTACHED ||
1484  *                 it_state(it) == IAM_IT_SKEWED) && it->ii_flags&IAM_IT_MOVE
1485  * postcondition: ergo(result == 0, it_state(it) == IAM_IT_ATTACHED) &&
1486  *                ergo(result >  0, it_state(it) == IAM_IT_DETACHED)
1487  */
1488 int iam_it_next(struct iam_iterator *it)
1489 {
1490         int result;
1491         struct iam_path      *path;
1492         struct iam_leaf      *leaf;
1493         do_corr(struct iam_ikey *ik_orig);
1494
1495         /* assert_corr(it->ii_flags&IAM_IT_MOVE); */
1496         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1497                     it_state(it) == IAM_IT_SKEWED);
1498
1499         path = &it->ii_path;
1500         leaf = &path->ip_leaf;
1501
1502         assert_corr(iam_leaf_is_locked(leaf));
1503
1504         result = 0;
1505         do_corr(ik_orig = it_at_rec(it) ?
1506                 iam_it_ikey_get(it, iam_path_ikey(path, 2)) : NULL);
1507         if (it_before(it)) {
1508                 assert_corr(!iam_leaf_at_end(leaf));
1509                 it->ii_state = IAM_IT_ATTACHED;
1510         } else {
1511                 if (!iam_leaf_at_end(leaf))
1512                         /* advance within leaf node */
1513                         iam_leaf_next(leaf);
1514                 /*
1515                  * multiple iterations may be necessary due to empty leaves.
1516                  */
1517                 while (result == 0 && iam_leaf_at_end(leaf)) {
1518                         do_corr(schedule());
1519                         /* advance index portion of the path */
1520                         result = iam_index_next(iam_it_container(it), path);
1521                         assert_corr(iam_leaf_is_locked(leaf));
1522                         if (result == 1) {
1523                                 struct dynlock_handle *lh;
1524                                 lh = iam_lock_htree(iam_it_container(it),
1525                                                     path->ip_frame->leaf,
1526                                                     DLT_WRITE);
1527                                 if (lh != NULL) {
1528                                         iam_leaf_fini(leaf);
1529                                         leaf->il_lock = lh;
1530                                         result = iam_leaf_load(path);
1531                                         if (result == 0)
1532                                                 iam_leaf_start(leaf);
1533                                 } else
1534                                         result = -ENOMEM;
1535                         } else if (result == 0)
1536                                 /* end of container reached */
1537                                 result = +1;
1538                         if (result != 0)
1539                                 iam_it_put(it);
1540                 }
1541                 if (result == 0)
1542                         it->ii_state = IAM_IT_ATTACHED;
1543         }
1544         assert_corr(ergo(result == 0, it_state(it) == IAM_IT_ATTACHED));
1545         assert_corr(ergo(result >  0, it_state(it) == IAM_IT_DETACHED));
1546         assert_corr(ergo(result == 0 && ik_orig != NULL,
1547                          it_ikeycmp(it, ik_orig) >= 0));
1548         return result;
1549 }
1550
1551 /*
1552  * Return pointer to the record under iterator.
1553  *
1554  * precondition:  it_state(it) == IAM_IT_ATTACHED && it_at_rec(it)
1555  * postcondition: it_state(it) == IAM_IT_ATTACHED
1556  */
1557 struct iam_rec *iam_it_rec_get(const struct iam_iterator *it)
1558 {
1559         assert_corr(it_state(it) == IAM_IT_ATTACHED);
1560         assert_corr(it_at_rec(it));
1561         return iam_leaf_rec(&it->ii_path.ip_leaf);
1562 }
1563
1564 static void iam_it_reccpy(struct iam_iterator *it, const struct iam_rec *r)
1565 {
1566         struct iam_leaf *folio;
1567
1568         folio = &it->ii_path.ip_leaf;
1569         iam_leaf_ops(folio)->rec_set(folio, r);
1570 }
1571
1572 /*
1573  * Replace contents of record under iterator.
1574  *
1575  * precondition:  it_state(it) == IAM_IT_ATTACHED &&
1576  *                it->ii_flags&IAM_IT_WRITE
1577  * postcondition: it_state(it) == IAM_IT_ATTACHED &&
1578  *                ergo(result == 0, !memcmp(iam_it_rec_get(it), r, ...))
1579  */
1580 int iam_it_rec_set(handle_t *h,
1581                    struct iam_iterator *it, const struct iam_rec *r)
1582 {
1583         int result;
1584         struct iam_path *path;
1585         struct buffer_head *bh;
1586
1587         assert_corr(it_state(it) == IAM_IT_ATTACHED &&
1588                     it->ii_flags&IAM_IT_WRITE);
1589         assert_corr(it_at_rec(it));
1590
1591         path = &it->ii_path;
1592         bh   = path->ip_leaf.il_bh;
1593         result = iam_txn_add(h, path, bh);
1594         if (result == 0) {
1595                 iam_it_reccpy(it, r);
1596                 result = iam_txn_dirty(h, path, bh);
1597         }
1598         return result;
1599 }
1600
1601 /*
1602  * Return pointer to the index key under iterator.
1603  *
1604  * precondition:  it_state(it) == IAM_IT_ATTACHED ||
1605  *                it_state(it) == IAM_IT_SKEWED
1606  */
1607 static struct iam_ikey *iam_it_ikey_get(const struct iam_iterator *it,
1608                                         struct iam_ikey *ikey)
1609 {
1610         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1611                     it_state(it) == IAM_IT_SKEWED);
1612         assert_corr(it_at_rec(it));
1613         return iam_leaf_ikey(&it->ii_path.ip_leaf, ikey);
1614 }
1615
1616 /*
1617  * Return pointer to the key under iterator.
1618  *
1619  * precondition:  it_state(it) == IAM_IT_ATTACHED ||
1620  *                it_state(it) == IAM_IT_SKEWED
1621  */
1622 struct iam_key *iam_it_key_get(const struct iam_iterator *it)
1623 {
1624         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1625                     it_state(it) == IAM_IT_SKEWED);
1626         assert_corr(it_at_rec(it));
1627         return iam_leaf_key(&it->ii_path.ip_leaf);
1628 }
1629
1630 /*
1631  * Return size of key under iterator (in bytes)
1632  *
1633  * precondition:  it_state(it) == IAM_IT_ATTACHED ||
1634  *                it_state(it) == IAM_IT_SKEWED
1635  */
1636 int iam_it_key_size(const struct iam_iterator *it)
1637 {
1638         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1639                     it_state(it) == IAM_IT_SKEWED);
1640         assert_corr(it_at_rec(it));
1641         return iam_leaf_key_size(&it->ii_path.ip_leaf);
1642 }
1643
1644 static struct buffer_head *
1645 iam_new_node(handle_t *h, struct iam_container *c, iam_ptr_t *b, int *e)
1646 {
1647         struct inode *inode = c->ic_object;
1648         struct buffer_head *bh = NULL;
1649         struct iam_idle_head *head;
1650         struct buffer_head *idle;
1651         __u32 *idle_blocks;
1652         __u16 count;
1653
1654         if (c->ic_idle_bh == NULL)
1655                 goto newblock;
1656
1657         mutex_lock(&c->ic_idle_mutex);
1658         if (unlikely(c->ic_idle_bh == NULL)) {
1659                 mutex_unlock(&c->ic_idle_mutex);
1660                 goto newblock;
1661         }
1662
1663         head = (struct iam_idle_head *)(c->ic_idle_bh->b_data);
1664         count = le16_to_cpu(head->iih_count);
1665         if (count > 0) {
1666                 *e = ldiskfs_journal_get_write_access(h, c->ic_idle_bh);
1667                 if (*e != 0)
1668                         goto fail;
1669
1670                 --count;
1671                 *b = le32_to_cpu(head->iih_blks[count]);
1672                 head->iih_count = cpu_to_le16(count);
1673                 *e = ldiskfs_handle_dirty_metadata(h, inode, c->ic_idle_bh);
1674                 if (*e != 0)
1675                         goto fail;
1676
1677                 mutex_unlock(&c->ic_idle_mutex);
1678                 bh = ldiskfs_bread(NULL, inode, *b, 0, e);
1679                 if (bh == NULL)
1680                         return NULL;
1681                 goto got;
1682         }
1683
1684         /* The block that contains the iam_idle_head is itself an idle
1685          * block, and can be used as the new node. */
1686         idle_blocks = (__u32 *)(c->ic_root_bh->b_data +
1687                                 c->ic_descr->id_root_gap +
1688                                 sizeof(struct dx_countlimit));
1689         *e = ldiskfs_journal_get_write_access(h, c->ic_root_bh);
1690         if (*e != 0)
1691                 goto fail;
1692
1693         *b = le32_to_cpu(*idle_blocks);
1694         iam_lock_bh(c->ic_root_bh);
1695         *idle_blocks = head->iih_next;
1696         iam_unlock_bh(c->ic_root_bh);
1697         *e = ldiskfs_handle_dirty_metadata(h, inode, c->ic_root_bh);
1698         if (*e != 0) {
1699                 iam_lock_bh(c->ic_root_bh);
1700                 *idle_blocks = cpu_to_le32(*b);
1701                 iam_unlock_bh(c->ic_root_bh);
1702                 goto fail;
1703         }
1704
1705         bh = c->ic_idle_bh;
1706         idle = iam_load_idle_blocks(c, le32_to_cpu(*idle_blocks));
1707         if (idle != NULL && IS_ERR(idle)) {
1708                 *e = PTR_ERR(idle);
1709                 c->ic_idle_bh = NULL;
1710                 brelse(bh);
1711                 goto fail;
1712         }
1713
1714         c->ic_idle_bh = idle;
1715         mutex_unlock(&c->ic_idle_mutex);
1716
1717 got:
1718         /* get write access for the found buffer head */
1719         *e = ldiskfs_journal_get_write_access(h, bh);
1720         if (*e != 0) {
1721                 brelse(bh);
1722                 bh = NULL;
1723                 ldiskfs_std_error(inode->i_sb, *e);
1724         } else {
1725                 /* Clear the reused node so it looks like a freshly allocated one. */
1726                 memset(bh->b_data, 0, inode->i_sb->s_blocksize);
1727                 set_buffer_uptodate(bh);
1728         }
1729         return bh;
1730
1731 newblock:
1732         bh = osd_ldiskfs_append(h, inode, b, e);
1733         return bh;
1734
1735 fail:
1736         mutex_unlock(&c->ic_idle_mutex);
1737         ldiskfs_std_error(inode->i_sb, *e);
1738         return NULL;
1739 }
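/*
 * On-disk layout consumed above (a sketch based on the code in this file):
 * the root block stores, right after the dx_countlimit located at
 * id_root_gap, a little-endian block number pointing at the head of the
 * idle-block chain; each idle-blocks block starts with a struct
 * iam_idle_head (magic, count, next) followed by up to
 * iam_idle_blocks_limit() recorded idle block numbers:
 *
 *	root:  | gap | dx_countlimit | idle_blocks | ...
 *	                                    |
 *	                                    v
 *	idle:  | iih_magic | iih_count | iih_next | blk[0] ... blk[count-1] |
 *	                                    |
 *	                                    +--> next idle-blocks block, if any
 */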
1740
1741 /*
1742  * Insertion of a new record. Interaction with jbd in the non-trivial case
1743  * (when a split happens) is as follows:
1744  *
1745  *  - the new leaf node is added to the transaction by iam_new_node();
1746  *
1747  *  - the old leaf node is added to the transaction by iam_add_rec();
1748  *
1749  *  - the leaf where the insertion point ends up is marked dirty by iam_add_rec();
1750  *
1751  *  - the leaf without the insertion point is marked dirty (as @new_leaf) by
1752  *  iam_new_leaf();
1753  *
1754  *  - split index nodes are added to the transaction and marked dirty by
1755  *  split_index_node();
1756  *
1757  *  - the "safe" index node, which is not split itself but into which the new
1758  *  pointer is inserted, is added to the transaction and marked dirty by
1759  *  split_index_node();
1760  *
1761  *  - the index node where the pointer to the new leaf is inserted is added to
1762  *  the transaction by split_index_node() and marked dirty by iam_add_rec();
1763  *
1764  *  - the inode is marked dirty by iam_add_rec().
1765  */
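/*
 * A sketch of the per-buffer protocol used throughout this path (@h, @path
 * and @bh come from the caller): every modified buffer is first added to the
 * transaction and only then modified and marked dirty, mirroring what
 * iam_it_rec_set() above does for a simple in-place update.
 *
 *	err = iam_txn_add(h, path, bh);          join bh to the transaction
 *	if (err == 0) {
 *		... modify bh->b_data, e.g. iam_leaf_rec_add() ...
 *		err = iam_txn_dirty(h, path, bh);    mark bh dirty in the journal
 *	}
 */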
1766
1767 static int iam_new_leaf(handle_t *handle, struct iam_leaf *leaf)
1768 {
1769         int err;
1770         iam_ptr_t blknr;
1771         struct buffer_head   *new_leaf;
1772         struct buffer_head   *old_leaf;
1773         struct iam_container *c;
1774         struct inode         *obj;
1775         struct iam_path      *path;
1776
1777         assert_inv(iam_leaf_check(leaf));
1778
1779         c = iam_leaf_container(leaf);
1780         path = leaf->il_path;
1781
1782         obj = c->ic_object;
1783         new_leaf = iam_new_node(handle, c, &blknr, &err);
1784         do_corr(schedule());
1785         if (new_leaf != NULL) {
1786                 struct dynlock_handle *lh;
1787
1788                 lh = iam_lock_htree(c, blknr, DLT_WRITE);
1789                 do_corr(schedule());
1790                 if (lh != NULL) {
1791                         iam_leaf_ops(leaf)->init_new(c, new_leaf);
1792                         do_corr(schedule());
1793                         old_leaf = leaf->il_bh;
1794                         iam_leaf_split(leaf, &new_leaf, blknr);
1795                         if (old_leaf != leaf->il_bh) {
1796                                 /*
1797                                  * Switched to the new leaf.
1798                                  */
1799                                 iam_leaf_unlock(leaf);
1800                                 leaf->il_lock = lh;
1801                                 path->ip_frame->leaf = blknr;
1802                         } else
1803                                 iam_unlock_htree(path->ip_container, lh);
1804                         do_corr(schedule());
1805                         err = iam_txn_dirty(handle, path, new_leaf);
1806                         brelse(new_leaf);
1807                         if (err == 0)
1808                                 err = ldiskfs_mark_inode_dirty(handle, obj);
1809                         do_corr(schedule());
1810                 } else
1811                         err = -ENOMEM;
1812         }
1813         assert_inv(iam_leaf_check(leaf));
1814         assert_inv(iam_leaf_check(&iam_leaf_path(leaf)->ip_leaf));
1815         assert_inv(iam_path_check(iam_leaf_path(leaf)));
1816         return err;
1817 }
1818
1819 static inline void dx_set_limit(struct iam_entry *entries, unsigned value)
1820 {
1821         ((struct dx_countlimit *) entries)->limit = cpu_to_le16(value);
1822 }
1823
1824 static int iam_shift_entries(struct iam_path *path,
1825                          struct iam_frame *frame, unsigned count,
1826                          struct iam_entry *entries, struct iam_entry *entries2,
1827                          u32 newblock)
1828 {
1829         unsigned count1;
1830         unsigned count2;
1831         int delta;
1832
1833         struct iam_frame *parent = frame - 1;
1834         struct iam_ikey *pivot = iam_path_ikey(path, 3);
1835
1836         delta = dx_index_is_compat(path) ? 0 : +1;
1837
1838         count1 = count/2 + delta;
1839         count2 = count - count1;
1840         dx_get_ikey(path, iam_entry_shift(path, entries, count1), pivot);
1841
1842         dxtrace(printk("Split index %d/%d\n", count1, count2));
1843
1844         memcpy((char *) iam_entry_shift(path, entries2, delta),
1845                (char *) iam_entry_shift(path, entries, count1),
1846                count2 * iam_entry_size(path));
1847
1848         dx_set_count(entries2, count2 + delta);
1849         dx_set_limit(entries2, dx_node_limit(path));
1850
1851         /*
1852          * NOTE: very subtle piece of code. A competing dx_probe() may find the
1853          * 2nd-level index in the root index, then we insert the new key here and
1854          * set a new count in that 2nd-level index, so dx_probe() may see a
1855          * 2nd-level index without the hash it looks for. The solution is to check
1856          * the root index again after locking the just-found 2nd-level index. -bzzz
1857          */
1858         iam_insert_key_lock(path, parent, pivot, newblock);
1859
1860         /*
1861          * now both the old and the new 2nd-level index blocks contain all the
1862          * pointers, so dx_probe() may find an entry in either.  That is OK. -bzzz
1863          */
1864         iam_lock_bh(frame->bh);
1865         dx_set_count(entries, count1);
1866         iam_unlock_bh(frame->bh);
1867
1868         /*
1869          * now the old 2nd-level index block points to the first half of the
1870          * leaves. It is important that dx_probe() checks the root index block
1871          * for changes under dx_lock_bh(frame->bh). -bzzz
1872          */
1873
1874         return count1;
1875 }
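/*
 * Worked example for the arithmetic above (illustrative numbers): with
 * count == 511 entries in a non-compat index node (delta == +1), count1 ==
 * 511/2 + 1 == 256 entries remain in the old block and count2 == 255 entries
 * are copied into the new block, whose count is set to count2 + delta == 256;
 * the pivot ikey (taken from entry count1 of the old block) is then inserted
 * into the parent frame to point at @newblock.
 */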
1876
1877
1878 int split_index_node(handle_t *handle, struct iam_path *path,
1879                      struct dynlock_handle **lh)
1880 {
1881
1882         struct iam_entry *entries;   /* old block contents */
1883         struct iam_entry *entries2;  /* new block contents */
1884         struct iam_frame *frame, *safe;
1885         struct buffer_head *bh_new[DX_MAX_TREE_HEIGHT] = {NULL};
1886         u32 newblock[DX_MAX_TREE_HEIGHT] = {0};
1887         struct dynlock_handle *lock[DX_MAX_TREE_HEIGHT] = {NULL,};
1888         struct dynlock_handle *new_lock[DX_MAX_TREE_HEIGHT] = {NULL,};
1889         struct inode *dir = iam_path_obj(path);
1890         struct iam_descr *descr;
1891         int nr_splet;
1892         int i, err;
1893
1894         descr = iam_path_descr(path);
1895         /*
1896          * Algorithm below depends on this.
1897          */
1898         assert_corr(dx_root_limit(path) < dx_node_limit(path));
1899
1900         frame = path->ip_frame;
1901         entries = frame->entries;
1902
1903         /*
1904          * Tall-tree handling: we might have to split multiple index blocks
1905          * all the way up to the tree root. The tricky point here is error
1906          * handling: to avoid complicated undo/rollback we
1907          *
1908          *   - first allocate all necessary blocks
1909          *
1910          *   - insert pointers into them atomically.
1911          */
1912
1913         /*
1914          * Locking: the leaf is already locked. htree locks are acquired, bottom
1915          * to top, on all index nodes that require a split, on the "safe" node,
1916          * and on all new nodes.
1917          */
1918
1919         dxtrace(printk("using %u of %u node entries\n",
1920                        dx_get_count(entries), dx_get_limit(entries)));
1921
1922         /* Which levels need to be split? */
1923         for (nr_splet = 0; frame >= path->ip_frames &&
1924              dx_get_count(frame->entries) == dx_get_limit(frame->entries);
1925              --frame, ++nr_splet) {
1926                 do_corr(schedule());
1927                 if (nr_splet == DX_MAX_TREE_HEIGHT) {
1928                         /*
1929                         CWARN(dir->i_sb, __FUNCTION__,
1930                                      "Directory index full!\n");
1931                                      */
1932                         err = -ENOSPC;
1933                         goto cleanup;
1934                 }
1935         }
1936
1937         safe = frame;
1938
1939         /*
1940          * Lock all nodes, bottom to top.
1941          */
1942         for (frame = path->ip_frame, i = nr_splet; i >= 0; --i, --frame) {
1943                 do_corr(schedule());
1944                 lock[i] = iam_lock_htree(path->ip_container, frame->curidx,
1945                                          DLT_WRITE);
1946                 if (lock[i] == NULL) {
1947                         err = -ENOMEM;
1948                         goto cleanup;
1949                 }
1950         }
1951
1952         /*
1953          * Check for concurrent index modification.
1954          */
1955         err = iam_check_full_path(path, 1);
1956         if (err)
1957                 goto cleanup;
1958         /*
1959          * And check that the same number of nodes is to be split.
1960          */
1961         for (i = 0, frame = path->ip_frame; frame >= path->ip_frames &&
1962              dx_get_count(frame->entries) == dx_get_limit(frame->entries);
1963              --frame, ++i) {
1964                 ;
1965         }
1966         if (i != nr_splet) {
1967                 err = -EAGAIN;
1968                 goto cleanup;
1969         }
1970
1971         /* Go back down, allocating blocks, locking them, and adding them to
1972          * the transaction... */
1973         for (frame = safe + 1, i = 0; i < nr_splet; ++i, ++frame) {
1974                 bh_new[i] = iam_new_node(handle, path->ip_container,
1975                                          &newblock[i], &err);
1976                 do_corr(schedule());
1977                 if (!bh_new[i] ||
1978                     descr->id_ops->id_node_init(path->ip_container,
1979                                                 bh_new[i], 0) != 0)
1980                         goto cleanup;
1981                 new_lock[i] = iam_lock_htree(path->ip_container, newblock[i],
1982                                              DLT_WRITE);
1983                 if (new_lock[i] == NULL) {
1984                         err = -ENOMEM;
1985                         goto cleanup;
1986                 }
1987                 do_corr(schedule());
1988                 BUFFER_TRACE(frame->bh, "get_write_access");
1989                 err = ldiskfs_journal_get_write_access(handle, frame->bh);
1990                 if (err)
1991                         goto journal_error;
1992         }
1993         /* Add "safe" node to transaction too */
1994         if (safe + 1 != path->ip_frames) {
1995                 do_corr(schedule());
1996                 err = ldiskfs_journal_get_write_access(handle, safe->bh);
1997                 if (err)
1998                         goto journal_error;
1999         }
2000
2001         /* Go through nodes once more, inserting pointers */
2002         for (frame = safe + 1, i = 0; i < nr_splet; ++i, ++frame) {
2003                 unsigned count;
2004                 int idx;
2005                 struct buffer_head *bh2;
2006                 struct buffer_head *bh;
2007
2008                 entries = frame->entries;
2009                 count = dx_get_count(entries);
2010                 idx = iam_entry_diff(path, frame->at, entries);
2011
2012                 bh2 = bh_new[i];
2013                 entries2 = dx_get_entries(path, bh2->b_data, 0);
2014
2015                 bh = frame->bh;
2016                 if (frame == path->ip_frames) {
2017                         /* splitting root node. Tricky point:
2018                          *
2019                          * In the "normal" B-tree we'd split root *and* add
2020                          * new root to the tree with pointers to the old root
2021                          * and its sibling (thus introducing two new nodes).
2022                          *
2023                          * In htree it's enough to add one node, because
2024                          * capacity of the root node is smaller than that of
2025                          * non-root one.
2026                          */
2027                         struct iam_frame *frames;
2028                         struct iam_entry *next;
2029
2030                         assert_corr(i == 0);
2031
2032                         do_corr(schedule());
2033
2034                         frames = path->ip_frames;
2035                         memcpy((char *) entries2, (char *) entries,
2036                                count * iam_entry_size(path));
2037                         dx_set_limit(entries2, dx_node_limit(path));
2038
2039                         /* Set up root */
2040                         iam_lock_bh(frame->bh);
2041                         next = descr->id_ops->id_root_inc(path->ip_container,
2042                                                           path, frame);
2043                         dx_set_block(path, next, newblock[0]);
2044                         iam_unlock_bh(frame->bh);
2045
2046                         do_corr(schedule());
2047                         /* Shift frames in the path */
2048                         memmove(frames + 2, frames + 1,
2049                                 (sizeof path->ip_frames) - 2 * sizeof frames[0]);
2050                         /* Add new access path frame */
2051                         frames[1].at = iam_entry_shift(path, entries2, idx);
2052                         frames[1].entries = entries = entries2;
2053                         frames[1].bh = bh2;
2054                         assert_inv(dx_node_check(path, frame));
2055                         ++ path->ip_frame;
2056                         ++ frame;
2057                         assert_inv(dx_node_check(path, frame));
2058                         bh_new[0] = NULL; /* buffer head is "consumed" */
2059                         err = ldiskfs_handle_dirty_metadata(handle, NULL, bh2);
2060                         if (err)
2061                                 goto journal_error;
2062                         do_corr(schedule());
2063                 } else {
2064                         /* splitting non-root index node. */
2065                         struct iam_frame *parent = frame - 1;
2066
2067                         do_corr(schedule());
2068                         count = iam_shift_entries(path, frame, count,
2069                                               entries, entries2, newblock[i]);
2070                         /* Which index block gets the new entry? */
2071                         if (idx >= count) {
2072                                 int d = dx_index_is_compat(path) ? 0 : +1;
2073
2074                                 frame->at = iam_entry_shift(path, entries2,
2075                                                             idx - count + d);
2076                                 frame->entries = entries = entries2;
2077                                 frame->curidx = newblock[i];
2078                                 swap(frame->bh, bh2);
2079                                 assert_corr(lock[i + 1] != NULL);
2080                                 assert_corr(new_lock[i] != NULL);
2081                                 swap(lock[i + 1], new_lock[i]);
2082                                 bh_new[i] = bh2;
2083                                 parent->at = iam_entry_shift(path,
2084                                                              parent->at, +1);
2085                         }
2086                         assert_inv(dx_node_check(path, frame));
2087                         assert_inv(dx_node_check(path, parent));
2088                         dxtrace(dx_show_index ("node", frame->entries));
2089                         dxtrace(dx_show_index ("node",
2090                                ((struct dx_node *) bh2->b_data)->entries));
2091                         err = ldiskfs_handle_dirty_metadata(handle, NULL, bh2);
2092                         if (err)
2093                                 goto journal_error;
2094                         do_corr(schedule());
2095                         err = ldiskfs_handle_dirty_metadata(handle, NULL,
2096                                                             parent->bh);
2097                         if (err)
2098                                 goto journal_error;
2099                 }
2100                 do_corr(schedule());
2101                 err = ldiskfs_handle_dirty_metadata(handle, NULL, bh);
2102                 if (err)
2103                         goto journal_error;
2104         }
2105         /*
2106          * This function was called to make insertion of a new leaf
2107          * possible. Check that it fulfilled its obligations.
2108          */
2109         assert_corr(dx_get_count(path->ip_frame->entries) <
2110                     dx_get_limit(path->ip_frame->entries));
2111         assert_corr(lock[nr_splet] != NULL);
2112         *lh = lock[nr_splet];
2113         lock[nr_splet] = NULL;
2114         if (nr_splet > 0) {
2115                 /*
2116                  * Log ->i_size modification.
2117                  */
2118                 err = ldiskfs_mark_inode_dirty(handle, dir);
2119                 if (err)
2120                         goto journal_error;
2121         }
2122         goto cleanup;
2123 journal_error:
2124         ldiskfs_std_error(dir->i_sb, err);
2125
2126 cleanup:
2127         iam_unlock_array(path->ip_container, lock);
2128         iam_unlock_array(path->ip_container, new_lock);
2129
2130         assert_corr(err || iam_frame_is_locked(path, path->ip_frame));
2131
2132         do_corr(schedule());
2133         for (i = 0; i < ARRAY_SIZE(bh_new); ++i) {
2134                 if (bh_new[i] != NULL)
2135                         brelse(bh_new[i]);
2136         }
2137         return err;
2138 }
2139
2140 static int iam_add_rec(handle_t *handle, struct iam_iterator *it,
2141                        struct iam_path *path,
2142                        const struct iam_key *k, const struct iam_rec *r)
2143 {
2144         int err;
2145         struct iam_leaf *leaf;
2146
2147         leaf = &path->ip_leaf;
2148         assert_inv(iam_leaf_check(leaf));
2149         assert_inv(iam_path_check(path));
2150         err = iam_txn_add(handle, path, leaf->il_bh);
2151         if (err == 0) {
2152                 do_corr(schedule());
2153                 if (!iam_leaf_can_add(leaf, k, r)) {
2154                         struct dynlock_handle *lh = NULL;
2155
2156                         do {
2157                                 assert_corr(lh == NULL);
2158                                 do_corr(schedule());
2159                                 err = split_index_node(handle, path, &lh);
2160                                 if (err == -EAGAIN) {
2161                                         assert_corr(lh == NULL);
2162
2163                                         iam_path_fini(path);
2164                                         it->ii_state = IAM_IT_DETACHED;
2165
2166                                         do_corr(schedule());
2167                                         err = iam_it_get_exact(it, k);
2168                                         if (err == -ENOENT)
2169                                                 err = +1; /* repeat split */
2170                                         else if (err == 0)
2171                                                 err = -EEXIST;
2172                                 }
2173                         } while (err > 0);
2174                         assert_inv(iam_path_check(path));
2175                         if (err == 0) {
2176                                 assert_corr(lh != NULL);
2177                                 do_corr(schedule());
2178                                 err = iam_new_leaf(handle, leaf);
2179                                 if (err == 0)
2180                                         err = iam_txn_dirty(handle, path,
2181                                                             path->ip_frame->bh);
2182                         }
2183                         iam_unlock_htree(path->ip_container, lh);
2184                         do_corr(schedule());
2185                 }
2186                 if (err == 0) {
2187                         iam_leaf_rec_add(leaf, k, r);
2188                         err = iam_txn_dirty(handle, path, leaf->il_bh);
2189                 }
2190         }
2191         assert_inv(iam_leaf_check(leaf));
2192         assert_inv(iam_leaf_check(&path->ip_leaf));
2193         assert_inv(iam_path_check(path));
2194         return err;
2195 }
2196
2197 /*
2198  * Insert new record with key @k and contents from @r, shifting records to the
2199  * right. On success, iterator is positioned on the newly inserted record.
2200  *
2201  * precondition: it->ii_flags&IAM_IT_WRITE &&
2202  *               (it_state(it) == IAM_IT_ATTACHED ||
2203  *                it_state(it) == IAM_IT_SKEWED) &&
2204  *               ergo(it_state(it) == IAM_IT_ATTACHED,
2205  *                    it_keycmp(it, k) <= 0) &&
2206  *               ergo(it_before(it), it_keycmp(it, k) > 0));
2207  * postcondition: ergo(result == 0,
2208  *                     it_state(it) == IAM_IT_ATTACHED &&
2209  *                     it_keycmp(it, k) == 0 &&
2210  *                     !memcmp(iam_it_rec_get(it), r, ...))
2211  */
2212 int iam_it_rec_insert(handle_t *h, struct iam_iterator *it,
2213                       const struct iam_key *k, const struct iam_rec *r)
2214 {
2215         int result;
2216         struct iam_path *path;
2217
2218         path = &it->ii_path;
2219
2220         assert_corr(it->ii_flags&IAM_IT_WRITE);
2221         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
2222                     it_state(it) == IAM_IT_SKEWED);
2223         assert_corr(ergo(it_state(it) == IAM_IT_ATTACHED,
2224                          it_keycmp(it, k) <= 0));
2225         assert_corr(ergo(it_before(it), it_keycmp(it, k) > 0));
2226         result = iam_add_rec(h, it, path, k, r);
2227         if (result == 0)
2228                 it->ii_state = IAM_IT_ATTACHED;
2229         assert_corr(ergo(result == 0,
2230                          it_state(it) == IAM_IT_ATTACHED &&
2231                          it_keycmp(it, k) == 0));
2232         return result;
2233 }
2234
2235 static inline int iam_idle_blocks_limit(struct inode *inode)
2236 {
2237         return (inode->i_sb->s_blocksize - sizeof(struct iam_idle_head)) >> 2;
2238 }
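/*
 * For example, with a 4KB block size and assuming sizeof(struct
 * iam_idle_head) == 8 (magic, count and next fields), the limit is
 * (4096 - 8) >> 2 == 1022 idle block numbers per idle-blocks block.
 */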
2239
2240 /*
2241  * If the leaf cannot be recycled, we will lose one block for reuse.
2242  * It is not a serious issue because it is almost the same as not recycling.
2243  */
2244 static iam_ptr_t iam_index_shrink(handle_t *h, struct iam_path *p,
2245                                   struct iam_leaf *l, struct buffer_head **bh)
2246 {
2247         struct iam_container *c = p->ip_container;
2248         struct inode *inode = c->ic_object;
2249         struct iam_frame *frame = p->ip_frame;
2250         struct iam_entry *entries;
2251         struct iam_entry *pos;
2252         struct dynlock_handle *lh;
2253         int count;
2254         int rc;
2255
2256         if (c->ic_idle_failed)
2257                 return 0;
2258
2259         if (unlikely(frame == NULL))
2260                 return 0;
2261
2262         if (!iam_leaf_empty(l))
2263                 return 0;
2264
2265         lh = iam_lock_htree(c, frame->curidx, DLT_WRITE);
2266         if (lh == NULL) {
2267                 CWARN("%.16s: No memory to recycle idle blocks\n",
2268                       LDISKFS_SB(inode->i_sb)->s_es->s_volume_name);
2269                 return 0;
2270         }
2271
2272         rc = iam_txn_add(h, p, frame->bh);
2273         if (rc != 0) {
2274                 iam_unlock_htree(c, lh);
2275                 return 0;
2276         }
2277
2278         iam_lock_bh(frame->bh);
2279         entries = frame->entries;
2280         count = dx_get_count(entries);
2281         /* Do NOT shrink the last entry in the index node; it can be reused
2282          * directly by the next new node. */
2283         if (count == 2) {
2284                 iam_unlock_bh(frame->bh);
2285                 iam_unlock_htree(c, lh);
2286                 return 0;
2287         }
2288
2289         pos = iam_find_position(p, frame);
2290         /* Some new leaf nodes may have been added, or some empty leaf nodes may
2291          * have been shrunk, during my delete operation.
2292          *
2293          * If the empty leaf is no longer under the current index node because the
2294          * index node has been split, just skip the empty leaf; this case is rare. */
2295         if (unlikely(frame->leaf != dx_get_block(p, pos))) {
2296                 iam_unlock_bh(frame->bh);
2297                 iam_unlock_htree(c, lh);
2298                 return 0;
2299         }
2300
2301         frame->at = pos;
2302         if (frame->at < iam_entry_shift(p, entries, count - 1)) {
2303                 struct iam_entry *n = iam_entry_shift(p, frame->at, 1);
2304
2305                 memmove(frame->at, n,
2306                         (char *)iam_entry_shift(p, entries, count) - (char *)n);
2307                 frame->at_shifted = 1;
2308         }
2309         dx_set_count(entries, count - 1);
2310         iam_unlock_bh(frame->bh);
2311         rc = iam_txn_dirty(h, p, frame->bh);
2312         iam_unlock_htree(c, lh);
2313         if (rc != 0)
2314                 return 0;
2315
2316         get_bh(l->il_bh);
2317         *bh = l->il_bh;
2318         return frame->leaf;
2319 }
2320
2321 static int
2322 iam_install_idle_blocks(handle_t *h, struct iam_path *p, struct buffer_head *bh,
2323                         __u32 *idle_blocks, iam_ptr_t blk)
2324 {
2325         struct iam_container *c = p->ip_container;
2326         struct buffer_head *old = c->ic_idle_bh;
2327         struct iam_idle_head *head;
2328         int rc;
2329
2330         head = (struct iam_idle_head *)(bh->b_data);
2331         head->iih_magic = cpu_to_le16(IAM_IDLE_HEADER_MAGIC);
2332         head->iih_count = 0;
2333         head->iih_next = *idle_blocks;
2334         /* Write access to this bh has already been obtained. */
2335         rc = iam_txn_dirty(h, p, bh);
2336         if (rc != 0)
2337                 return rc;
2338
2339         rc = iam_txn_add(h, p, c->ic_root_bh);
2340         if (rc != 0)
2341                 return rc;
2342
2343         iam_lock_bh(c->ic_root_bh);
2344         *idle_blocks = cpu_to_le32(blk);
2345         iam_unlock_bh(c->ic_root_bh);
2346         rc = iam_txn_dirty(h, p, c->ic_root_bh);
2347         if (rc == 0) {
2348                 /* Do NOT release the old bh before the new one is assigned. */
2349                 get_bh(bh);
2350                 c->ic_idle_bh = bh;
2351                 brelse(old);
2352         } else {
2353                 iam_lock_bh(c->ic_root_bh);
2354                 *idle_blocks = head->iih_next;
2355                 iam_unlock_bh(c->ic_root_bh);
2356         }
2357         return rc;
2358 }
2359
2360 /*
2361  * If the leaf cannot be recycled, we will lose one block for reuse.
2362  * It is not a serious issue because it is almost the same as not recycling.
2363  */
2364 static void iam_recycle_leaf(handle_t *h, struct iam_path *p,
2365                              struct buffer_head *bh, iam_ptr_t blk)
2366 {
2367         struct iam_container *c = p->ip_container;
2368         struct inode *inode = c->ic_object;
2369         struct iam_idle_head *head;
2370         __u32 *idle_blocks;
2371         int count;
2372         int rc;
2373
2374         mutex_lock(&c->ic_idle_mutex);
2375         if (unlikely(c->ic_idle_failed)) {
2376                 rc = -EFAULT;
2377                 goto unlock;
2378         }
2379
2380         idle_blocks = (__u32 *)(c->ic_root_bh->b_data +
2381                                 c->ic_descr->id_root_gap +
2382                                 sizeof(struct dx_countlimit));
2383         /* It is the first idle block. */
2384         if (c->ic_idle_bh == NULL) {
2385                 rc = iam_install_idle_blocks(h, p, bh, idle_blocks, blk);
2386                 goto unlock;
2387         }
2388
2389         head = (struct iam_idle_head *)(c->ic_idle_bh->b_data);
2390         count = le16_to_cpu(head->iih_count);
2391         /* Current ic_idle_bh is full, to be replaced by the leaf. */
2392         if (count == iam_idle_blocks_limit(inode)) {
2393                 rc = iam_install_idle_blocks(h, p, bh, idle_blocks, blk);
2394                 goto unlock;
2395         }
2396
2397         /* Just add to ic_idle_bh. */
2398         rc = iam_txn_add(h, p, c->ic_idle_bh);
2399         if (rc != 0)
2400                 goto unlock;
2401
2402         head->iih_blks[count] = cpu_to_le32(blk);
2403         head->iih_count = cpu_to_le16(count + 1);
2404         rc = iam_txn_dirty(h, p, c->ic_idle_bh);
2405
2406 unlock:
2407         mutex_unlock(&c->ic_idle_mutex);
2408         if (rc != 0)
2409                 CWARN("%.16s: idle blocks failed, will lose the blk %u\n",
2410                       LDISKFS_SB(inode->i_sb)->s_es->s_volume_name, blk);
2411 }
2412
2413 /*
2414  * Delete record under iterator.
2415  *
2416  * precondition:  it_state(it) == IAM_IT_ATTACHED &&
2417  *                it->ii_flags&IAM_IT_WRITE &&
2418  *                it_at_rec(it)
2419  * postcondition: it_state(it) == IAM_IT_ATTACHED ||
2420  *                it_state(it) == IAM_IT_DETACHED
2421  */
2422 int iam_it_rec_delete(handle_t *h, struct iam_iterator *it)
2423 {
2424         int result;
2425         struct iam_leaf *leaf;
2426         struct iam_path *path;
2427
2428         assert_corr(it_state(it) == IAM_IT_ATTACHED &&
2429                     it->ii_flags&IAM_IT_WRITE);
2430         assert_corr(it_at_rec(it));
2431
2432         path = &it->ii_path;
2433         leaf = &path->ip_leaf;
2434
2435         assert_inv(iam_leaf_check(leaf));
2436         assert_inv(iam_path_check(path));
2437
2438         result = iam_txn_add(h, path, leaf->il_bh);
2439         /*
2440          * no compaction for now.
2441          */
2442         if (result == 0) {
2443                 iam_rec_del(leaf, it->ii_flags&IAM_IT_MOVE);
2444                 result = iam_txn_dirty(h, path, leaf->il_bh);
2445                 if (result == 0 && iam_leaf_at_end(leaf)) {
2446                         struct buffer_head *bh = NULL;
2447                         iam_ptr_t blk;
2448
2449                         blk = iam_index_shrink(h, path, leaf, &bh);
2450                         if (it->ii_flags & IAM_IT_MOVE) {
2451                                 result = iam_it_next(it);
2452                                 if (result > 0)
2453                                         result = 0;
2454                         }
2455
2456                         if (bh != NULL) {
2457                                 iam_recycle_leaf(h, path, bh, blk);
2458                                 brelse(bh);
2459                         }
2460                 }
2461         }
2462         assert_inv(iam_leaf_check(leaf));
2463         assert_inv(iam_path_check(path));
2464         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
2465                     it_state(it) == IAM_IT_DETACHED);
2466         return result;
2467 }
2468
2469 /*
2470  * Convert iterator to cookie.
2471  *
2472  * precondition:  it_state(it) == IAM_IT_ATTACHED &&
2473  *                iam_path_descr(it->ii_path)->id_key_size <= sizeof(iam_pos_t)
2474  * postcondition: it_state(it) == IAM_IT_ATTACHED
2475  */
2476 iam_pos_t iam_it_store(const struct iam_iterator *it)
2477 {
2478         iam_pos_t result;
2479
2480         assert_corr(it_state(it) == IAM_IT_ATTACHED);
2481         assert_corr(it_at_rec(it));
2482         assert_corr(iam_it_container(it)->ic_descr->id_ikey_size <=
2483                     sizeof result);
2484
2485         result = 0;
2486         return *(iam_pos_t *)iam_it_ikey_get(it, (void *)&result);
2487 }
2488
2489 /*
2490  * Restore iterator from cookie.
2491  *
2492  * precondition:  it_state(it) == IAM_IT_DETACHED && it->ii_flags&IAM_IT_MOVE &&
2493  *                iam_path_descr(it->ii_path)->id_key_size <= sizeof(iam_pos_t)
2494  * postcondition: ergo(result == 0, it_state(it) == IAM_IT_ATTACHED &&
2495  *                                  iam_it_store(it) == pos)
2496  */
2497 int iam_it_load(struct iam_iterator *it, iam_pos_t pos)
2498 {
2499         assert_corr(it_state(it) == IAM_IT_DETACHED &&
2500                     it->ii_flags&IAM_IT_MOVE);
2501         assert_corr(iam_it_container(it)->ic_descr->id_ikey_size <= sizeof pos);
2502         return iam_it_iget(it, (struct iam_ikey *)&pos);
2503 }
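/*
 * Usage sketch (illustrative only): the cookie returned by iam_it_store()
 * can later be passed to iam_it_load() to resume iteration, e.g. across
 * readdir-style calls. @c and @pd are assumed to be prepared by the caller.
 *
 *	struct iam_iterator it;
 *	iam_pos_t pos;
 *
 *	iam_it_init(&it, c, IAM_IT_MOVE, pd);
 *	... attach the iterator to a record, e.g. with iam_it_get() ...
 *	pos = iam_it_store(&it);
 *	iam_it_put(&it);
 *	iam_it_fini(&it);
 *
 *	... later ...
 *	iam_it_init(&it, c, IAM_IT_MOVE, pd);
 *	if (iam_it_load(&it, pos) == 0)
 *		... iterator is attached at the remembered position ...
 */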
2504
2505 /***********************************************************************/
2506 /* invariants                                                          */
2507 /***********************************************************************/
2508
2509 static inline int ptr_inside(void *base, size_t size, void *ptr)
2510 {
2511         return (base <= ptr) && (ptr < base + size);
2512 }
2513
2514 static int iam_frame_invariant(struct iam_frame *f)
2515 {
2516         return
2517                 (f->bh != NULL &&
2518                 f->bh->b_data != NULL &&
2519                 ptr_inside(f->bh->b_data, f->bh->b_size, f->entries) &&
2520                 ptr_inside(f->bh->b_data, f->bh->b_size, f->at) &&
2521                 f->entries <= f->at);
2522 }
2523
2524 static int iam_leaf_invariant(struct iam_leaf *l)
2525 {
2526         return
2527                 l->il_bh != NULL &&
2528                 l->il_bh->b_data != NULL &&
2529                 ptr_inside(l->il_bh->b_data, l->il_bh->b_size, l->il_entries) &&
2530                 ptr_inside(l->il_bh->b_data, l->il_bh->b_size, l->il_at) &&
2531                 l->il_entries <= l->il_at;
2532 }
2533
2534 static int iam_path_invariant(struct iam_path *p)
2535 {
2536         int i;
2537
2538         if (p->ip_container == NULL ||
2539             p->ip_indirect < 0 || p->ip_indirect > DX_MAX_TREE_HEIGHT - 1 ||
2540             p->ip_frame != p->ip_frames + p->ip_indirect ||
2541             !iam_leaf_invariant(&p->ip_leaf))
2542                 return 0;
2543         for (i = 0; i < ARRAY_SIZE(p->ip_frames); ++i) {
2544                 if (i <= p->ip_indirect) {
2545                         if (!iam_frame_invariant(&p->ip_frames[i]))
2546                                 return 0;
2547                 }
2548         }
2549         return 1;
2550 }
2551
2552 int iam_it_invariant(struct iam_iterator *it)
2553 {
2554         return
2555                 (it->ii_state == IAM_IT_DETACHED ||
2556                  it->ii_state == IAM_IT_ATTACHED ||
2557                  it->ii_state == IAM_IT_SKEWED) &&
2558                 !(it->ii_flags & ~(IAM_IT_MOVE | IAM_IT_WRITE)) &&
2559                 ergo(it->ii_state == IAM_IT_ATTACHED ||
2560                      it->ii_state == IAM_IT_SKEWED,
2561                      iam_path_invariant(&it->ii_path) &&
2562                      equi(it_at_rec(it), it->ii_state == IAM_IT_SKEWED));
2563 }
2564
2565 /*
2566  * Search container @c for record with key @k. If record is found, its data
2567  * are moved into @r.
2568  *
2569  * Return values: 0: found, -ENOENT: not-found, -ve: error
2570  */
2571 int iam_lookup(struct iam_container *c, const struct iam_key *k,
2572                struct iam_rec *r, struct iam_path_descr *pd)
2573 {
2574         struct iam_iterator it;
2575         int result;
2576
2577         iam_it_init(&it, c, 0, pd);
2578
2579         result = iam_it_get_exact(&it, k);
2580         if (result == 0)
2581                 /*
2582                  * record with required key found, copy it into user buffer
2583                  */
2584                 iam_reccpy(&it.ii_path.ip_leaf, r);
2585         iam_it_put(&it);
2586         iam_it_fini(&it);
2587         return result;
2588 }
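/*
 * Example (a sketch; the container @c, descriptor @pd, key and record buffer
 * are assumed to be prepared by the caller, and struct my_rec is a
 * hypothetical record type matching the container's record size):
 *
 *	struct my_rec rec;
 *	int rc;
 *
 *	rc = iam_lookup(c, (const struct iam_key *)&key,
 *			(struct iam_rec *)&rec, pd);
 *	if (rc == 0)
 *		... rec holds the data stored under key ...
 *	else if (rc == -ENOENT)
 *		... no record with this key ...
 */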
2589
2590 /*
2591  * Insert new record @r with key @k into container @c (within context of
2592  * transaction @h).
2593  *
2594  * Return values: 0: success, -ve: error, including -EEXIST when record with
2595  * given key is already present.
2596  *
2597  * postcondition: ergo(result == 0 || result == -EEXIST,
2598  *                                  iam_lookup(c, k, r2) == 0)
2599  */
2600 int iam_insert(handle_t *h, struct iam_container *c, const struct iam_key *k,
2601                const struct iam_rec *r, struct iam_path_descr *pd)
2602 {
2603         struct iam_iterator it;
2604         int result;
2605
2606         iam_it_init(&it, c, IAM_IT_WRITE, pd);
2607
2608         result = iam_it_get_exact(&it, k);
2609         if (result == -ENOENT)
2610                 result = iam_it_rec_insert(h, &it, k, r);
2611         else if (result == 0)
2612                 result = -EEXIST;
2613         iam_it_put(&it);
2614         iam_it_fini(&it);
2615         return result;
2616 }
2617
2618 /*
2619  * Update record with the key @k in container @c (within context of
2620  * transaction @h), new record is given by @r.
2621  *
2622  * Return values: +1: skipped because the record value is unchanged, 0: success,
2623  * -ve: error, including -ENOENT if no record with the given key is found.
2624  */
2625 int iam_update(handle_t *h, struct iam_container *c, const struct iam_key *k,
2626                const struct iam_rec *r, struct iam_path_descr *pd)
2627 {
2628         struct iam_iterator it;
2629         struct iam_leaf *folio;
2630         int result;
2631
2632         iam_it_init(&it, c, IAM_IT_WRITE, pd);
2633
2634         result = iam_it_get_exact(&it, k);
2635         if (result == 0) {
2636                 folio = &it.ii_path.ip_leaf;
2637                 result = iam_leaf_ops(folio)->rec_eq(folio, r);
2638                 if (result == 0)
2639                         result = iam_it_rec_set(h, &it, r);
2640                 else
2641                         result = 1;
2642         }
2643         iam_it_put(&it);
2644         iam_it_fini(&it);
2645         return result;
2646 }
2647
2648 /*
2649  * Delete existing record with key @k.
2650  *
2651  * Return values: 0: success, -ENOENT: not-found, -ve: other error.
2652  *
2653  * postcondition: ergo(result == 0 || result == -ENOENT,
2654  *                                 iam_lookup(c, k, *) == -ENOENT);
2655  */
2656 int iam_delete(handle_t *h, struct iam_container *c, const struct iam_key *k,
2657                struct iam_path_descr *pd)
2658 {
2659         struct iam_iterator it;
2660         int result;
2661
2662         iam_it_init(&it, c, IAM_IT_WRITE, pd);
2663
2664         result = iam_it_get_exact(&it, k);
2665         if (result == 0)
2666                 iam_it_rec_delete(h, &it);
2667         iam_it_put(&it);
2668         iam_it_fini(&it);
2669         return result;
2670 }
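/*
 * Example of the non-iterator API above (a sketch; @h is an open journal
 * handle with enough credits for a possible split, and @c, @pd, key @k and
 * record buffers @r, @r2 are assumed to be prepared by the caller):
 *
 *	rc = iam_insert(h, c, k, r, pd);       -EEXIST if the key exists
 *	if (rc == 0)
 *		rc = iam_update(h, c, k, r2, pd);  +1 if the record is unchanged
 *	if (rc >= 0)
 *		rc = iam_delete(h, c, k, pd);      -ENOENT if the key is absent
 */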
2671
2672 int iam_root_limit(int rootgap, int blocksize, int size)
2673 {
2674         int limit;
2675         int nlimit;
2676
2677         limit = (blocksize - rootgap) / size;
2678         nlimit = blocksize / size;
2679         if (limit == nlimit)
2680                 limit--;
2681         return limit;
2682 }
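/*
 * Worked example (illustrative numbers): with blocksize == 4096, rootgap ==
 * 16 and size == 24, limit == (4096 - 16) / 24 == 170 equals nlimit ==
 * 4096 / 24 == 170, so one entry is subtracted and 169 is returned; this
 * keeps the root limit strictly below the node limit, which
 * split_index_node() depends on (dx_root_limit() < dx_node_limit()).
 */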