1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see [sun.com URL with a
18  * copy of GPLv2].
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2011, 2015, Intel Corporation.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * iam.c
37  * Top-level entry points into iam module
38  *
39  * Author: Wang Di <wangdi@clusterfs.com>
40  * Author: Nikita Danilov <nikita@clusterfs.com>
41  */
42
43 /*
44  * iam: big theory statement.
45  *
46  * iam (Index Access Module) is a module providing abstraction of persistent
47  * transactional container on top of generalized ldiskfs htree.
48  *
49  * iam supports:
50  *
51  *     - key, pointer, and record size specifiable per container.
52  *
53  *     - trees taller than 2 index levels.
54  *
55  *     - read/write to existing ldiskfs htree directories as iam containers.
56  *
57  * iam container is a tree, consisting of leaf nodes containing keys and
58  * records stored in this container, and index nodes, containing keys and
59  * pointers to leaf or index nodes.
60  *
61  * iam does not work with keys directly; instead it calls a user-supplied key
62  * comparison function (->dpo_keycmp()).
63  *
64  * Pointers are (currently) interpreted as logical offsets (measured in
65  * blocks) within the underlying flat file on top of which the iam tree lives.
66  *
67  * On-disk format:
68  *
69  * iam mostly tries to reuse existing htree formats.
70  *
71  * Format of index node:
72  *
73  * +-----+-------+-------+-------+------+-------+------------+
74  * |     | count |       |       |      |       |            |
75  * | gap |   /   | entry | entry | .... | entry | free space |
76  * |     | limit |       |       |      |       |            |
77  * +-----+-------+-------+-------+------+-------+------------+
78  *
79  *       gap           this part of node is never accessed by iam code. It
80  *                     exists for binary compatibility with ldiskfs htree (that,
81  *                     in turn, stores fake struct ext2_dirent for ext2
82  *                     compatibility), and to keep some unspecified per-node
83  *                     data. Gap can be different for root and non-root index
84  *                     nodes. Gap size can be specified for each container
85  *                     (gap of 0 is allowed).
86  *
87  *       count/limit   current number of entries in this node, and the maximal
88  *                     number of entries that can fit into the node. count/limit
89  *                     occupies the same space as an entry, and is itself counted
90  *                     in count.
91  *
92  *       entry         index entry: consists of a key immediately followed by
93  *                     a pointer to a child node. Size of a key and size of a
94  *                     pointer depend on the container. An entry has neither
95  *                     alignment nor padding.
96  *
97  *       free space    portion of the node to which new entries are added
98  *
99  * Entries in index node are sorted by their key value.
100  *
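 * Because an entry is just a key immediately followed by a block pointer,
 * with no alignment or padding, the address of the i-th entry can be
 * computed directly. A minimal sketch (illustrative only; the real key and
 * pointer sizes come from the container descriptor, cf. iam_entry_shift()):
 *
 *       entry_i = (void *)entries + i * (ikey_size + ptr_size);
 *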
101  * Format of a leaf node is not specified. Generic iam code accesses leaf
102  * nodes through ->id_leaf methods in struct iam_descr.
103  *
104  * The IAM root block is a special node, which contains the IAM descriptor.
105  * Its on-disk format is:
106  *
107  * +---------+-------+--------+---------+-------+------+-------+------------+
108  * |IAM desc | count |  idle  |         |       |      |       |            |
109  * |(fix/var)|   /   | blocks | padding | entry | .... | entry | free space |
110  * |         | limit |        |         |       |      |       |            |
111  * +---------+-------+--------+---------+-------+------+-------+------------+
112  *
113  * The padding length is calculated with the parameters in the IAM descriptor.
114  *
115  * The field "idle_blocks" records empty leaf nodes: nodes that have not been
116  * released even though all entries in them have been removed. Ideally, such
117  * idle blocks would be reused whenever new leaf nodes are needed for new
118  * entries, but that requires the IAM hash functions to map the new entries
119  * onto exactly these idle blocks. Unfortunately, it is hard to design hash
120  * functions with such a clever mapping, especially without hurting insert/
121  * lookup performance.
122  *
123  * So the IAM recycles the empty leaf nodes and puts them into a per-file
124  * pool of idle blocks. When a new leaf node is needed, a block is taken
125  * from this pool first, regardless of where the IAM hash functions would
126  * map the entry.
127  *
128  * The idle blocks pool is organized as a series of tables, and each table
129  * can be described as follows (on-disk format):
130  *
131  * +---------+---------+---------+---------+------+---------+-------+
132  * |  magic  |  count  |  next   |  logic  |      |  logic  | free  |
133  * |(16 bits)|(16 bits)|  table  |  blk #  | .... |  blk #  | space |
134  * |         |         |(32 bits)|(32 bits)|      |(32 bits)|       |
135  * +---------+---------+---------+---------+------+---------+-------+
136  *
137  * The logical blk# of the first table is stored in the root node field "idle_blocks".
138  *
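 * As the format-guess code below reads it, "idle_blocks" sits in the root
 * block right after the count/limit pair:
 *
 *       idle_blocks = (__u32 *)(root_bh->b_data +
 *                               descr->id_root_gap +
 *                               sizeof(struct dx_countlimit));
 *
 * A minimal C sketch of the idle table header drawn above (the authoritative
 * definition lives elsewhere in the osd code; only iih_magic is used in this
 * file, the other field names are illustrative assumptions):
 *
 *       struct iam_idle_head {
 *               __le16 iih_magic;    (16-bit magic, IAM_IDLE_HEADER_MAGIC)
 *               __le16 iih_count;    (number of idle block slots in use)
 *               __le32 iih_next;     (logical blk# of the next table, 0 if none)
 *               __le32 iih_blks[0];  (idle leaf block numbers)
 *       };
 *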
139  */
140
141 #include <linux/module.h>
142 #include <linux/fs.h>
143 #include <linux/pagemap.h>
144 #include <linux/time.h>
145 #include <linux/fcntl.h>
146 #include <linux/stat.h>
147 #include <linux/string.h>
148 #include <linux/quotaops.h>
149 #include <linux/buffer_head.h>
150
151 #include <ldiskfs/ldiskfs.h>
152 #include <ldiskfs/xattr.h>
153 #undef ENTRY
154
155 #include "osd_internal.h"
156
157 #include <ldiskfs/acl.h>
158
159 /*
160  * List of all registered formats.
161  *
162  * No locking. Callers synchronize.
163  */
164 static struct list_head iam_formats = LIST_HEAD_INIT(iam_formats);
165
166 void iam_format_register(struct iam_format *fmt)
167 {
168         list_add(&fmt->if_linkage, &iam_formats);
169 }
170
171 static struct buffer_head *
172 iam_load_idle_blocks(struct iam_container *c, iam_ptr_t blk)
173 {
174         struct inode *inode = c->ic_object;
175         struct iam_idle_head *head;
176         struct buffer_head *bh;
177         int err = 0;
178
179         LASSERT(mutex_is_locked(&c->ic_idle_mutex));
180
181         if (blk == 0)
182                 return NULL;
183
184         bh = ldiskfs_bread(NULL, inode, blk, 0, &err);
185         if (bh == NULL) {
186                 CERROR("%.16s: cannot load idle blocks, blk = %u, err = %d\n",
187                        LDISKFS_SB(inode->i_sb)->s_es->s_volume_name, blk, err);
188                 c->ic_idle_failed = 1;
189                 err = err ? err : -EIO;
190                 return ERR_PTR(err);
191         }
192
193         head = (struct iam_idle_head *)(bh->b_data);
194         if (le16_to_cpu(head->iih_magic) != IAM_IDLE_HEADER_MAGIC) {
195                 CERROR("%.16s: invalid idle block head, blk = %u, magic = %d\n",
196                        LDISKFS_SB(inode->i_sb)->s_es->s_volume_name, blk,
197                        le16_to_cpu(head->iih_magic));
198                 brelse(bh);
199                 c->ic_idle_failed = 1;
200                 return ERR_PTR(-EBADF);
201         }
202
203         return bh;
204 }
205
206 /*
207  * Determine format of given container. This is done by scanning list of
208  * registered formats and calling ->if_guess() method of each in turn.
209  */
210 static int iam_format_guess(struct iam_container *c)
211 {
212         int result;
213         struct iam_format *fmt;
214
215         /*
216          * XXX temporary initialization hook.
217          */
218         {
219                 static int initialized = 0;
220
221                 if (!initialized) {
222                         iam_lvar_format_init();
223                         iam_lfix_format_init();
224                         initialized = 1;
225                 }
226         }
227
228         result = -ENOENT;
229         list_for_each_entry(fmt, &iam_formats, if_linkage) {
230                 result = fmt->if_guess(c);
231                 if (result == 0)
232                         break;
233         }
234
235         if (result == 0) {
236                 struct buffer_head *bh;
237                 __u32 *idle_blocks;
238
239                 LASSERT(c->ic_root_bh != NULL);
240
241                 idle_blocks = (__u32 *)(c->ic_root_bh->b_data +
242                                         c->ic_descr->id_root_gap +
243                                         sizeof(struct dx_countlimit));
244                 mutex_lock(&c->ic_idle_mutex);
245                 bh = iam_load_idle_blocks(c, le32_to_cpu(*idle_blocks));
246                 if (bh != NULL && IS_ERR(bh))
247                         result = PTR_ERR(bh);
248                 else
249                         c->ic_idle_bh = bh;
250                 mutex_unlock(&c->ic_idle_mutex);
251         }
252
253         return result;
254 }
255
256 /*
257  * Initialize container @c.
258  */
259 int iam_container_init(struct iam_container *c,
260                        struct iam_descr *descr, struct inode *inode)
261 {
262         memset(c, 0, sizeof *c);
263         c->ic_descr  = descr;
264         c->ic_object = inode;
265         init_rwsem(&c->ic_sem);
266         dynlock_init(&c->ic_tree_lock);
267         mutex_init(&c->ic_idle_mutex);
268         return 0;
269 }
270
271 /*
272  * Determine container format.
273  */
274 int iam_container_setup(struct iam_container *c)
275 {
276         return iam_format_guess(c);
277 }
278
279 /*
280  * Finalize container @c, release all resources.
281  */
282 void iam_container_fini(struct iam_container *c)
283 {
284         brelse(c->ic_idle_bh);
285         c->ic_idle_bh = NULL;
286         brelse(c->ic_root_bh);
287         c->ic_root_bh = NULL;
288 }
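
/*
 * A hedged sketch of the container life cycle implied by the three functions
 * above (error handling omitted; @descr and @inode are assumed to be set up
 * by the caller, and the root buffer is read during format detection):
 *
 *         rc = iam_container_init(&c, descr, inode);
 *         if (rc == 0)
 *                 rc = iam_container_setup(&c);   // detect format, pin root
 *         ...                                     // use the container
 *         iam_container_fini(&c);                 // release root and idle buffers
 */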
289
290 void iam_path_init(struct iam_path *path, struct iam_container *c,
291                    struct iam_path_descr *pd)
292 {
293         memset(path, 0, sizeof *path);
294         path->ip_container = c;
295         path->ip_frame = path->ip_frames;
296         path->ip_data = pd;
297         path->ip_leaf.il_path = path;
298 }
299
300 static void iam_leaf_fini(struct iam_leaf *leaf);
301
302 void iam_path_release(struct iam_path *path)
303 {
304         int i;
305
306         for (i = 0; i < ARRAY_SIZE(path->ip_frames); i++) {
307                 if (path->ip_frames[i].bh != NULL) {
308                         path->ip_frames[i].at_shifted = 0;
309                         brelse(path->ip_frames[i].bh);
310                         path->ip_frames[i].bh = NULL;
311                 }
312         }
313 }
314
315 void iam_path_fini(struct iam_path *path)
316 {
317         iam_leaf_fini(&path->ip_leaf);
318         iam_path_release(path);
319 }
320
321
322 void iam_path_compat_init(struct iam_path_compat *path, struct inode *inode)
323 {
324         int i;
325
326         path->ipc_hinfo = &path->ipc_hinfo_area;
327         for (i = 0; i < ARRAY_SIZE(path->ipc_scratch); ++i)
328                 path->ipc_descr.ipd_key_scratch[i] =
329                         (struct iam_ikey *)&path->ipc_scratch[i];
330
331         iam_path_init(&path->ipc_path, &path->ipc_container, &path->ipc_descr);
332 }
333
334 void iam_path_compat_fini(struct iam_path_compat *path)
335 {
336         iam_path_fini(&path->ipc_path);
337 }
338
339 /*
340  * Helper function initializing iam_path_descr and its key scratch area.
341  */
342 struct iam_path_descr *iam_ipd_alloc(void *area, int keysize)
343 {
344         struct iam_path_descr *ipd;
345         void *karea;
346         int i;
347
348         ipd = area;
349         karea = ipd + 1;
350         for (i = 0; i < ARRAY_SIZE(ipd->ipd_key_scratch); ++i, karea += keysize)
351                 ipd->ipd_key_scratch[i] = karea;
352         return ipd;
353 }
354
355 void iam_ipd_free(struct iam_path_descr *ipd)
356 {
357 }
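
/*
 * A hedged usage note for iam_ipd_alloc()/iam_ipd_free(): the caller supplies
 * a flat @area of at least
 *
 *         sizeof(struct iam_path_descr) +
 *                 ARRAY_SIZE(ipd->ipd_key_scratch) * keysize
 *
 * bytes, since the scratch keys are carved out immediately after the
 * descriptor itself.
 */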
358
359 int iam_node_read(struct iam_container *c, iam_ptr_t ptr,
360                   handle_t *h, struct buffer_head **bh)
361 {
362         int result = 0;
363
364         /* NB: it can be called by iam_lfix_guess() which is still at a
365          * very early stage, when c->ic_root_bh and c->ic_descr->id_ops
366          * haven't been initialized yet.
367          * Also, we don't have this for IAM dir.
368          */
369         if (c->ic_root_bh != NULL &&
370             c->ic_descr->id_ops->id_root_ptr(c) == ptr) {
371                 get_bh(c->ic_root_bh);
372                 *bh = c->ic_root_bh;
373                 return 0;
374         }
375
376         *bh = ldiskfs_bread(h, c->ic_object, (int)ptr, 0, &result);
377         if (*bh == NULL)
378                 result = result ? result : -EIO;
379         return result;
380 }
381
382 /*
383  * Return pointer to current leaf record. Pointer is valid while corresponding
384  * leaf node is locked and pinned.
385  */
386 static struct iam_rec *iam_leaf_rec(const struct iam_leaf *leaf)
387 {
388         return iam_leaf_ops(leaf)->rec(leaf);
389 }
390
391 /*
392  * Return pointer to the current leaf key. This function returns pointer to
393  * the key stored in node.
394  *
395  * Caller should assume that returned pointer is only valid while leaf node is
396  * pinned and locked.
397  */
398 static struct iam_key *iam_leaf_key(const struct iam_leaf *leaf)
399 {
400         return iam_leaf_ops(leaf)->key(leaf);
401 }
402
403 static int iam_leaf_key_size(const struct iam_leaf *leaf)
404 {
405         return iam_leaf_ops(leaf)->key_size(leaf);
406 }
407
408 static struct iam_ikey *iam_leaf_ikey(const struct iam_leaf *leaf,
409                                       struct iam_ikey *key)
410 {
411         return iam_leaf_ops(leaf)->ikey(leaf, key);
412 }
413
414 static int iam_leaf_keycmp(const struct iam_leaf *leaf,
415                            const struct iam_key *key)
416 {
417         return iam_leaf_ops(leaf)->key_cmp(leaf, key);
418 }
419
420 static int iam_leaf_keyeq(const struct iam_leaf *leaf,
421                           const struct iam_key *key)
422 {
423         return iam_leaf_ops(leaf)->key_eq(leaf, key);
424 }
425
426 #if LDISKFS_INVARIANT_ON
427 static int iam_leaf_check(struct iam_leaf *leaf);
428 extern int dx_node_check(struct iam_path *p, struct iam_frame *f);
429
430 static int iam_path_check(struct iam_path *p)
431 {
432         int i;
433         int result;
434         struct iam_frame *f;
435         struct iam_descr *param;
436
437         result = 1;
438         param = iam_path_descr(p);
439         for (i = 0; result && i < ARRAY_SIZE(p->ip_frames); ++i) {
440                 f = &p->ip_frames[i];
441                 if (f->bh != NULL) {
442                         result = dx_node_check(p, f);
443                         if (result)
444                                 result = !param->id_ops->id_node_check(p, f);
445                 }
446         }
447         if (result && p->ip_leaf.il_bh != NULL)
448                 result = iam_leaf_check(&p->ip_leaf);
449         if (result == 0) {
450                 ldiskfs_std_error(iam_path_obj(p)->i_sb, result);
451         }
452         return result;
453 }
454 #endif
455
456 static int iam_leaf_load(struct iam_path *path)
457 {
458         iam_ptr_t block;
459         int err;
460         struct iam_container *c;
461         struct buffer_head   *bh;
462         struct iam_leaf      *leaf;
463         struct iam_descr     *descr;
464
465         c     = path->ip_container;
466         leaf  = &path->ip_leaf;
467         descr = iam_path_descr(path);
468         block = path->ip_frame->leaf;
469         if (block == 0) {
470                 /* XXX bug 11027 */
471                 printk(KERN_EMERG "wrong leaf: %lu %d [%p %p %p]\n",
472                        (long unsigned)path->ip_frame->leaf,
473                        dx_get_count(dx_node_get_entries(path, path->ip_frame)),
474                        path->ip_frames[0].bh, path->ip_frames[1].bh,
475                        path->ip_frames[2].bh);
476         }
477         err = descr->id_ops->id_node_read(c, block, NULL, &bh);
478         if (err == 0) {
479                 leaf->il_bh = bh;
480                 leaf->il_curidx = block;
481                 err = iam_leaf_ops(leaf)->init(leaf);
482                 assert_inv(ergo(err == 0, iam_leaf_check(leaf)));
483         }
484         return err;
485 }
486
487 static void iam_unlock_htree(struct iam_container *ic,
488                              struct dynlock_handle *lh)
489 {
490         if (lh != NULL)
491                 dynlock_unlock(&ic->ic_tree_lock, lh);
492 }
493
494
495 static void iam_leaf_unlock(struct iam_leaf *leaf)
496 {
497         if (leaf->il_lock != NULL) {
498                 iam_unlock_htree(iam_leaf_container(leaf),
499                                  leaf->il_lock);
500                 do_corr(schedule());
501                 leaf->il_lock = NULL;
502         }
503 }
504
505 static void iam_leaf_fini(struct iam_leaf *leaf)
506 {
507         if (leaf->il_path != NULL) {
508                 iam_leaf_unlock(leaf);
509                 assert_inv(ergo(leaf->il_bh != NULL, iam_leaf_check(leaf)));
510                 iam_leaf_ops(leaf)->fini(leaf);
511                 if (leaf->il_bh) {
512                         brelse(leaf->il_bh);
513                         leaf->il_bh = NULL;
514                         leaf->il_curidx = 0;
515                 }
516         }
517 }
518
519 static void iam_leaf_start(struct iam_leaf *folio)
520 {
521         iam_leaf_ops(folio)->start(folio);
522 }
523
524 void iam_leaf_next(struct iam_leaf *folio)
525 {
526         iam_leaf_ops(folio)->next(folio);
527 }
528
529 static void iam_leaf_rec_add(struct iam_leaf *leaf, const struct iam_key *key,
530                              const struct iam_rec *rec)
531 {
532         iam_leaf_ops(leaf)->rec_add(leaf, key, rec);
533 }
534
535 static void iam_rec_del(struct iam_leaf *leaf, int shift)
536 {
537         iam_leaf_ops(leaf)->rec_del(leaf, shift);
538 }
539
540 int iam_leaf_at_end(const struct iam_leaf *leaf)
541 {
542         return iam_leaf_ops(leaf)->at_end(leaf);
543 }
544
545 static void iam_leaf_split(struct iam_leaf *l, struct buffer_head **bh,
546                            iam_ptr_t nr)
547 {
548         iam_leaf_ops(l)->split(l, bh, nr);
549 }
550
551 static inline int iam_leaf_empty(struct iam_leaf *l)
552 {
553         return iam_leaf_ops(l)->leaf_empty(l);
554 }
555
556 int iam_leaf_can_add(const struct iam_leaf *l,
557                      const struct iam_key *k, const struct iam_rec *r)
558 {
559         return iam_leaf_ops(l)->can_add(l, k, r);
560 }
561
562 #if LDISKFS_INVARIANT_ON
563 static int iam_leaf_check(struct iam_leaf *leaf)
564 {
565         return 1;
566 #if 0
567         struct iam_lentry    *orig;
568         struct iam_path      *path;
569         struct iam_container *bag;
570         struct iam_ikey       *k0;
571         struct iam_ikey       *k1;
572         int result;
573         int first;
574
575         orig = leaf->il_at;
576         path = iam_leaf_path(leaf);
577         bag  = iam_leaf_container(leaf);
578
579         result = iam_leaf_ops(leaf)->init(leaf);
580         if (result != 0)
581                 return result;
582
583         first = 1;
584         iam_leaf_start(leaf);
585         k0 = iam_path_ikey(path, 0);
586         k1 = iam_path_ikey(path, 1);
587         while (!iam_leaf_at_end(leaf)) {
588                 iam_ikeycpy(bag, k0, k1);
589                 iam_ikeycpy(bag, k1, iam_leaf_ikey(leaf, k1));
590                 if (!first && iam_ikeycmp(bag, k0, k1) > 0) {
591                         return 0;
592                 }
593                 first = 0;
594                 iam_leaf_next(leaf);
595         }
596         leaf->il_at = orig;
597         return 1;
598 #endif
599 }
600 #endif
601
602 static int iam_txn_dirty(handle_t *handle,
603                          struct iam_path *path, struct buffer_head *bh)
604 {
605         int result;
606
607         result = ldiskfs_handle_dirty_metadata(handle, NULL, bh);
608         if (result != 0)
609                 ldiskfs_std_error(iam_path_obj(path)->i_sb, result);
610         return result;
611 }
612
613 static int iam_txn_add(handle_t *handle,
614                        struct iam_path *path, struct buffer_head *bh)
615 {
616         int result;
617
618         result = ldiskfs_journal_get_write_access(handle, bh);
619         if (result != 0)
620                 ldiskfs_std_error(iam_path_obj(path)->i_sb, result);
621         return result;
622 }
623
624 /***********************************************************************/
625 /* iterator interface                                                  */
626 /***********************************************************************/
627
628 static enum iam_it_state it_state(const struct iam_iterator *it)
629 {
630         return it->ii_state;
631 }
632
633 /*
634  * Helper function returning the container an iterator is attached to.
635  */
636 static struct iam_container *iam_it_container(const struct iam_iterator *it)
637 {
638         return it->ii_path.ip_container;
639 }
640
641 static inline int it_keycmp(const struct iam_iterator *it,
642                             const struct iam_key *k)
643 {
644         return iam_leaf_keycmp(&it->ii_path.ip_leaf, k);
645 }
646
647 static inline int it_keyeq(const struct iam_iterator *it,
648                            const struct iam_key *k)
649 {
650         return iam_leaf_keyeq(&it->ii_path.ip_leaf, k);
651 }
652
653 static int it_ikeycmp(const struct iam_iterator *it, const struct iam_ikey *ik)
654 {
655         return iam_ikeycmp(it->ii_path.ip_container,
656                            iam_leaf_ikey(&it->ii_path.ip_leaf,
657                                          iam_path_ikey(&it->ii_path, 0)), ik);
658 }
659
660 static inline int it_at_rec(const struct iam_iterator *it)
661 {
662         return !iam_leaf_at_end(&it->ii_path.ip_leaf);
663 }
664
665 static inline int it_before(const struct iam_iterator *it)
666 {
667         return it_state(it) == IAM_IT_SKEWED && it_at_rec(it);
668 }
669
670 /*
671  * Helper wrapper around iam_it_get(): returns 0 (success) only when record
672  * with exactly the same key as asked is found.
673  */
674 static int iam_it_get_exact(struct iam_iterator *it, const struct iam_key *k)
675 {
676         int result;
677
678         result = iam_it_get(it, k);
679         if (result > 0)
680                 result = 0;
681         else if (result == 0)
682                 /*
683                  * Return -ENOENT if the cursor is positioned over a record with a
684                  * key different from the one specified, or in an empty leaf.
685                  *
686                  * XXX returning -ENOENT only works if iam_it_get() never
687                  * returns -ENOENT as a legitimate error.
688                  */
689                 result = -ENOENT;
690         return result;
691 }
692
693 void iam_container_write_lock(struct iam_container *ic)
694 {
695         down_write(&ic->ic_sem);
696 }
697
698 void iam_container_write_unlock(struct iam_container *ic)
699 {
700         up_write(&ic->ic_sem);
701 }
702
703 void iam_container_read_lock(struct iam_container *ic)
704 {
705         down_read(&ic->ic_sem);
706 }
707
708 void iam_container_read_unlock(struct iam_container *ic)
709 {
710         up_read(&ic->ic_sem);
711 }
712
713 /*
714  * Initialize iterator to IAM_IT_DETACHED state.
715  *
716  * postcondition: it_state(it) == IAM_IT_DETACHED
717  */
718 int  iam_it_init(struct iam_iterator *it, struct iam_container *c, __u32 flags,
719                  struct iam_path_descr *pd)
720 {
721         memset(it, 0, sizeof *it);
722         it->ii_flags  = flags;
723         it->ii_state  = IAM_IT_DETACHED;
724         iam_path_init(&it->ii_path, c, pd);
725         return 0;
726 }
727
728 /*
729  * Finalize iterator and release all resources.
730  *
731  * precondition: it_state(it) == IAM_IT_DETACHED
732  */
733 void iam_it_fini(struct iam_iterator *it)
734 {
735         assert_corr(it_state(it) == IAM_IT_DETACHED);
736         iam_path_fini(&it->ii_path);
737 }
738
739 /*
740  * These locking primitives are used to protect parts of the dir's htree.
741  * The protection unit is a block: a leaf or an index node.
742  */
743 static struct dynlock_handle *iam_lock_htree(struct iam_container *ic,
744                                              unsigned long value,
745                                              enum dynlock_type lt)
746 {
747         return dynlock_lock(&ic->ic_tree_lock, value, lt, GFP_NOFS);
748 }
749
750 static int iam_index_lock(struct iam_path *path, struct dynlock_handle **lh)
751 {
752         struct iam_frame *f;
753
754         for (f = path->ip_frame; f >= path->ip_frames; --f, ++lh) {
755                 do_corr(schedule());
756                 *lh = iam_lock_htree(path->ip_container, f->curidx, DLT_READ);
757                 if (*lh == NULL)
758                         return -ENOMEM;
759         }
760         return 0;
761 }
762
763 /*
764  * Fast check for frame consistency.
765  */
766 static int iam_check_fast(struct iam_path *path, struct iam_frame *frame)
767 {
768         struct iam_container *bag;
769         struct iam_entry *next;
770         struct iam_entry *last;
771         struct iam_entry *entries;
772         struct iam_entry *at;
773
774         bag     = path->ip_container;
775         at      = frame->at;
776         entries = frame->entries;
777         last    = iam_entry_shift(path, entries, dx_get_count(entries) - 1);
778
779         if (unlikely(at > last))
780                 return -EAGAIN;
781
782         if (unlikely(dx_get_block(path, at) != frame->leaf))
783                 return -EAGAIN;
784
785         if (unlikely(iam_ikeycmp(bag, iam_ikey_at(path, at),
786                                  path->ip_ikey_target) > 0))
787                 return -EAGAIN;
788
789         next = iam_entry_shift(path, at, +1);
790         if (next <= last) {
791                 if (unlikely(iam_ikeycmp(bag, iam_ikey_at(path, next),
792                                          path->ip_ikey_target) <= 0))
793                         return -EAGAIN;
794         }
795         return 0;
796 }
797
798 int dx_index_is_compat(struct iam_path *path)
799 {
800         return iam_path_descr(path) == NULL;
801 }
802
803 /*
804  * iam_find_position
805  *
806  * Search for the position of the specified hash (index key) in an index node.
807  *
808  */
809
810 static struct iam_entry *iam_find_position(struct iam_path *path,
811                                            struct iam_frame *frame)
812 {
813         int count;
814         struct iam_entry *p;
815         struct iam_entry *q;
816         struct iam_entry *m;
817
818         count = dx_get_count(frame->entries);
819         assert_corr(count && count <= dx_get_limit(frame->entries));
820         p = iam_entry_shift(path, frame->entries,
821                             dx_index_is_compat(path) ? 1 : 2);
822         q = iam_entry_shift(path, frame->entries, count - 1);
823         while (p <= q) {
824                 m = iam_entry_shift(path, p, iam_entry_diff(path, q, p) / 2);
825                 if (iam_ikeycmp(path->ip_container, iam_ikey_at(path, m),
826                                 path->ip_ikey_target) > 0)
827                         q = iam_entry_shift(path, m, -1);
828                 else
829                         p = iam_entry_shift(path, m, +1);
830         }
831         return iam_entry_shift(path, p, -1);
832 }
833
834
835
836 static iam_ptr_t iam_find_ptr(struct iam_path *path, struct iam_frame *frame)
837 {
838         return dx_get_block(path, iam_find_position(path, frame));
839 }
840
841 void iam_insert_key(struct iam_path *path, struct iam_frame *frame,
842                     const struct iam_ikey *key, iam_ptr_t ptr)
843 {
844         struct iam_entry *entries = frame->entries;
845         struct iam_entry *new = iam_entry_shift(path, frame->at, +1);
846         int count = dx_get_count(entries);
847
848         /*
849          * Unfortunately we cannot assert this, as this function is sometimes
850          * called by VFS under i_sem and without pdirops lock.
851          */
852         assert_corr(1 || iam_frame_is_locked(path, frame));
853         assert_corr(count < dx_get_limit(entries));
854         assert_corr(frame->at < iam_entry_shift(path, entries, count));
855         assert_inv(dx_node_check(path, frame));
856
857         memmove(iam_entry_shift(path, new, 1), new,
858                 (char *)iam_entry_shift(path, entries, count) - (char *)new);
859         dx_set_ikey(path, new, key);
860         dx_set_block(path, new, ptr);
861         dx_set_count(entries, count + 1);
862         assert_inv(dx_node_check(path, frame));
863 }
864
865 void iam_insert_key_lock(struct iam_path *path, struct iam_frame *frame,
866                          const struct iam_ikey *key, iam_ptr_t ptr)
867 {
868         iam_lock_bh(frame->bh);
869         iam_insert_key(path, frame, key, ptr);
870         iam_unlock_bh(frame->bh);
871 }
872 /*
873  * returns 0 if path was unchanged, -EAGAIN otherwise.
874  */
875 static int iam_check_path(struct iam_path *path, struct iam_frame *frame)
876 {
877         int equal;
878
879         iam_lock_bh(frame->bh);
880         equal = iam_check_fast(path, frame) == 0 ||
881                 frame->leaf == iam_find_ptr(path, frame);
882         DX_DEVAL(iam_lock_stats.dls_bh_again += !equal);
883         iam_unlock_bh(frame->bh);
884
885         return equal ? 0 : -EAGAIN;
886 }
887
888 static int iam_lookup_try(struct iam_path *path)
889 {
890         u32 ptr;
891         int err = 0;
892         int i;
893
894         struct iam_descr *param;
895         struct iam_frame *frame;
896         struct iam_container *c;
897
898         param = iam_path_descr(path);
899         c = path->ip_container;
900
901         ptr = param->id_ops->id_root_ptr(c);
902         for (frame = path->ip_frames, i = 0; i <= path->ip_indirect;
903              ++frame, ++i) {
904                 err = param->id_ops->id_node_read(c, (iam_ptr_t)ptr, NULL,
905                                                   &frame->bh);
906                 do_corr(schedule());
907
908                 iam_lock_bh(frame->bh);
909                 /*
910                  * node must be initialized under bh lock because concurrent
911                  * creation procedure may change it and iam_lookup_try() will
912                  * see obsolete tree height. -bzzz
913                  */
914                 if (err != 0)
915                         break;
916
917                 if (LDISKFS_INVARIANT_ON) {
918                         err = param->id_ops->id_node_check(path, frame);
919                         if (err != 0)
920                                 break;
921                 }
922
923                 err = param->id_ops->id_node_load(path, frame);
924                 if (err != 0)
925                         break;
926
927                 assert_inv(dx_node_check(path, frame));
928                 /*
929                  * splitting may change the root index block and move the hash we
930                  * are looking for into another index block, so we have to check
931                  * this situation and repeat from the beginning if the path changed
932                  * -bzzz
933                  */
934                 if (i > 0) {
935                         err = iam_check_path(path, frame - 1);
936                         if (err != 0)
937                                 break;
938                 }
939
940                 frame->at = iam_find_position(path, frame);
941                 frame->curidx = ptr;
942                 frame->leaf = ptr = dx_get_block(path, frame->at);
943
944                 iam_unlock_bh(frame->bh);
945                 do_corr(schedule());
946         }
947         if (err != 0)
948                 iam_unlock_bh(frame->bh);
949         path->ip_frame = --frame;
950         return err;
951 }
952
953 static int __iam_path_lookup(struct iam_path *path)
954 {
955         int err;
956         int i;
957
958         for (i = 0; i < DX_MAX_TREE_HEIGHT; ++ i)
959                 assert(path->ip_frames[i].bh == NULL);
960
961         do {
962                 err = iam_lookup_try(path);
963                 do_corr(schedule());
964                 if (err != 0)
965                         iam_path_fini(path);
966         } while (err == -EAGAIN);
967
968         return err;
969 }
970
971 /*
972  * returns 0 if path was unchanged, -EAGAIN otherwise.
973  */
974 static int iam_check_full_path(struct iam_path *path, int search)
975 {
976         struct iam_frame *bottom;
977         struct iam_frame *scan;
978         int i;
979         int result;
980
981         do_corr(schedule());
982
983         for (bottom = path->ip_frames, i = 0;
984              i < DX_MAX_TREE_HEIGHT && bottom->bh != NULL; ++bottom, ++i) {
985                 ; /* find last filled in frame */
986         }
987
988         /*
989          * Lock frames, bottom to top.
990          */
991         for (scan = bottom - 1; scan >= path->ip_frames; --scan)
992                 iam_lock_bh(scan->bh);
993         /*
994          * Check them top to bottom.
995          */
996         result = 0;
997         for (scan = path->ip_frames; scan < bottom; ++scan) {
998                 struct iam_entry *pos;
999
1000                 if (search) {
1001                         if (iam_check_fast(path, scan) == 0)
1002                                 continue;
1003
1004                         pos = iam_find_position(path, scan);
1005                         if (scan->leaf != dx_get_block(path, pos)) {
1006                                 result = -EAGAIN;
1007                                 break;
1008                         }
1009                         scan->at = pos;
1010                 } else {
1011                         pos = iam_entry_shift(path, scan->entries,
1012                                               dx_get_count(scan->entries) - 1);
1013                         if (scan->at > pos ||
1014                             scan->leaf != dx_get_block(path, scan->at)) {
1015                                 result = -EAGAIN;
1016                                 break;
1017                         }
1018                 }
1019         }
1020
1021         /*
1022          * Unlock top to bottom.
1023          */
1024         for (scan = path->ip_frames; scan < bottom; ++scan)
1025                 iam_unlock_bh(scan->bh);
1026         DX_DEVAL(iam_lock_stats.dls_bh_full_again += !!result);
1027         do_corr(schedule());
1028
1029         return result;
1030 }
1031
1032
1033 /*
1034  * Performs path lookup and returns with found leaf (if any) locked by htree
1035  * lock.
1036  */
1037 static int iam_lookup_lock(struct iam_path *path,
1038                            struct dynlock_handle **dl, enum dynlock_type lt)
1039 {
1040         int result;
1041
1042         while ((result = __iam_path_lookup(path)) == 0) {
1043                 do_corr(schedule());
1044                 *dl = iam_lock_htree(path->ip_container, path->ip_frame->leaf,
1045                                      lt);
1046                 if (*dl == NULL) {
1047                         iam_path_fini(path);
1048                         result = -ENOMEM;
1049                         break;
1050                 }
1051                 do_corr(schedule());
1052                 /*
1053                  * while locking it, the leaf we just found may get split, so we
1054                  * need to check this -bzzz
1055                  */
1056                 if (iam_check_full_path(path, 1) == 0)
1057                         break;
1058                 iam_unlock_htree(path->ip_container, *dl);
1059                 *dl = NULL;
1060                 iam_path_fini(path);
1061         }
1062         return result;
1063 }
1064 /*
1065  * Performs tree top-to-bottom traversal starting from root, and loads leaf
1066  * node.
1067  */
1068 static int iam_path_lookup(struct iam_path *path, int index)
1069 {
1070         struct iam_container *c;
1071         struct iam_leaf  *leaf;
1072         int result;
1073
1074         c = path->ip_container;
1075         leaf = &path->ip_leaf;
1076         result = iam_lookup_lock(path, &leaf->il_lock, DLT_WRITE);
1077         assert_inv(iam_path_check(path));
1078         do_corr(schedule());
1079         if (result == 0) {
1080                 result = iam_leaf_load(path);
1081                 assert_inv(ergo(result == 0, iam_leaf_check(leaf)));
1082                 if (result == 0) {
1083                         do_corr(schedule());
1084                         if (index)
1085                                 result = iam_leaf_ops(leaf)->
1086                                         ilookup(leaf, path->ip_ikey_target);
1087                         else
1088                                 result = iam_leaf_ops(leaf)->
1089                                         lookup(leaf, path->ip_key_target);
1090                         do_corr(schedule());
1091                 }
1092                 if (result < 0)
1093                         iam_leaf_unlock(leaf);
1094         }
1095         return result;
1096 }
1097
1098 /*
1099  * Common part of iam_it_{i,}get().
1100  */
1101 static int __iam_it_get(struct iam_iterator *it, int index)
1102 {
1103         int result;
1104         assert_corr(it_state(it) == IAM_IT_DETACHED);
1105
1106         result = iam_path_lookup(&it->ii_path, index);
1107         if (result >= 0) {
1108                 int collision;
1109
1110                 collision = result & IAM_LOOKUP_LAST;
1111                 switch (result & ~IAM_LOOKUP_LAST) {
1112                 case IAM_LOOKUP_EXACT:
1113                         result = +1;
1114                         it->ii_state = IAM_IT_ATTACHED;
1115                         break;
1116                 case IAM_LOOKUP_OK:
1117                         result = 0;
1118                         it->ii_state = IAM_IT_ATTACHED;
1119                         break;
1120                 case IAM_LOOKUP_BEFORE:
1121                 case IAM_LOOKUP_EMPTY:
1122                         result = 0;
1123                         it->ii_state = IAM_IT_SKEWED;
1124                         break;
1125                 default:
1126                         assert(0);
1127                 }
1128                 result |= collision;
1129         }
1130         /*
1131          * See iam_it_get_exact() for explanation.
1132          */
1133         assert_corr(result != -ENOENT);
1134         return result;
1135 }
1136
1137 /*
1138  * The correct hash, but not the same key, was found; iterate through the hash
1139  * collision chain, looking for the correct record.
1140  */
1141 static int iam_it_collision(struct iam_iterator *it)
1142 {
1143         int result;
1144
1145         assert(ergo(it_at_rec(it), !it_keyeq(it, it->ii_path.ip_key_target)));
1146
1147         while ((result = iam_it_next(it)) == 0) {
1148                 do_corr(schedule());
1149                 if (it_ikeycmp(it, it->ii_path.ip_ikey_target) != 0)
1150                         return -ENOENT;
1151                 if (it_keyeq(it, it->ii_path.ip_key_target))
1152                         return 0;
1153         }
1154         return result;
1155 }
1156
1157 /*
1158  * Attach iterator. After successful completion, @it points to record with
1159  * least key not larger than @k.
1160  *
1161  * Return value: 0: positioned on existing record,
1162  *             +ve: exact position found,
1163  *             -ve: error.
1164  *
1165  * precondition:  it_state(it) == IAM_IT_DETACHED
1166  * postcondition: ergo(result == 0 && it_state(it) == IAM_IT_ATTACHED,
1167  *                     it_keycmp(it, k) <= 0)
1168  */
1169 int iam_it_get(struct iam_iterator *it, const struct iam_key *k)
1170 {
1171         int result;
1172         assert_corr(it_state(it) == IAM_IT_DETACHED);
1173
1174         it->ii_path.ip_ikey_target = NULL;
1175         it->ii_path.ip_key_target  = k;
1176
1177         result = __iam_it_get(it, 0);
1178
1179         if (result == IAM_LOOKUP_LAST) {
1180                 result = iam_it_collision(it);
1181                 if (result != 0) {
1182                         iam_it_put(it);
1183                         iam_it_fini(it);
1184                         result = __iam_it_get(it, 0);
1185                 } else
1186                         result = +1;
1187         }
1188         if (result > 0)
1189                 result &= ~IAM_LOOKUP_LAST;
1190
1191         assert_corr(ergo(result > 0, it_keycmp(it, k) == 0));
1192         assert_corr(ergo(result == 0 && it_state(it) == IAM_IT_ATTACHED,
1193                          it_keycmp(it, k) <= 0));
1194         return result;
1195 }
1196
1197 /*
1198  * Attach iterator by index key.
1199  */
1200 static int iam_it_iget(struct iam_iterator *it, const struct iam_ikey *k)
1201 {
1202         assert_corr(it_state(it) == IAM_IT_DETACHED);
1203
1204         it->ii_path.ip_ikey_target = k;
1205         return __iam_it_get(it, 1) & ~IAM_LOOKUP_LAST;
1206 }
1207
1208 /*
1209  * Attach the iterator, and ensure it points to a record (not skewed).
1210  *
1211  * Return value: 0: positioned on existing record,
1212  *             +ve: exact position found,
1213  *             -ve: error.
1214  *
1215  * precondition:  it_state(it) == IAM_IT_DETACHED &&
1216  *                !(it->ii_flags&IAM_IT_WRITE)
1217  * postcondition: ergo(result == 0, it_state(it) == IAM_IT_ATTACHED)
1218  */
1219 int iam_it_get_at(struct iam_iterator *it, const struct iam_key *k)
1220 {
1221         int result;
1222         assert_corr(it_state(it) == IAM_IT_DETACHED &&
1223                     !(it->ii_flags&IAM_IT_WRITE));
1224         result = iam_it_get(it, k);
1225         if (result == 0) {
1226                 if (it_state(it) != IAM_IT_ATTACHED) {
1227                         assert_corr(it_state(it) == IAM_IT_SKEWED);
1228                         result = iam_it_next(it);
1229                 }
1230         }
1231         assert_corr(ergo(result >= 0, it_state(it) == IAM_IT_ATTACHED));
1232         return result;
1233 }
1234
1235 /*
1236  * Duplicates iterator.
1237  *
1238  * postcondition: it_state(dst) == it_state(src) &&
1239  *                iam_it_container(dst) == iam_it_container(src) &&
1240  *                dst->ii_flags == src->ii_flags &&
1241  *                ergo(it_state(src) == IAM_IT_ATTACHED,
1242  *                     iam_it_rec_get(dst) == iam_it_rec_get(src) &&
1243  *                     iam_it_key_get(dst) == iam_it_key_get(src))
1244  */
1245 void iam_it_dup(struct iam_iterator *dst, const struct iam_iterator *src)
1246 {
1247         dst->ii_flags     = src->ii_flags;
1248         dst->ii_state     = src->ii_state;
1249         /* XXX not yet. iam_path_dup(&dst->ii_path, &src->ii_path); */
1250         /*
1251          * XXX: duplicate lock.
1252          */
1253         assert_corr(it_state(dst) == it_state(src));
1254         assert_corr(iam_it_container(dst) == iam_it_container(src));
1255         assert_corr(dst->ii_flags == src->ii_flags);
1256         assert_corr(ergo(it_state(src) == IAM_IT_ATTACHED,
1257                     iam_it_rec_get(dst) == iam_it_rec_get(src) &&
1258                     iam_it_key_get(dst) == iam_it_key_get(src)));
1259
1260 }
1261
1262 /*
1263  * Detach iterator. Does nothing if it is already in detached state.
1264  *
1265  * postcondition: it_state(it) == IAM_IT_DETACHED
1266  */
1267 void iam_it_put(struct iam_iterator *it)
1268 {
1269         if (it->ii_state != IAM_IT_DETACHED) {
1270                 it->ii_state = IAM_IT_DETACHED;
1271                 iam_leaf_fini(&it->ii_path.ip_leaf);
1272         }
1273 }
1274
1275 static struct iam_ikey *iam_it_ikey_get(const struct iam_iterator *it,
1276                                         struct iam_ikey *ikey);
1277
1278
1279 /*
1280  * This function increments the frame pointer to search the next leaf
1281  * block, and reads in the necessary intervening nodes if the search
1282  * should be necessary.  Whether or not the search is necessary is
1283  * controlled by the hash parameter.  If the hash value is even, then
1284  * the search is only continued if the next block starts with that
1285  * hash value.  This is used if we are searching for a specific file.
1286  *
1287  * If the hash value is HASH_NB_ALWAYS, then always go to the next block.
1288  *
1289  * This function returns 1 if the caller should continue to search,
1290  * or 0 if it should not.  If there is an error reading one of the
1291  * index blocks, it will return a negative error code.
1292  *
1293  * If start_hash is non-null, it will be filled in with the starting
1294  * hash of the next page.
1295  */
1296 static int iam_htree_advance(struct inode *dir, __u32 hash,
1297                               struct iam_path *path, __u32 *start_hash,
1298                               int compat)
1299 {
1300         struct iam_frame *p;
1301         struct buffer_head *bh;
1302         int err, num_frames = 0;
1303         __u32 bhash;
1304
1305         p = path->ip_frame;
1306         /*
1307          * Find the next leaf page by incrementing the frame pointer.
1308          * If we run out of entries in the interior node, loop around and
1309          * increment pointer in the parent node.  When we break out of
1310          * this loop, num_frames indicates the number of interior
1311          * nodes need to be read.
1312          */
1313         while (1) {
1314                 do_corr(schedule());
1315                 iam_lock_bh(p->bh);
1316                 if (p->at_shifted)
1317                         p->at_shifted = 0;
1318                 else
1319                         p->at = iam_entry_shift(path, p->at, +1);
1320                 if (p->at < iam_entry_shift(path, p->entries,
1321                                             dx_get_count(p->entries))) {
1322                         p->leaf = dx_get_block(path, p->at);
1323                         iam_unlock_bh(p->bh);
1324                         break;
1325                 }
1326                 iam_unlock_bh(p->bh);
1327                 if (p == path->ip_frames)
1328                         return 0;
1329                 num_frames++;
1330                 --p;
1331         }
1332
1333         if (compat) {
1334                 /*
1335                  * Htree hash magic.
1336                  */
1337                 /*
1338                  * If the hash is 1, then continue only if the next page has a
1339                  * continuation hash of any value.  This is used for readdir
1340                  * handling.  Otherwise, check to see if the hash matches the
1341                  * desired continuation hash.  If it doesn't, return since
1342                  * there's no point to read in the successive index pages.
1343                  */
1344                 dx_get_ikey(path, p->at, (struct iam_ikey *)&bhash);
1345                 if (start_hash)
1346                         *start_hash = bhash;
1347                 if ((hash & 1) == 0) {
1348                         if ((bhash & ~1) != hash)
1349                                 return 0;
1350                 }
1351         }
1352         /*
1353          * If the hash is HASH_NB_ALWAYS, we always go to the next
1354          * block so no check is necessary
1355          */
1356         while (num_frames--) {
1357                 iam_ptr_t idx;
1358
1359                 do_corr(schedule());
1360                 iam_lock_bh(p->bh);
1361                 idx = p->leaf = dx_get_block(path, p->at);
1362                 iam_unlock_bh(p->bh);
1363                 err = iam_path_descr(path)->id_ops->
1364                         id_node_read(path->ip_container, idx, NULL, &bh);
1365                 if (err != 0)
1366                         return err; /* Failure */
1367                 ++p;
1368                 brelse(p->bh);
1369                 assert_corr(p->bh != bh);
1370                 p->bh = bh;
1371                 p->entries = dx_node_get_entries(path, p);
1372                 p->at = iam_entry_shift(path, p->entries, !compat);
1373                 assert_corr(p->curidx != idx);
1374                 p->curidx = idx;
1375                 iam_lock_bh(p->bh);
1376                 assert_corr(p->leaf != dx_get_block(path, p->at));
1377                 p->leaf = dx_get_block(path, p->at);
1378                 iam_unlock_bh(p->bh);
1379                 assert_inv(dx_node_check(path, p));
1380         }
1381         return 1;
1382 }
1383
1384
1385 static inline int iam_index_advance(struct iam_path *path)
1386 {
1387         return iam_htree_advance(iam_path_obj(path), 0, path, NULL, 0);
1388 }
1389
1390 static void iam_unlock_array(struct iam_container *ic,
1391                              struct dynlock_handle **lh)
1392 {
1393         int i;
1394
1395         for (i = 0; i < DX_MAX_TREE_HEIGHT; ++i, ++lh) {
1396                 if (*lh != NULL) {
1397                         iam_unlock_htree(ic, *lh);
1398                         *lh = NULL;
1399                 }
1400         }
1401 }
1402 /*
1403  * Advance the index part of @path to point to the next leaf. Returns 1 on
1404  * success, 0 when the end of container was reached. The leaf node is locked.
1405  */
1406 int iam_index_next(struct iam_container *c, struct iam_path *path)
1407 {
1408         iam_ptr_t cursor;
1409         struct dynlock_handle *lh[DX_MAX_TREE_HEIGHT] = { NULL, };
1410         int result;
1411         struct inode *object;
1412
1413         /*
1414          * Locking for iam_index_next()... is to be described.
1415          */
1416
1417         object = c->ic_object;
1418         cursor = path->ip_frame->leaf;
1419
1420         while (1) {
1421                 result = iam_index_lock(path, lh);
1422                 do_corr(schedule());
1423                 if (result < 0)
1424                         break;
1425
1426                 result = iam_check_full_path(path, 0);
1427                 if (result == 0 && cursor == path->ip_frame->leaf) {
1428                         result = iam_index_advance(path);
1429
1430                         assert_corr(result == 0 ||
1431                                     cursor != path->ip_frame->leaf);
1432                         break;
1433                 }
1434                 do {
1435                         iam_unlock_array(c, lh);
1436
1437                         iam_path_release(path);
1438                         do_corr(schedule());
1439
1440                         result = __iam_path_lookup(path);
1441                         if (result < 0)
1442                                 break;
1443
1444                         while (path->ip_frame->leaf != cursor) {
1445                                 do_corr(schedule());
1446
1447                                 result = iam_index_lock(path, lh);
1448                                 do_corr(schedule());
1449                                 if (result < 0)
1450                                         break;
1451
1452                                 result = iam_check_full_path(path, 0);
1453                                 if (result != 0)
1454                                         break;
1455
1456                                 result = iam_index_advance(path);
1457                                 if (result == 0) {
1458                                         CERROR("cannot find cursor : %u\n",
1459                                                 cursor);
1460                                         result = -EIO;
1461                                 }
1462                                 if (result < 0)
1463                                         break;
1464                                 result = iam_check_full_path(path, 0);
1465                                 if (result != 0)
1466                                         break;
1467                                 iam_unlock_array(c, lh);
1468                         }
1469                 } while (result == -EAGAIN);
1470                 if (result < 0)
1471                         break;
1472         }
1473         iam_unlock_array(c, lh);
1474         return result;
1475 }
1476
1477 /*
1478  * Move iterator one record right.
1479  *
1480  * Return value: 0: success,
1481  *              +1: end of container reached
1482  *             -ve: error
1483  *
1484  * precondition:  (it_state(it) == IAM_IT_ATTACHED ||
1485  *                 it_state(it) == IAM_IT_SKEWED) && it->ii_flags&IAM_IT_MOVE
1486  * postcondition: ergo(result == 0, it_state(it) == IAM_IT_ATTACHED) &&
1487  *                ergo(result >  0, it_state(it) == IAM_IT_DETACHED)
1488  */
1489 int iam_it_next(struct iam_iterator *it)
1490 {
1491         int result;
1492         struct iam_path      *path;
1493         struct iam_leaf      *leaf;
1494         do_corr(struct iam_ikey *ik_orig);
1495
1496         /* assert_corr(it->ii_flags&IAM_IT_MOVE); */
1497         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1498                     it_state(it) == IAM_IT_SKEWED);
1499
1500         path = &it->ii_path;
1501         leaf = &path->ip_leaf;
1502
1503         assert_corr(iam_leaf_is_locked(leaf));
1504
1505         result = 0;
1506         do_corr(ik_orig = it_at_rec(it) ?
1507                 iam_it_ikey_get(it, iam_path_ikey(path, 2)) : NULL);
1508         if (it_before(it)) {
1509                 assert_corr(!iam_leaf_at_end(leaf));
1510                 it->ii_state = IAM_IT_ATTACHED;
1511         } else {
1512                 if (!iam_leaf_at_end(leaf))
1513                         /* advance within leaf node */
1514                         iam_leaf_next(leaf);
1515                 /*
1516                  * multiple iterations may be necessary due to empty leaves.
1517                  */
1518                 while (result == 0 && iam_leaf_at_end(leaf)) {
1519                         do_corr(schedule());
1520                         /* advance index portion of the path */
1521                         result = iam_index_next(iam_it_container(it), path);
1522                         assert_corr(iam_leaf_is_locked(leaf));
1523                         if (result == 1) {
1524                                 struct dynlock_handle *lh;
1525                                 lh = iam_lock_htree(iam_it_container(it),
1526                                                     path->ip_frame->leaf,
1527                                                     DLT_WRITE);
1528                                 if (lh != NULL) {
1529                                         iam_leaf_fini(leaf);
1530                                         leaf->il_lock = lh;
1531                                         result = iam_leaf_load(path);
1532                                         if (result == 0)
1533                                                 iam_leaf_start(leaf);
1534                                 } else
1535                                         result = -ENOMEM;
1536                         } else if (result == 0)
1537                                 /* end of container reached */
1538                                 result = +1;
1539                         if (result != 0)
1540                                 iam_it_put(it);
1541                 }
1542                 if (result == 0)
1543                         it->ii_state = IAM_IT_ATTACHED;
1544         }
1545         assert_corr(ergo(result == 0, it_state(it) == IAM_IT_ATTACHED));
1546         assert_corr(ergo(result >  0, it_state(it) == IAM_IT_DETACHED));
1547         assert_corr(ergo(result == 0 && ik_orig != NULL,
1548                          it_ikeycmp(it, ik_orig) >= 0));
1549         return result;
1550 }
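     /*
      * Illustrative sketch (not compiled as part of this file): a read-only
      * forward scan with the iterator API.  Error handling is trimmed; "c"
      * and "pd" are assumed to be an initialized iam_container and
      * iam_path_descr, "k" an existing key, and use() a hypothetical
      * consumer of the returned key/record pointers.
      *
      *        struct iam_iterator it;
      *        int rc;
      *
      *        iam_it_init(&it, c, IAM_IT_MOVE, pd);
      *        rc = iam_it_get_exact(&it, k);
      *        while (rc == 0) {
      *                use(iam_it_key_get(&it), iam_it_rec_get(&it));
      *                rc = iam_it_next(&it);  (0: more, +1: end, -ve: error)
      *        }
      *        iam_it_put(&it);
      *        iam_it_fini(&it);
      */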
1551
1552 /*
1553  * Return pointer to the record under iterator.
1554  *
1555  * precondition:  it_state(it) == IAM_IT_ATTACHED && it_at_rec(it)
1556  * postcondition: it_state(it) == IAM_IT_ATTACHED
1557  */
1558 struct iam_rec *iam_it_rec_get(const struct iam_iterator *it)
1559 {
1560         assert_corr(it_state(it) == IAM_IT_ATTACHED);
1561         assert_corr(it_at_rec(it));
1562         return iam_leaf_rec(&it->ii_path.ip_leaf);
1563 }
1564
1565 static void iam_it_reccpy(struct iam_iterator *it, const struct iam_rec *r)
1566 {
1567         struct iam_leaf *folio;
1568
1569         folio = &it->ii_path.ip_leaf;
1570         iam_leaf_ops(folio)->rec_set(folio, r);
1571 }
1572
1573 /*
1574  * Replace contents of record under iterator.
1575  *
1576  * precondition:  it_state(it) == IAM_IT_ATTACHED &&
1577  *                it->ii_flags&IAM_IT_WRITE
1578  * postcondition: it_state(it) == IAM_IT_ATTACHED &&
1579  *                ergo(result == 0, !memcmp(iam_it_rec_get(it), r, ...))
1580  */
1581 int iam_it_rec_set(handle_t *h,
1582                    struct iam_iterator *it, const struct iam_rec *r)
1583 {
1584         int result;
1585         struct iam_path *path;
1586         struct buffer_head *bh;
1587
1588         assert_corr(it_state(it) == IAM_IT_ATTACHED &&
1589                     it->ii_flags&IAM_IT_WRITE);
1590         assert_corr(it_at_rec(it));
1591
1592         path = &it->ii_path;
1593         bh   = path->ip_leaf.il_bh;
1594         result = iam_txn_add(h, path, bh);
1595         if (result == 0) {
1596                 iam_it_reccpy(it, r);
1597                 result = iam_txn_dirty(h, path, bh);
1598         }
1599         return result;
1600 }
1601
1602 /*
1603  * Return pointer to the index key under iterator.
1604  *
1605  * precondition:  it_state(it) == IAM_IT_ATTACHED ||
1606  *                it_state(it) == IAM_IT_SKEWED
1607  */
1608 static struct iam_ikey *iam_it_ikey_get(const struct iam_iterator *it,
1609                                         struct iam_ikey *ikey)
1610 {
1611         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1612                     it_state(it) == IAM_IT_SKEWED);
1613         assert_corr(it_at_rec(it));
1614         return iam_leaf_ikey(&it->ii_path.ip_leaf, ikey);
1615 }
1616
1617 /*
1618  * Return pointer to the key under iterator.
1619  *
1620  * precondition:  it_state(it) == IAM_IT_ATTACHED ||
1621  *                it_state(it) == IAM_IT_SKEWED
1622  */
1623 struct iam_key *iam_it_key_get(const struct iam_iterator *it)
1624 {
1625         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1626                     it_state(it) == IAM_IT_SKEWED);
1627         assert_corr(it_at_rec(it));
1628         return iam_leaf_key(&it->ii_path.ip_leaf);
1629 }
1630
1631 /*
1632  * Return size of key under iterator (in bytes)
1633  *
1634  * precondition:  it_state(it) == IAM_IT_ATTACHED ||
1635  *                it_state(it) == IAM_IT_SKEWED
1636  */
1637 int iam_it_key_size(const struct iam_iterator *it)
1638 {
1639         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1640                     it_state(it) == IAM_IT_SKEWED);
1641         assert_corr(it_at_rec(it));
1642         return iam_leaf_key_size(&it->ii_path.ip_leaf);
1643 }
1644
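     /*
      * Allocate a buffer for a new node of container @c.
      *
      * Blocks parked on the container's idle list (see iam_recycle_leaf()
      * below) are reused in preference to growing the underlying object:
      * first the blocks recorded in ic_idle_bh's iih_blks[] array, then the
      * idle-head block itself.  Only when no idle blocks are available is a
      * fresh block appended with osd_ldiskfs_append().  On success the new
      * block number is stored in *b and the buffer head is returned; reused
      * idle blocks are journal write-accessed and zero-filled so that they
      * look like freshly allocated nodes.  On failure NULL is returned and
      * the error code is stored in *e.
      */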
1645 static struct buffer_head *
1646 iam_new_node(handle_t *h, struct iam_container *c, iam_ptr_t *b, int *e)
1647 {
1648         struct inode *inode = c->ic_object;
1649         struct buffer_head *bh = NULL;
1650         struct iam_idle_head *head;
1651         struct buffer_head *idle;
1652         __u32 *idle_blocks;
1653         __u16 count;
1654
1655         if (c->ic_idle_bh == NULL)
1656                 goto newblock;
1657
1658         mutex_lock(&c->ic_idle_mutex);
1659         if (unlikely(c->ic_idle_bh == NULL)) {
1660                 mutex_unlock(&c->ic_idle_mutex);
1661                 goto newblock;
1662         }
1663
1664         head = (struct iam_idle_head *)(c->ic_idle_bh->b_data);
1665         count = le16_to_cpu(head->iih_count);
1666         if (count > 0) {
1667                 *e = ldiskfs_journal_get_write_access(h, c->ic_idle_bh);
1668                 if (*e != 0)
1669                         goto fail;
1670
1671                 --count;
1672                 *b = le32_to_cpu(head->iih_blks[count]);
1673                 head->iih_count = cpu_to_le16(count);
1674                 *e = ldiskfs_handle_dirty_metadata(h, inode, c->ic_idle_bh);
1675                 if (*e != 0)
1676                         goto fail;
1677
1678                 mutex_unlock(&c->ic_idle_mutex);
1679                 bh = ldiskfs_bread(NULL, inode, *b, 0, e);
1680                 if (bh == NULL) {
1681                         *e = *e ? *e : -EIO;
1682                         return NULL;
1683                 }
1684                 goto got;
1685         }
1686
1687         /* The block containing the iam_idle_head is itself an idle block,
1688          * and can be used as the new node. */
1689         idle_blocks = (__u32 *)(c->ic_root_bh->b_data +
1690                                 c->ic_descr->id_root_gap +
1691                                 sizeof(struct dx_countlimit));
1692         *e = ldiskfs_journal_get_write_access(h, c->ic_root_bh);
1693         if (*e != 0)
1694                 goto fail;
1695
1696         *b = le32_to_cpu(*idle_blocks);
1697         iam_lock_bh(c->ic_root_bh);
1698         *idle_blocks = head->iih_next;
1699         iam_unlock_bh(c->ic_root_bh);
1700         *e = ldiskfs_handle_dirty_metadata(h, inode, c->ic_root_bh);
1701         if (*e != 0) {
1702                 iam_lock_bh(c->ic_root_bh);
1703                 *idle_blocks = cpu_to_le32(*b);
1704                 iam_unlock_bh(c->ic_root_bh);
1705                 goto fail;
1706         }
1707
1708         bh = c->ic_idle_bh;
1709         idle = iam_load_idle_blocks(c, le32_to_cpu(*idle_blocks));
1710         if (idle != NULL && IS_ERR(idle)) {
1711                 *e = PTR_ERR(idle);
1712                 c->ic_idle_bh = NULL;
1713                 brelse(bh);
1714                 goto fail;
1715         }
1716
1717         c->ic_idle_bh = idle;
1718         mutex_unlock(&c->ic_idle_mutex);
1719
1720 got:
1721         /* get write access for the found buffer head */
1722         *e = ldiskfs_journal_get_write_access(h, bh);
1723         if (*e != 0) {
1724                 brelse(bh);
1725                 bh = NULL;
1726                 ldiskfs_std_error(inode->i_sb, *e);
1727         } else {
1728                 /* Clear the reused node so it looks like a newly allocated one. */
1729                 memset(bh->b_data, 0, inode->i_sb->s_blocksize);
1730                 set_buffer_uptodate(bh);
1731         }
1732         return bh;
1733
1734 newblock:
1735         bh = osd_ldiskfs_append(h, inode, b);
1736         if (IS_ERR(bh)) {
1737                 *e = PTR_ERR(bh);
1738                 bh = NULL;
1739         }
1740
1741         return bh;
1742
1743 fail:
1744         mutex_unlock(&c->ic_idle_mutex);
1745         ldiskfs_std_error(inode->i_sb, *e);
1746         return NULL;
1747 }
1748
1749 /*
1750  * Insertion of a new record.  Interaction with jbd in the non-trivial
1751  * case (when a split happens) is as follows:
1752  *
1753  *  - the new leaf node is added to the transaction by iam_new_node();
1754  *
1755  *  - the old leaf node is added to the transaction by iam_add_rec();
1756  *
1757  *  - the leaf in which the insertion point ends up is marked dirty by
1758  *    iam_add_rec();
1759  *
1760  *  - the leaf without the insertion point is marked dirty (as @new_leaf)
1761  *    by iam_new_leaf();
1762  *
1763  *  - split index nodes are added to the transaction and marked dirty by
1764  *    split_index_node();
1765  *
1766  *  - the "safe" index node (not split, but receiving a new pointer) is
1767  *    added to the transaction and marked dirty by split_index_node();
1768  *
1769  *  - the index node receiving the pointer to the new leaf is added to the
1770  *    transaction by split_index_node() and marked dirty by iam_add_rec();
1771  *
1772  *  - the inode is marked dirty by iam_add_rec().
1773  */
1774
1775 static int iam_new_leaf(handle_t *handle, struct iam_leaf *leaf)
1776 {
1777         int err;
1778         iam_ptr_t blknr;
1779         struct buffer_head   *new_leaf;
1780         struct buffer_head   *old_leaf;
1781         struct iam_container *c;
1782         struct inode         *obj;
1783         struct iam_path      *path;
1784
1785         assert_inv(iam_leaf_check(leaf));
1786
1787         c = iam_leaf_container(leaf);
1788         path = leaf->il_path;
1789
1790         obj = c->ic_object;
1791         new_leaf = iam_new_node(handle, c, &blknr, &err);
1792         do_corr(schedule());
1793         if (new_leaf != NULL) {
1794                 struct dynlock_handle *lh;
1795
1796                 lh = iam_lock_htree(c, blknr, DLT_WRITE);
1797                 do_corr(schedule());
1798                 if (lh != NULL) {
1799                         iam_leaf_ops(leaf)->init_new(c, new_leaf);
1800                         do_corr(schedule());
1801                         old_leaf = leaf->il_bh;
1802                         iam_leaf_split(leaf, &new_leaf, blknr);
1803                         if (old_leaf != leaf->il_bh) {
1804                                 /*
1805                                  * Switched to the new leaf.
1806                                  */
1807                                 iam_leaf_unlock(leaf);
1808                                 leaf->il_lock = lh;
1809                                 path->ip_frame->leaf = blknr;
1810                         } else
1811                                 iam_unlock_htree(path->ip_container, lh);
1812                         do_corr(schedule());
1813                         err = iam_txn_dirty(handle, path, new_leaf);
1814                         if (err == 0)
1815                                 err = ldiskfs_mark_inode_dirty(handle, obj);
1816                         do_corr(schedule());
1817                 } else
1818                         err = -ENOMEM;
1819                 brelse(new_leaf);
1820         }
1821         assert_inv(iam_leaf_check(leaf));
1822         assert_inv(iam_leaf_check(&iam_leaf_path(leaf)->ip_leaf));
1823         assert_inv(iam_path_check(iam_leaf_path(leaf)));
1824         return err;
1825 }
1826
1827 static inline void dx_set_limit(struct iam_entry *entries, unsigned value)
1828 {
1829         ((struct dx_countlimit *) entries)->limit = cpu_to_le16(value);
1830 }
1831
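     /*
      * Split a full non-root index node: move the upper half of @entries in
      * @frame into the newly allocated block @newblock (whose in-memory
      * entries are @entries2), and insert the pivot key for the new block
      * into the parent frame.  Returns the number of entries left in the
      * old block.
      */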
1832 static int iam_shift_entries(struct iam_path *path,
1833                          struct iam_frame *frame, unsigned count,
1834                          struct iam_entry *entries, struct iam_entry *entries2,
1835                          u32 newblock)
1836 {
1837         unsigned count1;
1838         unsigned count2;
1839         int delta;
1840
1841         struct iam_frame *parent = frame - 1;
1842         struct iam_ikey *pivot = iam_path_ikey(path, 3);
1843
1844         delta = dx_index_is_compat(path) ? 0 : +1;
1845
1846         count1 = count/2 + delta;
1847         count2 = count - count1;
1848         dx_get_ikey(path, iam_entry_shift(path, entries, count1), pivot);
1849
1850         dxtrace(printk("Split index %d/%d\n", count1, count2));
1851
1852         memcpy((char *) iam_entry_shift(path, entries2, delta),
1853                (char *) iam_entry_shift(path, entries, count1),
1854                count2 * iam_entry_size(path));
1855
1856         dx_set_count(entries2, count2 + delta);
1857         dx_set_limit(entries2, dx_node_limit(path));
1858
1859         /*
1860          * NOTE: very subtle code.  A competing dx_probe() may find the 2nd-level
1861          * index in the root index, then we insert a new entry here and set a new
1862          * count in that 2nd-level index, so dx_probe() may see a 2nd-level index
1863          * without the hash it looks for.  The solution is to re-check the root
1864          * index after locking the just-found 2nd-level index. -bzzz
1865          */
1866         iam_insert_key_lock(path, parent, pivot, newblock);
1867
1868         /*
1869          * Now both the old and the new 2nd-level index blocks contain all the
1870          * pointers, so dx_probe() may find an entry in either; that is OK. -bzzz
1871          */
1872         iam_lock_bh(frame->bh);
1873         dx_set_count(entries, count1);
1874         iam_unlock_bh(frame->bh);
1875
1876         /*
1877          * Now the old 2nd-level index block points to the first half of the
1878          * leaves.  It is important that dx_probe() checks the root index block
1879          * for changes under dx_lock_bh(frame->bh). -bzzz
1880          */
1881
1882         return count1;
1883 }
1884
1885
1886 int split_index_node(handle_t *handle, struct iam_path *path,
1887                      struct dynlock_handle **lh)
1888 {
1889
1890         struct iam_entry *entries;   /* old block contents */
1891         struct iam_entry *entries2;  /* new block contents */
1892         struct iam_frame *frame, *safe;
1893         struct buffer_head *bh_new[DX_MAX_TREE_HEIGHT] = {NULL};
1894         u32 newblock[DX_MAX_TREE_HEIGHT] = {0};
1895         struct dynlock_handle *lock[DX_MAX_TREE_HEIGHT] = {NULL,};
1896         struct dynlock_handle *new_lock[DX_MAX_TREE_HEIGHT] = {NULL,};
1897         struct inode *dir = iam_path_obj(path);
1898         struct iam_descr *descr;
1899         int nr_splet;
1900         int i, err;
1901
1902         descr = iam_path_descr(path);
1903         /*
1904          * Algorithm below depends on this.
1905          */
1906         assert_corr(dx_root_limit(path) < dx_node_limit(path));
1907
1908         frame = path->ip_frame;
1909         entries = frame->entries;
1910
1911         /*
1912          * Tall-tree handling: we might have to split multiple index blocks
1913          * all the way up to the tree root.  The tricky point is error handling:
1914          * to avoid complicated undo/rollback we
1915          *
1916          *   - first allocate all necessary blocks
1917          *
1918          *   - insert pointers into them atomically.
1919          */
1920
1921         /*
1922          * Locking: the leaf is already locked.  htree-locks are acquired on
1923          * all index nodes that require a split, bottom-to-top, on the "safe"
1924          * node, and on all new nodes.
1925          */
1926
1927         dxtrace(printk("using %u of %u node entries\n",
1928                        dx_get_count(entries), dx_get_limit(entries)));
1929
1930         /* What levels need split? */
1931         for (nr_splet = 0; frame >= path->ip_frames &&
1932              dx_get_count(frame->entries) == dx_get_limit(frame->entries);
1933              --frame, ++nr_splet) {
1934                 do_corr(schedule());
1935                 if (nr_splet == DX_MAX_TREE_HEIGHT) {
1936                         /*
1937                         CWARN(dir->i_sb, __FUNCTION__,
1938                                      "Directory index full!\n");
1939                                      */
1940                         err = -ENOSPC;
1941                         goto cleanup;
1942                 }
1943         }
1944
1945         safe = frame;
1946
1947         /*
1948          * Lock all nodes, bottom to top.
1949          */
1950         for (frame = path->ip_frame, i = nr_splet; i >= 0; --i, --frame) {
1951                 do_corr(schedule());
1952                 lock[i] = iam_lock_htree(path->ip_container, frame->curidx,
1953                                          DLT_WRITE);
1954                 if (lock[i] == NULL) {
1955                         err = -ENOMEM;
1956                         goto cleanup;
1957                 }
1958         }
1959
1960         /*
1961          * Check for concurrent index modification.
1962          */
1963         err = iam_check_full_path(path, 1);
1964         if (err)
1965                 goto cleanup;
1966         /*
1967          * And check that the same number of nodes is to be split.
1968          */
1969         for (i = 0, frame = path->ip_frame; frame >= path->ip_frames &&
1970              dx_get_count(frame->entries) == dx_get_limit(frame->entries);
1971              --frame, ++i) {
1972                 ;
1973         }
1974         if (i != nr_splet) {
1975                 err = -EAGAIN;
1976                 goto cleanup;
1977         }
1978
1979         /* Go back down, allocating blocks, locking them, and adding them to
1980          * the transaction... */
1981         for (frame = safe + 1, i = 0; i < nr_splet; ++i, ++frame) {
1982                 bh_new[i] = iam_new_node(handle, path->ip_container,
1983                                          &newblock[i], &err);
1984                 do_corr(schedule());
1985                 if (!bh_new[i] ||
1986                     descr->id_ops->id_node_init(path->ip_container,
1987                                                 bh_new[i], 0) != 0)
1988                         goto cleanup;
1989                 new_lock[i] = iam_lock_htree(path->ip_container, newblock[i],
1990                                              DLT_WRITE);
1991                 if (new_lock[i] == NULL) {
1992                         err = -ENOMEM;
1993                         goto cleanup;
1994                 }
1995                 do_corr(schedule());
1996                 BUFFER_TRACE(frame->bh, "get_write_access");
1997                 err = ldiskfs_journal_get_write_access(handle, frame->bh);
1998                 if (err)
1999                         goto journal_error;
2000         }
2001         /* Add "safe" node to transaction too */
2002         if (safe + 1 != path->ip_frames) {
2003                 do_corr(schedule());
2004                 err = ldiskfs_journal_get_write_access(handle, safe->bh);
2005                 if (err)
2006                         goto journal_error;
2007         }
2008
2009         /* Go through nodes once more, inserting pointers */
2010         for (frame = safe + 1, i = 0; i < nr_splet; ++i, ++frame) {
2011                 unsigned count;
2012                 int idx;
2013                 struct buffer_head *bh2;
2014                 struct buffer_head *bh;
2015
2016                 entries = frame->entries;
2017                 count = dx_get_count(entries);
2018                 idx = iam_entry_diff(path, frame->at, entries);
2019
2020                 bh2 = bh_new[i];
2021                 entries2 = dx_get_entries(path, bh2->b_data, 0);
2022
2023                 bh = frame->bh;
2024                 if (frame == path->ip_frames) {
2025                         /* splitting root node. Tricky point:
2026                          *
2027                          * In the "normal" B-tree we'd split root *and* add
2028                          * new root to the tree with pointers to the old root
2029                          * and its sibling (thus introducing two new nodes).
2030                          *
2031                          * In htree it's enough to add one node, because
2032                          * capacity of the root node is smaller than that of
2033                          * non-root one.
2034                          */
2035                         struct iam_frame *frames;
2036                         struct iam_entry *next;
2037
2038                         assert_corr(i == 0);
2039
2040                         do_corr(schedule());
2041
2042                         frames = path->ip_frames;
2043                         memcpy((char *) entries2, (char *) entries,
2044                                count * iam_entry_size(path));
2045                         dx_set_limit(entries2, dx_node_limit(path));
2046
2047                         /* Set up root */
2048                         iam_lock_bh(frame->bh);
2049                         next = descr->id_ops->id_root_inc(path->ip_container,
2050                                                           path, frame);
2051                         dx_set_block(path, next, newblock[0]);
2052                         iam_unlock_bh(frame->bh);
2053
2054                         do_corr(schedule());
2055                         /* Shift frames in the path */
2056                         memmove(frames + 2, frames + 1,
2057                                 (sizeof path->ip_frames) - 2 * sizeof frames[0]);
2058                         /* Add new access path frame */
2059                         frames[1].at = iam_entry_shift(path, entries2, idx);
2060                         frames[1].entries = entries = entries2;
2061                         frames[1].bh = bh2;
2062                         assert_inv(dx_node_check(path, frame));
2063                         ++ path->ip_frame;
2064                         ++ frame;
2065                         assert_inv(dx_node_check(path, frame));
2066                         bh_new[0] = NULL; /* buffer head is "consumed" */
2067                         err = ldiskfs_handle_dirty_metadata(handle, NULL, bh2);
2068                         if (err)
2069                                 goto journal_error;
2070                         do_corr(schedule());
2071                 } else {
2072                         /* splitting non-root index node. */
2073                         struct iam_frame *parent = frame - 1;
2074
2075                         do_corr(schedule());
2076                         count = iam_shift_entries(path, frame, count,
2077                                               entries, entries2, newblock[i]);
2078                         /* Which index block gets the new entry? */
2079                         if (idx >= count) {
2080                                 int d = dx_index_is_compat(path) ? 0 : +1;
2081
2082                                 frame->at = iam_entry_shift(path, entries2,
2083                                                             idx - count + d);
2084                                 frame->entries = entries = entries2;
2085                                 frame->curidx = newblock[i];
2086                                 swap(frame->bh, bh2);
2087                                 assert_corr(lock[i + 1] != NULL);
2088                                 assert_corr(new_lock[i] != NULL);
2089                                 swap(lock[i + 1], new_lock[i]);
2090                                 bh_new[i] = bh2;
2091                                 parent->at = iam_entry_shift(path,
2092                                                              parent->at, +1);
2093                         }
2094                         assert_inv(dx_node_check(path, frame));
2095                         assert_inv(dx_node_check(path, parent));
2096                         dxtrace(dx_show_index ("node", frame->entries));
2097                         dxtrace(dx_show_index ("node",
2098                                ((struct dx_node *) bh2->b_data)->entries));
2099                         err = ldiskfs_handle_dirty_metadata(handle, NULL, bh2);
2100                         if (err)
2101                                 goto journal_error;
2102                         do_corr(schedule());
2103                         err = ldiskfs_handle_dirty_metadata(handle, NULL,
2104                                                             parent->bh);
2105                         if (err)
2106                                 goto journal_error;
2107                 }
2108                 do_corr(schedule());
2109                 err = ldiskfs_handle_dirty_metadata(handle, NULL, bh);
2110                 if (err)
2111                         goto journal_error;
2112         }
2113         /*
2114          * This function was called to make insertion of a new leaf possible.
2115          * Check that it fulfilled its obligations.
2116          */
2117         assert_corr(dx_get_count(path->ip_frame->entries) <
2118                     dx_get_limit(path->ip_frame->entries));
2119         assert_corr(lock[nr_splet] != NULL);
2120         *lh = lock[nr_splet];
2121         lock[nr_splet] = NULL;
2122         if (nr_splet > 0) {
2123                 /*
2124                  * Log ->i_size modification.
2125                  */
2126                 err = ldiskfs_mark_inode_dirty(handle, dir);
2127                 if (err)
2128                         goto journal_error;
2129         }
2130         goto cleanup;
2131 journal_error:
2132         ldiskfs_std_error(dir->i_sb, err);
2133
2134 cleanup:
2135         iam_unlock_array(path->ip_container, lock);
2136         iam_unlock_array(path->ip_container, new_lock);
2137
2138         assert_corr(err || iam_frame_is_locked(path, path->ip_frame));
2139
2140         do_corr(schedule());
2141         for (i = 0; i < ARRAY_SIZE(bh_new); ++i) {
2142                 if (bh_new[i] != NULL)
2143                         brelse(bh_new[i]);
2144         }
2145         return err;
2146 }
2147
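     /*
      * Insert record @r with key @k at the current leaf position of @path.
      * If the leaf cannot accommodate the new record, index nodes are split
      * by split_index_node() and a new leaf is allocated by iam_new_leaf()
      * first; a concurrent modification detected during the split (-EAGAIN)
      * causes the path to be looked up again with iam_it_get_exact() and
      * the split to be retried.  All modified buffers are added to
      * transaction @handle and marked dirty.
      */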
2148 static int iam_add_rec(handle_t *handle, struct iam_iterator *it,
2149                        struct iam_path *path,
2150                        const struct iam_key *k, const struct iam_rec *r)
2151 {
2152         int err;
2153         struct iam_leaf *leaf;
2154
2155         leaf = &path->ip_leaf;
2156         assert_inv(iam_leaf_check(leaf));
2157         assert_inv(iam_path_check(path));
2158         err = iam_txn_add(handle, path, leaf->il_bh);
2159         if (err == 0) {
2160                 do_corr(schedule());
2161                 if (!iam_leaf_can_add(leaf, k, r)) {
2162                         struct dynlock_handle *lh = NULL;
2163
2164                         do {
2165                                 assert_corr(lh == NULL);
2166                                 do_corr(schedule());
2167                                 err = split_index_node(handle, path, &lh);
2168                                 if (err == -EAGAIN) {
2169                                         assert_corr(lh == NULL);
2170
2171                                         iam_path_fini(path);
2172                                         it->ii_state = IAM_IT_DETACHED;
2173
2174                                         do_corr(schedule());
2175                                         err = iam_it_get_exact(it, k);
2176                                         if (err == -ENOENT)
2177                                                 err = +1; /* repeat split */
2178                                         else if (err == 0)
2179                                                 err = -EEXIST;
2180                                 }
2181                         } while (err > 0);
2182                         assert_inv(iam_path_check(path));
2183                         if (err == 0) {
2184                                 assert_corr(lh != NULL);
2185                                 do_corr(schedule());
2186                                 err = iam_new_leaf(handle, leaf);
2187                                 if (err == 0)
2188                                         err = iam_txn_dirty(handle, path,
2189                                                             path->ip_frame->bh);
2190                         }
2191                         iam_unlock_htree(path->ip_container, lh);
2192                         do_corr(schedule());
2193                 }
2194                 if (err == 0) {
2195                         iam_leaf_rec_add(leaf, k, r);
2196                         err = iam_txn_dirty(handle, path, leaf->il_bh);
2197                 }
2198         }
2199         assert_inv(iam_leaf_check(leaf));
2200         assert_inv(iam_leaf_check(&path->ip_leaf));
2201         assert_inv(iam_path_check(path));
2202         return err;
2203 }
2204
2205 /*
2206  * Insert new record with key @k and contents from @r, shifting records to the
2207  * right. On success, iterator is positioned on the newly inserted record.
2208  *
2209  * precondition: it->ii_flags&IAM_IT_WRITE &&
2210  *               (it_state(it) == IAM_IT_ATTACHED ||
2211  *                it_state(it) == IAM_IT_SKEWED) &&
2212  *               ergo(it_state(it) == IAM_IT_ATTACHED,
2213  *                    it_keycmp(it, k) <= 0) &&
2214  *               ergo(it_before(it), it_keycmp(it, k) > 0));
2215  * postcondition: ergo(result == 0,
2216  *                     it_state(it) == IAM_IT_ATTACHED &&
2217  *                     it_keycmp(it, k) == 0 &&
2218  *                     !memcmp(iam_it_rec_get(it), r, ...))
2219  */
2220 int iam_it_rec_insert(handle_t *h, struct iam_iterator *it,
2221                       const struct iam_key *k, const struct iam_rec *r)
2222 {
2223         int result;
2224         struct iam_path *path;
2225
2226         path = &it->ii_path;
2227
2228         assert_corr(it->ii_flags&IAM_IT_WRITE);
2229         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
2230                     it_state(it) == IAM_IT_SKEWED);
2231         assert_corr(ergo(it_state(it) == IAM_IT_ATTACHED,
2232                          it_keycmp(it, k) <= 0));
2233         assert_corr(ergo(it_before(it), it_keycmp(it, k) > 0));
2234         result = iam_add_rec(h, it, path, k, r);
2235         if (result == 0)
2236                 it->ii_state = IAM_IT_ATTACHED;
2237         assert_corr(ergo(result == 0,
2238                          it_state(it) == IAM_IT_ATTACHED &&
2239                          it_keycmp(it, k) == 0));
2240         return result;
2241 }
2242
2243 static inline int iam_idle_blocks_limit(struct inode *inode)
2244 {
2245         return (inode->i_sb->s_blocksize - sizeof(struct iam_idle_head)) >> 2;
2246 }
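     /*
      * For example, with a 4096-byte block size and assuming struct
      * iam_idle_head occupies 8 bytes, the limit is (4096 - 8) >> 2 = 1022:
      * each idle block is recorded as a 4-byte block number in iih_blks[],
      * so a single idle-head block can track up to 1022 reusable blocks.
      */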
2247
2248 /*
2249  * If the leaf cannot be recycled, we simply lose one block for future reuse.
2250  * This is not a serious issue: the result is almost the same as not recycling.
2251  */
2252 static iam_ptr_t iam_index_shrink(handle_t *h, struct iam_path *p,
2253                                   struct iam_leaf *l, struct buffer_head **bh)
2254 {
2255         struct iam_container *c = p->ip_container;
2256         struct inode *inode = c->ic_object;
2257         struct iam_frame *frame = p->ip_frame;
2258         struct iam_entry *entries;
2259         struct iam_entry *pos;
2260         struct dynlock_handle *lh;
2261         int count;
2262         int rc;
2263
2264         if (c->ic_idle_failed)
2265                 return 0;
2266
2267         if (unlikely(frame == NULL))
2268                 return 0;
2269
2270         if (!iam_leaf_empty(l))
2271                 return 0;
2272
2273         lh = iam_lock_htree(c, frame->curidx, DLT_WRITE);
2274         if (lh == NULL) {
2275                 CWARN("%.16s: No memory to recycle idle blocks\n",
2276                       LDISKFS_SB(inode->i_sb)->s_es->s_volume_name);
2277                 return 0;
2278         }
2279
2280         rc = iam_txn_add(h, p, frame->bh);
2281         if (rc != 0) {
2282                 iam_unlock_htree(c, lh);
2283                 return 0;
2284         }
2285
2286         iam_lock_bh(frame->bh);
2287         entries = frame->entries;
2288         count = dx_get_count(entries);
2289         /* Do NOT shrink the last entry in the index node, which can be reused
2290          * directly by the next new node. */
2291         if (count == 2) {
2292                 iam_unlock_bh(frame->bh);
2293                 iam_unlock_htree(c, lh);
2294                 return 0;
2295         }
2296
2297         pos = iam_find_position(p, frame);
2298         /* Some new leaf nodes may have been added, or empty leaf nodes may
2299          * have been shrunk, during the delete operation.
2300          *
2301          * If the empty leaf is not under the current index node because the
2302          * index node has been split, just skip the empty leaf; this is rare. */
2303         if (unlikely(frame->leaf != dx_get_block(p, pos))) {
2304                 iam_unlock_bh(frame->bh);
2305                 iam_unlock_htree(c, lh);
2306                 return 0;
2307         }
2308
2309         frame->at = pos;
2310         if (frame->at < iam_entry_shift(p, entries, count - 1)) {
2311                 struct iam_entry *n = iam_entry_shift(p, frame->at, 1);
2312
2313                 memmove(frame->at, n,
2314                         (char *)iam_entry_shift(p, entries, count) - (char *)n);
2315                 frame->at_shifted = 1;
2316         }
2317         dx_set_count(entries, count - 1);
2318         iam_unlock_bh(frame->bh);
2319         rc = iam_txn_dirty(h, p, frame->bh);
2320         iam_unlock_htree(c, lh);
2321         if (rc != 0)
2322                 return 0;
2323
2324         get_bh(l->il_bh);
2325         *bh = l->il_bh;
2326         return frame->leaf;
2327 }
2328
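     /*
      * Make @bh (block @blk) the new head of the container's idle-blocks
      * chain: initialize its iam_idle_head, link the previous chain head
      * through iih_next, point the root's idle_blocks field at @blk, and
      * switch c->ic_idle_bh over to the new buffer.  On failure the root's
      * idle_blocks pointer is rolled back to the old chain head.
      */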
2329 static int
2330 iam_install_idle_blocks(handle_t *h, struct iam_path *p, struct buffer_head *bh,
2331                         __u32 *idle_blocks, iam_ptr_t blk)
2332 {
2333         struct iam_container *c = p->ip_container;
2334         struct buffer_head *old = c->ic_idle_bh;
2335         struct iam_idle_head *head;
2336         int rc;
2337
2338         head = (struct iam_idle_head *)(bh->b_data);
2339         head->iih_magic = cpu_to_le16(IAM_IDLE_HEADER_MAGIC);
2340         head->iih_count = 0;
2341         head->iih_next = *idle_blocks;
2342         /* The bh has already had journal write access granted. */
2343         rc = iam_txn_dirty(h, p, bh);
2344         if (rc != 0)
2345                 return rc;
2346
2347         rc = iam_txn_add(h, p, c->ic_root_bh);
2348         if (rc != 0)
2349                 return rc;
2350
2351         iam_lock_bh(c->ic_root_bh);
2352         *idle_blocks = cpu_to_le32(blk);
2353         iam_unlock_bh(c->ic_root_bh);
2354         rc = iam_txn_dirty(h, p, c->ic_root_bh);
2355         if (rc == 0) {
2356                 /* Do NOT release the old buffer before the new one is assigned. */
2357                 get_bh(bh);
2358                 c->ic_idle_bh = bh;
2359                 brelse(old);
2360         } else {
2361                 iam_lock_bh(c->ic_root_bh);
2362                 *idle_blocks = head->iih_next;
2363                 iam_unlock_bh(c->ic_root_bh);
2364         }
2365         return rc;
2366 }
2367
2368 /*
2369  * If the leaf cannot be recycled, we simply lose one block for future reuse.
2370  * This is not a serious issue: the result is almost the same as not recycling.
2371  */
2372 static void iam_recycle_leaf(handle_t *h, struct iam_path *p,
2373                              struct buffer_head *bh, iam_ptr_t blk)
2374 {
2375         struct iam_container *c = p->ip_container;
2376         struct inode *inode = c->ic_object;
2377         struct iam_idle_head *head;
2378         __u32 *idle_blocks;
2379         int count;
2380         int rc;
2381
2382         mutex_lock(&c->ic_idle_mutex);
2383         if (unlikely(c->ic_idle_failed)) {
2384                 rc = -EFAULT;
2385                 goto unlock;
2386         }
2387
2388         idle_blocks = (__u32 *)(c->ic_root_bh->b_data +
2389                                 c->ic_descr->id_root_gap +
2390                                 sizeof(struct dx_countlimit));
2391         /* It is the first idle block. */
2392         if (c->ic_idle_bh == NULL) {
2393                 rc = iam_install_idle_blocks(h, p, bh, idle_blocks, blk);
2394                 goto unlock;
2395         }
2396
2397         head = (struct iam_idle_head *)(c->ic_idle_bh->b_data);
2398         count = le16_to_cpu(head->iih_count);
2399         /* Current ic_idle_bh is full, to be replaced by the leaf. */
2400         if (count == iam_idle_blocks_limit(inode)) {
2401                 rc = iam_install_idle_blocks(h, p, bh, idle_blocks, blk);
2402                 goto unlock;
2403         }
2404
2405         /* Just add to ic_idle_bh. */
2406         rc = iam_txn_add(h, p, c->ic_idle_bh);
2407         if (rc != 0)
2408                 goto unlock;
2409
2410         head->iih_blks[count] = cpu_to_le32(blk);
2411         head->iih_count = cpu_to_le16(count + 1);
2412         rc = iam_txn_dirty(h, p, c->ic_idle_bh);
2413
2414 unlock:
2415         mutex_unlock(&c->ic_idle_mutex);
2416         if (rc != 0)
2417                 CWARN("%.16s: idle blocks failed, will lose the blk %u\n",
2418                       LDISKFS_SB(inode->i_sb)->s_es->s_volume_name, blk);
2419 }
2420
2421 /*
2422  * Delete record under iterator.
2423  *
2424  * precondition:  it_state(it) == IAM_IT_ATTACHED &&
2425  *                it->ii_flags&IAM_IT_WRITE &&
2426  *                it_at_rec(it)
2427  * postcondition: it_state(it) == IAM_IT_ATTACHED ||
2428  *                it_state(it) == IAM_IT_DETACHED
2429  */
2430 int iam_it_rec_delete(handle_t *h, struct iam_iterator *it)
2431 {
2432         int result;
2433         struct iam_leaf *leaf;
2434         struct iam_path *path;
2435
2436         assert_corr(it_state(it) == IAM_IT_ATTACHED &&
2437                     it->ii_flags&IAM_IT_WRITE);
2438         assert_corr(it_at_rec(it));
2439
2440         path = &it->ii_path;
2441         leaf = &path->ip_leaf;
2442
2443         assert_inv(iam_leaf_check(leaf));
2444         assert_inv(iam_path_check(path));
2445
2446         result = iam_txn_add(h, path, leaf->il_bh);
2447         /*
2448          * no compaction for now.
2449          */
2450         if (result == 0) {
2451                 iam_rec_del(leaf, it->ii_flags&IAM_IT_MOVE);
2452                 result = iam_txn_dirty(h, path, leaf->il_bh);
2453                 if (result == 0 && iam_leaf_at_end(leaf)) {
2454                         struct buffer_head *bh = NULL;
2455                         iam_ptr_t blk;
2456
2457                         blk = iam_index_shrink(h, path, leaf, &bh);
2458                         if (it->ii_flags & IAM_IT_MOVE) {
2459                                 result = iam_it_next(it);
2460                                 if (result > 0)
2461                                         result = 0;
2462                         }
2463
2464                         if (bh != NULL) {
2465                                 iam_recycle_leaf(h, path, bh, blk);
2466                                 brelse(bh);
2467                         }
2468                 }
2469         }
2470         assert_inv(iam_leaf_check(leaf));
2471         assert_inv(iam_path_check(path));
2472         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
2473                     it_state(it) == IAM_IT_DETACHED);
2474         return result;
2475 }
2476
2477 /*
2478  * Convert iterator to cookie.
2479  *
2480  * precondition:  it_state(it) == IAM_IT_ATTACHED &&
2481  *                iam_path_descr(it->ii_path)->id_key_size <= sizeof(iam_pos_t)
2482  * postcondition: it_state(it) == IAM_IT_ATTACHED
2483  */
2484 iam_pos_t iam_it_store(const struct iam_iterator *it)
2485 {
2486         iam_pos_t result;
2487
2488         assert_corr(it_state(it) == IAM_IT_ATTACHED);
2489         assert_corr(it_at_rec(it));
2490         assert_corr(iam_it_container(it)->ic_descr->id_ikey_size <=
2491                     sizeof result);
2492
2493         result = 0;
2494         return *(iam_pos_t *)iam_it_ikey_get(it, (void *)&result);
2495 }
2496
2497 /*
2498  * Restore iterator from cookie.
2499  *
2500  * precondition:  it_state(it) == IAM_IT_DETACHED && it->ii_flags&IAM_IT_MOVE &&
2501  *                iam_path_descr(it->ii_path)->id_key_size <= sizeof(iam_pos_t)
2502  * postcondition: ergo(result == 0, it_state(it) == IAM_IT_ATTACHED &&
2503  *                                  iam_it_store(it) == pos)
2504  */
2505 int iam_it_load(struct iam_iterator *it, iam_pos_t pos)
2506 {
2507         assert_corr(it_state(it) == IAM_IT_DETACHED &&
2508                     it->ii_flags&IAM_IT_MOVE);
2509         assert_corr(iam_it_container(it)->ic_descr->id_ikey_size <= sizeof pos);
2510         return iam_it_iget(it, (struct iam_ikey *)&pos);
2511 }
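     /*
      * Illustrative sketch (error handling trimmed): suspending and resuming
      * a scan with the cookie calls above, as a readdir-style interface
      * might do.  "it" is assumed to be an iterator opened with IAM_IT_MOVE
      * and currently positioned on a record.
      *
      *        iam_pos_t cookie;
      *
      *        cookie = iam_it_store(&it);     (remember the current position)
      *        iam_it_put(&it);                (detach, dropping locks)
      *        ...
      *        if (iam_it_load(&it, cookie) == 0)
      *                (continue the scan with iam_it_next(&it))
      */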
2512
2513 /***********************************************************************/
2514 /* invariants                                                          */
2515 /***********************************************************************/
2516
2517 static inline int ptr_inside(void *base, size_t size, void *ptr)
2518 {
2519         return (base <= ptr) && (ptr < base + size);
2520 }
2521
2522 static int iam_frame_invariant(struct iam_frame *f)
2523 {
2524         return
2525                 (f->bh != NULL &&
2526                 f->bh->b_data != NULL &&
2527                 ptr_inside(f->bh->b_data, f->bh->b_size, f->entries) &&
2528                 ptr_inside(f->bh->b_data, f->bh->b_size, f->at) &&
2529                 f->entries <= f->at);
2530 }
2531
2532 static int iam_leaf_invariant(struct iam_leaf *l)
2533 {
2534         return
2535                 l->il_bh != NULL &&
2536                 l->il_bh->b_data != NULL &&
2537                 ptr_inside(l->il_bh->b_data, l->il_bh->b_size, l->il_entries) &&
2538                 ptr_inside(l->il_bh->b_data, l->il_bh->b_size, l->il_at) &&
2539                 l->il_entries <= l->il_at;
2540 }
2541
2542 static int iam_path_invariant(struct iam_path *p)
2543 {
2544         int i;
2545
2546         if (p->ip_container == NULL ||
2547             p->ip_indirect < 0 || p->ip_indirect > DX_MAX_TREE_HEIGHT - 1 ||
2548             p->ip_frame != p->ip_frames + p->ip_indirect ||
2549             !iam_leaf_invariant(&p->ip_leaf))
2550                 return 0;
2551         for (i = 0; i < ARRAY_SIZE(p->ip_frames); ++i) {
2552                 if (i <= p->ip_indirect) {
2553                         if (!iam_frame_invariant(&p->ip_frames[i]))
2554                                 return 0;
2555                 }
2556         }
2557         return 1;
2558 }
2559
2560 int iam_it_invariant(struct iam_iterator *it)
2561 {
2562         return
2563                 (it->ii_state == IAM_IT_DETACHED ||
2564                  it->ii_state == IAM_IT_ATTACHED ||
2565                  it->ii_state == IAM_IT_SKEWED) &&
2566                 !(it->ii_flags & ~(IAM_IT_MOVE | IAM_IT_WRITE)) &&
2567                 ergo(it->ii_state == IAM_IT_ATTACHED ||
2568                      it->ii_state == IAM_IT_SKEWED,
2569                      iam_path_invariant(&it->ii_path) &&
2570                      equi(it_at_rec(it), it->ii_state == IAM_IT_SKEWED));
2571 }
2572
2573 /*
2574  * Search container @c for the record with key @k.  If the record is found,
2575  * its data is copied into @r.
2576  *
2577  * Return values: 0: found, -ENOENT: not-found, -ve: error
2578  */
2579 int iam_lookup(struct iam_container *c, const struct iam_key *k,
2580                struct iam_rec *r, struct iam_path_descr *pd)
2581 {
2582         struct iam_iterator it;
2583         int result;
2584
2585         iam_it_init(&it, c, 0, pd);
2586
2587         result = iam_it_get_exact(&it, k);
2588         if (result == 0)
2589                 /*
2590                  * record with required key found, copy it into user buffer
2591                  */
2592                 iam_reccpy(&it.ii_path.ip_leaf, r);
2593         iam_it_put(&it);
2594         iam_it_fini(&it);
2595         return result;
2596 }
2597
2598 /*
2599  * Insert new record @r with key @k into container @c (within context of
2600  * transaction @h).
2601  *
2602  * Return values: 0: success, -ve: error, including -EEXIST when record with
2603  * given key is already present.
2604  *
2605  * postcondition: ergo(result == 0 || result == -EEXIST,
2606  *                                  iam_lookup(c, k, r2) > 0);
2607  */
2608 int iam_insert(handle_t *h, struct iam_container *c, const struct iam_key *k,
2609                const struct iam_rec *r, struct iam_path_descr *pd)
2610 {
2611         struct iam_iterator it;
2612         int result;
2613
2614         iam_it_init(&it, c, IAM_IT_WRITE, pd);
2615
2616         result = iam_it_get_exact(&it, k);
2617         if (result == -ENOENT)
2618                 result = iam_it_rec_insert(h, &it, k, r);
2619         else if (result == 0)
2620                 result = -EEXIST;
2621         iam_it_put(&it);
2622         iam_it_fini(&it);
2623         return result;
2624 }
2625
2626 /*
2627  * Update record with the key @k in container @c (within context of
2628  * transaction @h), new record is given by @r.
2629  *
2630  * Return values: +1: skipped (the record value is unchanged), 0: success,
2631  * -ve: error, including -ENOENT if no record with the given key is found.
2632  */
2633 int iam_update(handle_t *h, struct iam_container *c, const struct iam_key *k,
2634                const struct iam_rec *r, struct iam_path_descr *pd)
2635 {
2636         struct iam_iterator it;
2637         struct iam_leaf *folio;
2638         int result;
2639
2640         iam_it_init(&it, c, IAM_IT_WRITE, pd);
2641
2642         result = iam_it_get_exact(&it, k);
2643         if (result == 0) {
2644                 folio = &it.ii_path.ip_leaf;
2645                 result = iam_leaf_ops(folio)->rec_eq(folio, r);
2646                 if (result == 0)
2647                         iam_it_rec_set(h, &it, r);
2648                 else
2649                         result = 1;
2650         }
2651         iam_it_put(&it);
2652         iam_it_fini(&it);
2653         return result;
2654 }
2655
2656 /*
2657  * Delete existing record with key @k.
2658  *
2659  * Return values: 0: success, -ENOENT: not-found, -ve: other error.
2660  *
2661  * postcondition: ergo(result == 0 || result == -ENOENT,
2662  *                                 !iam_lookup(c, k, *));
2663  */
2664 int iam_delete(handle_t *h, struct iam_container *c, const struct iam_key *k,
2665                struct iam_path_descr *pd)
2666 {
2667         struct iam_iterator it;
2668         int result;
2669
2670         iam_it_init(&it, c, IAM_IT_WRITE, pd);
2671
2672         result = iam_it_get_exact(&it, k);
2673         if (result == 0)
2674                 iam_it_rec_delete(h, &it);
2675         iam_it_put(&it);
2676         iam_it_fini(&it);
2677         return result;
2678 }
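     /*
      * Illustrative sketch of the top-level container calls above (error
      * handling trimmed).  "c" is an initialized iam_container, "pd" an
      * iam_path_descr sized for its keys, "h" an open journal handle, and
      * key/rec/rec2/out are buffers of the container's key and record sizes.
      *
      *        int rc;
      *
      *        rc = iam_insert(h, c, key, rec, pd);    (0, or -EEXIST)
      *        rc = iam_lookup(c, key, out, pd);       (0: found, -ENOENT: absent)
      *        rc = iam_update(h, c, key, rec2, pd);   (+1: unchanged, 0: updated)
      *        rc = iam_delete(h, c, key, pd);         (0, or -ENOENT)
      */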
2679
2680 int iam_root_limit(int rootgap, int blocksize, int size)
2681 {
2682         int limit;
2683         int nlimit;
2684
2685         limit = (blocksize - rootgap) / size;
2686         nlimit = blocksize / size;
2687         if (limit == nlimit)
2688                 limit--;
2689         return limit;
2690 }
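     /*
      * Worked example (illustrative numbers): with blocksize = 4096, size = 24
      * and rootgap = 32, limit = (4096 - 32) / 24 = 169 while nlimit = 170, so
      * 169 is returned unchanged.  With rootgap = 8, however, both work out to
      * 170, so the root limit is decremented to 169, keeping the root node
      * strictly smaller than a non-root node (split_index_node() asserts this).
      */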