1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see [sun.com URL with a
18  * copy of GPLv2].
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2011, 2015, Intel Corporation.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * iam.c
37  * Top-level entry points into iam module
38  *
39  * Author: Wang Di <wangdi@clusterfs.com>
40  * Author: Nikita Danilov <nikita@clusterfs.com>
41  */
42
43 /*
44  * iam: big theory statement.
45  *
46  * iam (Index Access Module) is a module providing an abstraction of a
47  * persistent transactional container on top of the generalized ldiskfs htree.
48  *
49  * iam supports:
50  *
51  *     - key, pointer, and record size specifiable per container.
52  *
53  *     - trees taller than 2 index levels.
54  *
55  *     - read/write to existing ldiskfs htree directories as iam containers.
56  *
57  * An iam container is a tree consisting of leaf nodes, which contain the
58  * keys and records stored in the container, and index nodes, which contain
59  * keys and pointers to leaf or index nodes.
60  *
61  * iam does not work with keys directly; instead it calls a user-supplied key
62  * comparison function (->dpo_keycmp()).
63  *
64  * Pointers are (currently) interpreted as logical offsets (measured in
65  * blocks) within the underlying flat file on top of which the iam tree lives.
66  *
67  * On-disk format:
68  *
69  * iam mostly tries to reuse existing htree formats.
70  *
71  * Format of index node:
72  *
73  * +-----+-------+-------+-------+------+-------+------------+
74  * |     | count |       |       |      |       |            |
75  * | gap |   /   | entry | entry | .... | entry | free space |
76  * |     | limit |       |       |      |       |            |
77  * +-----+-------+-------+-------+------+-------+------------+
78  *
79  *       gap           this part of the node is never accessed by iam code. It
80  *                     exists for binary compatibility with the ldiskfs htree
81  *                     (which, in turn, stores a fake struct ext2_dirent for
82  *                     ext2 compatibility), and to keep some unspecified
83  *                     per-node data. The gap can differ between root and
84  *                     non-root index nodes. The gap size can be specified per
85  *                     container (a gap of 0 is allowed).
86  *
87  *       count/limit   current number of entries in this node, and the maximal
88  *                     number of entries that can fit into the node. The
89  *                     count/limit pair has the same size as an entry, and is
90  *                     itself counted in count.
91  *
92  *       entry         index entry: consists of a key immediately followed by
93  *                     a pointer to a child node. The sizes of the key and of
94  *                     the pointer depend on the container. Entries have
95  *                     neither alignment nor padding.
96  *
97  *       free space    portion of the node to which new entries are added
98  *
99  * Entries in an index node are sorted by their key value.
100  *
101  * Format of a leaf node is not specified. Generic iam code accesses leaf
102  * nodes through ->id_leaf methods in struct iam_descr.
103  *
104  * The IAM root block is a special node, which contains the IAM descriptor.
105  * Its on-disk format is:
106  *
107  * +---------+-------+--------+---------+-------+------+-------+------------+
108  * |IAM desc | count |  idle  |         |       |      |       |            |
109  * |(fix/var)|   /   | blocks | padding | entry | .... | entry | free space |
110  * |         | limit |        |         |       |      |       |            |
111  * +---------+-------+--------+---------+-------+------+-------+------------+
112  *
113  * The padding length is calculated from the parameters in the IAM descriptor.
114  *
115  * The field "idle_blocks" records empty leaf nodes that have not been
116  * released even though all of their entries have been removed. Ideally,
117  * such idle blocks would be reused whenever new leaf nodes are needed for
118  * new entries, but that would require the IAM hash functions to map the
119  * new entries onto exactly these idle blocks. Unfortunately, it is not
120  * easy to design hash functions for such a clever mapping, especially
121  * without hurting insert/lookup performance.
122  *
123  * So the IAM recycles the empty leaf nodes by putting them into a per-file
124  * idle blocks pool. When a new leaf node is needed, a block is taken from
125  * this pool first, regardless of where the IAM hash functions would map
126  * the entry.
127  *
128  * The idle blocks pool is organized as a series of tables, and each table
129  * can be described as follows (on-disk format):
130  *
131  * +---------+---------+---------+---------+------+---------+-------+
132  * |  magic  |  count  |  next   |  logic  |      |  logic  | free  |
133  * |(16 bits)|(16 bits)|  table  |  blk #  | .... |  blk #  | space |
134  * |         |         |(32 bits)|(32 bits)|      |(32 bits)|       |
135  * +---------+---------+---------+---------+------+---------+-------+
136  *
137  * The logical blk# of the first table is stored in the root node field
138  * "idle_blocks"; an illustrative sketch follows this comment.
139  */
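/*
 * Illustrative sketch (not compiled, not a format definition): how the
 * structures described above might be expressed in C.  Only iih_magic,
 * iih_count, iih_blks[] and IAM_IDLE_HEADER_MAGIC are used elsewhere in
 * this file; the remaining names below are hypothetical.
 *
 *   An index entry is a key immediately followed by a block pointer, with
 *   no alignment or padding; slot 0 of the entry array is the count/limit
 *   pair, which has the same size as an entry:
 *
 *       entry_size     = key_size + ptr_size;
 *       slot_offset(i) = gap_size + i * entry_size;
 *
 *   A possible layout of an idle-blocks table header (the "next table"
 *   field name is a guess; the other fields match the code below):
 *
 *       struct iam_idle_head_sketch {
 *               __le16 iih_magic;       16-bit magic, IAM_IDLE_HEADER_MAGIC
 *               __le16 iih_count;       number of idle blocks listed here
 *               __le32 iih_next;        logical blk# of next table, 0 if none
 *               __le32 iih_blks[];      idle leaf blocks, up to the free space
 *       };
 */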
140
141 #include <linux/module.h>
142 #include <linux/fs.h>
143 #include <linux/pagemap.h>
144 #include <linux/time.h>
145 #include <linux/fcntl.h>
146 #include <linux/stat.h>
147 #include <linux/string.h>
148 #include <linux/quotaops.h>
149 #include <linux/buffer_head.h>
150
151 #include <ldiskfs/ldiskfs.h>
152 #include <ldiskfs/xattr.h>
153 #undef ENTRY
154
155 #include "osd_internal.h"
156
157 #include <ldiskfs/acl.h>
158
159 /*
160  * List of all registered formats.
161  *
162  * No locking. Callers synchronize.
163  */
164 static struct list_head iam_formats = LIST_HEAD_INIT(iam_formats);
165
166 void iam_format_register(struct iam_format *fmt)
167 {
168         list_add(&fmt->if_linkage, &iam_formats);
169 }
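/*
 * A minimal sketch (not compiled) of how a format implementation might plug
 * into iam_format_register().  Only ->if_guess() and ->if_linkage are
 * assumed here, since they are the only members this file uses; the
 * "sample" names are hypothetical.
 *
 *     static int iam_sample_guess(struct iam_container *c)
 *     {
 *             int recognized = 0;    set after inspecting the root block of
 *                                    c->ic_object and filling in c->ic_descr
 *
 *             return recognized ? 0 : -ENOENT;
 *     }
 *
 *     static struct iam_format iam_sample_format = {
 *             .if_guess = iam_sample_guess,
 *     };
 *
 *     void iam_sample_format_init(void)
 *     {
 *             iam_format_register(&iam_sample_format);
 *     }
 *
 * iam_format_guess() below walks the registered list and stops at the first
 * format whose ->if_guess() returns 0.
 */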
170
171 static struct buffer_head *
172 iam_load_idle_blocks(struct iam_container *c, iam_ptr_t blk)
173 {
174         struct inode *inode = c->ic_object;
175         struct iam_idle_head *head;
176         struct buffer_head *bh;
177         int err = 0;
178
179         LASSERT(mutex_is_locked(&c->ic_idle_mutex));
180
181         if (blk == 0)
182                 return NULL;
183
184         bh = ldiskfs_bread(NULL, inode, blk, 0, &err);
185         if (bh == NULL) {
186                 CERROR("%.16s: cannot load idle blocks, blk = %u, err = %d\n",
187                        LDISKFS_SB(inode->i_sb)->s_es->s_volume_name, blk, err);
188                 c->ic_idle_failed = 1;
189                 err = err ? err : -EIO;
190                 return ERR_PTR(err);
191         }
192
193         head = (struct iam_idle_head *)(bh->b_data);
194         if (le16_to_cpu(head->iih_magic) != IAM_IDLE_HEADER_MAGIC) {
195                 CERROR("%.16s: invalid idle block head, blk = %u, magic = %d\n",
196                        LDISKFS_SB(inode->i_sb)->s_es->s_volume_name, blk,
197                        le16_to_cpu(head->iih_magic));
198                 brelse(bh);
199                 c->ic_idle_failed = 1;
200                 return ERR_PTR(-EBADF);
201         }
202
203         return bh;
204 }
205
206 /*
207  * Determine format of given container. This is done by scanning list of
208  * registered formats and calling ->if_guess() method of each in turn.
209  */
210 static int iam_format_guess(struct iam_container *c)
211 {
212         int result;
213         struct iam_format *fmt;
214
215         /*
216          * XXX temporary initialization hook.
217          */
218         {
219                 static int initialized = 0;
220
221                 if (!initialized) {
222                         iam_lvar_format_init();
223                         iam_lfix_format_init();
224                         initialized = 1;
225                 }
226         }
227
228         result = -ENOENT;
229         list_for_each_entry(fmt, &iam_formats, if_linkage) {
230                 result = fmt->if_guess(c);
231                 if (result == 0)
232                         break;
233         }
234
235         if (result == 0) {
236                 struct buffer_head *bh;
237                 __u32 *idle_blocks;
238
239                 LASSERT(c->ic_root_bh != NULL);
240
241                 idle_blocks = (__u32 *)(c->ic_root_bh->b_data +
242                                         c->ic_descr->id_root_gap +
243                                         sizeof(struct dx_countlimit));
244                 mutex_lock(&c->ic_idle_mutex);
245                 bh = iam_load_idle_blocks(c, le32_to_cpu(*idle_blocks));
246                 if (bh != NULL && IS_ERR(bh))
247                         result = PTR_ERR(bh);
248                 else
249                         c->ic_idle_bh = bh;
250                 mutex_unlock(&c->ic_idle_mutex);
251         }
252
253         return result;
254 }
255
256 /*
257  * Initialize container @c.
258  */
259 int iam_container_init(struct iam_container *c,
260                        struct iam_descr *descr, struct inode *inode)
261 {
262         memset(c, 0, sizeof *c);
263         c->ic_descr  = descr;
264         c->ic_object = inode;
265         init_rwsem(&c->ic_sem);
266         dynlock_init(&c->ic_tree_lock);
267         mutex_init(&c->ic_idle_mutex);
268         return 0;
269 }
270
271 /*
272  * Determine container format.
273  */
274 int iam_container_setup(struct iam_container *c)
275 {
276         return iam_format_guess(c);
277 }
278
279 /*
280  * Finalize container @c, release all resources.
281  */
282 void iam_container_fini(struct iam_container *c)
283 {
284         brelse(c->ic_idle_bh);
285         c->ic_idle_bh = NULL;
286         brelse(c->ic_root_bh);
287         c->ic_root_bh = NULL;
288 }
289
290 void iam_path_init(struct iam_path *path, struct iam_container *c,
291                    struct iam_path_descr *pd)
292 {
293         memset(path, 0, sizeof *path);
294         path->ip_container = c;
295         path->ip_frame = path->ip_frames;
296         path->ip_data = pd;
297         path->ip_leaf.il_path = path;
298 }
299
300 static void iam_leaf_fini(struct iam_leaf *leaf);
301
302 void iam_path_release(struct iam_path *path)
303 {
304         int i;
305
306         for (i = 0; i < ARRAY_SIZE(path->ip_frames); i++) {
307                 if (path->ip_frames[i].bh != NULL) {
308                         path->ip_frames[i].at_shifted = 0;
309                         brelse(path->ip_frames[i].bh);
310                         path->ip_frames[i].bh = NULL;
311                 }
312         }
313 }
314
315 void iam_path_fini(struct iam_path *path)
316 {
317         iam_leaf_fini(&path->ip_leaf);
318         iam_path_release(path);
319 }
320
321
322 void iam_path_compat_init(struct iam_path_compat *path, struct inode *inode)
323 {
324         int i;
325
326         path->ipc_hinfo = &path->ipc_hinfo_area;
327         for (i = 0; i < ARRAY_SIZE(path->ipc_scratch); ++i)
328                 path->ipc_descr.ipd_key_scratch[i] =
329                         (struct iam_ikey *)&path->ipc_scratch[i];
330
331         iam_path_init(&path->ipc_path, &path->ipc_container, &path->ipc_descr);
332 }
333
334 void iam_path_compat_fini(struct iam_path_compat *path)
335 {
336         iam_path_fini(&path->ipc_path);
337 }
338
339 /*
340  * Helper function initializing iam_path_descr and its key scratch area.
341  */
342 struct iam_path_descr *iam_ipd_alloc(void *area, int keysize)
343 {
344         struct iam_path_descr *ipd;
345         void *karea;
346         int i;
347
348         ipd = area;
349         karea = ipd + 1;
350         for (i = 0; i < ARRAY_SIZE(ipd->ipd_key_scratch); ++i, karea += keysize)
351                 ipd->ipd_key_scratch[i] = karea;
352         return ipd;
353 }
354
355 void iam_ipd_free(struct iam_path_descr *ipd)
356 {
357 }
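/*
 * Illustrative only: a caller is expected to hand iam_ipd_alloc() a single
 * memory area large enough for the descriptor plus one scratch key per
 * slot.  A sketch, assuming keys of "keysize" bytes (allocation flags and
 * error handling abbreviated, names hypothetical):
 *
 *     size = sizeof(struct iam_path_descr) +
 *            ARRAY_SIZE(((struct iam_path_descr *)0)->ipd_key_scratch) *
 *            keysize;
 *     area = kmalloc(size, GFP_NOFS);
 *     ipd  = area != NULL ? iam_ipd_alloc(area, keysize) : NULL;
 *     ...
 *     iam_ipd_free(ipd);
 *     kfree(area);
 *
 * Note that iam_ipd_free() itself releases nothing; freeing the area
 * remains the caller's responsibility.
 */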
358
359 int iam_node_read(struct iam_container *c, iam_ptr_t ptr,
360                   handle_t *h, struct buffer_head **bh)
361 {
362         int result = 0;
363
364         /* NB: it can be called by iam_lfix_guess() which is still at
365          * a very early stage, when c->ic_root_bh and c->ic_descr->id_ops
366          * haven't been initialized yet.
367          * Also, we don't have this for IAM dir.
368          */
369         if (c->ic_root_bh != NULL &&
370             c->ic_descr->id_ops->id_root_ptr(c) == ptr) {
371                 get_bh(c->ic_root_bh);
372                 *bh = c->ic_root_bh;
373                 return 0;
374         }
375
376         *bh = ldiskfs_bread(h, c->ic_object, (int)ptr, 0, &result);
377         if (*bh == NULL)
378                 result = result ? result : -EIO;
379         return result;
380 }
381
382 /*
383  * Return pointer to current leaf record. Pointer is valid while corresponding
384  * leaf node is locked and pinned.
385  */
386 static struct iam_rec *iam_leaf_rec(const struct iam_leaf *leaf)
387 {
388         return iam_leaf_ops(leaf)->rec(leaf);
389 }
390
391 /*
392  * Return pointer to the current leaf key. This function returns pointer to
393  * the key stored in node.
394  *
395  * Caller should assume that returned pointer is only valid while leaf node is
396  * pinned and locked.
397  */
398 static struct iam_key *iam_leaf_key(const struct iam_leaf *leaf)
399 {
400         return iam_leaf_ops(leaf)->key(leaf);
401 }
402
403 static int iam_leaf_key_size(const struct iam_leaf *leaf)
404 {
405         return iam_leaf_ops(leaf)->key_size(leaf);
406 }
407
408 static struct iam_ikey *iam_leaf_ikey(const struct iam_leaf *leaf,
409                                       struct iam_ikey *key)
410 {
411         return iam_leaf_ops(leaf)->ikey(leaf, key);
412 }
413
414 static int iam_leaf_keycmp(const struct iam_leaf *leaf,
415                            const struct iam_key *key)
416 {
417         return iam_leaf_ops(leaf)->key_cmp(leaf, key);
418 }
419
420 static int iam_leaf_keyeq(const struct iam_leaf *leaf,
421                           const struct iam_key *key)
422 {
423         return iam_leaf_ops(leaf)->key_eq(leaf, key);
424 }
425
426 #if LDISKFS_INVARIANT_ON
427 extern int dx_node_check(struct iam_path *p, struct iam_frame *f);
428
429 static int iam_path_check(struct iam_path *p)
430 {
431         int i;
432         int result;
433         struct iam_frame *f;
434         struct iam_descr *param;
435
436         result = 1;
437         param = iam_path_descr(p);
438         for (i = 0; result && i < ARRAY_SIZE(p->ip_frames); ++i) {
439                 f = &p->ip_frames[i];
440                 if (f->bh != NULL) {
441                         result = dx_node_check(p, f);
442                         if (result)
443                                 result = !param->id_ops->id_node_check(p, f);
444                 }
445         }
446         if (result && p->ip_leaf.il_bh != NULL)
447                 result = 1;
448         if (result == 0)
449                 ldiskfs_std_error(iam_path_obj(p)->i_sb, result);
450
451         return result;
452 }
453 #endif
454
455 static int iam_leaf_load(struct iam_path *path)
456 {
457         iam_ptr_t block;
458         int err;
459         struct iam_container *c;
460         struct buffer_head   *bh;
461         struct iam_leaf      *leaf;
462         struct iam_descr     *descr;
463
464         c     = path->ip_container;
465         leaf  = &path->ip_leaf;
466         descr = iam_path_descr(path);
467         block = path->ip_frame->leaf;
468         if (block == 0) {
469                 /* XXX bug 11027 */
470                 printk(KERN_EMERG "wrong leaf: %lu %d [%p %p %p]\n",
471                        (long unsigned)path->ip_frame->leaf,
472                        dx_get_count(dx_node_get_entries(path, path->ip_frame)),
473                        path->ip_frames[0].bh, path->ip_frames[1].bh,
474                        path->ip_frames[2].bh);
475         }
476         err = descr->id_ops->id_node_read(c, block, NULL, &bh);
477         if (err == 0) {
478                 leaf->il_bh = bh;
479                 leaf->il_curidx = block;
480                 err = iam_leaf_ops(leaf)->init(leaf);
481         }
482         return err;
483 }
484
485 static void iam_unlock_htree(struct iam_container *ic,
486                              struct dynlock_handle *lh)
487 {
488         if (lh != NULL)
489                 dynlock_unlock(&ic->ic_tree_lock, lh);
490 }
491
492
493 static void iam_leaf_unlock(struct iam_leaf *leaf)
494 {
495         if (leaf->il_lock != NULL) {
496                 iam_unlock_htree(iam_leaf_container(leaf),
497                                  leaf->il_lock);
498                 do_corr(schedule());
499                 leaf->il_lock = NULL;
500         }
501 }
502
503 static void iam_leaf_fini(struct iam_leaf *leaf)
504 {
505         if (leaf->il_path != NULL) {
506                 iam_leaf_unlock(leaf);
507                 iam_leaf_ops(leaf)->fini(leaf);
508                 if (leaf->il_bh) {
509                         brelse(leaf->il_bh);
510                         leaf->il_bh = NULL;
511                         leaf->il_curidx = 0;
512                 }
513         }
514 }
515
516 static void iam_leaf_start(struct iam_leaf *folio)
517 {
518         iam_leaf_ops(folio)->start(folio);
519 }
520
521 void iam_leaf_next(struct iam_leaf *folio)
522 {
523         iam_leaf_ops(folio)->next(folio);
524 }
525
526 static void iam_leaf_rec_add(struct iam_leaf *leaf, const struct iam_key *key,
527                              const struct iam_rec *rec)
528 {
529         iam_leaf_ops(leaf)->rec_add(leaf, key, rec);
530 }
531
532 static void iam_rec_del(struct iam_leaf *leaf, int shift)
533 {
534         iam_leaf_ops(leaf)->rec_del(leaf, shift);
535 }
536
537 int iam_leaf_at_end(const struct iam_leaf *leaf)
538 {
539         return iam_leaf_ops(leaf)->at_end(leaf);
540 }
541
542 static void iam_leaf_split(struct iam_leaf *l, struct buffer_head **bh,
543                            iam_ptr_t nr)
544 {
545         iam_leaf_ops(l)->split(l, bh, nr);
546 }
547
548 static inline int iam_leaf_empty(struct iam_leaf *l)
549 {
550         return iam_leaf_ops(l)->leaf_empty(l);
551 }
552
553 int iam_leaf_can_add(const struct iam_leaf *l,
554                      const struct iam_key *k, const struct iam_rec *r)
555 {
556         return iam_leaf_ops(l)->can_add(l, k, r);
557 }
558
559 static int iam_txn_dirty(handle_t *handle,
560                          struct iam_path *path, struct buffer_head *bh)
561 {
562         int result;
563
564         result = ldiskfs_handle_dirty_metadata(handle, NULL, bh);
565         if (result != 0)
566                 ldiskfs_std_error(iam_path_obj(path)->i_sb, result);
567         return result;
568 }
569
570 static int iam_txn_add(handle_t *handle,
571                        struct iam_path *path, struct buffer_head *bh)
572 {
573         int result;
574
575         result = ldiskfs_journal_get_write_access(handle, bh);
576         if (result != 0)
577                 ldiskfs_std_error(iam_path_obj(path)->i_sb, result);
578         return result;
579 }
580
581 /***********************************************************************/
582 /* iterator interface                                                  */
583 /***********************************************************************/
584
585 static enum iam_it_state it_state(const struct iam_iterator *it)
586 {
587         return it->ii_state;
588 }
589
590 /*
591  * Helper function returning the iterator's container.
592  */
593 static struct iam_container *iam_it_container(const struct iam_iterator *it)
594 {
595         return it->ii_path.ip_container;
596 }
597
598 static inline int it_keycmp(const struct iam_iterator *it,
599                             const struct iam_key *k)
600 {
601         return iam_leaf_keycmp(&it->ii_path.ip_leaf, k);
602 }
603
604 static inline int it_keyeq(const struct iam_iterator *it,
605                            const struct iam_key *k)
606 {
607         return iam_leaf_keyeq(&it->ii_path.ip_leaf, k);
608 }
609
610 static int it_ikeycmp(const struct iam_iterator *it, const struct iam_ikey *ik)
611 {
612         return iam_ikeycmp(it->ii_path.ip_container,
613                            iam_leaf_ikey(&it->ii_path.ip_leaf,
614                                          iam_path_ikey(&it->ii_path, 0)), ik);
615 }
616
617 static inline int it_at_rec(const struct iam_iterator *it)
618 {
619         return !iam_leaf_at_end(&it->ii_path.ip_leaf);
620 }
621
622 static inline int it_before(const struct iam_iterator *it)
623 {
624         return it_state(it) == IAM_IT_SKEWED && it_at_rec(it);
625 }
626
627 /*
628  * Helper wrapper around iam_it_get(): returns 0 (success) only when record
629  * with exactly the same key as asked is found.
630  */
631 static int iam_it_get_exact(struct iam_iterator *it, const struct iam_key *k)
632 {
633         int result;
634
635         result = iam_it_get(it, k);
636         if (result > 0)
637                 result = 0;
638         else if (result == 0)
639                 /*
640                  * Return -ENOENT if cursor is located above record with a key
641                  * different from one specified, or in the empty leaf.
642                  *
643                  * XXX returning -ENOENT only works if iam_it_get() never
644                  * returns -ENOENT as a legitimate error.
645                  */
646                 result = -ENOENT;
647         return result;
648 }
649
650 void iam_container_write_lock(struct iam_container *ic)
651 {
652         down_write(&ic->ic_sem);
653 }
654
655 void iam_container_write_unlock(struct iam_container *ic)
656 {
657         up_write(&ic->ic_sem);
658 }
659
660 void iam_container_read_lock(struct iam_container *ic)
661 {
662         down_read(&ic->ic_sem);
663 }
664
665 void iam_container_read_unlock(struct iam_container *ic)
666 {
667         up_read(&ic->ic_sem);
668 }
669
670 /*
671  * Initialize iterator to IAM_IT_DETACHED state.
672  *
673  * postcondition: it_state(it) == IAM_IT_DETACHED
674  */
675 int  iam_it_init(struct iam_iterator *it, struct iam_container *c, __u32 flags,
676                  struct iam_path_descr *pd)
677 {
678         memset(it, 0, sizeof *it);
679         it->ii_flags  = flags;
680         it->ii_state  = IAM_IT_DETACHED;
681         iam_path_init(&it->ii_path, c, pd);
682         return 0;
683 }
684
685 /*
686  * Finalize iterator and release all resources.
687  *
688  * precondition: it_state(it) == IAM_IT_DETACHED
689  */
690 void iam_it_fini(struct iam_iterator *it)
691 {
692         assert_corr(it_state(it) == IAM_IT_DETACHED);
693         iam_path_fini(&it->ii_path);
694 }
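/*
 * A minimal sketch (not compiled) of the read-only iterator life cycle,
 * using only functions from this file; names are hypothetical and error
 * handling is abbreviated:
 *
 *     struct iam_iterator it;
 *     int rc;
 *
 *     iam_it_init(&it, c, 0, pd);
 *     rc = iam_it_get_exact(&it, key);        0 only on an exact match
 *     if (rc == 0) {
 *             struct iam_rec *rec = iam_it_rec_get(&it);
 *
 *             copy the record out while the leaf is still pinned and locked
 *     }
 *     iam_it_put(&it);
 *     iam_it_fini(&it);
 *
 * Callers that need to serialize against container-wide operations can use
 * iam_container_read_lock()/iam_container_read_unlock() around the above.
 */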
695
696 /*
697  * These locking primitives are used to protect parts
698  * of the dir's htree. The unit of protection is a block: leaf or index.
699  */
700 static struct dynlock_handle *iam_lock_htree(struct iam_container *ic,
701                                              unsigned long value,
702                                              enum dynlock_type lt)
703 {
704         return dynlock_lock(&ic->ic_tree_lock, value, lt, GFP_NOFS);
705 }
706
707 static int iam_index_lock(struct iam_path *path, struct dynlock_handle **lh)
708 {
709         struct iam_frame *f;
710
711         for (f = path->ip_frame; f >= path->ip_frames; --f, ++lh) {
712                 do_corr(schedule());
713                 *lh = iam_lock_htree(path->ip_container, f->curidx, DLT_READ);
714                 if (*lh == NULL)
715                         return -ENOMEM;
716         }
717         return 0;
718 }
719
720 /*
721  * Fast check for frame consistency.
722  */
723 static int iam_check_fast(struct iam_path *path, struct iam_frame *frame)
724 {
725         struct iam_container *bag;
726         struct iam_entry *next;
727         struct iam_entry *last;
728         struct iam_entry *entries;
729         struct iam_entry *at;
730
731         bag     = path->ip_container;
732         at      = frame->at;
733         entries = frame->entries;
734         last    = iam_entry_shift(path, entries, dx_get_count(entries) - 1);
735
736         if (unlikely(at > last))
737                 return -EAGAIN;
738
739         if (unlikely(dx_get_block(path, at) != frame->leaf))
740                 return -EAGAIN;
741
742         if (unlikely(iam_ikeycmp(bag, iam_ikey_at(path, at),
743                                  path->ip_ikey_target) > 0))
744                 return -EAGAIN;
745
746         next = iam_entry_shift(path, at, +1);
747         if (next <= last) {
748                 if (unlikely(iam_ikeycmp(bag, iam_ikey_at(path, next),
749                                          path->ip_ikey_target) <= 0))
750                         return -EAGAIN;
751         }
752         return 0;
753 }
754
755 int dx_index_is_compat(struct iam_path *path)
756 {
757         return iam_path_descr(path) == NULL;
758 }
759
760 /*
761  * iam_find_position
762  *
763  * Search for the position of the specified hash in the index node.
764  *
765  */
766
767 static struct iam_entry *iam_find_position(struct iam_path *path,
768                                            struct iam_frame *frame)
769 {
770         int count;
771         struct iam_entry *p;
772         struct iam_entry *q;
773         struct iam_entry *m;
774
775         count = dx_get_count(frame->entries);
776         assert_corr(count && count <= dx_get_limit(frame->entries));
777         p = iam_entry_shift(path, frame->entries,
778                             dx_index_is_compat(path) ? 1 : 2);
779         q = iam_entry_shift(path, frame->entries, count - 1);
780         while (p <= q) {
781                 m = iam_entry_shift(path, p, iam_entry_diff(path, q, p) / 2);
782                 if (iam_ikeycmp(path->ip_container, iam_ikey_at(path, m),
783                                 path->ip_ikey_target) > 0)
784                         q = iam_entry_shift(path, m, -1);
785                 else
786                         p = iam_entry_shift(path, m, +1);
787         }
788         return iam_entry_shift(path, p, -1);
789 }
790
791
792
793 static iam_ptr_t iam_find_ptr(struct iam_path *path, struct iam_frame *frame)
794 {
795         return dx_get_block(path, iam_find_position(path, frame));
796 }
797
798 void iam_insert_key(struct iam_path *path, struct iam_frame *frame,
799                     const struct iam_ikey *key, iam_ptr_t ptr)
800 {
801         struct iam_entry *entries = frame->entries;
802         struct iam_entry *new = iam_entry_shift(path, frame->at, +1);
803         int count = dx_get_count(entries);
804
805         /*
806          * Unfortunately we cannot assert this, as this function is sometimes
807          * called by VFS under i_sem and without pdirops lock.
808          */
809         assert_corr(1 || iam_frame_is_locked(path, frame));
810         assert_corr(count < dx_get_limit(entries));
811         assert_corr(frame->at < iam_entry_shift(path, entries, count));
812         assert_inv(dx_node_check(path, frame));
813
814         memmove(iam_entry_shift(path, new, 1), new,
815                 (char *)iam_entry_shift(path, entries, count) - (char *)new);
816         dx_set_ikey(path, new, key);
817         dx_set_block(path, new, ptr);
818         dx_set_count(entries, count + 1);
819         assert_inv(dx_node_check(path, frame));
820 }
821
822 void iam_insert_key_lock(struct iam_path *path, struct iam_frame *frame,
823                          const struct iam_ikey *key, iam_ptr_t ptr)
824 {
825         iam_lock_bh(frame->bh);
826         iam_insert_key(path, frame, key, ptr);
827         iam_unlock_bh(frame->bh);
828 }
829 /*
830  * returns 0 if path was unchanged, -EAGAIN otherwise.
831  */
832 static int iam_check_path(struct iam_path *path, struct iam_frame *frame)
833 {
834         int equal;
835
836         iam_lock_bh(frame->bh);
837         equal = iam_check_fast(path, frame) == 0 ||
838                 frame->leaf == iam_find_ptr(path, frame);
839         DX_DEVAL(iam_lock_stats.dls_bh_again += !equal);
840         iam_unlock_bh(frame->bh);
841
842         return equal ? 0 : -EAGAIN;
843 }
844
845 static int iam_lookup_try(struct iam_path *path)
846 {
847         u32 ptr;
848         int err = 0;
849         int i;
850
851         struct iam_descr *param;
852         struct iam_frame *frame;
853         struct iam_container *c;
854
855         param = iam_path_descr(path);
856         c = path->ip_container;
857
858         ptr = param->id_ops->id_root_ptr(c);
859         for (frame = path->ip_frames, i = 0; i <= path->ip_indirect;
860              ++frame, ++i) {
861                 err = param->id_ops->id_node_read(c, (iam_ptr_t)ptr, NULL,
862                                                   &frame->bh);
863                 do_corr(schedule());
864
865                 iam_lock_bh(frame->bh);
866                 /*
867                  * node must be initialized under bh lock because concurrent
868                  * creation procedure may change it and iam_lookup_try() will
869                  * see obsolete tree height. -bzzz
870                  */
871                 if (err != 0)
872                         break;
873
874                 if (LDISKFS_INVARIANT_ON) {
875                         err = param->id_ops->id_node_check(path, frame);
876                         if (err != 0)
877                                 break;
878                 }
879
880                 err = param->id_ops->id_node_load(path, frame);
881                 if (err != 0)
882                         break;
883
884                 assert_inv(dx_node_check(path, frame));
885                 /*
886                  * splitting may change the root index block and move the hash
887                  * we're looking for into another index block, so we have to check
888                  * this situation and repeat from the beginning if the path changed
889                  * -bzzz
890                  */
891                 if (i > 0) {
892                         err = iam_check_path(path, frame - 1);
893                         if (err != 0)
894                                 break;
895                 }
896
897                 frame->at = iam_find_position(path, frame);
898                 frame->curidx = ptr;
899                 frame->leaf = ptr = dx_get_block(path, frame->at);
900
901                 iam_unlock_bh(frame->bh);
902                 do_corr(schedule());
903         }
904         if (err != 0)
905                 iam_unlock_bh(frame->bh);
906         path->ip_frame = --frame;
907         return err;
908 }
909
910 static int __iam_path_lookup(struct iam_path *path)
911 {
912         int err;
913         int i;
914
915         for (i = 0; i < DX_MAX_TREE_HEIGHT; ++ i)
916                 assert(path->ip_frames[i].bh == NULL);
917
918         do {
919                 err = iam_lookup_try(path);
920                 do_corr(schedule());
921                 if (err != 0)
922                         iam_path_fini(path);
923         } while (err == -EAGAIN);
924
925         return err;
926 }
927
928 /*
929  * returns 0 if path was unchanged, -EAGAIN otherwise.
930  */
931 static int iam_check_full_path(struct iam_path *path, int search)
932 {
933         struct iam_frame *bottom;
934         struct iam_frame *scan;
935         int i;
936         int result;
937
938         do_corr(schedule());
939
940         for (bottom = path->ip_frames, i = 0;
941              i < DX_MAX_TREE_HEIGHT && bottom->bh != NULL; ++bottom, ++i) {
942                 ; /* find last filled in frame */
943         }
944
945         /*
946          * Lock frames, bottom to top.
947          */
948         for (scan = bottom - 1; scan >= path->ip_frames; --scan)
949                 iam_lock_bh(scan->bh);
950         /*
951          * Check them top to bottom.
952          */
953         result = 0;
954         for (scan = path->ip_frames; scan < bottom; ++scan) {
955                 struct iam_entry *pos;
956
957                 if (search) {
958                         if (iam_check_fast(path, scan) == 0)
959                                 continue;
960
961                         pos = iam_find_position(path, scan);
962                         if (scan->leaf != dx_get_block(path, pos)) {
963                                 result = -EAGAIN;
964                                 break;
965                         }
966                         scan->at = pos;
967                 } else {
968                         pos = iam_entry_shift(path, scan->entries,
969                                               dx_get_count(scan->entries) - 1);
970                         if (scan->at > pos ||
971                             scan->leaf != dx_get_block(path, scan->at)) {
972                                 result = -EAGAIN;
973                                 break;
974                         }
975                 }
976         }
977
978         /*
979          * Unlock top to bottom.
980          */
981         for (scan = path->ip_frames; scan < bottom; ++scan)
982                 iam_unlock_bh(scan->bh);
983         DX_DEVAL(iam_lock_stats.dls_bh_full_again += !!result);
984         do_corr(schedule());
985
986         return result;
987 }
988
989
990 /*
991  * Performs path lookup and returns with found leaf (if any) locked by htree
992  * lock.
993  */
994 static int iam_lookup_lock(struct iam_path *path,
995                            struct dynlock_handle **dl, enum dynlock_type lt)
996 {
997         int result;
998
999         while ((result = __iam_path_lookup(path)) == 0) {
1000                 do_corr(schedule());
1001                 *dl = iam_lock_htree(path->ip_container, path->ip_frame->leaf,
1002                                      lt);
1003                 if (*dl == NULL) {
1004                         iam_path_fini(path);
1005                         result = -ENOMEM;
1006                         break;
1007                 }
1008                 do_corr(schedule());
1009                 /*
1010                  * while we were locking it, the leaf we just found may have
1011                  * been split, so we need to check for this -bzzz
1012                  */
1013                 if (iam_check_full_path(path, 1) == 0)
1014                         break;
1015                 iam_unlock_htree(path->ip_container, *dl);
1016                 *dl = NULL;
1017                 iam_path_fini(path);
1018         }
1019         return result;
1020 }
1021 /*
1022  * Performs tree top-to-bottom traversal starting from root, and loads leaf
1023  * node.
1024  */
1025 static int iam_path_lookup(struct iam_path *path, int index)
1026 {
1027         struct iam_container *c;
1028         struct iam_leaf  *leaf;
1029         int result;
1030
1031         c = path->ip_container;
1032         leaf = &path->ip_leaf;
1033         result = iam_lookup_lock(path, &leaf->il_lock, DLT_WRITE);
1034         assert_inv(iam_path_check(path));
1035         do_corr(schedule());
1036         if (result == 0) {
1037                 result = iam_leaf_load(path);
1038                 if (result == 0) {
1039                         do_corr(schedule());
1040                         if (index)
1041                                 result = iam_leaf_ops(leaf)->
1042                                         ilookup(leaf, path->ip_ikey_target);
1043                         else
1044                                 result = iam_leaf_ops(leaf)->
1045                                         lookup(leaf, path->ip_key_target);
1046                         do_corr(schedule());
1047                 }
1048                 if (result < 0)
1049                         iam_leaf_unlock(leaf);
1050         }
1051         return result;
1052 }
1053
1054 /*
1055  * Common part of iam_it_{i,}get().
1056  */
1057 static int __iam_it_get(struct iam_iterator *it, int index)
1058 {
1059         int result;
1060         assert_corr(it_state(it) == IAM_IT_DETACHED);
1061
1062         result = iam_path_lookup(&it->ii_path, index);
1063         if (result >= 0) {
1064                 int collision;
1065
1066                 collision = result & IAM_LOOKUP_LAST;
1067                 switch (result & ~IAM_LOOKUP_LAST) {
1068                 case IAM_LOOKUP_EXACT:
1069                         result = +1;
1070                         it->ii_state = IAM_IT_ATTACHED;
1071                         break;
1072                 case IAM_LOOKUP_OK:
1073                         result = 0;
1074                         it->ii_state = IAM_IT_ATTACHED;
1075                         break;
1076                 case IAM_LOOKUP_BEFORE:
1077                 case IAM_LOOKUP_EMPTY:
1078                         result = 0;
1079                         it->ii_state = IAM_IT_SKEWED;
1080                         break;
1081                 default:
1082                         assert(0);
1083                 }
1084                 result |= collision;
1085         }
1086         /*
1087          * See iam_it_get_exact() for explanation.
1088          */
1089         assert_corr(result != -ENOENT);
1090         return result;
1091 }
1092
1093 /*
1094  * The correct hash, but not the same key, was found; iterate through the
1095  * hash collision chain, looking for the correct record.
1096  */
1097 static int iam_it_collision(struct iam_iterator *it)
1098 {
1099         int result;
1100
1101         assert(ergo(it_at_rec(it), !it_keyeq(it, it->ii_path.ip_key_target)));
1102
1103         while ((result = iam_it_next(it)) == 0) {
1104                 do_corr(schedule());
1105                 if (it_ikeycmp(it, it->ii_path.ip_ikey_target) != 0)
1106                         return -ENOENT;
1107                 if (it_keyeq(it, it->ii_path.ip_key_target))
1108                         return 0;
1109         }
1110         return result;
1111 }
1112
1113 /*
1114  * Attach iterator. After successful completion, @it points to record with
1115  * least key not larger than @k.
1116  *
1117  * Return value: 0: positioned on existing record,
1118  *             +ve: exact position found,
1119  *             -ve: error.
1120  *
1121  * precondition:  it_state(it) == IAM_IT_DETACHED
1122  * postcondition: ergo(result == 0 && it_state(it) == IAM_IT_ATTACHED,
1123  *                     it_keycmp(it, k) <= 0)
1124  */
1125 int iam_it_get(struct iam_iterator *it, const struct iam_key *k)
1126 {
1127         int result;
1128         assert_corr(it_state(it) == IAM_IT_DETACHED);
1129
1130         it->ii_path.ip_ikey_target = NULL;
1131         it->ii_path.ip_key_target  = k;
1132
1133         result = __iam_it_get(it, 0);
1134
1135         if (result == IAM_LOOKUP_LAST) {
1136                 result = iam_it_collision(it);
1137                 if (result != 0) {
1138                         iam_it_put(it);
1139                         iam_it_fini(it);
1140                         result = __iam_it_get(it, 0);
1141                 } else
1142                         result = +1;
1143         }
1144         if (result > 0)
1145                 result &= ~IAM_LOOKUP_LAST;
1146
1147         assert_corr(ergo(result > 0, it_keycmp(it, k) == 0));
1148         assert_corr(ergo(result == 0 && it_state(it) == IAM_IT_ATTACHED,
1149                          it_keycmp(it, k) <= 0));
1150         return result;
1151 }
1152
1153 /*
1154  * Attach iterator by index key.
1155  */
1156 static int iam_it_iget(struct iam_iterator *it, const struct iam_ikey *k)
1157 {
1158         assert_corr(it_state(it) == IAM_IT_DETACHED);
1159
1160         it->ii_path.ip_ikey_target = k;
1161         return __iam_it_get(it, 1) & ~IAM_LOOKUP_LAST;
1162 }
1163
1164 /*
1165  * Attach iterator, and ensure it points to the record (not skewed).
1166  *
1167  * Return value: 0: positioned on existing record,
1168  *             +ve: exact position found,
1169  *             -ve: error.
1170  *
1171  * precondition:  it_state(it) == IAM_IT_DETACHED &&
1172  *                !(it->ii_flags&IAM_IT_WRITE)
1173  * postcondition: ergo(result == 0, it_state(it) == IAM_IT_ATTACHED)
1174  */
1175 int iam_it_get_at(struct iam_iterator *it, const struct iam_key *k)
1176 {
1177         int result;
1178         assert_corr(it_state(it) == IAM_IT_DETACHED &&
1179                     !(it->ii_flags&IAM_IT_WRITE));
1180         result = iam_it_get(it, k);
1181         if (result == 0) {
1182                 if (it_state(it) != IAM_IT_ATTACHED) {
1183                         assert_corr(it_state(it) == IAM_IT_SKEWED);
1184                         result = iam_it_next(it);
1185                 }
1186         }
1187         assert_corr(ergo(result >= 0, it_state(it) == IAM_IT_ATTACHED));
1188         return result;
1189 }
1190
1191 /*
1192  * Duplicates iterator.
1193  *
1194  * postcondition: it_state(dst) == it_state(src) &&
1195  *                iam_it_container(dst) == iam_it_container(src) &&
1196  *                dst->ii_flags == src->ii_flags &&
1197  *                ergo(it_state(src) == IAM_IT_ATTACHED,
1198  *                     iam_it_rec_get(dst) == iam_it_rec_get(src) &&
1199  *                     iam_it_key_get(dst) == iam_it_key_get(src))
1200  */
1201 void iam_it_dup(struct iam_iterator *dst, const struct iam_iterator *src)
1202 {
1203         dst->ii_flags     = src->ii_flags;
1204         dst->ii_state     = src->ii_state;
1205         /* XXX not yet. iam_path_dup(&dst->ii_path, &src->ii_path); */
1206         /*
1207          * XXX: duplicate lock.
1208          */
1209         assert_corr(it_state(dst) == it_state(src));
1210         assert_corr(iam_it_container(dst) == iam_it_container(src));
1211         assert_corr(dst->ii_flags == src->ii_flags);
1212         assert_corr(ergo(it_state(src) == IAM_IT_ATTACHED,
1213                     iam_it_rec_get(dst) == iam_it_rec_get(src) &&
1214                     iam_it_key_get(dst) == iam_it_key_get(src)));
1215
1216 }
1217
1218 /*
1219  * Detach iterator. Does nothing in detached state.
1220  *
1221  * postcondition: it_state(it) == IAM_IT_DETACHED
1222  */
1223 void iam_it_put(struct iam_iterator *it)
1224 {
1225         if (it->ii_state != IAM_IT_DETACHED) {
1226                 it->ii_state = IAM_IT_DETACHED;
1227                 iam_leaf_fini(&it->ii_path.ip_leaf);
1228         }
1229 }
1230
1231 static struct iam_ikey *iam_it_ikey_get(const struct iam_iterator *it,
1232                                         struct iam_ikey *ikey);
1233
1234
1235 /*
1236  * This function increments the frame pointer to search the next leaf
1237  * block, and reads in the necessary intervening nodes if the search
1238  * should be necessary.  Whether or not the search is necessary is
1239  * controlled by the hash parameter.  If the hash value is even, then
1240  * the search is only continued if the next block starts with that
1241  * hash value.  This is used if we are searching for a specific file.
1242  *
1243  * If the hash value is HASH_NB_ALWAYS, then always go to the next block.
1244  *
1245  * This function returns 1 if the caller should continue to search,
1246  * or 0 if it should not.  If there is an error reading one of the
1247  * index blocks, it will return a negative error code.
1248  *
1249  * If start_hash is non-null, it will be filled in with the starting
1250  * hash of the next page.
1251  */
1252 static int iam_htree_advance(struct inode *dir, __u32 hash,
1253                               struct iam_path *path, __u32 *start_hash,
1254                               int compat)
1255 {
1256         struct iam_frame *p;
1257         struct buffer_head *bh;
1258         int err, num_frames = 0;
1259         __u32 bhash;
1260
1261         p = path->ip_frame;
1262         /*
1263          * Find the next leaf page by incrementing the frame pointer.
1264          * If we run out of entries in the interior node, loop around and
1265          * increment pointer in the parent node.  When we break out of
1266          * this loop, num_frames indicates the number of interior
1267          * nodes need to be read.
1268          */
1269         while (1) {
1270                 do_corr(schedule());
1271                 iam_lock_bh(p->bh);
1272                 if (p->at_shifted)
1273                         p->at_shifted = 0;
1274                 else
1275                         p->at = iam_entry_shift(path, p->at, +1);
1276                 if (p->at < iam_entry_shift(path, p->entries,
1277                                             dx_get_count(p->entries))) {
1278                         p->leaf = dx_get_block(path, p->at);
1279                         iam_unlock_bh(p->bh);
1280                         break;
1281                 }
1282                 iam_unlock_bh(p->bh);
1283                 if (p == path->ip_frames)
1284                         return 0;
1285                 num_frames++;
1286                 --p;
1287         }
1288
1289         if (compat) {
1290                 /*
1291                  * Htree hash magic.
1292                  */
1293                 /*
1294                  * If the hash is 1, then continue only if the next page has a
1295                  * continuation hash of any value.  This is used for readdir
1296                  * handling.  Otherwise, check to see if the hash matches the
1297                  * desired continuation hash.  If it doesn't, return since
1298                  * there's no point in reading the successive index pages.
1299                  */
1300                 dx_get_ikey(path, p->at, (struct iam_ikey *)&bhash);
1301                 if (start_hash)
1302                         *start_hash = bhash;
1303                 if ((hash & 1) == 0) {
1304                         if ((bhash & ~1) != hash)
1305                                 return 0;
1306                 }
1307         }
1308         /*
1309          * If the hash is HASH_NB_ALWAYS, we always go to the next
1310          * block so no check is necessary
1311          */
1312         while (num_frames--) {
1313                 iam_ptr_t idx;
1314
1315                 do_corr(schedule());
1316                 iam_lock_bh(p->bh);
1317                 idx = p->leaf = dx_get_block(path, p->at);
1318                 iam_unlock_bh(p->bh);
1319                 err = iam_path_descr(path)->id_ops->
1320                         id_node_read(path->ip_container, idx, NULL, &bh);
1321                 if (err != 0)
1322                         return err; /* Failure */
1323                 ++p;
1324                 brelse(p->bh);
1325                 assert_corr(p->bh != bh);
1326                 p->bh = bh;
1327                 p->entries = dx_node_get_entries(path, p);
1328                 p->at = iam_entry_shift(path, p->entries, !compat);
1329                 assert_corr(p->curidx != idx);
1330                 p->curidx = idx;
1331                 iam_lock_bh(p->bh);
1332                 assert_corr(p->leaf != dx_get_block(path, p->at));
1333                 p->leaf = dx_get_block(path, p->at);
1334                 iam_unlock_bh(p->bh);
1335                 assert_inv(dx_node_check(path, p));
1336         }
1337         return 1;
1338 }
1339
1340
1341 static inline int iam_index_advance(struct iam_path *path)
1342 {
1343         return iam_htree_advance(iam_path_obj(path), 0, path, NULL, 0);
1344 }
1345
1346 static void iam_unlock_array(struct iam_container *ic,
1347                              struct dynlock_handle **lh)
1348 {
1349         int i;
1350
1351         for (i = 0; i < DX_MAX_TREE_HEIGHT; ++i, ++lh) {
1352                 if (*lh != NULL) {
1353                         iam_unlock_htree(ic, *lh);
1354                         *lh = NULL;
1355                 }
1356         }
1357 }
1358 /*
1359  * Advance index part of @path to point to the next leaf. Returns 1 on
1360  * success, 0 when the end of the container was reached. Leaf node is locked.
1361  */
1362 int iam_index_next(struct iam_container *c, struct iam_path *path)
1363 {
1364         iam_ptr_t cursor;
1365         struct dynlock_handle *lh[DX_MAX_TREE_HEIGHT] = { NULL, };
1366         int result;
1367         struct inode *object;
1368
1369         /*
1370          * Locking for iam_index_next()... is to be described.
1371          */
1372
1373         object = c->ic_object;
1374         cursor = path->ip_frame->leaf;
1375
1376         while (1) {
1377                 result = iam_index_lock(path, lh);
1378                 do_corr(schedule());
1379                 if (result < 0)
1380                         break;
1381
1382                 result = iam_check_full_path(path, 0);
1383                 if (result == 0 && cursor == path->ip_frame->leaf) {
1384                         result = iam_index_advance(path);
1385
1386                         assert_corr(result == 0 ||
1387                                     cursor != path->ip_frame->leaf);
1388                         break;
1389                 }
1390                 do {
1391                         iam_unlock_array(c, lh);
1392
1393                         iam_path_release(path);
1394                         do_corr(schedule());
1395
1396                         result = __iam_path_lookup(path);
1397                         if (result < 0)
1398                                 break;
1399
1400                         while (path->ip_frame->leaf != cursor) {
1401                                 do_corr(schedule());
1402
1403                                 result = iam_index_lock(path, lh);
1404                                 do_corr(schedule());
1405                                 if (result < 0)
1406                                         break;
1407
1408                                 result = iam_check_full_path(path, 0);
1409                                 if (result != 0)
1410                                         break;
1411
1412                                 result = iam_index_advance(path);
1413                                 if (result == 0) {
1414                                         CERROR("cannot find cursor : %u\n",
1415                                                 cursor);
1416                                         result = -EIO;
1417                                 }
1418                                 if (result < 0)
1419                                         break;
1420                                 result = iam_check_full_path(path, 0);
1421                                 if (result != 0)
1422                                         break;
1423                                 iam_unlock_array(c, lh);
1424                         }
1425                 } while (result == -EAGAIN);
1426                 if (result < 0)
1427                         break;
1428         }
1429         iam_unlock_array(c, lh);
1430         return result;
1431 }
1432
1433 /*
1434  * Move iterator one record right.
1435  *
1436  * Return value: 0: success,
1437  *              +1: end of container reached
1438  *             -ve: error
1439  *
1440  * precondition:  (it_state(it) == IAM_IT_ATTACHED ||
1441  *                 it_state(it) == IAM_IT_SKEWED) && it->ii_flags&IAM_IT_MOVE
1442  * postcondition: ergo(result == 0, it_state(it) == IAM_IT_ATTACHED) &&
1443  *                ergo(result >  0, it_state(it) == IAM_IT_DETACHED)
1444  */
1445 int iam_it_next(struct iam_iterator *it)
1446 {
1447         int result;
1448         struct iam_path      *path;
1449         struct iam_leaf      *leaf;
1450         do_corr(struct iam_ikey *ik_orig);
1451
1452         /* assert_corr(it->ii_flags&IAM_IT_MOVE); */
1453         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1454                     it_state(it) == IAM_IT_SKEWED);
1455
1456         path = &it->ii_path;
1457         leaf = &path->ip_leaf;
1458
1459         assert_corr(iam_leaf_is_locked(leaf));
1460
1461         result = 0;
1462         do_corr(ik_orig = it_at_rec(it) ?
1463                 iam_it_ikey_get(it, iam_path_ikey(path, 2)) : NULL);
1464         if (it_before(it)) {
1465                 assert_corr(!iam_leaf_at_end(leaf));
1466                 it->ii_state = IAM_IT_ATTACHED;
1467         } else {
1468                 if (!iam_leaf_at_end(leaf))
1469                         /* advance within leaf node */
1470                         iam_leaf_next(leaf);
1471                 /*
1472                  * multiple iterations may be necessary due to empty leaves.
1473                  */
1474                 while (result == 0 && iam_leaf_at_end(leaf)) {
1475                         do_corr(schedule());
1476                         /* advance index portion of the path */
1477                         result = iam_index_next(iam_it_container(it), path);
1478                         assert_corr(iam_leaf_is_locked(leaf));
1479                         if (result == 1) {
1480                                 struct dynlock_handle *lh;
1481                                 lh = iam_lock_htree(iam_it_container(it),
1482                                                     path->ip_frame->leaf,
1483                                                     DLT_WRITE);
1484                                 if (lh != NULL) {
1485                                         iam_leaf_fini(leaf);
1486                                         leaf->il_lock = lh;
1487                                         result = iam_leaf_load(path);
1488                                         if (result == 0)
1489                                                 iam_leaf_start(leaf);
1490                                 } else
1491                                         result = -ENOMEM;
1492                         } else if (result == 0)
1493                                 /* end of container reached */
1494                                 result = +1;
1495                         if (result != 0)
1496                                 iam_it_put(it);
1497                 }
1498                 if (result == 0)
1499                         it->ii_state = IAM_IT_ATTACHED;
1500         }
1501         assert_corr(ergo(result == 0, it_state(it) == IAM_IT_ATTACHED));
1502         assert_corr(ergo(result >  0, it_state(it) == IAM_IT_DETACHED));
1503         assert_corr(ergo(result == 0 && ik_orig != NULL,
1504                          it_ikeycmp(it, ik_orig) >= 0));
1505         return result;
1506 }
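/*
 * Illustrative only: a typical forward scan driven by the return convention
 * of iam_it_next() described above (names hypothetical; error handling and
 * the empty-container case are abbreviated):
 *
 *     rc = iam_it_get_at(&it, start_key);
 *     while (rc >= 0) {
 *             handle_record(iam_it_key_get(&it), iam_it_rec_get(&it));
 *             rc = iam_it_next(&it);
 *             if (rc > 0) {
 *                     rc = 0;         end of container reached
 *                     break;
 *             }
 *     }
 */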
1507
1508 /*
1509  * Return pointer to the record under iterator.
1510  *
1511  * precondition:  it_state(it) == IAM_IT_ATTACHED && it_at_rec(it)
1512  * postcondition: it_state(it) == IAM_IT_ATTACHED
1513  */
1514 struct iam_rec *iam_it_rec_get(const struct iam_iterator *it)
1515 {
1516         assert_corr(it_state(it) == IAM_IT_ATTACHED);
1517         assert_corr(it_at_rec(it));
1518         return iam_leaf_rec(&it->ii_path.ip_leaf);
1519 }
1520
1521 static void iam_it_reccpy(struct iam_iterator *it, const struct iam_rec *r)
1522 {
1523         struct iam_leaf *folio;
1524
1525         folio = &it->ii_path.ip_leaf;
1526         iam_leaf_ops(folio)->rec_set(folio, r);
1527 }
1528
1529 /*
1530  * Replace contents of record under iterator.
1531  *
1532  * precondition:  it_state(it) == IAM_IT_ATTACHED &&
1533  *                it->ii_flags&IAM_IT_WRITE
1534  * postcondition: it_state(it) == IAM_IT_ATTACHED &&
1535  *                ergo(result == 0, !memcmp(iam_it_rec_get(it), r, ...))
1536  */
1537 int iam_it_rec_set(handle_t *h,
1538                    struct iam_iterator *it, const struct iam_rec *r)
1539 {
1540         int result;
1541         struct iam_path *path;
1542         struct buffer_head *bh;
1543
1544         assert_corr(it_state(it) == IAM_IT_ATTACHED &&
1545                     it->ii_flags&IAM_IT_WRITE);
1546         assert_corr(it_at_rec(it));
1547
1548         path = &it->ii_path;
1549         bh   = path->ip_leaf.il_bh;
1550         result = iam_txn_add(h, path, bh);
1551         if (result == 0) {
1552                 iam_it_reccpy(it, r);
1553                 result = iam_txn_dirty(h, path, bh);
1554         }
1555         return result;
1556 }
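/*
 * A minimal usage sketch (assumed transaction handle @h, container @c, key
 * @k, new record @r and path descriptor @pd), mirroring iam_update() below:
 * position a write iterator on the record and overwrite it in place.
 *
 *	struct iam_iterator it;
 *	int rc;
 *
 *	iam_it_init(&it, c, IAM_IT_WRITE, pd);
 *	rc = iam_it_get_exact(&it, k);
 *	if (rc == 0)
 *		rc = iam_it_rec_set(h, &it, r);
 *	iam_it_put(&it);
 *	iam_it_fini(&it);
 */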
1557
1558 /*
1559  * Return pointer to the index key under iterator.
1560  *
1561  * precondition:  it_state(it) == IAM_IT_ATTACHED ||
1562  *                it_state(it) == IAM_IT_SKEWED
1563  */
1564 static struct iam_ikey *iam_it_ikey_get(const struct iam_iterator *it,
1565                                         struct iam_ikey *ikey)
1566 {
1567         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1568                     it_state(it) == IAM_IT_SKEWED);
1569         assert_corr(it_at_rec(it));
1570         return iam_leaf_ikey(&it->ii_path.ip_leaf, ikey);
1571 }
1572
1573 /*
1574  * Return pointer to the key under iterator.
1575  *
1576  * precondition:  it_state(it) == IAM_IT_ATTACHED ||
1577  *                it_state(it) == IAM_IT_SKEWED
1578  */
1579 struct iam_key *iam_it_key_get(const struct iam_iterator *it)
1580 {
1581         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1582                     it_state(it) == IAM_IT_SKEWED);
1583         assert_corr(it_at_rec(it));
1584         return iam_leaf_key(&it->ii_path.ip_leaf);
1585 }
1586
1587 /*
1588  * Return size of key under iterator (in bytes)
1589  *
1590  * precondition:  it_state(it) == IAM_IT_ATTACHED ||
1591  *                it_state(it) == IAM_IT_SKEWED
1592  */
1593 int iam_it_key_size(const struct iam_iterator *it)
1594 {
1595         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1596                     it_state(it) == IAM_IT_SKEWED);
1597         assert_corr(it_at_rec(it));
1598         return iam_leaf_key_size(&it->ii_path.ip_leaf);
1599 }
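/*
 * A minimal sketch (hypothetical buffer @keybuf, assumed large enough; @it is
 * an attached iterator): copy the current key out of the leaf before the
 * iterator is released, since the returned pointer refers to leaf storage.
 *
 *	memcpy(keybuf, iam_it_key_get(it), iam_it_key_size(it));
 */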
1600
1601 static struct buffer_head *
1602 iam_new_node(handle_t *h, struct iam_container *c, iam_ptr_t *b, int *e)
1603 {
1604         struct inode *inode = c->ic_object;
1605         struct buffer_head *bh = NULL;
1606         struct iam_idle_head *head;
1607         struct buffer_head *idle;
1608         __u32 *idle_blocks;
1609         __u16 count;
1610
1611         if (c->ic_idle_bh == NULL)
1612                 goto newblock;
1613
1614         mutex_lock(&c->ic_idle_mutex);
1615         if (unlikely(c->ic_idle_bh == NULL)) {
1616                 mutex_unlock(&c->ic_idle_mutex);
1617                 goto newblock;
1618         }
1619
1620         head = (struct iam_idle_head *)(c->ic_idle_bh->b_data);
1621         count = le16_to_cpu(head->iih_count);
1622         if (count > 0) {
1623                 *e = ldiskfs_journal_get_write_access(h, c->ic_idle_bh);
1624                 if (*e != 0)
1625                         goto fail;
1626
1627                 --count;
1628                 *b = le32_to_cpu(head->iih_blks[count]);
1629                 head->iih_count = cpu_to_le16(count);
1630                 *e = ldiskfs_handle_dirty_metadata(h, inode, c->ic_idle_bh);
1631                 if (*e != 0)
1632                         goto fail;
1633
1634                 mutex_unlock(&c->ic_idle_mutex);
1635                 bh = ldiskfs_bread(NULL, inode, *b, 0, e);
1636                 if (bh == NULL) {
1637                         *e = *e ? *e : -EIO;
1638                         return NULL;
1639                 }
1640                 goto got;
1641         }
1642
1643         /* The block that contains the iam_idle_head is itself an idle
1644          * block, and can be used as the new node. */
1645         idle_blocks = (__u32 *)(c->ic_root_bh->b_data +
1646                                 c->ic_descr->id_root_gap +
1647                                 sizeof(struct dx_countlimit));
1648         *e = ldiskfs_journal_get_write_access(h, c->ic_root_bh);
1649         if (*e != 0)
1650                 goto fail;
1651
1652         *b = le32_to_cpu(*idle_blocks);
1653         iam_lock_bh(c->ic_root_bh);
1654         *idle_blocks = head->iih_next;
1655         iam_unlock_bh(c->ic_root_bh);
1656         *e = ldiskfs_handle_dirty_metadata(h, inode, c->ic_root_bh);
1657         if (*e != 0) {
1658                 iam_lock_bh(c->ic_root_bh);
1659                 *idle_blocks = cpu_to_le32(*b);
1660                 iam_unlock_bh(c->ic_root_bh);
1661                 goto fail;
1662         }
1663
1664         bh = c->ic_idle_bh;
1665         idle = iam_load_idle_blocks(c, le32_to_cpu(*idle_blocks));
1666         if (idle != NULL && IS_ERR(idle)) {
1667                 *e = PTR_ERR(idle);
1668                 c->ic_idle_bh = NULL;
1669                 brelse(bh);
1670                 goto fail;
1671         }
1672
1673         c->ic_idle_bh = idle;
1674         mutex_unlock(&c->ic_idle_mutex);
1675
1676 got:
1677         /* get write access for the found buffer head */
1678         *e = ldiskfs_journal_get_write_access(h, bh);
1679         if (*e != 0) {
1680                 brelse(bh);
1681                 bh = NULL;
1682                 ldiskfs_std_error(inode->i_sb, *e);
1683         } else {
1684                 /* Clear the reused node, as for a freshly allocated one. */
1685                 memset(bh->b_data, 0, inode->i_sb->s_blocksize);
1686                 set_buffer_uptodate(bh);
1687         }
1688         return bh;
1689
1690 newblock:
1691         bh = osd_ldiskfs_append(h, inode, b);
1692         if (IS_ERR(bh)) {
1693                 *e = PTR_ERR(bh);
1694                 bh = NULL;
1695         }
1696
1697         return bh;
1698
1699 fail:
1700         mutex_unlock(&c->ic_idle_mutex);
1701         ldiskfs_std_error(inode->i_sb, *e);
1702         return NULL;
1703 }
1704
1705 /*
1706  * Insertion of a new record. Interaction with jbd in the non-trivial case (when
1707  * a split happens) is as follows:
1708  *
1709  *  - the new leaf node is added to the transaction by iam_new_node();
1710  *
1711  *  - the old leaf node is added to the transaction by iam_add_rec();
1712  *
1713  *  - the leaf where the insertion point ends up is marked dirty by iam_add_rec();
1714  *
1715  *  - the leaf without the insertion point is marked dirty (as @new_leaf) by
1716  *  iam_new_leaf();
1717  *
1718  *  - split index nodes are added to the transaction and marked dirty by
1719  *  split_index_node().
1720  *
1721  *  - the "safe" index node, which is not split but where the new pointer is
1722  *  inserted, is added to the transaction and marked dirty by split_index_node().
1723  *
1724  *  - the index node where the pointer to the new leaf is inserted is added to
1725  *  the transaction by split_index_node() and marked dirty by iam_add_rec().
1726  *
1727  *  - the inode is marked dirty by iam_add_rec().
1728  *
1729  */
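/*
 * A minimal sketch of the jbd pattern referred to above (assuming, as the
 * call sites in this file suggest, that iam_txn_add() wraps
 * ldiskfs_journal_get_write_access() and iam_txn_dirty() wraps
 * ldiskfs_handle_dirty_metadata()): a buffer is added to the transaction
 * before it is modified, and marked dirty afterwards.
 *
 *	err = iam_txn_add(handle, path, bh);
 *	if (err == 0) {
 *		... modify bh->b_data ...
 *		err = iam_txn_dirty(handle, path, bh);
 *	}
 */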
1730
1731 static int iam_new_leaf(handle_t *handle, struct iam_leaf *leaf)
1732 {
1733         int err;
1734         iam_ptr_t blknr;
1735         struct buffer_head   *new_leaf;
1736         struct buffer_head   *old_leaf;
1737         struct iam_container *c;
1738         struct inode         *obj;
1739         struct iam_path      *path;
1740
1741         c = iam_leaf_container(leaf);
1742         path = leaf->il_path;
1743
1744         obj = c->ic_object;
1745         new_leaf = iam_new_node(handle, c, &blknr, &err);
1746         do_corr(schedule());
1747         if (new_leaf != NULL) {
1748                 struct dynlock_handle *lh;
1749
1750                 lh = iam_lock_htree(c, blknr, DLT_WRITE);
1751                 do_corr(schedule());
1752                 if (lh != NULL) {
1753                         iam_leaf_ops(leaf)->init_new(c, new_leaf);
1754                         do_corr(schedule());
1755                         old_leaf = leaf->il_bh;
1756                         iam_leaf_split(leaf, &new_leaf, blknr);
1757                         if (old_leaf != leaf->il_bh) {
1758                                 /*
1759                                  * Switched to the new leaf.
1760                                  */
1761                                 iam_leaf_unlock(leaf);
1762                                 leaf->il_lock = lh;
1763                                 path->ip_frame->leaf = blknr;
1764                         } else
1765                                 iam_unlock_htree(path->ip_container, lh);
1766                         do_corr(schedule());
1767                         err = iam_txn_dirty(handle, path, new_leaf);
1768                         if (err == 0)
1769                                 err = ldiskfs_mark_inode_dirty(handle, obj);
1770                         do_corr(schedule());
1771                 } else
1772                         err = -ENOMEM;
1773                 brelse(new_leaf);
1774         }
1775         assert_inv(iam_path_check(iam_leaf_path(leaf)));
1776         return err;
1777 }
1778
1779 static inline void dx_set_limit(struct iam_entry *entries, unsigned value)
1780 {
1781         ((struct dx_countlimit *) entries)->limit = cpu_to_le16(value);
1782 }
1783
1784 static int iam_shift_entries(struct iam_path *path,
1785                          struct iam_frame *frame, unsigned count,
1786                          struct iam_entry *entries, struct iam_entry *entries2,
1787                          u32 newblock)
1788 {
1789         unsigned count1;
1790         unsigned count2;
1791         int delta;
1792
1793         struct iam_frame *parent = frame - 1;
1794         struct iam_ikey *pivot = iam_path_ikey(path, 3);
1795
1796         delta = dx_index_is_compat(path) ? 0 : +1;
1797
1798         count1 = count/2 + delta;
1799         count2 = count - count1;
1800         dx_get_ikey(path, iam_entry_shift(path, entries, count1), pivot);
1801
1802         dxtrace(printk("Split index %d/%d\n", count1, count2));
1803
1804         memcpy((char *) iam_entry_shift(path, entries2, delta),
1805                (char *) iam_entry_shift(path, entries, count1),
1806                count2 * iam_entry_size(path));
1807
1808         dx_set_count(entries2, count2 + delta);
1809         dx_set_limit(entries2, dx_node_limit(path));
1810
1811         /*
1812          * NOTE: very subtle piece of code. A competing dx_probe() may find the
1813          * 2nd level index in the root index; then we insert the new index here
1814          * and set the new count in that 2nd level index. So dx_probe() may see
1815          * a 2nd level index without the hash it looks for. The solution is to
1816          * re-check the root index after locking the just-found 2nd level index -bzzz
1817          */
1818         iam_insert_key_lock(path, parent, pivot, newblock);
1819
1820         /*
1821          * now both the old and the new 2nd level index blocks contain all the
1822          * pointers, so dx_probe() may find an entry in either of them. That's OK -bzzz
1823          */
1824         iam_lock_bh(frame->bh);
1825         dx_set_count(entries, count1);
1826         iam_unlock_bh(frame->bh);
1827
1828         /*
1829          * now the old 2nd level index block points to the first half of the
1830          * leaves. It is important that dx_probe() checks the root index block
1831          * for changes under dx_lock_bh(frame->bh) -bzzz
1832          */
1833
1834         return count1;
1835 }
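/*
 * Worked example (hypothetical numbers): with count == 508 entries in the
 * full index node and delta == +1 (non-compat format), count1 == 508/2 + 1 ==
 * 255 entries remain in the old block, count2 == 508 - 255 == 253 entries
 * move to the new block, and the pivot is the ikey of entry 255.
 */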
1836
1837
1838 int split_index_node(handle_t *handle, struct iam_path *path,
1839                      struct dynlock_handle **lh)
1840 {
1841
1842         struct iam_entry *entries;   /* old block contents */
1843         struct iam_entry *entries2;  /* new block contents */
1844         struct iam_frame *frame, *safe;
1845         struct buffer_head *bh_new[DX_MAX_TREE_HEIGHT] = {NULL};
1846         u32 newblock[DX_MAX_TREE_HEIGHT] = {0};
1847         struct dynlock_handle *lock[DX_MAX_TREE_HEIGHT] = {NULL,};
1848         struct dynlock_handle *new_lock[DX_MAX_TREE_HEIGHT] = {NULL,};
1849         struct inode *dir = iam_path_obj(path);
1850         struct iam_descr *descr;
1851         int nr_splet;
1852         int i, err;
1853
1854         descr = iam_path_descr(path);
1855         /*
1856          * Algorithm below depends on this.
1857          */
1858         assert_corr(dx_root_limit(path) < dx_node_limit(path));
1859
1860         frame = path->ip_frame;
1861         entries = frame->entries;
1862
1863          * Tall-tree handling: we might have to split multiple index blocks
1864          * all the way up to the tree root. The tricky point here is error
1865          * handling: to avoid complicated undo/rollback we
1866          * to avoid complicated undo/rollback we
1867          *
1868          *   - first allocate all necessary blocks
1869          *
1870          *   - insert pointers into them atomically.
1871          */
1872
1873         /*
1874          * Locking: the leaf is already locked. htree-locks are acquired on all
1875          * index nodes that require a split, bottom-to-top, on the "safe" node,
1876          * and on all new nodes.
1877          */
1878
1879         dxtrace(printk("using %u of %u node entries\n",
1880                        dx_get_count(entries), dx_get_limit(entries)));
1881
1882         /* What levels need split? */
1883         for (nr_splet = 0; frame >= path->ip_frames &&
1884              dx_get_count(frame->entries) == dx_get_limit(frame->entries);
1885              --frame, ++nr_splet) {
1886                 do_corr(schedule());
1887                 if (nr_splet == DX_MAX_TREE_HEIGHT) {
1888                         /*
1889                         CWARN(dir->i_sb, __FUNCTION__,
1890                                      "Directory index full!\n");
1891                                      */
1892                         err = -ENOSPC;
1893                         goto cleanup;
1894                 }
1895         }
1896
1897         safe = frame;
1898
1899         /*
1900          * Lock all nodes, bottom to top.
1901          */
1902         for (frame = path->ip_frame, i = nr_splet; i >= 0; --i, --frame) {
1903                 do_corr(schedule());
1904                 lock[i] = iam_lock_htree(path->ip_container, frame->curidx,
1905                                          DLT_WRITE);
1906                 if (lock[i] == NULL) {
1907                         err = -ENOMEM;
1908                         goto cleanup;
1909                 }
1910         }
1911
1912         /*
1913          * Check for concurrent index modification.
1914          */
1915         err = iam_check_full_path(path, 1);
1916         if (err)
1917                 goto cleanup;
1918         /*
1919          * And check that the same number of nodes is to be split.
1920          */
1921         for (i = 0, frame = path->ip_frame; frame >= path->ip_frames &&
1922              dx_get_count(frame->entries) == dx_get_limit(frame->entries);
1923              --frame, ++i) {
1924                 ;
1925         }
1926         if (i != nr_splet) {
1927                 err = -EAGAIN;
1928                 goto cleanup;
1929         }
1930
1931         /* Go back down, allocating blocks, locking them, and adding them to
1932          * the transaction... */
1933         for (frame = safe + 1, i = 0; i < nr_splet; ++i, ++frame) {
1934                 bh_new[i] = iam_new_node(handle, path->ip_container,
1935                                          &newblock[i], &err);
1936                 do_corr(schedule());
1937                 if (!bh_new[i] ||
1938                     descr->id_ops->id_node_init(path->ip_container,
1939                                                 bh_new[i], 0) != 0)
1940                         goto cleanup;
1941                 new_lock[i] = iam_lock_htree(path->ip_container, newblock[i],
1942                                              DLT_WRITE);
1943                 if (new_lock[i] == NULL) {
1944                         err = -ENOMEM;
1945                         goto cleanup;
1946                 }
1947                 do_corr(schedule());
1948                 BUFFER_TRACE(frame->bh, "get_write_access");
1949                 err = ldiskfs_journal_get_write_access(handle, frame->bh);
1950                 if (err)
1951                         goto journal_error;
1952         }
1953         /* Add "safe" node to transaction too */
1954         if (safe + 1 != path->ip_frames) {
1955                 do_corr(schedule());
1956                 err = ldiskfs_journal_get_write_access(handle, safe->bh);
1957                 if (err)
1958                         goto journal_error;
1959         }
1960
1961         /* Go through nodes once more, inserting pointers */
1962         for (frame = safe + 1, i = 0; i < nr_splet; ++i, ++frame) {
1963                 unsigned count;
1964                 int idx;
1965                 struct buffer_head *bh2;
1966                 struct buffer_head *bh;
1967
1968                 entries = frame->entries;
1969                 count = dx_get_count(entries);
1970                 idx = iam_entry_diff(path, frame->at, entries);
1971
1972                 bh2 = bh_new[i];
1973                 entries2 = dx_get_entries(path, bh2->b_data, 0);
1974
1975                 bh = frame->bh;
1976                 if (frame == path->ip_frames) {
1977                         /* splitting root node. Tricky point:
1978                          *
1979                          * In the "normal" B-tree we'd split root *and* add
1980                          * new root to the tree with pointers to the old root
1981                          * and its sibling (thus introducing two new nodes).
1982                          *
1983                          * In htree it's enough to add one node, because the
1984                          * capacity of the root node is smaller than that of a
1985                          * non-root one.
1986                          */
1987                         struct iam_frame *frames;
1988                         struct iam_entry *next;
1989
1990                         assert_corr(i == 0);
1991
1992                         do_corr(schedule());
1993
1994                         frames = path->ip_frames;
1995                         memcpy((char *) entries2, (char *) entries,
1996                                count * iam_entry_size(path));
1997                         dx_set_limit(entries2, dx_node_limit(path));
1998
1999                         /* Set up root */
2000                         iam_lock_bh(frame->bh);
2001                         next = descr->id_ops->id_root_inc(path->ip_container,
2002                                                           path, frame);
2003                         dx_set_block(path, next, newblock[0]);
2004                         iam_unlock_bh(frame->bh);
2005
2006                         do_corr(schedule());
2007                         /* Shift frames in the path */
2008                         memmove(frames + 2, frames + 1,
2009                                 (sizeof path->ip_frames) - 2 * sizeof frames[0]);
2010                         /* Add new access path frame */
2011                         frames[1].at = iam_entry_shift(path, entries2, idx);
2012                         frames[1].entries = entries = entries2;
2013                         frames[1].bh = bh2;
2014                         assert_inv(dx_node_check(path, frame));
2015                         ++ path->ip_frame;
2016                         ++ frame;
2017                         assert_inv(dx_node_check(path, frame));
2018                         bh_new[0] = NULL; /* buffer head is "consumed" */
2019                         err = ldiskfs_handle_dirty_metadata(handle, NULL, bh2);
2020                         if (err)
2021                                 goto journal_error;
2022                         do_corr(schedule());
2023                 } else {
2024                         /* splitting non-root index node. */
2025                         struct iam_frame *parent = frame - 1;
2026
2027                         do_corr(schedule());
2028                         count = iam_shift_entries(path, frame, count,
2029                                               entries, entries2, newblock[i]);
2030                         /* Which index block gets the new entry? */
2031                         if (idx >= count) {
2032                                 int d = dx_index_is_compat(path) ? 0 : +1;
2033
2034                                 frame->at = iam_entry_shift(path, entries2,
2035                                                             idx - count + d);
2036                                 frame->entries = entries = entries2;
2037                                 frame->curidx = newblock[i];
2038                                 swap(frame->bh, bh2);
2039                                 assert_corr(lock[i + 1] != NULL);
2040                                 assert_corr(new_lock[i] != NULL);
2041                                 swap(lock[i + 1], new_lock[i]);
2042                                 bh_new[i] = bh2;
2043                                 parent->at = iam_entry_shift(path,
2044                                                              parent->at, +1);
2045                         }
2046                         assert_inv(dx_node_check(path, frame));
2047                         assert_inv(dx_node_check(path, parent));
2048                         dxtrace(dx_show_index ("node", frame->entries));
2049                         dxtrace(dx_show_index ("node",
2050                                ((struct dx_node *) bh2->b_data)->entries));
2051                         err = ldiskfs_handle_dirty_metadata(handle, NULL, bh2);
2052                         if (err)
2053                                 goto journal_error;
2054                         do_corr(schedule());
2055                         err = ldiskfs_handle_dirty_metadata(handle, NULL,
2056                                                             parent->bh);
2057                         if (err)
2058                                 goto journal_error;
2059                 }
2060                 do_corr(schedule());
2061                 err = ldiskfs_handle_dirty_metadata(handle, NULL, bh);
2062                 if (err)
2063                         goto journal_error;
2064         }
2065         /*
2066          * This function was called to make insertion of a new leaf
2067          * possible. Check that it fulfilled its obligations.
2068          */
2069         assert_corr(dx_get_count(path->ip_frame->entries) <
2070                     dx_get_limit(path->ip_frame->entries));
2071         assert_corr(lock[nr_splet] != NULL);
2072         *lh = lock[nr_splet];
2073         lock[nr_splet] = NULL;
2074         if (nr_splet > 0) {
2075                 /*
2076                  * Log ->i_size modification.
2077                  */
2078                 err = ldiskfs_mark_inode_dirty(handle, dir);
2079                 if (err)
2080                         goto journal_error;
2081         }
2082         goto cleanup;
2083 journal_error:
2084         ldiskfs_std_error(dir->i_sb, err);
2085
2086 cleanup:
2087         iam_unlock_array(path->ip_container, lock);
2088         iam_unlock_array(path->ip_container, new_lock);
2089
2090         assert_corr(err || iam_frame_is_locked(path, path->ip_frame));
2091
2092         do_corr(schedule());
2093         for (i = 0; i < ARRAY_SIZE(bh_new); ++i) {
2094                 if (bh_new[i] != NULL)
2095                         brelse(bh_new[i]);
2096         }
2097         return err;
2098 }
2099
2100 static int iam_add_rec(handle_t *handle, struct iam_iterator *it,
2101                        struct iam_path *path,
2102                        const struct iam_key *k, const struct iam_rec *r)
2103 {
2104         int err;
2105         struct iam_leaf *leaf;
2106
2107         leaf = &path->ip_leaf;
2108         assert_inv(iam_path_check(path));
2109         err = iam_txn_add(handle, path, leaf->il_bh);
2110         if (err == 0) {
2111                 do_corr(schedule());
2112                 if (!iam_leaf_can_add(leaf, k, r)) {
2113                         struct dynlock_handle *lh = NULL;
2114
2115                         do {
2116                                 assert_corr(lh == NULL);
2117                                 do_corr(schedule());
2118                                 err = split_index_node(handle, path, &lh);
2119                                 if (err == -EAGAIN) {
2120                                         assert_corr(lh == NULL);
2121
2122                                         iam_path_fini(path);
2123                                         it->ii_state = IAM_IT_DETACHED;
2124
2125                                         do_corr(schedule());
2126                                         err = iam_it_get_exact(it, k);
2127                                         if (err == -ENOENT)
2128                                                 err = +1; /* repeat split */
2129                                         else if (err == 0)
2130                                                 err = -EEXIST;
2131                                 }
2132                         } while (err > 0);
2133                         assert_inv(iam_path_check(path));
2134                         if (err == 0) {
2135                                 assert_corr(lh != NULL);
2136                                 do_corr(schedule());
2137                                 err = iam_new_leaf(handle, leaf);
2138                                 if (err == 0)
2139                                         err = iam_txn_dirty(handle, path,
2140                                                             path->ip_frame->bh);
2141                         }
2142                         iam_unlock_htree(path->ip_container, lh);
2143                         do_corr(schedule());
2144                 }
2145                 if (err == 0) {
2146                         iam_leaf_rec_add(leaf, k, r);
2147                         err = iam_txn_dirty(handle, path, leaf->il_bh);
2148                 }
2149         }
2150         assert_inv(iam_path_check(path));
2151         return err;
2152 }
2153
2154 /*
2155  * Insert new record with key @k and contents from @r, shifting records to the
2156  * right. On success, iterator is positioned on the newly inserted record.
2157  *
2158  * precondition: it->ii_flags&IAM_IT_WRITE &&
2159  *               (it_state(it) == IAM_IT_ATTACHED ||
2160  *                it_state(it) == IAM_IT_SKEWED) &&
2161  *               ergo(it_state(it) == IAM_IT_ATTACHED,
2162  *                    it_keycmp(it, k) <= 0) &&
2163  *               ergo(it_before(it), it_keycmp(it, k) > 0));
2164  * postcondition: ergo(result == 0,
2165  *                     it_state(it) == IAM_IT_ATTACHED &&
2166  *                     it_keycmp(it, k) == 0 &&
2167  *                     !memcmp(iam_it_rec_get(it), r, ...))
2168  */
2169 int iam_it_rec_insert(handle_t *h, struct iam_iterator *it,
2170                       const struct iam_key *k, const struct iam_rec *r)
2171 {
2172         int result;
2173         struct iam_path *path;
2174
2175         path = &it->ii_path;
2176
2177         assert_corr(it->ii_flags&IAM_IT_WRITE);
2178         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
2179                     it_state(it) == IAM_IT_SKEWED);
2180         assert_corr(ergo(it_state(it) == IAM_IT_ATTACHED,
2181                          it_keycmp(it, k) <= 0));
2182         assert_corr(ergo(it_before(it), it_keycmp(it, k) > 0));
2183         result = iam_add_rec(h, it, path, k, r);
2184         if (result == 0)
2185                 it->ii_state = IAM_IT_ATTACHED;
2186         assert_corr(ergo(result == 0,
2187                          it_state(it) == IAM_IT_ATTACHED &&
2188                          it_keycmp(it, k) == 0));
2189         return result;
2190 }
2191
2192 static inline int iam_idle_blocks_limit(struct inode *inode)
2193 {
2194         return (inode->i_sb->s_blocksize - sizeof(struct iam_idle_head)) >> 2;
2195 }
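/*
 * Worked example (assuming a 4096-byte block and an 8-byte struct
 * iam_idle_head holding iih_magic, iih_count and iih_next): the limit is
 * (4096 - 8) >> 2 == 1022 idle block numbers per idle-head block.
 */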
2196
2197 /*
2198  * If the leaf cannot be recycled, we will lose one block for reuse.
2199  * It is not a serious issue because the result is almost the same as not recycling.
2200  */
2201 static iam_ptr_t iam_index_shrink(handle_t *h, struct iam_path *p,
2202                                   struct iam_leaf *l, struct buffer_head **bh)
2203 {
2204         struct iam_container *c = p->ip_container;
2205         struct inode *inode = c->ic_object;
2206         struct iam_frame *frame = p->ip_frame;
2207         struct iam_entry *entries;
2208         struct iam_entry *pos;
2209         struct dynlock_handle *lh;
2210         int count;
2211         int rc;
2212
2213         if (c->ic_idle_failed)
2214                 return 0;
2215
2216         if (unlikely(frame == NULL))
2217                 return 0;
2218
2219         if (!iam_leaf_empty(l))
2220                 return 0;
2221
2222         lh = iam_lock_htree(c, frame->curidx, DLT_WRITE);
2223         if (lh == NULL) {
2224                 CWARN("%.16s: No memory to recycle idle blocks\n",
2225                       LDISKFS_SB(inode->i_sb)->s_es->s_volume_name);
2226                 return 0;
2227         }
2228
2229         rc = iam_txn_add(h, p, frame->bh);
2230         if (rc != 0) {
2231                 iam_unlock_htree(c, lh);
2232                 return 0;
2233         }
2234
2235         iam_lock_bh(frame->bh);
2236         entries = frame->entries;
2237         count = dx_get_count(entries);
2238         /* Do NOT shrink the last entry in the index node; it can be reused
2239          * directly by the next new node. */
2240         if (count == 2) {
2241                 iam_unlock_bh(frame->bh);
2242                 iam_unlock_htree(c, lh);
2243                 return 0;
2244         }
2245
2246         pos = iam_find_position(p, frame);
2247         /* Some new leaf nodes may have been added, or empty leaf nodes may
2248          * have been shrunk, during this delete operation.
2249          *
2250          * If the empty leaf is not under the current index node because the index
2251          * node has been split, then just skip the empty leaf; this is rare. */
2252         if (unlikely(frame->leaf != dx_get_block(p, pos))) {
2253                 iam_unlock_bh(frame->bh);
2254                 iam_unlock_htree(c, lh);
2255                 return 0;
2256         }
2257
2258         frame->at = pos;
2259         if (frame->at < iam_entry_shift(p, entries, count - 1)) {
2260                 struct iam_entry *n = iam_entry_shift(p, frame->at, 1);
2261
2262                 memmove(frame->at, n,
2263                         (char *)iam_entry_shift(p, entries, count) - (char *)n);
2264                 frame->at_shifted = 1;
2265         }
2266         dx_set_count(entries, count - 1);
2267         iam_unlock_bh(frame->bh);
2268         rc = iam_txn_dirty(h, p, frame->bh);
2269         iam_unlock_htree(c, lh);
2270         if (rc != 0)
2271                 return 0;
2272
2273         get_bh(l->il_bh);
2274         *bh = l->il_bh;
2275         return frame->leaf;
2276 }
2277
2278 static int
2279 iam_install_idle_blocks(handle_t *h, struct iam_path *p, struct buffer_head *bh,
2280                         __u32 *idle_blocks, iam_ptr_t blk)
2281 {
2282         struct iam_container *c = p->ip_container;
2283         struct buffer_head *old = c->ic_idle_bh;
2284         struct iam_idle_head *head;
2285         int rc;
2286
2287         head = (struct iam_idle_head *)(bh->b_data);
2288         head->iih_magic = cpu_to_le16(IAM_IDLE_HEADER_MAGIC);
2289         head->iih_count = 0;
2290         head->iih_next = *idle_blocks;
2291         /* get_write_access has already been called for this bh. */
2292         rc = iam_txn_dirty(h, p, bh);
2293         if (rc != 0)
2294                 return rc;
2295
2296         rc = iam_txn_add(h, p, c->ic_root_bh);
2297         if (rc != 0)
2298                 return rc;
2299
2300         iam_lock_bh(c->ic_root_bh);
2301         *idle_blocks = cpu_to_le32(blk);
2302         iam_unlock_bh(c->ic_root_bh);
2303         rc = iam_txn_dirty(h, p, c->ic_root_bh);
2304         if (rc == 0) {
2305                 /* Do NOT release the old bh before the new one is assigned. */
2306                 get_bh(bh);
2307                 c->ic_idle_bh = bh;
2308                 brelse(old);
2309         } else {
2310                 iam_lock_bh(c->ic_root_bh);
2311                 *idle_blocks = head->iih_next;
2312                 iam_unlock_bh(c->ic_root_bh);
2313         }
2314         return rc;
2315 }
2316
2317 /*
2318  * If the leaf cannot be recycled, we will lose one block for reuse.
2319  * It is not a serious issue because the result is almost the same as not recycling.
2320  */
2321 static void iam_recycle_leaf(handle_t *h, struct iam_path *p,
2322                              struct buffer_head *bh, iam_ptr_t blk)
2323 {
2324         struct iam_container *c = p->ip_container;
2325         struct inode *inode = c->ic_object;
2326         struct iam_idle_head *head;
2327         __u32 *idle_blocks;
2328         int count;
2329         int rc;
2330
2331         mutex_lock(&c->ic_idle_mutex);
2332         if (unlikely(c->ic_idle_failed)) {
2333                 rc = -EFAULT;
2334                 goto unlock;
2335         }
2336
2337         idle_blocks = (__u32 *)(c->ic_root_bh->b_data +
2338                                 c->ic_descr->id_root_gap +
2339                                 sizeof(struct dx_countlimit));
2340         /* It is the first idle block. */
2341         if (c->ic_idle_bh == NULL) {
2342                 rc = iam_install_idle_blocks(h, p, bh, idle_blocks, blk);
2343                 goto unlock;
2344         }
2345
2346         head = (struct iam_idle_head *)(c->ic_idle_bh->b_data);
2347         count = le16_to_cpu(head->iih_count);
2348         /* Current ic_idle_bh is full, to be replaced by the leaf. */
2349         if (count == iam_idle_blocks_limit(inode)) {
2350                 rc = iam_install_idle_blocks(h, p, bh, idle_blocks, blk);
2351                 goto unlock;
2352         }
2353
2354         /* Just add to ic_idle_bh. */
2355         rc = iam_txn_add(h, p, c->ic_idle_bh);
2356         if (rc != 0)
2357                 goto unlock;
2358
2359         head->iih_blks[count] = cpu_to_le32(blk);
2360         head->iih_count = cpu_to_le16(count + 1);
2361         rc = iam_txn_dirty(h, p, c->ic_idle_bh);
2362
2363 unlock:
2364         mutex_unlock(&c->ic_idle_mutex);
2365         if (rc != 0)
2366                 CWARN("%.16s: idle blocks failed, will lose the blk %u\n",
2367                       LDISKFS_SB(inode->i_sb)->s_es->s_volume_name, blk);
2368 }
2369
2370 /*
2371  * Delete record under iterator.
2372  *
2373  * precondition:  it_state(it) == IAM_IT_ATTACHED &&
2374  *                it->ii_flags&IAM_IT_WRITE &&
2375  *                it_at_rec(it)
2376  * postcondition: it_state(it) == IAM_IT_ATTACHED ||
2377  *                it_state(it) == IAM_IT_DETACHED
2378  */
2379 int iam_it_rec_delete(handle_t *h, struct iam_iterator *it)
2380 {
2381         int result;
2382         struct iam_leaf *leaf;
2383         struct iam_path *path;
2384
2385         assert_corr(it_state(it) == IAM_IT_ATTACHED &&
2386                     it->ii_flags&IAM_IT_WRITE);
2387         assert_corr(it_at_rec(it));
2388
2389         path = &it->ii_path;
2390         leaf = &path->ip_leaf;
2391
2392         assert_inv(iam_path_check(path));
2393
2394         result = iam_txn_add(h, path, leaf->il_bh);
2395         /*
2396          * no compaction for now.
2397          */
2398         if (result == 0) {
2399                 iam_rec_del(leaf, it->ii_flags&IAM_IT_MOVE);
2400                 result = iam_txn_dirty(h, path, leaf->il_bh);
2401                 if (result == 0 && iam_leaf_at_end(leaf)) {
2402                         struct buffer_head *bh = NULL;
2403                         iam_ptr_t blk;
2404
2405                         blk = iam_index_shrink(h, path, leaf, &bh);
2406                         if (it->ii_flags & IAM_IT_MOVE) {
2407                                 result = iam_it_next(it);
2408                                 if (result > 0)
2409                                         result = 0;
2410                         }
2411
2412                         if (bh != NULL) {
2413                                 iam_recycle_leaf(h, path, bh, blk);
2414                                 brelse(bh);
2415                         }
2416                 }
2417         }
2418         assert_inv(iam_path_check(path));
2419         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
2420                     it_state(it) == IAM_IT_DETACHED);
2421         return result;
2422 }
2423
2424 /*
2425  * Convert iterator to cookie.
2426  *
2427  * precondition:  it_state(it) == IAM_IT_ATTACHED &&
2428  *                iam_path_descr(&it->ii_path)->id_ikey_size <= sizeof(iam_pos_t)
2429  * postcondition: it_state(it) == IAM_IT_ATTACHED
2430  */
2431 iam_pos_t iam_it_store(const struct iam_iterator *it)
2432 {
2433         iam_pos_t result;
2434
2435         assert_corr(it_state(it) == IAM_IT_ATTACHED);
2436         assert_corr(it_at_rec(it));
2437         assert_corr(iam_it_container(it)->ic_descr->id_ikey_size <=
2438                     sizeof result);
2439
2440         result = 0;
2441         return *(iam_pos_t *)iam_it_ikey_get(it, (void *)&result);
2442 }
2443
2444 /*
2445  * Restore iterator from cookie.
2446  *
2447  * precondition:  it_state(it) == IAM_IT_DETACHED && it->ii_flags&IAM_IT_MOVE &&
2448  *                iam_path_descr(&it->ii_path)->id_ikey_size <= sizeof(iam_pos_t)
2449  * postcondition: ergo(result == 0, it_state(it) == IAM_IT_ATTACHED &&
2450  *                                  iam_it_store(it) == pos)
2451  */
2452 int iam_it_load(struct iam_iterator *it, iam_pos_t pos)
2453 {
2454         assert_corr(it_state(it) == IAM_IT_DETACHED &&
2455                     it->ii_flags&IAM_IT_MOVE);
2456         assert_corr(iam_it_container(it)->ic_descr->id_ikey_size <= sizeof pos);
2457         return iam_it_iget(it, (struct iam_ikey *)&pos);
2458 }
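/*
 * A minimal sketch (assumed iterator @it opened with IAM_IT_MOVE and
 * currently attached at a record): save the position as a cookie, detach,
 * and later resume from the same place, e.g. across readdir-style calls.
 *
 *	iam_pos_t pos;
 *	int rc;
 *
 *	pos = iam_it_store(it);
 *	iam_it_put(it);
 *	...
 *	rc = iam_it_load(it, pos);
 */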
2459
2460 /***********************************************************************/
2461 /* invariants                                                          */
2462 /***********************************************************************/
2463
2464 static inline int ptr_inside(void *base, size_t size, void *ptr)
2465 {
2466         return (base <= ptr) && (ptr < base + size);
2467 }
2468
2469 static int iam_frame_invariant(struct iam_frame *f)
2470 {
2471         return
2472                 (f->bh != NULL &&
2473                 f->bh->b_data != NULL &&
2474                 ptr_inside(f->bh->b_data, f->bh->b_size, f->entries) &&
2475                 ptr_inside(f->bh->b_data, f->bh->b_size, f->at) &&
2476                 f->entries <= f->at);
2477 }
2478
2479 static int iam_leaf_invariant(struct iam_leaf *l)
2480 {
2481         return
2482                 l->il_bh != NULL &&
2483                 l->il_bh->b_data != NULL &&
2484                 ptr_inside(l->il_bh->b_data, l->il_bh->b_size, l->il_entries) &&
2485                 ptr_inside(l->il_bh->b_data, l->il_bh->b_size, l->il_at) &&
2486                 l->il_entries <= l->il_at;
2487 }
2488
2489 static int iam_path_invariant(struct iam_path *p)
2490 {
2491         int i;
2492
2493         if (p->ip_container == NULL ||
2494             p->ip_indirect < 0 || p->ip_indirect > DX_MAX_TREE_HEIGHT - 1 ||
2495             p->ip_frame != p->ip_frames + p->ip_indirect ||
2496             !iam_leaf_invariant(&p->ip_leaf))
2497                 return 0;
2498         for (i = 0; i < ARRAY_SIZE(p->ip_frames); ++i) {
2499                 if (i <= p->ip_indirect) {
2500                         if (!iam_frame_invariant(&p->ip_frames[i]))
2501                                 return 0;
2502                 }
2503         }
2504         return 1;
2505 }
2506
2507 int iam_it_invariant(struct iam_iterator *it)
2508 {
2509         return
2510                 (it->ii_state == IAM_IT_DETACHED ||
2511                  it->ii_state == IAM_IT_ATTACHED ||
2512                  it->ii_state == IAM_IT_SKEWED) &&
2513                 !(it->ii_flags & ~(IAM_IT_MOVE | IAM_IT_WRITE)) &&
2514                 ergo(it->ii_state == IAM_IT_ATTACHED ||
2515                      it->ii_state == IAM_IT_SKEWED,
2516                      iam_path_invariant(&it->ii_path) &&
2517                      equi(it_at_rec(it), it->ii_state == IAM_IT_SKEWED));
2518 }
2519
2520 /*
2521  * Search container @c for record with key @k. If record is found, its data
2522  * are moved into @r.
2523  *
2524  * Return values: 0: found, -ENOENT: not-found, -ve: error
2525  */
2526 int iam_lookup(struct iam_container *c, const struct iam_key *k,
2527                struct iam_rec *r, struct iam_path_descr *pd)
2528 {
2529         struct iam_iterator it;
2530         int result;
2531
2532         iam_it_init(&it, c, 0, pd);
2533
2534         result = iam_it_get_exact(&it, k);
2535         if (result == 0)
2536                 /*
2537                  * record with required key found, copy it into user buffer
2538                  */
2539                 iam_reccpy(&it.ii_path.ip_leaf, r);
2540         iam_it_put(&it);
2541         iam_it_fini(&it);
2542         return result;
2543 }
2544
2545 /*
2546  * Insert new record @r with key @k into container @c (within context of
2547  * transaction @h).
2548  *
2549  * Return values: 0: success, -ve: error, including -EEXIST when record with
2550  * given key is already present.
2551  *
2552  * postcondition: ergo(result == 0 || result == -EEXIST,
2553  *                     iam_lookup(c, k, r2) == 0);
2554  */
2555 int iam_insert(handle_t *h, struct iam_container *c, const struct iam_key *k,
2556                const struct iam_rec *r, struct iam_path_descr *pd)
2557 {
2558         struct iam_iterator it;
2559         int result;
2560
2561         iam_it_init(&it, c, IAM_IT_WRITE, pd);
2562
2563         result = iam_it_get_exact(&it, k);
2564         if (result == -ENOENT)
2565                 result = iam_it_rec_insert(h, &it, k, r);
2566         else if (result == 0)
2567                 result = -EEXIST;
2568         iam_it_put(&it);
2569         iam_it_fini(&it);
2570         return result;
2571 }
2572
2573 /*
2574  * Update record with the key @k in container @c (within context of
2575  * transaction @h), new record is given by @r.
2576  *
2577  * Return values: +1: skipped because the record value is unchanged, 0: success,
2578  * -ve: error, including -ENOENT if no record with the given key found.
2579  */
2580 int iam_update(handle_t *h, struct iam_container *c, const struct iam_key *k,
2581                const struct iam_rec *r, struct iam_path_descr *pd)
2582 {
2583         struct iam_iterator it;
2584         struct iam_leaf *folio;
2585         int result;
2586
2587         iam_it_init(&it, c, IAM_IT_WRITE, pd);
2588
2589         result = iam_it_get_exact(&it, k);
2590         if (result == 0) {
2591                 folio = &it.ii_path.ip_leaf;
2592                 result = iam_leaf_ops(folio)->rec_eq(folio, r);
2593                 if (result == 0)
2594                         iam_it_rec_set(h, &it, r);
2595                 else
2596                         result = 1;
2597         }
2598         iam_it_put(&it);
2599         iam_it_fini(&it);
2600         return result;
2601 }
2602
2603 /*
2604  * Delete existing record with key @k.
2605  *
2606  * Return values: 0: success, -ENOENT: not-found, -ve: other error.
2607  *
2608  * postcondition: ergo(result == 0 || result == -ENOENT,
2609  *                                 !iam_lookup(c, k, *));
2610  */
2611 int iam_delete(handle_t *h, struct iam_container *c, const struct iam_key *k,
2612                struct iam_path_descr *pd)
2613 {
2614         struct iam_iterator it;
2615         int result;
2616
2617         iam_it_init(&it, c, IAM_IT_WRITE, pd);
2618
2619         result = iam_it_get_exact(&it, k);
2620         if (result == 0)
2621                 iam_it_rec_delete(h, &it);
2622         iam_it_put(&it);
2623         iam_it_fini(&it);
2624         return result;
2625 }
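/*
 * A minimal sketch (hypothetical key/record buffers @k, @r, @r_out, @r_new
 * and an open transaction handle @h) of the typical top-level sequence built
 * from the helpers above:
 *
 *	rc = iam_insert(h, c, k, r, pd);	/* -EEXIST if already present */
 *	rc = iam_lookup(c, k, r_out, pd);	/* 0 when found */
 *	rc = iam_update(h, c, k, r_new, pd);	/* +1 if record unchanged */
 *	rc = iam_delete(h, c, k, pd);		/* -ENOENT if absent */
 */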
2626
2627 int iam_root_limit(int rootgap, int blocksize, int size)
2628 {
2629         int limit;
2630         int nlimit;
2631
2632         limit = (blocksize - rootgap) / size;
2633         nlimit = blocksize / size;
2634         if (limit == nlimit)
2635                 limit--;
2636         return limit;
2637 }
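/*
 * Worked example (hypothetical sizes): with blocksize == 4096, rootgap == 0
 * and an 8-byte entry, limit == nlimit == 512, so the limit is decremented
 * to 511; this keeps dx_root_limit() strictly below dx_node_limit(), which
 * split_index_node() asserts above.
 */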