1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see [sun.com URL with a
18  * copy of GPLv2].
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2011, 2014, Intel Corporation.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * iam.c
37  * Top-level entry points into iam module
38  *
39  * Author: Wang Di <wangdi@clusterfs.com>
40  * Author: Nikita Danilov <nikita@clusterfs.com>
41  */
42
43 /*
44  * iam: big theory statement.
45  *
46  * iam (Index Access Module) is a module providing abstraction of persistent
47  * transactional container on top of generalized ldiskfs htree.
48  *
49  * iam supports:
50  *
51  *     - key, pointer, and record size specifiable per container.
52  *
53  *     - trees taller than 2 index levels.
54  *
55  *     - read/write to existing ldiskfs htree directories as iam containers.
56  *
57  * iam container is a tree, consisting of leaf nodes containing keys and
58  * records stored in this container, and index nodes, containing keys and
59  * pointers to leaf or index nodes.
60  *
61  * iam does not work with keys directly; instead it calls a user-supplied key
62  * comparison function (->dpo_keycmp()).
63  *
64  * Pointers are (currently) interpreted as logical offsets (measured in
65  * blocks) within the underlying flat file on top of which the iam tree lives.
66  *
67  * On-disk format:
68  *
69  * iam mostly tries to reuse existing htree formats.
70  *
71  * Format of index node:
72  *
73  * +-----+-------+-------+-------+------+-------+------------+
74  * |     | count |       |       |      |       |            |
75  * | gap |   /   | entry | entry | .... | entry | free space |
76  * |     | limit |       |       |      |       |            |
77  * +-----+-------+-------+-------+------+-------+------------+
78  *
79  *       gap           this part of node is never accessed by iam code. It
80  *                     exists for binary compatibility with ldiskfs htree (that,
81  *                     in turn, stores fake struct ext2_dirent for ext2
82  *                     compatibility), and to keep some unspecified per-node
83  *                     data. Gap can be different for root and non-root index
84  *                     nodes. Gap size can be specified for each container
85  *                     (gap of 0 is allowed).
86  *
87  *       count/limit   current number of entries in this node, and the maximal
88  *                     number of entries that can fit into node. count/limit
89  *                     has the same size as entry, and is itself counted in
90  *                     count.
91  *
92  *       entry         index entry: consists of a key immediately followed by
93  *                     a pointer to a child node. Size of a key and size of a
94  *                     pointer depends on container. Entry has neither
95  *                     alignment nor padding.
96  *
97  *       free space    portion of the node that new entries are added to
98  *
99  * Entries in index node are sorted by their key value.
100  *
101  * Format of a leaf node is not specified. Generic iam code accesses leaf
102  * nodes through ->id_leaf methods in struct iam_descr.
103  *
104  * The IAM root block is a special node, which contains the IAM descriptor.
105  * Its on-disk format is:
106  *
107  * +---------+-------+--------+---------+-------+------+-------+------------+
108  * |IAM desc | count |  idle  |         |       |      |       |            |
109  * |(fix/var)|   /   | blocks | padding | entry | .... | entry | free space |
110  * |         | limit |        |         |       |      |       |            |
111  * +---------+-------+--------+---------+-------+------+-------+------------+
112  *
113  * The padding length is calculated with the parameters in the IAM descriptor.
114  *
115  * The field "idle_blocks" records empty leaf nodes: nodes from which all
116  * entries have been removed but which have not been released back to the
117  * file system. Ideally such idle blocks would be reused whenever new leaf
118  * nodes are needed for new entries, but that would require the IAM hash
119  * functions to map the new entries onto exactly these blocks. Designing
120  * hash functions with such a property is hard, especially without hurting
121  * insert/lookup performance.
122  *
123  * Instead, the IAM recycles empty leaf nodes through a per-file pool of
124  * idle blocks. When a new leaf node is needed, a block is taken from this
125  * pool first, regardless of where the IAM hash functions would map the
126  * entry.
127  *
128  * The idle blocks pool is organized as a series of tables, and each table
129  * can be described as following (on-disk format):
130  *
131  * +---------+---------+---------+---------+------+---------+-------+
132  * |  magic  |  count  |  next   |  logic  |      |  logic  | free  |
133  * |(16 bits)|(16 bits)|  table  |  blk #  | .... |  blk #  | space |
134  * |         |         |(32 bits)|(32 bits)|      |(32 bits)|       |
135  * +---------+---------+---------+---------+------+---------+-------+
136  *
137  * The logical blk# of the first table is stored in the root node field "idle_blocks".
138  *
139  */
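/*
 * For illustration only: the idle-blocks table layout above maps naturally
 * onto a little-endian header followed by an array of logical block numbers.
 * The canonical definition is struct iam_idle_head (see osd_internal.h); of
 * the field names below only iih_magic is referenced in this file, the rest
 * are an illustrative sketch rather than the authoritative layout:
 *
 *     struct iam_idle_head_sketch {
 *             __le16 iih_magic;    // IAM_IDLE_HEADER_MAGIC
 *             __le16 iih_count;    // number of idle blocks listed in this table
 *             __le32 iih_next;     // logical blk# of the next table, 0 if none
 *             __le32 iih_blks[0];  // idle leaf blocks, up to the end of the block
 *     };
 */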
140
141 #include <linux/module.h>
142 #include <linux/fs.h>
143 #include <linux/pagemap.h>
144 #include <linux/time.h>
145 #include <linux/fcntl.h>
146 #include <linux/stat.h>
147 #include <linux/string.h>
148 #include <linux/quotaops.h>
149 #include <linux/buffer_head.h>
150 #include "osd_internal.h"
151
152 #include "xattr.h"
153 #include "acl.h"
154
155 /*
156  * List of all registered formats.
157  *
158  * No locking. Callers synchronize.
159  */
160 static struct list_head iam_formats = LIST_HEAD_INIT(iam_formats);
161
162 void iam_format_register(struct iam_format *fmt)
163 {
164         list_add(&fmt->if_linkage, &iam_formats);
165 }
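/*
 * A format module registers itself by embedding a struct iam_format and
 * passing it to iam_format_register().  A minimal sketch, assuming a
 * hypothetical "xxx" format (the real lfix/lvar init helpers are expected
 * to do roughly this):
 *
 *     static struct iam_format iam_xxx_format = {
 *             .if_guess = iam_xxx_guess,
 *     };
 *
 *     void iam_xxx_format_init(void)
 *     {
 *             iam_format_register(&iam_xxx_format);
 *     }
 */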
166
167 static struct buffer_head *
168 iam_load_idle_blocks(struct iam_container *c, iam_ptr_t blk)
169 {
170         struct inode *inode = c->ic_object;
171         struct iam_idle_head *head;
172         struct buffer_head *bh;
173         int err;
174
175         LASSERT(mutex_is_locked(&c->ic_idle_mutex));
176
177         if (blk == 0)
178                 return NULL;
179
180         bh = ldiskfs_bread(NULL, inode, blk, 0, &err);
181         if (bh == NULL) {
182                 CERROR("%.16s: cannot load idle blocks, blk = %u, err = %d\n",
183                        LDISKFS_SB(inode->i_sb)->s_es->s_volume_name, blk, err);
184                 c->ic_idle_failed = 1;
185                 return ERR_PTR(err);
186         }
187
188         head = (struct iam_idle_head *)(bh->b_data);
189         if (le16_to_cpu(head->iih_magic) != IAM_IDLE_HEADER_MAGIC) {
190                 CERROR("%.16s: invalid idle block head, blk = %u, magic = %d\n",
191                        LDISKFS_SB(inode->i_sb)->s_es->s_volume_name, blk,
192                        le16_to_cpu(head->iih_magic));
193                 brelse(bh);
194                 c->ic_idle_failed = 1;
195                 return ERR_PTR(-EBADF);
196         }
197
198         return bh;
199 }
200
201 /*
202  * Determine format of given container. This is done by scanning list of
203  * registered formats and calling ->if_guess() method of each in turn.
204  */
205 static int iam_format_guess(struct iam_container *c)
206 {
207         int result;
208         struct iam_format *fmt;
209
210         /*
211          * XXX temporary initialization hook.
212          */
213         {
214                 static int initialized = 0;
215
216                 if (!initialized) {
217                         iam_lvar_format_init();
218                         iam_lfix_format_init();
219                         initialized = 1;
220                 }
221         }
222
223         result = -ENOENT;
224         list_for_each_entry(fmt, &iam_formats, if_linkage) {
225                 result = fmt->if_guess(c);
226                 if (result == 0)
227                         break;
228         }
229
230         if (result == 0) {
231                 struct buffer_head *bh;
232                 __u32 *idle_blocks;
233
234                 LASSERT(c->ic_root_bh != NULL);
235
236                 idle_blocks = (__u32 *)(c->ic_root_bh->b_data +
237                                         c->ic_descr->id_root_gap +
238                                         sizeof(struct dx_countlimit));
239                 mutex_lock(&c->ic_idle_mutex);
240                 bh = iam_load_idle_blocks(c, le32_to_cpu(*idle_blocks));
241                 if (bh != NULL && IS_ERR(bh))
242                         result = PTR_ERR(bh);
243                 else
244                         c->ic_idle_bh = bh;
245                 mutex_unlock(&c->ic_idle_mutex);
246         }
247
248         return result;
249 }
250
251 /*
252  * Initialize container @c.
253  */
254 int iam_container_init(struct iam_container *c,
255                        struct iam_descr *descr, struct inode *inode)
256 {
257         memset(c, 0, sizeof *c);
258         c->ic_descr  = descr;
259         c->ic_object = inode;
260         init_rwsem(&c->ic_sem);
261         dynlock_init(&c->ic_tree_lock);
262         mutex_init(&c->ic_idle_mutex);
263         return 0;
264 }
265
266 /*
267  * Determine container format.
268  */
269 int iam_container_setup(struct iam_container *c)
270 {
271         return iam_format_guess(c);
272 }
273
274 /*
275  * Finalize container @c, release all resources.
276  */
277 void iam_container_fini(struct iam_container *c)
278 {
279         brelse(c->ic_idle_bh);
280         c->ic_idle_bh = NULL;
281         brelse(c->ic_root_bh);
282         c->ic_root_bh = NULL;
283 }
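#if 0
/*
 * Illustrative sketch (not built): the container life cycle implied by the
 * three helpers above.  "descr" and "dir_inode" are caller-supplied
 * placeholders, typically coming from the osd index code.
 */
static int iam_container_usage_sketch(struct iam_descr *descr,
                                      struct inode *dir_inode)
{
        struct iam_container c;
        int rc;

        rc = iam_container_init(&c, descr, dir_inode);
        if (rc == 0) {
                /* detect the on-disk format and load the idle-blocks list */
                rc = iam_container_setup(&c);
                if (rc == 0) {
                        /* ... run iterators against &c ... */
                }
                iam_container_fini(&c);
        }
        return rc;
}
#endif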
284
285 void iam_path_init(struct iam_path *path, struct iam_container *c,
286                    struct iam_path_descr *pd)
287 {
288         memset(path, 0, sizeof *path);
289         path->ip_container = c;
290         path->ip_frame = path->ip_frames;
291         path->ip_data = pd;
292         path->ip_leaf.il_path = path;
293 }
294
295 static void iam_leaf_fini(struct iam_leaf *leaf);
296
297 void iam_path_release(struct iam_path *path)
298 {
299         int i;
300
301         for (i = 0; i < ARRAY_SIZE(path->ip_frames); i++) {
302                 if (path->ip_frames[i].bh != NULL) {
303                         path->ip_frames[i].at_shifted = 0;
304                         brelse(path->ip_frames[i].bh);
305                         path->ip_frames[i].bh = NULL;
306                 }
307         }
308 }
309
310 void iam_path_fini(struct iam_path *path)
311 {
312         iam_leaf_fini(&path->ip_leaf);
313         iam_path_release(path);
314 }
315
316
317 void iam_path_compat_init(struct iam_path_compat *path, struct inode *inode)
318 {
319         int i;
320
321         path->ipc_hinfo = &path->ipc_hinfo_area;
322         for (i = 0; i < ARRAY_SIZE(path->ipc_scratch); ++i)
323                 path->ipc_descr.ipd_key_scratch[i] =
324                         (struct iam_ikey *)&path->ipc_scratch[i];
325
326         iam_path_init(&path->ipc_path, &path->ipc_container, &path->ipc_descr);
327 }
328
329 void iam_path_compat_fini(struct iam_path_compat *path)
330 {
331         iam_path_fini(&path->ipc_path);
332 }
333
334 /*
335  * Helper function initializing iam_path_descr and its key scratch area.
336  */
337 struct iam_path_descr *iam_ipd_alloc(void *area, int keysize)
338 {
339         struct iam_path_descr *ipd;
340         void *karea;
341         int i;
342
343         ipd = area;
344         karea = ipd + 1;
345         for (i = 0; i < ARRAY_SIZE(ipd->ipd_key_scratch); ++i, karea += keysize)
346                 ipd->ipd_key_scratch[i] = karea;
347         return ipd;
348 }
349
350 void iam_ipd_free(struct iam_path_descr *ipd)
351 {
352 }
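/*
 * Usage sketch for iam_ipd_alloc()/iam_ipd_free(): the caller owns @area and
 * must size it for the descriptor plus one scratch key per slot.  Something
 * along these lines (the explicit "4" is illustrative; the real number of
 * slots is ARRAY_SIZE(ipd->ipd_key_scratch)):
 *
 *     char area[sizeof(struct iam_path_descr) + 4 * keysize];
 *     struct iam_path_descr *ipd = iam_ipd_alloc(area, keysize);
 *     ...
 *     iam_ipd_free(ipd);
 */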
353
354 int iam_node_read(struct iam_container *c, iam_ptr_t ptr,
355                   handle_t *h, struct buffer_head **bh)
356 {
357         int result = 0;
358
359         /* NB: this can be called by iam_lfix_guess(), which is still at
360          * a very early stage; c->ic_root_bh and c->ic_descr->id_ops
361          * haven't been initialized yet.
362          * Also, we don't have this for IAM dir.
363          */
364         if (c->ic_root_bh != NULL &&
365             c->ic_descr->id_ops->id_root_ptr(c) == ptr) {
366                 get_bh(c->ic_root_bh);
367                 *bh = c->ic_root_bh;
368                 return 0;
369         }
370
371         *bh = ldiskfs_bread(h, c->ic_object, (int)ptr, 0, &result);
372         if (*bh == NULL)
373                 result = -EIO;
374         return result;
375 }
376
377 /*
378  * Return pointer to current leaf record. Pointer is valid while corresponding
379  * leaf node is locked and pinned.
380  */
381 static struct iam_rec *iam_leaf_rec(const struct iam_leaf *leaf)
382 {
383         return iam_leaf_ops(leaf)->rec(leaf);
384 }
385
386 /*
387  * Return pointer to the current leaf key. This function returns pointer to
388  * the key stored in node.
389  *
390  * Caller should assume that returned pointer is only valid while leaf node is
391  * pinned and locked.
392  */
393 static struct iam_key *iam_leaf_key(const struct iam_leaf *leaf)
394 {
395         return iam_leaf_ops(leaf)->key(leaf);
396 }
397
398 static int iam_leaf_key_size(const struct iam_leaf *leaf)
399 {
400         return iam_leaf_ops(leaf)->key_size(leaf);
401 }
402
403 static struct iam_ikey *iam_leaf_ikey(const struct iam_leaf *leaf,
404                                       struct iam_ikey *key)
405 {
406         return iam_leaf_ops(leaf)->ikey(leaf, key);
407 }
408
409 static int iam_leaf_keycmp(const struct iam_leaf *leaf,
410                            const struct iam_key *key)
411 {
412         return iam_leaf_ops(leaf)->key_cmp(leaf, key);
413 }
414
415 static int iam_leaf_keyeq(const struct iam_leaf *leaf,
416                           const struct iam_key *key)
417 {
418         return iam_leaf_ops(leaf)->key_eq(leaf, key);
419 }
420
421 #if LDISKFS_INVARIANT_ON
422 static int iam_leaf_check(struct iam_leaf *leaf);
423 extern int dx_node_check(struct iam_path *p, struct iam_frame *f);
424
425 static int iam_path_check(struct iam_path *p)
426 {
427         int i;
428         int result;
429         struct iam_frame *f;
430         struct iam_descr *param;
431
432         result = 1;
433         param = iam_path_descr(p);
434         for (i = 0; result && i < ARRAY_SIZE(p->ip_frames); ++i) {
435                 f = &p->ip_frames[i];
436                 if (f->bh != NULL) {
437                         result = dx_node_check(p, f);
438                         if (result)
439                                 result = !param->id_ops->id_node_check(p, f);
440                 }
441         }
442         if (result && p->ip_leaf.il_bh != NULL)
443                 result = iam_leaf_check(&p->ip_leaf);
444         if (result == 0) {
445                 ldiskfs_std_error(iam_path_obj(p)->i_sb, result);
446         }
447         return result;
448 }
449 #endif
450
451 static int iam_leaf_load(struct iam_path *path)
452 {
453         iam_ptr_t block;
454         int err;
455         struct iam_container *c;
456         struct buffer_head   *bh;
457         struct iam_leaf      *leaf;
458         struct iam_descr     *descr;
459
460         c     = path->ip_container;
461         leaf  = &path->ip_leaf;
462         descr = iam_path_descr(path);
463         block = path->ip_frame->leaf;
464         if (block == 0) {
465                 /* XXX bug 11027 */
466                 printk(KERN_EMERG "wrong leaf: %lu %d [%p %p %p]\n",
467                        (long unsigned)path->ip_frame->leaf,
468                        dx_get_count(dx_node_get_entries(path, path->ip_frame)),
469                        path->ip_frames[0].bh, path->ip_frames[1].bh,
470                        path->ip_frames[2].bh);
471         }
472         err = descr->id_ops->id_node_read(c, block, NULL, &bh);
473         if (err == 0) {
474                 leaf->il_bh = bh;
475                 leaf->il_curidx = block;
476                 err = iam_leaf_ops(leaf)->init(leaf);
477                 assert_inv(ergo(err == 0, iam_leaf_check(leaf)));
478         }
479         return err;
480 }
481
482 static void iam_unlock_htree(struct iam_container *ic,
483                              struct dynlock_handle *lh)
484 {
485         if (lh != NULL)
486                 dynlock_unlock(&ic->ic_tree_lock, lh);
487 }
488
489
490 static void iam_leaf_unlock(struct iam_leaf *leaf)
491 {
492         if (leaf->il_lock != NULL) {
493                 iam_unlock_htree(iam_leaf_container(leaf),
494                                  leaf->il_lock);
495                 do_corr(schedule());
496                 leaf->il_lock = NULL;
497         }
498 }
499
500 static void iam_leaf_fini(struct iam_leaf *leaf)
501 {
502         if (leaf->il_path != NULL) {
503                 iam_leaf_unlock(leaf);
504                 assert_inv(ergo(leaf->il_bh != NULL, iam_leaf_check(leaf)));
505                 iam_leaf_ops(leaf)->fini(leaf);
506                 if (leaf->il_bh) {
507                         brelse(leaf->il_bh);
508                         leaf->il_bh = NULL;
509                         leaf->il_curidx = 0;
510                 }
511         }
512 }
513
514 static void iam_leaf_start(struct iam_leaf *folio)
515 {
516         iam_leaf_ops(folio)->start(folio);
517 }
518
519 void iam_leaf_next(struct iam_leaf *folio)
520 {
521         iam_leaf_ops(folio)->next(folio);
522 }
523
524 static void iam_leaf_rec_add(struct iam_leaf *leaf, const struct iam_key *key,
525                              const struct iam_rec *rec)
526 {
527         iam_leaf_ops(leaf)->rec_add(leaf, key, rec);
528 }
529
530 static void iam_rec_del(struct iam_leaf *leaf, int shift)
531 {
532         iam_leaf_ops(leaf)->rec_del(leaf, shift);
533 }
534
535 int iam_leaf_at_end(const struct iam_leaf *leaf)
536 {
537         return iam_leaf_ops(leaf)->at_end(leaf);
538 }
539
540 static void iam_leaf_split(struct iam_leaf *l, struct buffer_head **bh,
541                            iam_ptr_t nr)
542 {
543         iam_leaf_ops(l)->split(l, bh, nr);
544 }
545
546 static inline int iam_leaf_empty(struct iam_leaf *l)
547 {
548         return iam_leaf_ops(l)->leaf_empty(l);
549 }
550
551 int iam_leaf_can_add(const struct iam_leaf *l,
552                      const struct iam_key *k, const struct iam_rec *r)
553 {
554         return iam_leaf_ops(l)->can_add(l, k, r);
555 }
556
557 #if LDISKFS_INVARIANT_ON
558 static int iam_leaf_check(struct iam_leaf *leaf)
559 {
560         return 1;
561 #if 0
562         struct iam_lentry    *orig;
563         struct iam_path      *path;
564         struct iam_container *bag;
565         struct iam_ikey       *k0;
566         struct iam_ikey       *k1;
567         int result;
568         int first;
569
570         orig = leaf->il_at;
571         path = iam_leaf_path(leaf);
572         bag  = iam_leaf_container(leaf);
573
574         result = iam_leaf_ops(leaf)->init(leaf);
575         if (result != 0)
576                 return result;
577
578         first = 1;
579         iam_leaf_start(leaf);
580         k0 = iam_path_ikey(path, 0);
581         k1 = iam_path_ikey(path, 1);
582         while (!iam_leaf_at_end(leaf)) {
583                 iam_ikeycpy(bag, k0, k1);
584                 iam_ikeycpy(bag, k1, iam_leaf_ikey(leaf, k1));
585                 if (!first && iam_ikeycmp(bag, k0, k1) > 0) {
586                         return 0;
587                 }
588                 first = 0;
589                 iam_leaf_next(leaf);
590         }
591         leaf->il_at = orig;
592         return 1;
593 #endif
594 }
595 #endif
596
597 static int iam_txn_dirty(handle_t *handle,
598                          struct iam_path *path, struct buffer_head *bh)
599 {
600         int result;
601
602         result = ldiskfs_handle_dirty_metadata(handle, NULL, bh);
603         if (result != 0)
604                 ldiskfs_std_error(iam_path_obj(path)->i_sb, result);
605         return result;
606 }
607
608 static int iam_txn_add(handle_t *handle,
609                        struct iam_path *path, struct buffer_head *bh)
610 {
611         int result;
612
613         result = ldiskfs_journal_get_write_access(handle, bh);
614         if (result != 0)
615                 ldiskfs_std_error(iam_path_obj(path)->i_sb, result);
616         return result;
617 }
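/*
 * iam_txn_add()/iam_txn_dirty() implement the usual journaled-update pattern
 * used throughout this file: declare write access to a buffer before
 * modifying it, then mark it dirty afterwards.  A minimal sketch:
 *
 *     rc = iam_txn_add(handle, path, bh);
 *     if (rc == 0) {
 *             ... modify bh->b_data ...
 *             rc = iam_txn_dirty(handle, path, bh);
 *     }
 */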
618
619 /***********************************************************************/
620 /* iterator interface                                                  */
621 /***********************************************************************/
622
623 static enum iam_it_state it_state(const struct iam_iterator *it)
624 {
625         return it->ii_state;
626 }
627
628 /*
629  * Helper function returning the container the iterator is attached to.
630  */
631 static struct iam_container *iam_it_container(const struct iam_iterator *it)
632 {
633         return it->ii_path.ip_container;
634 }
635
636 static inline int it_keycmp(const struct iam_iterator *it,
637                             const struct iam_key *k)
638 {
639         return iam_leaf_keycmp(&it->ii_path.ip_leaf, k);
640 }
641
642 static inline int it_keyeq(const struct iam_iterator *it,
643                            const struct iam_key *k)
644 {
645         return iam_leaf_keyeq(&it->ii_path.ip_leaf, k);
646 }
647
648 static int it_ikeycmp(const struct iam_iterator *it, const struct iam_ikey *ik)
649 {
650         return iam_ikeycmp(it->ii_path.ip_container,
651                            iam_leaf_ikey(&it->ii_path.ip_leaf,
652                                          iam_path_ikey(&it->ii_path, 0)), ik);
653 }
654
655 static inline int it_at_rec(const struct iam_iterator *it)
656 {
657         return !iam_leaf_at_end(&it->ii_path.ip_leaf);
658 }
659
660 static inline int it_before(const struct iam_iterator *it)
661 {
662         return it_state(it) == IAM_IT_SKEWED && it_at_rec(it);
663 }
664
665 /*
666  * Helper wrapper around iam_it_get(): returns 0 (success) only when record
667  * with exactly the same key as asked is found.
668  */
669 static int iam_it_get_exact(struct iam_iterator *it, const struct iam_key *k)
670 {
671         int result;
672
673         result = iam_it_get(it, k);
674         if (result > 0)
675                 result = 0;
676         else if (result == 0)
677                 /*
678                  * Return -ENOENT if cursor is located above record with a key
679                  * different from one specified, or in the empty leaf.
680                  *
681                  * XXX returning -ENOENT only works if iam_it_get() never
682                  * returns -ENOENT as a legitimate error.
683                  */
684                 result = -ENOENT;
685         return result;
686 }
687
688 void iam_container_write_lock(struct iam_container *ic)
689 {
690         down_write(&ic->ic_sem);
691 }
692
693 void iam_container_write_unlock(struct iam_container *ic)
694 {
695         up_write(&ic->ic_sem);
696 }
697
698 void iam_container_read_lock(struct iam_container *ic)
699 {
700         down_read(&ic->ic_sem);
701 }
702
703 void iam_container_read_unlock(struct iam_container *ic)
704 {
705         up_read(&ic->ic_sem);
706 }
707
708 /*
709  * Initialize iterator to IAM_IT_DETACHED state.
710  *
711  * postcondition: it_state(it) == IAM_IT_DETACHED
712  */
713 int  iam_it_init(struct iam_iterator *it, struct iam_container *c, __u32 flags,
714                  struct iam_path_descr *pd)
715 {
716         memset(it, 0, sizeof *it);
717         it->ii_flags  = flags;
718         it->ii_state  = IAM_IT_DETACHED;
719         iam_path_init(&it->ii_path, c, pd);
720         return 0;
721 }
722
723 /*
724  * Finalize iterator and release all resources.
725  *
726  * precondition: it_state(it) == IAM_IT_DETACHED
727  */
728 void iam_it_fini(struct iam_iterator *it)
729 {
730         assert_corr(it_state(it) == IAM_IT_DETACHED);
731         iam_path_fini(&it->ii_path);
732 }
733
734 /*
735  * These locking primitives are used to protect parts of the directory's
736  * htree. The protection unit is a block: either a leaf or an index node.
737  */
738 static struct dynlock_handle *iam_lock_htree(struct iam_container *ic,
739                                              unsigned long value,
740                                              enum dynlock_type lt)
741 {
742         return dynlock_lock(&ic->ic_tree_lock, value, lt, GFP_NOFS);
743 }
744
745 static int iam_index_lock(struct iam_path *path, struct dynlock_handle **lh)
746 {
747         struct iam_frame *f;
748
749         for (f = path->ip_frame; f >= path->ip_frames; --f, ++lh) {
750                 do_corr(schedule());
751                 *lh = iam_lock_htree(path->ip_container, f->curidx, DLT_READ);
752                 if (*lh == NULL)
753                         return -ENOMEM;
754         }
755         return 0;
756 }
757
758 /*
759  * Fast check for frame consistency.
760  */
761 static int iam_check_fast(struct iam_path *path, struct iam_frame *frame)
762 {
763         struct iam_container *bag;
764         struct iam_entry *next;
765         struct iam_entry *last;
766         struct iam_entry *entries;
767         struct iam_entry *at;
768
769         bag     = path->ip_container;
770         at      = frame->at;
771         entries = frame->entries;
772         last    = iam_entry_shift(path, entries, dx_get_count(entries) - 1);
773
774         if (unlikely(at > last))
775                 return -EAGAIN;
776
777         if (unlikely(dx_get_block(path, at) != frame->leaf))
778                 return -EAGAIN;
779
780         if (unlikely(iam_ikeycmp(bag, iam_ikey_at(path, at),
781                                  path->ip_ikey_target) > 0))
782                 return -EAGAIN;
783
784         next = iam_entry_shift(path, at, +1);
785         if (next <= last) {
786                 if (unlikely(iam_ikeycmp(bag, iam_ikey_at(path, next),
787                                          path->ip_ikey_target) <= 0))
788                         return -EAGAIN;
789         }
790         return 0;
791 }
792
793 int dx_index_is_compat(struct iam_path *path)
794 {
795         return iam_path_descr(path) == NULL;
796 }
797
798 /*
799  * iam_find_position
800  *
801  * Search for the position of the specified hash in an index node.
802  *
803  */
804
805 static struct iam_entry *iam_find_position(struct iam_path *path,
806                                            struct iam_frame *frame)
807 {
808         int count;
809         struct iam_entry *p;
810         struct iam_entry *q;
811         struct iam_entry *m;
812
813         count = dx_get_count(frame->entries);
814         assert_corr(count && count <= dx_get_limit(frame->entries));
815         p = iam_entry_shift(path, frame->entries,
816                             dx_index_is_compat(path) ? 1 : 2);
817         q = iam_entry_shift(path, frame->entries, count - 1);
818         while (p <= q) {
819                 m = iam_entry_shift(path, p, iam_entry_diff(path, q, p) / 2);
820                 if (iam_ikeycmp(path->ip_container, iam_ikey_at(path, m),
821                                 path->ip_ikey_target) > 0)
822                         q = iam_entry_shift(path, m, -1);
823                 else
824                         p = iam_entry_shift(path, m, +1);
825         }
826         return iam_entry_shift(path, p, -1);
827 }
828
829
830
831 static iam_ptr_t iam_find_ptr(struct iam_path *path, struct iam_frame *frame)
832 {
833         return dx_get_block(path, iam_find_position(path, frame));
834 }
835
836 void iam_insert_key(struct iam_path *path, struct iam_frame *frame,
837                     const struct iam_ikey *key, iam_ptr_t ptr)
838 {
839         struct iam_entry *entries = frame->entries;
840         struct iam_entry *new = iam_entry_shift(path, frame->at, +1);
841         int count = dx_get_count(entries);
842
843         /*
844          * Unfortunately we cannot assert this, as this function is sometimes
845          * called by VFS under i_sem and without pdirops lock.
846          */
847         assert_corr(1 || iam_frame_is_locked(path, frame));
848         assert_corr(count < dx_get_limit(entries));
849         assert_corr(frame->at < iam_entry_shift(path, entries, count));
850         assert_inv(dx_node_check(path, frame));
851
852         memmove(iam_entry_shift(path, new, 1), new,
853                 (char *)iam_entry_shift(path, entries, count) - (char *)new);
854         dx_set_ikey(path, new, key);
855         dx_set_block(path, new, ptr);
856         dx_set_count(entries, count + 1);
857         assert_inv(dx_node_check(path, frame));
858 }
859
860 void iam_insert_key_lock(struct iam_path *path, struct iam_frame *frame,
861                          const struct iam_ikey *key, iam_ptr_t ptr)
862 {
863         iam_lock_bh(frame->bh);
864         iam_insert_key(path, frame, key, ptr);
865         iam_unlock_bh(frame->bh);
866 }
867 /*
868  * returns 0 if path was unchanged, -EAGAIN otherwise.
869  */
870 static int iam_check_path(struct iam_path *path, struct iam_frame *frame)
871 {
872         int equal;
873
874         iam_lock_bh(frame->bh);
875         equal = iam_check_fast(path, frame) == 0 ||
876                 frame->leaf == iam_find_ptr(path, frame);
877         DX_DEVAL(iam_lock_stats.dls_bh_again += !equal);
878         iam_unlock_bh(frame->bh);
879
880         return equal ? 0 : -EAGAIN;
881 }
882
883 static int iam_lookup_try(struct iam_path *path)
884 {
885         u32 ptr;
886         int err = 0;
887         int i;
888
889         struct iam_descr *param;
890         struct iam_frame *frame;
891         struct iam_container *c;
892
893         param = iam_path_descr(path);
894         c = path->ip_container;
895
896         ptr = param->id_ops->id_root_ptr(c);
897         for (frame = path->ip_frames, i = 0; i <= path->ip_indirect;
898              ++frame, ++i) {
899                 err = param->id_ops->id_node_read(c, (iam_ptr_t)ptr, NULL,
900                                                   &frame->bh);
901                 do_corr(schedule());
902
903                 iam_lock_bh(frame->bh);
904                 /*
905                  * node must be initialized under bh lock because concurrent
906                  * creation procedure may change it and iam_lookup_try() will
907                  * see obsolete tree height. -bzzz
908                  */
909                 if (err != 0)
910                         break;
911
912                 if (LDISKFS_INVARIANT_ON) {
913                         err = param->id_ops->id_node_check(path, frame);
914                         if (err != 0)
915                                 break;
916                 }
917
918                 err = param->id_ops->id_node_load(path, frame);
919                 if (err != 0)
920                         break;
921
922                 assert_inv(dx_node_check(path, frame));
923                 /*
924                  * splitting may change the root index block and move the hash
925                  * we're looking for into another index block, so we have to
926                  * check this situation and repeat from the beginning if the
927                  * path got changed -bzzz
928                  */
929                 if (i > 0) {
930                         err = iam_check_path(path, frame - 1);
931                         if (err != 0)
932                                 break;
933                 }
934
935                 frame->at = iam_find_position(path, frame);
936                 frame->curidx = ptr;
937                 frame->leaf = ptr = dx_get_block(path, frame->at);
938
939                 iam_unlock_bh(frame->bh);
940                 do_corr(schedule());
941         }
942         if (err != 0)
943                 iam_unlock_bh(frame->bh);
944         path->ip_frame = --frame;
945         return err;
946 }
947
948 static int __iam_path_lookup(struct iam_path *path)
949 {
950         int err;
951         int i;
952
953         for (i = 0; i < DX_MAX_TREE_HEIGHT; ++ i)
954                 assert(path->ip_frames[i].bh == NULL);
955
956         do {
957                 err = iam_lookup_try(path);
958                 do_corr(schedule());
959                 if (err != 0)
960                         iam_path_fini(path);
961         } while (err == -EAGAIN);
962
963         return err;
964 }
965
966 /*
967  * returns 0 if path was unchanged, -EAGAIN otherwise.
968  */
969 static int iam_check_full_path(struct iam_path *path, int search)
970 {
971         struct iam_frame *bottom;
972         struct iam_frame *scan;
973         int i;
974         int result;
975
976         do_corr(schedule());
977
978         for (bottom = path->ip_frames, i = 0;
979              i < DX_MAX_TREE_HEIGHT && bottom->bh != NULL; ++bottom, ++i) {
980                 ; /* find last filled in frame */
981         }
982
983         /*
984          * Lock frames, bottom to top.
985          */
986         for (scan = bottom - 1; scan >= path->ip_frames; --scan)
987                 iam_lock_bh(scan->bh);
988         /*
989          * Check them top to bottom.
990          */
991         result = 0;
992         for (scan = path->ip_frames; scan < bottom; ++scan) {
993                 struct iam_entry *pos;
994
995                 if (search) {
996                         if (iam_check_fast(path, scan) == 0)
997                                 continue;
998
999                         pos = iam_find_position(path, scan);
1000                         if (scan->leaf != dx_get_block(path, pos)) {
1001                                 result = -EAGAIN;
1002                                 break;
1003                         }
1004                         scan->at = pos;
1005                 } else {
1006                         pos = iam_entry_shift(path, scan->entries,
1007                                               dx_get_count(scan->entries) - 1);
1008                         if (scan->at > pos ||
1009                             scan->leaf != dx_get_block(path, scan->at)) {
1010                                 result = -EAGAIN;
1011                                 break;
1012                         }
1013                 }
1014         }
1015
1016         /*
1017          * Unlock top to bottom.
1018          */
1019         for (scan = path->ip_frames; scan < bottom; ++scan)
1020                 iam_unlock_bh(scan->bh);
1021         DX_DEVAL(iam_lock_stats.dls_bh_full_again += !!result);
1022         do_corr(schedule());
1023
1024         return result;
1025 }
1026
1027
1028 /*
1029  * Performs path lookup and returns with found leaf (if any) locked by htree
1030  * lock.
1031  */
1032 static int iam_lookup_lock(struct iam_path *path,
1033                            struct dynlock_handle **dl, enum dynlock_type lt)
1034 {
1035         int result;
1036
1037         while ((result = __iam_path_lookup(path)) == 0) {
1038                 do_corr(schedule());
1039                 *dl = iam_lock_htree(path->ip_container, path->ip_frame->leaf,
1040                                      lt);
1041                 if (*dl == NULL) {
1042                         iam_path_fini(path);
1043                         result = -ENOMEM;
1044                         break;
1045                 }
1046                 do_corr(schedule());
1047                 /*
1048                  * while we were locking it, the leaf we just found may have
1049                  * been split, so we need to check this -bzzz
1050                  */
1051                 if (iam_check_full_path(path, 1) == 0)
1052                         break;
1053                 iam_unlock_htree(path->ip_container, *dl);
1054                 *dl = NULL;
1055                 iam_path_fini(path);
1056         }
1057         return result;
1058 }
1059 /*
1060  * Performs tree top-to-bottom traversal starting from root, and loads leaf
1061  * node.
1062  */
1063 static int iam_path_lookup(struct iam_path *path, int index)
1064 {
1065         struct iam_container *c;
1066         struct iam_leaf  *leaf;
1067         int result;
1068
1069         c = path->ip_container;
1070         leaf = &path->ip_leaf;
1071         result = iam_lookup_lock(path, &leaf->il_lock, DLT_WRITE);
1072         assert_inv(iam_path_check(path));
1073         do_corr(schedule());
1074         if (result == 0) {
1075                 result = iam_leaf_load(path);
1076                 assert_inv(ergo(result == 0, iam_leaf_check(leaf)));
1077                 if (result == 0) {
1078                         do_corr(schedule());
1079                         if (index)
1080                                 result = iam_leaf_ops(leaf)->
1081                                         ilookup(leaf, path->ip_ikey_target);
1082                         else
1083                                 result = iam_leaf_ops(leaf)->
1084                                         lookup(leaf, path->ip_key_target);
1085                         do_corr(schedule());
1086                 }
1087                 if (result < 0)
1088                         iam_leaf_unlock(leaf);
1089         }
1090         return result;
1091 }
1092
1093 /*
1094  * Common part of iam_it_{i,}get().
1095  */
1096 static int __iam_it_get(struct iam_iterator *it, int index)
1097 {
1098         int result;
1099         assert_corr(it_state(it) == IAM_IT_DETACHED);
1100
1101         result = iam_path_lookup(&it->ii_path, index);
1102         if (result >= 0) {
1103                 int collision;
1104
1105                 collision = result & IAM_LOOKUP_LAST;
1106                 switch (result & ~IAM_LOOKUP_LAST) {
1107                 case IAM_LOOKUP_EXACT:
1108                         result = +1;
1109                         it->ii_state = IAM_IT_ATTACHED;
1110                         break;
1111                 case IAM_LOOKUP_OK:
1112                         result = 0;
1113                         it->ii_state = IAM_IT_ATTACHED;
1114                         break;
1115                 case IAM_LOOKUP_BEFORE:
1116                 case IAM_LOOKUP_EMPTY:
1117                         result = 0;
1118                         it->ii_state = IAM_IT_SKEWED;
1119                         break;
1120                 default:
1121                         assert(0);
1122                 }
1123                 result |= collision;
1124         }
1125         /*
1126          * See iam_it_get_exact() for explanation.
1127          */
1128         assert_corr(result != -ENOENT);
1129         return result;
1130 }
1131
1132 /*
1133  * The correct hash was found, but not the same key; iterate through the
1134  * hash collision chain, looking for the correct record.
1135  */
1136 static int iam_it_collision(struct iam_iterator *it)
1137 {
1138         int result;
1139
1140         assert(ergo(it_at_rec(it), !it_keyeq(it, it->ii_path.ip_key_target)));
1141
1142         while ((result = iam_it_next(it)) == 0) {
1143                 do_corr(schedule());
1144                 if (it_ikeycmp(it, it->ii_path.ip_ikey_target) != 0)
1145                         return -ENOENT;
1146                 if (it_keyeq(it, it->ii_path.ip_key_target))
1147                         return 0;
1148         }
1149         return result;
1150 }
1151
1152 /*
1153  * Attach iterator. After successful completion, @it points to record with
1154  * least key not larger than @k.
1155  *
1156  * Return value: 0: positioned on existing record,
1157  *             +ve: exact position found,
1158  *             -ve: error.
1159  *
1160  * precondition:  it_state(it) == IAM_IT_DETACHED
1161  * postcondition: ergo(result == 0 && it_state(it) == IAM_IT_ATTACHED,
1162  *                     it_keycmp(it, k) <= 0)
1163  */
1164 int iam_it_get(struct iam_iterator *it, const struct iam_key *k)
1165 {
1166         int result;
1167         assert_corr(it_state(it) == IAM_IT_DETACHED);
1168
1169         it->ii_path.ip_ikey_target = NULL;
1170         it->ii_path.ip_key_target  = k;
1171
1172         result = __iam_it_get(it, 0);
1173
1174         if (result == IAM_LOOKUP_LAST) {
1175                 result = iam_it_collision(it);
1176                 if (result != 0) {
1177                         iam_it_put(it);
1178                         iam_it_fini(it);
1179                         result = __iam_it_get(it, 0);
1180                 } else
1181                         result = +1;
1182         }
1183         if (result > 0)
1184                 result &= ~IAM_LOOKUP_LAST;
1185
1186         assert_corr(ergo(result > 0, it_keycmp(it, k) == 0));
1187         assert_corr(ergo(result == 0 && it_state(it) == IAM_IT_ATTACHED,
1188                          it_keycmp(it, k) <= 0));
1189         return result;
1190 }
1191
1192 /*
1193  * Attach iterator by index key.
1194  */
1195 static int iam_it_iget(struct iam_iterator *it, const struct iam_ikey *k)
1196 {
1197         assert_corr(it_state(it) == IAM_IT_DETACHED);
1198
1199         it->ii_path.ip_ikey_target = k;
1200         return __iam_it_get(it, 1) & ~IAM_LOOKUP_LAST;
1201 }
1202
1203 /*
1204  * Attach iterator, and assure it points to the record (not skewed).
1205  *
1206  * Return value: 0: positioned on existing record,
1207  *             +ve: exact position found,
1208  *             -ve: error.
1209  *
1210  * precondition:  it_state(it) == IAM_IT_DETACHED &&
1211  *                !(it->ii_flags&IAM_IT_WRITE)
1212  * postcondition: ergo(result == 0, it_state(it) == IAM_IT_ATTACHED)
1213  */
1214 int iam_it_get_at(struct iam_iterator *it, const struct iam_key *k)
1215 {
1216         int result;
1217         assert_corr(it_state(it) == IAM_IT_DETACHED &&
1218                     !(it->ii_flags&IAM_IT_WRITE));
1219         result = iam_it_get(it, k);
1220         if (result == 0) {
1221                 if (it_state(it) != IAM_IT_ATTACHED) {
1222                         assert_corr(it_state(it) == IAM_IT_SKEWED);
1223                         result = iam_it_next(it);
1224                 }
1225         }
1226         assert_corr(ergo(result >= 0, it_state(it) == IAM_IT_ATTACHED));
1227         return result;
1228 }
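#if 0
/*
 * Illustrative sketch (not built): a point lookup with the iterator API.
 * @c, @pd and @key are caller-supplied placeholders, and error handling is
 * reduced to the bare minimum.
 */
static int iam_point_lookup_sketch(struct iam_container *c,
                                   struct iam_path_descr *pd,
                                   const struct iam_key *key)
{
        struct iam_iterator it;
        int rc;

        iam_it_init(&it, c, 0, pd);

        rc = iam_it_get_exact(&it, key);        /* 0 only on an exact match */
        if (rc == 0) {
                struct iam_rec *rec = iam_it_rec_get(&it);

                /* rec points into the pinned leaf: consume or copy it
                 * before the iterator is detached */
                (void)rec;
        }

        iam_it_put(&it);
        iam_it_fini(&it);
        return rc;
}
#endif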
1229
1230 /*
1231  * Duplicates iterator.
1232  *
1233  * postcondition: it_state(dst) == it_state(src) &&
1234  *                iam_it_container(dst) == iam_it_container(src) &&
1235  *                dst->ii_flags == src->ii_flags &&
1236  *                ergo(it_state(src) == IAM_IT_ATTACHED,
1237  *                     iam_it_rec_get(dst) == iam_it_rec_get(src) &&
1238  *                     iam_it_key_get(dst) == iam_it_key_get(src))
1239  */
1240 void iam_it_dup(struct iam_iterator *dst, const struct iam_iterator *src)
1241 {
1242         dst->ii_flags     = src->ii_flags;
1243         dst->ii_state     = src->ii_state;
1244         /* XXX not yet. iam_path_dup(&dst->ii_path, &src->ii_path); */
1245         /*
1246          * XXX: duplicate lock.
1247          */
1248         assert_corr(it_state(dst) == it_state(src));
1249         assert_corr(iam_it_container(dst) == iam_it_container(src));
1250         assert_corr(dst->ii_flags == src->ii_flags);
1251         assert_corr(ergo(it_state(src) == IAM_IT_ATTACHED,
1252                     iam_it_rec_get(dst) == iam_it_rec_get(src) &&
1253                     iam_it_key_get(dst) == iam_it_key_get(src)));
1254
1255 }
1256
1257 /*
1258  * Detach iterator. Does nothing in detached state.
1259  *
1260  * postcondition: it_state(it) == IAM_IT_DETACHED
1261  */
1262 void iam_it_put(struct iam_iterator *it)
1263 {
1264         if (it->ii_state != IAM_IT_DETACHED) {
1265                 it->ii_state = IAM_IT_DETACHED;
1266                 iam_leaf_fini(&it->ii_path.ip_leaf);
1267         }
1268 }
1269
1270 static struct iam_ikey *iam_it_ikey_get(const struct iam_iterator *it,
1271                                         struct iam_ikey *ikey);
1272
1273
1274 /*
1275  * This function increments the frame pointer to search the next leaf
1276  * block, and reads in the necessary intervening nodes if the search
1277  * should be necessary.  Whether or not the search is necessary is
1278  * controlled by the hash parameter.  If the hash value is even, then
1279  * the search is only continued if the next block starts with that
1280  * hash value.  This is used if we are searching for a specific file.
1281  *
1282  * If the hash value is HASH_NB_ALWAYS, then always go to the next block.
1283  *
1284  * This function returns 1 if the caller should continue to search,
1285  * or 0 if it should not.  If there is an error reading one of the
1286  * index blocks, it will return a negative error code.
1287  *
1288  * If start_hash is non-null, it will be filled in with the starting
1289  * hash of the next page.
1290  */
1291 static int iam_htree_advance(struct inode *dir, __u32 hash,
1292                               struct iam_path *path, __u32 *start_hash,
1293                               int compat)
1294 {
1295         struct iam_frame *p;
1296         struct buffer_head *bh;
1297         int err, num_frames = 0;
1298         __u32 bhash;
1299
1300         p = path->ip_frame;
1301         /*
1302          * Find the next leaf page by incrementing the frame pointer.
1303          * If we run out of entries in the interior node, loop around and
1304          * increment the pointer in the parent node.  When we break out of
1305          * this loop, num_frames indicates the number of interior
1306          * nodes that need to be read.
1307          */
1308         while (1) {
1309                 do_corr(schedule());
1310                 iam_lock_bh(p->bh);
1311                 if (p->at_shifted)
1312                         p->at_shifted = 0;
1313                 else
1314                         p->at = iam_entry_shift(path, p->at, +1);
1315                 if (p->at < iam_entry_shift(path, p->entries,
1316                                             dx_get_count(p->entries))) {
1317                         p->leaf = dx_get_block(path, p->at);
1318                         iam_unlock_bh(p->bh);
1319                         break;
1320                 }
1321                 iam_unlock_bh(p->bh);
1322                 if (p == path->ip_frames)
1323                         return 0;
1324                 num_frames++;
1325                 --p;
1326         }
1327
1328         if (compat) {
1329                 /*
1330                  * Htree hash magic.
1331                  *
1332                  * If the hash is 1, then continue only if the next page has
1333                  * a continuation hash of any value.  This is used for
1334                  * readdir handling.  Otherwise, check to see if the hash
1335                  * matches the desired continuation hash.  If it doesn't,
1336                  * return since there's no point to read in the successive
1337                  * index pages.
1338                  */
1339                 dx_get_ikey(path, p->at, (struct iam_ikey *)&bhash);
1340                 if (start_hash)
1341                         *start_hash = bhash;
1342                 if ((hash & 1) == 0) {
1343                         if ((bhash & ~1) != hash)
1344                                 return 0;
1345                 }
1346         }
1347         /*
1348          * If the hash is HASH_NB_ALWAYS, we always go to the next
1349          * block so no check is necessary
1350          */
1351         while (num_frames--) {
1352                 iam_ptr_t idx;
1353
1354                 do_corr(schedule());
1355                 iam_lock_bh(p->bh);
1356                 idx = p->leaf = dx_get_block(path, p->at);
1357                 iam_unlock_bh(p->bh);
1358                 err = iam_path_descr(path)->id_ops->
1359                         id_node_read(path->ip_container, idx, NULL, &bh);
1360                 if (err != 0)
1361                         return err; /* Failure */
1362                 ++p;
1363                 brelse(p->bh);
1364                 assert_corr(p->bh != bh);
1365                 p->bh = bh;
1366                 p->entries = dx_node_get_entries(path, p);
1367                 p->at = iam_entry_shift(path, p->entries, !compat);
1368                 assert_corr(p->curidx != idx);
1369                 p->curidx = idx;
1370                 iam_lock_bh(p->bh);
1371                 assert_corr(p->leaf != dx_get_block(path, p->at));
1372                 p->leaf = dx_get_block(path, p->at);
1373                 iam_unlock_bh(p->bh);
1374                 assert_inv(dx_node_check(path, p));
1375         }
1376         return 1;
1377 }
1378
1379
1380 static inline int iam_index_advance(struct iam_path *path)
1381 {
1382         return iam_htree_advance(iam_path_obj(path), 0, path, NULL, 0);
1383 }
1384
1385 static void iam_unlock_array(struct iam_container *ic,
1386                              struct dynlock_handle **lh)
1387 {
1388         int i;
1389
1390         for (i = 0; i < DX_MAX_TREE_HEIGHT; ++i, ++lh) {
1391                 if (*lh != NULL) {
1392                         iam_unlock_htree(ic, *lh);
1393                         *lh = NULL;
1394                 }
1395         }
1396 }
1397 /*
1398  * Advance index part of @path to point to the next leaf. Returns 1 on
1399  * success, 0 when the end of the container was reached. Leaf node is locked.
1400  */
1401 int iam_index_next(struct iam_container *c, struct iam_path *path)
1402 {
1403         iam_ptr_t cursor;
1404         struct dynlock_handle *lh[DX_MAX_TREE_HEIGHT] = { NULL, };
1405         int result;
1406         struct inode *object;
1407
1408         /*
1409          * Locking for iam_index_next()... is to be described.
1410          */
1411
1412         object = c->ic_object;
1413         cursor = path->ip_frame->leaf;
1414
1415         while (1) {
1416                 result = iam_index_lock(path, lh);
1417                 do_corr(schedule());
1418                 if (result < 0)
1419                         break;
1420
1421                 result = iam_check_full_path(path, 0);
1422                 if (result == 0 && cursor == path->ip_frame->leaf) {
1423                         result = iam_index_advance(path);
1424
1425                         assert_corr(result == 0 ||
1426                                     cursor != path->ip_frame->leaf);
1427                         break;
1428                 }
1429                 do {
1430                         iam_unlock_array(c, lh);
1431
1432                         iam_path_release(path);
1433                         do_corr(schedule());
1434
1435                         result = __iam_path_lookup(path);
1436                         if (result < 0)
1437                                 break;
1438
1439                         while (path->ip_frame->leaf != cursor) {
1440                                 do_corr(schedule());
1441
1442                                 result = iam_index_lock(path, lh);
1443                                 do_corr(schedule());
1444                                 if (result < 0)
1445                                         break;
1446
1447                                 result = iam_check_full_path(path, 0);
1448                                 if (result != 0)
1449                                         break;
1450
1451                                 result = iam_index_advance(path);
1452                                 if (result == 0) {
1453                                         CERROR("cannot find cursor : %u\n",
1454                                                 cursor);
1455                                         result = -EIO;
1456                                 }
1457                                 if (result < 0)
1458                                         break;
1459                                 result = iam_check_full_path(path, 0);
1460                                 if (result != 0)
1461                                         break;
1462                                 iam_unlock_array(c, lh);
1463                         }
1464                 } while (result == -EAGAIN);
1465                 if (result < 0)
1466                         break;
1467         }
1468         iam_unlock_array(c, lh);
1469         return result;
1470 }
1471
1472 /*
1473  * Move iterator one record right.
1474  *
1475  * Return value: 0: success,
1476  *              +1: end of container reached
1477  *             -ve: error
1478  *
1479  * precondition:  (it_state(it) == IAM_IT_ATTACHED ||
1480  *                 it_state(it) == IAM_IT_SKEWED) && it->ii_flags&IAM_IT_MOVE
1481  * postcondition: ergo(result == 0, it_state(it) == IAM_IT_ATTACHED) &&
1482  *                ergo(result >  0, it_state(it) == IAM_IT_DETACHED)
1483  */
1484 int iam_it_next(struct iam_iterator *it)
1485 {
1486         int result;
1487         struct iam_path      *path;
1488         struct iam_leaf      *leaf;
1489         do_corr(struct iam_ikey *ik_orig);
1490
1491         /* assert_corr(it->ii_flags&IAM_IT_MOVE); */
1492         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1493                     it_state(it) == IAM_IT_SKEWED);
1494
1495         path = &it->ii_path;
1496         leaf = &path->ip_leaf;
1497
1498         assert_corr(iam_leaf_is_locked(leaf));
1499
1500         result = 0;
1501         do_corr(ik_orig = it_at_rec(it) ?
1502                 iam_it_ikey_get(it, iam_path_ikey(path, 2)) : NULL);
1503         if (it_before(it)) {
1504                 assert_corr(!iam_leaf_at_end(leaf));
1505                 it->ii_state = IAM_IT_ATTACHED;
1506         } else {
1507                 if (!iam_leaf_at_end(leaf))
1508                         /* advance within leaf node */
1509                         iam_leaf_next(leaf);
1510                 /*
1511                  * multiple iterations may be necessary due to empty leaves.
1512                  */
1513                 while (result == 0 && iam_leaf_at_end(leaf)) {
1514                         do_corr(schedule());
1515                         /* advance index portion of the path */
1516                         result = iam_index_next(iam_it_container(it), path);
1517                         assert_corr(iam_leaf_is_locked(leaf));
1518                         if (result == 1) {
1519                                 struct dynlock_handle *lh;
1520                                 lh = iam_lock_htree(iam_it_container(it),
1521                                                     path->ip_frame->leaf,
1522                                                     DLT_WRITE);
1523                                 if (lh != NULL) {
1524                                         iam_leaf_fini(leaf);
1525                                         leaf->il_lock = lh;
1526                                         result = iam_leaf_load(path);
1527                                         if (result == 0)
1528                                                 iam_leaf_start(leaf);
1529                                 } else
1530                                         result = -ENOMEM;
1531                         } else if (result == 0)
1532                                 /* end of container reached */
1533                                 result = +1;
1534                         if (result != 0)
1535                                 iam_it_put(it);
1536                 }
1537                 if (result == 0)
1538                         it->ii_state = IAM_IT_ATTACHED;
1539         }
1540         assert_corr(ergo(result == 0, it_state(it) == IAM_IT_ATTACHED));
1541         assert_corr(ergo(result >  0, it_state(it) == IAM_IT_DETACHED));
1542         assert_corr(ergo(result == 0 && ik_orig != NULL,
1543                          it_ikeycmp(it, ik_orig) >= 0));
1544         return result;
1545 }
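#if 0
/*
 * Illustrative sketch (not built): scanning records from a caller-supplied
 * start key onwards, using the return-value convention documented above
 * (0: next record, +1: end of container, -ve: error).
 */
static int iam_scan_sketch(struct iam_container *c,
                           struct iam_path_descr *pd,
                           const struct iam_key *start)
{
        struct iam_iterator it;
        int rc;

        iam_it_init(&it, c, IAM_IT_MOVE, pd);

        rc = iam_it_get_at(&it, start);
        if (rc > 0)
                rc = 0;                 /* an exact match is a record too */
        while (rc == 0) {
                /* the record and key are valid only while attached */
                struct iam_rec *rec = iam_it_rec_get(&it);
                struct iam_key *key = iam_it_key_get(&it);

                (void)rec; (void)key;
                rc = iam_it_next(&it);
        }

        iam_it_put(&it);
        iam_it_fini(&it);
        return rc > 0 ? 0 : rc;
}
#endif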
1546
1547 /*
1548  * Return pointer to the record under iterator.
1549  *
1550  * precondition:  it_state(it) == IAM_IT_ATTACHED && it_at_rec(it)
1551  * postcondition: it_state(it) == IAM_IT_ATTACHED
1552  */
1553 struct iam_rec *iam_it_rec_get(const struct iam_iterator *it)
1554 {
1555         assert_corr(it_state(it) == IAM_IT_ATTACHED);
1556         assert_corr(it_at_rec(it));
1557         return iam_leaf_rec(&it->ii_path.ip_leaf);
1558 }
1559
1560 static void iam_it_reccpy(struct iam_iterator *it, const struct iam_rec *r)
1561 {
1562         struct iam_leaf *folio;
1563
1564         folio = &it->ii_path.ip_leaf;
1565         iam_leaf_ops(folio)->rec_set(folio, r);
1566 }
1567
1568 /*
1569  * Replace contents of record under iterator.
1570  *
1571  * precondition:  it_state(it) == IAM_IT_ATTACHED &&
1572  *                it->ii_flags&IAM_IT_WRITE
1573  * postcondition: it_state(it) == IAM_IT_ATTACHED &&
1574  *                ergo(result == 0, !memcmp(iam_it_rec_get(it), r, ...))
1575  */
1576 int iam_it_rec_set(handle_t *h,
1577                    struct iam_iterator *it, const struct iam_rec *r)
1578 {
1579         int result;
1580         struct iam_path *path;
1581         struct buffer_head *bh;
1582
1583         assert_corr(it_state(it) == IAM_IT_ATTACHED &&
1584                     it->ii_flags&IAM_IT_WRITE);
1585         assert_corr(it_at_rec(it));
1586
1587         path = &it->ii_path;
1588         bh   = path->ip_leaf.il_bh;
1589         result = iam_txn_add(h, path, bh);
1590         if (result == 0) {
1591                 iam_it_reccpy(it, r);
1592                 result = iam_txn_dirty(h, path, bh);
1593         }
1594         return result;
1595 }
1596
1597 /*
1598  * Return pointer to the index key under iterator.
1599  *
1600  * precondition:  it_state(it) == IAM_IT_ATTACHED ||
1601  *                it_state(it) == IAM_IT_SKEWED
1602  */
1603 static struct iam_ikey *iam_it_ikey_get(const struct iam_iterator *it,
1604                                         struct iam_ikey *ikey)
1605 {
1606         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1607                     it_state(it) == IAM_IT_SKEWED);
1608         assert_corr(it_at_rec(it));
1609         return iam_leaf_ikey(&it->ii_path.ip_leaf, ikey);
1610 }
1611
1612 /*
1613  * Return pointer to the key under iterator.
1614  *
1615  * precondition:  it_state(it) == IAM_IT_ATTACHED ||
1616  *                it_state(it) == IAM_IT_SKEWED
1617  */
1618 struct iam_key *iam_it_key_get(const struct iam_iterator *it)
1619 {
1620         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1621                     it_state(it) == IAM_IT_SKEWED);
1622         assert_corr(it_at_rec(it));
1623         return iam_leaf_key(&it->ii_path.ip_leaf);
1624 }
1625
1626 /*
1627  * Return size of key under iterator (in bytes)
1628  *
1629  * precondition:  it_state(it) == IAM_IT_ATTACHED ||
1630  *                it_state(it) == IAM_IT_SKEWED
1631  */
1632 int iam_it_key_size(const struct iam_iterator *it)
1633 {
1634         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1635                     it_state(it) == IAM_IT_SKEWED);
1636         assert_corr(it_at_rec(it));
1637         return iam_leaf_key_size(&it->ii_path.ip_leaf);
1638 }
1639
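/*
 * Allocate a block for a new tree node.  If the container keeps a list of idle
 * (previously recycled) blocks, one of them is reused (either a block recorded
 * in the idle head, or the idle-head block itself once its list is empty) and
 * cleared so that it looks like a freshly allocated node; otherwise a new
 * block is appended to the underlying object via osd_ldiskfs_append().  The
 * block number is returned through @b, an error code through @e, and NULL is
 * returned on failure.
 */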
1640 static struct buffer_head *
1641 iam_new_node(handle_t *h, struct iam_container *c, iam_ptr_t *b, int *e)
1642 {
1643         struct inode *inode = c->ic_object;
1644         struct buffer_head *bh = NULL;
1645         struct iam_idle_head *head;
1646         struct buffer_head *idle;
1647         __u32 *idle_blocks;
1648         __u16 count;
1649
1650         if (c->ic_idle_bh == NULL)
1651                 goto newblock;
1652
1653         mutex_lock(&c->ic_idle_mutex);
1654         if (unlikely(c->ic_idle_bh == NULL)) {
1655                 mutex_unlock(&c->ic_idle_mutex);
1656                 goto newblock;
1657         }
1658
1659         head = (struct iam_idle_head *)(c->ic_idle_bh->b_data);
1660         count = le16_to_cpu(head->iih_count);
1661         if (count > 0) {
1662                 *e = ldiskfs_journal_get_write_access(h, c->ic_idle_bh);
1663                 if (*e != 0)
1664                         goto fail;
1665
1666                 --count;
1667                 *b = le32_to_cpu(head->iih_blks[count]);
1668                 head->iih_count = cpu_to_le16(count);
1669                 *e = ldiskfs_handle_dirty_metadata(h, inode, c->ic_idle_bh);
1670                 if (*e != 0)
1671                         goto fail;
1672
1673                 mutex_unlock(&c->ic_idle_mutex);
1674                 bh = ldiskfs_bread(NULL, inode, *b, 0, e);
1675                 if (bh == NULL)
1676                         return NULL;
1677                 goto got;
1678         }
1679
1680         /* The block that contains the iam_idle_head itself is also an
1681          * idle block, and can be used as the new node. */
1682         idle_blocks = (__u32 *)(c->ic_root_bh->b_data +
1683                                 c->ic_descr->id_root_gap +
1684                                 sizeof(struct dx_countlimit));
1685         *e = ldiskfs_journal_get_write_access(h, c->ic_root_bh);
1686         if (*e != 0)
1687                 goto fail;
1688
1689         *b = le32_to_cpu(*idle_blocks);
1690         iam_lock_bh(c->ic_root_bh);
1691         *idle_blocks = head->iih_next;
1692         iam_unlock_bh(c->ic_root_bh);
1693         *e = ldiskfs_handle_dirty_metadata(h, inode, c->ic_root_bh);
1694         if (*e != 0) {
1695                 iam_lock_bh(c->ic_root_bh);
1696                 *idle_blocks = cpu_to_le32(*b);
1697                 iam_unlock_bh(c->ic_root_bh);
1698                 goto fail;
1699         }
1700
1701         bh = c->ic_idle_bh;
1702         idle = iam_load_idle_blocks(c, le32_to_cpu(*idle_blocks));
1703         if (idle != NULL && IS_ERR(idle)) {
1704                 *e = PTR_ERR(idle);
1705                 c->ic_idle_bh = NULL;
1706                 brelse(bh);
1707                 goto fail;
1708         }
1709
1710         c->ic_idle_bh = idle;
1711         mutex_unlock(&c->ic_idle_mutex);
1712
1713 got:
1714         /* get write access for the found buffer head */
1715         *e = ldiskfs_journal_get_write_access(h, bh);
1716         if (*e != 0) {
1717                 brelse(bh);
1718                 bh = NULL;
1719                 ldiskfs_std_error(inode->i_sb, *e);
1720         } else {
1721                 /* Clear the reused node so it looks like a freshly allocated one. */
1722                 memset(bh->b_data, 0, inode->i_sb->s_blocksize);
1723                 set_buffer_uptodate(bh);
1724         }
1725         return bh;
1726
1727 newblock:
1728         bh = osd_ldiskfs_append(h, inode, b, e);
1729         return bh;
1730
1731 fail:
1732         mutex_unlock(&c->ic_idle_mutex);
1733         ldiskfs_std_error(inode->i_sb, *e);
1734         return NULL;
1735 }
1736
1737 /*
1738  * Insertion of a new record. Interaction with jbd in the non-trivial case
1739  * (when a split happens) is as follows:
1740  *
1741  *  - the new leaf node is involved in the transaction by iam_new_node();
1742  *
1743  *  - the old leaf node is involved in the transaction by iam_add_rec();
1744  *
1745  *  - the leaf where the insertion point ends up is marked dirty by iam_add_rec();
1746  *
1747  *  - the leaf without the insertion point is marked dirty (as @new_leaf) by
1748  *  iam_new_leaf();
1749  *
1750  *  - split index nodes are involved in the transaction and marked dirty by
1751  *  split_index_node().
1752  *
1753  *  - the "safe" index node, which is not split but where the new pointer is
1754  *  inserted, is involved in the transaction and marked dirty by split_index_node().
1755  *
1756  *  - the index node where the pointer to the new leaf is inserted is involved in
1757  *  the transaction by split_index_node() and marked dirty by iam_add_rec().
1758  *
1759  *  - the inode is marked dirty by iam_add_rec().
1760  *
1761  */
1762
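/*
 * Split the leaf @leaf into a freshly allocated block: the new block is
 * locked, initialized and receives part of the records.  If the insertion
 * point migrates to the new block, the path is switched to it (the leaf lock
 * and frame pointer are updated); otherwise the new node's lock is released.
 * The new leaf and the inode are then marked dirty.
 */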
1763 static int iam_new_leaf(handle_t *handle, struct iam_leaf *leaf)
1764 {
1765         int err;
1766         iam_ptr_t blknr;
1767         struct buffer_head   *new_leaf;
1768         struct buffer_head   *old_leaf;
1769         struct iam_container *c;
1770         struct inode         *obj;
1771         struct iam_path      *path;
1772
1773         assert_inv(iam_leaf_check(leaf));
1774
1775         c = iam_leaf_container(leaf);
1776         path = leaf->il_path;
1777
1778         obj = c->ic_object;
1779         new_leaf = iam_new_node(handle, c, &blknr, &err);
1780         do_corr(schedule());
1781         if (new_leaf != NULL) {
1782                 struct dynlock_handle *lh;
1783
1784                 lh = iam_lock_htree(c, blknr, DLT_WRITE);
1785                 do_corr(schedule());
1786                 if (lh != NULL) {
1787                         iam_leaf_ops(leaf)->init_new(c, new_leaf);
1788                         do_corr(schedule());
1789                         old_leaf = leaf->il_bh;
1790                         iam_leaf_split(leaf, &new_leaf, blknr);
1791                         if (old_leaf != leaf->il_bh) {
1792                                 /*
1793                                  * Switched to the new leaf.
1794                                  */
1795                                 iam_leaf_unlock(leaf);
1796                                 leaf->il_lock = lh;
1797                                 path->ip_frame->leaf = blknr;
1798                         } else
1799                                 iam_unlock_htree(path->ip_container, lh);
1800                         do_corr(schedule());
1801                         err = iam_txn_dirty(handle, path, new_leaf);
1802                         brelse(new_leaf);
1803                         if (err == 0)
1804                                 err = ldiskfs_mark_inode_dirty(handle, obj);
1805                         do_corr(schedule());
1806                 } else
1807                         err = -ENOMEM;
1808         }
1809         assert_inv(iam_leaf_check(leaf));
1810         assert_inv(iam_leaf_check(&iam_leaf_path(leaf)->ip_leaf));
1811         assert_inv(iam_path_check(iam_leaf_path(leaf)));
1812         return err;
1813 }
1814
1815 static inline void dx_set_limit(struct iam_entry *entries, unsigned value)
1816 {
1817         ((struct dx_countlimit *) entries)->limit = cpu_to_le16(value);
1818 }
1819
1820 static int iam_shift_entries(struct iam_path *path,
1821                          struct iam_frame *frame, unsigned count,
1822                          struct iam_entry *entries, struct iam_entry *entries2,
1823                          u32 newblock)
1824 {
1825         unsigned count1;
1826         unsigned count2;
1827         int delta;
1828
1829         struct iam_frame *parent = frame - 1;
1830         struct iam_ikey *pivot = iam_path_ikey(path, 3);
1831
1832         delta = dx_index_is_compat(path) ? 0 : +1;
1833
1834         count1 = count/2 + delta;
1835         count2 = count - count1;
1836         dx_get_ikey(path, iam_entry_shift(path, entries, count1), pivot);
1837
1838         dxtrace(printk("Split index %d/%d\n", count1, count2));
1839
1840         memcpy((char *) iam_entry_shift(path, entries2, delta),
1841                (char *) iam_entry_shift(path, entries, count1),
1842                count2 * iam_entry_size(path));
1843
1844         dx_set_count(entries2, count2 + delta);
1845         dx_set_limit(entries2, dx_node_limit(path));
1846
1847         /*
1848          * NOTE: very subtle piece of code. A competing dx_probe() may find the
1849          * 2nd-level index in the root index; we then insert the new index here
1850          * and set the new count in that 2nd-level index, so dx_probe() may see
1851          * a 2nd-level index without the hash it looks for. The solution is to
1852          * re-check the root index after locking the just found 2nd-level index -bzzz
1853          */
1854         iam_insert_key_lock(path, parent, pivot, newblock);
1855
1856         /*
1857          * now both the old and the new 2nd-level index blocks contain all the
1858          * pointers, so dx_probe() may find an entry in either of them. That's OK -bzzz
1859          */
1860         iam_lock_bh(frame->bh);
1861         dx_set_count(entries, count1);
1862         iam_unlock_bh(frame->bh);
1863
1864         /*
1865          * now the old 2nd-level index block points to the first half of the
1866          * leaves. It's important that dx_probe() checks the root index block for
1867          * changes under dx_lock_bh(frame->bh) -bzzz
1868          */
1869
1870         return count1;
1871 }
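
/*
 * A worked example of the split arithmetic above (illustrative): for a
 * non-compat container delta == +1, so with count == 508 entries count1 == 255
 * entries stay in the old index block, count2 == 253 entries are copied into
 * the new one, and the ikey at position count1 becomes the pivot that is
 * inserted into the parent for the new block.
 */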
1872
1873
1874 int split_index_node(handle_t *handle, struct iam_path *path,
1875                      struct dynlock_handle **lh)
1876 {
1877
1878         struct iam_entry *entries;   /* old block contents */
1879         struct iam_entry *entries2;  /* new block contents */
1880         struct iam_frame *frame, *safe;
1881         struct buffer_head *bh_new[DX_MAX_TREE_HEIGHT] = {NULL};
1882         u32 newblock[DX_MAX_TREE_HEIGHT] = {0};
1883         struct dynlock_handle *lock[DX_MAX_TREE_HEIGHT] = {NULL,};
1884         struct dynlock_handle *new_lock[DX_MAX_TREE_HEIGHT] = {NULL,};
1885         struct inode *dir = iam_path_obj(path);
1886         struct iam_descr *descr;
1887         int nr_splet;
1888         int i, err;
1889
1890         descr = iam_path_descr(path);
1891         /*
1892          * Algorithm below depends on this.
1893          */
1894         assert_corr(dx_root_limit(path) < dx_node_limit(path));
1895
1896         frame = path->ip_frame;
1897         entries = frame->entries;
1898
1899         /*
1900          * Tall-tree handling: we might have to split multiple index blocks
1901          * all the way up to the tree root. The tricky point here is error handling:
1902          * to avoid complicated undo/rollback we
1903          *
1904          *   - first allocate all necessary blocks
1905          *
1906          *   - insert pointers into them atomically.
1907          */
1908
1909         /*
1910          * Locking: the leaf is already locked. htree locks are acquired on all
1911          * index nodes that require a split, bottom-to-top, on the "safe" node,
1912          * and on all new nodes.
1913          */
1914
1915         dxtrace(printk("using %u of %u node entries\n",
1916                        dx_get_count(entries), dx_get_limit(entries)));
1917
1918         /* What levels need split? */
1919         for (nr_splet = 0; frame >= path->ip_frames &&
1920              dx_get_count(frame->entries) == dx_get_limit(frame->entries);
1921              --frame, ++nr_splet) {
1922                 do_corr(schedule());
1923                 if (nr_splet == DX_MAX_TREE_HEIGHT) {
1924                         /*
1925                         CWARN(dir->i_sb, __FUNCTION__,
1926                                      "Directory index full!\n");
1927                                      */
1928                         err = -ENOSPC;
1929                         goto cleanup;
1930                 }
1931         }
1932
1933         safe = frame;
1934
1935         /*
1936          * Lock all nodes, bottom to top.
1937          */
1938         for (frame = path->ip_frame, i = nr_splet; i >= 0; --i, --frame) {
1939                 do_corr(schedule());
1940                 lock[i] = iam_lock_htree(path->ip_container, frame->curidx,
1941                                          DLT_WRITE);
1942                 if (lock[i] == NULL) {
1943                         err = -ENOMEM;
1944                         goto cleanup;
1945                 }
1946         }
1947
1948         /*
1949          * Check for concurrent index modification.
1950          */
1951         err = iam_check_full_path(path, 1);
1952         if (err)
1953                 goto cleanup;
1954         /*
1955          * And check that the same number of nodes is to be split.
1956          */
1957         for (i = 0, frame = path->ip_frame; frame >= path->ip_frames &&
1958              dx_get_count(frame->entries) == dx_get_limit(frame->entries);
1959              --frame, ++i) {
1960                 ;
1961         }
1962         if (i != nr_splet) {
1963                 err = -EAGAIN;
1964                 goto cleanup;
1965         }
1966
1967         /* Go back down, allocating blocks, locking them, and adding them to
1968          * the transaction... */
1969         for (frame = safe + 1, i = 0; i < nr_splet; ++i, ++frame) {
1970                 bh_new[i] = iam_new_node(handle, path->ip_container,
1971                                          &newblock[i], &err);
1972                 do_corr(schedule());
1973                 if (!bh_new[i] ||
1974                     descr->id_ops->id_node_init(path->ip_container,
1975                                                 bh_new[i], 0) != 0)
1976                         goto cleanup;
1977                 new_lock[i] = iam_lock_htree(path->ip_container, newblock[i],
1978                                              DLT_WRITE);
1979                 if (new_lock[i] == NULL) {
1980                         err = -ENOMEM;
1981                         goto cleanup;
1982                 }
1983                 do_corr(schedule());
1984                 BUFFER_TRACE(frame->bh, "get_write_access");
1985                 err = ldiskfs_journal_get_write_access(handle, frame->bh);
1986                 if (err)
1987                         goto journal_error;
1988         }
1989         /* Add "safe" node to transaction too */
1990         if (safe + 1 != path->ip_frames) {
1991                 do_corr(schedule());
1992                 err = ldiskfs_journal_get_write_access(handle, safe->bh);
1993                 if (err)
1994                         goto journal_error;
1995         }
1996
1997         /* Go through nodes once more, inserting pointers */
1998         for (frame = safe + 1, i = 0; i < nr_splet; ++i, ++frame) {
1999                 unsigned count;
2000                 int idx;
2001                 struct buffer_head *bh2;
2002                 struct buffer_head *bh;
2003
2004                 entries = frame->entries;
2005                 count = dx_get_count(entries);
2006                 idx = iam_entry_diff(path, frame->at, entries);
2007
2008                 bh2 = bh_new[i];
2009                 entries2 = dx_get_entries(path, bh2->b_data, 0);
2010
2011                 bh = frame->bh;
2012                 if (frame == path->ip_frames) {
2013                         /* splitting root node. Tricky point:
2014                          *
2015                          * In the "normal" B-tree we'd split root *and* add
2016                          * In a "normal" B-tree we'd split the root *and* add a
2017                          * new root to the tree with pointers to the old root and
2018                          * its sibling (thus introducing two new nodes).
2019                          *
2020                          * In an htree it's enough to add one node, because the
2021                          * capacity of the root node is smaller than that of a
2022                          * non-root one.
2023                         struct iam_frame *frames;
2024                         struct iam_entry *next;
2025
2026                         assert_corr(i == 0);
2027
2028                         do_corr(schedule());
2029
2030                         frames = path->ip_frames;
2031                         memcpy((char *) entries2, (char *) entries,
2032                                count * iam_entry_size(path));
2033                         dx_set_limit(entries2, dx_node_limit(path));
2034
2035                         /* Set up root */
2036                         iam_lock_bh(frame->bh);
2037                         next = descr->id_ops->id_root_inc(path->ip_container,
2038                                                           path, frame);
2039                         dx_set_block(path, next, newblock[0]);
2040                         iam_unlock_bh(frame->bh);
2041
2042                         do_corr(schedule());
2043                         /* Shift frames in the path */
2044                         memmove(frames + 2, frames + 1,
2045                                 (sizeof path->ip_frames) - 2 * sizeof frames[0]);
2046                         /* Add new access path frame */
2047                         frames[1].at = iam_entry_shift(path, entries2, idx);
2048                         frames[1].entries = entries = entries2;
2049                         frames[1].bh = bh2;
2050                         assert_inv(dx_node_check(path, frame));
2051                         ++ path->ip_frame;
2052                         ++ frame;
2053                         assert_inv(dx_node_check(path, frame));
2054                         bh_new[0] = NULL; /* buffer head is "consumed" */
2055                         err = ldiskfs_handle_dirty_metadata(handle, NULL, bh2);
2056                         if (err)
2057                                 goto journal_error;
2058                         do_corr(schedule());
2059                 } else {
2060                         /* splitting non-root index node. */
2061                         struct iam_frame *parent = frame - 1;
2062
2063                         do_corr(schedule());
2064                         count = iam_shift_entries(path, frame, count,
2065                                               entries, entries2, newblock[i]);
2066                         /* Which index block gets the new entry? */
2067                         if (idx >= count) {
2068                                 int d = dx_index_is_compat(path) ? 0 : +1;
2069
2070                                 frame->at = iam_entry_shift(path, entries2,
2071                                                             idx - count + d);
2072                                 frame->entries = entries = entries2;
2073                                 frame->curidx = newblock[i];
2074                                 swap(frame->bh, bh2);
2075                                 assert_corr(lock[i + 1] != NULL);
2076                                 assert_corr(new_lock[i] != NULL);
2077                                 swap(lock[i + 1], new_lock[i]);
2078                                 bh_new[i] = bh2;
2079                                 parent->at = iam_entry_shift(path,
2080                                                              parent->at, +1);
2081                         }
2082                         assert_inv(dx_node_check(path, frame));
2083                         assert_inv(dx_node_check(path, parent));
2084                         dxtrace(dx_show_index ("node", frame->entries));
2085                         dxtrace(dx_show_index ("node",
2086                                ((struct dx_node *) bh2->b_data)->entries));
2087                         err = ldiskfs_handle_dirty_metadata(handle, NULL, bh2);
2088                         if (err)
2089                                 goto journal_error;
2090                         do_corr(schedule());
2091                         err = ldiskfs_handle_dirty_metadata(handle, NULL,
2092                                                             parent->bh);
2093                         if (err)
2094                                 goto journal_error;
2095                 }
2096                 do_corr(schedule());
2097                 err = ldiskfs_handle_dirty_metadata(handle, NULL, bh);
2098                 if (err)
2099                         goto journal_error;
2100         }
2101         /*
2102          * This function was called to make insertion of a new leaf possible.
2103          * Check that it fulfilled its obligations.
2104          */
2105         assert_corr(dx_get_count(path->ip_frame->entries) <
2106                     dx_get_limit(path->ip_frame->entries));
2107         assert_corr(lock[nr_splet] != NULL);
2108         *lh = lock[nr_splet];
2109         lock[nr_splet] = NULL;
2110         if (nr_splet > 0) {
2111                 /*
2112                  * Log ->i_size modification.
2113                  */
2114                 err = ldiskfs_mark_inode_dirty(handle, dir);
2115                 if (err)
2116                         goto journal_error;
2117         }
2118         goto cleanup;
2119 journal_error:
2120         ldiskfs_std_error(dir->i_sb, err);
2121
2122 cleanup:
2123         iam_unlock_array(path->ip_container, lock);
2124         iam_unlock_array(path->ip_container, new_lock);
2125
2126         assert_corr(err || iam_frame_is_locked(path, path->ip_frame));
2127
2128         do_corr(schedule());
2129         for (i = 0; i < ARRAY_SIZE(bh_new); ++i) {
2130                 if (bh_new[i] != NULL)
2131                         brelse(bh_new[i]);
2132         }
2133         return err;
2134 }
2135
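/*
 * Add the record @r with key @k at the position @path points to.  If the leaf
 * is full, index nodes are split first via split_index_node(); -EAGAIN from
 * the split (a concurrent tree modification) causes the path to be rebuilt
 * with iam_it_get_exact() and the split to be retried, while finding the key
 * already present turns into -EEXIST.  After a successful split the leaf
 * itself is split by iam_new_leaf() and the record is inserted into whichever
 * leaf now holds the insertion point.
 */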
2136 static int iam_add_rec(handle_t *handle, struct iam_iterator *it,
2137                        struct iam_path *path,
2138                        const struct iam_key *k, const struct iam_rec *r)
2139 {
2140         int err;
2141         struct iam_leaf *leaf;
2142
2143         leaf = &path->ip_leaf;
2144         assert_inv(iam_leaf_check(leaf));
2145         assert_inv(iam_path_check(path));
2146         err = iam_txn_add(handle, path, leaf->il_bh);
2147         if (err == 0) {
2148                 do_corr(schedule());
2149                 if (!iam_leaf_can_add(leaf, k, r)) {
2150                         struct dynlock_handle *lh = NULL;
2151
2152                         do {
2153                                 assert_corr(lh == NULL);
2154                                 do_corr(schedule());
2155                                 err = split_index_node(handle, path, &lh);
2156                                 if (err == -EAGAIN) {
2157                                         assert_corr(lh == NULL);
2158
2159                                         iam_path_fini(path);
2160                                         it->ii_state = IAM_IT_DETACHED;
2161
2162                                         do_corr(schedule());
2163                                         err = iam_it_get_exact(it, k);
2164                                         if (err == -ENOENT)
2165                                                 err = +1; /* repeat split */
2166                                         else if (err == 0)
2167                                                 err = -EEXIST;
2168                                 }
2169                         } while (err > 0);
2170                         assert_inv(iam_path_check(path));
2171                         if (err == 0) {
2172                                 assert_corr(lh != NULL);
2173                                 do_corr(schedule());
2174                                 err = iam_new_leaf(handle, leaf);
2175                                 if (err == 0)
2176                                         err = iam_txn_dirty(handle, path,
2177                                                             path->ip_frame->bh);
2178                         }
2179                         iam_unlock_htree(path->ip_container, lh);
2180                         do_corr(schedule());
2181                 }
2182                 if (err == 0) {
2183                         iam_leaf_rec_add(leaf, k, r);
2184                         err = iam_txn_dirty(handle, path, leaf->il_bh);
2185                 }
2186         }
2187         assert_inv(iam_leaf_check(leaf));
2188         assert_inv(iam_leaf_check(&path->ip_leaf));
2189         assert_inv(iam_path_check(path));
2190         return err;
2191 }
2192
2193 /*
2194  * Insert new record with key @k and contents from @r, shifting records to the
2195  * right. On success, iterator is positioned on the newly inserted record.
2196  *
2197  * precondition: it->ii_flags&IAM_IT_WRITE &&
2198  *               (it_state(it) == IAM_IT_ATTACHED ||
2199  *                it_state(it) == IAM_IT_SKEWED) &&
2200  *               ergo(it_state(it) == IAM_IT_ATTACHED,
2201  *                    it_keycmp(it, k) <= 0) &&
2202  *               ergo(it_before(it), it_keycmp(it, k) > 0);
2203  * postcondition: ergo(result == 0,
2204  *                     it_state(it) == IAM_IT_ATTACHED &&
2205  *                     it_keycmp(it, k) == 0 &&
2206  *                     !memcmp(iam_it_rec_get(it), r, ...))
2207  */
2208 int iam_it_rec_insert(handle_t *h, struct iam_iterator *it,
2209                       const struct iam_key *k, const struct iam_rec *r)
2210 {
2211         int result;
2212         struct iam_path *path;
2213
2214         path = &it->ii_path;
2215
2216         assert_corr(it->ii_flags&IAM_IT_WRITE);
2217         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
2218                     it_state(it) == IAM_IT_SKEWED);
2219         assert_corr(ergo(it_state(it) == IAM_IT_ATTACHED,
2220                          it_keycmp(it, k) <= 0));
2221         assert_corr(ergo(it_before(it), it_keycmp(it, k) > 0));
2222         result = iam_add_rec(h, it, path, k, r);
2223         if (result == 0)
2224                 it->ii_state = IAM_IT_ATTACHED;
2225         assert_corr(ergo(result == 0,
2226                          it_state(it) == IAM_IT_ATTACHED &&
2227                          it_keycmp(it, k) == 0));
2228         return result;
2229 }
2230
2231 static inline int iam_idle_blocks_limit(struct inode *inode)
2232 {
2233         return (inode->i_sb->s_blocksize - sizeof(struct iam_idle_head)) >> 2;
2234 }
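
/*
 * Example (illustrative): with a 4096-byte block the limit is
 * (4096 - sizeof(struct iam_idle_head)) / 4, i.e. slightly fewer than 1024
 * __u32 block numbers can be tracked in a single idle-blocks block.
 */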
2235
2236 /*
2237  * If the leaf cannot be recycled, we lose one block that could have been
2238  * reused; this is not a serious issue, since the result is almost the same as not recycling.
2239  */
2240 static iam_ptr_t iam_index_shrink(handle_t *h, struct iam_path *p,
2241                                   struct iam_leaf *l, struct buffer_head **bh)
2242 {
2243         struct iam_container *c = p->ip_container;
2244         struct inode *inode = c->ic_object;
2245         struct iam_frame *frame = p->ip_frame;
2246         struct iam_entry *entries;
2247         struct iam_entry *pos;
2248         struct dynlock_handle *lh;
2249         int count;
2250         int rc;
2251
2252         if (c->ic_idle_failed)
2253                 return 0;
2254
2255         if (unlikely(frame == NULL))
2256                 return 0;
2257
2258         if (!iam_leaf_empty(l))
2259                 return 0;
2260
2261         lh = iam_lock_htree(c, frame->curidx, DLT_WRITE);
2262         if (lh == NULL) {
2263                 CWARN("%.16s: No memory to recycle idle blocks\n",
2264                       LDISKFS_SB(inode->i_sb)->s_es->s_volume_name);
2265                 return 0;
2266         }
2267
2268         rc = iam_txn_add(h, p, frame->bh);
2269         if (rc != 0) {
2270                 iam_unlock_htree(c, lh);
2271                 return 0;
2272         }
2273
2274         iam_lock_bh(frame->bh);
2275         entries = frame->entries;
2276         count = dx_get_count(entries);
2277         /* Do NOT shrink the last entry in the index node; it can be reused
2278          * directly by the next new node. */
2279         if (count == 2) {
2280                 iam_unlock_bh(frame->bh);
2281                 iam_unlock_htree(c, lh);
2282                 return 0;
2283         }
2284
2285         pos = iam_find_position(p, frame);
2286         /* Some new leaf nodes may have been added, or empty leaf nodes may have
2287          * been shrunk, while our delete operation was in progress.
2288          *
2289          * If the empty leaf is no longer under the current index node because the
2290          * index node has been split, just skip the empty leaf; this is rare. */
2291         if (unlikely(frame->leaf != dx_get_block(p, pos))) {
2292                 iam_unlock_bh(frame->bh);
2293                 iam_unlock_htree(c, lh);
2294                 return 0;
2295         }
2296
2297         frame->at = pos;
2298         if (frame->at < iam_entry_shift(p, entries, count - 1)) {
2299                 struct iam_entry *n = iam_entry_shift(p, frame->at, 1);
2300
2301                 memmove(frame->at, n,
2302                         (char *)iam_entry_shift(p, entries, count) - (char *)n);
2303                 frame->at_shifted = 1;
2304         }
2305         dx_set_count(entries, count - 1);
2306         iam_unlock_bh(frame->bh);
2307         rc = iam_txn_dirty(h, p, frame->bh);
2308         iam_unlock_htree(c, lh);
2309         if (rc != 0)
2310                 return 0;
2311
2312         get_bh(l->il_bh);
2313         *bh = l->il_bh;
2314         return frame->leaf;
2315 }
2316
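/*
 * Turn the just-freed leaf block @bh (already under journal write access)
 * into the new head of the idle-blocks list: initialize an iam_idle_head in
 * it, chain the previous list behind it via iih_next, and store its block
 * number @blk in the root node so that iam_new_node() can reuse it later.
 * On failure the root pointer is rolled back to the old list head.
 */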
2317 static int
2318 iam_install_idle_blocks(handle_t *h, struct iam_path *p, struct buffer_head *bh,
2319                         __u32 *idle_blocks, iam_ptr_t blk)
2320 {
2321         struct iam_container *c = p->ip_container;
2322         struct buffer_head *old = c->ic_idle_bh;
2323         struct iam_idle_head *head;
2324         int rc;
2325
2326         head = (struct iam_idle_head *)(bh->b_data);
2327         head->iih_magic = cpu_to_le16(IAM_IDLE_HEADER_MAGIC);
2328         head->iih_count = 0;
2329         head->iih_next = *idle_blocks;
2330         /* Write access to the bh has already been obtained. */
2331         rc = iam_txn_dirty(h, p, bh);
2332         if (rc != 0)
2333                 return rc;
2334
2335         rc = iam_txn_add(h, p, c->ic_root_bh);
2336         if (rc != 0)
2337                 return rc;
2338
2339         iam_lock_bh(c->ic_root_bh);
2340         *idle_blocks = cpu_to_le32(blk);
2341         iam_unlock_bh(c->ic_root_bh);
2342         rc = iam_txn_dirty(h, p, c->ic_root_bh);
2343         if (rc == 0) {
2344                 /* Do NOT release the old buffer before the new one is assigned. */
2345                 get_bh(bh);
2346                 c->ic_idle_bh = bh;
2347                 brelse(old);
2348         } else {
2349                 iam_lock_bh(c->ic_root_bh);
2350                 *idle_blocks = head->iih_next;
2351                 iam_unlock_bh(c->ic_root_bh);
2352         }
2353         return rc;
2354 }
2355
2356 /*
2357  * If the leaf cannot be recycled, we lose one block that could have been
2358  * reused; this is not a serious issue, since the result is almost the same as not recycling.
2359  */
2360 static void iam_recycle_leaf(handle_t *h, struct iam_path *p,
2361                              struct buffer_head *bh, iam_ptr_t blk)
2362 {
2363         struct iam_container *c = p->ip_container;
2364         struct inode *inode = c->ic_object;
2365         struct iam_idle_head *head;
2366         __u32 *idle_blocks;
2367         int count;
2368         int rc;
2369
2370         mutex_lock(&c->ic_idle_mutex);
2371         if (unlikely(c->ic_idle_failed)) {
2372                 rc = -EFAULT;
2373                 goto unlock;
2374         }
2375
2376         idle_blocks = (__u32 *)(c->ic_root_bh->b_data +
2377                                 c->ic_descr->id_root_gap +
2378                                 sizeof(struct dx_countlimit));
2379         /* It is the first idle block. */
2380         if (c->ic_idle_bh == NULL) {
2381                 rc = iam_install_idle_blocks(h, p, bh, idle_blocks, blk);
2382                 goto unlock;
2383         }
2384
2385         head = (struct iam_idle_head *)(c->ic_idle_bh->b_data);
2386         count = le16_to_cpu(head->iih_count);
2387         /* Current ic_idle_bh is full, to be replaced by the leaf. */
2388         if (count == iam_idle_blocks_limit(inode)) {
2389                 rc = iam_install_idle_blocks(h, p, bh, idle_blocks, blk);
2390                 goto unlock;
2391         }
2392
2393         /* Just add to ic_idle_bh. */
2394         rc = iam_txn_add(h, p, c->ic_idle_bh);
2395         if (rc != 0)
2396                 goto unlock;
2397
2398         head->iih_blks[count] = cpu_to_le32(blk);
2399         head->iih_count = cpu_to_le16(count + 1);
2400         rc = iam_txn_dirty(h, p, c->ic_idle_bh);
2401
2402 unlock:
2403         mutex_unlock(&c->ic_idle_mutex);
2404         if (rc != 0)
2405                 CWARN("%.16s: idle blocks failed, will lose the blk %u\n",
2406                       LDISKFS_SB(inode->i_sb)->s_es->s_volume_name, blk);
2407 }
2408
2409 /*
2410  * Delete record under iterator.
2411  *
2412  * precondition:  it_state(it) == IAM_IT_ATTACHED &&
2413  *                it->ii_flags&IAM_IT_WRITE &&
2414  *                it_at_rec(it)
2415  * postcondition: it_state(it) == IAM_IT_ATTACHED ||
2416  *                it_state(it) == IAM_IT_DETACHED
2417  */
2418 int iam_it_rec_delete(handle_t *h, struct iam_iterator *it)
2419 {
2420         int result;
2421         struct iam_leaf *leaf;
2422         struct iam_path *path;
2423
2424         assert_corr(it_state(it) == IAM_IT_ATTACHED &&
2425                     it->ii_flags&IAM_IT_WRITE);
2426         assert_corr(it_at_rec(it));
2427
2428         path = &it->ii_path;
2429         leaf = &path->ip_leaf;
2430
2431         assert_inv(iam_leaf_check(leaf));
2432         assert_inv(iam_path_check(path));
2433
2434         result = iam_txn_add(h, path, leaf->il_bh);
2435         /*
2436          * no compaction for now.
2437          */
2438         if (result == 0) {
2439                 iam_rec_del(leaf, it->ii_flags&IAM_IT_MOVE);
2440                 result = iam_txn_dirty(h, path, leaf->il_bh);
2441                 if (result == 0 && iam_leaf_at_end(leaf)) {
2442                         struct buffer_head *bh = NULL;
2443                         iam_ptr_t blk;
2444
2445                         blk = iam_index_shrink(h, path, leaf, &bh);
2446                         if (it->ii_flags & IAM_IT_MOVE) {
2447                                 result = iam_it_next(it);
2448                                 if (result > 0)
2449                                         result = 0;
2450                         }
2451
2452                         if (bh != NULL) {
2453                                 iam_recycle_leaf(h, path, bh, blk);
2454                                 brelse(bh);
2455                         }
2456                 }
2457         }
2458         assert_inv(iam_leaf_check(leaf));
2459         assert_inv(iam_path_check(path));
2460         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
2461                     it_state(it) == IAM_IT_DETACHED);
2462         return result;
2463 }
2464
2465 /*
2466  * Convert iterator to cookie.
2467  *
2468  * precondition:  it_state(it) == IAM_IT_ATTACHED &&
2469  *                iam_path_descr(it->ii_path)->id_ikey_size <= sizeof(iam_pos_t)
2470  * postcondition: it_state(it) == IAM_IT_ATTACHED
2471  */
2472 iam_pos_t iam_it_store(const struct iam_iterator *it)
2473 {
2474         iam_pos_t result;
2475
2476         assert_corr(it_state(it) == IAM_IT_ATTACHED);
2477         assert_corr(it_at_rec(it));
2478         assert_corr(iam_it_container(it)->ic_descr->id_ikey_size <=
2479                     sizeof result);
2480
2481         result = 0;
2482         return *(iam_pos_t *)iam_it_ikey_get(it, (void *)&result);
2483 }
2484
2485 /*
2486  * Restore iterator from cookie.
2487  *
2488  * precondition:  it_state(it) == IAM_IT_DETACHED && it->ii_flags&IAM_IT_MOVE &&
2489  *                iam_path_descr(it->ii_path)->id_ikey_size <= sizeof(iam_pos_t)
2490  * postcondition: ergo(result == 0, it_state(it) == IAM_IT_ATTACHED &&
2491  *                                  iam_it_store(it) == pos)
2492  */
2493 int iam_it_load(struct iam_iterator *it, iam_pos_t pos)
2494 {
2495         assert_corr(it_state(it) == IAM_IT_DETACHED &&
2496                     it->ii_flags&IAM_IT_MOVE);
2497         assert_corr(iam_it_container(it)->ic_descr->id_ikey_size <= sizeof pos);
2498         return iam_it_iget(it, (struct iam_ikey *)&pos);
2499 }
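
/*
 * A minimal cookie round trip (illustrative only, error handling elided):
 * remember the position of an attached iterator, detach it, and restore it
 * later from the saved cookie.
 *
 *      iam_pos_t pos;
 *
 *      pos = iam_it_store(&it);
 *      iam_it_put(&it);
 *      ...
 *      if (iam_it_load(&it, pos) == 0) {
 *              ...  the iterator is attached at the saved position again
 *      }
 */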
2500
2501 /***********************************************************************/
2502 /* invariants                                                          */
2503 /***********************************************************************/
2504
2505 static inline int ptr_inside(void *base, size_t size, void *ptr)
2506 {
2507         return (base <= ptr) && (ptr < base + size);
2508 }
2509
2510 static int iam_frame_invariant(struct iam_frame *f)
2511 {
2512         return
2513                 (f->bh != NULL &&
2514                 f->bh->b_data != NULL &&
2515                 ptr_inside(f->bh->b_data, f->bh->b_size, f->entries) &&
2516                 ptr_inside(f->bh->b_data, f->bh->b_size, f->at) &&
2517                 f->entries <= f->at);
2518 }
2519
2520 static int iam_leaf_invariant(struct iam_leaf *l)
2521 {
2522         return
2523                 l->il_bh != NULL &&
2524                 l->il_bh->b_data != NULL &&
2525                 ptr_inside(l->il_bh->b_data, l->il_bh->b_size, l->il_entries) &&
2526                 ptr_inside(l->il_bh->b_data, l->il_bh->b_size, l->il_at) &&
2527                 l->il_entries <= l->il_at;
2528 }
2529
2530 static int iam_path_invariant(struct iam_path *p)
2531 {
2532         int i;
2533
2534         if (p->ip_container == NULL ||
2535             p->ip_indirect < 0 || p->ip_indirect > DX_MAX_TREE_HEIGHT - 1 ||
2536             p->ip_frame != p->ip_frames + p->ip_indirect ||
2537             !iam_leaf_invariant(&p->ip_leaf))
2538                 return 0;
2539         for (i = 0; i < ARRAY_SIZE(p->ip_frames); ++i) {
2540                 if (i <= p->ip_indirect) {
2541                         if (!iam_frame_invariant(&p->ip_frames[i]))
2542                                 return 0;
2543                 }
2544         }
2545         return 1;
2546 }
2547
2548 int iam_it_invariant(struct iam_iterator *it)
2549 {
2550         return
2551                 (it->ii_state == IAM_IT_DETACHED ||
2552                  it->ii_state == IAM_IT_ATTACHED ||
2553                  it->ii_state == IAM_IT_SKEWED) &&
2554                 !(it->ii_flags & ~(IAM_IT_MOVE | IAM_IT_WRITE)) &&
2555                 ergo(it->ii_state == IAM_IT_ATTACHED ||
2556                      it->ii_state == IAM_IT_SKEWED,
2557                      iam_path_invariant(&it->ii_path) &&
2558                      equi(it_at_rec(it), it->ii_state == IAM_IT_SKEWED));
2559 }
2560
2561 /*
2562  * Search container @c for record with key @k. If record is found, its data
2563  * are moved into @r.
2564  *
2565  * Return values: 0: found, -ENOENT: not-found, -ve: error
2566  */
2567 int iam_lookup(struct iam_container *c, const struct iam_key *k,
2568                struct iam_rec *r, struct iam_path_descr *pd)
2569 {
2570         struct iam_iterator it;
2571         int result;
2572
2573         iam_it_init(&it, c, 0, pd);
2574
2575         result = iam_it_get_exact(&it, k);
2576         if (result == 0)
2577                 /*
2578                  * record with required key found, copy it into user buffer
2579                  */
2580                 iam_reccpy(&it.ii_path.ip_leaf, r);
2581         iam_it_put(&it);
2582         iam_it_fini(&it);
2583         return result;
2584 }
2585
2586 /*
2587  * Insert new record @r with key @k into container @c (within context of
2588  * transaction @h).
2589  *
2590  * Return values: 0: success, -ve: error, including -EEXIST when record with
2591  * given key is already present.
2592  *
2593  * postcondition: ergo(result == 0 || result == -EEXIST,
2594  *                                  iam_lookup(c, k, r2) == 0);
2595  */
2596 int iam_insert(handle_t *h, struct iam_container *c, const struct iam_key *k,
2597                const struct iam_rec *r, struct iam_path_descr *pd)
2598 {
2599         struct iam_iterator it;
2600         int result;
2601
2602         iam_it_init(&it, c, IAM_IT_WRITE, pd);
2603
2604         result = iam_it_get_exact(&it, k);
2605         if (result == -ENOENT)
2606                 result = iam_it_rec_insert(h, &it, k, r);
2607         else if (result == 0)
2608                 result = -EEXIST;
2609         iam_it_put(&it);
2610         iam_it_fini(&it);
2611         return result;
2612 }
2613
2614 /*
2615  * Update record with the key @k in container @c (within context of
2616  * transaction @h), new record is given by @r.
2617  *
2618  * Return values: +1: skipped because the record value is unchanged, 0: success,
2619  * -ve: error, including -ENOENT if no record with the given key found.
2620  */
2621 int iam_update(handle_t *h, struct iam_container *c, const struct iam_key *k,
2622                const struct iam_rec *r, struct iam_path_descr *pd)
2623 {
2624         struct iam_iterator it;
2625         struct iam_leaf *folio;
2626         int result;
2627
2628         iam_it_init(&it, c, IAM_IT_WRITE, pd);
2629
2630         result = iam_it_get_exact(&it, k);
2631         if (result == 0) {
2632                 folio = &it.ii_path.ip_leaf;
2633                 result = iam_leaf_ops(folio)->rec_eq(folio, r);
2634                 if (result == 0)
2635                         iam_it_rec_set(h, &it, r);
2636                 else
2637                         result = 1;
2638         }
2639         iam_it_put(&it);
2640         iam_it_fini(&it);
2641         return result;
2642 }
2643
2644 /*
2645  * Delete existing record with key @k.
2646  *
2647  * Return values: 0: success, -ENOENT: not-found, -ve: other error.
2648  *
2649  * postcondition: ergo(result == 0 || result == -ENOENT,
2650  *                                 !iam_lookup(c, k, *));
2651  */
2652 int iam_delete(handle_t *h, struct iam_container *c, const struct iam_key *k,
2653                struct iam_path_descr *pd)
2654 {
2655         struct iam_iterator it;
2656         int result;
2657
2658         iam_it_init(&it, c, IAM_IT_WRITE, pd);
2659
2660         result = iam_it_get_exact(&it, k);
2661         if (result == 0)
2662                 iam_it_rec_delete(h, &it);
2663         iam_it_put(&it);
2664         iam_it_fini(&it);
2665         return result;
2666 }
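
/*
 * A minimal usage sketch of the flat interface above (illustrative only; the
 * container @c, path descriptor @pd, transaction handle @h, key @k and the
 * record buffers @r and @r2 are assumed to have been prepared by the caller):
 *
 *      rc = iam_insert(h, c, k, r, pd);
 *      if (rc == 0 || rc == -EEXIST)
 *              rc = iam_lookup(c, k, r2, pd);
 *      if (rc == 0)
 *              rc = iam_delete(h, c, k, pd);
 */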
2667
2668 int iam_root_limit(int rootgap, int blocksize, int size)
2669 {
2670         int limit;
2671         int nlimit;
2672
2673         limit = (blocksize - rootgap) / size;
2674         nlimit = blocksize / size;
2675         if (limit == nlimit)
2676                 limit--;
2677         return limit;
2678 }
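
/*
 * Example (illustrative): with blocksize == 4096, rootgap == 32 and an 8-byte
 * entry, the root node holds (4096 - 32) / 8 == 508 entries versus
 * 4096 / 8 == 512 in an ordinary node, so no adjustment is needed.  Only when
 * the gap is so small that both divisions give the same result is the limit
 * decremented, keeping the root strictly smaller than an ordinary node, which
 * split_index_node() asserts (dx_root_limit(path) < dx_node_limit(path)).
 */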