lustre/osd-ldiskfs/osd_iam.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see [sun.com URL with a
18  * copy of GPLv2].
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2011, 2012, Whamcloud, Inc.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * iam.c
37  * Top-level entry points into iam module
38  *
39  * Author: Wang Di <wangdi@clusterfs.com>
40  * Author: Nikita Danilov <nikita@clusterfs.com>
41  */
42
43 /*
44  * iam: big theory statement.
45  *
46  * iam (Index Access Module) is a module providing an abstraction of a
47  * persistent transactional container on top of the generalized ldiskfs htree.
48  *
49  * iam supports:
50  *
51  *     - key, pointer, and record size specifiable per container.
52  *
53  *     - trees taller than 2 index levels.
54  *
55  *     - read/write to existing ldiskfs htree directories as iam containers.
56  *
57  * iam container is a tree, consisting of leaf nodes containing keys and
58  * records stored in this container, and index nodes, containing keys and
59  * pointers to leaf or index nodes.
60  *
61  * iam does not work with keys directly; instead it calls a user-supplied key
62  * comparison function (->dpo_keycmp()).
63  *
64  * Pointers are (currently) interpreted as logical offsets (measured in
65  * blocks) within the underlying flat file on top of which the iam tree lives.
66  *
67  * On-disk format:
68  *
69  * iam mostly tries to reuse existing htree formats.
70  *
71  * Format of index node:
72  *
73  * +-----+-------+-------+-------+------+-------+------------+
74  * |     | count |       |       |      |       |            |
75  * | gap |   /   | entry | entry | .... | entry | free space |
76  * |     | limit |       |       |      |       |            |
77  * +-----+-------+-------+-------+------+-------+------------+
78  *
79  *       gap           this part of the node is never accessed by iam code. It
80  *                     exists for binary compatibility with ldiskfs htree (that,
81  *                     in turn, stores fake struct ext2_dirent for ext2
82  *                     compatibility), and to keep some unspecified per-node
83  *                     data. Gap can be different for root and non-root index
84  *                     nodes. Gap size can be specified for each container
85  *                     (gap of 0 is allowed).
86  *
87  *       count/limit   current number of entries in this node, and the maximal
88  *                     number of entries that can fit into the node. count/limit
89  *                     has the same size as an entry, and is itself counted in
90  *                     count.
91  *
92  *       entry         index entry: consists of a key immediately followed by
93  *                     a pointer to a child node. The sizes of the key and the
94  *                     pointer depend on the container. An entry has neither
95  *                     alignment nor padding.
96  *
97  *       free space    portion of the node to which new entries are added
98  *
99  * Entries in index node are sorted by their key value.
100  *
101  * Format of a leaf node is not specified. Generic iam code accesses leaf
102  * nodes through ->id_leaf methods in struct iam_descr.
103  *
104  * The IAM root block is a special node, which contains the IAM descriptor.
105  * Its on-disk format is:
106  *
107  * +---------+-------+--------+---------+-------+------+-------+------------+
108  * |IAM desc | count |  idle  |         |       |      |       |            |
109  * |(fix/var)|   /   | blocks | padding | entry | .... | entry | free space |
110  * |         | limit |        |         |       |      |       |            |
111  * +---------+-------+--------+---------+-------+------+-------+------------+
112  *
113  * The padding length is calculated with the parameters in the IAM descriptor.
114  *
115  * The field "idle_blocks" is used to record empty leaf nodes: nodes that have
116  * not been released although all entries in them have been removed. Ideally,
117  * such idle blocks would be reused whenever new leaf nodes are needed for new
118  * entries, but that would require the IAM hash functions to map the new
119  * entries to exactly these idle blocks. Unfortunately, it is not easy to
120  * design hash functions for such a clever mapping, especially considering
121  * insert/lookup performance.
122  *
123  * So the IAM recycles empty leaf nodes by putting them into a per-file pool
124  * of idle blocks. When a new leaf node is needed, a block is preferentially
125  * taken from this pool, regardless of how the IAM hash functions would map
126  * the entry.
127  *
128  * The idle blocks pool is organized as a series of tables, and each table
129  * can be described as following (on-disk format):
130  *
131  * +---------+---------+---------+---------+------+---------+-------+
132  * |  magic  |  count  |  next   |  logic  |      |  logic  | free  |
133  * |(16 bits)|(16 bits)|  table  |  blk #  | .... |  blk #  | space |
134  * |         |         |(32 bits)|(32 bits)|      |(32 bits)|       |
135  * +---------+---------+---------+---------+------+---------+-------+
136  *
137  * The logical blk # of the first table is stored in the root node "idle_blocks".
138  *
139  */
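
/*
 * For illustration only: the idle-block table header described above maps
 * naturally onto a small C struct.  The authoritative definition of
 * struct iam_idle_head lives in the headers included below; apart from
 * iih_magic, which iam_load_idle_blocks() checks against
 * IAM_IDLE_HEADER_MAGIC, the field names in this sketch are assumptions:
 *
 *        struct iam_idle_head {
 *                __le16 iih_magic;    -- IAM_IDLE_HEADER_MAGIC
 *                __le16 iih_count;    -- number of idle blocks listed here
 *                __le32 iih_next;     -- logical blk # of next table, 0 if none
 *                __le32 iih_blks[0];  -- logical blk #s of the idle leaf blocks
 *        };
 */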
140
141 #include <linux/module.h>
142 #include <linux/fs.h>
143 #include <linux/pagemap.h>
144 #include <linux/time.h>
145 #include <linux/fcntl.h>
146 #include <linux/stat.h>
147 #include <linux/string.h>
148 #include <linux/quotaops.h>
149 #include <linux/buffer_head.h>
150 #include "osd_internal.h"
151
152 #include "xattr.h"
153 #include "acl.h"
154
155 /*
156  * List of all registered formats.
157  *
158  * No locking. Callers synchronize.
159  */
160 static CFS_LIST_HEAD(iam_formats);
161
162 void iam_format_register(struct iam_format *fmt)
163 {
164         cfs_list_add(&fmt->if_linkage, &iam_formats);
165 }
166 EXPORT_SYMBOL(iam_format_register);
167
168 static struct buffer_head *
169 iam_load_idle_blocks(struct iam_container *c, iam_ptr_t blk)
170 {
171         struct inode *inode = c->ic_object;
172         struct iam_idle_head *head;
173         struct buffer_head *bh;
174         int err;
175
176         LASSERT_SEM_LOCKED(&c->ic_idle_sem);
177
178         if (blk == 0)
179                 return NULL;
180
181         bh = ldiskfs_bread(NULL, inode, blk, 0, &err);
182         if (bh == NULL) {
183                 CERROR("%.16s: cannot load idle blocks, blk = %u, err = %d\n",
184                        LDISKFS_SB(inode->i_sb)->s_es->s_volume_name, blk, err);
185                 c->ic_idle_failed = 1;
186                 return ERR_PTR(err);
187         }
188
189         head = (struct iam_idle_head *)(bh->b_data);
190         if (le16_to_cpu(head->iih_magic) != IAM_IDLE_HEADER_MAGIC) {
191                 CERROR("%.16s: invalid idle block head, blk = %u, magic = %d\n",
192                        LDISKFS_SB(inode->i_sb)->s_es->s_volume_name, blk,
193                        le16_to_cpu(head->iih_magic));
194                 brelse(bh);
195                 c->ic_idle_failed = 1;
196                 return ERR_PTR(-EBADF);
197         }
198
199         return bh;
200 }
201
202 /*
203  * Determine the format of the given container. This is done by scanning the
204  * list of registered formats and calling each one's ->if_guess() method in turn.
205  */
206 static int iam_format_guess(struct iam_container *c)
207 {
208         int result;
209         struct iam_format *fmt;
210
211         /*
212          * XXX temporary initialization hook.
213          */
214         {
215                 static int initialized = 0;
216
217                 if (!initialized) {
218                         iam_lvar_format_init();
219                         iam_lfix_format_init();
220                         initialized = 1;
221                 }
222         }
223
224         result = -ENOENT;
225         cfs_list_for_each_entry(fmt, &iam_formats, if_linkage) {
226                 result = fmt->if_guess(c);
227                 if (result == 0)
228                         break;
229         }
230
231         if (result == 0) {
232                 struct buffer_head *bh;
233                 __u32 *idle_blocks;
234
235                 LASSERT(c->ic_root_bh != NULL);
236
237                 idle_blocks = (__u32 *)(c->ic_root_bh->b_data +
238                                         c->ic_descr->id_root_gap +
239                                         sizeof(struct dx_countlimit));
240                 cfs_down(&c->ic_idle_sem);
241                 bh = iam_load_idle_blocks(c, le32_to_cpu(*idle_blocks));
242                 if (bh != NULL && IS_ERR(bh))
243                         result = PTR_ERR(bh);
244                 else
245                         c->ic_idle_bh = bh;
246                 cfs_up(&c->ic_idle_sem);
247         }
248
249         return result;
250 }
251
252 /*
253  * Initialize container @c.
254  */
255 int iam_container_init(struct iam_container *c,
256                        struct iam_descr *descr, struct inode *inode)
257 {
258         memset(c, 0, sizeof *c);
259         c->ic_descr  = descr;
260         c->ic_object = inode;
261         cfs_init_rwsem(&c->ic_sem);
262         dynlock_init(&c->ic_tree_lock);
263         cfs_sema_init(&c->ic_idle_sem, 1);
264         return 0;
265 }
266 EXPORT_SYMBOL(iam_container_init);
267
268 /*
269  * Determine container format.
270  */
271 int iam_container_setup(struct iam_container *c)
272 {
273         return iam_format_guess(c);
274 }
275 EXPORT_SYMBOL(iam_container_setup);
276
277 /*
278  * Finalize container @c, release all resources.
279  */
280 void iam_container_fini(struct iam_container *c)
281 {
282         brelse(c->ic_idle_bh);
283         c->ic_idle_bh = NULL;
284         brelse(c->ic_root_bh);
285         c->ic_root_bh = NULL;
286 }
287 EXPORT_SYMBOL(iam_container_fini);
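
/*
 * A minimal usage sketch (not built) of the container life cycle implemented
 * by the functions above; error handling is omitted and "descr"/"inode" stand
 * for a caller-provided format descriptor and the backing inode:
 *
 *        struct iam_container c;
 *
 *        iam_container_init(&c, descr, inode);  -- attach descriptor and object
 *        iam_container_setup(&c);               -- guess format, load root and
 *                                                  idle-blocks buffers
 *        -- ... lookups and iterations on &c ...
 *        iam_container_fini(&c);                -- release root and idle buffers
 */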
288
289 void iam_path_init(struct iam_path *path, struct iam_container *c,
290                    struct iam_path_descr *pd)
291 {
292         memset(path, 0, sizeof *path);
293         path->ip_container = c;
294         path->ip_frame = path->ip_frames;
295         path->ip_data = pd;
296         path->ip_leaf.il_path = path;
297 }
298
299 static void iam_leaf_fini(struct iam_leaf *leaf);
300
301 void iam_path_release(struct iam_path *path)
302 {
303         int i;
304
305         for (i = 0; i < ARRAY_SIZE(path->ip_frames); i++) {
306                 if (path->ip_frames[i].bh != NULL) {
307                         brelse(path->ip_frames[i].bh);
308                         path->ip_frames[i].bh = NULL;
309                 }
310         }
311 }
312
313 void iam_path_fini(struct iam_path *path)
314 {
315         iam_leaf_fini(&path->ip_leaf);
316         iam_path_release(path);
317 }
318
319
320 void iam_path_compat_init(struct iam_path_compat *path, struct inode *inode)
321 {
322         int i;
323
324         path->ipc_hinfo = &path->ipc_hinfo_area;
325         for (i = 0; i < ARRAY_SIZE(path->ipc_scratch); ++i)
326                 path->ipc_descr.ipd_key_scratch[i] =
327                         (struct iam_ikey *)&path->ipc_scratch[i];
328
329         iam_path_init(&path->ipc_path, &path->ipc_container, &path->ipc_descr);
330 }
331
332 void iam_path_compat_fini(struct iam_path_compat *path)
333 {
334         iam_path_fini(&path->ipc_path);
335 }
336
337 /*
338  * Helper function initializing iam_path_descr and its key scratch area.
339  */
340 struct iam_path_descr *iam_ipd_alloc(void *area, int keysize)
341 {
342         struct iam_path_descr *ipd;
343         void *karea;
344         int i;
345
346         ipd = area;
347         karea = ipd + 1;
348         for (i = 0; i < ARRAY_SIZE(ipd->ipd_key_scratch); ++i, karea += keysize)
349                 ipd->ipd_key_scratch[i] = karea;
350         return ipd;
351 }
352 EXPORT_SYMBOL(iam_ipd_alloc);
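
/*
 * Layout note for iam_ipd_alloc(): the caller-provided "area" must be large
 * enough for the descriptor itself followed by one scratch key per
 * ipd_key_scratch slot, i.e. at least
 *
 *        sizeof(struct iam_path_descr) +
 *                ARRAY_SIZE(ipd->ipd_key_scratch) * keysize
 *
 * bytes; the scratch keys are carved out of the tail of the area back to
 * back, keysize bytes apart, as the loop above shows.
 */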
353
354 void iam_ipd_free(struct iam_path_descr *ipd)
355 {
356 }
357 EXPORT_SYMBOL(iam_ipd_free);
358
359 int iam_node_read(struct iam_container *c, iam_ptr_t ptr,
360                   handle_t *h, struct buffer_head **bh)
361 {
362         int result = 0;
363
364         /* NB: it can be called by iam_lfix_guess() which is still at a
365          * very early stage, when c->ic_root_bh and c->ic_descr->id_ops
366          * haven't been initialized yet.
367          * Also, we don't have this for an IAM dir.
368          */
369         if (c->ic_root_bh != NULL &&
370             c->ic_descr->id_ops->id_root_ptr(c) == ptr) {
371                 get_bh(c->ic_root_bh);
372                 *bh = c->ic_root_bh;
373                 return 0;
374         }
375
376         *bh = ldiskfs_bread(h, c->ic_object, (int)ptr, 0, &result);
377         if (*bh == NULL)
378                 result = -EIO;
379         return result;
380 }
381
382 /*
383  * Return pointer to current leaf record. Pointer is valid while corresponding
384  * leaf node is locked and pinned.
385  */
386 static struct iam_rec *iam_leaf_rec(const struct iam_leaf *leaf)
387 {
388         return iam_leaf_ops(leaf)->rec(leaf);
389 }
390
391 /*
392  * Return pointer to the current leaf key. This function returns pointer to
393  * the key stored in node.
394  *
395  * Caller should assume that returned pointer is only valid while leaf node is
396  * pinned and locked.
397  */
398 static struct iam_key *iam_leaf_key(const struct iam_leaf *leaf)
399 {
400         return iam_leaf_ops(leaf)->key(leaf);
401 }
402
403 static int iam_leaf_key_size(const struct iam_leaf *leaf)
404 {
405         return iam_leaf_ops(leaf)->key_size(leaf);
406 }
407
408 static struct iam_ikey *iam_leaf_ikey(const struct iam_leaf *leaf,
409                                       struct iam_ikey *key)
410 {
411         return iam_leaf_ops(leaf)->ikey(leaf, key);
412 }
413
414 static int iam_leaf_keycmp(const struct iam_leaf *leaf,
415                            const struct iam_key *key)
416 {
417         return iam_leaf_ops(leaf)->key_cmp(leaf, key);
418 }
419
420 static int iam_leaf_keyeq(const struct iam_leaf *leaf,
421                           const struct iam_key *key)
422 {
423         return iam_leaf_ops(leaf)->key_eq(leaf, key);
424 }
425
426 #if LDISKFS_INVARIANT_ON
427 static int iam_leaf_check(struct iam_leaf *leaf);
428 extern int dx_node_check(struct iam_path *p, struct iam_frame *f);
429
430 static int iam_path_check(struct iam_path *p)
431 {
432         int i;
433         int result;
434         struct iam_frame *f;
435         struct iam_descr *param;
436
437         result = 1;
438         param = iam_path_descr(p);
439         for (i = 0; result && i < ARRAY_SIZE(p->ip_frames); ++i) {
440                 f = &p->ip_frames[i];
441                 if (f->bh != NULL) {
442                         result = dx_node_check(p, f);
443                         if (result)
444                                 result = !param->id_ops->id_node_check(p, f);
445                 }
446         }
447         if (result && p->ip_leaf.il_bh != NULL)
448                 result = iam_leaf_check(&p->ip_leaf);
449         if (result == 0) {
450                 ldiskfs_std_error(iam_path_obj(p)->i_sb, result);
451         }
452         return result;
453 }
454 #endif
455
456 static int iam_leaf_load(struct iam_path *path)
457 {
458         iam_ptr_t block;
459         int err;
460         struct iam_container *c;
461         struct buffer_head   *bh;
462         struct iam_leaf      *leaf;
463         struct iam_descr     *descr;
464
465         c     = path->ip_container;
466         leaf  = &path->ip_leaf;
467         descr = iam_path_descr(path);
468         block = path->ip_frame->leaf;
469         if (block == 0) {
470                 /* XXX bug 11027 */
471                 printk(CFS_KERN_EMERG "wrong leaf: %lu %d [%p %p %p]\n",
472                        (long unsigned)path->ip_frame->leaf,
473                        dx_get_count(dx_node_get_entries(path, path->ip_frame)),
474                        path->ip_frames[0].bh, path->ip_frames[1].bh,
475                        path->ip_frames[2].bh);
476         }
477         err   = descr->id_ops->id_node_read(c, block, NULL, &bh);
478         if (err == 0) {
479                 leaf->il_bh = bh;
480                 leaf->il_curidx = block;
481                 err = iam_leaf_ops(leaf)->init(leaf);
482                 assert_inv(ergo(err == 0, iam_leaf_check(leaf)));
483         }
484         return err;
485 }
486
487 static void iam_unlock_htree(struct iam_container *ic,
488                              struct dynlock_handle *lh)
489 {
490         if (lh != NULL)
491                 dynlock_unlock(&ic->ic_tree_lock, lh);
492 }
493
494
495 static void iam_leaf_unlock(struct iam_leaf *leaf)
496 {
497         if (leaf->il_lock != NULL) {
498                 iam_unlock_htree(iam_leaf_container(leaf),
499                                  leaf->il_lock);
500                 do_corr(schedule());
501                 leaf->il_lock = NULL;
502         }
503 }
504
505 static void iam_leaf_fini(struct iam_leaf *leaf)
506 {
507         if (leaf->il_path != NULL) {
508                 iam_leaf_unlock(leaf);
509                 assert_inv(ergo(leaf->il_bh != NULL, iam_leaf_check(leaf)));
510                 iam_leaf_ops(leaf)->fini(leaf);
511                 if (leaf->il_bh) {
512                         brelse(leaf->il_bh);
513                         leaf->il_bh = NULL;
514                         leaf->il_curidx = 0;
515                 }
516         }
517 }
518
519 static void iam_leaf_start(struct iam_leaf *folio)
520 {
521         iam_leaf_ops(folio)->start(folio);
522 }
523
524 void iam_leaf_next(struct iam_leaf *folio)
525 {
526         iam_leaf_ops(folio)->next(folio);
527 }
528
529 static void iam_leaf_rec_add(struct iam_leaf *leaf, const struct iam_key *key,
530                              const struct iam_rec *rec)
531 {
532         iam_leaf_ops(leaf)->rec_add(leaf, key, rec);
533 }
534
535 static void iam_rec_del(struct iam_leaf *leaf, int shift)
536 {
537         iam_leaf_ops(leaf)->rec_del(leaf, shift);
538 }
539
540 int iam_leaf_at_end(const struct iam_leaf *leaf)
541 {
542         return iam_leaf_ops(leaf)->at_end(leaf);
543 }
544
545 void iam_leaf_split(struct iam_leaf *l, struct buffer_head **bh, iam_ptr_t nr)
546 {
547         iam_leaf_ops(l)->split(l, bh, nr);
548 }
549
550 static inline int iam_leaf_empty(struct iam_leaf *l)
551 {
552         return iam_leaf_ops(l)->leaf_empty(l);
553 }
554
555 int iam_leaf_can_add(const struct iam_leaf *l,
556                      const struct iam_key *k, const struct iam_rec *r)
557 {
558         return iam_leaf_ops(l)->can_add(l, k, r);
559 }
560
561 #if LDISKFS_INVARIANT_ON
562 static int iam_leaf_check(struct iam_leaf *leaf)
563 {
564         return 1;
565 #if 0
566         struct iam_lentry    *orig;
567         struct iam_path      *path;
568         struct iam_container *bag;
569         struct iam_ikey       *k0;
570         struct iam_ikey       *k1;
571         int result;
572         int first;
573
574         orig = leaf->il_at;
575         path = iam_leaf_path(leaf);
576         bag  = iam_leaf_container(leaf);
577
578         result = iam_leaf_ops(leaf)->init(leaf);
579         if (result != 0)
580                 return result;
581
582         first = 1;
583         iam_leaf_start(leaf);
584         k0 = iam_path_ikey(path, 0);
585         k1 = iam_path_ikey(path, 1);
586         while (!iam_leaf_at_end(leaf)) {
587                 iam_ikeycpy(bag, k0, k1);
588                 iam_ikeycpy(bag, k1, iam_leaf_ikey(leaf, k1));
589                 if (!first && iam_ikeycmp(bag, k0, k1) > 0) {
590                         return 0;
591                 }
592                 first = 0;
593                 iam_leaf_next(leaf);
594         }
595         leaf->il_at = orig;
596         return 1;
597 #endif
598 }
599 #endif
600
601 static int iam_txn_dirty(handle_t *handle,
602                          struct iam_path *path, struct buffer_head *bh)
603 {
604         int result;
605
606         result = ldiskfs_journal_dirty_metadata(handle, bh);
607         if (result != 0)
608                 ldiskfs_std_error(iam_path_obj(path)->i_sb, result);
609         return result;
610 }
611
612 static int iam_txn_add(handle_t *handle,
613                        struct iam_path *path, struct buffer_head *bh)
614 {
615         int result;
616
617         result = ldiskfs_journal_get_write_access(handle, bh);
618         if (result != 0)
619                 ldiskfs_std_error(iam_path_obj(path)->i_sb, result);
620         return result;
621 }
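
/*
 * A minimal sketch of how the two helpers above bracket a buffer modification
 * within a transaction handle (iam_it_rec_set() below uses exactly this
 * pattern); error handling is omitted:
 *
 *        if (iam_txn_add(handle, path, bh) == 0) {
 *                -- ... modify bh->b_data ...
 *                iam_txn_dirty(handle, path, bh);
 *        }
 */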
622
623 /***********************************************************************/
624 /* iterator interface                                                  */
625 /***********************************************************************/
626
627 static enum iam_it_state it_state(const struct iam_iterator *it)
628 {
629         return it->ii_state;
630 }
631
632 /*
633  * Helper function returning the container an iterator is attached to.
634  */
635 static struct iam_container *iam_it_container(const struct iam_iterator *it)
636 {
637         return it->ii_path.ip_container;
638 }
639
640 static inline int it_keycmp(const struct iam_iterator *it,
641                             const struct iam_key *k)
642 {
643         return iam_leaf_keycmp(&it->ii_path.ip_leaf, k);
644 }
645
646 static inline int it_keyeq(const struct iam_iterator *it,
647                            const struct iam_key *k)
648 {
649         return iam_leaf_keyeq(&it->ii_path.ip_leaf, k);
650 }
651
652 static int it_ikeycmp(const struct iam_iterator *it, const struct iam_ikey *ik)
653 {
654         return iam_ikeycmp(it->ii_path.ip_container,
655                            iam_leaf_ikey(&it->ii_path.ip_leaf,
656                                          iam_path_ikey(&it->ii_path, 0)), ik);
657 }
658
659 static inline int it_at_rec(const struct iam_iterator *it)
660 {
661         return !iam_leaf_at_end(&it->ii_path.ip_leaf);
662 }
663
664 static inline int it_before(const struct iam_iterator *it)
665 {
666         return it_state(it) == IAM_IT_SKEWED && it_at_rec(it);
667 }
668
669 /*
670  * Helper wrapper around iam_it_get(): returns 0 (success) only when a record
671  * with exactly the key asked for is found.
672  */
673 static int iam_it_get_exact(struct iam_iterator *it, const struct iam_key *k)
674 {
675         int result;
676
677         result = iam_it_get(it, k);
678         if (result > 0)
679                 result = 0;
680         else if (result == 0)
681                 /*
682                  * Return -ENOENT if cursor is located above record with a key
683                  * different from one specified, or in the empty leaf.
684                  *
685                  * XXX returning -ENOENT only works if iam_it_get() never
686                  * returns -ENOENT as a legitimate error.
687                  */
688                 result = -ENOENT;
689         return result;
690 }
691
692 void iam_container_write_lock(struct iam_container *ic)
693 {
694         cfs_down_write(&ic->ic_sem);
695 }
696
697 void iam_container_write_unlock(struct iam_container *ic)
698 {
699         cfs_up_write(&ic->ic_sem);
700 }
701
702 void iam_container_read_lock(struct iam_container *ic)
703 {
704         cfs_down_read(&ic->ic_sem);
705 }
706
707 void iam_container_read_unlock(struct iam_container *ic)
708 {
709         cfs_up_read(&ic->ic_sem);
710 }
711
712 /*
713  * Initialize iterator to IAM_IT_DETACHED state.
714  *
715  * postcondition: it_state(it) == IAM_IT_DETACHED
716  */
717 int  iam_it_init(struct iam_iterator *it, struct iam_container *c, __u32 flags,
718                  struct iam_path_descr *pd)
719 {
720         memset(it, 0, sizeof *it);
721         it->ii_flags  = flags;
722         it->ii_state  = IAM_IT_DETACHED;
723         iam_path_init(&it->ii_path, c, pd);
724         return 0;
725 }
726 EXPORT_SYMBOL(iam_it_init);
727
728 /*
729  * Finalize iterator and release all resources.
730  *
731  * precondition: it_state(it) == IAM_IT_DETACHED
732  */
733 void iam_it_fini(struct iam_iterator *it)
734 {
735         assert_corr(it_state(it) == IAM_IT_DETACHED);
736         iam_path_fini(&it->ii_path);
737 }
738 EXPORT_SYMBOL(iam_it_fini);
739
740 /*
741  * These locking primitives are used to protect parts of the directory's
742  * htree. The protection unit is a block: a leaf or an index node.
743  */
744 static struct dynlock_handle *iam_lock_htree(struct iam_container *ic,
745                                              unsigned long value,
746                                              enum dynlock_type lt)
747 {
748         return dynlock_lock(&ic->ic_tree_lock, value, lt, GFP_NOFS);
749 }
750
751 int iam_index_lock(struct iam_path *path, struct dynlock_handle **lh)
752 {
753         struct iam_frame *f;
754
755         for (f = path->ip_frame; f >= path->ip_frames; --f, ++lh) {
756                 do_corr(schedule());
757                 *lh = iam_lock_htree(path->ip_container, f->curidx, DLT_READ);
758                 if (*lh == NULL)
759                         return -ENOMEM;
760         }
761         return 0;
762 }
763
764 /*
765  * Fast check for frame consistency.
766  */
767 static int iam_check_fast(struct iam_path *path, struct iam_frame *frame)
768 {
769         struct iam_container *bag;
770         struct iam_entry *next;
771         struct iam_entry *last;
772         struct iam_entry *entries;
773         struct iam_entry *at;
774
775         bag     = path->ip_container;
776         at      = frame->at;
777         entries = frame->entries;
778         last    = iam_entry_shift(path, entries, dx_get_count(entries) - 1);
779
780         if (unlikely(at > last))
781                 return -EAGAIN;
782
783         if (unlikely(dx_get_block(path, at) != frame->leaf))
784                 return -EAGAIN;
785
786         if (unlikely(iam_ikeycmp(bag, iam_ikey_at(path, at),
787                                  path->ip_ikey_target) > 0))
788                 return -EAGAIN;
789
790         next = iam_entry_shift(path, at, +1);
791         if (next <= last) {
792                 if (unlikely(iam_ikeycmp(bag, iam_ikey_at(path, next),
793                                          path->ip_ikey_target) <= 0))
794                         return -EAGAIN;
795         }
796         return 0;
797 }
798
799 int dx_index_is_compat(struct iam_path *path)
800 {
801         return iam_path_descr(path) == NULL;
802 }
803
804 /*
805  * iam_find_position
806  *
807  * Search for the position of the specified index key in an index node.
808  *
809  */
810
811 struct iam_entry *iam_find_position(struct iam_path *path,
812                                    struct iam_frame *frame)
813 {
814         int count;
815         struct iam_entry *p;
816         struct iam_entry *q;
817         struct iam_entry *m;
818
819         count = dx_get_count(frame->entries);
820         assert_corr(count && count <= dx_get_limit(frame->entries));
821         p = iam_entry_shift(path, frame->entries,
822                             dx_index_is_compat(path) ? 1 : 2);
823         q = iam_entry_shift(path, frame->entries, count - 1);
824         while (p <= q) {
825                 m = iam_entry_shift(path, p, iam_entry_diff(path, q, p) / 2);
826                 if (iam_ikeycmp(path->ip_container, iam_ikey_at(path, m),
827                                 path->ip_ikey_target) > 0)
828                         q = iam_entry_shift(path, m, -1);
829                 else
830                         p = iam_entry_shift(path, m, +1);
831         }
832         return iam_entry_shift(path, p, -1);
833 }
834
835
836
837 static iam_ptr_t iam_find_ptr(struct iam_path *path, struct iam_frame *frame)
838 {
839         return dx_get_block(path, iam_find_position(path, frame));
840 }
841
842 void iam_insert_key(struct iam_path *path, struct iam_frame *frame,
843                     const struct iam_ikey *key, iam_ptr_t ptr)
844 {
845         struct iam_entry *entries = frame->entries;
846         struct iam_entry *new = iam_entry_shift(path, frame->at, +1);
847         int count = dx_get_count(entries);
848
849         /*
850          * Unfortunately we cannot assert this, as this function is sometimes
851          * called by VFS under i_sem and without pdirops lock.
852          */
853         assert_corr(1 || iam_frame_is_locked(path, frame));
854         assert_corr(count < dx_get_limit(entries));
855         assert_corr(frame->at < iam_entry_shift(path, entries, count));
856         assert_inv(dx_node_check(path, frame));
857
858         memmove(iam_entry_shift(path, new, 1), new,
859                 (char *)iam_entry_shift(path, entries, count) - (char *)new);
860         dx_set_ikey(path, new, key);
861         dx_set_block(path, new, ptr);
862         dx_set_count(entries, count + 1);
863         assert_inv(dx_node_check(path, frame));
864 }
865
866 void iam_insert_key_lock(struct iam_path *path, struct iam_frame *frame,
867                          const struct iam_ikey *key, iam_ptr_t ptr)
868 {
869         iam_lock_bh(frame->bh);
870         iam_insert_key(path, frame, key, ptr);
871         iam_unlock_bh(frame->bh);
872 }
873 /*
874  * returns 0 if path was unchanged, -EAGAIN otherwise.
875  */
876 static int iam_check_path(struct iam_path *path, struct iam_frame *frame)
877 {
878         int equal;
879
880         iam_lock_bh(frame->bh);
881         equal = iam_check_fast(path, frame) == 0 ||
882                 frame->leaf == iam_find_ptr(path, frame);
883         DX_DEVAL(iam_lock_stats.dls_bh_again += !equal);
884         iam_unlock_bh(frame->bh);
885
886         return equal ? 0 : -EAGAIN;
887 }
888
889 static int iam_lookup_try(struct iam_path *path)
890 {
891         u32 ptr;
892         int err = 0;
893         int i;
894
895         struct iam_descr *param;
896         struct iam_frame *frame;
897         struct iam_container *c;
898
899         param = iam_path_descr(path);
900         c = path->ip_container;
901
902         ptr = param->id_ops->id_root_ptr(c);
903         for (frame = path->ip_frames, i = 0; i <= path->ip_indirect;
904              ++frame, ++i) {
905                 err = param->id_ops->id_node_read(c, (iam_ptr_t)ptr, NULL,
906                                                   &frame->bh);
907                 do_corr(schedule());
908
909                 iam_lock_bh(frame->bh);
910                 /*
911                  * node must be initialized under bh lock because concurrent
912                  * creation procedure may change it and iam_lookup_try() will
913                  * see obsolete tree height. -bzzz
914                  */
915                 if (err != 0)
916                         break;
917
918                 if (LDISKFS_INVARIANT_ON) {
919                         err = param->id_ops->id_node_check(path, frame);
920                         if (err != 0)
921                                 break;
922                 }
923
924                 err = param->id_ops->id_node_load(path, frame);
925                 if (err != 0)
926                         break;
927
928                 assert_inv(dx_node_check(path, frame));
929                 /*
930          * splitting may change the root index block and move the hash we're
931          * looking for into another index block, so we have to check for
932          * this situation and repeat from the beginning if the path got changed
933                  * -bzzz
934                  */
935                 if (i > 0) {
936                         err = iam_check_path(path, frame - 1);
937                         if (err != 0)
938                                 break;
939                 }
940
941                 frame->at = iam_find_position(path, frame);
942                 frame->curidx = ptr;
943                 frame->leaf = ptr = dx_get_block(path, frame->at);
944
945                 iam_unlock_bh(frame->bh);
946                 do_corr(schedule());
947         }
948         if (err != 0)
949                 iam_unlock_bh(frame->bh);
950         path->ip_frame = --frame;
951         return err;
952 }
953
954 static int __iam_path_lookup(struct iam_path *path)
955 {
956         int err;
957         int i;
958
959         for (i = 0; i < DX_MAX_TREE_HEIGHT; ++ i)
960                 assert(path->ip_frames[i].bh == NULL);
961
962         do {
963                 err = iam_lookup_try(path);
964                 do_corr(schedule());
965                 if (err != 0)
966                         iam_path_fini(path);
967         } while (err == -EAGAIN);
968
969         return err;
970 }
971
972 /*
973  * returns 0 if path was unchanged, -EAGAIN otherwise.
974  */
975 static int iam_check_full_path(struct iam_path *path, int search)
976 {
977         struct iam_frame *bottom;
978         struct iam_frame *scan;
979         int i;
980         int result;
981
982         do_corr(schedule());
983
984         for (bottom = path->ip_frames, i = 0;
985              i < DX_MAX_TREE_HEIGHT && bottom->bh != NULL; ++bottom, ++i) {
986                 ; /* find last filled in frame */
987         }
988
989         /*
990          * Lock frames, bottom to top.
991          */
992         for (scan = bottom - 1; scan >= path->ip_frames; --scan)
993                 iam_lock_bh(scan->bh);
994         /*
995          * Check them top to bottom.
996          */
997         result = 0;
998         for (scan = path->ip_frames; scan < bottom; ++scan) {
999                 struct iam_entry *pos;
1000
1001                 if (search) {
1002                         if (iam_check_fast(path, scan) == 0)
1003                                 continue;
1004
1005                         pos = iam_find_position(path, scan);
1006                         if (scan->leaf != dx_get_block(path, pos)) {
1007                                 result = -EAGAIN;
1008                                 break;
1009                         }
1010                         scan->at = pos;
1011                 } else {
1012                         pos = iam_entry_shift(path, scan->entries,
1013                                               dx_get_count(scan->entries) - 1);
1014                         if (scan->at > pos ||
1015                             scan->leaf != dx_get_block(path, scan->at)) {
1016                                 result = -EAGAIN;
1017                                 break;
1018                         }
1019                 }
1020         }
1021
1022         /*
1023          * Unlock top to bottom.
1024          */
1025         for (scan = path->ip_frames; scan < bottom; ++scan)
1026                 iam_unlock_bh(scan->bh);
1027         DX_DEVAL(iam_lock_stats.dls_bh_full_again += !!result);
1028         do_corr(schedule());
1029
1030         return result;
1031 }
1032
1033
1034 /*
1035  * Performs path lookup and returns with found leaf (if any) locked by htree
1036  * lock.
1037  */
1038 int iam_lookup_lock(struct iam_path *path,
1039                    struct dynlock_handle **dl, enum dynlock_type lt)
1040 {
1041         int result;
1042         struct inode *dir;
1043
1044         dir = iam_path_obj(path);
1045         while ((result = __iam_path_lookup(path)) == 0) {
1046                 do_corr(schedule());
1047                 *dl = iam_lock_htree(path->ip_container, path->ip_frame->leaf,
1048                                      lt);
1049                 if (*dl == NULL) {
1050                         iam_path_fini(path);
1051                         result = -ENOMEM;
1052                         break;
1053                 }
1054                 do_corr(schedule());
1055                 /*
1056          * while locking it, the leaf we just found may get split, so we
1057          * need to check this -bzzz
1058                  */
1059                 if (iam_check_full_path(path, 1) == 0)
1060                         break;
1061                 iam_unlock_htree(path->ip_container, *dl);
1062                 *dl = NULL;
1063                 iam_path_fini(path);
1064         }
1065         return result;
1066 }
1067 /*
1068  * Performs tree top-to-bottom traversal starting from root, and loads leaf
1069  * node.
1070  */
1071 static int iam_path_lookup(struct iam_path *path, int index)
1072 {
1073         struct iam_container *c;
1074         struct iam_descr *descr;
1075         struct iam_leaf  *leaf;
1076         int result;
1077
1078         c = path->ip_container;
1079         leaf = &path->ip_leaf;
1080         descr = iam_path_descr(path);
1081         result = iam_lookup_lock(path, &leaf->il_lock, DLT_WRITE);
1082         assert_inv(iam_path_check(path));
1083         do_corr(schedule());
1084         if (result == 0) {
1085                 result = iam_leaf_load(path);
1086                 assert_inv(ergo(result == 0, iam_leaf_check(leaf)));
1087                 if (result == 0) {
1088                         do_corr(schedule());
1089                         if (index)
1090                                 result = iam_leaf_ops(leaf)->
1091                                         ilookup(leaf, path->ip_ikey_target);
1092                         else
1093                                 result = iam_leaf_ops(leaf)->
1094                                         lookup(leaf, path->ip_key_target);
1095                         do_corr(schedule());
1096                 }
1097                 if (result < 0)
1098                         iam_leaf_unlock(leaf);
1099         }
1100         return result;
1101 }
1102
1103 /*
1104  * Common part of iam_it_{i,}get().
1105  */
1106 static int __iam_it_get(struct iam_iterator *it, int index)
1107 {
1108         int result;
1109         assert_corr(it_state(it) == IAM_IT_DETACHED);
1110
1111         result = iam_path_lookup(&it->ii_path, index);
1112         if (result >= 0) {
1113                 int collision;
1114
1115                 collision = result & IAM_LOOKUP_LAST;
1116                 switch (result & ~IAM_LOOKUP_LAST) {
1117                 case IAM_LOOKUP_EXACT:
1118                         result = +1;
1119                         it->ii_state = IAM_IT_ATTACHED;
1120                         break;
1121                 case IAM_LOOKUP_OK:
1122                         result = 0;
1123                         it->ii_state = IAM_IT_ATTACHED;
1124                         break;
1125                 case IAM_LOOKUP_BEFORE:
1126                 case IAM_LOOKUP_EMPTY:
1127                         result = 0;
1128                         it->ii_state = IAM_IT_SKEWED;
1129                         break;
1130                 default:
1131                         assert(0);
1132                 }
1133                 result |= collision;
1134         }
1135         /*
1136          * See iam_it_get_exact() for explanation.
1137          */
1138         assert_corr(result != -ENOENT);
1139         return result;
1140 }
1141
1142 /*
1143  * A record with the correct hash but not the same key was found; iterate
1144  * through the hash collision chain, looking for the correct record.
1145  */
1146 static int iam_it_collision(struct iam_iterator *it)
1147 {
1148         int result;
1149
1150         assert(ergo(it_at_rec(it), !it_keyeq(it, it->ii_path.ip_key_target)));
1151
1152         while ((result = iam_it_next(it)) == 0) {
1153                 do_corr(schedule());
1154                 if (it_ikeycmp(it, it->ii_path.ip_ikey_target) != 0)
1155                         return -ENOENT;
1156                 if (it_keyeq(it, it->ii_path.ip_key_target))
1157                         return 0;
1158         }
1159         return result;
1160 }
1161
1162 /*
1163  * Attach iterator. After successful completion, @it points to record with
1164  * least key not larger than @k.
1165  *
1166  * Return value: 0: positioned on existing record,
1167  *             +ve: exact position found,
1168  *             -ve: error.
1169  *
1170  * precondition:  it_state(it) == IAM_IT_DETACHED
1171  * postcondition: ergo(result == 0 && it_state(it) == IAM_IT_ATTACHED,
1172  *                     it_keycmp(it, k) <= 0)
1173  */
1174 int iam_it_get(struct iam_iterator *it, const struct iam_key *k)
1175 {
1176         int result;
1177         assert_corr(it_state(it) == IAM_IT_DETACHED);
1178
1179         it->ii_path.ip_ikey_target = NULL;
1180         it->ii_path.ip_key_target  = k;
1181
1182         result = __iam_it_get(it, 0);
1183
1184         if (result == IAM_LOOKUP_LAST) {
1185                 result = iam_it_collision(it);
1186                 if (result != 0) {
1187                         iam_it_put(it);
1188                         iam_it_fini(it);
1189                         result = __iam_it_get(it, 0);
1190                 } else
1191                         result = +1;
1192         }
1193         if (result > 0)
1194                 result &= ~IAM_LOOKUP_LAST;
1195
1196         assert_corr(ergo(result > 0, it_keycmp(it, k) == 0));
1197         assert_corr(ergo(result == 0 && it_state(it) == IAM_IT_ATTACHED,
1198                          it_keycmp(it, k) <= 0));
1199         return result;
1200 }
1201 EXPORT_SYMBOL(iam_it_get);
1202
1203 /*
1204  * Attach iterator by index key.
1205  */
1206 static int iam_it_iget(struct iam_iterator *it, const struct iam_ikey *k)
1207 {
1208         assert_corr(it_state(it) == IAM_IT_DETACHED);
1209
1210         it->ii_path.ip_ikey_target = k;
1211         return __iam_it_get(it, 1) & ~IAM_LOOKUP_LAST;
1212 }
1213
1214 /*
1215  * Attach iterator, and ensure it points to a record (not skewed).
1216  *
1217  * Return value: 0: positioned on existing record,
1218  *             +ve: exact position found,
1219  *             -ve: error.
1220  *
1221  * precondition:  it_state(it) == IAM_IT_DETACHED &&
1222  *                !(it->ii_flags&IAM_IT_WRITE)
1223  * postcondition: ergo(result == 0, it_state(it) == IAM_IT_ATTACHED)
1224  */
1225 int iam_it_get_at(struct iam_iterator *it, const struct iam_key *k)
1226 {
1227         int result;
1228         assert_corr(it_state(it) == IAM_IT_DETACHED &&
1229                     !(it->ii_flags&IAM_IT_WRITE));
1230         result = iam_it_get(it, k);
1231         if (result == 0) {
1232                 if (it_state(it) != IAM_IT_ATTACHED) {
1233                         assert_corr(it_state(it) == IAM_IT_SKEWED);
1234                         result = iam_it_next(it);
1235                 }
1236         }
1237         assert_corr(ergo(result >= 0, it_state(it) == IAM_IT_ATTACHED));
1238         return result;
1239 }
1240 EXPORT_SYMBOL(iam_it_get_at);
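
/*
 * A minimal usage sketch (not built) showing how the attach calls above are
 * typically combined with the other iterator operations in this file; error
 * handling is omitted and "c", "pd" and "key" stand for a caller-provided
 * container, path descriptor and key.  Per the assertion above, a
 * non-negative result from iam_it_get_at() leaves the iterator attached:
 *
 *        struct iam_iterator it;
 *
 *        iam_it_init(&it, c, IAM_IT_MOVE, pd);
 *        if (iam_it_get_at(&it, key) >= 0) {
 *                -- ... iam_it_key_get(&it) / iam_it_rec_get(&it) ...
 *                iam_it_put(&it);
 *        }
 *        iam_it_fini(&it);
 */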
1241
1242 /*
1243  * Duplicates iterator.
1244  *
1245  * postcondition: it_state(dst) == it_state(src) &&
1246  *                iam_it_container(dst) == iam_it_container(src) &&
1247  *                dst->ii_flags == src->ii_flags &&
1248  *                ergo(it_state(src) == IAM_IT_ATTACHED,
1249  *                     iam_it_rec_get(dst) == iam_it_rec_get(src) &&
1250  *                     iam_it_key_get(dst) == iam_it_key_get(src))
1251  */
1252 void iam_it_dup(struct iam_iterator *dst, const struct iam_iterator *src)
1253 {
1254         dst->ii_flags     = src->ii_flags;
1255         dst->ii_state     = src->ii_state;
1256         /* XXX not yet. iam_path_dup(&dst->ii_path, &src->ii_path); */
1257         /*
1258          * XXX: duplicate lock.
1259          */
1260         assert_corr(it_state(dst) == it_state(src));
1261         assert_corr(iam_it_container(dst) == iam_it_container(src));
1262         assert_corr(dst->ii_flags == src->ii_flags);
1263         assert_corr(ergo(it_state(src) == IAM_IT_ATTACHED,
1264                     iam_it_rec_get(dst) == iam_it_rec_get(src) &&
1265                     iam_it_key_get(dst) == iam_it_key_get(src)));
1266
1267 }
1268
1269 /*
1270  * Detach iterator. Does nothing in detached state.
1271  *
1272  * postcondition: it_state(it) == IAM_IT_DETACHED
1273  */
1274 void iam_it_put(struct iam_iterator *it)
1275 {
1276         if (it->ii_state != IAM_IT_DETACHED) {
1277                 it->ii_state = IAM_IT_DETACHED;
1278                 iam_leaf_fini(&it->ii_path.ip_leaf);
1279         }
1280 }
1281 EXPORT_SYMBOL(iam_it_put);
1282
1283 static struct iam_ikey *iam_it_ikey_get(const struct iam_iterator *it,
1284                                         struct iam_ikey *ikey);
1285
1286
1287 /*
1288  * This function increments the frame pointer to search the next leaf
1289  * block, and reads in the necessary intervening nodes if the search
1290  * should be necessary.  Whether or not the search is necessary is
1291  * controlled by the hash parameter.  If the hash value is even, then
1292  * the search is only continued if the next block starts with that
1293  * hash value.  This is used if we are searching for a specific file.
1294  *
1295  * If the hash value is HASH_NB_ALWAYS, then always go to the next block.
1296  *
1297  * This function returns 1 if the caller should continue to search,
1298  * or 0 if it should not.  If there is an error reading one of the
1299  * index blocks, it will a negative error code.
1300  *
1301  * If start_hash is non-null, it will be filled in with the starting
1302  * hash of the next page.
1303  */
1304 static int iam_htree_advance(struct inode *dir, __u32 hash,
1305                               struct iam_path *path, __u32 *start_hash,
1306                               int compat)
1307 {
1308         struct iam_frame *p;
1309         struct buffer_head *bh;
1310         int err, num_frames = 0;
1311         __u32 bhash;
1312
1313         p = path->ip_frame;
1314         /*
1315          * Find the next leaf page by incrementing the frame pointer.
1316          * If we run out of entries in the interior node, loop around and
1317          * increment pointer in the parent node.  When we break out of
1318          * this loop, num_frames indicates the number of interior
1319          * nodes need to be read.
1320          */
1321         while (1) {
1322                 do_corr(schedule());
1323                 iam_lock_bh(p->bh);
1324                 if (p->at_shifted)
1325                         p->at_shifted = 0;
1326                 else
1327                         p->at = iam_entry_shift(path, p->at, +1);
1328                 if (p->at < iam_entry_shift(path, p->entries,
1329                                             dx_get_count(p->entries))) {
1330                         p->leaf = dx_get_block(path, p->at);
1331                         iam_unlock_bh(p->bh);
1332                         break;
1333                 }
1334                 iam_unlock_bh(p->bh);
1335                 if (p == path->ip_frames)
1336                         return 0;
1337                 num_frames++;
1338                 --p;
1339         }
1340
1341         if (compat) {
1342                 /*
1343                  * Htree hash magic.
1344                  */
1345         /*
1346          * If the hash is 1, then continue only if the next page has a
1347          * continuation hash of any value.  This is used for readdir
1348          * handling.  Otherwise, check to see if the hash matches the
1349          * desired continuation hash.  If it doesn't, return since
1350          * there's no point to read in the successive index pages.
1351          */
1352                 dx_get_ikey(path, p->at, (struct iam_ikey *)&bhash);
1353         if (start_hash)
1354                 *start_hash = bhash;
1355         if ((hash & 1) == 0) {
1356                 if ((bhash & ~1) != hash)
1357                         return 0;
1358         }
1359         }
1360         /*
1361          * If the hash is HASH_NB_ALWAYS, we always go to the next
1362          * block so no check is necessary
1363          */
1364         while (num_frames--) {
1365                 iam_ptr_t idx;
1366
1367                 do_corr(schedule());
1368                 iam_lock_bh(p->bh);
1369                 idx = p->leaf = dx_get_block(path, p->at);
1370                 iam_unlock_bh(p->bh);
1371                 err = iam_path_descr(path)->id_ops->
1372                         id_node_read(path->ip_container, idx, NULL, &bh);
1373                 if (err != 0)
1374                         return err; /* Failure */
1375                 ++p;
1376                 brelse(p->bh);
1377                 assert_corr(p->bh != bh);
1378                 p->bh = bh;
1379                 p->entries = dx_node_get_entries(path, p);
1380                 p->at = iam_entry_shift(path, p->entries, !compat);
1381                 assert_corr(p->curidx != idx);
1382                 p->curidx = idx;
1383                 iam_lock_bh(p->bh);
1384                 assert_corr(p->leaf != dx_get_block(path, p->at));
1385                 p->leaf = dx_get_block(path, p->at);
1386                 iam_unlock_bh(p->bh);
1387                 assert_inv(dx_node_check(path, p));
1388         }
1389         return 1;
1390 }
1391
1392
1393 static inline int iam_index_advance(struct iam_path *path)
1394 {
1395         return iam_htree_advance(iam_path_obj(path), 0, path, NULL, 0);
1396 }
1397
1398 static void iam_unlock_array(struct iam_container *ic,
1399                              struct dynlock_handle **lh)
1400 {
1401         int i;
1402
1403         for (i = 0; i < DX_MAX_TREE_HEIGHT; ++i, ++lh) {
1404                 if (*lh != NULL) {
1405                         iam_unlock_htree(ic, *lh);
1406                         *lh = NULL;
1407                 }
1408         }
1409 }
1410 /*
1411  * Advance the index part of @path to point to the next leaf. Returns 1 on
1412  * success, 0 when the end of the container was reached. Leaf node is locked.
1413  */
1414 int iam_index_next(struct iam_container *c, struct iam_path *path)
1415 {
1416         iam_ptr_t cursor;
1417         struct dynlock_handle *lh[DX_MAX_TREE_HEIGHT] = { 0, };
1418         int result;
1419         struct inode *object;
1420
1421         /*
1422          * Locking for iam_index_next()... is to be described.
1423          */
1424
1425         object = c->ic_object;
1426         cursor = path->ip_frame->leaf;
1427
1428         while (1) {
1429                 result = iam_index_lock(path, lh);
1430                 do_corr(schedule());
1431                 if (result < 0)
1432                         break;
1433
1434                 result = iam_check_full_path(path, 0);
1435                 if (result == 0 && cursor == path->ip_frame->leaf) {
1436                         result = iam_index_advance(path);
1437
1438                         assert_corr(result == 0 ||
1439                                     cursor != path->ip_frame->leaf);
1440                         break;
1441                 }
1442                 do {
1443                         iam_unlock_array(c, lh);
1444
1445                         iam_path_release(path);
1446                         do_corr(schedule());
1447
1448                         result = __iam_path_lookup(path);
1449                         if (result < 0)
1450                                 break;
1451
1452                         while (path->ip_frame->leaf != cursor) {
1453                                 do_corr(schedule());
1454
1455                                 result = iam_index_lock(path, lh);
1456                                 do_corr(schedule());
1457                                 if (result < 0)
1458                                         break;
1459
1460                                 result = iam_check_full_path(path, 0);
1461                                 if (result != 0)
1462                                         break;
1463
1464                                 result = iam_index_advance(path);
1465                                 if (result == 0) {
1466                                         CERROR("cannot find cursor : %u\n",
1467                                                 cursor);
1468                                         result = -EIO;
1469                                 }
1470                                 if (result < 0)
1471                                         break;
1472                                 result = iam_check_full_path(path, 0);
1473                                 if (result != 0)
1474                                         break;
1475                                 iam_unlock_array(c, lh);
1476                         }
1477                 } while (result == -EAGAIN);
1478                 if (result < 0)
1479                         break;
1480         }
1481         iam_unlock_array(c, lh);
1482         return result;
1483 }
1484
1485 /*
1486  * Move iterator one record right.
1487  *
1488  * Return value: 0: success,
1489  *              +1: end of container reached
1490  *             -ve: error
1491  *
1492  * precondition:  (it_state(it) == IAM_IT_ATTACHED ||
1493  *                 it_state(it) == IAM_IT_SKEWED) && it->ii_flags&IAM_IT_MOVE
1494  * postcondition: ergo(result == 0, it_state(it) == IAM_IT_ATTACHED) &&
1495  *                ergo(result >  0, it_state(it) == IAM_IT_DETACHED)
1496  */
1497 int iam_it_next(struct iam_iterator *it)
1498 {
1499         int result;
1500         struct iam_path      *path;
1501         struct iam_leaf      *leaf;
1502         struct inode         *obj;
1503         do_corr(struct iam_ikey *ik_orig);
1504
1505         /* assert_corr(it->ii_flags&IAM_IT_MOVE); */
1506         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1507                     it_state(it) == IAM_IT_SKEWED);
1508
1509         path = &it->ii_path;
1510         leaf = &path->ip_leaf;
1511         obj  = iam_path_obj(path);
1512
1513         assert_corr(iam_leaf_is_locked(leaf));
1514
1515         result = 0;
1516         do_corr(ik_orig = it_at_rec(it) ?
1517                 iam_it_ikey_get(it, iam_path_ikey(path, 2)) : NULL);
1518         if (it_before(it)) {
1519                 assert_corr(!iam_leaf_at_end(leaf));
1520                 it->ii_state = IAM_IT_ATTACHED;
1521         } else {
1522                 if (!iam_leaf_at_end(leaf))
1523                         /* advance within leaf node */
1524                         iam_leaf_next(leaf);
1525                 /*
1526                  * multiple iterations may be necessary due to empty leaves.
1527                  */
1528                 while (result == 0 && iam_leaf_at_end(leaf)) {
1529                         do_corr(schedule());
1530                         /* advance index portion of the path */
1531                         result = iam_index_next(iam_it_container(it), path);
1532                         assert_corr(iam_leaf_is_locked(leaf));
1533                         if (result == 1) {
1534                                 struct dynlock_handle *lh;
1535                                 lh = iam_lock_htree(iam_it_container(it),
1536                                                     path->ip_frame->leaf,
1537                                                     DLT_WRITE);
1538                                 if (lh != NULL) {
1539                                         iam_leaf_fini(leaf);
1540                                         leaf->il_lock = lh;
1541                                         result = iam_leaf_load(path);
1542                                         if (result == 0)
1543                                                 iam_leaf_start(leaf);
1544                                 } else
1545                                         result = -ENOMEM;
1546                         } else if (result == 0)
1547                                 /* end of container reached */
1548                                 result = +1;
1549                         if (result != 0)
1550                                 iam_it_put(it);
1551                 }
1552                 if (result == 0)
1553                         it->ii_state = IAM_IT_ATTACHED;
1554         }
1555         assert_corr(ergo(result == 0, it_state(it) == IAM_IT_ATTACHED));
1556         assert_corr(ergo(result >  0, it_state(it) == IAM_IT_DETACHED));
1557         assert_corr(ergo(result == 0 && ik_orig != NULL,
1558                          it_ikeycmp(it, ik_orig) >= 0));
1559         return result;
1560 }
1561 EXPORT_SYMBOL(iam_it_next);
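
/*
 * A minimal sketch of scanning records starting from an existing key @k, in
 * the style of iam_lookup()/iam_insert() below (hypothetical caller code:
 * the container @c and path descriptor @pd are assumed to be set up by the
 * caller, and process_rec() is just a placeholder for whatever the caller
 * does with each record):
 *
 *     struct iam_iterator it;
 *     int rc;
 *
 *     iam_it_init(&it, c, IAM_IT_MOVE, pd);
 *     rc = iam_it_get_exact(&it, k);
 *     while (rc == 0) {
 *             process_rec(iam_it_rec_get(&it));
 *             rc = iam_it_next(&it);
 *     }
 *     iam_it_put(&it);
 *     iam_it_fini(&it);
 *
 * The loop terminates when iam_it_next() returns +1 (end of container) or a
 * negative error.
 */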
1562
1563 /*
1564  * Return pointer to the record under iterator.
1565  *
1566  * precondition:  it_state(it) == IAM_IT_ATTACHED && it_at_rec(it)
1567  * postcondition: it_state(it) == IAM_IT_ATTACHED
1568  */
1569 struct iam_rec *iam_it_rec_get(const struct iam_iterator *it)
1570 {
1571         assert_corr(it_state(it) == IAM_IT_ATTACHED);
1572         assert_corr(it_at_rec(it));
1573         return iam_leaf_rec(&it->ii_path.ip_leaf);
1574 }
1575 EXPORT_SYMBOL(iam_it_rec_get);
1576
1577 static void iam_it_reccpy(struct iam_iterator *it, const struct iam_rec *r)
1578 {
1579         struct iam_leaf *folio;
1580
1581         folio = &it->ii_path.ip_leaf;
1582         iam_leaf_ops(folio)->rec_set(folio, r);
1583 }
1584
1585 /*
1586  * Replace contents of record under iterator.
1587  *
1588  * precondition:  it_state(it) == IAM_IT_ATTACHED &&
1589  *                it->ii_flags&IAM_IT_WRITE
1590  * postcondition: it_state(it) == IAM_IT_ATTACHED &&
1591  *                ergo(result == 0, !memcmp(iam_it_rec_get(it), r, ...))
1592  */
1593 int iam_it_rec_set(handle_t *h,
1594                    struct iam_iterator *it, const struct iam_rec *r)
1595 {
1596         int result;
1597         struct iam_path *path;
1598         struct buffer_head *bh;
1599
1600         assert_corr(it_state(it) == IAM_IT_ATTACHED &&
1601                     it->ii_flags&IAM_IT_WRITE);
1602         assert_corr(it_at_rec(it));
1603
1604         path = &it->ii_path;
1605         bh   = path->ip_leaf.il_bh;
1606         result = iam_txn_add(h, path, bh);
1607         if (result == 0) {
1608                 iam_it_reccpy(it, r);
1609                 result = iam_txn_dirty(h, path, bh);
1610         }
1611         return result;
1612 }
1613 EXPORT_SYMBOL(iam_it_rec_set);
1614
1615 /*
1616  * Return pointer to the index key under iterator.
1617  *
1618  * precondition:  it_state(it) == IAM_IT_ATTACHED ||
1619  *                it_state(it) == IAM_IT_SKEWED
1620  */
1621 static struct iam_ikey *iam_it_ikey_get(const struct iam_iterator *it,
1622                                         struct iam_ikey *ikey)
1623 {
1624         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1625                     it_state(it) == IAM_IT_SKEWED);
1626         assert_corr(it_at_rec(it));
1627         return iam_leaf_ikey(&it->ii_path.ip_leaf, ikey);
1628 }
1629
1630 /*
1631  * Return pointer to the key under iterator.
1632  *
1633  * precondition:  it_state(it) == IAM_IT_ATTACHED ||
1634  *                it_state(it) == IAM_IT_SKEWED
1635  */
1636 struct iam_key *iam_it_key_get(const struct iam_iterator *it)
1637 {
1638         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1639                     it_state(it) == IAM_IT_SKEWED);
1640         assert_corr(it_at_rec(it));
1641         return iam_leaf_key(&it->ii_path.ip_leaf);
1642 }
1643 EXPORT_SYMBOL(iam_it_key_get);
1644
1645 /*
1646  * Return size of key under iterator (in bytes)
1647  *
1648  * precondition:  it_state(it) == IAM_IT_ATTACHED ||
1649  *                it_state(it) == IAM_IT_SKEWED
1650  */
1651 int iam_it_key_size(const struct iam_iterator *it)
1652 {
1653         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1654                     it_state(it) == IAM_IT_SKEWED);
1655         assert_corr(it_at_rec(it));
1656         return iam_leaf_key_size(&it->ii_path.ip_leaf);
1657 }
1658 EXPORT_SYMBOL(iam_it_key_size);
1659
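/*
 * Allocate a buffer head for a new tree node in container @c within the
 * context of transaction @h. The block number of the new node is returned
 * through @b and the error code, if any, through @e.
 *
 * If the container has a non-empty idle-block list, a block is reused from
 * it: either one of the block numbers recorded in the current ic_idle_bh or,
 * when that list block has no entries left, the list block itself. In that
 * case write access for the returned buffer is obtained with
 * ldiskfs_journal_get_write_access(). Otherwise a fresh block is appended to
 * the object with ldiskfs_append().
 */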
1660 struct buffer_head *
1661 iam_new_node(handle_t *h, struct iam_container *c, iam_ptr_t *b, int *e)
1662 {
1663         struct inode *inode = c->ic_object;
1664         struct buffer_head *bh = NULL;
1665         struct iam_idle_head *head;
1666         struct buffer_head *idle;
1667         __u32 *idle_blocks;
1668         __u16 count;
1669
1670         if (c->ic_idle_bh == NULL)
1671                 goto newblock;
1672
1673         cfs_down(&c->ic_idle_sem);
1674         if (unlikely(c->ic_idle_failed || c->ic_idle_bh == NULL)) {
1675                 cfs_up(&c->ic_idle_sem);
1676                 goto newblock;
1677         }
1678
1679         head = (struct iam_idle_head *)(c->ic_idle_bh->b_data);
1680         count = le16_to_cpu(head->iih_count);
1681         if (count > 0) {
1682                 *e = ldiskfs_journal_get_write_access(h, c->ic_idle_bh);
1683                 if (*e != 0)
1684                         goto fail;
1685
1686                 --count;
1687                 *b = le32_to_cpu(head->iih_blks[count]);
1688                 head->iih_count = cpu_to_le16(count);
1689                 *e = ldiskfs_journal_dirty_metadata(h, c->ic_idle_bh);
1690                 if (*e != 0)
1691                         goto fail;
1692
1693                 cfs_up(&c->ic_idle_sem);
1694                 bh = ldiskfs_bread(NULL, inode, *b, 0, e);
1695                 if (bh == NULL)
1696                         return NULL;
1697                 goto got;
1698         }
1699
1700         /* The block which contains the iam_idle_head itself is also an
1701          * idle block, and can be used as the new node. */
1702         idle_blocks = (__u32 *)(c->ic_root_bh->b_data +
1703                                 c->ic_descr->id_root_gap +
1704                                 sizeof(struct dx_countlimit));
1705         *e = ldiskfs_journal_get_write_access(h, c->ic_root_bh);
1706         if (*e != 0)
1707                 goto fail;
1708
1709         *b = le32_to_cpu(*idle_blocks);
1710         iam_lock_bh(c->ic_root_bh);
1711         *idle_blocks = head->iih_next;
1712         iam_unlock_bh(c->ic_root_bh);
1713         *e = ldiskfs_journal_dirty_metadata(h, c->ic_root_bh);
1714         if (*e != 0) {
1715                 iam_lock_bh(c->ic_root_bh);
1716                 *idle_blocks = cpu_to_le32(*b);
1717                 iam_unlock_bh(c->ic_root_bh);
1718                 goto fail;
1719         }
1720
1721         bh = c->ic_idle_bh;
1722         idle = iam_load_idle_blocks(c, le32_to_cpu(*idle_blocks));
1723         if (idle != NULL && IS_ERR(idle)) {
1724                 *e = PTR_ERR(idle);
1725                 c->ic_idle_bh = NULL;
1726                 brelse(bh);
1727                 goto fail;
1728         }
1729
1730         c->ic_idle_bh = idle;
1731         cfs_up(&c->ic_idle_sem);
1732
1733 got:
1734         /* get write access for the found buffer head */
1735         *e = ldiskfs_journal_get_write_access(h, bh);
1736         if (*e != 0) {
1737                 brelse(bh);
1738                 bh = NULL;
1739                 ldiskfs_std_error(inode->i_sb, *e);
1740         }
1741         return bh;
1742
1743 newblock:
1744         bh = ldiskfs_append(h, inode, b, e);
1745         return bh;
1746
1747 fail:
1748         cfs_up(&c->ic_idle_sem);
1749         ldiskfs_std_error(inode->i_sb, *e);
1750         return NULL;
1751 }
1752
1753 /*
1754  * Insertion of a new record. Interaction with jbd in the non-trivial case
1755  * (when a split happens) is as follows:
1756  *
1757  *  - the new leaf node is added to the transaction by iam_new_node();
1758  *
1759  *  - the old leaf node is added to the transaction by iam_add_rec();
1760  *
1761  *  - the leaf where the insertion point ends up is marked dirty by iam_add_rec();
1762  *
1763  *  - the leaf without the insertion point is marked dirty (as @new_leaf) by
1764  *    iam_new_leaf();
1765  *
1766  *  - split index nodes are added to the transaction and marked dirty by
1767  *    split_index_node();
1768  *
1769  *  - the "safe" index node (not split itself, but receiving the new pointer)
1770  *    is added to the transaction and marked dirty by split_index_node();
1771  *
1772  *  - the index node where the pointer to the new leaf is inserted is added to
1773  *    the transaction by split_index_node() and marked dirty by iam_add_rec();
1774  *
1775  *  - the inode is marked dirty by iam_add_rec().
1776  *
1777  */
1778
1779 static int iam_new_leaf(handle_t *handle, struct iam_leaf *leaf)
1780 {
1781         int err;
1782         iam_ptr_t blknr;
1783         struct buffer_head   *new_leaf;
1784         struct buffer_head   *old_leaf;
1785         struct iam_container *c;
1786         struct inode         *obj;
1787         struct iam_path      *path;
1788
1789         assert_inv(iam_leaf_check(leaf));
1790
1791         c = iam_leaf_container(leaf);
1792         path = leaf->il_path;
1793
1794         obj = c->ic_object;
1795         new_leaf = iam_new_node(handle, c, &blknr, &err);
1796         do_corr(schedule());
1797         if (new_leaf != NULL) {
1798                 struct dynlock_handle *lh;
1799
1800                 lh = iam_lock_htree(c, blknr, DLT_WRITE);
1801                 do_corr(schedule());
1802                 if (lh != NULL) {
1803                         iam_leaf_ops(leaf)->init_new(c, new_leaf);
1804                         do_corr(schedule());
1805                         old_leaf = leaf->il_bh;
1806                         iam_leaf_split(leaf, &new_leaf, blknr);
1807                         if (old_leaf != leaf->il_bh) {
1808                                 /*
1809                                  * Switched to the new leaf.
1810                                  */
1811                                 iam_leaf_unlock(leaf);
1812                                 leaf->il_lock = lh;
1813                                 path->ip_frame->leaf = blknr;
1814                         } else
1815                                 iam_unlock_htree(path->ip_container, lh);
1816                         do_corr(schedule());
1817                         err = iam_txn_dirty(handle, path, new_leaf);
1818                         brelse(new_leaf);
1819                         if (err == 0)
1820                                 err = ldiskfs_mark_inode_dirty(handle, obj);
1821                         do_corr(schedule());
1822                 } else
1823                         err = -ENOMEM;
1824         }
1825         assert_inv(iam_leaf_check(leaf));
1826         assert_inv(iam_leaf_check(&iam_leaf_path(leaf)->ip_leaf));
1827         assert_inv(iam_path_check(iam_leaf_path(leaf)));
1828         return err;
1829 }
1830
1831 static inline void dx_set_limit(struct iam_entry *entries, unsigned value)
1832 {
1833         ((struct dx_countlimit *) entries)->limit = cpu_to_le16(value);
1834 }
1835
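/*
 * Split the index node in @frame, which currently holds @count entries:
 * move the upper half of @entries (the old block) into @entries2 (the new
 * block at @newblock), set the count and limit of the new block, insert the
 * pivot key together with a pointer to @newblock into the parent frame, and
 * finally shrink the old block's count under iam_lock_bh(). Returns the
 * number of entries left in the old block.
 */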
1836 static int iam_shift_entries(struct iam_path *path,
1837                          struct iam_frame *frame, unsigned count,
1838                          struct iam_entry *entries, struct iam_entry *entries2,
1839                          u32 newblock)
1840 {
1841         unsigned count1;
1842         unsigned count2;
1843         int delta;
1844
1845         struct iam_frame *parent = frame - 1;
1846         struct iam_ikey *pivot = iam_path_ikey(path, 3);
1847
1848         delta = dx_index_is_compat(path) ? 0 : +1;
1849
1850         count1 = count/2 + delta;
1851         count2 = count - count1;
1852         dx_get_ikey(path, iam_entry_shift(path, entries, count1), pivot);
1853
1854         dxtrace(printk("Split index %d/%d\n", count1, count2));
1855
1856         memcpy((char *) iam_entry_shift(path, entries2, delta),
1857                (char *) iam_entry_shift(path, entries, count1),
1858                count2 * iam_entry_size(path));
1859
1860         dx_set_count(entries2, count2 + delta);
1861         dx_set_limit(entries2, dx_node_limit(path));
1862
1863         /*
1864          * NOTE: very subtle piece of code. A competing dx_probe() may find the
1865          * 2nd level index in the root index; then we insert a new index here and
1866          * set a new count in that 2nd level index, so dx_probe() may see the 2nd
1867          * level index without the hash it looks for. The solution is to re-check
1868          * the root index after locking the just found 2nd level index -bzzz
1869          */
1870         iam_insert_key_lock(path, parent, pivot, newblock);
1871
1872         /*
1873          * now old and new 2nd level index blocks contain all pointers, so
1874          * dx_probe() may find it in both. That is OK -bzzz
1875          */
1876         iam_lock_bh(frame->bh);
1877         dx_set_count(entries, count1);
1878         iam_unlock_bh(frame->bh);
1879
1880         /*
1881          * now the old 2nd level index block points to the first half of the
1882          * leaves. It is important that dx_probe() checks the root index block
1883          * for changes under dx_lock_bh(frame->bh) -bzzz
1884          */
1885
1886         return count1;
1887 }
1888
1889
1890 int split_index_node(handle_t *handle, struct iam_path *path,
1891                      struct dynlock_handle **lh)
1892 {
1893
1894         struct iam_entry *entries;   /* old block contents */
1895         struct iam_entry *entries2;  /* new block contents */
1896         struct iam_frame *frame, *safe;
1897         struct buffer_head *bh_new[DX_MAX_TREE_HEIGHT] = {0};
1898         u32 newblock[DX_MAX_TREE_HEIGHT] = {0};
1899         struct dynlock_handle *lock[DX_MAX_TREE_HEIGHT] = {NULL,};
1900         struct dynlock_handle *new_lock[DX_MAX_TREE_HEIGHT] = {NULL,};
1901         struct inode *dir = iam_path_obj(path);
1902         struct iam_descr *descr;
1903         int nr_splet;
1904         int i, err;
1905
1906         descr = iam_path_descr(path);
1907         /*
1908          * Algorithm below depends on this.
1909          */
1910         assert_corr(dx_root_limit(path) < dx_node_limit(path));
1911
1912         frame = path->ip_frame;
1913         entries = frame->entries;
1914
1915         /*
1916          * Tall-tree handling: we might have to split multiple index blocks all
1917          * the way up to the tree root. The tricky point is error handling: to
1918          * avoid complicated undo/rollback we
1919          *
1920          *   - first allocate all necessary blocks
1921          *
1922          *   - insert pointers into them atomically.
1923          */
1924
1925         /*
1926          * Locking: the leaf is already locked. htree locks are acquired on all
1927          * index nodes that require a split, bottom-to-top, on the "safe" node,
1928          * and on all new nodes.
1929          */
1930
1931         dxtrace(printk("using %u of %u node entries\n",
1932                        dx_get_count(entries), dx_get_limit(entries)));
1933
1934         /* What levels need split? */
1935         for (nr_splet = 0; frame >= path->ip_frames &&
1936              dx_get_count(frame->entries) == dx_get_limit(frame->entries);
1937              --frame, ++nr_splet) {
1938                 do_corr(schedule());
1939                 if (nr_splet == DX_MAX_TREE_HEIGHT) {
1940                         /*
1941                         CWARN(dir->i_sb, __FUNCTION__,
1942                                      "Directory index full!\n");
1943                                      */
1944                         err = -ENOSPC;
1945                         goto cleanup;
1946                 }
1947         }
1948
1949         safe = frame;
1950
1951         /*
1952          * Lock all nodes, bottom to top.
1953          */
1954         for (frame = path->ip_frame, i = nr_splet; i >= 0; --i, --frame) {
1955                 do_corr(schedule());
1956                 lock[i] = iam_lock_htree(path->ip_container, frame->curidx,
1957                                          DLT_WRITE);
1958                 if (lock[i] == NULL) {
1959                         err = -ENOMEM;
1960                         goto cleanup;
1961                 }
1962         }
1963
1964         /*
1965          * Check for concurrent index modification.
1966          */
1967         err = iam_check_full_path(path, 1);
1968         if (err)
1969                 goto cleanup;
1970         /*
1971          * And check that the same number of nodes is to be split.
1972          */
1973         for (i = 0, frame = path->ip_frame; frame >= path->ip_frames &&
1974              dx_get_count(frame->entries) == dx_get_limit(frame->entries);
1975              --frame, ++i) {
1976                 ;
1977         }
1978         if (i != nr_splet) {
1979                 err = -EAGAIN;
1980                 goto cleanup;
1981         }
1982
1983         /* Go back down, allocating blocks, locking them, and adding into
1984          * transaction... */
1985         for (frame = safe + 1, i = 0; i < nr_splet; ++i, ++frame) {
1986                 bh_new[i] = iam_new_node(handle, path->ip_container,
1987                                          &newblock[i], &err);
1988                 do_corr(schedule());
1989                 if (!bh_new[i] ||
1990                     descr->id_ops->id_node_init(path->ip_container,
1991                                                 bh_new[i], 0) != 0)
1992                         goto cleanup;
1993                 new_lock[i] = iam_lock_htree(path->ip_container, newblock[i],
1994                                              DLT_WRITE);
1995                 if (new_lock[i] == NULL) {
1996                         err = -ENOMEM;
1997                         goto cleanup;
1998                 }
1999                 do_corr(schedule());
2000                 BUFFER_TRACE(frame->bh, "get_write_access");
2001                 err = ldiskfs_journal_get_write_access(handle, frame->bh);
2002                 if (err)
2003                         goto journal_error;
2004         }
2005         /* Add "safe" node to transaction too */
2006         if (safe + 1 != path->ip_frames) {
2007                 do_corr(schedule());
2008                 err = ldiskfs_journal_get_write_access(handle, safe->bh);
2009                 if (err)
2010                         goto journal_error;
2011         }
2012
2013         /* Go through nodes once more, inserting pointers */
2014         for (frame = safe + 1, i = 0; i < nr_splet; ++i, ++frame) {
2015                 unsigned count;
2016                 int idx;
2017                 struct buffer_head *bh2;
2018                 struct buffer_head *bh;
2019
2020                 entries = frame->entries;
2021                 count = dx_get_count(entries);
2022                 idx = iam_entry_diff(path, frame->at, entries);
2023
2024                 bh2 = bh_new[i];
2025                 entries2 = dx_get_entries(path, bh2->b_data, 0);
2026
2027                 bh = frame->bh;
2028                 if (frame == path->ip_frames) {
2029                         /* splitting root node. Tricky point:
2030                          *
2031                          * In the "normal" B-tree we'd split root *and* add
2032                          * new root to the tree with pointers to the old root
2033                          * and its sibling (thus introducing two new nodes).
2034                          *
2035                          * In htree it's enough to add one node, because the
2036                          * capacity of the root node is smaller than that of a
2037                          * non-root one.
2038                          */
2039                         struct iam_frame *frames;
2040                         struct iam_entry *next;
2041
2042                         assert_corr(i == 0);
2043
2044                         do_corr(schedule());
2045
2046                         frames = path->ip_frames;
2047                         memcpy((char *) entries2, (char *) entries,
2048                                count * iam_entry_size(path));
2049                         dx_set_limit(entries2, dx_node_limit(path));
2050
2051                         /* Set up root */
2052                         iam_lock_bh(frame->bh);
2053                         next = descr->id_ops->id_root_inc(path->ip_container,
2054                                                           path, frame);
2055                         dx_set_block(path, next, newblock[0]);
2056                         iam_unlock_bh(frame->bh);
2057
2058                         do_corr(schedule());
2059                         /* Shift frames in the path */
2060                         memmove(frames + 2, frames + 1,
2061                                 (sizeof path->ip_frames) - 2 * sizeof frames[0]);
2062                         /* Add new access path frame */
2063                         frames[1].at = iam_entry_shift(path, entries2, idx);
2064                         frames[1].entries = entries = entries2;
2065                         frames[1].bh = bh2;
2066                         assert_inv(dx_node_check(path, frame));
2067                         ++ path->ip_frame;
2068                         ++ frame;
2069                         assert_inv(dx_node_check(path, frame));
2070                         bh_new[0] = NULL; /* buffer head is "consumed" */
2071                         err = ldiskfs_journal_get_write_access(handle, bh2);
2072                         if (err)
2073                                 goto journal_error;
2074                         do_corr(schedule());
2075                 } else {
2076                         /* splitting non-root index node. */
2077                         struct iam_frame *parent = frame - 1;
2078
2079                         do_corr(schedule());
2080                         count = iam_shift_entries(path, frame, count,
2081                                               entries, entries2, newblock[i]);
2082                         /* Which index block gets the new entry? */
2083                         if (idx >= count) {
2084                                 int d = dx_index_is_compat(path) ? 0 : +1;
2085
2086                                 frame->at = iam_entry_shift(path, entries2,
2087                                                             idx - count + d);
2088                                 frame->entries = entries = entries2;
2089                                 frame->curidx = newblock[i];
2090                                 swap(frame->bh, bh2);
2091                                 assert_corr(lock[i + 1] != NULL);
2092                                 assert_corr(new_lock[i] != NULL);
2093                                 swap(lock[i + 1], new_lock[i]);
2094                                 bh_new[i] = bh2;
2095                                 parent->at = iam_entry_shift(path,
2096                                                              parent->at, +1);
2097                         }
2098                         assert_inv(dx_node_check(path, frame));
2099                         assert_inv(dx_node_check(path, parent));
2100                         dxtrace(dx_show_index ("node", frame->entries));
2101                         dxtrace(dx_show_index ("node",
2102                                ((struct dx_node *) bh2->b_data)->entries));
2103                         err = ldiskfs_journal_dirty_metadata(handle, bh2);
2104                         if (err)
2105                                 goto journal_error;
2106                         do_corr(schedule());
2107                         err = ldiskfs_journal_dirty_metadata(handle, parent->bh);
2108                         if (err)
2109                                 goto journal_error;
2110                 }
2111                 do_corr(schedule());
2112                 err = ldiskfs_journal_dirty_metadata(handle, bh);
2113                 if (err)
2114                         goto journal_error;
2115         }
2116         /*
2117          * This function was called to make insertion of a new leaf
2118          * possible. Check that it fulfilled its obligations.
2119          */
2120         assert_corr(dx_get_count(path->ip_frame->entries) <
2121                     dx_get_limit(path->ip_frame->entries));
2122         assert_corr(lock[nr_splet] != NULL);
2123         *lh = lock[nr_splet];
2124         lock[nr_splet] = NULL;
2125         if (nr_splet > 0) {
2126                 /*
2127                  * Log ->i_size modification.
2128                  */
2129                 err = ldiskfs_mark_inode_dirty(handle, dir);
2130                 if (err)
2131                         goto journal_error;
2132         }
2133         goto cleanup;
2134 journal_error:
2135         ldiskfs_std_error(dir->i_sb, err);
2136
2137 cleanup:
2138         iam_unlock_array(path->ip_container, lock);
2139         iam_unlock_array(path->ip_container, new_lock);
2140
2141         assert_corr(err || iam_frame_is_locked(path, path->ip_frame));
2142
2143         do_corr(schedule());
2144         for (i = 0; i < ARRAY_SIZE(bh_new); ++i) {
2145                 if (bh_new[i] != NULL)
2146                         brelse(bh_new[i]);
2147         }
2148         return err;
2149 }
2150
2151 static int iam_add_rec(handle_t *handle, struct iam_iterator *it,
2152                        struct iam_path *path,
2153                        const struct iam_key *k, const struct iam_rec *r)
2154 {
2155         int err;
2156         struct iam_leaf *leaf;
2157
2158         leaf = &path->ip_leaf;
2159         assert_inv(iam_leaf_check(leaf));
2160         assert_inv(iam_path_check(path));
2161         err = iam_txn_add(handle, path, leaf->il_bh);
2162         if (err == 0) {
2163                 do_corr(schedule());
2164                 if (!iam_leaf_can_add(leaf, k, r)) {
2165                         struct dynlock_handle *lh = NULL;
2166
2167                         do {
2168                                 assert_corr(lh == NULL);
2169                                 do_corr(schedule());
2170                                 err = split_index_node(handle, path, &lh);
2171                                 if (err == -EAGAIN) {
2172                                         assert_corr(lh == NULL);
2173
2174                                         iam_path_fini(path);
2175                                         it->ii_state = IAM_IT_DETACHED;
2176
2177                                         do_corr(schedule());
2178                                         err = iam_it_get_exact(it, k);
2179                                         if (err == -ENOENT)
2180                                                 err = +1; /* repeat split */
2181                                         else if (err == 0)
2182                                                 err = -EEXIST;
2183                                 }
2184                         } while (err > 0);
2185                         assert_inv(iam_path_check(path));
2186                         if (err == 0) {
2187                                 assert_corr(lh != NULL);
2188                                 do_corr(schedule());
2189                                 err = iam_new_leaf(handle, leaf);
2190                                 if (err == 0)
2191                                         err = iam_txn_dirty(handle, path,
2192                                                             path->ip_frame->bh);
2193                         }
2194                         iam_unlock_htree(path->ip_container, lh);
2195                         do_corr(schedule());
2196                 }
2197                 if (err == 0) {
2198                         iam_leaf_rec_add(leaf, k, r);
2199                         err = iam_txn_dirty(handle, path, leaf->il_bh);
2200                 }
2201         }
2202         assert_inv(iam_leaf_check(leaf));
2203         assert_inv(iam_leaf_check(&path->ip_leaf));
2204         assert_inv(iam_path_check(path));
2205         return err;
2206 }
2207
2208 /*
2209  * Insert new record with key @k and contents from @r, shifting records to the
2210  * right. On success, iterator is positioned on the newly inserted record.
2211  *
2212  * precondition: it->ii_flags&IAM_IT_WRITE &&
2213  *               (it_state(it) == IAM_IT_ATTACHED ||
2214  *                it_state(it) == IAM_IT_SKEWED) &&
2215  *               ergo(it_state(it) == IAM_IT_ATTACHED,
2216  *                    it_keycmp(it, k) <= 0) &&
2217  *               ergo(it_before(it), it_keycmp(it, k) > 0));
2218  * postcondition: ergo(result == 0,
2219  *                     it_state(it) == IAM_IT_ATTACHED &&
2220  *                     it_keycmp(it, k) == 0 &&
2221  *                     !memcmp(iam_it_rec_get(it), r, ...))
2222  */
2223 int iam_it_rec_insert(handle_t *h, struct iam_iterator *it,
2224                       const struct iam_key *k, const struct iam_rec *r)
2225 {
2226         int result;
2227         struct iam_path *path;
2228
2229         path = &it->ii_path;
2230
2231         assert_corr(it->ii_flags&IAM_IT_WRITE);
2232         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
2233                     it_state(it) == IAM_IT_SKEWED);
2234         assert_corr(ergo(it_state(it) == IAM_IT_ATTACHED,
2235                          it_keycmp(it, k) <= 0));
2236         assert_corr(ergo(it_before(it), it_keycmp(it, k) > 0));
2237         result = iam_add_rec(h, it, path, k, r);
2238         if (result == 0)
2239                 it->ii_state = IAM_IT_ATTACHED;
2240         assert_corr(ergo(result == 0,
2241                          it_state(it) == IAM_IT_ATTACHED &&
2242                          it_keycmp(it, k) == 0));
2243         return result;
2244 }
2245 EXPORT_SYMBOL(iam_it_rec_insert);
2246
2247 static inline int iam_idle_blocks_limit(struct inode *inode)
2248 {
2249         return (inode->i_sb->s_blocksize - sizeof(struct iam_idle_head)) >> 2;
2250 }
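
/*
 * Each entry in iih_blks[] is a 32-bit block number, hence the division by
 * four above. As a worked example, assuming a 4096-byte block size and an
 * 8-byte struct iam_idle_head (the exact size depends on the structure
 * definition), one idle-block list block can record (4096 - 8) >> 2 = 1022
 * idle block numbers.
 */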
2251
2252 /*
2253  * If the leaf cannot be recycled, we will lose one block for reuse.
2254  * It is not a serious issue because it is almost the same as not recycling.
2255  */
2256 static iam_ptr_t iam_index_shrink(handle_t *h, struct iam_path *p,
2257                                   struct iam_leaf *l, struct buffer_head **bh)
2258 {
2259         struct iam_container *c = p->ip_container;
2260         struct inode *inode = c->ic_object;
2261         struct iam_frame *frame = p->ip_frame;
2262         struct iam_entry *entries;
2263         struct dynlock_handle *lh;
2264         int count;
2265         int rc;
2266
2267         if (c->ic_idle_failed)
2268                 return 0;
2269
2270         if (unlikely(frame == NULL))
2271                 return 0;
2272
2273         if (!iam_leaf_empty(l))
2274                 return 0;
2275
2276         lh = iam_lock_htree(c, frame->curidx, DLT_WRITE);
2277         if (lh == NULL) {
2278                 CWARN("%.16s: No memory to recycle idle blocks\n",
2279                       LDISKFS_SB(inode->i_sb)->s_es->s_volume_name);
2280                 return 0;
2281         }
2282
2283         rc = iam_txn_add(h, p, frame->bh);
2284         if (rc != 0) {
2285                 iam_unlock_htree(c, lh);
2286                 return 0;
2287         }
2288
2289         iam_lock_bh(frame->bh);
2290         entries = frame->entries;
2291         count = dx_get_count(entries);
2292         if (frame->at < iam_entry_shift(p, entries, count - 1)) {
2293                 struct iam_entry *n = iam_entry_shift(p, frame->at, 1);
2294
2295                 memmove(frame->at, n,
2296                         (char *)iam_entry_shift(p, entries, count) - (char *)n);
2297                 frame->at_shifted = 1;
2298         }
2299         dx_set_count(entries, count - 1);
2300         rc = iam_txn_dirty(h, p, frame->bh);
2301         iam_unlock_bh(frame->bh);
2302         iam_unlock_htree(c, lh);
2303         if (rc != 0)
2304                 return 0;
2305
2306         get_bh(l->il_bh);
2307         *bh = l->il_bh;
2308         return frame->leaf;
2309 }
2310
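/*
 * Make the freed leaf @bh (block number @blk) the new head of the
 * container's idle-block list: initialize the iam_idle_head in @bh so that
 * it points at the previous list head, store @blk in the root block's
 * idle-blocks field (@idle_blocks) and switch c->ic_idle_bh to @bh,
 * releasing the old list head. If dirtying the root block fails, its
 * idle-blocks field is restored.
 */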
2311 static int
2312 iam_install_idle_blocks(handle_t *h, struct iam_path *p, struct buffer_head *bh,
2313                         __u32 *idle_blocks, iam_ptr_t blk)
2314 {
2315         struct iam_container *c = p->ip_container;
2316         struct buffer_head *old = c->ic_idle_bh;
2317         struct iam_idle_head *head;
2318         int rc;
2319
2320         rc = iam_txn_add(h, p, bh);
2321         if (rc != 0)
2322                 return rc;
2323
2324         head = (struct iam_idle_head *)(bh->b_data);
2325         head->iih_magic = cpu_to_le16(IAM_IDLE_HEADER_MAGIC);
2326         head->iih_count = 0;
2327         head->iih_next = *idle_blocks;
2328         rc = iam_txn_dirty(h, p, bh);
2329         if (rc != 0)
2330                 return rc;
2331
2332         rc = iam_txn_add(h, p, c->ic_root_bh);
2333         if (rc != 0)
2334                 return rc;
2335
2336         iam_lock_bh(c->ic_root_bh);
2337         *idle_blocks = cpu_to_le32(blk);
2338         iam_unlock_bh(c->ic_root_bh);
2339         rc = iam_txn_dirty(h, p, c->ic_root_bh);
2340         if (rc == 0) {
2341                 /* Do NOT release the old one before the new one is assigned. */
2342                 get_bh(bh);
2343                 c->ic_idle_bh = bh;
2344                 brelse(old);
2345         } else {
2346                 iam_lock_bh(c->ic_root_bh);
2347                 *idle_blocks = head->iih_next;
2348                 iam_unlock_bh(c->ic_root_bh);
2349         }
2350         return rc;
2351 }
2352
2353 /*
2354  * If the leaf cannot be recycled, we will lose one block for reuse.
2355  * It is not a serious issue because it is almost the same as not recycling.
2356  */
2357 static void iam_recycle_leaf(handle_t *h, struct iam_path *p,
2358                              struct buffer_head *bh, iam_ptr_t blk)
2359 {
2360         struct iam_container *c = p->ip_container;
2361         struct inode *inode = c->ic_object;
2362         struct iam_idle_head *head;
2363         __u32 *idle_blocks;
2364         int count;
2365         int rc;
2366
2367         cfs_down(&c->ic_idle_sem);
2368         if (unlikely(c->ic_idle_failed)) {
2369                 rc = -EFAULT;
2370                 goto unlock;
2371         }
2372
2373         idle_blocks = (__u32 *)(c->ic_root_bh->b_data +
2374                                 c->ic_descr->id_root_gap +
2375                                 sizeof(struct dx_countlimit));
2376         /* It is the first idle block. */
2377         if (c->ic_idle_bh == NULL) {
2378                 rc = iam_install_idle_blocks(h, p, bh, idle_blocks, blk);
2379                 goto unlock;
2380         }
2381
2382         head = (struct iam_idle_head *)(c->ic_idle_bh->b_data);
2383         count = le16_to_cpu(head->iih_count);
2384         /* Current ic_idle_bh is full, to be replaced by the leaf. */
2385         if (count == iam_idle_blocks_limit(inode)) {
2386                 rc = iam_install_idle_blocks(h, p, bh, idle_blocks, blk);
2387                 goto unlock;
2388         }
2389
2390         /* Just add to ic_idle_bh. */
2391         rc = iam_txn_add(h, p, c->ic_idle_bh);
2392         if (rc != 0)
2393                 goto unlock;
2394
2395         head->iih_blks[count] = cpu_to_le32(blk);
2396         head->iih_count = cpu_to_le16(count + 1);
2397         rc = iam_txn_dirty(h, p, c->ic_idle_bh);
2398
2399 unlock:
2400         cfs_up(&c->ic_idle_sem);
2401         if (rc != 0)
2402                 CWARN("%.16s: idle blocks failed, will lose the blk %u\n",
2403                       LDISKFS_SB(inode->i_sb)->s_es->s_volume_name, blk);
2404 }
2405
2406 /*
2407  * Delete record under iterator.
2408  *
2409  * precondition:  it_state(it) == IAM_IT_ATTACHED &&
2410  *                it->ii_flags&IAM_IT_WRITE &&
2411  *                it_at_rec(it)
2412  * postcondition: it_state(it) == IAM_IT_ATTACHED ||
2413  *                it_state(it) == IAM_IT_DETACHED
2414  */
2415 int iam_it_rec_delete(handle_t *h, struct iam_iterator *it)
2416 {
2417         int result;
2418         struct iam_leaf *leaf;
2419         struct iam_path *path;
2420
2421         assert_corr(it_state(it) == IAM_IT_ATTACHED &&
2422                     it->ii_flags&IAM_IT_WRITE);
2423         assert_corr(it_at_rec(it));
2424
2425         path = &it->ii_path;
2426         leaf = &path->ip_leaf;
2427
2428         assert_inv(iam_leaf_check(leaf));
2429         assert_inv(iam_path_check(path));
2430
2431         result = iam_txn_add(h, path, leaf->il_bh);
2432         /*
2433          * no compaction for now.
2434          */
2435         if (result == 0) {
2436                 iam_rec_del(leaf, it->ii_flags&IAM_IT_MOVE);
2437                 result = iam_txn_dirty(h, path, leaf->il_bh);
2438                 if (result == 0 && iam_leaf_at_end(leaf)) {
2439                         struct buffer_head *bh = NULL;
2440                         iam_ptr_t blk;
2441
2442                         blk = iam_index_shrink(h, path, leaf, &bh);
2443                         if (it->ii_flags & IAM_IT_MOVE) {
2444                                 result = iam_it_next(it);
2445                                 if (result > 0)
2446                                         result = 0;
2447                         }
2448
2449                         if (bh != NULL) {
2450                                 iam_recycle_leaf(h, path, bh, blk);
2451                                 brelse(bh);
2452                         }
2453                 }
2454         }
2455         assert_inv(iam_leaf_check(leaf));
2456         assert_inv(iam_path_check(path));
2457         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
2458                     it_state(it) == IAM_IT_DETACHED);
2459         return result;
2460 }
2461 EXPORT_SYMBOL(iam_it_rec_delete);
2462
2463 /*
2464  * Convert iterator to cookie.
2465  *
2466  * precondition:  it_state(it) == IAM_IT_ATTACHED &&
2467  *                iam_path_descr(it->ii_path)->id_key_size <= sizeof(iam_pos_t)
2468  * postcondition: it_state(it) == IAM_IT_ATTACHED
2469  */
2470 iam_pos_t iam_it_store(const struct iam_iterator *it)
2471 {
2472         iam_pos_t result;
2473
2474         assert_corr(it_state(it) == IAM_IT_ATTACHED);
2475         assert_corr(it_at_rec(it));
2476         assert_corr(iam_it_container(it)->ic_descr->id_ikey_size <=
2477                     sizeof result);
2478
2479         result = 0;
2480         return *(iam_pos_t *)iam_it_ikey_get(it, (void *)&result);
2481 }
2482 EXPORT_SYMBOL(iam_it_store);
2483
2484 /*
2485  * Restore iterator from cookie.
2486  *
2487  * precondition:  it_state(it) == IAM_IT_DETACHED && it->ii_flags&IAM_IT_MOVE &&
2488  *                iam_path_descr(it->ii_path)->id_key_size <= sizeof(iam_pos_t)
2489  * postcondition: ergo(result == 0, it_state(it) == IAM_IT_ATTACHED &&
2490  *                                  iam_it_store(it) == pos)
2491  */
2492 int iam_it_load(struct iam_iterator *it, iam_pos_t pos)
2493 {
2494         assert_corr(it_state(it) == IAM_IT_DETACHED &&
2495                     it->ii_flags&IAM_IT_MOVE);
2496         assert_corr(iam_it_container(it)->ic_descr->id_ikey_size <= sizeof pos);
2497         return iam_it_iget(it, (struct iam_ikey *)&pos);
2498 }
2499 EXPORT_SYMBOL(iam_it_load);
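
/*
 * Together, iam_it_store() and iam_it_load() allow an iteration to be
 * suspended and resumed later (e.g. across system calls). A hypothetical
 * sketch, assuming the iterator @it was initialized with IAM_IT_MOVE, is
 * currently positioned on a record, and process_rec() is a placeholder for
 * the caller's own record handling:
 *
 *     iam_pos_t pos;
 *
 *     pos = iam_it_store(&it);
 *     iam_it_put(&it);
 *     ...
 *     if (iam_it_load(&it, pos) == 0)
 *             process_rec(iam_it_rec_get(&it));
 */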
2500
2501 /***********************************************************************/
2502 /* invariants                                                          */
2503 /***********************************************************************/
2504
2505 static inline int ptr_inside(void *base, size_t size, void *ptr)
2506 {
2507         return (base <= ptr) && (ptr < base + size);
2508 }
2509
2510 int iam_frame_invariant(struct iam_frame *f)
2511 {
2512         return
2513                 (f->bh != NULL &&
2514                 f->bh->b_data != NULL &&
2515                 ptr_inside(f->bh->b_data, f->bh->b_size, f->entries) &&
2516                 ptr_inside(f->bh->b_data, f->bh->b_size, f->at) &&
2517                 f->entries <= f->at);
2518 }
2519 int iam_leaf_invariant(struct iam_leaf *l)
2520 {
2521         return
2522                 l->il_bh != NULL &&
2523                 l->il_bh->b_data != NULL &&
2524                 ptr_inside(l->il_bh->b_data, l->il_bh->b_size, l->il_entries) &&
2525                 ptr_inside(l->il_bh->b_data, l->il_bh->b_size, l->il_at) &&
2526                 l->il_entries <= l->il_at;
2527 }
2528
2529 int iam_path_invariant(struct iam_path *p)
2530 {
2531         int i;
2532
2533         if (p->ip_container == NULL ||
2534             p->ip_indirect < 0 || p->ip_indirect > DX_MAX_TREE_HEIGHT - 1 ||
2535             p->ip_frame != p->ip_frames + p->ip_indirect ||
2536             !iam_leaf_invariant(&p->ip_leaf))
2537                 return 0;
2538         for (i = 0; i < ARRAY_SIZE(p->ip_frames); ++i) {
2539                 if (i <= p->ip_indirect) {
2540                         if (!iam_frame_invariant(&p->ip_frames[i]))
2541                                 return 0;
2542                 }
2543         }
2544         return 1;
2545 }
2546
2547 int iam_it_invariant(struct iam_iterator *it)
2548 {
2549         return
2550                 (it->ii_state == IAM_IT_DETACHED ||
2551                  it->ii_state == IAM_IT_ATTACHED ||
2552                  it->ii_state == IAM_IT_SKEWED) &&
2553                 !(it->ii_flags & ~(IAM_IT_MOVE | IAM_IT_WRITE)) &&
2554                 ergo(it->ii_state == IAM_IT_ATTACHED ||
2555                      it->ii_state == IAM_IT_SKEWED,
2556                      iam_path_invariant(&it->ii_path) &&
2557                      equi(it_at_rec(it), it->ii_state == IAM_IT_SKEWED));
2558 }
2559
2560 /*
2561  * Search container @c for record with key @k. If record is found, its data
2562  * are moved into @r.
2563  *
2564  * Return values: 0: found, -ENOENT: not-found, -ve: error
2565  */
2566 int iam_lookup(struct iam_container *c, const struct iam_key *k,
2567                struct iam_rec *r, struct iam_path_descr *pd)
2568 {
2569         struct iam_iterator it;
2570         int result;
2571
2572         iam_it_init(&it, c, 0, pd);
2573
2574         result = iam_it_get_exact(&it, k);
2575         if (result == 0)
2576                 /*
2577                  * record with required key found, copy it into user buffer
2578                  */
2579                 iam_reccpy(&it.ii_path.ip_leaf, r);
2580         iam_it_put(&it);
2581         iam_it_fini(&it);
2582         return result;
2583 }
2584 EXPORT_SYMBOL(iam_lookup);
2585
2586 /*
2587  * Insert new record @r with key @k into container @c (within context of
2588  * transaction @h).
2589  *
2590  * Return values: 0: success, -ve: error, including -EEXIST when record with
2591  * given key is already present.
2592  *
2593  * postcondition: ergo(result == 0 || result == -EEXIST,
2594  *                                  iam_lookup(c, k, r2) > 0);
2595  */
2596 int iam_insert(handle_t *h, struct iam_container *c, const struct iam_key *k,
2597                const struct iam_rec *r, struct iam_path_descr *pd)
2598 {
2599         struct iam_iterator it;
2600         int result;
2601
2602         iam_it_init(&it, c, IAM_IT_WRITE, pd);
2603
2604         result = iam_it_get_exact(&it, k);
2605         if (result == -ENOENT)
2606                 result = iam_it_rec_insert(h, &it, k, r);
2607         else if (result == 0)
2608                 result = -EEXIST;
2609         iam_it_put(&it);
2610         iam_it_fini(&it);
2611         return result;
2612 }
2613 EXPORT_SYMBOL(iam_insert);
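
/*
 * A hypothetical caller-side sketch combining iam_insert() and iam_lookup()
 * (the transaction handle @h, container @c, path descriptor @pd, key @k and
 * record buffers @r and @r2 are assumed to be prepared by the caller):
 *
 *     rc = iam_insert(h, c, k, r, pd);
 *     if (rc == 0 || rc == -EEXIST)
 *             rc = iam_lookup(c, k, r2, pd);
 */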
2614
2615 /*
2616  * Update record with the key @k in container @c (within context of
2617  * transaction @h), new record is given by @r.
2618  *
2619  * Return values: +1: skipped, the stored record already equals @r;
2620  * 0: success; -ve: error, including -ENOENT if the given key is not found.
2621  */
2622 int iam_update(handle_t *h, struct iam_container *c, const struct iam_key *k,
2623                const struct iam_rec *r, struct iam_path_descr *pd)
2624 {
2625         struct iam_iterator it;
2626         struct iam_leaf *folio;
2627         int result;
2628
2629         iam_it_init(&it, c, IAM_IT_WRITE, pd);
2630
2631         result = iam_it_get_exact(&it, k);
2632         if (result == 0) {
2633                 folio = &it.ii_path.ip_leaf;
2634                 result = iam_leaf_ops(folio)->rec_eq(folio, r);
2635                 if (result == 0)
2636                         iam_it_rec_set(h, &it, r);
2637                 else
2638                         result = 1;
2639         }
2640         iam_it_put(&it);
2641         iam_it_fini(&it);
2642         return result;
2643 }
2644 EXPORT_SYMBOL(iam_update);
2645
2646 /*
2647  * Delete existing record with key @k.
2648  *
2649  * Return values: 0: success, -ENOENT: not-found, -ve: other error.
2650  *
2651  * postcondition: ergo(result == 0 || result == -ENOENT,
2652  *                                 !iam_lookup(c, k, *));
2653  */
2654 int iam_delete(handle_t *h, struct iam_container *c, const struct iam_key *k,
2655                struct iam_path_descr *pd)
2656 {
2657         struct iam_iterator it;
2658         int result;
2659
2660         iam_it_init(&it, c, IAM_IT_WRITE, pd);
2661
2662         result = iam_it_get_exact(&it, k);
2663         if (result == 0)
2664                 iam_it_rec_delete(h, &it);
2665         iam_it_put(&it);
2666         iam_it_fini(&it);
2667         return result;
2668 }
2669 EXPORT_SYMBOL(iam_delete);
2670
2671 int iam_root_limit(int rootgap, int blocksize, int size)
2672 {
2673         int limit;
2674         int nlimit;
2675
2676         limit = (blocksize - rootgap) / size;
2677         nlimit = blocksize / size;
2678         if (limit == nlimit)
2679                 limit--;
2680         return limit;
2681 }
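
/*
 * Worked example: with a 4096-byte block, 8-byte entries and a root gap of
 * 0, limit == nlimit == 512, so the returned root limit is 511; with a root
 * gap of 16 the root limit is (4096 - 16) / 8 = 510 and no adjustment is
 * needed. The decrement keeps the root node's capacity strictly below that
 * of a non-root node, which is consistent with the assertion in
 * split_index_node() that dx_root_limit() < dx_node_limit().
 */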