lustre/osd-ldiskfs/osd_iam.c (commit c197e3519bd5f8bcea3b36f8a0f9bc295f0104c7)
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2011, 2015, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  *
32  * iam.c
33  * Top-level entry points into iam module
34  *
35  * Author: Wang Di <wangdi@clusterfs.com>
36  * Author: Nikita Danilov <nikita@clusterfs.com>
37  */
38
39 /*
40  * iam: big theory statement.
41  *
42  * iam (Index Access Module) is a module providing an abstraction of a
43  * persistent transactional container on top of a generalized ldiskfs htree.
44  *
45  * iam supports:
46  *
47  *     - key, pointer, and record size specifiable per container.
48  *
49  *     - trees taller than 2 index levels.
50  *
51  *     - read/write to existing ldiskfs htree directories as iam containers.
52  *
53  * An iam container is a tree consisting of leaf nodes, which hold the keys
54  * and records stored in the container, and index nodes, which hold keys and
55  * pointers to leaf or index nodes.
56  *
57  * iam does not work with keys directly; instead it calls a user-supplied key
58  * comparison function (->dpo_keycmp()).
59  *
60  * Pointers are (currently) interpreted as logical offsets (measured in
61  * blocks) within the underlying flat file on top of which the iam tree lives.
62  *
63  * On-disk format:
64  *
65  * iam mostly tries to reuse existing htree formats.
66  *
67  * Format of index node:
68  *
69  * +-----+-------+-------+-------+------+-------+------------+
70  * |     | count |       |       |      |       |            |
71  * | gap |   /   | entry | entry | .... | entry | free space |
72  * |     | limit |       |       |      |       |            |
73  * +-----+-------+-------+-------+------+-------+------------+
74  *
75  *       gap           this part of the node is never accessed by iam code. It
76  *                     exists for binary compatibility with the ldiskfs htree
77  *                     (which, in turn, stores a fake struct ext2_dirent for
78  *                     ext2 compatibility), and to keep some unspecified
79  *                     per-node data. The gap can differ for root and non-root
80  *                     index nodes. The gap size can be specified for each
81  *                     container (a gap of 0 is allowed).
82  *
83  *       count/limit   current number of entries in this node, and the maximal
84  *                     number of entries that can fit into the node. count/limit
85  *                     has the same size as an entry, and is itself counted in
86  *                     count.
87  *
88  *       entry         index entry: a key immediately followed by a pointer to
89  *                     a child node. The sizes of the key and the pointer depend
90  *                     on the container. Entries have neither alignment nor
91  *                     padding.
92  *
93  *       free space    portion of the node to which new entries are added
94  *
95  * Entries in index node are sorted by their key value.
96  *
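 * As an illustration (a sketch, not a definition taken from this file), the
 * count/limit pair occupies the space of one entry and is laid out along the
 * lines of the ldiskfs/ext4 struct dx_countlimit, whose size is used below
 * when locating fields in the root block:
 *
 *     struct dx_countlimit {
 *             __le16 limit;   -- maximal number of entries in the node
 *             __le16 count;   -- current number of entries, counting itself
 *     };
 *
 * An index entry for a container with, say, 8-byte keys and 4-byte block
 * pointers is then simply 12 consecutive unaligned bytes: the key followed
 * by the child block number.
 *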
97  * Format of a leaf node is not specified. Generic iam code accesses leaf
98  * nodes through ->id_leaf methods in struct iam_descr.
99  *
100  * The IAM root block is a special node that contains the IAM descriptor.
101  * Its on-disk format is:
102  *
103  * +---------+-------+--------+---------+-------+------+-------+------------+
104  * |IAM desc | count |  idle  |         |       |      |       |            |
105  * |(fix/var)|   /   | blocks | padding | entry | .... | entry | free space |
106  * |         | limit |        |         |       |      |       |            |
107  * +---------+-------+--------+---------+-------+------+-------+------------+
108  *
109  * The padding length is calculated from the parameters in the IAM descriptor.
110  *
111  * The field "idle_blocks" records empty leaf nodes: nodes from which all
112  * entries have been removed but which have not been released. Ideally, such
113  * idle blocks would be reused whenever new leaf nodes are needed for new
114  * entries, but that relies on the IAM hash functions mapping the new entries
115  * to exactly those idle blocks. Unfortunately, it is hard to design hash
116  * functions with such a clever mapping, especially without hurting insert/
117  * lookup performance.
118  *
119  * Instead, the IAM recycles empty leaf nodes by putting them into a per-file
120  * pool of idle blocks. When a new leaf node is needed, a block is taken from
121  * this pool first, regardless of how the IAM hash functions would map the
122  * entry.
123  *
124  * The idle blocks pool is organized as a series of tables, and each table
125  * can be described as follows (on-disk format):
126  *
127  * +---------+---------+---------+---------+------+---------+-------+
128  * |  magic  |  count  |  next   |  logic  |      |  logic  | free  |
129  * |(16 bits)|(16 bits)|  table  |  blk #  | .... |  blk #  | space |
130  * |         |         |(32 bits)|(32 bits)|      |(32 bits)|       |
131  * +---------+---------+---------+---------+------+---------+-------+
132  *
133  * The logical blk# of the first table is stored in the root node "idle_blocks".
134  *
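 * In C terms, the table header above corresponds to the iam_idle_head layout
 * used by the code below (a sketch: only iih_magic, iih_count and iih_blks
 * are referenced in this file; the next-table field is shown here only to
 * mirror the diagram):
 *
 *     struct iam_idle_head {
 *             __le16 iih_magic;    -- IAM_IDLE_HEADER_MAGIC
 *             __le16 iih_count;    -- number of idle blk# entries that follow
 *             __le32 iih_next;     -- logical blk# of the next table, 0 if none
 *             __le32 iih_blks[0];  -- logical blk# of idle leaf blocks
 *     };
 *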
135  */
136
137 #include <linux/module.h>
138 #include <linux/fs.h>
139 #include <linux/pagemap.h>
140 #include <linux/time.h>
141 #include <linux/fcntl.h>
142 #include <linux/stat.h>
143 #include <linux/string.h>
144 #include <linux/quotaops.h>
145 #include <linux/buffer_head.h>
146
147 #include <ldiskfs/ldiskfs.h>
148 #include <ldiskfs/xattr.h>
149 #undef ENTRY
150
151 #include "osd_internal.h"
152
153 #include <ldiskfs/acl.h>
154
155 /*
156  * List of all registered formats.
157  *
158  * No locking. Callers synchronize.
159  */
160 static struct list_head iam_formats = LIST_HEAD_INIT(iam_formats);
161
162 void iam_format_register(struct iam_format *fmt)
163 {
164         list_add(&fmt->if_linkage, &iam_formats);
165 }
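
/*
 * For example (an illustrative sketch only; the real formats are registered
 * from iam_lfix_format_init() and iam_lvar_format_init(), which live in
 * their own files), a format implementation would do something like:
 *
 *      static struct iam_format my_format = {
 *              .if_guess = my_format_guess,
 *      };
 *
 *      void my_format_init(void)
 *      {
 *              iam_format_register(&my_format);
 *      }
 */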
166
167 static struct buffer_head *
168 iam_load_idle_blocks(struct iam_container *c, iam_ptr_t blk)
169 {
170         struct inode *inode = c->ic_object;
171         struct iam_idle_head *head;
172         struct buffer_head *bh;
173
174         LASSERT(mutex_is_locked(&c->ic_idle_mutex));
175
176         if (blk == 0)
177                 return NULL;
178
179         bh = __ldiskfs_bread(NULL, inode, blk, 0);
180         if (IS_ERR_OR_NULL(bh)) {
181                 CERROR("%.16s: cannot load idle blocks, blk = %u, err = %ld\n",
182                        LDISKFS_SB(inode->i_sb)->s_es->s_volume_name, blk,
183                        bh ? PTR_ERR(bh) : -EIO);
184                 c->ic_idle_failed = 1;
185                 if (bh == NULL)
186                         bh = ERR_PTR(-EIO);
187                 return bh;
188         }
189
190         head = (struct iam_idle_head *)(bh->b_data);
191         if (le16_to_cpu(head->iih_magic) != IAM_IDLE_HEADER_MAGIC) {
192                 CERROR("%.16s: invalid idle block head, blk = %u, magic = %d\n",
193                        LDISKFS_SB(inode->i_sb)->s_es->s_volume_name, blk,
194                        le16_to_cpu(head->iih_magic));
195                 brelse(bh);
196                 c->ic_idle_failed = 1;
197                 return ERR_PTR(-EBADF);
198         }
199
200         return bh;
201 }
202
203 /*
204  * Determine format of given container. This is done by scanning list of
205  * registered formats and calling ->if_guess() method of each in turn.
206  */
207 static int iam_format_guess(struct iam_container *c)
208 {
209         int result;
210         struct iam_format *fmt;
211
212         /*
213          * XXX temporary initialization hook.
214          */
215         {
216                 static int initialized = 0;
217
218                 if (!initialized) {
219                         iam_lvar_format_init();
220                         iam_lfix_format_init();
221                         initialized = 1;
222                 }
223         }
224
225         result = -ENOENT;
226         list_for_each_entry(fmt, &iam_formats, if_linkage) {
227                 result = fmt->if_guess(c);
228                 if (result == 0)
229                         break;
230         }
231
232         if (result == 0) {
233                 struct buffer_head *bh;
234                 __u32 *idle_blocks;
235
236                 LASSERT(c->ic_root_bh != NULL);
237
238                 idle_blocks = (__u32 *)(c->ic_root_bh->b_data +
239                                         c->ic_descr->id_root_gap +
240                                         sizeof(struct dx_countlimit));
241                 mutex_lock(&c->ic_idle_mutex);
242                 bh = iam_load_idle_blocks(c, le32_to_cpu(*idle_blocks));
243                 if (bh != NULL && IS_ERR(bh))
244                         result = PTR_ERR(bh);
245                 else
246                         c->ic_idle_bh = bh;
247                 mutex_unlock(&c->ic_idle_mutex);
248         }
249
250         return result;
251 }
252
253 /*
254  * Initialize container @c.
255  */
256 int iam_container_init(struct iam_container *c,
257                        struct iam_descr *descr, struct inode *inode)
258 {
259         memset(c, 0, sizeof *c);
260         c->ic_descr  = descr;
261         c->ic_object = inode;
262         init_rwsem(&c->ic_sem);
263         dynlock_init(&c->ic_tree_lock);
264         mutex_init(&c->ic_idle_mutex);
265         return 0;
266 }
267
268 /*
269  * Determine container format.
270  */
271 int iam_container_setup(struct iam_container *c)
272 {
273         return iam_format_guess(c);
274 }
275
276 /*
277  * Finalize container @c, release all resources.
278  */
279 void iam_container_fini(struct iam_container *c)
280 {
281         brelse(c->ic_idle_bh);
282         c->ic_idle_bh = NULL;
283         brelse(c->ic_root_bh);
284         c->ic_root_bh = NULL;
285 }
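
/*
 * Typical container life cycle (a sketch; error handling and the origin of
 * @descr and @inode are left to the caller):
 *
 *      struct iam_container c;
 *
 *      iam_container_init(&c, descr, inode);   // attach descriptor and object
 *      iam_container_setup(&c);                // guess the on-disk format
 *      ...                                     // lookups, inserts, deletes
 *      iam_container_fini(&c);                 // drop root and idle buffers
 */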
286
287 void iam_path_init(struct iam_path *path, struct iam_container *c,
288                    struct iam_path_descr *pd)
289 {
290         memset(path, 0, sizeof *path);
291         path->ip_container = c;
292         path->ip_frame = path->ip_frames;
293         path->ip_data = pd;
294         path->ip_leaf.il_path = path;
295 }
296
297 static void iam_leaf_fini(struct iam_leaf *leaf);
298
299 void iam_path_release(struct iam_path *path)
300 {
301         int i;
302
303         for (i = 0; i < ARRAY_SIZE(path->ip_frames); i++) {
304                 if (path->ip_frames[i].bh != NULL) {
305                         path->ip_frames[i].at_shifted = 0;
306                         brelse(path->ip_frames[i].bh);
307                         path->ip_frames[i].bh = NULL;
308                 }
309         }
310 }
311
312 void iam_path_fini(struct iam_path *path)
313 {
314         iam_leaf_fini(&path->ip_leaf);
315         iam_path_release(path);
316 }
317
318
319 void iam_path_compat_init(struct iam_path_compat *path, struct inode *inode)
320 {
321         int i;
322
323         path->ipc_hinfo = &path->ipc_hinfo_area;
324         for (i = 0; i < ARRAY_SIZE(path->ipc_scratch); ++i)
325                 path->ipc_descr.ipd_key_scratch[i] =
326                         (struct iam_ikey *)&path->ipc_scratch[i];
327
328         iam_path_init(&path->ipc_path, &path->ipc_container, &path->ipc_descr);
329 }
330
331 void iam_path_compat_fini(struct iam_path_compat *path)
332 {
333         iam_path_fini(&path->ipc_path);
334 }
335
336 /*
337  * Helper function initializing iam_path_descr and its key scratch area.
338  */
339 struct iam_path_descr *iam_ipd_alloc(void *area, int keysize)
340 {
341         struct iam_path_descr *ipd;
342         void *karea;
343         int i;
344
345         ipd = area;
346         karea = ipd + 1;
347         for (i = 0; i < ARRAY_SIZE(ipd->ipd_key_scratch); ++i, karea += keysize)
348                 ipd->ipd_key_scratch[i] = karea;
349         return ipd;
350 }
351
352 void iam_ipd_free(struct iam_path_descr *ipd)
353 {
354 }
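
/*
 * Example use of the two helpers above (a sketch; n_scratch is a placeholder
 * for ARRAY_SIZE(ipd->ipd_key_scratch) and keysize for the container's key
 * size):
 *
 *      void *area = kmalloc(sizeof(struct iam_path_descr) +
 *                           n_scratch * keysize, GFP_NOFS);
 *      struct iam_path_descr *ipd;
 *
 *      if (area != NULL) {
 *              ipd = iam_ipd_alloc(area, keysize);
 *              ...
 *              iam_ipd_free(ipd);
 *              kfree(area);
 *      }
 */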
355
356 int iam_node_read(struct iam_container *c, iam_ptr_t ptr,
357                   handle_t *h, struct buffer_head **bh)
358 {
359         /* NB: this can be called by iam_lfix_guess(), which is still at a
360          * very early stage, when c->ic_root_bh and c->ic_descr->id_ops
361          * have not been initialized yet.
362          * Also, we don't have this for an IAM dir.
363          */
364         if (c->ic_root_bh != NULL &&
365             c->ic_descr->id_ops->id_root_ptr(c) == ptr) {
366                 get_bh(c->ic_root_bh);
367                 *bh = c->ic_root_bh;
368                 return 0;
369         }
370
371         *bh = __ldiskfs_bread(h, c->ic_object, (int)ptr, 0);
372         if (IS_ERR(*bh))
373                 return PTR_ERR(*bh);
374
375         if (*bh == NULL)
376                 return -EIO;
377
378         return 0;
379 }
380
381 /*
382  * Return pointer to current leaf record. Pointer is valid while corresponding
383  * leaf node is locked and pinned.
384  */
385 static struct iam_rec *iam_leaf_rec(const struct iam_leaf *leaf)
386 {
387         return iam_leaf_ops(leaf)->rec(leaf);
388 }
389
390 /*
391  * Return pointer to the current leaf key. This function returns pointer to
392  * the key stored in node.
393  *
394  * Caller should assume that returned pointer is only valid while leaf node is
395  * pinned and locked.
396  */
397 static struct iam_key *iam_leaf_key(const struct iam_leaf *leaf)
398 {
399         return iam_leaf_ops(leaf)->key(leaf);
400 }
401
402 static int iam_leaf_key_size(const struct iam_leaf *leaf)
403 {
404         return iam_leaf_ops(leaf)->key_size(leaf);
405 }
406
407 static struct iam_ikey *iam_leaf_ikey(const struct iam_leaf *leaf,
408                                       struct iam_ikey *key)
409 {
410         return iam_leaf_ops(leaf)->ikey(leaf, key);
411 }
412
413 static int iam_leaf_keycmp(const struct iam_leaf *leaf,
414                            const struct iam_key *key)
415 {
416         return iam_leaf_ops(leaf)->key_cmp(leaf, key);
417 }
418
419 static int iam_leaf_keyeq(const struct iam_leaf *leaf,
420                           const struct iam_key *key)
421 {
422         return iam_leaf_ops(leaf)->key_eq(leaf, key);
423 }
424
425 #if LDISKFS_INVARIANT_ON
426 extern int dx_node_check(struct iam_path *p, struct iam_frame *f);
427
428 static int iam_path_check(struct iam_path *p)
429 {
430         int i;
431         int result;
432         struct iam_frame *f;
433         struct iam_descr *param;
434
435         result = 1;
436         param = iam_path_descr(p);
437         for (i = 0; result && i < ARRAY_SIZE(p->ip_frames); ++i) {
438                 f = &p->ip_frames[i];
439                 if (f->bh != NULL) {
440                         result = dx_node_check(p, f);
441                         if (result)
442                                 result = !param->id_ops->id_node_check(p, f);
443                 }
444         }
445         if (result && p->ip_leaf.il_bh != NULL)
446                 result = 1;
447         if (result == 0)
448                 ldiskfs_std_error(iam_path_obj(p)->i_sb, result);
449
450         return result;
451 }
452 #endif
453
454 static int iam_leaf_load(struct iam_path *path)
455 {
456         iam_ptr_t block;
457         int err;
458         struct iam_container *c;
459         struct buffer_head   *bh;
460         struct iam_leaf      *leaf;
461         struct iam_descr     *descr;
462
463         c     = path->ip_container;
464         leaf  = &path->ip_leaf;
465         descr = iam_path_descr(path);
466         block = path->ip_frame->leaf;
467         if (block == 0) {
468                 /* XXX bug 11027 */
469                 printk(KERN_EMERG "wrong leaf: %lu %d [%p %p %p]\n",
470                        (long unsigned)path->ip_frame->leaf,
471                        dx_get_count(dx_node_get_entries(path, path->ip_frame)),
472                        path->ip_frames[0].bh, path->ip_frames[1].bh,
473                        path->ip_frames[2].bh);
474         }
475         err = descr->id_ops->id_node_read(c, block, NULL, &bh);
476         if (err == 0) {
477                 leaf->il_bh = bh;
478                 leaf->il_curidx = block;
479                 err = iam_leaf_ops(leaf)->init(leaf);
480         }
481         return err;
482 }
483
484 static void iam_unlock_htree(struct iam_container *ic,
485                              struct dynlock_handle *lh)
486 {
487         if (lh != NULL)
488                 dynlock_unlock(&ic->ic_tree_lock, lh);
489 }
490
491
492 static void iam_leaf_unlock(struct iam_leaf *leaf)
493 {
494         if (leaf->il_lock != NULL) {
495                 iam_unlock_htree(iam_leaf_container(leaf),
496                                  leaf->il_lock);
497                 do_corr(schedule());
498                 leaf->il_lock = NULL;
499         }
500 }
501
502 static void iam_leaf_fini(struct iam_leaf *leaf)
503 {
504         if (leaf->il_path != NULL) {
505                 iam_leaf_unlock(leaf);
506                 iam_leaf_ops(leaf)->fini(leaf);
507                 if (leaf->il_bh) {
508                         brelse(leaf->il_bh);
509                         leaf->il_bh = NULL;
510                         leaf->il_curidx = 0;
511                 }
512         }
513 }
514
515 static void iam_leaf_start(struct iam_leaf *folio)
516 {
517         iam_leaf_ops(folio)->start(folio);
518 }
519
520 void iam_leaf_next(struct iam_leaf *folio)
521 {
522         iam_leaf_ops(folio)->next(folio);
523 }
524
525 static void iam_leaf_rec_add(struct iam_leaf *leaf, const struct iam_key *key,
526                              const struct iam_rec *rec)
527 {
528         iam_leaf_ops(leaf)->rec_add(leaf, key, rec);
529 }
530
531 static void iam_rec_del(struct iam_leaf *leaf, int shift)
532 {
533         iam_leaf_ops(leaf)->rec_del(leaf, shift);
534 }
535
536 int iam_leaf_at_end(const struct iam_leaf *leaf)
537 {
538         return iam_leaf_ops(leaf)->at_end(leaf);
539 }
540
541 static void iam_leaf_split(struct iam_leaf *l, struct buffer_head **bh,
542                            iam_ptr_t nr)
543 {
544         iam_leaf_ops(l)->split(l, bh, nr);
545 }
546
547 static inline int iam_leaf_empty(struct iam_leaf *l)
548 {
549         return iam_leaf_ops(l)->leaf_empty(l);
550 }
551
552 int iam_leaf_can_add(const struct iam_leaf *l,
553                      const struct iam_key *k, const struct iam_rec *r)
554 {
555         return iam_leaf_ops(l)->can_add(l, k, r);
556 }
557
558 static int iam_txn_dirty(handle_t *handle,
559                          struct iam_path *path, struct buffer_head *bh)
560 {
561         int result;
562
563         result = ldiskfs_handle_dirty_metadata(handle, NULL, bh);
564         if (result != 0)
565                 ldiskfs_std_error(iam_path_obj(path)->i_sb, result);
566         return result;
567 }
568
569 static int iam_txn_add(handle_t *handle,
570                        struct iam_path *path, struct buffer_head *bh)
571 {
572         int result;
573
574         result = ldiskfs_journal_get_write_access(handle, bh);
575         if (result != 0)
576                 ldiskfs_std_error(iam_path_obj(path)->i_sb, result);
577         return result;
578 }
579
580 /***********************************************************************/
581 /* iterator interface                                                  */
582 /***********************************************************************/
583
584 static enum iam_it_state it_state(const struct iam_iterator *it)
585 {
586         return it->ii_state;
587 }
588
589 /*
590  * Helper function returning the container of an iterator.
591  */
592 static struct iam_container *iam_it_container(const struct iam_iterator *it)
593 {
594         return it->ii_path.ip_container;
595 }
596
597 static inline int it_keycmp(const struct iam_iterator *it,
598                             const struct iam_key *k)
599 {
600         return iam_leaf_keycmp(&it->ii_path.ip_leaf, k);
601 }
602
603 static inline int it_keyeq(const struct iam_iterator *it,
604                            const struct iam_key *k)
605 {
606         return iam_leaf_keyeq(&it->ii_path.ip_leaf, k);
607 }
608
609 static int it_ikeycmp(const struct iam_iterator *it, const struct iam_ikey *ik)
610 {
611         return iam_ikeycmp(it->ii_path.ip_container,
612                            iam_leaf_ikey(&it->ii_path.ip_leaf,
613                                          iam_path_ikey(&it->ii_path, 0)), ik);
614 }
615
616 static inline int it_at_rec(const struct iam_iterator *it)
617 {
618         return !iam_leaf_at_end(&it->ii_path.ip_leaf);
619 }
620
621 static inline int it_before(const struct iam_iterator *it)
622 {
623         return it_state(it) == IAM_IT_SKEWED && it_at_rec(it);
624 }
625
626 /*
627  * Helper wrapper around iam_it_get(): returns 0 (success) only when record
628  * with exactly the same key as the one asked for is found.
629  */
630 static int iam_it_get_exact(struct iam_iterator *it, const struct iam_key *k)
631 {
632         int result;
633
634         result = iam_it_get(it, k);
635         if (result > 0)
636                 result = 0;
637         else if (result == 0)
638                 /*
639                  * Return -ENOENT if cursor is located above record with a key
640                  * different from one specified, or in the empty leaf.
641                  *
642                  * XXX returning -ENOENT only works if iam_it_get() never
643                  * returns -ENOENT as a legitimate error.
644                  */
645                 result = -ENOENT;
646         return result;
647 }
648
649 void iam_container_write_lock(struct iam_container *ic)
650 {
651         down_write(&ic->ic_sem);
652 }
653
654 void iam_container_write_unlock(struct iam_container *ic)
655 {
656         up_write(&ic->ic_sem);
657 }
658
659 void iam_container_read_lock(struct iam_container *ic)
660 {
661         down_read(&ic->ic_sem);
662 }
663
664 void iam_container_read_unlock(struct iam_container *ic)
665 {
666         up_read(&ic->ic_sem);
667 }
668
669 /*
670  * Initialize iterator to IAM_IT_DETACHED state.
671  *
672  * postcondition: it_state(it) == IAM_IT_DETACHED
673  */
674 int  iam_it_init(struct iam_iterator *it, struct iam_container *c, __u32 flags,
675                  struct iam_path_descr *pd)
676 {
677         memset(it, 0, sizeof *it);
678         it->ii_flags  = flags;
679         it->ii_state  = IAM_IT_DETACHED;
680         iam_path_init(&it->ii_path, c, pd);
681         return 0;
682 }
683
684 /*
685  * Finalize iterator and release all resources.
686  *
687  * precondition: it_state(it) == IAM_IT_DETACHED
688  */
689 void iam_it_fini(struct iam_iterator *it)
690 {
691         assert_corr(it_state(it) == IAM_IT_DETACHED);
692         iam_path_fini(&it->ii_path);
693 }
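
/*
 * Typical read-only iteration over a container (a sketch; @c, @pd and @key
 * come from the caller, and process() is a placeholder for whatever is done
 * with each record):
 *
 *      struct iam_iterator it;
 *      int rc;
 *
 *      iam_it_init(&it, c, IAM_IT_MOVE, pd);
 *
 *      rc = iam_it_get_at(&it, key);
 *      while (rc >= 0) {
 *              // iterator is attached: record and key pointers are valid
 *              process(iam_it_rec_get(&it), iam_it_key_get(&it));
 *              rc = iam_it_next(&it);  // 0: more, +1: end of container, -ve: error
 *              if (rc != 0)
 *                      break;
 *      }
 *      iam_it_put(&it);
 *      iam_it_fini(&it);
 */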
694
695 /*
696  * These locking primitives are used to protect parts of the dir's htree.
697  * The protection unit is a block: a leaf or an index node.
698  */
699 static struct dynlock_handle *iam_lock_htree(struct iam_container *ic,
700                                              unsigned long value,
701                                              enum dynlock_type lt)
702 {
703         return dynlock_lock(&ic->ic_tree_lock, value, lt, GFP_NOFS);
704 }
705
706 static int iam_index_lock(struct iam_path *path, struct dynlock_handle **lh)
707 {
708         struct iam_frame *f;
709
710         for (f = path->ip_frame; f >= path->ip_frames; --f, ++lh) {
711                 do_corr(schedule());
712                 *lh = iam_lock_htree(path->ip_container, f->curidx, DLT_READ);
713                 if (*lh == NULL)
714                         return -ENOMEM;
715         }
716         return 0;
717 }
718
719 /*
720  * Fast check for frame consistency.
721  */
722 static int iam_check_fast(struct iam_path *path, struct iam_frame *frame)
723 {
724         struct iam_container *bag;
725         struct iam_entry *next;
726         struct iam_entry *last;
727         struct iam_entry *entries;
728         struct iam_entry *at;
729
730         bag     = path->ip_container;
731         at      = frame->at;
732         entries = frame->entries;
733         last    = iam_entry_shift(path, entries, dx_get_count(entries) - 1);
734
735         if (unlikely(at > last))
736                 return -EAGAIN;
737
738         if (unlikely(dx_get_block(path, at) != frame->leaf))
739                 return -EAGAIN;
740
741         if (unlikely(iam_ikeycmp(bag, iam_ikey_at(path, at),
742                                  path->ip_ikey_target) > 0))
743                 return -EAGAIN;
744
745         next = iam_entry_shift(path, at, +1);
746         if (next <= last) {
747                 if (unlikely(iam_ikeycmp(bag, iam_ikey_at(path, next),
748                                          path->ip_ikey_target) <= 0))
749                         return -EAGAIN;
750         }
751         return 0;
752 }
753
754 int dx_index_is_compat(struct iam_path *path)
755 {
756         return iam_path_descr(path) == NULL;
757 }
758
759 /*
760  * iam_find_position
761  *
762  * Search for the position of the specified hash (index key) in an index node.
763  *
764  */
765
766 static struct iam_entry *iam_find_position(struct iam_path *path,
767                                            struct iam_frame *frame)
768 {
769         int count;
770         struct iam_entry *p;
771         struct iam_entry *q;
772         struct iam_entry *m;
773
774         count = dx_get_count(frame->entries);
775         assert_corr(count && count <= dx_get_limit(frame->entries));
776         p = iam_entry_shift(path, frame->entries,
777                             dx_index_is_compat(path) ? 1 : 2);
778         q = iam_entry_shift(path, frame->entries, count - 1);
779         while (p <= q) {
780                 m = iam_entry_shift(path, p, iam_entry_diff(path, q, p) / 2);
781                 if (iam_ikeycmp(path->ip_container, iam_ikey_at(path, m),
782                                 path->ip_ikey_target) > 0)
783                         q = iam_entry_shift(path, m, -1);
784                 else
785                         p = iam_entry_shift(path, m, +1);
786         }
787         return iam_entry_shift(path, p, -1);
788 }
789
790
791
792 static iam_ptr_t iam_find_ptr(struct iam_path *path, struct iam_frame *frame)
793 {
794         return dx_get_block(path, iam_find_position(path, frame));
795 }
796
797 void iam_insert_key(struct iam_path *path, struct iam_frame *frame,
798                     const struct iam_ikey *key, iam_ptr_t ptr)
799 {
800         struct iam_entry *entries = frame->entries;
801         struct iam_entry *new = iam_entry_shift(path, frame->at, +1);
802         int count = dx_get_count(entries);
803
804         /*
805          * Unfortunately we cannot assert this, as this function is sometimes
806          * called by VFS under i_sem and without pdirops lock.
807          */
808         assert_corr(1 || iam_frame_is_locked(path, frame));
809         assert_corr(count < dx_get_limit(entries));
810         assert_corr(frame->at < iam_entry_shift(path, entries, count));
811         assert_inv(dx_node_check(path, frame));
812
813         memmove(iam_entry_shift(path, new, 1), new,
814                 (char *)iam_entry_shift(path, entries, count) - (char *)new);
815         dx_set_ikey(path, new, key);
816         dx_set_block(path, new, ptr);
817         dx_set_count(entries, count + 1);
818         assert_inv(dx_node_check(path, frame));
819 }
820
821 void iam_insert_key_lock(struct iam_path *path, struct iam_frame *frame,
822                          const struct iam_ikey *key, iam_ptr_t ptr)
823 {
824         iam_lock_bh(frame->bh);
825         iam_insert_key(path, frame, key, ptr);
826         iam_unlock_bh(frame->bh);
827 }
828 /*
829  * returns 0 if path was unchanged, -EAGAIN otherwise.
830  */
831 static int iam_check_path(struct iam_path *path, struct iam_frame *frame)
832 {
833         int equal;
834
835         iam_lock_bh(frame->bh);
836         equal = iam_check_fast(path, frame) == 0 ||
837                 frame->leaf == iam_find_ptr(path, frame);
838         DX_DEVAL(iam_lock_stats.dls_bh_again += !equal);
839         iam_unlock_bh(frame->bh);
840
841         return equal ? 0 : -EAGAIN;
842 }
843
844 static int iam_lookup_try(struct iam_path *path)
845 {
846         u32 ptr;
847         int err = 0;
848         int i;
849
850         struct iam_descr *param;
851         struct iam_frame *frame;
852         struct iam_container *c;
853
854         param = iam_path_descr(path);
855         c = path->ip_container;
856
857         ptr = param->id_ops->id_root_ptr(c);
858         for (frame = path->ip_frames, i = 0; i <= path->ip_indirect;
859              ++frame, ++i) {
860                 err = param->id_ops->id_node_read(c, (iam_ptr_t)ptr, NULL,
861                                                   &frame->bh);
862                 do_corr(schedule());
863
864                 iam_lock_bh(frame->bh);
865                 /*
866                  * node must be initialized under bh lock because concurrent
867                  * creation procedure may change it and iam_lookup_try() will
868                  * see obsolete tree height. -bzzz
869                  */
870                 if (err != 0)
871                         break;
872
873                 if (LDISKFS_INVARIANT_ON) {
874                         err = param->id_ops->id_node_check(path, frame);
875                         if (err != 0)
876                                 break;
877                 }
878
879                 err = param->id_ops->id_node_load(path, frame);
880                 if (err != 0)
881                         break;
882
883                 assert_inv(dx_node_check(path, frame));
884                 /*
885                  * splitting may change the root index block and move the hash
886                  * we're looking for into another index block, so we have to
887                  * check for this situation and repeat from the beginning if
888                  * the path has changed -bzzz
889                  */
890                 if (i > 0) {
891                         err = iam_check_path(path, frame - 1);
892                         if (err != 0)
893                                 break;
894                 }
895
896                 frame->at = iam_find_position(path, frame);
897                 frame->curidx = ptr;
898                 frame->leaf = ptr = dx_get_block(path, frame->at);
899
900                 iam_unlock_bh(frame->bh);
901                 do_corr(schedule());
902         }
903         if (err != 0)
904                 iam_unlock_bh(frame->bh);
905         path->ip_frame = --frame;
906         return err;
907 }
908
909 static int __iam_path_lookup(struct iam_path *path)
910 {
911         int err;
912         int i;
913
914         for (i = 0; i < DX_MAX_TREE_HEIGHT; ++ i)
915                 assert(path->ip_frames[i].bh == NULL);
916
917         do {
918                 err = iam_lookup_try(path);
919                 do_corr(schedule());
920                 if (err != 0)
921                         iam_path_fini(path);
922         } while (err == -EAGAIN);
923
924         return err;
925 }
926
927 /*
928  * returns 0 if path was unchanged, -EAGAIN otherwise.
929  */
930 static int iam_check_full_path(struct iam_path *path, int search)
931 {
932         struct iam_frame *bottom;
933         struct iam_frame *scan;
934         int i;
935         int result;
936
937         do_corr(schedule());
938
939         for (bottom = path->ip_frames, i = 0;
940              i < DX_MAX_TREE_HEIGHT && bottom->bh != NULL; ++bottom, ++i) {
941                 ; /* find last filled in frame */
942         }
943
944         /*
945          * Lock frames, bottom to top.
946          */
947         for (scan = bottom - 1; scan >= path->ip_frames; --scan)
948                 iam_lock_bh(scan->bh);
949         /*
950          * Check them top to bottom.
951          */
952         result = 0;
953         for (scan = path->ip_frames; scan < bottom; ++scan) {
954                 struct iam_entry *pos;
955
956                 if (search) {
957                         if (iam_check_fast(path, scan) == 0)
958                                 continue;
959
960                         pos = iam_find_position(path, scan);
961                         if (scan->leaf != dx_get_block(path, pos)) {
962                                 result = -EAGAIN;
963                                 break;
964                         }
965                         scan->at = pos;
966                 } else {
967                         pos = iam_entry_shift(path, scan->entries,
968                                               dx_get_count(scan->entries) - 1);
969                         if (scan->at > pos ||
970                             scan->leaf != dx_get_block(path, scan->at)) {
971                                 result = -EAGAIN;
972                                 break;
973                         }
974                 }
975         }
976
977         /*
978          * Unlock top to bottom.
979          */
980         for (scan = path->ip_frames; scan < bottom; ++scan)
981                 iam_unlock_bh(scan->bh);
982         DX_DEVAL(iam_lock_stats.dls_bh_full_again += !!result);
983         do_corr(schedule());
984
985         return result;
986 }
987
988
989 /*
990  * Performs path lookup and returns with found leaf (if any) locked by htree
991  * lock.
992  */
993 static int iam_lookup_lock(struct iam_path *path,
994                            struct dynlock_handle **dl, enum dynlock_type lt)
995 {
996         int result;
997
998         while ((result = __iam_path_lookup(path)) == 0) {
999                 do_corr(schedule());
1000                 *dl = iam_lock_htree(path->ip_container, path->ip_frame->leaf,
1001                                      lt);
1002                 if (*dl == NULL) {
1003                         iam_path_fini(path);
1004                         result = -ENOMEM;
1005                         break;
1006                 }
1007                 do_corr(schedule());
1008                 /*
1009                  * while we were locking it, the leaf we just found may have
1010                  * been split, so we need to check this -bzzz
1011                  */
1012                 if (iam_check_full_path(path, 1) == 0)
1013                         break;
1014                 iam_unlock_htree(path->ip_container, *dl);
1015                 *dl = NULL;
1016                 iam_path_fini(path);
1017         }
1018         return result;
1019 }
1020 /*
1021  * Performs tree top-to-bottom traversal starting from root, and loads leaf
1022  * node.
1023  */
1024 static int iam_path_lookup(struct iam_path *path, int index)
1025 {
1026         struct iam_container *c;
1027         struct iam_leaf  *leaf;
1028         int result;
1029
1030         c = path->ip_container;
1031         leaf = &path->ip_leaf;
1032         result = iam_lookup_lock(path, &leaf->il_lock, DLT_WRITE);
1033         assert_inv(iam_path_check(path));
1034         do_corr(schedule());
1035         if (result == 0) {
1036                 result = iam_leaf_load(path);
1037                 if (result == 0) {
1038                         do_corr(schedule());
1039                         if (index)
1040                                 result = iam_leaf_ops(leaf)->
1041                                         ilookup(leaf, path->ip_ikey_target);
1042                         else
1043                                 result = iam_leaf_ops(leaf)->
1044                                         lookup(leaf, path->ip_key_target);
1045                         do_corr(schedule());
1046                 }
1047                 if (result < 0)
1048                         iam_leaf_unlock(leaf);
1049         }
1050         return result;
1051 }
1052
1053 /*
1054  * Common part of iam_it_{i,}get().
1055  */
1056 static int __iam_it_get(struct iam_iterator *it, int index)
1057 {
1058         int result;
1059         assert_corr(it_state(it) == IAM_IT_DETACHED);
1060
1061         result = iam_path_lookup(&it->ii_path, index);
1062         if (result >= 0) {
1063                 int collision;
1064
1065                 collision = result & IAM_LOOKUP_LAST;
1066                 switch (result & ~IAM_LOOKUP_LAST) {
1067                 case IAM_LOOKUP_EXACT:
1068                         result = +1;
1069                         it->ii_state = IAM_IT_ATTACHED;
1070                         break;
1071                 case IAM_LOOKUP_OK:
1072                         result = 0;
1073                         it->ii_state = IAM_IT_ATTACHED;
1074                         break;
1075                 case IAM_LOOKUP_BEFORE:
1076                 case IAM_LOOKUP_EMPTY:
1077                         result = 0;
1078                         it->ii_state = IAM_IT_SKEWED;
1079                         break;
1080                 default:
1081                         assert(0);
1082                 }
1083                 result |= collision;
1084         }
1085         /*
1086          * See iam_it_get_exact() for explanation.
1087          */
1088         assert_corr(result != -ENOENT);
1089         return result;
1090 }
1091
1092 /*
1093  * The correct hash was found, but not the same key. Iterate through the
1094  * hash collision chain, looking for the correct record.
1095  */
1096 static int iam_it_collision(struct iam_iterator *it)
1097 {
1098         int result;
1099
1100         assert(ergo(it_at_rec(it), !it_keyeq(it, it->ii_path.ip_key_target)));
1101
1102         while ((result = iam_it_next(it)) == 0) {
1103                 do_corr(schedule());
1104                 if (it_ikeycmp(it, it->ii_path.ip_ikey_target) != 0)
1105                         return -ENOENT;
1106                 if (it_keyeq(it, it->ii_path.ip_key_target))
1107                         return 0;
1108         }
1109         return result;
1110 }
1111
1112 /*
1113  * Attach iterator. After successful completion, @it points to record with
1114  * least key not larger than @k.
1115  *
1116  * Return value: 0: positioned on existing record,
1117  *             +ve: exact position found,
1118  *             -ve: error.
1119  *
1120  * precondition:  it_state(it) == IAM_IT_DETACHED
1121  * postcondition: ergo(result == 0 && it_state(it) == IAM_IT_ATTACHED,
1122  *                     it_keycmp(it, k) <= 0)
1123  */
1124 int iam_it_get(struct iam_iterator *it, const struct iam_key *k)
1125 {
1126         int result;
1127         assert_corr(it_state(it) == IAM_IT_DETACHED);
1128
1129         it->ii_path.ip_ikey_target = NULL;
1130         it->ii_path.ip_key_target  = k;
1131
1132         result = __iam_it_get(it, 0);
1133
1134         if (result == IAM_LOOKUP_LAST) {
1135                 result = iam_it_collision(it);
1136                 if (result != 0) {
1137                         iam_it_put(it);
1138                         iam_it_fini(it);
1139                         result = __iam_it_get(it, 0);
1140                 } else
1141                         result = +1;
1142         }
1143         if (result > 0)
1144                 result &= ~IAM_LOOKUP_LAST;
1145
1146         assert_corr(ergo(result > 0, it_keycmp(it, k) == 0));
1147         assert_corr(ergo(result == 0 && it_state(it) == IAM_IT_ATTACHED,
1148                          it_keycmp(it, k) <= 0));
1149         return result;
1150 }
1151
1152 /*
1153  * Attach iterator by index key.
1154  */
1155 static int iam_it_iget(struct iam_iterator *it, const struct iam_ikey *k)
1156 {
1157         assert_corr(it_state(it) == IAM_IT_DETACHED);
1158
1159         it->ii_path.ip_ikey_target = k;
1160         return __iam_it_get(it, 1) & ~IAM_LOOKUP_LAST;
1161 }
1162
1163 /*
1164  * Attach iterator, and assure it points to the record (not skewed).
1165  *
1166  * Return value: 0: positioned on existing record,
1167  *             +ve: exact position found,
1168  *             -ve: error.
1169  *
1170  * precondition:  it_state(it) == IAM_IT_DETACHED &&
1171  *                !(it->ii_flags&IAM_IT_WRITE)
1172  * postcondition: ergo(result == 0, it_state(it) == IAM_IT_ATTACHED)
1173  */
1174 int iam_it_get_at(struct iam_iterator *it, const struct iam_key *k)
1175 {
1176         int result;
1177         assert_corr(it_state(it) == IAM_IT_DETACHED &&
1178                     !(it->ii_flags&IAM_IT_WRITE));
1179         result = iam_it_get(it, k);
1180         if (result == 0) {
1181                 if (it_state(it) != IAM_IT_ATTACHED) {
1182                         assert_corr(it_state(it) == IAM_IT_SKEWED);
1183                         result = iam_it_next(it);
1184                 }
1185         }
1186         assert_corr(ergo(result >= 0, it_state(it) == IAM_IT_ATTACHED));
1187         return result;
1188 }
1189
1190 /*
1191  * Duplicates iterator.
1192  *
1193  * postcondition: it_state(dst) == it_state(src) &&
1194  *                iam_it_container(dst) == iam_it_container(src) &&
1195  *                dst->ii_flags = src->ii_flags &&
1196  *                ergo(it_state(src) == IAM_IT_ATTACHED,
1197  *                     iam_it_rec_get(dst) == iam_it_rec_get(src) &&
1198  *                     iam_it_key_get(dst) == iam_it_key_get(src))
1199  */
1200 void iam_it_dup(struct iam_iterator *dst, const struct iam_iterator *src)
1201 {
1202         dst->ii_flags     = src->ii_flags;
1203         dst->ii_state     = src->ii_state;
1204         /* XXX not yet. iam_path_dup(&dst->ii_path, &src->ii_path); */
1205         /*
1206          * XXX: duplicate lock.
1207          */
1208         assert_corr(it_state(dst) == it_state(src));
1209         assert_corr(iam_it_container(dst) == iam_it_container(src));
1210         assert_corr(dst->ii_flags == src->ii_flags);
1211         assert_corr(ergo(it_state(src) == IAM_IT_ATTACHED,
1212                     iam_it_rec_get(dst) == iam_it_rec_get(src) &&
1213                     iam_it_key_get(dst) == iam_it_key_get(src)));
1214
1215 }
1216
1217 /*
1218  * Detach iterator. Does nothing in detached state.
1219  *
1220  * postcondition: it_state(it) == IAM_IT_DETACHED
1221  */
1222 void iam_it_put(struct iam_iterator *it)
1223 {
1224         if (it->ii_state != IAM_IT_DETACHED) {
1225                 it->ii_state = IAM_IT_DETACHED;
1226                 iam_leaf_fini(&it->ii_path.ip_leaf);
1227         }
1228 }
1229
1230 static struct iam_ikey *iam_it_ikey_get(const struct iam_iterator *it,
1231                                         struct iam_ikey *ikey);
1232
1233
1234 /*
1235  * This function increments the frame pointer to search the next leaf
1236  * block, and reads in the necessary intervening nodes if the search
1237  * should be necessary.  Whether or not the search is necessary is
1238  * controlled by the hash parameter.  If the hash value is even, then
1239  * the search is only continued if the next block starts with that
1240  * hash value.  This is used if we are searching for a specific file.
1241  *
1242  * If the hash value is HASH_NB_ALWAYS, then always go to the next block.
1243  *
1244  * This function returns 1 if the caller should continue to search,
1245  * or 0 if it should not.  If there is an error reading one of the
1246  * index blocks, it will return a negative error code.
1247  *
1248  * If start_hash is non-null, it will be filled in with the starting
1249  * hash of the next page.
1250  */
1251 static int iam_htree_advance(struct inode *dir, __u32 hash,
1252                               struct iam_path *path, __u32 *start_hash,
1253                               int compat)
1254 {
1255         struct iam_frame *p;
1256         struct buffer_head *bh;
1257         int err, num_frames = 0;
1258         __u32 bhash;
1259
1260         p = path->ip_frame;
1261         /*
1262          * Find the next leaf page by incrementing the frame pointer.
1263          * If we run out of entries in the interior node, loop around and
1264          * increment pointer in the parent node.  When we break out of
1265          * this loop, num_frames indicates the number of interior
1266          * nodes that need to be read.
1267          */
1268         while (1) {
1269                 do_corr(schedule());
1270                 iam_lock_bh(p->bh);
1271                 if (p->at_shifted)
1272                         p->at_shifted = 0;
1273                 else
1274                         p->at = iam_entry_shift(path, p->at, +1);
1275                 if (p->at < iam_entry_shift(path, p->entries,
1276                                             dx_get_count(p->entries))) {
1277                         p->leaf = dx_get_block(path, p->at);
1278                         iam_unlock_bh(p->bh);
1279                         break;
1280                 }
1281                 iam_unlock_bh(p->bh);
1282                 if (p == path->ip_frames)
1283                         return 0;
1284                 num_frames++;
1285                 --p;
1286         }
1287
1288         if (compat) {
1289                 /*
1290                  * Htree hash magic.
1291                  */
1292                 /*
1293                  * If the hash is 1, then continue only if the next page has a
1294                  * continuation hash of any value.  This is used for readdir
1295                  * handling.  Otherwise, check to see if the hash matches the
1296                  * desired continuation hash.  If it doesn't, return since
1297                  * there's no point to read in the successive index pages.
1298                  */
1299                 dx_get_ikey(path, p->at, (struct iam_ikey *)&bhash);
1300                 if (start_hash)
1301                         *start_hash = bhash;
1302                 if ((hash & 1) == 0) {
1303                         if ((bhash & ~1) != hash)
1304                                 return 0;
1305                 }
1306         }
1307         /*
1308          * If the hash is HASH_NB_ALWAYS, we always go to the next
1309          * block so no check is necessary
1310          */
1311         while (num_frames--) {
1312                 iam_ptr_t idx;
1313
1314                 do_corr(schedule());
1315                 iam_lock_bh(p->bh);
1316                 idx = p->leaf = dx_get_block(path, p->at);
1317                 iam_unlock_bh(p->bh);
1318                 err = iam_path_descr(path)->id_ops->
1319                         id_node_read(path->ip_container, idx, NULL, &bh);
1320                 if (err != 0)
1321                         return err; /* Failure */
1322                 ++p;
1323                 brelse(p->bh);
1324                 assert_corr(p->bh != bh);
1325                 p->bh = bh;
1326                 p->entries = dx_node_get_entries(path, p);
1327                 p->at = iam_entry_shift(path, p->entries, !compat);
1328                 assert_corr(p->curidx != idx);
1329                 p->curidx = idx;
1330                 iam_lock_bh(p->bh);
1331                 assert_corr(p->leaf != dx_get_block(path, p->at));
1332                 p->leaf = dx_get_block(path, p->at);
1333                 iam_unlock_bh(p->bh);
1334                 assert_inv(dx_node_check(path, p));
1335         }
1336         return 1;
1337 }
1338
1339
1340 static inline int iam_index_advance(struct iam_path *path)
1341 {
1342         return iam_htree_advance(iam_path_obj(path), 0, path, NULL, 0);
1343 }
1344
1345 static void iam_unlock_array(struct iam_container *ic,
1346                              struct dynlock_handle **lh)
1347 {
1348         int i;
1349
1350         for (i = 0; i < DX_MAX_TREE_HEIGHT; ++i, ++lh) {
1351                 if (*lh != NULL) {
1352                         iam_unlock_htree(ic, *lh);
1353                         *lh = NULL;
1354                 }
1355         }
1356 }
1357 /*
1358  * Advance index part of @path to point to the next leaf. Returns 1 on
1359  * success, 0 when the end of the container was reached. Leaf node is locked.
1360  */
1361 int iam_index_next(struct iam_container *c, struct iam_path *path)
1362 {
1363         iam_ptr_t cursor;
1364         struct dynlock_handle *lh[DX_MAX_TREE_HEIGHT] = { NULL, };
1365         int result;
1366         struct inode *object;
1367
1368         /*
1369          * Locking for iam_index_next()... is to be described.
1370          */
1371
1372         object = c->ic_object;
1373         cursor = path->ip_frame->leaf;
1374
1375         while (1) {
1376                 result = iam_index_lock(path, lh);
1377                 do_corr(schedule());
1378                 if (result < 0)
1379                         break;
1380
1381                 result = iam_check_full_path(path, 0);
1382                 if (result == 0 && cursor == path->ip_frame->leaf) {
1383                         result = iam_index_advance(path);
1384
1385                         assert_corr(result == 0 ||
1386                                     cursor != path->ip_frame->leaf);
1387                         break;
1388                 }
1389                 do {
1390                         iam_unlock_array(c, lh);
1391
1392                         iam_path_release(path);
1393                         do_corr(schedule());
1394
1395                         result = __iam_path_lookup(path);
1396                         if (result < 0)
1397                                 break;
1398
1399                         while (path->ip_frame->leaf != cursor) {
1400                                 do_corr(schedule());
1401
1402                                 result = iam_index_lock(path, lh);
1403                                 do_corr(schedule());
1404                                 if (result < 0)
1405                                         break;
1406
1407                                 result = iam_check_full_path(path, 0);
1408                                 if (result != 0)
1409                                         break;
1410
1411                                 result = iam_index_advance(path);
1412                                 if (result == 0) {
1413                                         CERROR("cannot find cursor : %u\n",
1414                                                 cursor);
1415                                         result = -EIO;
1416                                 }
1417                                 if (result < 0)
1418                                         break;
1419                                 result = iam_check_full_path(path, 0);
1420                                 if (result != 0)
1421                                         break;
1422                                 iam_unlock_array(c, lh);
1423                         }
1424                 } while (result == -EAGAIN);
1425                 if (result < 0)
1426                         break;
1427         }
1428         iam_unlock_array(c, lh);
1429         return result;
1430 }
1431
1432 /*
1433  * Move iterator one record right.
1434  *
1435  * Return value: 0: success,
1436  *              +1: end of container reached
1437  *             -ve: error
1438  *
1439  * precondition:  (it_state(it) == IAM_IT_ATTACHED ||
1440  *                 it_state(it) == IAM_IT_SKEWED) && it->ii_flags&IAM_IT_MOVE
1441  * postcondition: ergo(result == 0, it_state(it) == IAM_IT_ATTACHED) &&
1442  *                ergo(result >  0, it_state(it) == IAM_IT_DETACHED)
1443  */
1444 int iam_it_next(struct iam_iterator *it)
1445 {
1446         int result;
1447         struct iam_path      *path;
1448         struct iam_leaf      *leaf;
1449         do_corr(struct iam_ikey *ik_orig);
1450
1451         /* assert_corr(it->ii_flags&IAM_IT_MOVE); */
1452         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1453                     it_state(it) == IAM_IT_SKEWED);
1454
1455         path = &it->ii_path;
1456         leaf = &path->ip_leaf;
1457
1458         assert_corr(iam_leaf_is_locked(leaf));
1459
1460         result = 0;
1461         do_corr(ik_orig = it_at_rec(it) ?
1462                 iam_it_ikey_get(it, iam_path_ikey(path, 2)) : NULL);
1463         if (it_before(it)) {
1464                 assert_corr(!iam_leaf_at_end(leaf));
1465                 it->ii_state = IAM_IT_ATTACHED;
1466         } else {
1467                 if (!iam_leaf_at_end(leaf))
1468                         /* advance within leaf node */
1469                         iam_leaf_next(leaf);
1470                 /*
1471                  * multiple iterations may be necessary due to empty leaves.
1472                  */
1473                 while (result == 0 && iam_leaf_at_end(leaf)) {
1474                         do_corr(schedule());
1475                         /* advance index portion of the path */
1476                         result = iam_index_next(iam_it_container(it), path);
1477                         assert_corr(iam_leaf_is_locked(leaf));
1478                         if (result == 1) {
1479                                 struct dynlock_handle *lh;
1480                                 lh = iam_lock_htree(iam_it_container(it),
1481                                                     path->ip_frame->leaf,
1482                                                     DLT_WRITE);
1483                                 if (lh != NULL) {
1484                                         iam_leaf_fini(leaf);
1485                                         leaf->il_lock = lh;
1486                                         result = iam_leaf_load(path);
1487                                         if (result == 0)
1488                                                 iam_leaf_start(leaf);
1489                                 } else
1490                                         result = -ENOMEM;
1491                         } else if (result == 0)
1492                                 /* end of container reached */
1493                                 result = +1;
1494                         if (result != 0)
1495                                 iam_it_put(it);
1496                 }
1497                 if (result == 0)
1498                         it->ii_state = IAM_IT_ATTACHED;
1499         }
1500         assert_corr(ergo(result == 0, it_state(it) == IAM_IT_ATTACHED));
1501         assert_corr(ergo(result >  0, it_state(it) == IAM_IT_DETACHED));
1502         assert_corr(ergo(result == 0 && ik_orig != NULL,
1503                          it_ikeycmp(it, ik_orig) >= 0));
1504         return result;
1505 }
1506
1507 /*
1508  * Return pointer to the record under iterator.
1509  *
1510  * precondition:  it_state(it) == IAM_IT_ATTACHED && it_at_rec(it)
1511  * postcondition: it_state(it) == IAM_IT_ATTACHED
1512  */
1513 struct iam_rec *iam_it_rec_get(const struct iam_iterator *it)
1514 {
1515         assert_corr(it_state(it) == IAM_IT_ATTACHED);
1516         assert_corr(it_at_rec(it));
1517         return iam_leaf_rec(&it->ii_path.ip_leaf);
1518 }
1519
1520 static void iam_it_reccpy(struct iam_iterator *it, const struct iam_rec *r)
1521 {
1522         struct iam_leaf *folio;
1523
1524         folio = &it->ii_path.ip_leaf;
1525         iam_leaf_ops(folio)->rec_set(folio, r);
1526 }
1527
1528 /*
1529  * Replace contents of record under iterator.
1530  *
1531  * precondition:  it_state(it) == IAM_IT_ATTACHED &&
1532  *                it->ii_flags&IAM_IT_WRITE
1533  * postcondition: it_state(it) == IAM_IT_ATTACHED &&
1534  *                ergo(result == 0, !memcmp(iam_it_rec_get(it), r, ...))
1535  */
1536 int iam_it_rec_set(handle_t *h,
1537                    struct iam_iterator *it, const struct iam_rec *r)
1538 {
1539         int result;
1540         struct iam_path *path;
1541         struct buffer_head *bh;
1542
1543         assert_corr(it_state(it) == IAM_IT_ATTACHED &&
1544                     it->ii_flags&IAM_IT_WRITE);
1545         assert_corr(it_at_rec(it));
1546
1547         path = &it->ii_path;
1548         bh   = path->ip_leaf.il_bh;
1549         result = iam_txn_add(h, path, bh);
1550         if (result == 0) {
1551                 iam_it_reccpy(it, r);
1552                 result = iam_txn_dirty(h, path, bh);
1553         }
1554         return result;
1555 }
1556
1557 /*
1558  * Return pointer to the index key under iterator.
1559  *
1560  * precondition:  it_state(it) == IAM_IT_ATTACHED ||
1561  *                it_state(it) == IAM_IT_SKEWED
1562  */
1563 static struct iam_ikey *iam_it_ikey_get(const struct iam_iterator *it,
1564                                         struct iam_ikey *ikey)
1565 {
1566         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1567                     it_state(it) == IAM_IT_SKEWED);
1568         assert_corr(it_at_rec(it));
1569         return iam_leaf_ikey(&it->ii_path.ip_leaf, ikey);
1570 }
1571
1572 /*
1573  * Return pointer to the key under iterator.
1574  *
1575  * precondition:  it_state(it) == IAM_IT_ATTACHED ||
1576  *                it_state(it) == IAM_IT_SKEWED
1577  */
1578 struct iam_key *iam_it_key_get(const struct iam_iterator *it)
1579 {
1580         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1581                     it_state(it) == IAM_IT_SKEWED);
1582         assert_corr(it_at_rec(it));
1583         return iam_leaf_key(&it->ii_path.ip_leaf);
1584 }
1585
1586 /*
1587  * Return size of key under iterator (in bytes)
1588  *
1589  * precondition:  it_state(it) == IAM_IT_ATTACHED ||
1590  *                it_state(it) == IAM_IT_SKEWED
1591  */
1592 int iam_it_key_size(const struct iam_iterator *it)
1593 {
1594         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1595                     it_state(it) == IAM_IT_SKEWED);
1596         assert_corr(it_at_rec(it));
1597         return iam_leaf_key_size(&it->ii_path.ip_leaf);
1598 }
1599
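/*
 * Allocate a block for a new iam node in container @c within transaction @h.
 *
 * If the container keeps a non-empty list of idle (previously freed) blocks,
 * a block is taken from that list, zeroed and added to the transaction;
 * otherwise a fresh block is appended to the underlying object via
 * osd_ldiskfs_append(). On success the buffer head is returned and *b is set
 * to the block number; on failure NULL is returned and the error is stored
 * in *e.
 */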
1600 static struct buffer_head *
1601 iam_new_node(handle_t *h, struct iam_container *c, iam_ptr_t *b, int *e)
1602 {
1603         struct inode *inode = c->ic_object;
1604         struct buffer_head *bh = NULL;
1605         struct iam_idle_head *head;
1606         struct buffer_head *idle;
1607         __u32 *idle_blocks;
1608         __u16 count;
1609
1610         if (c->ic_idle_bh == NULL)
1611                 goto newblock;
1612
1613         mutex_lock(&c->ic_idle_mutex);
1614         if (unlikely(c->ic_idle_bh == NULL)) {
1615                 mutex_unlock(&c->ic_idle_mutex);
1616                 goto newblock;
1617         }
1618
1619         head = (struct iam_idle_head *)(c->ic_idle_bh->b_data);
1620         count = le16_to_cpu(head->iih_count);
1621         if (count > 0) {
1622                 *e = ldiskfs_journal_get_write_access(h, c->ic_idle_bh);
1623                 if (*e != 0)
1624                         goto fail;
1625
1626                 --count;
1627                 *b = le32_to_cpu(head->iih_blks[count]);
1628                 head->iih_count = cpu_to_le16(count);
1629                 *e = ldiskfs_handle_dirty_metadata(h, inode, c->ic_idle_bh);
1630                 if (*e != 0)
1631                         goto fail;
1632
1633                 mutex_unlock(&c->ic_idle_mutex);
1634                 bh = __ldiskfs_bread(NULL, inode, *b, 0);
1635                 if (IS_ERR_OR_NULL(bh)) {
1636                         if (IS_ERR(bh))
1637                                 *e = PTR_ERR(bh);
1638                         else
1639                                 *e = -EIO;
1640                         return NULL;
1641                 }
1642                 goto got;
1643         }
1644
1645         /* The block that contains the iam_idle_head is itself
1646          * an idle block, and can be used as the new node. */
1647         idle_blocks = (__u32 *)(c->ic_root_bh->b_data +
1648                                 c->ic_descr->id_root_gap +
1649                                 sizeof(struct dx_countlimit));
1650         *e = ldiskfs_journal_get_write_access(h, c->ic_root_bh);
1651         if (*e != 0)
1652                 goto fail;
1653
1654         *b = le32_to_cpu(*idle_blocks);
1655         iam_lock_bh(c->ic_root_bh);
1656         *idle_blocks = head->iih_next;
1657         iam_unlock_bh(c->ic_root_bh);
1658         *e = ldiskfs_handle_dirty_metadata(h, inode, c->ic_root_bh);
1659         if (*e != 0) {
1660                 iam_lock_bh(c->ic_root_bh);
1661                 *idle_blocks = cpu_to_le32(*b);
1662                 iam_unlock_bh(c->ic_root_bh);
1663                 goto fail;
1664         }
1665
1666         bh = c->ic_idle_bh;
1667         idle = iam_load_idle_blocks(c, le32_to_cpu(*idle_blocks));
1668         if (idle != NULL && IS_ERR(idle)) {
1669                 *e = PTR_ERR(idle);
1670                 c->ic_idle_bh = NULL;
1671                 brelse(bh);
1672                 goto fail;
1673         }
1674
1675         c->ic_idle_bh = idle;
1676         mutex_unlock(&c->ic_idle_mutex);
1677
1678 got:
1679         /* get write access for the found buffer head */
1680         *e = ldiskfs_journal_get_write_access(h, bh);
1681         if (*e != 0) {
1682                 brelse(bh);
1683                 bh = NULL;
1684                 ldiskfs_std_error(inode->i_sb, *e);
1685         } else {
1686                 /* Clear the reused node, as is done for a new node. */
1687                 memset(bh->b_data, 0, inode->i_sb->s_blocksize);
1688                 set_buffer_uptodate(bh);
1689         }
1690         return bh;
1691
1692 newblock:
1693         bh = osd_ldiskfs_append(h, inode, b);
1694         if (IS_ERR(bh)) {
1695                 *e = PTR_ERR(bh);
1696                 bh = NULL;
1697         }
1698
1699         return bh;
1700
1701 fail:
1702         mutex_unlock(&c->ic_idle_mutex);
1703         ldiskfs_std_error(inode->i_sb, *e);
1704         return NULL;
1705 }
1706
1707 /*
1708  * Insertion of a new record. Interaction with jbd in the non-trivial case
1709  * (when a split happens) is as follows:
1710  *
1711  *  - the new leaf node is added to the transaction by iam_new_node();
1712  *
1713  *  - the old leaf node is added to the transaction by iam_add_rec();
1714  *
1715  *  - the leaf containing the insertion point is marked dirty by iam_add_rec();
1716  *
1717  *  - the leaf without the insertion point is marked dirty (as @new_leaf) by
1718  *  iam_new_leaf();
1719  *
1720  *  - split index nodes are added to the transaction and marked dirty by
1721  *  split_index_node();
1722  *
1723  *  - the "safe" index node, which is not split but where the new pointer is
1724  *  inserted, is added to the transaction and marked dirty by split_index_node();
1725  *
1726  *  - the index node where the pointer to the new leaf is inserted is added to
1727  *  the transaction by split_index_node() and marked dirty by iam_add_rec();
1728  *
1729  *  - the inode is marked dirty by iam_add_rec().
1730  *
1731  */
1732
1733 static int iam_new_leaf(handle_t *handle, struct iam_leaf *leaf)
1734 {
1735         int err;
1736         iam_ptr_t blknr;
1737         struct buffer_head   *new_leaf;
1738         struct buffer_head   *old_leaf;
1739         struct iam_container *c;
1740         struct inode         *obj;
1741         struct iam_path      *path;
1742
1743         c = iam_leaf_container(leaf);
1744         path = leaf->il_path;
1745
1746         obj = c->ic_object;
1747         new_leaf = iam_new_node(handle, c, &blknr, &err);
1748         do_corr(schedule());
1749         if (new_leaf != NULL) {
1750                 struct dynlock_handle *lh;
1751
1752                 lh = iam_lock_htree(c, blknr, DLT_WRITE);
1753                 do_corr(schedule());
1754                 if (lh != NULL) {
1755                         iam_leaf_ops(leaf)->init_new(c, new_leaf);
1756                         do_corr(schedule());
1757                         old_leaf = leaf->il_bh;
1758                         iam_leaf_split(leaf, &new_leaf, blknr);
1759                         if (old_leaf != leaf->il_bh) {
1760                                 /*
1761                                  * Switched to the new leaf.
1762                                  */
1763                                 iam_leaf_unlock(leaf);
1764                                 leaf->il_lock = lh;
1765                                 path->ip_frame->leaf = blknr;
1766                         } else
1767                                 iam_unlock_htree(path->ip_container, lh);
1768                         do_corr(schedule());
1769                         err = iam_txn_dirty(handle, path, new_leaf);
1770                         if (err == 0)
1771                                 err = ldiskfs_mark_inode_dirty(handle, obj);
1772                         do_corr(schedule());
1773                 } else
1774                         err = -ENOMEM;
1775                 brelse(new_leaf);
1776         }
1777         assert_inv(iam_path_check(iam_leaf_path(leaf)));
1778         return err;
1779 }
1780
1781 static inline void dx_set_limit(struct iam_entry *entries, unsigned value)
1782 {
1783         ((struct dx_countlimit *) entries)->limit = cpu_to_le16(value);
1784 }
1785
1786 static int iam_shift_entries(struct iam_path *path,
1787                          struct iam_frame *frame, unsigned count,
1788                          struct iam_entry *entries, struct iam_entry *entries2,
1789                          u32 newblock)
1790 {
1791         unsigned count1;
1792         unsigned count2;
1793         int delta;
1794
1795         struct iam_frame *parent = frame - 1;
1796         struct iam_ikey *pivot = iam_path_ikey(path, 3);
1797
1798         delta = dx_index_is_compat(path) ? 0 : +1;
1799
1800         count1 = count/2 + delta;
1801         count2 = count - count1;
1802         dx_get_ikey(path, iam_entry_shift(path, entries, count1), pivot);
1803
1804         dxtrace(printk("Split index %d/%d\n", count1, count2));
1805
1806         memcpy((char *) iam_entry_shift(path, entries2, delta),
1807                (char *) iam_entry_shift(path, entries, count1),
1808                count2 * iam_entry_size(path));
1809
1810         dx_set_count(entries2, count2 + delta);
1811         dx_set_limit(entries2, dx_node_limit(path));
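        /*
         * Illustrative arithmetic (example numbers only): with count == 10 and
         * delta == 1, count1 == 6 entries stay in the old block, count2 == 4
         * entries are copied into the new block starting at slot delta, and
         * the new block's count is set to count2 + delta == 5.
         */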
1812
1813         /*
1814          * NOTE: very subtle. A competing dx_probe() may find the 2nd-level
1815          * index in the root index; we then insert the new index here and set
1816          * the new count in that 2nd-level index, so dx_probe() may see a
1817          * 2nd-level index without the hash it looks for. The fix: re-check
1818          * the root index after locking the just-found 2nd-level index -bzzz
1819          */
1820         iam_insert_key_lock(path, parent, pivot, newblock);
1821
1822         /*
1823          * now both the old and the new 2nd-level index blocks contain all
1824          * pointers, so dx_probe() may find it in either.  That's OK -bzzz
1825          */
1826         iam_lock_bh(frame->bh);
1827         dx_set_count(entries, count1);
1828         iam_unlock_bh(frame->bh);
1829
1830         /*
1831          * now the old 2nd-level index block points to the first half of the
1832          * leaves. It's important that dx_probe() check the root index block
1833          * for changes under dx_lock_bh(frame->bh) -bzzz
1834          */
1835
1836         return count1;
1837 }
1838
1839
1840 int split_index_node(handle_t *handle, struct iam_path *path,
1841                      struct dynlock_handle **lh)
1842 {
1843
1844         struct iam_entry *entries;   /* old block contents */
1845         struct iam_entry *entries2;  /* new block contents */
1846         struct iam_frame *frame, *safe;
1847         struct buffer_head *bh_new[DX_MAX_TREE_HEIGHT] = {NULL};
1848         u32 newblock[DX_MAX_TREE_HEIGHT] = {0};
1849         struct dynlock_handle *lock[DX_MAX_TREE_HEIGHT] = {NULL,};
1850         struct dynlock_handle *new_lock[DX_MAX_TREE_HEIGHT] = {NULL,};
1851         struct inode *dir = iam_path_obj(path);
1852         struct iam_descr *descr;
1853         int nr_splet;
1854         int i, err;
1855
1856         descr = iam_path_descr(path);
1857         /*
1858          * Algorithm below depends on this.
1859          */
1860         assert_corr(dx_root_limit(path) < dx_node_limit(path));
1861
1862         frame = path->ip_frame;
1863         entries = frame->entries;
1864
1865         /*
1866          * Tall-tree handling: we might have to split multiple index blocks
1867          * all the way up to tree root. Tricky point here is error handling:
1868          * to avoid complicated undo/rollback we
1869          *
1870          *   - first allocate all necessary blocks
1871          *
1872          *   - insert pointers into them atomically.
1873          */
1874
1875         /*
1876          * Locking: leaf is already locked. htree-locks are acquired on all
1877          * index nodes that require split bottom-to-top, on the "safe" node,
1878          * and on all new nodes
1879          */
1880
1881         dxtrace(printk("using %u of %u node entries\n",
1882                        dx_get_count(entries), dx_get_limit(entries)));
1883
1884         /* What levels need split? */
1885         for (nr_splet = 0; frame >= path->ip_frames &&
1886              dx_get_count(frame->entries) == dx_get_limit(frame->entries);
1887              --frame, ++nr_splet) {
1888                 do_corr(schedule());
1889                 if (nr_splet == DX_MAX_TREE_HEIGHT) {
1890                         /*
1891                         CWARN(dir->i_sb, __FUNCTION__,
1892                                      "Directory index full!\n");
1893                                      */
1894                         err = -ENOSPC;
1895                         goto cleanup;
1896                 }
1897         }
1898
1899         safe = frame;
1900
1901         /*
1902          * Lock all nodes, bottom to top.
1903          */
1904         for (frame = path->ip_frame, i = nr_splet; i >= 0; --i, --frame) {
1905                 do_corr(schedule());
1906                 lock[i] = iam_lock_htree(path->ip_container, frame->curidx,
1907                                          DLT_WRITE);
1908                 if (lock[i] == NULL) {
1909                         err = -ENOMEM;
1910                         goto cleanup;
1911                 }
1912         }
1913
1914         /*
1915          * Check for concurrent index modification.
1916          */
1917         err = iam_check_full_path(path, 1);
1918         if (err)
1919                 goto cleanup;
1920         /*
1921          * And check that the same number of nodes is to be split.
1922          */
1923         for (i = 0, frame = path->ip_frame; frame >= path->ip_frames &&
1924              dx_get_count(frame->entries) == dx_get_limit(frame->entries);
1925              --frame, ++i) {
1926                 ;
1927         }
1928         if (i != nr_splet) {
1929                 err = -EAGAIN;
1930                 goto cleanup;
1931         }
1932
1933         /* Go back down, allocating blocks, locking them, and adding them
1934          * to the transaction... */
1935         for (frame = safe + 1, i = 0; i < nr_splet; ++i, ++frame) {
1936                 bh_new[i] = iam_new_node(handle, path->ip_container,
1937                                          &newblock[i], &err);
1938                 do_corr(schedule());
1939                 if (!bh_new[i] ||
1940                     descr->id_ops->id_node_init(path->ip_container,
1941                                                 bh_new[i], 0) != 0)
1942                         goto cleanup;
1943                 new_lock[i] = iam_lock_htree(path->ip_container, newblock[i],
1944                                              DLT_WRITE);
1945                 if (new_lock[i] == NULL) {
1946                         err = -ENOMEM;
1947                         goto cleanup;
1948                 }
1949                 do_corr(schedule());
1950                 BUFFER_TRACE(frame->bh, "get_write_access");
1951                 err = ldiskfs_journal_get_write_access(handle, frame->bh);
1952                 if (err)
1953                         goto journal_error;
1954         }
1955         /* Add "safe" node to transaction too */
1956         if (safe + 1 != path->ip_frames) {
1957                 do_corr(schedule());
1958                 err = ldiskfs_journal_get_write_access(handle, safe->bh);
1959                 if (err)
1960                         goto journal_error;
1961         }
1962
1963         /* Go through nodes once more, inserting pointers */
1964         for (frame = safe + 1, i = 0; i < nr_splet; ++i, ++frame) {
1965                 unsigned count;
1966                 int idx;
1967                 struct buffer_head *bh2;
1968                 struct buffer_head *bh;
1969
1970                 entries = frame->entries;
1971                 count = dx_get_count(entries);
1972                 idx = iam_entry_diff(path, frame->at, entries);
1973
1974                 bh2 = bh_new[i];
1975                 entries2 = dx_get_entries(path, bh2->b_data, 0);
1976
1977                 bh = frame->bh;
1978                 if (frame == path->ip_frames) {
1979                         /* splitting root node. Tricky point:
1980                          *
1981                          * In the "normal" B-tree we'd split root *and* add
1982                          * new root to the tree with pointers to the old root
1983                          * and its sibling (thus introducing two new nodes).
1984                          *
1985                          * In htree it's enough to add one node, because
1986                          * capacity of the root node is smaller than that of
1987                          * non-root one.
1988                          */
1989                         struct iam_frame *frames;
1990                         struct iam_entry *next;
1991
1992                         assert_corr(i == 0);
1993
1994                         do_corr(schedule());
1995
1996                         frames = path->ip_frames;
1997                         memcpy((char *) entries2, (char *) entries,
1998                                count * iam_entry_size(path));
1999                         dx_set_limit(entries2, dx_node_limit(path));
2000
2001                         /* Set up root */
2002                         iam_lock_bh(frame->bh);
2003                         next = descr->id_ops->id_root_inc(path->ip_container,
2004                                                           path, frame);
2005                         dx_set_block(path, next, newblock[0]);
2006                         iam_unlock_bh(frame->bh);
2007
2008                         do_corr(schedule());
2009                         /* Shift frames in the path */
2010                         memmove(frames + 2, frames + 1,
2011                                 (sizeof path->ip_frames) - 2 * sizeof frames[0]);
2012                         /* Add new access path frame */
2013                         frames[1].at = iam_entry_shift(path, entries2, idx);
2014                         frames[1].entries = entries = entries2;
2015                         frames[1].bh = bh2;
2016                         assert_inv(dx_node_check(path, frame));
2017                         ++ path->ip_frame;
2018                         ++ frame;
2019                         assert_inv(dx_node_check(path, frame));
2020                         bh_new[0] = NULL; /* buffer head is "consumed" */
2021                         err = ldiskfs_handle_dirty_metadata(handle, NULL, bh2);
2022                         if (err)
2023                                 goto journal_error;
2024                         do_corr(schedule());
2025                 } else {
2026                         /* splitting non-root index node. */
2027                         struct iam_frame *parent = frame - 1;
2028
2029                         do_corr(schedule());
2030                         count = iam_shift_entries(path, frame, count,
2031                                               entries, entries2, newblock[i]);
2032                         /* Which index block gets the new entry? */
2033                         if (idx >= count) {
2034                                 int d = dx_index_is_compat(path) ? 0 : +1;
2035
2036                                 frame->at = iam_entry_shift(path, entries2,
2037                                                             idx - count + d);
2038                                 frame->entries = entries = entries2;
2039                                 frame->curidx = newblock[i];
2040                                 swap(frame->bh, bh2);
2041                                 assert_corr(lock[i + 1] != NULL);
2042                                 assert_corr(new_lock[i] != NULL);
2043                                 swap(lock[i + 1], new_lock[i]);
2044                                 bh_new[i] = bh2;
2045                                 parent->at = iam_entry_shift(path,
2046                                                              parent->at, +1);
2047                         }
2048                         assert_inv(dx_node_check(path, frame));
2049                         assert_inv(dx_node_check(path, parent));
2050                         dxtrace(dx_show_index ("node", frame->entries));
2051                         dxtrace(dx_show_index ("node",
2052                                ((struct dx_node *) bh2->b_data)->entries));
2053                         err = ldiskfs_handle_dirty_metadata(handle, NULL, bh2);
2054                         if (err)
2055                                 goto journal_error;
2056                         do_corr(schedule());
2057                         err = ldiskfs_handle_dirty_metadata(handle, NULL,
2058                                                             parent->bh);
2059                         if (err)
2060                                 goto journal_error;
2061                 }
2062                 do_corr(schedule());
2063                 err = ldiskfs_handle_dirty_metadata(handle, NULL, bh);
2064                 if (err)
2065                         goto journal_error;
2066         }
2067         /*
2068          * This function was called to make insertion of new leaf
2069          * possible. Check that it fulfilled its obligations.
2070          */
2071         assert_corr(dx_get_count(path->ip_frame->entries) <
2072                     dx_get_limit(path->ip_frame->entries));
2073         assert_corr(lock[nr_splet] != NULL);
2074         *lh = lock[nr_splet];
2075         lock[nr_splet] = NULL;
2076         if (nr_splet > 0) {
2077                 /*
2078                  * Log ->i_size modification.
2079                  */
2080                 err = ldiskfs_mark_inode_dirty(handle, dir);
2081                 if (err)
2082                         goto journal_error;
2083         }
2084         goto cleanup;
2085 journal_error:
2086         ldiskfs_std_error(dir->i_sb, err);
2087
2088 cleanup:
2089         iam_unlock_array(path->ip_container, lock);
2090         iam_unlock_array(path->ip_container, new_lock);
2091
2092         assert_corr(err || iam_frame_is_locked(path, path->ip_frame));
2093
2094         do_corr(schedule());
2095         for (i = 0; i < ARRAY_SIZE(bh_new); ++i) {
2096                 if (bh_new[i] != NULL)
2097                         brelse(bh_new[i]);
2098         }
2099         return err;
2100 }
2101
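/*
 * Insert record @r with key @k at the current position of @path (helper for
 * iam_it_rec_insert()).
 *
 * If the leaf has no room, index nodes are split by split_index_node() and
 * then the leaf itself by iam_new_leaf(). A split can race with concurrent
 * modifications: on -EAGAIN the path is rebuilt via iam_it_get_exact() and
 * the split is retried; if the key is then found to be present, -EEXIST is
 * returned.
 */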
2102 static int iam_add_rec(handle_t *handle, struct iam_iterator *it,
2103                        struct iam_path *path,
2104                        const struct iam_key *k, const struct iam_rec *r)
2105 {
2106         int err;
2107         struct iam_leaf *leaf;
2108
2109         leaf = &path->ip_leaf;
2110         assert_inv(iam_path_check(path));
2111         err = iam_txn_add(handle, path, leaf->il_bh);
2112         if (err == 0) {
2113                 do_corr(schedule());
2114                 if (!iam_leaf_can_add(leaf, k, r)) {
2115                         struct dynlock_handle *lh = NULL;
2116
2117                         do {
2118                                 assert_corr(lh == NULL);
2119                                 do_corr(schedule());
2120                                 err = split_index_node(handle, path, &lh);
2121                                 if (err == -EAGAIN) {
2122                                         assert_corr(lh == NULL);
2123
2124                                         iam_path_fini(path);
2125                                         it->ii_state = IAM_IT_DETACHED;
2126
2127                                         do_corr(schedule());
2128                                         err = iam_it_get_exact(it, k);
2129                                         if (err == -ENOENT)
2130                                                 err = +1; /* repeat split */
2131                                         else if (err == 0)
2132                                                 err = -EEXIST;
2133                                 }
2134                         } while (err > 0);
2135                         assert_inv(iam_path_check(path));
2136                         if (err == 0) {
2137                                 assert_corr(lh != NULL);
2138                                 do_corr(schedule());
2139                                 err = iam_new_leaf(handle, leaf);
2140                                 if (err == 0)
2141                                         err = iam_txn_dirty(handle, path,
2142                                                             path->ip_frame->bh);
2143                         }
2144                         iam_unlock_htree(path->ip_container, lh);
2145                         do_corr(schedule());
2146                 }
2147                 if (err == 0) {
2148                         iam_leaf_rec_add(leaf, k, r);
2149                         err = iam_txn_dirty(handle, path, leaf->il_bh);
2150                 }
2151         }
2152         assert_inv(iam_path_check(path));
2153         return err;
2154 }
2155
2156 /*
2157  * Insert new record with key @k and contents from @r, shifting records to the
2158  * right. On success, iterator is positioned on the newly inserted record.
2159  *
2160  * precondition: it->ii_flags&IAM_IT_WRITE &&
2161  *               (it_state(it) == IAM_IT_ATTACHED ||
2162  *                it_state(it) == IAM_IT_SKEWED) &&
2163  *               ergo(it_state(it) == IAM_IT_ATTACHED,
2164  *                    it_keycmp(it, k) <= 0) &&
2165  *               ergo(it_before(it), it_keycmp(it, k) > 0));
2166  * postcondition: ergo(result == 0,
2167  *                     it_state(it) == IAM_IT_ATTACHED &&
2168  *                     it_keycmp(it, k) == 0 &&
2169  *                     !memcmp(iam_it_rec_get(it), r, ...))
2170  */
2171 int iam_it_rec_insert(handle_t *h, struct iam_iterator *it,
2172                       const struct iam_key *k, const struct iam_rec *r)
2173 {
2174         int result;
2175         struct iam_path *path;
2176
2177         path = &it->ii_path;
2178
2179         assert_corr(it->ii_flags&IAM_IT_WRITE);
2180         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
2181                     it_state(it) == IAM_IT_SKEWED);
2182         assert_corr(ergo(it_state(it) == IAM_IT_ATTACHED,
2183                          it_keycmp(it, k) <= 0));
2184         assert_corr(ergo(it_before(it), it_keycmp(it, k) > 0));
2185         result = iam_add_rec(h, it, path, k, r);
2186         if (result == 0)
2187                 it->ii_state = IAM_IT_ATTACHED;
2188         assert_corr(ergo(result == 0,
2189                          it_state(it) == IAM_IT_ATTACHED &&
2190                          it_keycmp(it, k) == 0));
2191         return result;
2192 }
2193
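/*
 * Maximal number of idle-block slots (__u32 block numbers) that fit into one
 * ic_idle_bh block after the struct iam_idle_head header. For example
 * (illustrative sizes): with a 4096-byte block and an 8-byte header this is
 * (4096 - 8) >> 2 == 1022.
 */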
2194 static inline int iam_idle_blocks_limit(struct inode *inode)
2195 {
2196         return (inode->i_sb->s_blocksize - sizeof(struct iam_idle_head)) >> 2;
2197 }
2198
2199 /*
2200  * If the leaf cannot be recycled, we will lose one block for reuse. It is
2201  * not a serious issue, because the result is almost the same as not recycling.
2202  */
2203 static iam_ptr_t iam_index_shrink(handle_t *h, struct iam_path *p,
2204                                   struct iam_leaf *l, struct buffer_head **bh)
2205 {
2206         struct iam_container *c = p->ip_container;
2207         struct inode *inode = c->ic_object;
2208         struct iam_frame *frame = p->ip_frame;
2209         struct iam_entry *entries;
2210         struct iam_entry *pos;
2211         struct dynlock_handle *lh;
2212         int count;
2213         int rc;
2214
2215         if (c->ic_idle_failed)
2216                 return 0;
2217
2218         if (unlikely(frame == NULL))
2219                 return 0;
2220
2221         if (!iam_leaf_empty(l))
2222                 return 0;
2223
2224         lh = iam_lock_htree(c, frame->curidx, DLT_WRITE);
2225         if (lh == NULL) {
2226                 CWARN("%.16s: No memory to recycle idle blocks\n",
2227                       LDISKFS_SB(inode->i_sb)->s_es->s_volume_name);
2228                 return 0;
2229         }
2230
2231         rc = iam_txn_add(h, p, frame->bh);
2232         if (rc != 0) {
2233                 iam_unlock_htree(c, lh);
2234                 return 0;
2235         }
2236
2237         iam_lock_bh(frame->bh);
2238         entries = frame->entries;
2239         count = dx_get_count(entries);
2240         /* Do NOT shrink the last entry in the index node; it can be reused
2241          * directly by the next new node. */
2242         if (count == 2) {
2243                 iam_unlock_bh(frame->bh);
2244                 iam_unlock_htree(c, lh);
2245                 return 0;
2246         }
2247
2248         pos = iam_find_position(p, frame);
2249         /* Some new leaf nodes may have been added, or empty leaf nodes
2250          * shrunk, during our delete operation.
2251          *
2252          * If the empty leaf is not under the current index node because the
2253          * index node has been split, just skip the empty leaf; this is rare. */
2254         if (unlikely(frame->leaf != dx_get_block(p, pos))) {
2255                 iam_unlock_bh(frame->bh);
2256                 iam_unlock_htree(c, lh);
2257                 return 0;
2258         }
2259
2260         frame->at = pos;
2261         if (frame->at < iam_entry_shift(p, entries, count - 1)) {
2262                 struct iam_entry *n = iam_entry_shift(p, frame->at, 1);
2263
2264                 memmove(frame->at, n,
2265                         (char *)iam_entry_shift(p, entries, count) - (char *)n);
2266                 frame->at_shifted = 1;
2267         }
2268         dx_set_count(entries, count - 1);
2269         iam_unlock_bh(frame->bh);
2270         rc = iam_txn_dirty(h, p, frame->bh);
2271         iam_unlock_htree(c, lh);
2272         if (rc != 0)
2273                 return 0;
2274
2275         get_bh(l->il_bh);
2276         *bh = l->il_bh;
2277         return frame->leaf;
2278 }
2279
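/*
 * Make @bh (which caches block @blk) the new head of the container's list of
 * idle blocks: initialize its iam_idle_head, chain the previous head through
 * iih_next, and point the root block's idle-blocks field at @blk, all within
 * transaction @h. On success ic_idle_bh is switched to @bh; on failure the
 * root block's idle-blocks field is restored.
 */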
2280 static int
2281 iam_install_idle_blocks(handle_t *h, struct iam_path *p, struct buffer_head *bh,
2282                         __u32 *idle_blocks, iam_ptr_t blk)
2283 {
2284         struct iam_container *c = p->ip_container;
2285         struct buffer_head *old = c->ic_idle_bh;
2286         struct iam_idle_head *head;
2287         int rc;
2288
2289         head = (struct iam_idle_head *)(bh->b_data);
2290         head->iih_magic = cpu_to_le16(IAM_IDLE_HEADER_MAGIC);
2291         head->iih_count = 0;
2292         head->iih_next = *idle_blocks;
2293         /* Write access to the bh has already been obtained. */
2294         rc = iam_txn_dirty(h, p, bh);
2295         if (rc != 0)
2296                 return rc;
2297
2298         rc = iam_txn_add(h, p, c->ic_root_bh);
2299         if (rc != 0)
2300                 return rc;
2301
2302         iam_lock_bh(c->ic_root_bh);
2303         *idle_blocks = cpu_to_le32(blk);
2304         iam_unlock_bh(c->ic_root_bh);
2305         rc = iam_txn_dirty(h, p, c->ic_root_bh);
2306         if (rc == 0) {
2307                 /* Do NOT release the old bh before the new one is assigned. */
2308                 get_bh(bh);
2309                 c->ic_idle_bh = bh;
2310                 brelse(old);
2311         } else {
2312                 iam_lock_bh(c->ic_root_bh);
2313                 *idle_blocks = head->iih_next;
2314                 iam_unlock_bh(c->ic_root_bh);
2315         }
2316         return rc;
2317 }
2318
2319 /*
2320  * If the leaf cannot be recycled, we will lose one block for reuse. It is
2321  * not a serious issue, because the result is almost the same as not recycling.
2322  */
2323 static void iam_recycle_leaf(handle_t *h, struct iam_path *p,
2324                              struct buffer_head *bh, iam_ptr_t blk)
2325 {
2326         struct iam_container *c = p->ip_container;
2327         struct inode *inode = c->ic_object;
2328         struct iam_idle_head *head;
2329         __u32 *idle_blocks;
2330         int count;
2331         int rc;
2332
2333         mutex_lock(&c->ic_idle_mutex);
2334         if (unlikely(c->ic_idle_failed)) {
2335                 rc = -EFAULT;
2336                 goto unlock;
2337         }
2338
2339         idle_blocks = (__u32 *)(c->ic_root_bh->b_data +
2340                                 c->ic_descr->id_root_gap +
2341                                 sizeof(struct dx_countlimit));
2342         /* It is the first idle block. */
2343         if (c->ic_idle_bh == NULL) {
2344                 rc = iam_install_idle_blocks(h, p, bh, idle_blocks, blk);
2345                 goto unlock;
2346         }
2347
2348         head = (struct iam_idle_head *)(c->ic_idle_bh->b_data);
2349         count = le16_to_cpu(head->iih_count);
2350         /* Current ic_idle_bh is full, to be replaced by the leaf. */
2351         if (count == iam_idle_blocks_limit(inode)) {
2352                 rc = iam_install_idle_blocks(h, p, bh, idle_blocks, blk);
2353                 goto unlock;
2354         }
2355
2356         /* Just add to ic_idle_bh. */
2357         rc = iam_txn_add(h, p, c->ic_idle_bh);
2358         if (rc != 0)
2359                 goto unlock;
2360
2361         head->iih_blks[count] = cpu_to_le32(blk);
2362         head->iih_count = cpu_to_le16(count + 1);
2363         rc = iam_txn_dirty(h, p, c->ic_idle_bh);
2364
2365 unlock:
2366         mutex_unlock(&c->ic_idle_mutex);
2367         if (rc != 0)
2368                 CWARN("%.16s: idle blocks failed, will lose the blk %u\n",
2369                       LDISKFS_SB(inode->i_sb)->s_es->s_volume_name, blk);
2370 }
2371
2372 /*
2373  * Delete record under iterator.
2374  *
2375  * precondition:  it_state(it) == IAM_IT_ATTACHED &&
2376  *                it->ii_flags&IAM_IT_WRITE &&
2377  *                it_at_rec(it)
2378  * postcondition: it_state(it) == IAM_IT_ATTACHED ||
2379  *                it_state(it) == IAM_IT_DETACHED
2380  */
2381 int iam_it_rec_delete(handle_t *h, struct iam_iterator *it)
2382 {
2383         int result;
2384         struct iam_leaf *leaf;
2385         struct iam_path *path;
2386
2387         assert_corr(it_state(it) == IAM_IT_ATTACHED &&
2388                     it->ii_flags&IAM_IT_WRITE);
2389         assert_corr(it_at_rec(it));
2390
2391         path = &it->ii_path;
2392         leaf = &path->ip_leaf;
2393
2394         assert_inv(iam_path_check(path));
2395
2396         result = iam_txn_add(h, path, leaf->il_bh);
2397         /*
2398          * no compaction for now.
2399          */
2400         if (result == 0) {
2401                 iam_rec_del(leaf, it->ii_flags&IAM_IT_MOVE);
2402                 result = iam_txn_dirty(h, path, leaf->il_bh);
2403                 if (result == 0 && iam_leaf_at_end(leaf)) {
2404                         struct buffer_head *bh = NULL;
2405                         iam_ptr_t blk;
2406
2407                         blk = iam_index_shrink(h, path, leaf, &bh);
2408                         if (it->ii_flags & IAM_IT_MOVE) {
2409                                 result = iam_it_next(it);
2410                                 if (result > 0)
2411                                         result = 0;
2412                         }
2413
2414                         if (bh != NULL) {
2415                                 iam_recycle_leaf(h, path, bh, blk);
2416                                 brelse(bh);
2417                         }
2418                 }
2419         }
2420         assert_inv(iam_path_check(path));
2421         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
2422                     it_state(it) == IAM_IT_DETACHED);
2423         return result;
2424 }
2425
2426 /*
2427  * Convert iterator to cookie.
2428  *
2429  * precondition:  it_state(it) == IAM_IT_ATTACHED &&
2430  *                iam_path_descr(it->ii_path)->id_key_size <= sizeof(iam_pos_t)
2431  * postcondition: it_state(it) == IAM_IT_ATTACHED
2432  */
2433 iam_pos_t iam_it_store(const struct iam_iterator *it)
2434 {
2435         iam_pos_t result;
2436
2437         assert_corr(it_state(it) == IAM_IT_ATTACHED);
2438         assert_corr(it_at_rec(it));
2439         assert_corr(iam_it_container(it)->ic_descr->id_ikey_size <=
2440                     sizeof result);
2441
2442         result = 0;
2443         return *(iam_pos_t *)iam_it_ikey_get(it, (void *)&result);
2444 }
2445
2446 /*
2447  * Restore iterator from cookie.
2448  *
2449  * precondition:  it_state(it) == IAM_IT_DETACHED && it->ii_flags&IAM_IT_MOVE &&
2450  *                iam_path_descr(it->ii_path)->id_key_size <= sizeof(iam_pos_t)
2451  * postcondition: ergo(result == 0, it_state(it) == IAM_IT_ATTACHED &&
2452  *                                  iam_it_store(it) == pos)
2453  */
2454 int iam_it_load(struct iam_iterator *it, iam_pos_t pos)
2455 {
2456         assert_corr(it_state(it) == IAM_IT_DETACHED &&
2457                     it->ii_flags&IAM_IT_MOVE);
2458         assert_corr(iam_it_container(it)->ic_descr->id_ikey_size <= sizeof pos);
2459         return iam_it_iget(it, (struct iam_ikey *)&pos);
2460 }
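
/*
 * Illustrative round trip (hypothetical caller; assumes iam_it_put() leaves
 * the iterator detached, as iam_it_load() requires, and that the iterator
 * was initialized with IAM_IT_MOVE):
 *
 *      iam_pos_t pos = iam_it_store(it);   remember the current position
 *      iam_it_put(it);                     release the leaf
 *      ...
 *      rc = iam_it_load(it, pos);          re-attach at the stored cookie
 */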
2461
2462 /***********************************************************************/
2463 /* invariants                                                          */
2464 /***********************************************************************/
2465
2466 static inline int ptr_inside(void *base, size_t size, void *ptr)
2467 {
2468         return (base <= ptr) && (ptr < base + size);
2469 }
2470
2471 static int iam_frame_invariant(struct iam_frame *f)
2472 {
2473         return
2474                 (f->bh != NULL &&
2475                 f->bh->b_data != NULL &&
2476                 ptr_inside(f->bh->b_data, f->bh->b_size, f->entries) &&
2477                 ptr_inside(f->bh->b_data, f->bh->b_size, f->at) &&
2478                 f->entries <= f->at);
2479 }
2480
2481 static int iam_leaf_invariant(struct iam_leaf *l)
2482 {
2483         return
2484                 l->il_bh != NULL &&
2485                 l->il_bh->b_data != NULL &&
2486                 ptr_inside(l->il_bh->b_data, l->il_bh->b_size, l->il_entries) &&
2487                 ptr_inside(l->il_bh->b_data, l->il_bh->b_size, l->il_at) &&
2488                 l->il_entries <= l->il_at;
2489 }
2490
2491 static int iam_path_invariant(struct iam_path *p)
2492 {
2493         int i;
2494
2495         if (p->ip_container == NULL ||
2496             p->ip_indirect < 0 || p->ip_indirect > DX_MAX_TREE_HEIGHT - 1 ||
2497             p->ip_frame != p->ip_frames + p->ip_indirect ||
2498             !iam_leaf_invariant(&p->ip_leaf))
2499                 return 0;
2500         for (i = 0; i < ARRAY_SIZE(p->ip_frames); ++i) {
2501                 if (i <= p->ip_indirect) {
2502                         if (!iam_frame_invariant(&p->ip_frames[i]))
2503                                 return 0;
2504                 }
2505         }
2506         return 1;
2507 }
2508
2509 int iam_it_invariant(struct iam_iterator *it)
2510 {
2511         return
2512                 (it->ii_state == IAM_IT_DETACHED ||
2513                  it->ii_state == IAM_IT_ATTACHED ||
2514                  it->ii_state == IAM_IT_SKEWED) &&
2515                 !(it->ii_flags & ~(IAM_IT_MOVE | IAM_IT_WRITE)) &&
2516                 ergo(it->ii_state == IAM_IT_ATTACHED ||
2517                      it->ii_state == IAM_IT_SKEWED,
2518                      iam_path_invariant(&it->ii_path) &&
2519                      equi(it_at_rec(it), it->ii_state == IAM_IT_SKEWED));
2520 }
2521
2522 /*
2523  * Search container @c for record with key @k. If record is found, its data
2524  * are moved into @r.
2525  *
2526  * Return values: 0: found, -ENOENT: not-found, -ve: error
2527  */
2528 int iam_lookup(struct iam_container *c, const struct iam_key *k,
2529                struct iam_rec *r, struct iam_path_descr *pd)
2530 {
2531         struct iam_iterator it;
2532         int result;
2533
2534         iam_it_init(&it, c, 0, pd);
2535
2536         result = iam_it_get_exact(&it, k);
2537         if (result == 0)
2538                 /*
2539                  * record with required key found, copy it into user buffer
2540                  */
2541                 iam_reccpy(&it.ii_path.ip_leaf, r);
2542         iam_it_put(&it);
2543         iam_it_fini(&it);
2544         return result;
2545 }
2546
2547 /*
2548  * Insert new record @r with key @k into container @c (within context of
2549  * transaction @h).
2550  *
2551  * Return values: 0: success, -ve: error, including -EEXIST when record with
2552  * given key is already present.
2553  *
2554  * postcondition: ergo(result == 0 || result == -EEXIST,
2555  *                                  iam_lookup(c, k, r2) > 0);
2556  */
2557 int iam_insert(handle_t *h, struct iam_container *c, const struct iam_key *k,
2558                const struct iam_rec *r, struct iam_path_descr *pd)
2559 {
2560         struct iam_iterator it;
2561         int result;
2562
2563         iam_it_init(&it, c, IAM_IT_WRITE, pd);
2564
2565         result = iam_it_get_exact(&it, k);
2566         if (result == -ENOENT)
2567                 result = iam_it_rec_insert(h, &it, k, r);
2568         else if (result == 0)
2569                 result = -EEXIST;
2570         iam_it_put(&it);
2571         iam_it_fini(&it);
2572         return result;
2573 }
2574
2575 /*
2576  * Update record with the key @k in container @c (within context of
2577  * transaction @h), new record is given by @r.
2578  *
2579  * Return values: +1: skip because of the same rec value, 0: success,
2580  * -ve: error, including -ENOENT if no record with the given key found.
2581  */
2582 int iam_update(handle_t *h, struct iam_container *c, const struct iam_key *k,
2583                const struct iam_rec *r, struct iam_path_descr *pd)
2584 {
2585         struct iam_iterator it;
2586         struct iam_leaf *folio;
2587         int result;
2588
2589         iam_it_init(&it, c, IAM_IT_WRITE, pd);
2590
2591         result = iam_it_get_exact(&it, k);
2592         if (result == 0) {
2593                 folio = &it.ii_path.ip_leaf;
2594                 result = iam_leaf_ops(folio)->rec_eq(folio, r);
2595                 if (result == 0)
2596                         iam_it_rec_set(h, &it, r);
2597                 else
2598                         result = 1;
2599         }
2600         iam_it_put(&it);
2601         iam_it_fini(&it);
2602         return result;
2603 }
2604
2605 /*
2606  * Delete existing record with key @k.
2607  *
2608  * Return values: 0: success, -ENOENT: not-found, -ve: other error.
2609  *
2610  * postcondition: ergo(result == 0 || result == -ENOENT,
2611  *                                 !iam_lookup(c, k, *));
2612  */
2613 int iam_delete(handle_t *h, struct iam_container *c, const struct iam_key *k,
2614                struct iam_path_descr *pd)
2615 {
2616         struct iam_iterator it;
2617         int result;
2618
2619         iam_it_init(&it, c, IAM_IT_WRITE, pd);
2620
2621         result = iam_it_get_exact(&it, k);
2622         if (result == 0)
2623                 iam_it_rec_delete(h, &it);
2624         iam_it_put(&it);
2625         iam_it_fini(&it);
2626         return result;
2627 }
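
/*
 * Illustrative use of the high-level container operations above (hypothetical
 * caller; container setup, key/record layout and the path descriptor are
 * container-specific and not shown):
 *
 *      rc = iam_insert(h, c, k, r, pd);    returns -EEXIST if key exists
 *      rc = iam_lookup(c, k, r, pd);       returns 0 and fills *r when found
 *      rc = iam_update(h, c, k, r2, pd);   returns +1 if record is unchanged
 *      rc = iam_delete(h, c, k, pd);       returns -ENOENT if key is absent
 */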
2628
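/*
 * Illustrative arithmetic (example sizes): with blocksize == 4096 and
 * size == 8, rootgap == 32 gives limit == 508, which is below nlimit == 512
 * and is returned unchanged; rootgap == 0 would give limit == nlimit == 512,
 * so limit is decremented to 511, keeping the root node strictly smaller
 * than a non-root node as split_index_node() requires.
 */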
2629 int iam_root_limit(int rootgap, int blocksize, int size)
2630 {
2631         int limit;
2632         int nlimit;
2633
2634         limit = (blocksize - rootgap) / size;
2635         nlimit = blocksize / size;
2636         if (limit == nlimit)
2637                 limit--;
2638         return limit;
2639 }