1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2011, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  *
31  * iam.c
32  * Top-level entry points into iam module
33  *
34  * Author: Wang Di <wangdi@clusterfs.com>
35  * Author: Nikita Danilov <nikita@clusterfs.com>
36  */
37
38 /*
39  * iam: big theory statement.
40  *
41  * iam (Index Access Module) is a module providing abstraction of persistent
42  * transactional container on top of generalized ldiskfs htree.
43  *
44  * iam supports:
45  *
46  *     - key, pointer, and record size specifiable per container.
47  *
48  *     - trees taller than 2 index levels.
49  *
50  *     - read/write to existing ldiskfs htree directories as iam containers.
51  *
52  * iam container is a tree, consisting of leaf nodes containing keys and
53  * records stored in this container, and index nodes, containing keys and
54  * pointers to leaf or index nodes.
55  *
56  * iam does not work with keys directly; instead it calls a user-supplied key
57  * comparison function (->dpo_keycmp()).
58  *
59  * Pointers are (currently) interpreted as logical offsets (measured in
60  * blocks) within the underlying flat file on top of which the iam tree lives.
61  *
62  * On-disk format:
63  *
64  * iam mostly tries to reuse existing htree formats.
65  *
66  * Format of index node:
67  *
68  * +-----+-------+-------+-------+------+-------+------------+
69  * |     | count |       |       |      |       |            |
70  * | gap |   /   | entry | entry | .... | entry | free space |
71  * |     | limit |       |       |      |       |            |
72  * +-----+-------+-------+-------+------+-------+------------+
73  *
74  *       gap           this part of node is never accessed by iam code. It
75  *                     exists for binary compatibility with ldiskfs htree (that,
76  *                     in turn, stores fake struct ext2_dirent for ext2
77  *                     compatibility), and to keep some unspecified per-node
78  *                     data. Gap can be different for root and non-root index
79  *                     nodes. Gap size can be specified for each container
80  *                     (gap of 0 is allowed).
81  *
82  *       count/limit   current number of entries in this node, and the maximal
83  *                     number of entries that can fit into node. count/limit
84  *                     has the same size as entry, and is itself counted in
85  *                     count.
86  *
87  *       entry         index entry: consists of a key immediately followed by
88  *                     a pointer to a child node. Size of a key and size of a
89  *                     pointer depends on container. Entry has neither
90  *                     alignment nor padding.
91  *
92  *       free space    portion of the node where new entries are added
93  *
94  * Entries in index node are sorted by their key value.
95  *
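 * For example (hypothetical sizes): with an 8-byte key and a 4-byte pointer
 * each entry takes 12 bytes, so a 4096-byte block with a 16-byte gap has
 * limit = (4096 - 16) / 12 = 340 entry slots; because the count/limit pair
 * occupies the first slot and is itself counted in count, at most 339 real
 * entries fit into such a node.
 *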
96  * Format of a leaf node is not specified. Generic iam code accesses leaf
97  * nodes through ->id_leaf methods in struct iam_descr.
98  *
99  * The IAM root block is a special node, which contains the IAM descriptor.
100  * Its on-disk format is:
101  *
102  * +---------+-------+--------+---------+-------+------+-------+------------+
103  * |IAM desc | count |  idle  |         |       |      |       |            |
104  * |(fix/var)|   /   | blocks | padding | entry | .... | entry | free space |
105  * |         | limit |        |         |       |      |       |            |
106  * +---------+-------+--------+---------+-------+------+-------+------------+
107  *
108  * The padding length is calculated with the parameters in the IAM descriptor.
109  *
110  * The field "idle_blocks" records empty leaf nodes whose entries have all
111  * been removed but which have not been released back to the file system.
112  * Ideally these idle blocks would be reused whenever new leaf nodes are
113  * needed for new entries, but that relies on the IAM hash functions mapping
114  * the new entries onto exactly these idle blocks. Unfortunately, it is hard
115  * to design hash functions for such a clever mapping, especially without
116  * hurting insert/lookup performance.
117  *
118  * Instead, the IAM recycles empty leaf nodes into a per-file pool of idle
119  * blocks. When a new leaf node is needed, a block is taken from this pool
120  * first, regardless of where the IAM hash functions would have mapped the
121  * new entry.
122  *
123  * The idle blocks pool is organized as a series of tables, and each table
124  * can be described as follows (on-disk format):
125  *
126  * +---------+---------+---------+---------+------+---------+-------+
127  * |  magic  |  count  |  next   |  logic  |      |  logic  | free  |
128  * |(16 bits)|(16 bits)|  table  |  blk #  | .... |  blk #  | space |
129  * |         |         |(32 bits)|(32 bits)|      |(32 bits)|       |
130  * +---------+---------+---------+---------+------+---------+-------+
131  *
132  * The logic blk# for the first table is stored in the root node "idle_blocks".
133  *
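 * As a rough C sketch, a table head looks like this (assumed layout; the
 * authoritative definition of struct iam_idle_head lives in the osd-ldiskfs
 * headers, but the field names below are the ones used by the code in this
 * file):
 *
 *     struct iam_idle_head {
 *             __le16 iih_magic;    // IAM_IDLE_HEADER_MAGIC
 *             __le16 iih_count;    // number of idle blocks in this table
 *             __le32 iih_next;     // logic blk# of the next table, 0 if none
 *             __le32 iih_blks[0];  // idle block numbers
 *     };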
134  */
135
136 #include <linux/module.h>
137 #include <linux/fs.h>
138 #include <linux/pagemap.h>
139 #include <linux/time.h>
140 #include <linux/fcntl.h>
141 #include <linux/stat.h>
142 #include <linux/string.h>
143 #include <linux/quotaops.h>
144 #include <linux/buffer_head.h>
145
146 #include <ldiskfs/ldiskfs.h>
147 #include <ldiskfs/xattr.h>
148 #undef ENTRY
149
150 #include "osd_internal.h"
151
152 #include <ldiskfs/acl.h>
153
154 static struct buffer_head *
155 iam_load_idle_blocks(struct iam_container *c, iam_ptr_t blk)
156 {
157         struct inode *inode = c->ic_object;
158         struct iam_idle_head *head;
159         struct buffer_head *bh;
160
161         LASSERT(mutex_is_locked(&c->ic_idle_mutex));
162
163         if (blk == 0)
164                 return NULL;
165
166         bh = __ldiskfs_bread(NULL, inode, blk, 0);
167         if (IS_ERR_OR_NULL(bh)) {
168                 CERROR("%s: cannot load idle blocks, blk = %u: rc = %ld\n",
169                        osd_ino2name(inode), blk, bh ? PTR_ERR(bh) : -EIO);
170                 c->ic_idle_failed = 1;
171                 if (bh == NULL)
172                         bh = ERR_PTR(-EIO);
173                 return bh;
174         }
175
176         head = (struct iam_idle_head *)(bh->b_data);
177         if (le16_to_cpu(head->iih_magic) != IAM_IDLE_HEADER_MAGIC) {
178                 int rc = -EBADF;
179
180                 CERROR("%s: invalid idle block head, blk = %u, magic = %x: rc = %d\n",
181                        osd_ino2name(inode), blk, le16_to_cpu(head->iih_magic),
182                        rc);
183                 brelse(bh);
184                 c->ic_idle_failed = 1;
185                 return ERR_PTR(rc);
186         }
187
188         return bh;
189 }
190
191 /*
192  * Determine format of given container. This is done by scanning list of
193  * registered formats and calling ->if_guess() method of each in turn.
194  */
195 static int iam_format_guess(struct iam_container *c)
196 {
197         int result;
198
199         result = iam_lvar_guess(c);
200         if (result)
201                 result = iam_lfix_guess(c);
202
203         if (result == 0) {
204                 struct buffer_head *bh;
205                 __u32 *idle_blocks;
206
207                 LASSERT(c->ic_root_bh != NULL);
208
209                 idle_blocks = (__u32 *)(c->ic_root_bh->b_data +
210                                         c->ic_descr->id_root_gap +
211                                         sizeof(struct dx_countlimit));
212                 mutex_lock(&c->ic_idle_mutex);
213                 bh = iam_load_idle_blocks(c, le32_to_cpu(*idle_blocks));
214                 if (bh != NULL && IS_ERR(bh))
215                         result = PTR_ERR(bh);
216                 else
217                         c->ic_idle_bh = bh;
218                 mutex_unlock(&c->ic_idle_mutex);
219         }
220
221         return result;
222 }
223
224 /*
225  * Initialize container @c.
226  */
227 int iam_container_init(struct iam_container *c,
228                        struct iam_descr *descr, struct inode *inode)
229 {
230         memset(c, 0, sizeof *c);
231         c->ic_descr = descr;
232         c->ic_object = inode;
233         dynlock_init(&c->ic_tree_lock);
234         mutex_init(&c->ic_idle_mutex);
235         return 0;
236 }
237
238 /*
239  * Determine container format.
240  */
241 int iam_container_setup(struct iam_container *c)
242 {
243         return iam_format_guess(c);
244 }
245
246 /*
247  * Finalize container @c, release all resources.
248  */
249 void iam_container_fini(struct iam_container *c)
250 {
251         brelse(c->ic_idle_bh);
252         c->ic_idle_bh = NULL;
253         brelse(c->ic_root_bh);
254         c->ic_root_bh = NULL;
255 }
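
/*
 * Typical container setup/teardown, as a rough sketch (error handling is
 * omitted; @descr and @inode are supplied by the caller):
 *
 *     struct iam_container c;
 *
 *     rc = iam_container_init(&c, descr, inode);
 *     if (rc == 0)
 *             rc = iam_container_setup(&c);  // determine on-disk format
 *     ... access the index through iterators (iam_it_init() and friends) ...
 *     iam_container_fini(&c);
 */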
256
257 void iam_path_init(struct iam_path *path, struct iam_container *c,
258                    struct iam_path_descr *pd)
259 {
260         memset(path, 0, sizeof *path);
261         path->ip_container = c;
262         path->ip_frame = path->ip_frames;
263         path->ip_data = pd;
264         path->ip_leaf.il_path = path;
265 }
266
267 static void iam_leaf_fini(struct iam_leaf *leaf);
268
269 void iam_path_release(struct iam_path *path)
270 {
271         int i;
272
273         for (i = 0; i < ARRAY_SIZE(path->ip_frames); i++) {
274                 if (path->ip_frames[i].bh != NULL) {
275                         path->ip_frames[i].at_shifted = 0;
276                         brelse(path->ip_frames[i].bh);
277                         path->ip_frames[i].bh = NULL;
278                 }
279         }
280 }
281
282 void iam_path_fini(struct iam_path *path)
283 {
284         iam_leaf_fini(&path->ip_leaf);
285         iam_path_release(path);
286 }
287
288
289 void iam_path_compat_init(struct iam_path_compat *path, struct inode *inode)
290 {
291         int i;
292
293         path->ipc_hinfo = &path->ipc_hinfo_area;
294         for (i = 0; i < ARRAY_SIZE(path->ipc_scratch); ++i)
295                 path->ipc_descr.ipd_key_scratch[i] =
296                         (struct iam_ikey *)&path->ipc_scratch[i];
297
298         iam_path_init(&path->ipc_path, &path->ipc_container, &path->ipc_descr);
299 }
300
301 void iam_path_compat_fini(struct iam_path_compat *path)
302 {
303         iam_path_fini(&path->ipc_path);
304 }
305
306 /*
307  * Helper function initializing iam_path_descr and its key scratch area.
308  */
309 struct iam_path_descr *iam_ipd_alloc(void *area, int keysize)
310 {
311         struct iam_path_descr *ipd;
312         void *karea;
313         int i;
314
315         ipd = area;
316         karea = ipd + 1;
317         for (i = 0; i < ARRAY_SIZE(ipd->ipd_key_scratch); ++i, karea += keysize)
318                 ipd->ipd_key_scratch[i] = karea;
319         return ipd;
320 }
321
322 void iam_ipd_free(struct iam_path_descr *ipd)
323 {
324 }
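
/*
 * Sizing note (sketch): iam_ipd_alloc() assumes that @area provides at least
 *
 *     sizeof(struct iam_path_descr) +
 *             keysize * ARRAY_SIZE(ipd->ipd_key_scratch)
 *
 * bytes, so that every scratch slot gets its own key-sized buffer laid out
 * immediately after the descriptor.
 */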
325
326 int iam_node_read(struct iam_container *c, iam_ptr_t ptr,
327                   handle_t *h, struct buffer_head **bh)
328 {
329         /*
330          * NB: it can be called by iam_lfix_guess() which is still at
331          * very early stage, c->ic_root_bh and c->ic_descr->id_ops
332          * haven't been initialized yet.
333          * Also, we don't have this for IAM dir.
334          */
335         if (c->ic_root_bh != NULL &&
336             c->ic_descr->id_ops->id_root_ptr(c) == ptr) {
337                 get_bh(c->ic_root_bh);
338                 *bh = c->ic_root_bh;
339                 return 0;
340         }
341
342         *bh = __ldiskfs_bread(h, c->ic_object, (int)ptr, 0);
343         if (IS_ERR(*bh))
344                 return PTR_ERR(*bh);
345
346         if (*bh == NULL)
347                 return -EIO;
348
349         return 0;
350 }
351
352 /*
353  * Return pointer to current leaf record. Pointer is valid while corresponding
354  * leaf node is locked and pinned.
355  */
356 static struct iam_rec *iam_leaf_rec(const struct iam_leaf *leaf)
357 {
358         return iam_leaf_ops(leaf)->rec(leaf);
359 }
360
361 /*
362  * Return pointer to the current leaf key. This function returns pointer to
363  * the key stored in node.
364  *
365  * Caller should assume that returned pointer is only valid while leaf node is
366  * pinned and locked.
367  */
368 static struct iam_key *iam_leaf_key(const struct iam_leaf *leaf)
369 {
370         return iam_leaf_ops(leaf)->key(leaf);
371 }
372
373 static int iam_leaf_key_size(const struct iam_leaf *leaf)
374 {
375         return iam_leaf_ops(leaf)->key_size(leaf);
376 }
377
378 static struct iam_ikey *iam_leaf_ikey(const struct iam_leaf *leaf,
379                                       struct iam_ikey *key)
380 {
381         return iam_leaf_ops(leaf)->ikey(leaf, key);
382 }
383
384 static int iam_leaf_keycmp(const struct iam_leaf *leaf,
385                            const struct iam_key *key)
386 {
387         return iam_leaf_ops(leaf)->key_cmp(leaf, key);
388 }
389
390 static int iam_leaf_keyeq(const struct iam_leaf *leaf,
391                           const struct iam_key *key)
392 {
393         return iam_leaf_ops(leaf)->key_eq(leaf, key);
394 }
395
396 #if LDISKFS_INVARIANT_ON
397 static int iam_path_check(struct iam_path *p)
398 {
399         int i;
400         int result;
401         struct iam_frame *f;
402         struct iam_descr *param;
403
404         result = 1;
405         param = iam_path_descr(p);
406         for (i = 0; result && i < ARRAY_SIZE(p->ip_frames); ++i) {
407                 f = &p->ip_frames[i];
408                 if (f->bh != NULL) {
409                         result = dx_node_check(p, f);
410                         if (result)
411                                 result = !param->id_ops->id_node_check(p, f);
412                 }
413         }
414         if (result && p->ip_leaf.il_bh != NULL)
415                 result = 1;
416         if (result == 0)
417                 ldiskfs_std_error(iam_path_obj(p)->i_sb, result);
418
419         return result;
420 }
421 #endif
422
423 static int iam_leaf_load(struct iam_path *path)
424 {
425         iam_ptr_t block;
426         int err;
427         struct iam_container *c;
428         struct buffer_head *bh;
429         struct iam_leaf *leaf;
430         struct iam_descr *descr;
431
432         c     = path->ip_container;
433         leaf  = &path->ip_leaf;
434         descr = iam_path_descr(path);
435         block = path->ip_frame->leaf;
436         if (block == 0) {
437                 /* XXX bug 11027 */
438                 printk(KERN_EMERG "wrong leaf: %lu %d [%p %p %p]\n",
439                        (long unsigned)path->ip_frame->leaf,
440                        dx_get_count(dx_node_get_entries(path, path->ip_frame)),
441                        path->ip_frames[0].bh, path->ip_frames[1].bh,
442                        path->ip_frames[2].bh);
443         }
444         err = descr->id_ops->id_node_read(c, block, NULL, &bh);
445         if (err == 0) {
446                 leaf->il_bh = bh;
447                 leaf->il_curidx = block;
448                 err = iam_leaf_ops(leaf)->init(leaf);
449         }
450         return err;
451 }
452
453 static void iam_unlock_htree(struct iam_container *ic,
454                              struct dynlock_handle *lh)
455 {
456         if (lh != NULL)
457                 dynlock_unlock(&ic->ic_tree_lock, lh);
458 }
459
460
461 static void iam_leaf_unlock(struct iam_leaf *leaf)
462 {
463         if (leaf->il_lock != NULL) {
464                 iam_unlock_htree(iam_leaf_container(leaf),
465                                  leaf->il_lock);
466                 do_corr(schedule());
467                 leaf->il_lock = NULL;
468         }
469 }
470
471 static void iam_leaf_fini(struct iam_leaf *leaf)
472 {
473         if (leaf->il_path != NULL) {
474                 iam_leaf_unlock(leaf);
475                 iam_leaf_ops(leaf)->fini(leaf);
476                 if (leaf->il_bh) {
477                         brelse(leaf->il_bh);
478                         leaf->il_bh = NULL;
479                         leaf->il_curidx = 0;
480                 }
481         }
482 }
483
484 static void iam_leaf_start(struct iam_leaf *folio)
485 {
486         iam_leaf_ops(folio)->start(folio);
487 }
488
489 void iam_leaf_next(struct iam_leaf *folio)
490 {
491         iam_leaf_ops(folio)->next(folio);
492 }
493
494 static void iam_leaf_rec_add(struct iam_leaf *leaf, const struct iam_key *key,
495                              const struct iam_rec *rec)
496 {
497         iam_leaf_ops(leaf)->rec_add(leaf, key, rec);
498 }
499
500 static void iam_rec_del(struct iam_leaf *leaf, int shift)
501 {
502         iam_leaf_ops(leaf)->rec_del(leaf, shift);
503 }
504
505 int iam_leaf_at_end(const struct iam_leaf *leaf)
506 {
507         return iam_leaf_ops(leaf)->at_end(leaf);
508 }
509
510 static void iam_leaf_split(struct iam_leaf *l, struct buffer_head **bh,
511                            iam_ptr_t nr)
512 {
513         iam_leaf_ops(l)->split(l, bh, nr);
514 }
515
516 static inline int iam_leaf_empty(struct iam_leaf *l)
517 {
518         return iam_leaf_ops(l)->leaf_empty(l);
519 }
520
521 int iam_leaf_can_add(const struct iam_leaf *l,
522                      const struct iam_key *k, const struct iam_rec *r)
523 {
524         return iam_leaf_ops(l)->can_add(l, k, r);
525 }
526
527 static int iam_txn_dirty(handle_t *handle,
528                          struct iam_path *path, struct buffer_head *bh)
529 {
530         int result;
531
532         result = ldiskfs_handle_dirty_metadata(handle, NULL, bh);
533         if (result != 0)
534                 ldiskfs_std_error(iam_path_obj(path)->i_sb, result);
535         return result;
536 }
537
538 static int iam_txn_add(handle_t *handle,
539                        struct iam_path *path, struct buffer_head *bh)
540 {
541         int result;
542         struct super_block *sb = iam_path_obj(path)->i_sb;
543
544         result = osd_ldiskfs_journal_get_write_access(handle, sb, bh,
545                                                       LDISKFS_JTR_NONE);
546         if (result != 0)
547                 ldiskfs_std_error(sb, result);
548         return result;
549 }
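
/*
 * The two helpers above wrap the usual journaling pattern for metadata
 * updates (sketch): call iam_txn_add() to get write access to a buffer
 * before modifying it, change bh->b_data, then call iam_txn_dirty() to mark
 * the buffer dirty under the handle. iam_it_rec_set() below is a minimal
 * user of exactly this sequence.
 */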
550
551 /***********************************************************************/
552 /* iterator interface                                                  */
553 /***********************************************************************/
554
555 static enum iam_it_state it_state(const struct iam_iterator *it)
556 {
557         return it->ii_state;
558 }
559
560 /*
561  * Helper function returning the container this iterator operates over.
562  */
563 static struct iam_container *iam_it_container(const struct iam_iterator *it)
564 {
565         return it->ii_path.ip_container;
566 }
567
568 static inline int it_keycmp(const struct iam_iterator *it,
569                             const struct iam_key *k)
570 {
571         return iam_leaf_keycmp(&it->ii_path.ip_leaf, k);
572 }
573
574 static inline int it_keyeq(const struct iam_iterator *it,
575                            const struct iam_key *k)
576 {
577         return iam_leaf_keyeq(&it->ii_path.ip_leaf, k);
578 }
579
580 static int it_ikeycmp(const struct iam_iterator *it, const struct iam_ikey *ik)
581 {
582         return iam_ikeycmp(it->ii_path.ip_container,
583                            iam_leaf_ikey(&it->ii_path.ip_leaf,
584                                         iam_path_ikey(&it->ii_path, 0)), ik);
585 }
586
587 static inline int it_at_rec(const struct iam_iterator *it)
588 {
589         return !iam_leaf_at_end(&it->ii_path.ip_leaf);
590 }
591
592 static inline int it_before(const struct iam_iterator *it)
593 {
594         return it_state(it) == IAM_IT_SKEWED && it_at_rec(it);
595 }
596
597 /*
598  * Helper wrapper around iam_it_get(): returns 0 (success) only when record
599  * with exactly the same key as asked is found.
600  */
601 static int iam_it_get_exact(struct iam_iterator *it, const struct iam_key *k)
602 {
603         int result;
604
605         result = iam_it_get(it, k);
606         if (result > 0)
607                 result = 0;
608         else if (result == 0)
609                 /*
610                  * Return -ENOENT if cursor is located above record with a key
611                  * different from one specified, or in the empty leaf.
612                  *
613                  * XXX returning -ENOENT only works if iam_it_get() never
614                  * returns -ENOENT as a legitimate error.
615                  */
616                 result = -ENOENT;
617         return result;
618 }
619
620 /*
621  * Initialize iterator to IAM_IT_DETACHED state.
622  *
623  * postcondition: it_state(it) == IAM_IT_DETACHED
624  */
625 int  iam_it_init(struct iam_iterator *it, struct iam_container *c, __u32 flags,
626                  struct iam_path_descr *pd)
627 {
628         memset(it, 0, sizeof *it);
629         it->ii_flags  = flags;
630         it->ii_state  = IAM_IT_DETACHED;
631         iam_path_init(&it->ii_path, c, pd);
632         return 0;
633 }
634
635 /*
636  * Finalize iterator and release all resources.
637  *
638  * precondition: it_state(it) == IAM_IT_DETACHED
639  */
640 void iam_it_fini(struct iam_iterator *it)
641 {
642         assert_corr(it_state(it) == IAM_IT_DETACHED);
643         iam_path_fini(&it->ii_path);
644 }
645
646 /*
647  * These locking primitives are used to protect parts of the directory's
648  * htree. The protection unit is a block: either a leaf or an index node.
649  */
650 static struct dynlock_handle *iam_lock_htree(struct iam_container *ic,
651                                              unsigned long value,
652                                              enum dynlock_type lt)
653 {
654         return dynlock_lock(&ic->ic_tree_lock, value, lt, GFP_NOFS);
655 }
656
657 static int iam_index_lock(struct iam_path *path, struct dynlock_handle **lh)
658 {
659         struct iam_frame *f;
660
661         for (f = path->ip_frame; f >= path->ip_frames; --f, ++lh) {
662                 do_corr(schedule());
663                 *lh = iam_lock_htree(path->ip_container, f->curidx, DLT_READ);
664                 if (*lh == NULL)
665                         return -ENOMEM;
666         }
667         return 0;
668 }
669
670 /*
671  * Fast check for frame consistency.
672  */
673 static int iam_check_fast(struct iam_path *path, struct iam_frame *frame)
674 {
675         struct iam_container *bag;
676         struct iam_entry *next;
677         struct iam_entry *last;
678         struct iam_entry *entries;
679         struct iam_entry *at;
680
681         bag = path->ip_container;
682         at = frame->at;
683         entries = frame->entries;
684         last = iam_entry_shift(path, entries, dx_get_count(entries) - 1);
685
686         if (unlikely(at > last))
687                 return -EAGAIN;
688
689         if (unlikely(dx_get_block(path, at) != frame->leaf))
690                 return -EAGAIN;
691
692         if (unlikely(iam_ikeycmp(bag, iam_ikey_at(path, at),
693                      path->ip_ikey_target) > 0))
694                 return -EAGAIN;
695
696         next = iam_entry_shift(path, at, +1);
697         if (next <= last) {
698                 if (unlikely(iam_ikeycmp(bag, iam_ikey_at(path, next),
699                                          path->ip_ikey_target) <= 0))
700                         return -EAGAIN;
701         }
702         return 0;
703 }
704
705 int dx_index_is_compat(struct iam_path *path)
706 {
707         return iam_path_descr(path) == NULL;
708 }
709
710 /*
711  * iam_find_position
712  *
713  * Binary search for the position of the specified ikey in an index node.
714  *
715  */
716
717 static struct iam_entry *iam_find_position(struct iam_path *path,
718                                            struct iam_frame *frame)
719 {
720         int count;
721         struct iam_entry *p;
722         struct iam_entry *q;
723         struct iam_entry *m;
724
725         count = dx_get_count(frame->entries);
726         assert_corr(count && count <= dx_get_limit(frame->entries));
727         p = iam_entry_shift(path, frame->entries,
728                             dx_index_is_compat(path) ? 1 : 2);
729         q = iam_entry_shift(path, frame->entries, count - 1);
730         while (p <= q) {
731                 m = iam_entry_shift(path, p, iam_entry_diff(path, q, p) / 2);
732                 if (iam_ikeycmp(path->ip_container, iam_ikey_at(path, m),
733                                 path->ip_ikey_target) > 0)
734                         q = iam_entry_shift(path, m, -1);
735                 else
736                         p = iam_entry_shift(path, m, +1);
737         }
738         return iam_entry_shift(path, p, -1);
739 }
740
741
742
743 static iam_ptr_t iam_find_ptr(struct iam_path *path, struct iam_frame *frame)
744 {
745         return dx_get_block(path, iam_find_position(path, frame));
746 }
747
748 void iam_insert_key(struct iam_path *path, struct iam_frame *frame,
749                     const struct iam_ikey *key, iam_ptr_t ptr)
750 {
751         struct iam_entry *entries = frame->entries;
752         struct iam_entry *new = iam_entry_shift(path, frame->at, +1);
753         int count = dx_get_count(entries);
754
755         /*
756          * Unfortunately we cannot assert this, as this function is sometimes
757          * called by VFS under i_sem and without pdirops lock.
758          */
759         assert_corr(1 || iam_frame_is_locked(path, frame));
760         assert_corr(count < dx_get_limit(entries));
761         assert_corr(frame->at < iam_entry_shift(path, entries, count));
762         assert_inv(dx_node_check(path, frame));
763         /* Prevent memory corruption outside of buffer_head */
764         BUG_ON(count >= dx_get_limit(entries));
765         BUG_ON((char *)iam_entry_shift(path, entries, count + 1) >
766                (frame->bh->b_data + frame->bh->b_size));
767
768         memmove(iam_entry_shift(path, new, 1), new,
769                 (char *)iam_entry_shift(path, entries, count) - (char *)new);
770         dx_set_ikey(path, new, key);
771         dx_set_block(path, new, ptr);
772         dx_set_count(entries, count + 1);
773
774         BUG_ON(count > dx_get_limit(entries));
775         assert_inv(dx_node_check(path, frame));
776 }
777
778 void iam_insert_key_lock(struct iam_path *path, struct iam_frame *frame,
779                          const struct iam_ikey *key, iam_ptr_t ptr)
780 {
781         iam_lock_bh(frame->bh);
782         iam_insert_key(path, frame, key, ptr);
783         iam_unlock_bh(frame->bh);
784 }
785 /*
786  * returns 0 if path was unchanged, -EAGAIN otherwise.
787  */
788 static int iam_check_path(struct iam_path *path, struct iam_frame *frame)
789 {
790         int equal;
791
792         iam_lock_bh(frame->bh);
793         equal = iam_check_fast(path, frame) == 0 ||
794                 frame->leaf == iam_find_ptr(path, frame);
795         DX_DEVAL(iam_lock_stats.dls_bh_again += !equal);
796         iam_unlock_bh(frame->bh);
797
798         return equal ? 0 : -EAGAIN;
799 }
800
801 static int iam_lookup_try(struct iam_path *path)
802 {
803         u32 ptr;
804         int err = 0;
805         int i;
806
807         struct iam_descr *param;
808         struct iam_frame *frame;
809         struct iam_container *c;
810
811         param = iam_path_descr(path);
812         c = path->ip_container;
813
814         ptr = param->id_ops->id_root_ptr(c);
815         for (frame = path->ip_frames, i = 0; i <= path->ip_indirect;
816              ++frame, ++i) {
817                 err = param->id_ops->id_node_read(c, (iam_ptr_t)ptr, NULL,
818                                                   &frame->bh);
819                 do_corr(schedule());
820
821                 iam_lock_bh(frame->bh);
822                 /*
823                  * node must be initialized under bh lock because concurrent
824                  * creation procedure may change it and iam_lookup_try() will
825                  * see obsolete tree height. -bzzz
826                  */
827                 if (err != 0)
828                         break;
829
830                 if (LDISKFS_INVARIANT_ON) {
831                         err = param->id_ops->id_node_check(path, frame);
832                         if (err != 0)
833                                 break;
834                 }
835
836                 err = param->id_ops->id_node_load(path, frame);
837                 if (err != 0)
838                         break;
839
840                 assert_inv(dx_node_check(path, frame));
841                 /*
842                  * splitting may change the root index block and move the hash we're
843                  * looking for into another index block, so we have to check for
844                  * this situation and repeat from the beginning if the path changed
845                  * -bzzz
846                  */
847                 if (i > 0) {
848                         err = iam_check_path(path, frame - 1);
849                         if (err != 0)
850                                 break;
851                 }
852
853                 frame->at = iam_find_position(path, frame);
854                 frame->curidx = ptr;
855                 frame->leaf = ptr = dx_get_block(path, frame->at);
856
857                 iam_unlock_bh(frame->bh);
858                 do_corr(schedule());
859         }
860         if (err != 0)
861                 iam_unlock_bh(frame->bh);
862         path->ip_frame = --frame;
863         return err;
864 }
865
866 static int __iam_path_lookup(struct iam_path *path)
867 {
868         int err;
869         int i;
870
871         for (i = 0; i < DX_MAX_TREE_HEIGHT; ++i)
872                 assert(path->ip_frames[i].bh == NULL);
873
874         do {
875                 err = iam_lookup_try(path);
876                 do_corr(schedule());
877                 if (err != 0)
878                         iam_path_fini(path);
879         } while (err == -EAGAIN);
880
881         return err;
882 }
883
884 /*
885  * returns 0 if path was unchanged, -EAGAIN otherwise.
886  */
887 static int iam_check_full_path(struct iam_path *path, int search)
888 {
889         struct iam_frame *bottom;
890         struct iam_frame *scan;
891         int i;
892         int result;
893
894         do_corr(schedule());
895
896         for (bottom = path->ip_frames, i = 0;
897              i < DX_MAX_TREE_HEIGHT && bottom->bh != NULL; ++bottom, ++i) {
898                 ; /* find last filled in frame */
899         }
900
901         /*
902          * Lock frames, bottom to top.
903          */
904         for (scan = bottom - 1; scan >= path->ip_frames; --scan)
905                 iam_lock_bh(scan->bh);
906         /*
907          * Check them top to bottom.
908          */
909         result = 0;
910         for (scan = path->ip_frames; scan < bottom; ++scan) {
911                 struct iam_entry *pos;
912
913                 if (search) {
914                         if (iam_check_fast(path, scan) == 0)
915                                 continue;
916
917                         pos = iam_find_position(path, scan);
918                         if (scan->leaf != dx_get_block(path, pos)) {
919                                 result = -EAGAIN;
920                                 break;
921                         }
922                         scan->at = pos;
923                 } else {
924                         pos = iam_entry_shift(path, scan->entries,
925                                               dx_get_count(scan->entries) - 1);
926                         if (scan->at > pos ||
927                             scan->leaf != dx_get_block(path, scan->at)) {
928                                 result = -EAGAIN;
929                                 break;
930                         }
931                 }
932         }
933
934         /*
935          * Unlock top to bottom.
936          */
937         for (scan = path->ip_frames; scan < bottom; ++scan)
938                 iam_unlock_bh(scan->bh);
939         DX_DEVAL(iam_lock_stats.dls_bh_full_again += !!result);
940         do_corr(schedule());
941
942         return result;
943 }
944
945
946 /*
947  * Performs path lookup and returns with found leaf (if any) locked by htree
948  * lock.
949  */
950 static int iam_lookup_lock(struct iam_path *path,
951                            struct dynlock_handle **dl, enum dynlock_type lt)
952 {
953         int result;
954
955         while ((result = __iam_path_lookup(path)) == 0) {
956                 do_corr(schedule());
957                 *dl = iam_lock_htree(path->ip_container, path->ip_frame->leaf,
958                                      lt);
959                 if (*dl == NULL) {
960                         iam_path_fini(path);
961                         result = -ENOMEM;
962                         break;
963                 }
964                 do_corr(schedule());
965                 /*
966                  * while locking it, the leaf we just found may get split, so we
967                  * need to check for this -bzzz
968                  */
969                 if (iam_check_full_path(path, 1) == 0)
970                         break;
971                 iam_unlock_htree(path->ip_container, *dl);
972                 *dl = NULL;
973                 iam_path_fini(path);
974         }
975         return result;
976 }
977 /*
978  * Performs tree top-to-bottom traversal starting from root, and loads leaf
979  * node.
980  */
981 static int iam_path_lookup(struct iam_path *path, int index)
982 {
983         struct iam_leaf  *leaf;
984         int result;
985
986         leaf = &path->ip_leaf;
987         result = iam_lookup_lock(path, &leaf->il_lock, DLT_WRITE);
988         assert_inv(iam_path_check(path));
989         do_corr(schedule());
990         if (result == 0) {
991                 result = iam_leaf_load(path);
992                 if (result == 0) {
993                         do_corr(schedule());
994                         if (index)
995                                 result = iam_leaf_ops(leaf)->
996                                         ilookup(leaf, path->ip_ikey_target);
997                         else
998                                 result = iam_leaf_ops(leaf)->
999                                         lookup(leaf, path->ip_key_target);
1000                         do_corr(schedule());
1001                 }
1002                 if (result < 0)
1003                         iam_leaf_unlock(leaf);
1004         }
1005         return result;
1006 }
1007
1008 /*
1009  * Common part of iam_it_{i,}get().
1010  */
1011 static int __iam_it_get(struct iam_iterator *it, int index)
1012 {
1013         int result;
1014
1015         assert_corr(it_state(it) == IAM_IT_DETACHED);
1016
1017         result = iam_path_lookup(&it->ii_path, index);
1018         if (result >= 0) {
1019                 int collision;
1020
1021                 collision = result & IAM_LOOKUP_LAST;
1022                 switch (result & ~IAM_LOOKUP_LAST) {
1023                 case IAM_LOOKUP_EXACT:
1024                         result = +1;
1025                         it->ii_state = IAM_IT_ATTACHED;
1026                         break;
1027                 case IAM_LOOKUP_OK:
1028                         result = 0;
1029                         it->ii_state = IAM_IT_ATTACHED;
1030                         break;
1031                 case IAM_LOOKUP_BEFORE:
1032                 case IAM_LOOKUP_EMPTY:
1033                         result = 0;
1034                         it->ii_state = IAM_IT_SKEWED;
1035                         break;
1036                 default:
1037                         assert(0);
1038                 }
1039                 result |= collision;
1040         }
1041         /*
1042          * See iam_it_get_exact() for explanation.
1043          */
1044         assert_corr(result != -ENOENT);
1045         return result;
1046 }
1047
1048 /*
1049  * The correct hash was found, but not the same key; iterate through the
1050  * hash collision chain, looking for the correct record.
1051  */
1052 static int iam_it_collision(struct iam_iterator *it)
1053 {
1054         int result;
1055
1056         assert(ergo(it_at_rec(it), !it_keyeq(it, it->ii_path.ip_key_target)));
1057
1058         while ((result = iam_it_next(it)) == 0) {
1059                 do_corr(schedule());
1060                 if (it_ikeycmp(it, it->ii_path.ip_ikey_target) != 0)
1061                         return -ENOENT;
1062                 if (it_keyeq(it, it->ii_path.ip_key_target))
1063                         return 0;
1064         }
1065         return result;
1066 }
1067
1068 /*
1069  * Attach iterator. After successful completion, @it points to record with
1070  * least key not larger than @k.
1071  *
1072  * Return value: 0: positioned on existing record,
1073  *             +ve: exact position found,
1074  *             -ve: error.
1075  *
1076  * precondition:  it_state(it) == IAM_IT_DETACHED
1077  * postcondition: ergo(result == 0 && it_state(it) == IAM_IT_ATTACHED,
1078  *                     it_keycmp(it, k) <= 0)
1079  */
1080 int iam_it_get(struct iam_iterator *it, const struct iam_key *k)
1081 {
1082         int result;
1083
1084         assert_corr(it_state(it) == IAM_IT_DETACHED);
1085
1086         it->ii_path.ip_ikey_target = NULL;
1087         it->ii_path.ip_key_target  = k;
1088
1089         result = __iam_it_get(it, 0);
1090
1091         if (result == IAM_LOOKUP_LAST) {
1092                 result = iam_it_collision(it);
1093                 if (result != 0) {
1094                         iam_it_put(it);
1095                         iam_it_fini(it);
1096                         result = __iam_it_get(it, 0);
1097                 } else
1098                         result = +1;
1099         }
1100         if (result > 0)
1101                 result &= ~IAM_LOOKUP_LAST;
1102
1103         assert_corr(ergo(result > 0, it_keycmp(it, k) == 0));
1104         assert_corr(ergo(result == 0 && it_state(it) == IAM_IT_ATTACHED,
1105                     it_keycmp(it, k) <= 0));
1106         return result;
1107 }
1108
1109 /*
1110  * Attach iterator by index key.
1111  */
1112 static int iam_it_iget(struct iam_iterator *it, const struct iam_ikey *k)
1113 {
1114         assert_corr(it_state(it) == IAM_IT_DETACHED);
1115
1116         it->ii_path.ip_ikey_target = k;
1117         return __iam_it_get(it, 1) & ~IAM_LOOKUP_LAST;
1118 }
1119
1120 /*
1121  * Attach iterator, and assure it points to the record (not skewed).
1122  *
1123  * Return value: 0: positioned on existing record,
1124  *             +ve: exact position found,
1125  *             -ve: error.
1126  *
1127  * precondition:  it_state(it) == IAM_IT_DETACHED &&
1128  *                !(it->ii_flags&IAM_IT_WRITE)
1129  * postcondition: ergo(result == 0, it_state(it) == IAM_IT_ATTACHED)
1130  */
1131 int iam_it_get_at(struct iam_iterator *it, const struct iam_key *k)
1132 {
1133         int result;
1134
1135         assert_corr(it_state(it) == IAM_IT_DETACHED &&
1136                     !(it->ii_flags&IAM_IT_WRITE));
1137         result = iam_it_get(it, k);
1138         if (result == 0) {
1139                 if (it_state(it) != IAM_IT_ATTACHED) {
1140                         assert_corr(it_state(it) == IAM_IT_SKEWED);
1141                         result = iam_it_next(it);
1142                 }
1143         }
1144         assert_corr(ergo(result >= 0, it_state(it) == IAM_IT_ATTACHED));
1145         return result;
1146 }
1147
1148 /*
1149  * Duplicates iterator.
1150  *
1151  * postcondition: it_state(dst) == it_state(src) &&
1152  *                iam_it_container(dst) == iam_it_container(src) &&
1153  *                dst->ii_flags == src->ii_flags &&
1154  *                ergo(it_state(src) == IAM_IT_ATTACHED,
1155  *                     iam_it_rec_get(dst) == iam_it_rec_get(src) &&
1156  *                     iam_it_key_get(dst) == iam_it_key_get(src))
1157  */
1158 void iam_it_dup(struct iam_iterator *dst, const struct iam_iterator *src)
1159 {
1160         dst->ii_flags = src->ii_flags;
1161         dst->ii_state = src->ii_state;
1162         /* XXX not yet. iam_path_dup(&dst->ii_path, &src->ii_path); */
1163         /*
1164          * XXX: duplicate lock.
1165          */
1166         assert_corr(it_state(dst) == it_state(src));
1167         assert_corr(iam_it_container(dst) == iam_it_container(src));
1168         assert_corr(dst->ii_flags == src->ii_flags);
1169         assert_corr(ergo(it_state(src) == IAM_IT_ATTACHED,
1170                     iam_it_rec_get(dst) == iam_it_rec_get(src) &&
1171                     iam_it_key_get(dst) == iam_it_key_get(src)));
1172 }
1173
1174 /*
1175  * Detach iterator. Does nothing in detached state.
1176  *
1177  * postcondition: it_state(it) == IAM_IT_DETACHED
1178  */
1179 void iam_it_put(struct iam_iterator *it)
1180 {
1181         if (it->ii_state != IAM_IT_DETACHED) {
1182                 it->ii_state = IAM_IT_DETACHED;
1183                 iam_leaf_fini(&it->ii_path.ip_leaf);
1184         }
1185 }
1186
1187 static struct iam_ikey *iam_it_ikey_get(const struct iam_iterator *it,
1188                                         struct iam_ikey *ikey);
1189
1190
1191 /*
1192  * This function increments the frame pointer to search the next leaf
1193  * block, and reads in the necessary intervening nodes if the search
1194  * should be necessary.  Whether or not the search is necessary is
1195  * controlled by the hash parameter.  If the hash value is even, then
1196  * the search is only continued if the next block starts with that
1197  * hash value.  This is used if we are searching for a specific file.
1198  *
1199  * If the hash value is HASH_NB_ALWAYS, then always go to the next block.
1200  *
1201  * This function returns 1 if the caller should continue to search,
1202  * or 0 if it should not.  If there is an error reading one of the
1203  * index blocks, it will return a negative error code.
1204  *
1205  * If start_hash is non-null, it will be filled in with the starting
1206  * hash of the next page.
1207  */
1208 static int iam_htree_advance(struct inode *dir, __u32 hash,
1209                               struct iam_path *path, __u32 *start_hash,
1210                               int compat)
1211 {
1212         struct iam_frame *p;
1213         struct buffer_head *bh;
1214         int err, num_frames = 0;
1215         __u32 bhash;
1216
1217         p = path->ip_frame;
1218         /*
1219          * Find the next leaf page by incrementing the frame pointer.
1220          * If we run out of entries in the interior node, loop around and
1221          * increment pointer in the parent node.  When we break out of
1222          * this loop, num_frames indicates the number of interior
1223          * nodes that need to be read.
1224          */
1225         while (1) {
1226                 do_corr(schedule());
1227                 iam_lock_bh(p->bh);
1228                 if (p->at_shifted)
1229                         p->at_shifted = 0;
1230                 else
1231                         p->at = iam_entry_shift(path, p->at, +1);
1232                 if (p->at < iam_entry_shift(path, p->entries,
1233                                             dx_get_count(p->entries))) {
1234                         p->leaf = dx_get_block(path, p->at);
1235                         iam_unlock_bh(p->bh);
1236                         break;
1237                 }
1238                 iam_unlock_bh(p->bh);
1239                 if (p == path->ip_frames)
1240                         return 0;
1241                 num_frames++;
1242                 --p;
1243         }
1244
1245         if (compat) {
1246                 /*
1247                  * Htree hash magic.
1248                  */
1249
1250                 /*
1251                  * If the hash is 1, then continue only if the next page has a
1252                  * continuation hash of any value.  This is used for readdir
1253                  * handling.  Otherwise, check to see if the hash matches the
1254                  * desired continuation hash.  If it doesn't, return since
1255                  * there's no point in reading the successive index pages.
1256                  */
1257                 dx_get_ikey(path, p->at, (struct iam_ikey *)&bhash);
1258                 if (start_hash)
1259                         *start_hash = bhash;
1260                 if ((hash & 1) == 0) {
1261                         if ((bhash & ~1) != hash)
1262                                 return 0;
1263                 }
1264         }
1265         /*
1266          * If the hash is HASH_NB_ALWAYS, we always go to the next
1267          * block so no check is necessary
1268          */
1269         while (num_frames--) {
1270                 iam_ptr_t idx;
1271
1272                 do_corr(schedule());
1273                 iam_lock_bh(p->bh);
1274                 idx = p->leaf = dx_get_block(path, p->at);
1275                 iam_unlock_bh(p->bh);
1276                 err = iam_path_descr(path)->id_ops->
1277                         id_node_read(path->ip_container, idx, NULL, &bh);
1278                 if (err != 0)
1279                         return err; /* Failure */
1280                 ++p;
1281                 brelse(p->bh);
1282                 assert_corr(p->bh != bh);
1283                 p->bh = bh;
1284                 p->entries = dx_node_get_entries(path, p);
1285                 p->at = iam_entry_shift(path, p->entries, !compat);
1286                 assert_corr(p->curidx != idx);
1287                 p->curidx = idx;
1288                 iam_lock_bh(p->bh);
1289                 assert_corr(p->leaf != dx_get_block(path, p->at));
1290                 p->leaf = dx_get_block(path, p->at);
1291                 iam_unlock_bh(p->bh);
1292                 assert_inv(dx_node_check(path, p));
1293         }
1294         return 1;
1295 }
1296
1297 static inline int iam_index_advance(struct iam_path *path)
1298 {
1299         return iam_htree_advance(iam_path_obj(path), 0, path, NULL, 0);
1300 }
1301
1302 static void iam_unlock_array(struct iam_container *ic,
1303                              struct dynlock_handle **lh)
1304 {
1305         int i;
1306
1307         for (i = 0; i < DX_MAX_TREE_HEIGHT; ++i, ++lh) {
1308                 if (*lh != NULL) {
1309                         iam_unlock_htree(ic, *lh);
1310                         *lh = NULL;
1311                 }
1312         }
1313 }
1314 /*
1315  * Advance index part of @path to point to the next leaf. Returns 1 on
1316  * success, 0 when the end of the container was reached. Leaf node is locked.
1317  */
1318 int iam_index_next(struct iam_container *c, struct iam_path *path)
1319 {
1320         iam_ptr_t cursor;
1321         struct dynlock_handle *lh[DX_MAX_TREE_HEIGHT] = { NULL, };
1322         int result;
1323
1324         /*
1325          * Locking for iam_index_next()... is to be described.
1326          */
1327
1328         cursor = path->ip_frame->leaf;
1329
1330         while (1) {
1331                 result = iam_index_lock(path, lh);
1332                 do_corr(schedule());
1333                 if (result < 0)
1334                         break;
1335
1336                 result = iam_check_full_path(path, 0);
1337                 if (result == 0 && cursor == path->ip_frame->leaf) {
1338                         result = iam_index_advance(path);
1339
1340                         assert_corr(result == 0 ||
1341                                     cursor != path->ip_frame->leaf);
1342                         break;
1343                 }
1344                 do {
1345                         iam_unlock_array(c, lh);
1346
1347                         iam_path_release(path);
1348                         do_corr(schedule());
1349
1350                         result = __iam_path_lookup(path);
1351                         if (result < 0)
1352                                 break;
1353
1354                         while (path->ip_frame->leaf != cursor) {
1355                                 do_corr(schedule());
1356
1357                                 result = iam_index_lock(path, lh);
1358                                 do_corr(schedule());
1359                                 if (result < 0)
1360                                         break;
1361
1362                                 result = iam_check_full_path(path, 0);
1363                                 if (result != 0)
1364                                         break;
1365
1366                                 result = iam_index_advance(path);
1367                                 if (result == 0) {
1368                                         CERROR("cannot find cursor : %u\n",
1369                                                 cursor);
1370                                         result = -EIO;
1371                                 }
1372                                 if (result < 0)
1373                                         break;
1374                                 result = iam_check_full_path(path, 0);
1375                                 if (result != 0)
1376                                         break;
1377                                 iam_unlock_array(c, lh);
1378                         }
1379                 } while (result == -EAGAIN);
1380                 if (result < 0)
1381                         break;
1382         }
1383         iam_unlock_array(c, lh);
1384         return result;
1385 }
1386
1387 /*
1388  * Move iterator one record right.
1389  *
1390  * Return value: 0: success,
1391  *              +1: end of container reached
1392  *             -ve: error
1393  *
1394  * precondition:  (it_state(it) == IAM_IT_ATTACHED ||
1395  *                 it_state(it) == IAM_IT_SKEWED) && it->ii_flags&IAM_IT_MOVE
1396  * postcondition: ergo(result == 0, it_state(it) == IAM_IT_ATTACHED) &&
1397  *                ergo(result >  0, it_state(it) == IAM_IT_DETACHED)
1398  */
1399 int iam_it_next(struct iam_iterator *it)
1400 {
1401         int result;
1402         struct iam_path *path;
1403         struct iam_leaf *leaf;
1404
1405         do_corr(struct iam_ikey *ik_orig);
1406
1407         /* assert_corr(it->ii_flags&IAM_IT_MOVE); */
1408         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1409                     it_state(it) == IAM_IT_SKEWED);
1410
1411         path = &it->ii_path;
1412         leaf = &path->ip_leaf;
1413
1414         assert_corr(iam_leaf_is_locked(leaf));
1415
1416         result = 0;
1417         do_corr(ik_orig = it_at_rec(it) ?
1418                 iam_it_ikey_get(it, iam_path_ikey(path, 2)) : NULL);
1419         if (it_before(it)) {
1420                 assert_corr(!iam_leaf_at_end(leaf));
1421                 it->ii_state = IAM_IT_ATTACHED;
1422         } else {
1423                 if (!iam_leaf_at_end(leaf))
1424                         /* advance within leaf node */
1425                         iam_leaf_next(leaf);
1426                 /*
1427                  * multiple iterations may be necessary due to empty leaves.
1428                  */
1429                 while (result == 0 && iam_leaf_at_end(leaf)) {
1430                         do_corr(schedule());
1431                         /* advance index portion of the path */
1432                         result = iam_index_next(iam_it_container(it), path);
1433                         assert_corr(iam_leaf_is_locked(leaf));
1434                         if (result == 1) {
1435                                 struct dynlock_handle *lh;
1436                                 lh = iam_lock_htree(iam_it_container(it),
1437                                                     path->ip_frame->leaf,
1438                                                     DLT_WRITE);
1439                                 if (lh != NULL) {
1440                                         iam_leaf_fini(leaf);
1441                                         leaf->il_lock = lh;
1442                                         result = iam_leaf_load(path);
1443                                         if (result == 0)
1444                                                 iam_leaf_start(leaf);
1445                                 } else
1446                                         result = -ENOMEM;
1447                         } else if (result == 0)
1448                                 /* end of container reached */
1449                                 result = +1;
1450                         if (result != 0)
1451                                 iam_it_put(it);
1452                 }
1453                 if (result == 0)
1454                         it->ii_state = IAM_IT_ATTACHED;
1455         }
1456         assert_corr(ergo(result == 0, it_state(it) == IAM_IT_ATTACHED));
1457         assert_corr(ergo(result >  0, it_state(it) == IAM_IT_DETACHED));
1458         assert_corr(ergo(result == 0 && ik_orig != NULL,
1459                     it_ikeycmp(it, ik_orig) >= 0));
1460         return result;
1461 }
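
/*
 * Putting the iterator entry points together, a read-only scan over a
 * container looks roughly like this (illustrative sketch; path descriptor
 * setup and error handling are omitted):
 *
 *     struct iam_iterator it;
 *
 *     iam_it_init(&it, c, IAM_IT_MOVE, pd);
 *     rc = iam_it_get_at(&it, start_key);   // 0 or +1: positioned on a record
 *     while (rc >= 0) {
 *             struct iam_rec *rec = iam_it_rec_get(&it);
 *
 *             ... consume rec and iam_it_key_get(&it) ...
 *             rc = iam_it_next(&it);
 *             if (rc > 0)                    // +1: end of container reached
 *                     break;
 *     }
 *     iam_it_put(&it);
 *     iam_it_fini(&it);
 */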
1462
1463 /*
1464  * Return pointer to the record under iterator.
1465  *
1466  * precondition:  it_state(it) == IAM_IT_ATTACHED && it_at_rec(it)
1467  * postcondition: it_state(it) == IAM_IT_ATTACHED
1468  */
1469 struct iam_rec *iam_it_rec_get(const struct iam_iterator *it)
1470 {
1471         assert_corr(it_state(it) == IAM_IT_ATTACHED);
1472         assert_corr(it_at_rec(it));
1473         return iam_leaf_rec(&it->ii_path.ip_leaf);
1474 }
1475
1476 static void iam_it_reccpy(struct iam_iterator *it, const struct iam_rec *r)
1477 {
1478         struct iam_leaf *folio;
1479
1480         folio = &it->ii_path.ip_leaf;
1481         iam_leaf_ops(folio)->rec_set(folio, r);
1482 }
1483
1484 /*
1485  * Replace contents of record under iterator.
1486  *
1487  * precondition:  it_state(it) == IAM_IT_ATTACHED &&
1488  *                it->ii_flags&IAM_IT_WRITE
1489  * postcondition: it_state(it) == IAM_IT_ATTACHED &&
1490  *                ergo(result == 0, !memcmp(iam_it_rec_get(it), r, ...))
1491  */
1492 int iam_it_rec_set(handle_t *h,
1493                    struct iam_iterator *it, const struct iam_rec *r)
1494 {
1495         int result;
1496         struct iam_path *path;
1497         struct buffer_head *bh;
1498
1499         assert_corr(it_state(it) == IAM_IT_ATTACHED &&
1500                     it->ii_flags&IAM_IT_WRITE);
1501         assert_corr(it_at_rec(it));
1502
1503         path = &it->ii_path;
1504         bh = path->ip_leaf.il_bh;
1505         result = iam_txn_add(h, path, bh);
1506         if (result == 0) {
1507                 iam_it_reccpy(it, r);
1508                 result = iam_txn_dirty(h, path, bh);
1509         }
1510         return result;
1511 }
1512
1513 /*
1514  * Return pointer to the index key under iterator.
1515  *
1516  * precondition:  it_state(it) == IAM_IT_ATTACHED ||
1517  *                it_state(it) == IAM_IT_SKEWED
1518  */
1519 static struct iam_ikey *iam_it_ikey_get(const struct iam_iterator *it,
1520                                         struct iam_ikey *ikey)
1521 {
1522         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1523                     it_state(it) == IAM_IT_SKEWED);
1524         assert_corr(it_at_rec(it));
1525         return iam_leaf_ikey(&it->ii_path.ip_leaf, ikey);
1526 }
1527
1528 /*
1529  * Return pointer to the key under iterator.
1530  *
1531  * precondition:  it_state(it) == IAM_IT_ATTACHED ||
1532  *                it_state(it) == IAM_IT_SKEWED
1533  */
1534 struct iam_key *iam_it_key_get(const struct iam_iterator *it)
1535 {
1536         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1537                     it_state(it) == IAM_IT_SKEWED);
1538         assert_corr(it_at_rec(it));
1539         return iam_leaf_key(&it->ii_path.ip_leaf);
1540 }
1541
1542 /*
1543  * Return size of key under iterator (in bytes)
1544  *
1545  * precondition:  it_state(it) == IAM_IT_ATTACHED ||
1546  *                it_state(it) == IAM_IT_SKEWED
1547  */
1548 int iam_it_key_size(const struct iam_iterator *it)
1549 {
1550         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1551                     it_state(it) == IAM_IT_SKEWED);
1552         assert_corr(it_at_rec(it));
1553         return iam_leaf_key_size(&it->ii_path.ip_leaf);
1554 }
1555
1556 static struct buffer_head *
1557 iam_new_node(handle_t *h, struct iam_container *c, iam_ptr_t *b, int *e)
1558 {
1559         struct inode *inode = c->ic_object;
1560         struct buffer_head *bh = NULL;
1561         struct iam_idle_head *head;
1562         struct buffer_head *idle;
1563         __u32 *idle_blocks;
1564         __u16 count;
1565
1566         if (c->ic_idle_bh == NULL)
1567                 goto newblock;
1568
1569         mutex_lock(&c->ic_idle_mutex);
1570         if (unlikely(c->ic_idle_bh == NULL)) {
1571                 mutex_unlock(&c->ic_idle_mutex);
1572                 goto newblock;
1573         }
1574
1575         head = (struct iam_idle_head *)(c->ic_idle_bh->b_data);
1576         count = le16_to_cpu(head->iih_count);
1577         if (count > 0) {
1578                 *e = osd_ldiskfs_journal_get_write_access(h, inode->i_sb,
1579                                                           c->ic_idle_bh,
1580                                                           LDISKFS_JTR_NONE);
1581                 if (*e != 0)
1582                         goto fail;
1583
1584                 --count;
1585                 *b = le32_to_cpu(head->iih_blks[count]);
1586                 head->iih_count = cpu_to_le16(count);
1587                 *e = ldiskfs_handle_dirty_metadata(h, inode, c->ic_idle_bh);
1588                 if (*e != 0)
1589                         goto fail;
1590
1591                 mutex_unlock(&c->ic_idle_mutex);
1592                 bh = __ldiskfs_bread(NULL, inode, *b, 0);
1593                 if (IS_ERR_OR_NULL(bh)) {
1594                         if (IS_ERR(bh))
1595                                 *e = PTR_ERR(bh);
1596                         else
1597                                 *e = -EIO;
1598                         return NULL;
1599                 }
1600                 goto got;
1601         }
1602
1603         /* The block itself which contains the iam_idle_head is
1604          * also an idle block, and can be used as the new node. */
1605         idle_blocks = (__u32 *)(c->ic_root_bh->b_data +
1606                                 c->ic_descr->id_root_gap +
1607                                 sizeof(struct dx_countlimit));
1608         *e = osd_ldiskfs_journal_get_write_access(h, inode->i_sb,
1609                                                   c->ic_root_bh,
1610                                                   LDISKFS_JTR_NONE);
1611         if (*e != 0)
1612                 goto fail;
1613
1614         *b = le32_to_cpu(*idle_blocks);
1615         iam_lock_bh(c->ic_root_bh);
1616         *idle_blocks = head->iih_next;
1617         iam_unlock_bh(c->ic_root_bh);
1618         *e = ldiskfs_handle_dirty_metadata(h, inode, c->ic_root_bh);
1619         if (*e != 0) {
1620                 iam_lock_bh(c->ic_root_bh);
1621                 *idle_blocks = cpu_to_le32(*b);
1622                 iam_unlock_bh(c->ic_root_bh);
1623                 goto fail;
1624         }
1625
1626         bh = c->ic_idle_bh;
1627         idle = iam_load_idle_blocks(c, le32_to_cpu(*idle_blocks));
1628         if (idle != NULL && IS_ERR(idle)) {
1629                 *e = PTR_ERR(idle);
1630                 c->ic_idle_bh = NULL;
1631                 brelse(bh);
1632                 goto fail;
1633         }
1634
1635         c->ic_idle_bh = idle;
1636         mutex_unlock(&c->ic_idle_mutex);
1637
1638 got:
1639         /* get write access for the found buffer head */
1640         *e = osd_ldiskfs_journal_get_write_access(h, inode->i_sb, bh,
1641                                                   LDISKFS_JTR_NONE);
1642         if (*e != 0) {
1643                 brelse(bh);
1644                 bh = NULL;
1645                 ldiskfs_std_error(inode->i_sb, *e);
1646         } else {
1647                 /* Clear the reused node so it is zeroed like a newly allocated one. */
1648                 memset(bh->b_data, 0, inode->i_sb->s_blocksize);
1649                 set_buffer_uptodate(bh);
1650         }
1651         return bh;
1652
1653 newblock:
1654         bh = osd_ldiskfs_append(h, inode, b);
1655         if (IS_ERR(bh)) {
1656                 *e = PTR_ERR(bh);
1657                 bh = NULL;
1658         }
1659
1660         return bh;
1661
1662 fail:
1663         mutex_unlock(&c->ic_idle_mutex);
1664         ldiskfs_std_error(inode->i_sb, *e);
1665         return NULL;
1666 }
1667
1668 /*
1669  * Insertion of a new record. Interaction with jbd in the non-trivial case
1670  * (when a split happens) is as follows (see the sketch after this comment):
1671  *
1672  *  - the new leaf node is involved in the transaction by iam_new_node();
1673  *
1674  *  - the old leaf node is involved in the transaction by iam_add_rec();
1675  *
1676  *  - the leaf holding the insertion point is marked dirty by iam_add_rec();
1677  *
1678  *  - the leaf without the insertion point is marked dirty (as @new_leaf) by
1679  *  iam_new_leaf();
1680  *
1681  *  - split index nodes are involved in the transaction and marked dirty by
1682  *  split_index_node().
1683  *
1684  *  - the "safe" index node (not split itself, but receiving the new pointer)
1685  *  is involved in the transaction and marked dirty by split_index_node().
1686  *
1687  *  - the index node where the pointer to the new leaf is inserted is involved
1688  *  in the transaction by split_index_node() and marked dirty by iam_add_rec().
1689  *
1690  *  - the inode is marked dirty by iam_add_rec().
1691  *
1692  */
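/*
 * Illustrative sketch (hypothetical helper, not part of the iam API): the
 * per-buffer journal pattern that each step above follows.  iam_txn_add()
 * makes jbd aware of the buffer before it is modified and iam_txn_dirty()
 * marks it dirty afterwards; the modification itself happens in between,
 * under whatever lock protects the buffer.
 */
static inline int iam_example_modify_block(handle_t *h, struct iam_path *p,
                                           struct buffer_head *bh)
{
        int rc;

        /* involve the buffer into the transaction */
        rc = iam_txn_add(h, p, bh);
        if (rc == 0) {
                /* ... modify bh->b_data under the appropriate lock ... */
                rc = iam_txn_dirty(h, p, bh);
        }
        return rc;
}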
1693
1694 static int iam_new_leaf(handle_t *handle, struct iam_leaf *leaf)
1695 {
1696         int err;
1697         iam_ptr_t blknr;
1698         struct buffer_head *new_leaf;
1699         struct buffer_head *old_leaf;
1700         struct iam_container *c;
1701         struct inode *obj;
1702         struct iam_path *path;
1703
1704         c = iam_leaf_container(leaf);
1705         path = leaf->il_path;
1706
1707         obj = c->ic_object;
1708         new_leaf = iam_new_node(handle, c, &blknr, &err);
1709         do_corr(schedule());
1710         if (new_leaf != NULL) {
1711                 struct dynlock_handle *lh;
1712
1713                 lh = iam_lock_htree(c, blknr, DLT_WRITE);
1714                 do_corr(schedule());
1715                 if (lh != NULL) {
1716                         iam_leaf_ops(leaf)->init_new(c, new_leaf);
1717                         do_corr(schedule());
1718                         old_leaf = leaf->il_bh;
1719                         iam_leaf_split(leaf, &new_leaf, blknr);
1720                         if (old_leaf != leaf->il_bh) {
1721                                 /*
1722                                  * Switched to the new leaf.
1723                                  */
1724                                 iam_leaf_unlock(leaf);
1725                                 leaf->il_lock = lh;
1726                                 path->ip_frame->leaf = blknr;
1727                         } else
1728                                 iam_unlock_htree(path->ip_container, lh);
1729                         do_corr(schedule());
1730                         err = iam_txn_dirty(handle, path, new_leaf);
1731                         if (err == 0)
1732                                 err = ldiskfs_mark_inode_dirty(handle, obj);
1733                         do_corr(schedule());
1734                 } else
1735                         err = -ENOMEM;
1736                 brelse(new_leaf);
1737         }
1738         assert_inv(iam_path_check(iam_leaf_path(leaf)));
1739         return err;
1740 }
1741
1742 static inline void dx_set_limit(struct iam_entry *entries, unsigned value)
1743 {
1744         ((struct dx_countlimit *) entries)->limit = cpu_to_le16(value);
1745 }
1746
1747 static int iam_shift_entries(struct iam_path *path,
1748                          struct iam_frame *frame, unsigned count,
1749                          struct iam_entry *entries, struct iam_entry *entries2,
1750                          u32 newblock)
1751 {
1752         unsigned count1;
1753         unsigned count2;
1754         int delta;
1755
1756         struct iam_frame *parent = frame - 1;
1757         struct iam_ikey *pivot = iam_path_ikey(path, 3);
1758
1759         delta = dx_index_is_compat(path) ? 0 : +1;
1760
1761         count1 = count/2 + delta;
1762         count2 = count - count1;
1763         dx_get_ikey(path, iam_entry_shift(path, entries, count1), pivot);
1764
1765         dxtrace(printk("Split index %d/%d\n", count1, count2));
1766
1767         memcpy((char *) iam_entry_shift(path, entries2, delta),
1768                (char *) iam_entry_shift(path, entries, count1),
1769                count2 * iam_entry_size(path));
1770
1771         dx_set_count(entries2, count2 + delta);
1772         dx_set_limit(entries2, dx_node_limit(path));
1773
1774         /*
1775          * NOTE: very subtle piece of code. A competing dx_probe() may find the
1776          * 2nd level index in the root index; we then insert the new index here
1777          * and set the new count in that 2nd level index. So dx_probe() may see
1778          * a 2nd level index without the hash it looks for. The solution is to
1779          * re-check the root index after locking the just-found 2nd level index -bzzz
1780          */
1781         iam_insert_key_lock(path, parent, pivot, newblock);
1782
1783         /*
1784          * Now both the old and the new 2nd level index blocks contain all the
1785          * pointers, so dx_probe() may find it in either of them.  That is OK -bzzz
1786          */
1787         iam_lock_bh(frame->bh);
1788         dx_set_count(entries, count1);
1789         iam_unlock_bh(frame->bh);
1790
1791         /*
1792          * Now the old 2nd level index block points to the first half of the
1793          * leaves. It is important that dx_probe() checks the root index block
1794          * for changes under dx_lock_bh(frame->bh) -bzzz
1795          */
1796
1797         return count1;
1798 }
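/*
 * Worked example for the arithmetic above, assuming a non-compat container
 * (delta == 1) and count == 100: count1 = 100/2 + 1 = 51 and
 * count2 = 100 - 51 = 49.  The pivot ikey is read from entry 51, the upper
 * 49 entries are copied into the new block starting at slot delta, the new
 * block's count becomes count2 + delta = 50, the old block is truncated to
 * count1 = 51 entries, and the pivot/new-block pair is inserted into the
 * parent by iam_insert_key_lock().
 */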
1799
1800
1801 int split_index_node(handle_t *handle, struct iam_path *path,
1802                      struct dynlock_handle **lh)
1803 {
1804         struct iam_entry *entries;   /* old block contents */
1805         struct iam_entry *entries2;  /* new block contents */
1806         struct iam_frame *frame, *safe;
1807         struct buffer_head *bh_new[DX_MAX_TREE_HEIGHT] = {NULL};
1808         u32 newblock[DX_MAX_TREE_HEIGHT] = {0};
1809         struct dynlock_handle *lock[DX_MAX_TREE_HEIGHT] = {NULL,};
1810         struct dynlock_handle *new_lock[DX_MAX_TREE_HEIGHT] = {NULL,};
1811         struct inode *dir = iam_path_obj(path);
1812         struct iam_descr *descr;
1813         int nr_splet;
1814         int i, err;
1815
1816         descr = iam_path_descr(path);
1817         /*
1818          * Algorithm below depends on this.
1819          */
1820         assert_corr(dx_root_limit(path) < dx_node_limit(path));
1821
1822         frame = path->ip_frame;
1823         entries = frame->entries;
1824
1825         /*
1826          * Tall-tree handling: we might have to split multiple index blocks
1827          * all the way up to tree root. Tricky point here is error handling:
1828          * to avoid complicated undo/rollback we
1829          *
1830          *   - first allocate all necessary blocks
1831          *
1832          *   - insert pointers into them atomically.
1833          */
1834
1835         /*
1836          * Locking: leaf is already locked. htree-locks are acquired on all
1837          * index nodes that require split bottom-to-top, on the "safe" node,
1838          * and on all new nodes
1839          */
1840
1841         dxtrace(printk("using %u of %u node entries\n",
1842                        dx_get_count(entries), dx_get_limit(entries)));
1843
1844         /* What levels need split? */
1845         for (nr_splet = 0; frame >= path->ip_frames &&
1846              dx_get_count(frame->entries) == dx_get_limit(frame->entries);
1847              --frame, ++nr_splet) {
1848                 do_corr(schedule());
1849                 if (nr_splet == DX_MAX_TREE_HEIGHT) {
1850                         /*
1851                          * CWARN(dir->i_sb, __FUNCTION__,
1852                          * "Directory index full!\n");
1853                          */
1854                         err = -ENOSPC;
1855                         goto cleanup;
1856                 }
1857         }
1858
1859         safe = frame;
1860
1861         /*
1862          * Lock all nodes, bottom to top.
1863          */
1864         for (frame = path->ip_frame, i = nr_splet; i >= 0; --i, --frame) {
1865                 do_corr(schedule());
1866                 lock[i] = iam_lock_htree(path->ip_container, frame->curidx,
1867                                          DLT_WRITE);
1868                 if (lock[i] == NULL) {
1869                         err = -ENOMEM;
1870                         goto cleanup;
1871                 }
1872         }
1873
1874         /*
1875          * Check for concurrent index modification.
1876          */
1877         err = iam_check_full_path(path, 1);
1878         if (err)
1879                 goto cleanup;
1880         /*
1881          * And check that the same number of nodes is to be split.
1882          */
1883         for (i = 0, frame = path->ip_frame; frame >= path->ip_frames &&
1884              dx_get_count(frame->entries) == dx_get_limit(frame->entries);
1885              --frame, ++i) {
1886                 ;
1887         }
1888         if (i != nr_splet) {
1889                 err = -EAGAIN;
1890                 goto cleanup;
1891         }
1892
1893         /*
1894          * Go back down, allocating blocks, locking them, and adding into
1895          * transaction...
1896          */
1897         for (frame = safe + 1, i = 0; i < nr_splet; ++i, ++frame) {
1898                 bh_new[i] = iam_new_node(handle, path->ip_container,
1899                                          &newblock[i], &err);
1900                 do_corr(schedule());
1901                 if (!bh_new[i] ||
1902                     descr->id_ops->id_node_init(path->ip_container,
1903                                                 bh_new[i], 0) != 0)
1904                         goto cleanup;
1905
1906                 new_lock[i] = iam_lock_htree(path->ip_container, newblock[i],
1907                                              DLT_WRITE);
1908                 if (new_lock[i] == NULL) {
1909                         err = -ENOMEM;
1910                         goto cleanup;
1911                 }
1912                 do_corr(schedule());
1913                 BUFFER_TRACE(frame->bh, "get_write_access");
1914                 err = osd_ldiskfs_journal_get_write_access(handle,
1915                                                            dir->i_sb,
1916                                                            frame->bh,
1917                                                            LDISKFS_JTR_NONE);
1918                 if (err)
1919                         goto journal_error;
1920         }
1921         /* Add "safe" node to transaction too */
1922         if (safe + 1 != path->ip_frames) {
1923                 do_corr(schedule());
1924                 err = osd_ldiskfs_journal_get_write_access(handle,
1925                                                            dir->i_sb,
1926                                                            safe->bh,
1927                                                            LDISKFS_JTR_NONE);
1928                 if (err)
1929                         goto journal_error;
1930         }
1931
1932         /* Go through nodes once more, inserting pointers */
1933         for (frame = safe + 1, i = 0; i < nr_splet; ++i, ++frame) {
1934                 unsigned count;
1935                 int idx;
1936                 struct buffer_head *bh2;
1937                 struct buffer_head *bh;
1938
1939                 entries = frame->entries;
1940                 count = dx_get_count(entries);
1941                 idx = iam_entry_diff(path, frame->at, entries);
1942
1943                 bh2 = bh_new[i];
1944                 entries2 = dx_get_entries(path, bh2->b_data, 0);
1945
1946                 bh = frame->bh;
1947                 if (frame == path->ip_frames) {
1948                         /* splitting root node. Tricky point:
1949                          *
1950                          * In the "normal" B-tree we'd split root *and* add
1951                          * new root to the tree with pointers to the old root
1952                          * and its sibling (thus introducing two new nodes).
1953                          *
1954                          * In htree it's enough to add one node, because
1955                          * capacity of the root node is smaller than that of
1956                          * non-root one.
1957                          */
1958                         struct iam_frame *frames;
1959                         struct iam_entry *next;
1960
1961                         assert_corr(i == 0);
1962
1963                         do_corr(schedule());
1964
1965                         frames = path->ip_frames;
1966                         memcpy((char *) entries2, (char *) entries,
1967                                count * iam_entry_size(path));
1968                         dx_set_limit(entries2, dx_node_limit(path));
1969
1970                         /* Set up root */
1971                         iam_lock_bh(frame->bh);
1972                         next = descr->id_ops->id_root_inc(path->ip_container,
1973                                                           path, frame);
1974                         dx_set_block(path, next, newblock[0]);
1975                         iam_unlock_bh(frame->bh);
1976
1977                         do_corr(schedule());
1978                         /* Shift frames in the path */
1979                         memmove(frames + 2, frames + 1,
1980                                (sizeof path->ip_frames) - 2 * sizeof frames[0]);
1981                         /* Add new access path frame */
1982                         frames[1].at = iam_entry_shift(path, entries2, idx);
1983                         frames[1].entries = entries = entries2;
1984                         frames[1].bh = bh2;
1985                         assert_inv(dx_node_check(path, frame));
1986                         ++ path->ip_frame;
1987                         ++ frame;
1988                         assert_inv(dx_node_check(path, frame));
1989                         bh_new[0] = NULL; /* buffer head is "consumed" */
1990                         err = ldiskfs_handle_dirty_metadata(handle, NULL, bh2);
1991                         if (err)
1992                                 goto journal_error;
1993                         do_corr(schedule());
1994                 } else {
1995                         /* splitting non-root index node. */
1996                         struct iam_frame *parent = frame - 1;
1997
1998                         do_corr(schedule());
1999                         count = iam_shift_entries(path, frame, count,
2000                                                 entries, entries2, newblock[i]);
2001                         /* Which index block gets the new entry? */
2002                         if (idx >= count) {
2003                                 int d = dx_index_is_compat(path) ? 0 : +1;
2004
2005                                 frame->at = iam_entry_shift(path, entries2,
2006                                                             idx - count + d);
2007                                 frame->entries = entries = entries2;
2008                                 frame->curidx = newblock[i];
2009                                 swap(frame->bh, bh2);
2010                                 assert_corr(lock[i + 1] != NULL);
2011                                 assert_corr(new_lock[i] != NULL);
2012                                 swap(lock[i + 1], new_lock[i]);
2013                                 bh_new[i] = bh2;
2014                                 parent->at = iam_entry_shift(path,
2015                                                              parent->at, +1);
2016                         }
2017                         assert_inv(dx_node_check(path, frame));
2018                         assert_inv(dx_node_check(path, parent));
2019                         dxtrace(dx_show_index("node", frame->entries));
2020                         dxtrace(dx_show_index("node",
2021                                 ((struct dx_node *) bh2->b_data)->entries));
2022                         err = ldiskfs_handle_dirty_metadata(handle, NULL, bh2);
2023                         if (err)
2024                                 goto journal_error;
2025                         do_corr(schedule());
2026                         err = ldiskfs_handle_dirty_metadata(handle, NULL,
2027                                                             parent->bh);
2028                         if (err)
2029                                 goto journal_error;
2030                 }
2031                 do_corr(schedule());
2032                 err = ldiskfs_handle_dirty_metadata(handle, NULL, bh);
2033                 if (err)
2034                         goto journal_error;
2035         }
2036         /*
2037          * This function was called to make insertion of new leaf
2038          * possible. Check that it fulfilled its obligations.
2039          */
2040         assert_corr(dx_get_count(path->ip_frame->entries) <
2041                     dx_get_limit(path->ip_frame->entries));
2042         assert_corr(lock[nr_splet] != NULL);
2043         *lh = lock[nr_splet];
2044         lock[nr_splet] = NULL;
2045         if (nr_splet > 0) {
2046                 /*
2047                  * Log ->i_size modification.
2048                  */
2049                 err = ldiskfs_mark_inode_dirty(handle, dir);
2050                 if (err)
2051                         goto journal_error;
2052         }
2053         goto cleanup;
2054 journal_error:
2055         ldiskfs_std_error(dir->i_sb, err);
2056
2057 cleanup:
2058         iam_unlock_array(path->ip_container, lock);
2059         iam_unlock_array(path->ip_container, new_lock);
2060
2061         assert_corr(err || iam_frame_is_locked(path, path->ip_frame));
2062
2063         do_corr(schedule());
2064         for (i = 0; i < ARRAY_SIZE(bh_new); ++i) {
2065                 if (bh_new[i] != NULL)
2066                         brelse(bh_new[i]);
2067         }
2068         return err;
2069 }
2070
2071 static int iam_add_rec(handle_t *handle, struct iam_iterator *it,
2072                        struct iam_path *path,
2073                        const struct iam_key *k, const struct iam_rec *r)
2074 {
2075         int err;
2076         struct iam_leaf *leaf;
2077
2078         leaf = &path->ip_leaf;
2079         assert_inv(iam_path_check(path));
2080         err = iam_txn_add(handle, path, leaf->il_bh);
2081         if (err == 0) {
2082                 do_corr(schedule());
2083                 if (!iam_leaf_can_add(leaf, k, r)) {
2084                         struct dynlock_handle *lh = NULL;
2085
2086                         do {
2087                                 assert_corr(lh == NULL);
2088                                 do_corr(schedule());
2089                                 err = split_index_node(handle, path, &lh);
2090                                 if (err == -EAGAIN) {
2091                                         assert_corr(lh == NULL);
2092
2093                                         iam_path_fini(path);
2094                                         it->ii_state = IAM_IT_DETACHED;
2095
2096                                         do_corr(schedule());
2097                                         err = iam_it_get_exact(it, k);
2098                                         if (err == -ENOENT)
2099                                                 err = +1; /* repeat split */
2100                                         else if (err == 0)
2101                                                 err = -EEXIST;
2102                                 }
2103                         } while (err > 0);
2104                         assert_inv(iam_path_check(path));
2105                         if (err == 0) {
2106                                 assert_corr(lh != NULL);
2107                                 do_corr(schedule());
2108                                 err = iam_new_leaf(handle, leaf);
2109                                 if (err == 0)
2110                                         err = iam_txn_dirty(handle, path,
2111                                                             path->ip_frame->bh);
2112                         }
2113                         iam_unlock_htree(path->ip_container, lh);
2114                         do_corr(schedule());
2115                 }
2116                 if (err == 0) {
2117                         iam_leaf_rec_add(leaf, k, r);
2118                         err = iam_txn_dirty(handle, path, leaf->il_bh);
2119                 }
2120         }
2121         assert_inv(iam_path_check(path));
2122         return err;
2123 }
2124
2125 /*
2126  * Insert new record with key @k and contents from @r, shifting records to the
2127  * right. On success, iterator is positioned on the newly inserted record.
2128  *
2129  * precondition: it->ii_flags&IAM_IT_WRITE &&
2130  *               (it_state(it) == IAM_IT_ATTACHED ||
2131  *                it_state(it) == IAM_IT_SKEWED) &&
2132  *               ergo(it_state(it) == IAM_IT_ATTACHED,
2133  *                    it_keycmp(it, k) <= 0) &&
2134  *               ergo(it_before(it), it_keycmp(it, k) > 0);
2135  * postcondition: ergo(result == 0,
2136  *                     it_state(it) == IAM_IT_ATTACHED &&
2137  *                     it_keycmp(it, k) == 0 &&
2138  *                     !memcmp(iam_it_rec_get(it), r, ...))
2139  */
2140 int iam_it_rec_insert(handle_t *h, struct iam_iterator *it,
2141                       const struct iam_key *k, const struct iam_rec *r)
2142 {
2143         int result;
2144         struct iam_path *path;
2145
2146         path = &it->ii_path;
2147
2148         assert_corr(it->ii_flags&IAM_IT_WRITE);
2149         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
2150                     it_state(it) == IAM_IT_SKEWED);
2151         assert_corr(ergo(it_state(it) == IAM_IT_ATTACHED,
2152                     it_keycmp(it, k) <= 0));
2153         assert_corr(ergo(it_before(it), it_keycmp(it, k) > 0));
2154         result = iam_add_rec(h, it, path, k, r);
2155         if (result == 0)
2156                 it->ii_state = IAM_IT_ATTACHED;
2157         assert_corr(ergo(result == 0,
2158                          it_state(it) == IAM_IT_ATTACHED &&
2159                          it_keycmp(it, k) == 0));
2160         return result;
2161 }
2162
2163 static inline int iam_idle_blocks_limit(struct inode *inode)
2164 {
2165         return (inode->i_sb->s_blocksize - sizeof(struct iam_idle_head)) >> 2;
2166 }
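/*
 * For example, with a 4096-byte block, and assuming struct iam_idle_head is
 * 8 bytes (iih_magic, iih_count and iih_next), the limit above is
 * (4096 - 8) >> 2 == 1022 four-byte block numbers in iih_blks[] per
 * idle-head block.
 */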
2167
2168 /*
2169  * If the leaf cannot be recycled, we lose one block that could be reused.
2170  * That is not a serious issue; it is almost the same as not recycling at all.
2171  */
2172 static iam_ptr_t iam_index_shrink(handle_t *h, struct iam_path *p,
2173                                   struct iam_leaf *l, struct buffer_head **bh)
2174 {
2175         struct iam_container *c = p->ip_container;
2176         struct inode *inode = c->ic_object;
2177         struct iam_frame *frame = p->ip_frame;
2178         struct iam_entry *entries;
2179         struct iam_entry *pos;
2180         struct dynlock_handle *lh;
2181         int count;
2182         int rc;
2183
2184         if (c->ic_idle_failed)
2185                 return 0;
2186
2187         if (unlikely(frame == NULL))
2188                 return 0;
2189
2190         if (!iam_leaf_empty(l))
2191                 return 0;
2192
2193         lh = iam_lock_htree(c, frame->curidx, DLT_WRITE);
2194         if (lh == NULL) {
2195                 CWARN("%s: No memory to recycle idle blocks\n",
2196                       osd_ino2name(inode));
2197                 return 0;
2198         }
2199
2200         rc = iam_txn_add(h, p, frame->bh);
2201         if (rc != 0) {
2202                 iam_unlock_htree(c, lh);
2203                 return 0;
2204         }
2205
2206         iam_lock_bh(frame->bh);
2207         entries = frame->entries;
2208         count = dx_get_count(entries);
2209         /*
2210          * Do NOT shrink the last entry in the index node; it can be reused
2211          * directly by the next new node.
2212          */
2213         if (count == 2) {
2214                 iam_unlock_bh(frame->bh);
2215                 iam_unlock_htree(c, lh);
2216                 return 0;
2217         }
2218
2219         pos = iam_find_position(p, frame);
2220         /*
2221          * Some new leaf nodes may have been added, or empty leaf nodes
2222          * shrunk, while this delete operation was in progress.
2223          *
2224          * If the empty leaf is no longer under the current index node because
2225          * the index node has been split, just skip the empty leaf; this is rare.
2226          */
2227         if (unlikely(frame->leaf != dx_get_block(p, pos))) {
2228                 iam_unlock_bh(frame->bh);
2229                 iam_unlock_htree(c, lh);
2230                 return 0;
2231         }
2232
2233         frame->at = pos;
2234         if (frame->at < iam_entry_shift(p, entries, count - 1)) {
2235                 struct iam_entry *n = iam_entry_shift(p, frame->at, 1);
2236
2237                 memmove(frame->at, n,
2238                         (char *)iam_entry_shift(p, entries, count) - (char *)n);
2239                 frame->at_shifted = 1;
2240         }
2241         dx_set_count(entries, count - 1);
2242         iam_unlock_bh(frame->bh);
2243         rc = iam_txn_dirty(h, p, frame->bh);
2244         iam_unlock_htree(c, lh);
2245         if (rc != 0)
2246                 return 0;
2247
2248         get_bh(l->il_bh);
2249         *bh = l->il_bh;
2250         return frame->leaf;
2251 }
2252
2253 static int
2254 iam_install_idle_blocks(handle_t *h, struct iam_path *p, struct buffer_head *bh,
2255                         __u32 *idle_blocks, iam_ptr_t blk)
2256 {
2257         struct iam_container *c = p->ip_container;
2258         struct buffer_head *old = c->ic_idle_bh;
2259         struct iam_idle_head *head;
2260         int rc;
2261
2262         head = (struct iam_idle_head *)(bh->b_data);
2263         head->iih_magic = cpu_to_le16(IAM_IDLE_HEADER_MAGIC);
2264         head->iih_count = 0;
2265         head->iih_next = *idle_blocks;
2266         /* The bh has already had get_write_access() called on it. */
2267         rc = iam_txn_dirty(h, p, bh);
2268         if (rc != 0)
2269                 return rc;
2270
2271         rc = iam_txn_add(h, p, c->ic_root_bh);
2272         if (rc != 0)
2273                 return rc;
2274
2275         iam_lock_bh(c->ic_root_bh);
2276         *idle_blocks = cpu_to_le32(blk);
2277         iam_unlock_bh(c->ic_root_bh);
2278         rc = iam_txn_dirty(h, p, c->ic_root_bh);
2279         if (rc == 0) {
2280                 /* Do NOT release the old bh before the new one is assigned. */
2281                 get_bh(bh);
2282                 c->ic_idle_bh = bh;
2283                 brelse(old);
2284         } else {
2285                 iam_lock_bh(c->ic_root_bh);
2286                 *idle_blocks = head->iih_next;
2287                 iam_unlock_bh(c->ic_root_bh);
2288         }
2289         return rc;
2290 }
2291
2292 /*
2293  * If the leaf cannot be recycled, we lose one block that could be reused.
2294  * That is not a serious issue; it is almost the same as not recycling at all.
2295  */
2296 static void iam_recycle_leaf(handle_t *h, struct iam_path *p,
2297                              struct buffer_head *bh, iam_ptr_t blk)
2298 {
2299         struct iam_container *c = p->ip_container;
2300         struct inode *inode = c->ic_object;
2301         struct iam_idle_head *head;
2302         __u32 *idle_blocks;
2303         int count;
2304         int rc;
2305
2306         mutex_lock(&c->ic_idle_mutex);
2307         if (unlikely(c->ic_idle_failed)) {
2308                 rc = -EFAULT;
2309                 goto unlock;
2310         }
2311
2312         idle_blocks = (__u32 *)(c->ic_root_bh->b_data +
2313                                 c->ic_descr->id_root_gap +
2314                                 sizeof(struct dx_countlimit));
2315         /* It is the first idle block. */
2316         if (c->ic_idle_bh == NULL) {
2317                 rc = iam_install_idle_blocks(h, p, bh, idle_blocks, blk);
2318                 goto unlock;
2319         }
2320
2321         head = (struct iam_idle_head *)(c->ic_idle_bh->b_data);
2322         count = le16_to_cpu(head->iih_count);
2323         /* Current ic_idle_bh is full, to be replaced by the leaf. */
2324         if (count == iam_idle_blocks_limit(inode)) {
2325                 rc = iam_install_idle_blocks(h, p, bh, idle_blocks, blk);
2326                 goto unlock;
2327         }
2328
2329         /* Just add to ic_idle_bh. */
2330         rc = iam_txn_add(h, p, c->ic_idle_bh);
2331         if (rc != 0)
2332                 goto unlock;
2333
2334         head->iih_blks[count] = cpu_to_le32(blk);
2335         head->iih_count = cpu_to_le16(count + 1);
2336         rc = iam_txn_dirty(h, p, c->ic_idle_bh);
2337
2338 unlock:
2339         mutex_unlock(&c->ic_idle_mutex);
2340         if (rc != 0)
2341                 CWARN("%s: idle blocks failed, will lose the blk %u\n",
2342                       osd_ino2name(inode), blk);
2343 }
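/*
 * Sketch of the idle-block chain manipulated above, as implied by
 * iam_new_node(), iam_install_idle_blocks() and iam_recycle_leaf():
 *
 *   root block: | gap | dx_countlimit | idle_blocks | ...
 *                                           |
 *                                           v
 *   +--------------------+      +--------------------+
 *   | iam_idle_head      |      | iam_idle_head      |
 *   |   iih_next --------+----->|   iih_next --------+-----> 0
 *   |   iih_count = n    |      |   ...              |
 *   |   iih_blks[0..n-1] |      |                    |
 *   +--------------------+      +--------------------+
 *
 * Each head block is itself an idle block and holds up to
 * iam_idle_blocks_limit() further idle block numbers in iih_blks[].
 */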
2344
2345 /*
2346  * Delete record under iterator.
2347  *
2348  * precondition:  it_state(it) == IAM_IT_ATTACHED &&
2349  *                it->ii_flags&IAM_IT_WRITE &&
2350  *                it_at_rec(it)
2351  * postcondition: it_state(it) == IAM_IT_ATTACHED ||
2352  *                it_state(it) == IAM_IT_DETACHED
2353  */
2354 int iam_it_rec_delete(handle_t *h, struct iam_iterator *it)
2355 {
2356         int result;
2357         struct iam_leaf *leaf;
2358         struct iam_path *path;
2359
2360         assert_corr(it_state(it) == IAM_IT_ATTACHED &&
2361                     it->ii_flags&IAM_IT_WRITE);
2362         assert_corr(it_at_rec(it));
2363
2364         path = &it->ii_path;
2365         leaf = &path->ip_leaf;
2366
2367         assert_inv(iam_path_check(path));
2368
2369         result = iam_txn_add(h, path, leaf->il_bh);
2370         /*
2371          * no compaction for now.
2372          */
2373         if (result == 0) {
2374                 iam_rec_del(leaf, it->ii_flags&IAM_IT_MOVE);
2375                 result = iam_txn_dirty(h, path, leaf->il_bh);
2376                 if (result == 0 && iam_leaf_at_end(leaf)) {
2377                         struct buffer_head *bh = NULL;
2378                         iam_ptr_t blk;
2379
2380                         blk = iam_index_shrink(h, path, leaf, &bh);
2381                         if (it->ii_flags & IAM_IT_MOVE) {
2382                                 result = iam_it_next(it);
2383                                 if (result > 0)
2384                                         result = 0;
2385                         }
2386
2387                         if (bh != NULL) {
2388                                 iam_recycle_leaf(h, path, bh, blk);
2389                                 brelse(bh);
2390                         }
2391                 }
2392         }
2393         assert_inv(iam_path_check(path));
2394         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
2395                     it_state(it) == IAM_IT_DETACHED);
2396         return result;
2397 }
2398
2399 /*
2400  * Convert iterator to cookie.
2401  *
2402  * precondition:  it_state(it) == IAM_IT_ATTACHED &&
2403  *                iam_path_descr(it->ii_path)->id_key_size <= sizeof(iam_pos_t)
2404  * postcondition: it_state(it) == IAM_IT_ATTACHED
2405  */
2406 iam_pos_t iam_it_store(const struct iam_iterator *it)
2407 {
2408         iam_pos_t result;
2409
2410         assert_corr(it_state(it) == IAM_IT_ATTACHED);
2411         assert_corr(it_at_rec(it));
2412         assert_corr(iam_it_container(it)->ic_descr->id_ikey_size <=
2413                     sizeof result);
2414
2415         result = 0;
2416         return *(iam_pos_t *)iam_it_ikey_get(it, (void *)&result);
2417 }
2418
2419 /*
2420  * Restore iterator from cookie.
2421  *
2422  * precondition:  it_state(it) == IAM_IT_DETACHED && it->ii_flags&IAM_IT_MOVE &&
2423  *                iam_path_descr(it->ii_path)->id_key_size <= sizeof(iam_pos_t)
2424  * postcondition: ergo(result == 0, it_state(it) == IAM_IT_ATTACHED &&
2425  *                                  iam_it_store(it) == pos)
2426  */
2427 int iam_it_load(struct iam_iterator *it, iam_pos_t pos)
2428 {
2429         assert_corr(it_state(it) == IAM_IT_DETACHED &&
2430                 it->ii_flags&IAM_IT_MOVE);
2431         assert_corr(iam_it_container(it)->ic_descr->id_ikey_size <= sizeof pos);
2432         return iam_it_iget(it, (struct iam_ikey *)&pos);
2433 }
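/*
 * Minimal usage sketch of the cookie interface above (hypothetical helper,
 * error handling trimmed; assumes @c is an initialized container whose index
 * keys fit into an iam_pos_t, @pd a matching path descriptor, and @r a
 * correctly sized record buffer).  It mirrors the structure of iam_lookup()
 * below, but resumes from a previously stored position instead of an exact
 * key:
 */
static inline int iam_example_resume(struct iam_container *c,
                                     struct iam_path_descr *pd,
                                     iam_pos_t pos, struct iam_rec *r)
{
        struct iam_iterator it;
        int rc;

        iam_it_init(&it, c, IAM_IT_MOVE, pd);

        rc = iam_it_load(&it, pos);
        if (rc == 0) {
                /* attached at the first record with index key >= the cookie */
                iam_reccpy(&it.ii_path.ip_leaf, r);
                iam_it_put(&it);
        }
        iam_it_fini(&it);
        return rc;
}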
2434
2435 /***********************************************************************/
2436 /* invariants                                                          */
2437 /***********************************************************************/
2438
2439 static inline int ptr_inside(void *base, size_t size, void *ptr)
2440 {
2441         return (base <= ptr) && (ptr < base + size);
2442 }
2443
2444 static int iam_frame_invariant(struct iam_frame *f)
2445 {
2446         return
2447                 (f->bh != NULL &&
2448                 f->bh->b_data != NULL &&
2449                 ptr_inside(f->bh->b_data, f->bh->b_size, f->entries) &&
2450                 ptr_inside(f->bh->b_data, f->bh->b_size, f->at) &&
2451                 f->entries <= f->at);
2452 }
2453
2454 static int iam_leaf_invariant(struct iam_leaf *l)
2455 {
2456         return
2457                 l->il_bh != NULL &&
2458                 l->il_bh->b_data != NULL &&
2459                 ptr_inside(l->il_bh->b_data, l->il_bh->b_size, l->il_entries) &&
2460                 ptr_inside(l->il_bh->b_data, l->il_bh->b_size, l->il_at) &&
2461                 l->il_entries <= l->il_at;
2462 }
2463
2464 static int iam_path_invariant(struct iam_path *p)
2465 {
2466         int i;
2467
2468         if (p->ip_container == NULL ||
2469             p->ip_indirect < 0 || p->ip_indirect > DX_MAX_TREE_HEIGHT - 1 ||
2470             p->ip_frame != p->ip_frames + p->ip_indirect ||
2471             !iam_leaf_invariant(&p->ip_leaf))
2472                 return 0;
2473         for (i = 0; i < ARRAY_SIZE(p->ip_frames); ++i) {
2474                 if (i <= p->ip_indirect) {
2475                         if (!iam_frame_invariant(&p->ip_frames[i]))
2476                                 return 0;
2477                 }
2478         }
2479         return 1;
2480 }
2481
2482 int iam_it_invariant(struct iam_iterator *it)
2483 {
2484         return
2485                 (it->ii_state == IAM_IT_DETACHED ||
2486                 it->ii_state == IAM_IT_ATTACHED ||
2487                 it->ii_state == IAM_IT_SKEWED) &&
2488                 !(it->ii_flags & ~(IAM_IT_MOVE | IAM_IT_WRITE)) &&
2489                 ergo(it->ii_state == IAM_IT_ATTACHED ||
2490                 it->ii_state == IAM_IT_SKEWED,
2491                 iam_path_invariant(&it->ii_path) &&
2492                 equi(it_at_rec(it), it->ii_state == IAM_IT_SKEWED));
2493 }
2494
2495 /*
2496  * Search container @c for record with key @k. If record is found, its data
2497  * are copied into @r.
2498  *
2499  * Return values: 0: found, -ENOENT: not-found, -ve: error
2500  */
2501 int iam_lookup(struct iam_container *c, const struct iam_key *k,
2502                struct iam_rec *r, struct iam_path_descr *pd)
2503 {
2504         struct iam_iterator it;
2505         int result;
2506
2507         iam_it_init(&it, c, 0, pd);
2508
2509         result = iam_it_get_exact(&it, k);
2510         if (result == 0)
2511                 /*
2512                  * record with required key found, copy it into user buffer
2513                  */
2514                 iam_reccpy(&it.ii_path.ip_leaf, r);
2515         iam_it_put(&it);
2516         iam_it_fini(&it);
2517         return result;
2518 }
2519
2520 /*
2521  * Insert new record @r with key @k into container @c (within context of
2522  * transaction @h).
2523  *
2524  * Return values: 0: success, -ve: error, including -EEXIST when record with
2525  * given key is already present.
2526  *
2527  * postcondition: ergo(result == 0 || result == -EEXIST,
2528  *                     iam_lookup(c, k, r2) == 0);
2529  */
2530 int iam_insert(handle_t *h, struct iam_container *c, const struct iam_key *k,
2531                const struct iam_rec *r, struct iam_path_descr *pd)
2532 {
2533         struct iam_iterator it;
2534         int result;
2535
2536         iam_it_init(&it, c, IAM_IT_WRITE, pd);
2537
2538         result = iam_it_get_exact(&it, k);
2539         if (result == -ENOENT)
2540                 result = iam_it_rec_insert(h, &it, k, r);
2541         else if (result == 0)
2542                 result = -EEXIST;
2543         iam_it_put(&it);
2544         iam_it_fini(&it);
2545         return result;
2546 }
2547
2548 /*
2549  * Update record with the key @k in container @c (within context of
2550  * transaction @h), new record is given by @r.
2551  *
2552  * Return values: +1: skipped because the record value is unchanged, 0: success,
2553  * -ve: error, including -ENOENT if no record with the given key found.
2554  */
2555 int iam_update(handle_t *h, struct iam_container *c, const struct iam_key *k,
2556                const struct iam_rec *r, struct iam_path_descr *pd)
2557 {
2558         struct iam_iterator it;
2559         struct iam_leaf *folio;
2560         int result;
2561
2562         iam_it_init(&it, c, IAM_IT_WRITE, pd);
2563
2564         result = iam_it_get_exact(&it, k);
2565         if (result == 0) {
2566                 folio = &it.ii_path.ip_leaf;
2567                 result = iam_leaf_ops(folio)->rec_eq(folio, r);
2568                 if (result == 0)
2569                         iam_it_rec_set(h, &it, r);
2570                 else
2571                         result = 1;
2572         }
2573         iam_it_put(&it);
2574         iam_it_fini(&it);
2575         return result;
2576 }
2577
2578 /*
2579  * Delete existing record with key @k.
2580  *
2581  * Return values: 0: success, -ENOENT: not-found, -ve: other error.
2582  *
2583  * postcondition: ergo(result == 0 || result == -ENOENT,
2584  *                     iam_lookup(c, k, *) == -ENOENT);
2585  */
2586 int iam_delete(handle_t *h, struct iam_container *c, const struct iam_key *k,
2587                struct iam_path_descr *pd)
2588 {
2589         struct iam_iterator it;
2590         int result;
2591
2592         iam_it_init(&it, c, IAM_IT_WRITE, pd);
2593
2594         result = iam_it_get_exact(&it, k);
2595         if (result == 0)
2596                 iam_it_rec_delete(h, &it);
2597         iam_it_put(&it);
2598         iam_it_fini(&it);
2599         return result;
2600 }
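/*
 * Minimal usage sketch of the simple container API above (hypothetical
 * helper, error handling trimmed; assumes @c is an initialized container,
 * @pd a matching path descriptor, and @k/@r correctly sized key/record
 * buffers): insert the record, or overwrite an existing one with the same
 * key.
 */
static inline int iam_example_upsert(handle_t *h, struct iam_container *c,
                                     const struct iam_key *k,
                                     const struct iam_rec *r,
                                     struct iam_path_descr *pd)
{
        int rc;

        rc = iam_insert(h, c, k, r, pd);
        if (rc == -EEXIST) {
                /* record already present: overwrite it instead */
                rc = iam_update(h, c, k, r, pd);
        }
        /* iam_update() returns +1 when the record was already identical */
        return rc > 0 ? 0 : rc;
}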
2601
2602 int iam_root_limit(int rootgap, int blocksize, int size)
2603 {
2604         int limit;
2605         int nlimit;
2606
2607         limit = (blocksize - rootgap) / size;
2608         nlimit = blocksize / size;
2609         if (limit == nlimit)
2610                 limit--;
2611         return limit;
2612 }
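/*
 * Worked example (hypothetical sizes, only to illustrate the arithmetic):
 * with blocksize == 4096, rootgap == 32 and size == 8, limit is
 * (4096 - 32) / 8 == 508 while nlimit is 4096 / 8 == 512, so 508 is
 * returned unchanged.  With rootgap == 0 both divisions give 512 and the
 * limit is decremented to 511, keeping the root limit strictly below that
 * of a gapless node (split_index_node() asserts
 * dx_root_limit() < dx_node_limit()).
 */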