1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2011, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  *
31  * osd_iam.c
32  * Top-level entry points into iam module
33  *
34  * Author: Wang Di <wangdi@clusterfs.com>
35  * Author: Nikita Danilov <nikita@clusterfs.com>
36  */
37
38 /*
39  * iam: big theory statement.
40  *
41  * iam (Index Access Module) is a module providing an abstraction of a
42  * persistent transactional container on top of a generalized ldiskfs htree.
43  *
44  * iam supports:
45  *
46  *     - key, pointer, and record size specifiable per container.
47  *
48  *     - trees taller than 2 index levels.
49  *
50  *     - read/write to existing ldiskfs htree directories as iam containers.
51  *
52  * An iam container is a tree, consisting of leaf nodes containing keys and
53  * records stored in this container, and index nodes, containing keys and
54  * pointers to leaf or index nodes.
55  *
56  * iam does not work with keys directly; instead it calls a user-supplied key
57  * comparison function (->dpo_keycmp()).
58  *
59  * Pointers are (currently) interpreted as logical offsets (measured in
60  * blocks) within the underlying flat file on top of which the iam tree lives.
61  *
62  * On-disk format:
63  *
64  * iam mostly tries to reuse existing htree formats.
65  *
66  * Format of index node:
67  *
68  * +-----+-------+-------+-------+------+-------+------------+
69  * |     | count |       |       |      |       |            |
70  * | gap |   /   | entry | entry | .... | entry | free space |
71  * |     | limit |       |       |      |       |            |
72  * +-----+-------+-------+-------+------+-------+------------+
73  *
74  *       gap           this part of the node is never accessed by iam code. It
75  *                     exists for binary compatibility with ldiskfs htree (that,
76  *                     in turn, stores fake struct ext2_dirent for ext2
77  *                     compatibility), and to keep some unspecified per-node
78  *                     data. Gap can be different for root and non-root index
79  *                     nodes. Gap size can be specified for each container
80  *                     (gap of 0 is allowed).
81  *
82  *       count/limit   current number of entries in this node, and the maximal
83  *                     number of entries that can fit into the node. count/limit
84  *                     has the same size as entry, and is itself counted in
85  *                     count.
86  *
87  *       entry         index entry: consists of a key immediately followed by
88  *                     a pointer to a child node. Size of a key and size of a
89  *                     pointer depend on the container. Entry has neither
90  *                     alignment nor padding.
91  *
92  *       free space    portion of the node to which new entries are added
93  *
94  * Entries in index node are sorted by their key value.
95  *
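 * For illustration only (a hedged sketch, not a definition the code relies
 * on): with a hypothetical 8-byte key and a 4-byte block pointer, a single
 * index entry could be pictured as
 *
 *	struct example_index_entry {	(hypothetical name)
 *		__u8	key[8];
 *		__le32	ptr;
 *	} __attribute__((packed));	(no alignment, no padding)
 *
 * while count/limit reuses struct dx_countlimit inherited from the ldiskfs
 * htree format (note the sizeof(struct dx_countlimit) arithmetic used
 * further below in this file).
 *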
96  * Format of a leaf node is not specified. Generic iam code accesses leaf
97  * nodes through ->id_leaf methods in struct iam_descr.
98  *
99  * The IAM root block is a special node, which contains the IAM descriptor.
100  * Its on-disk format is:
101  *
102  * +---------+-------+--------+---------+-------+------+-------+------------+
103  * |IAM desc | count |  idle  |         |       |      |       |            |
104  * |(fix/var)|   /   | blocks | padding | entry | .... | entry | free space |
105  * |         | limit |        |         |       |      |       |            |
106  * +---------+-------+--------+---------+-------+------+-------+------------+
107  *
108  * The padding length is calculated from the parameters in the IAM descriptor.
109  *
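 * For illustration only: this is how the code further below locates the
 * "idle blocks" field inside the root block, with c->ic_descr->id_root_gap
 * covering the descriptor part:
 *
 *	idle_blocks = (__u32 *)(c->ic_root_bh->b_data +
 *				c->ic_descr->id_root_gap +
 *				sizeof(struct dx_countlimit));
 *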
110  * The field "idle_blocks" is used to record empty leaf nodes: blocks that
111  * have not been released although all entries in them have been removed.
112  * Ideally these idle blocks would be reused whenever new leaf nodes are
113  * needed for new entries, but that relies on the IAM hash functions mapping
114  * the new entries onto exactly these idle blocks. Unfortunately, it is hard
115  * to design hash functions that achieve such a clever mapping, especially
116  * considering the insert/lookup performance.
117  *
118  * So the IAM recycles the empty leaf nodes into a per-file pool of idle
119  * blocks. When a new leaf node is needed, a block is taken from this pool
120  * first, regardless of where the IAM hash functions would have mapped the
121  * entry.
122  *
123  * The idle blocks pool is organized as a series of tables, and each table
124  * can be described as follows (on-disk format):
125  *
126  * +---------+---------+---------+---------+------+---------+-------+
127  * |  magic  |  count  |  next   |  logic  |      |  logic  | free  |
128  * |(16 bits)|(16 bits)|  table  |  blk #  | .... |  blk #  | space |
129  * |         |         |(32 bits)|(32 bits)|      |(32 bits)|       |
130  * +---------+---------+---------+---------+------+---------+-------+
131  *
132  * The logic blk# for the first table is stored in the root node "idle_blocks".
133  *
134  */
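
/*
 * A minimal sketch of the idle-table header implied by the layout above.
 * This is illustration only: the authoritative definition of
 * struct iam_idle_head lives in the iam/osd headers; the field names below
 * simply mirror the accessors (iih_magic, iih_count, iih_next, iih_blks)
 * used later in this file, and the widths follow the diagram:
 *
 *	struct iam_idle_head {
 *		__le16	iih_magic;	(IAM_IDLE_HEADER_MAGIC)
 *		__le16	iih_count;	(number of logic blk#s in iih_blks[])
 *		__le32	iih_next;	(logic blk# of the next table, 0 if none)
 *		__le32	iih_blks[];	(idle leaf blocks)
 *	};
 */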
135
136 #include <linux/module.h>
137 #include <linux/fs.h>
138 #include <linux/pagemap.h>
139 #include <linux/time.h>
140 #include <linux/fcntl.h>
141 #include <linux/stat.h>
142 #include <linux/string.h>
143 #include <linux/quotaops.h>
144 #include <linux/buffer_head.h>
145
146 #include <ldiskfs/ldiskfs.h>
147 #include <ldiskfs/xattr.h>
148 #undef ENTRY
149
150 #include "osd_internal.h"
151
152 #include <ldiskfs/acl.h>
153
154 static struct buffer_head *
155 iam_load_idle_blocks(struct iam_container *c, iam_ptr_t blk)
156 {
157         struct inode *inode = c->ic_object;
158         struct iam_idle_head *head;
159         struct buffer_head *bh;
160
161         LASSERT(mutex_is_locked(&c->ic_idle_mutex));
162
163         if (blk == 0)
164                 return NULL;
165
166         bh = __ldiskfs_bread(NULL, inode, blk, 0);
167         if (IS_ERR_OR_NULL(bh)) {
168                 CERROR("%s: cannot load idle blocks, blk = %u: rc = %ld\n",
169                        osd_ino2name(inode), blk, bh ? PTR_ERR(bh) : -EIO);
170                 c->ic_idle_failed = 1;
171                 if (bh == NULL)
172                         bh = ERR_PTR(-EIO);
173                 return bh;
174         }
175
176         head = (struct iam_idle_head *)(bh->b_data);
177         if (le16_to_cpu(head->iih_magic) != IAM_IDLE_HEADER_MAGIC) {
178                 int rc = -EBADF;
179
180                 CERROR("%s: invalid idle block head, blk = %u, magic = %x: rc = %d\n",
181                        osd_ino2name(inode), blk, le16_to_cpu(head->iih_magic),
182                        rc);
183                 brelse(bh);
184                 c->ic_idle_failed = 1;
185                 return ERR_PTR(rc);
186         }
187
188         return bh;
189 }
190
191 /*
192  * Determine the format of the given container. This is done by trying each
193  * supported format's guess function in turn (lvar, then lfix).
194  */
195 static int iam_format_guess(struct iam_container *c)
196 {
197         int result;
198
199         result = iam_lvar_guess(c);
200         if (result)
201                 result = iam_lfix_guess(c);
202
203         if (result == 0) {
204                 struct buffer_head *bh;
205                 __u32 *idle_blocks;
206
207                 LASSERT(c->ic_root_bh != NULL);
208
209                 idle_blocks = (__u32 *)(c->ic_root_bh->b_data +
210                                         c->ic_descr->id_root_gap +
211                                         sizeof(struct dx_countlimit));
212                 mutex_lock(&c->ic_idle_mutex);
213                 bh = iam_load_idle_blocks(c, le32_to_cpu(*idle_blocks));
214                 if (bh != NULL && IS_ERR(bh))
215                         result = PTR_ERR(bh);
216                 else
217                         c->ic_idle_bh = bh;
218                 mutex_unlock(&c->ic_idle_mutex);
219         }
220
221         return result;
222 }
223
224 /*
225  * Initialize container @c.
226  */
227 int iam_container_init(struct iam_container *c,
228                        struct iam_descr *descr, struct inode *inode)
229 {
230         memset(c, 0, sizeof(*c));
231         c->ic_descr = descr;
232         c->ic_object = inode;
233         dynlock_init(&c->ic_tree_lock);
234         mutex_init(&c->ic_idle_mutex);
235         return 0;
236 }
237
238 /*
239  * Determine container format.
240  */
241 int iam_container_setup(struct iam_container *c)
242 {
243         return iam_format_guess(c);
244 }
245
246 /*
247  * Finalize container @c, release all resources.
248  */
249 void iam_container_fini(struct iam_container *c)
250 {
251         brelse(c->ic_idle_bh);
252         c->ic_idle_bh = NULL;
253         brelse(c->ic_root_bh);
254         c->ic_root_bh = NULL;
255 }
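
/*
 * A hedged usage sketch (illustration only, error handling abbreviated) of
 * how the three container functions above fit together. Note that the root
 * buffer head (c->ic_root_bh) must be attached between init and setup,
 * because iam_format_guess() dereferences it; how that happens is outside
 * this sketch:
 *
 *	rc = iam_container_init(c, descr, inode);
 *	if (rc == 0) {
 *		... read the root block and set c->ic_root_bh ...
 *		rc = iam_container_setup(c);
 *	}
 *	... use the container ...
 *	iam_container_fini(c);
 */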
256
257 void iam_path_init(struct iam_path *path, struct iam_container *c,
258                    struct iam_path_descr *pd)
259 {
260         memset(path, 0, sizeof(*path));
261         path->ip_container = c;
262         path->ip_frame = path->ip_frames;
263         path->ip_data = pd;
264         path->ip_leaf.il_path = path;
265 }
266
267 static void iam_leaf_fini(struct iam_leaf *leaf);
268
269 void iam_path_release(struct iam_path *path)
270 {
271         int i;
272
273         for (i = 0; i < ARRAY_SIZE(path->ip_frames); i++) {
274                 if (path->ip_frames[i].bh != NULL) {
275                         path->ip_frames[i].at_shifted = 0;
276                         brelse(path->ip_frames[i].bh);
277                         path->ip_frames[i].bh = NULL;
278                 }
279         }
280 }
281
282 void iam_path_fini(struct iam_path *path)
283 {
284         iam_leaf_fini(&path->ip_leaf);
285         iam_path_release(path);
286 }
287
288
289 void iam_path_compat_init(struct iam_path_compat *path, struct inode *inode)
290 {
291         int i;
292
293         path->ipc_hinfo = &path->ipc_hinfo_area;
294         for (i = 0; i < ARRAY_SIZE(path->ipc_scratch); ++i)
295                 path->ipc_descr.ipd_key_scratch[i] =
296                         (struct iam_ikey *)&path->ipc_scratch[i];
297
298         iam_path_init(&path->ipc_path, &path->ipc_container, &path->ipc_descr);
299 }
300
301 void iam_path_compat_fini(struct iam_path_compat *path)
302 {
303         iam_path_fini(&path->ipc_path);
304 }
305
306 /*
307  * Helper function initializing iam_path_descr and its key scratch area.
308  */
309 struct iam_path_descr *iam_ipd_alloc(void *area, int keysize)
310 {
311         struct iam_path_descr *ipd;
312         void *karea;
313         int i;
314
315         ipd = area;
316         karea = ipd + 1;
317         for (i = 0; i < ARRAY_SIZE(ipd->ipd_key_scratch); ++i, karea += keysize)
318                 ipd->ipd_key_scratch[i] = karea;
319         return ipd;
320 }
321
322 void iam_ipd_free(struct iam_path_descr *ipd)
323 {
324 }
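
/*
 * A hedged usage sketch (illustration only): iam_ipd_alloc() does not
 * allocate anything itself, it only lays an iam_path_descr and its scratch
 * keys out over a caller-provided area, so the area must hold at least
 * sizeof(struct iam_path_descr) plus one key-sized slot per scratch key.
 * "nr_scratch" below is a stand-in for ARRAY_SIZE(ipd->ipd_key_scratch);
 * the real callers use whatever constant the headers provide:
 *
 *	area = kmalloc(sizeof(struct iam_path_descr) + nr_scratch * keysize,
 *		       GFP_NOFS);
 *	if (area != NULL)
 *		ipd = iam_ipd_alloc(area, keysize);
 *	...
 *	iam_ipd_free(ipd);
 *	kfree(area);
 */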
325
326 int iam_node_read(struct iam_container *c, iam_ptr_t ptr,
327                   handle_t *h, struct buffer_head **bh)
328 {
329         /*
330          * NB: this can be called by iam_lfix_guess() at a very early
331          * stage, when c->ic_root_bh and c->ic_descr->id_ops have not
332          * been initialized yet.
333          * Also, we don't have this for IAM dir.
334          */
335         if (c->ic_root_bh != NULL &&
336             c->ic_descr->id_ops->id_root_ptr(c) == ptr) {
337                 get_bh(c->ic_root_bh);
338                 *bh = c->ic_root_bh;
339                 return 0;
340         }
341
342         *bh = __ldiskfs_bread(h, c->ic_object, (int)ptr, 0);
343         if (IS_ERR(*bh))
344                 return PTR_ERR(*bh);
345
346         if (*bh == NULL)
347                 return -EIO;
348
349         return 0;
350 }
351
352 /*
353  * Return pointer to current leaf record. Pointer is valid while corresponding
354  * leaf node is locked and pinned.
355  */
356 static struct iam_rec *iam_leaf_rec(const struct iam_leaf *leaf)
357 {
358         return iam_leaf_ops(leaf)->rec(leaf);
359 }
360
361 /*
362  * Return pointer to the current leaf key. This function returns pointer to
363  * the key stored in node.
364  *
365  * Caller should assume that returned pointer is only valid while leaf node is
366  * pinned and locked.
367  */
368 static struct iam_key *iam_leaf_key(const struct iam_leaf *leaf)
369 {
370         return iam_leaf_ops(leaf)->key(leaf);
371 }
372
373 static int iam_leaf_key_size(const struct iam_leaf *leaf)
374 {
375         return iam_leaf_ops(leaf)->key_size(leaf);
376 }
377
378 static struct iam_ikey *iam_leaf_ikey(const struct iam_leaf *leaf,
379                                       struct iam_ikey *key)
380 {
381         return iam_leaf_ops(leaf)->ikey(leaf, key);
382 }
383
384 static int iam_leaf_keycmp(const struct iam_leaf *leaf,
385                            const struct iam_key *key)
386 {
387         return iam_leaf_ops(leaf)->key_cmp(leaf, key);
388 }
389
390 static int iam_leaf_keyeq(const struct iam_leaf *leaf,
391                           const struct iam_key *key)
392 {
393         return iam_leaf_ops(leaf)->key_eq(leaf, key);
394 }
395
396 #if LDISKFS_INVARIANT_ON
397 static int iam_path_check(struct iam_path *p)
398 {
399         int i;
400         int result;
401         struct iam_frame *f;
402         struct iam_descr *param;
403
404         result = 1;
405         param = iam_path_descr(p);
406         for (i = 0; result && i < ARRAY_SIZE(p->ip_frames); ++i) {
407                 f = &p->ip_frames[i];
408                 if (f->bh != NULL) {
409                         result = dx_node_check(p, f);
410                         if (result)
411                                 result = !param->id_ops->id_node_check(p, f);
412                 }
413         }
414         if (result && p->ip_leaf.il_bh != NULL)
415                 result = 1;
416         if (result == 0)
417                 ldiskfs_std_error(iam_path_obj(p)->i_sb, result);
418
419         return result;
420 }
421 #endif
422
423 static int iam_leaf_load(struct iam_path *path)
424 {
425         iam_ptr_t block;
426         int err;
427         struct iam_container *c;
428         struct buffer_head *bh;
429         struct iam_leaf *leaf;
430         struct iam_descr *descr;
431
432         c     = path->ip_container;
433         leaf  = &path->ip_leaf;
434         descr = iam_path_descr(path);
435         block = path->ip_frame->leaf;
436         if (block == 0) {
437                 /* XXX bug 11027 */
438                 pr_err("wrong leaf: %lu %d [%p %p %p]\n",
439                        (unsigned long)path->ip_frame->leaf,
440                        dx_get_count(dx_node_get_entries(path, path->ip_frame)),
441                        path->ip_frames[0].bh, path->ip_frames[1].bh,
442                        path->ip_frames[2].bh);
443         }
444         err = descr->id_ops->id_node_read(c, block, NULL, &bh);
445         if (err == 0) {
446                 leaf->il_bh = bh;
447                 leaf->il_curidx = block;
448                 err = iam_leaf_ops(leaf)->init(leaf);
449         }
450         return err;
451 }
452
453 static void iam_unlock_htree(struct iam_container *ic,
454                              struct dynlock_handle *lh)
455 {
456         if (lh != NULL)
457                 dynlock_unlock(&ic->ic_tree_lock, lh);
458 }
459
460
461 static void iam_leaf_unlock(struct iam_leaf *leaf)
462 {
463         if (leaf->il_lock != NULL) {
464                 iam_unlock_htree(iam_leaf_container(leaf),
465                                  leaf->il_lock);
466                 do_corr(schedule());
467                 leaf->il_lock = NULL;
468         }
469 }
470
471 static void iam_leaf_fini(struct iam_leaf *leaf)
472 {
473         if (leaf->il_path != NULL) {
474                 iam_leaf_unlock(leaf);
475                 iam_leaf_ops(leaf)->fini(leaf);
476                 if (leaf->il_bh) {
477                         brelse(leaf->il_bh);
478                         leaf->il_bh = NULL;
479                         leaf->il_curidx = 0;
480                 }
481         }
482 }
483
484 static void iam_leaf_start(struct iam_leaf *folio)
485 {
486         iam_leaf_ops(folio)->start(folio);
487 }
488
489 void iam_leaf_next(struct iam_leaf *folio)
490 {
491         iam_leaf_ops(folio)->next(folio);
492 }
493
494 static void iam_leaf_rec_add(struct iam_leaf *leaf, const struct iam_key *key,
495                              const struct iam_rec *rec)
496 {
497         iam_leaf_ops(leaf)->rec_add(leaf, key, rec);
498 }
499
500 static void iam_rec_del(struct iam_leaf *leaf, int shift)
501 {
502         iam_leaf_ops(leaf)->rec_del(leaf, shift);
503 }
504
505 int iam_leaf_at_end(const struct iam_leaf *leaf)
506 {
507         return iam_leaf_ops(leaf)->at_end(leaf);
508 }
509
510 static void iam_leaf_split(struct iam_leaf *l, struct buffer_head **bh,
511                            iam_ptr_t nr)
512 {
513         iam_leaf_ops(l)->split(l, bh, nr);
514 }
515
516 static inline int iam_leaf_empty(struct iam_leaf *l)
517 {
518         return iam_leaf_ops(l)->leaf_empty(l);
519 }
520
521 int iam_leaf_can_add(const struct iam_leaf *l,
522                      const struct iam_key *k, const struct iam_rec *r)
523 {
524         return iam_leaf_ops(l)->can_add(l, k, r);
525 }
526
527 static int iam_txn_dirty(handle_t *handle,
528                          struct iam_path *path, struct buffer_head *bh)
529 {
530         int result;
531
532         result = ldiskfs_handle_dirty_metadata(handle, NULL, bh);
533         if (result != 0)
534                 ldiskfs_std_error(iam_path_obj(path)->i_sb, result);
535         return result;
536 }
537
538 static int iam_txn_add(handle_t *handle,
539                        struct iam_path *path, struct buffer_head *bh)
540 {
541         int result;
542         struct super_block *sb = iam_path_obj(path)->i_sb;
543
544         result = osd_ldiskfs_journal_get_write_access(handle, sb, bh,
545                                                       LDISKFS_JTR_NONE);
546         if (result != 0)
547                 ldiskfs_std_error(sb, result);
548         return result;
549 }
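
/*
 * A minimal sketch (illustration only) of how the two transaction helpers
 * above are meant to be paired; iam_it_rec_set() further below follows
 * exactly this pattern:
 *
 *	result = iam_txn_add(handle, path, bh);		(get write access)
 *	if (result == 0) {
 *		... modify bh->b_data ...
 *		result = iam_txn_dirty(handle, path, bh);	(mark dirty)
 *	}
 */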
550
551 /* iterator interface */
552 static enum iam_it_state it_state(const struct iam_iterator *it)
553 {
554         return it->ii_state;
555 }
556
557 /*
558  * Helper function returning the container an iterator is attached to.
559  */
560 static struct iam_container *iam_it_container(const struct iam_iterator *it)
561 {
562         return it->ii_path.ip_container;
563 }
564
565 static inline int it_keycmp(const struct iam_iterator *it,
566                             const struct iam_key *k)
567 {
568         return iam_leaf_keycmp(&it->ii_path.ip_leaf, k);
569 }
570
571 static inline int it_keyeq(const struct iam_iterator *it,
572                            const struct iam_key *k)
573 {
574         return iam_leaf_keyeq(&it->ii_path.ip_leaf, k);
575 }
576
577 static int it_ikeycmp(const struct iam_iterator *it, const struct iam_ikey *ik)
578 {
579         return iam_ikeycmp(it->ii_path.ip_container,
580                            iam_leaf_ikey(&it->ii_path.ip_leaf,
581                                         iam_path_ikey(&it->ii_path, 0)), ik);
582 }
583
584 static inline int it_at_rec(const struct iam_iterator *it)
585 {
586         return !iam_leaf_at_end(&it->ii_path.ip_leaf);
587 }
588
589 static inline int it_before(const struct iam_iterator *it)
590 {
591         return it_state(it) == IAM_IT_SKEWED && it_at_rec(it);
592 }
593
594 /*
595  * Helper wrapper around iam_it_get(): returns 0 (success) only when record
596  * with exactly the same key as asked is found.
597  */
598 static int iam_it_get_exact(struct iam_iterator *it, const struct iam_key *k)
599 {
600         int result;
601
602         result = iam_it_get(it, k);
603         if (result > 0)
604                 result = 0;
605         else if (result == 0)
606                 /*
607                  * Return -ENOENT if cursor is located above record with a key
608                  * different from one specified, or in the empty leaf.
609                  *
610                  * XXX returning -ENOENT only works if iam_it_get() never
611                  * returns -ENOENT as a legitimate error.
612                  */
613                 result = -ENOENT;
614         return result;
615 }
616
617 /*
618  * Initialize iterator to IAM_IT_DETACHED state.
619  *
620  * postcondition: it_state(it) == IAM_IT_DETACHED
621  */
622 int  iam_it_init(struct iam_iterator *it, struct iam_container *c, __u32 flags,
623                  struct iam_path_descr *pd)
624 {
625         memset(it, 0, sizeof(*it));
626         it->ii_flags  = flags;
627         it->ii_state  = IAM_IT_DETACHED;
628         iam_path_init(&it->ii_path, c, pd);
629         return 0;
630 }
631
632 /*
633  * Finalize iterator and release all resources.
634  *
635  * precondition: it_state(it) == IAM_IT_DETACHED
636  */
637 void iam_it_fini(struct iam_iterator *it)
638 {
639         assert_corr(it_state(it) == IAM_IT_DETACHED);
640         iam_path_fini(&it->ii_path);
641 }
642
643 /*
644  * These locking primitives are used to protect parts of the directory's
645  * htree. The protection unit is a block: either a leaf or an index node.
646  */
647 static struct dynlock_handle *iam_lock_htree(struct iam_container *ic,
648                                              unsigned long value,
649                                              enum dynlock_type lt)
650 {
651         return dynlock_lock(&ic->ic_tree_lock, value, lt, GFP_NOFS);
652 }
653
654 static int iam_index_lock(struct iam_path *path, struct dynlock_handle **lh)
655 {
656         struct iam_frame *f;
657
658         for (f = path->ip_frame; f >= path->ip_frames; --f, ++lh) {
659                 do_corr(schedule());
660                 *lh = iam_lock_htree(path->ip_container, f->curidx, DLT_READ);
661                 if (*lh == NULL)
662                         return -ENOMEM;
663         }
664         return 0;
665 }
666
667 /*
668  * Fast check for frame consistency.
669  */
670 static int iam_check_fast(struct iam_path *path, struct iam_frame *frame)
671 {
672         struct iam_container *bag;
673         struct iam_entry *next;
674         struct iam_entry *last;
675         struct iam_entry *entries;
676         struct iam_entry *at;
677
678         bag = path->ip_container;
679         at = frame->at;
680         entries = frame->entries;
681         last = iam_entry_shift(path, entries, dx_get_count(entries) - 1);
682
683         if (unlikely(at > last))
684                 return -EAGAIN;
685
686         if (unlikely(dx_get_block(path, at) != frame->leaf))
687                 return -EAGAIN;
688
689         if (unlikely(iam_ikeycmp(bag, iam_ikey_at(path, at),
690                      path->ip_ikey_target) > 0))
691                 return -EAGAIN;
692
693         next = iam_entry_shift(path, at, +1);
694         if (next <= last) {
695                 if (unlikely(iam_ikeycmp(bag, iam_ikey_at(path, next),
696                                          path->ip_ikey_target) <= 0))
697                         return -EAGAIN;
698         }
699         return 0;
700 }
701
702 int dx_index_is_compat(struct iam_path *path)
703 {
704         return iam_path_descr(path) == NULL;
705 }
706
707 /*
708  * iam_find_position
709  *
710  * Search for the position of the specified hash in an index node.
711  *
712  */
713
714 static struct iam_entry *iam_find_position(struct iam_path *path,
715                                            struct iam_frame *frame)
716 {
717         int count;
718         struct iam_entry *p;
719         struct iam_entry *q;
720         struct iam_entry *m;
721
722         count = dx_get_count(frame->entries);
723         assert_corr(count && count <= dx_get_limit(frame->entries));
724         p = iam_entry_shift(path, frame->entries,
725                             dx_index_is_compat(path) ? 1 : 2);
726         q = iam_entry_shift(path, frame->entries, count - 1);
727         while (p <= q) {
728                 m = iam_entry_shift(path, p, iam_entry_diff(path, q, p) / 2);
729                 if (iam_ikeycmp(path->ip_container, iam_ikey_at(path, m),
730                                 path->ip_ikey_target) > 0)
731                         q = iam_entry_shift(path, m, -1);
732                 else
733                         p = iam_entry_shift(path, m, +1);
734         }
735         return iam_entry_shift(path, p, -1);
736 }
737
738
739
740 static iam_ptr_t iam_find_ptr(struct iam_path *path, struct iam_frame *frame)
741 {
742         return dx_get_block(path, iam_find_position(path, frame));
743 }
744
745 void iam_insert_key(struct iam_path *path, struct iam_frame *frame,
746                     const struct iam_ikey *key, iam_ptr_t ptr)
747 {
748         struct iam_entry *entries = frame->entries;
749         struct iam_entry *new = iam_entry_shift(path, frame->at, +1);
750         int count = dx_get_count(entries);
751
752         /*
753          * Unfortunately we cannot assert this, as this function is sometimes
754          * called by VFS under i_sem and without pdirops lock.
755          */
756         assert_corr(1 || iam_frame_is_locked(path, frame));
757         assert_corr(count < dx_get_limit(entries));
758         assert_corr(frame->at < iam_entry_shift(path, entries, count));
759         assert_inv(dx_node_check(path, frame));
760         /* Prevent memory corruption outside of buffer_head */
761         BUG_ON(count >= dx_get_limit(entries));
762         BUG_ON((char *)iam_entry_shift(path, entries, count + 1) >
763                (frame->bh->b_data + frame->bh->b_size));
764
765         memmove(iam_entry_shift(path, new, 1), new,
766                 (char *)iam_entry_shift(path, entries, count) - (char *)new);
767         dx_set_ikey(path, new, key);
768         dx_set_block(path, new, ptr);
769         dx_set_count(entries, count + 1);
770
771         BUG_ON(count > dx_get_limit(entries));
772         assert_inv(dx_node_check(path, frame));
773 }
774
775 void iam_insert_key_lock(struct iam_path *path, struct iam_frame *frame,
776                          const struct iam_ikey *key, iam_ptr_t ptr)
777 {
778         iam_lock_bh(frame->bh);
779         iam_insert_key(path, frame, key, ptr);
780         iam_unlock_bh(frame->bh);
781 }
782 /*
783  * returns 0 if path was unchanged, -EAGAIN otherwise.
784  */
785 static int iam_check_path(struct iam_path *path, struct iam_frame *frame)
786 {
787         int equal;
788
789         iam_lock_bh(frame->bh);
790         equal = iam_check_fast(path, frame) == 0 ||
791                 frame->leaf == iam_find_ptr(path, frame);
792         DX_DEVAL(iam_lock_stats.dls_bh_again += !equal);
793         iam_unlock_bh(frame->bh);
794
795         return equal ? 0 : -EAGAIN;
796 }
797
798 static int iam_lookup_try(struct iam_path *path)
799 {
800         u32 ptr;
801         int err = 0;
802         int i;
803
804         struct iam_descr *param;
805         struct iam_frame *frame;
806         struct iam_container *c;
807
808         param = iam_path_descr(path);
809         c = path->ip_container;
810
811         ptr = param->id_ops->id_root_ptr(c);
812         for (frame = path->ip_frames, i = 0; i <= path->ip_indirect;
813              ++frame, ++i) {
814                 err = param->id_ops->id_node_read(c, (iam_ptr_t)ptr, NULL,
815                                                   &frame->bh);
816                 do_corr(schedule());
817
818                 iam_lock_bh(frame->bh);
819                 /*
820                  * node must be initialized under bh lock because concurrent
821                  * creation procedure may change it and iam_lookup_try() will
822                  * see obsolete tree height. -bzzz
823                  */
824                 if (err != 0)
825                         break;
826
827                 if (LDISKFS_INVARIANT_ON) {
828                         err = param->id_ops->id_node_check(path, frame);
829                         if (err != 0)
830                                 break;
831                 }
832
833                 err = param->id_ops->id_node_load(path, frame);
834                 if (err != 0)
835                         break;
836
837                 assert_inv(dx_node_check(path, frame));
838                 /*
839                  * splitting may change the root index block and move the hash
840                  * we're looking for into another index block, so we have to
841                  * check this and repeat from the beginning if the path changed
842                  * -bzzz
843                  */
844                 if (i > 0) {
845                         err = iam_check_path(path, frame - 1);
846                         if (err != 0)
847                                 break;
848                 }
849
850                 frame->at = iam_find_position(path, frame);
851                 frame->curidx = ptr;
852                 frame->leaf = ptr = dx_get_block(path, frame->at);
853
854                 iam_unlock_bh(frame->bh);
855                 do_corr(schedule());
856         }
857         if (err != 0)
858                 iam_unlock_bh(frame->bh);
859         path->ip_frame = --frame;
860         return err;
861 }
862
863 static int __iam_path_lookup(struct iam_path *path)
864 {
865         int err;
866         int i;
867
868         for (i = 0; i < DX_MAX_TREE_HEIGHT; ++i)
869                 assert(path->ip_frames[i].bh == NULL);
870
871         do {
872                 err = iam_lookup_try(path);
873                 do_corr(schedule());
874                 if (err != 0)
875                         iam_path_fini(path);
876         } while (err == -EAGAIN);
877
878         return err;
879 }
880
881 /*
882  * returns 0 if path was unchanged, -EAGAIN otherwise.
883  */
884 static int iam_check_full_path(struct iam_path *path, int search)
885 {
886         struct iam_frame *bottom;
887         struct iam_frame *scan;
888         int i;
889         int result;
890
891         do_corr(schedule());
892
893         for (bottom = path->ip_frames, i = 0;
894              i < DX_MAX_TREE_HEIGHT && bottom->bh != NULL; ++bottom, ++i) {
895                 ; /* find last filled in frame */
896         }
897
898         /* Lock frames, bottom to top.  */
899         for (scan = bottom - 1; scan >= path->ip_frames; --scan)
900                 iam_lock_bh(scan->bh);
901         /* Check them top to bottom.  */
902         result = 0;
903         for (scan = path->ip_frames; scan < bottom; ++scan) {
904                 struct iam_entry *pos;
905
906                 if (search) {
907                         if (iam_check_fast(path, scan) == 0)
908                                 continue;
909
910                         pos = iam_find_position(path, scan);
911                         if (scan->leaf != dx_get_block(path, pos)) {
912                                 result = -EAGAIN;
913                                 break;
914                         }
915                         scan->at = pos;
916                 } else {
917                         pos = iam_entry_shift(path, scan->entries,
918                                               dx_get_count(scan->entries) - 1);
919                         if (scan->at > pos ||
920                             scan->leaf != dx_get_block(path, scan->at)) {
921                                 result = -EAGAIN;
922                                 break;
923                         }
924                 }
925         }
926
927         /* Unlock top to bottom.  */
928         for (scan = path->ip_frames; scan < bottom; ++scan)
929                 iam_unlock_bh(scan->bh);
930         DX_DEVAL(iam_lock_stats.dls_bh_full_again += !!result);
931         do_corr(schedule());
932
933         return result;
934 }
935
936
937 /*
938  * Performs path lookup and returns with found leaf (if any) locked by htree
939  * lock.
940  */
941 static int iam_lookup_lock(struct iam_path *path,
942                            struct dynlock_handle **dl, enum dynlock_type lt)
943 {
944         int result;
945
946         while ((result = __iam_path_lookup(path)) == 0) {
947                 do_corr(schedule());
948                 *dl = iam_lock_htree(path->ip_container, path->ip_frame->leaf,
949                                      lt);
950                 if (*dl == NULL) {
951                         iam_path_fini(path);
952                         result = -ENOMEM;
953                         break;
954                 }
955                 do_corr(schedule());
956                 /*
957                  * while locking leaf we just found may get split so we need
958                  * to check this -bzzz
959                  */
960                 if (iam_check_full_path(path, 1) == 0)
961                         break;
962                 iam_unlock_htree(path->ip_container, *dl);
963                 *dl = NULL;
964                 iam_path_fini(path);
965         }
966         return result;
967 }
968 /*
969  * Performs tree top-to-bottom traversal starting from root, and loads leaf
970  * node.
971  */
972 static int iam_path_lookup(struct iam_path *path, int index)
973 {
974         struct iam_leaf  *leaf;
975         int result;
976
977         leaf = &path->ip_leaf;
978         result = iam_lookup_lock(path, &leaf->il_lock, DLT_WRITE);
979         assert_inv(iam_path_check(path));
980         do_corr(schedule());
981         if (result == 0) {
982                 result = iam_leaf_load(path);
983                 if (result == 0) {
984                         do_corr(schedule());
985                         if (index)
986                                 result = iam_leaf_ops(leaf)->
987                                         ilookup(leaf, path->ip_ikey_target);
988                         else
989                                 result = iam_leaf_ops(leaf)->
990                                         lookup(leaf, path->ip_key_target);
991                         do_corr(schedule());
992                 }
993                 if (result < 0)
994                         iam_leaf_unlock(leaf);
995         }
996         return result;
997 }
998
999 /*
1000  * Common part of iam_it_{i,}get().
1001  */
1002 static int __iam_it_get(struct iam_iterator *it, int index)
1003 {
1004         int result;
1005
1006         assert_corr(it_state(it) == IAM_IT_DETACHED);
1007
1008         result = iam_path_lookup(&it->ii_path, index);
1009         if (result >= 0) {
1010                 int collision;
1011
1012                 collision = result & IAM_LOOKUP_LAST;
1013                 switch (result & ~IAM_LOOKUP_LAST) {
1014                 case IAM_LOOKUP_EXACT:
1015                         result = 1;
1016                         it->ii_state = IAM_IT_ATTACHED;
1017                         break;
1018                 case IAM_LOOKUP_OK:
1019                         result = 0;
1020                         it->ii_state = IAM_IT_ATTACHED;
1021                         break;
1022                 case IAM_LOOKUP_BEFORE:
1023                 case IAM_LOOKUP_EMPTY:
1024                         result = 0;
1025                         it->ii_state = IAM_IT_SKEWED;
1026                         break;
1027                 default:
1028                         assert(0);
1029                 }
1030                 result |= collision;
1031         }
1032         /* See iam_it_get_exact() for explanation.  */
1033         assert_corr(result != -ENOENT);
1034         return result;
1035 }
1036
1037 /*
1038  * The correct hash, but not the same key, was found; iterate through the
1039  * hash collision chain, looking for the correct record.
1040  */
1041 static int iam_it_collision(struct iam_iterator *it)
1042 {
1043         int result;
1044
1045         assert(ergo(it_at_rec(it), !it_keyeq(it, it->ii_path.ip_key_target)));
1046
1047         while ((result = iam_it_next(it)) == 0) {
1048                 do_corr(schedule());
1049                 if (it_ikeycmp(it, it->ii_path.ip_ikey_target) != 0)
1050                         return -ENOENT;
1051                 if (it_keyeq(it, it->ii_path.ip_key_target))
1052                         return 0;
1053         }
1054         return result;
1055 }
1056
1057 /*
1058  * Attach iterator. After successful completion, @it points to record with
1059  * least key not larger than @k.
1060  *
1061  * Return value: 0: positioned on existing record,
1062  *             +ve: exact position found,
1063  *             -ve: error.
1064  *
1065  * precondition:  it_state(it) == IAM_IT_DETACHED
1066  * postcondition: ergo(result == 0 && it_state(it) == IAM_IT_ATTACHED,
1067  *                     it_keycmp(it, k) <= 0)
1068  */
1069 int iam_it_get(struct iam_iterator *it, const struct iam_key *k)
1070 {
1071         int result;
1072
1073         assert_corr(it_state(it) == IAM_IT_DETACHED);
1074
1075         it->ii_path.ip_ikey_target = NULL;
1076         it->ii_path.ip_key_target  = k;
1077
1078         result = __iam_it_get(it, 0);
1079
1080         if (result == IAM_LOOKUP_LAST) {
1081                 result = iam_it_collision(it);
1082                 if (result != 0) {
1083                         iam_it_put(it);
1084                         iam_it_fini(it);
1085                         result = __iam_it_get(it, 0);
1086                 } else
1087                         result = 1;
1088         }
1089         if (result > 0)
1090                 result &= ~IAM_LOOKUP_LAST;
1091
1092         assert_corr(ergo(result > 0, it_keycmp(it, k) == 0));
1093         assert_corr(ergo(result == 0 && it_state(it) == IAM_IT_ATTACHED,
1094                     it_keycmp(it, k) <= 0));
1095         return result;
1096 }
1097
1098 /*
1099  * Attach iterator by index key.
1100  */
1101 static int iam_it_iget(struct iam_iterator *it, const struct iam_ikey *k)
1102 {
1103         assert_corr(it_state(it) == IAM_IT_DETACHED);
1104
1105         it->ii_path.ip_ikey_target = k;
1106         return __iam_it_get(it, 1) & ~IAM_LOOKUP_LAST;
1107 }
1108
1109 /*
1110  * Attach iterator, and ensure it points to a record (not skewed).
1111  *
1112  * Return value: 0: positioned on existing record,
1113  *             +ve: exact position found,
1114  *             -ve: error.
1115  *
1116  * precondition:  it_state(it) == IAM_IT_DETACHED &&
1117  *                !(it->ii_flags&IAM_IT_WRITE)
1118  * postcondition: ergo(result == 0, it_state(it) == IAM_IT_ATTACHED)
1119  */
1120 int iam_it_get_at(struct iam_iterator *it, const struct iam_key *k)
1121 {
1122         int result;
1123
1124         assert_corr(it_state(it) == IAM_IT_DETACHED &&
1125                     !(it->ii_flags&IAM_IT_WRITE));
1126         result = iam_it_get(it, k);
1127         if (result == 0) {
1128                 if (it_state(it) != IAM_IT_ATTACHED) {
1129                         assert_corr(it_state(it) == IAM_IT_SKEWED);
1130                         result = iam_it_next(it);
1131                 }
1132         }
1133         assert_corr(ergo(result >= 0, it_state(it) == IAM_IT_ATTACHED));
1134         return result;
1135 }
1136
1137 /*
1138  * Duplicates iterator.
1139  *
1140  * postcondition: it_state(dst) == it_state(src) &&
1141  *                iam_it_container(dst) == iam_it_container(src) &&
1142  *                dst->ii_flags == src->ii_flags &&
1143  *                ergo(it_state(src) == IAM_IT_ATTACHED,
1144  *                     iam_it_rec_get(dst) == iam_it_rec_get(src) &&
1145  *                     iam_it_key_get(dst) == iam_it_key_get(src))
1146  */
1147 void iam_it_dup(struct iam_iterator *dst, const struct iam_iterator *src)
1148 {
1149         dst->ii_flags = src->ii_flags;
1150         dst->ii_state = src->ii_state;
1151         /* XXX not yet. iam_path_dup(&dst->ii_path, &src->ii_path); */
1152         /*
1153          * XXX: duplicate lock.
1154          */
1155         assert_corr(it_state(dst) == it_state(src));
1156         assert_corr(iam_it_container(dst) == iam_it_container(src));
1157         assert_corr(dst->ii_flags == src->ii_flags);
1158         assert_corr(ergo(it_state(src) == IAM_IT_ATTACHED,
1159                     iam_it_rec_get(dst) == iam_it_rec_get(src) &&
1160                     iam_it_key_get(dst) == iam_it_key_get(src)));
1161 }
1162
1163 /*
1164  * Detach iterator. Does nothing in detached state.
1165  *
1166  * postcondition: it_state(it) == IAM_IT_DETACHED
1167  */
1168 void iam_it_put(struct iam_iterator *it)
1169 {
1170         if (it->ii_state != IAM_IT_DETACHED) {
1171                 it->ii_state = IAM_IT_DETACHED;
1172                 iam_leaf_fini(&it->ii_path.ip_leaf);
1173         }
1174 }
1175
1176 static struct iam_ikey *iam_it_ikey_get(const struct iam_iterator *it,
1177                                         struct iam_ikey *ikey);
1178
1179
1180 /*
1181  * This function increments the frame pointer to search the next leaf
1182  * block, and reads in the necessary intervening nodes if the search
1183  * should be necessary.  Whether or not the search is necessary is
1184  * controlled by the hash parameter.  If the hash value is even, then
1185  * the search is only continued if the next block starts with that
1186  * hash value.  This is used if we are searching for a specific file.
1187  *
1188  * If the hash value is HASH_NB_ALWAYS, then always go to the next block.
1189  *
1190  * This function returns 1 if the caller should continue to search,
1191  * or 0 if it should not.  If there is an error reading one of the
1192  * index blocks, it will return a negative error code.
1193  *
1194  * If start_hash is non-null, it will be filled in with the starting
1195  * hash of the next page.
1196  */
1197 static int iam_htree_advance(struct inode *dir, __u32 hash,
1198                               struct iam_path *path, __u32 *start_hash,
1199                               int compat)
1200 {
1201         struct iam_frame *p;
1202         struct buffer_head *bh;
1203         int err, num_frames = 0;
1204         __u32 bhash;
1205
1206         p = path->ip_frame;
1207         /*
1208          * Find the next leaf page by incrementing the frame pointer.
1209          * If we run out of entries in the interior node, loop around and
1210          * increment pointer in the parent node.  When we break out of
1211          * this loop, num_frames indicates the number of interior
1212  * nodes that need to be read.
1213          */
1214         while (1) {
1215                 do_corr(schedule());
1216                 iam_lock_bh(p->bh);
1217                 if (p->at_shifted)
1218                         p->at_shifted = 0;
1219                 else
1220                         p->at = iam_entry_shift(path, p->at, +1);
1221                 if (p->at < iam_entry_shift(path, p->entries,
1222                                             dx_get_count(p->entries))) {
1223                         p->leaf = dx_get_block(path, p->at);
1224                         iam_unlock_bh(p->bh);
1225                         break;
1226                 }
1227                 iam_unlock_bh(p->bh);
1228                 if (p == path->ip_frames)
1229                         return 0;
1230                 num_frames++;
1231                 --p;
1232         }
1233
1234         if (compat) {
1235                 /* Htree hash magic.  */
1236
1237                 /*
1238                  * If the hash is 1, then continue only if the next page has a
1239                  * continuation hash of any value.  This is used for readdir
1240                  * handling.  Otherwise, check to see if the hash matches the
1241  * desired continuation hash.  If it doesn't, return since
1242  * there's no point in reading the successive index pages.
1243                  */
1244                 dx_get_ikey(path, p->at, (struct iam_ikey *)&bhash);
1245                 if (start_hash)
1246                         *start_hash = bhash;
1247                 if ((hash & 1) == 0) {
1248                         if ((bhash & ~1) != hash)
1249                                 return 0;
1250                 }
1251         }
1252         /*
1253          * If the hash is HASH_NB_ALWAYS, we always go to the next
1254          * block so no check is necessary
1255          */
1256         while (num_frames--) {
1257                 iam_ptr_t idx;
1258
1259                 do_corr(schedule());
1260                 iam_lock_bh(p->bh);
1261                 idx = p->leaf = dx_get_block(path, p->at);
1262                 iam_unlock_bh(p->bh);
1263                 err = iam_path_descr(path)->id_ops->
1264                         id_node_read(path->ip_container, idx, NULL, &bh);
1265                 if (err != 0)
1266                         return err; /* Failure */
1267                 ++p;
1268                 brelse(p->bh);
1269                 assert_corr(p->bh != bh);
1270                 p->bh = bh;
1271                 p->entries = dx_node_get_entries(path, p);
1272                 p->at = iam_entry_shift(path, p->entries, !compat);
1273                 assert_corr(p->curidx != idx);
1274                 p->curidx = idx;
1275                 iam_lock_bh(p->bh);
1276                 assert_corr(p->leaf != dx_get_block(path, p->at));
1277                 p->leaf = dx_get_block(path, p->at);
1278                 iam_unlock_bh(p->bh);
1279                 assert_inv(dx_node_check(path, p));
1280         }
1281         return 1;
1282 }
1283
1284 static inline int iam_index_advance(struct iam_path *path)
1285 {
1286         return iam_htree_advance(iam_path_obj(path), 0, path, NULL, 0);
1287 }
1288
1289 static void iam_unlock_array(struct iam_container *ic,
1290                              struct dynlock_handle **lh)
1291 {
1292         int i;
1293
1294         for (i = 0; i < DX_MAX_TREE_HEIGHT; ++i, ++lh) {
1295                 if (*lh != NULL) {
1296                         iam_unlock_htree(ic, *lh);
1297                         *lh = NULL;
1298                 }
1299         }
1300 }
1301 /*
1302  * Advance index part of @path to point to the next leaf. Returns 1 on
1303  * success, 0 when the end of the container was reached. Leaf node is locked.
1304  */
1305 int iam_index_next(struct iam_container *c, struct iam_path *path)
1306 {
1307         iam_ptr_t cursor;
1308         struct dynlock_handle *lh[DX_MAX_TREE_HEIGHT] = { NULL, };
1309         int result;
1310
1311         /* Locking for iam_index_next()... is to be described.  */
1312
1313         cursor = path->ip_frame->leaf;
1314
1315         while (1) {
1316                 result = iam_index_lock(path, lh);
1317                 do_corr(schedule());
1318                 if (result < 0)
1319                         break;
1320
1321                 result = iam_check_full_path(path, 0);
1322                 if (result == 0 && cursor == path->ip_frame->leaf) {
1323                         result = iam_index_advance(path);
1324
1325                         assert_corr(result == 0 ||
1326                                     cursor != path->ip_frame->leaf);
1327                         break;
1328                 }
1329                 do {
1330                         iam_unlock_array(c, lh);
1331
1332                         iam_path_release(path);
1333                         do_corr(schedule());
1334
1335                         result = __iam_path_lookup(path);
1336                         if (result < 0)
1337                                 break;
1338
1339                         while (path->ip_frame->leaf != cursor) {
1340                                 do_corr(schedule());
1341
1342                                 result = iam_index_lock(path, lh);
1343                                 do_corr(schedule());
1344                                 if (result < 0)
1345                                         break;
1346
1347                                 result = iam_check_full_path(path, 0);
1348                                 if (result != 0)
1349                                         break;
1350
1351                                 result = iam_index_advance(path);
1352                                 if (result == 0) {
1353                                         CERROR("cannot find cursor : %u\n",
1354                                                 cursor);
1355                                         result = -EIO;
1356                                 }
1357                                 if (result < 0)
1358                                         break;
1359                                 result = iam_check_full_path(path, 0);
1360                                 if (result != 0)
1361                                         break;
1362                                 iam_unlock_array(c, lh);
1363                         }
1364                 } while (result == -EAGAIN);
1365                 if (result < 0)
1366                         break;
1367         }
1368         iam_unlock_array(c, lh);
1369         return result;
1370 }
1371
1372 /*
1373  * Move iterator one record right.
1374  *
1375  * Return value: 0: success,
1376  *              +1: end of container reached
1377  *             -ve: error
1378  *
1379  * precondition:  (it_state(it) == IAM_IT_ATTACHED ||
1380  *                 it_state(it) == IAM_IT_SKEWED) && it->ii_flags&IAM_IT_MOVE
1381  * postcondition: ergo(result == 0, it_state(it) == IAM_IT_ATTACHED) &&
1382  *                ergo(result >  0, it_state(it) == IAM_IT_DETACHED)
1383  */
1384 int iam_it_next(struct iam_iterator *it)
1385 {
1386         int result;
1387         struct iam_path *path;
1388         struct iam_leaf *leaf;
1389
1390         do_corr(struct iam_ikey *ik_orig);
1391
1392         /* assert_corr(it->ii_flags&IAM_IT_MOVE); */
1393         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1394                     it_state(it) == IAM_IT_SKEWED);
1395
1396         path = &it->ii_path;
1397         leaf = &path->ip_leaf;
1398
1399         assert_corr(iam_leaf_is_locked(leaf));
1400
1401         result = 0;
1402         do_corr(ik_orig = it_at_rec(it) ?
1403                 iam_it_ikey_get(it, iam_path_ikey(path, 2)) : NULL);
1404         if (it_before(it)) {
1405                 assert_corr(!iam_leaf_at_end(leaf));
1406                 it->ii_state = IAM_IT_ATTACHED;
1407         } else {
1408                 if (!iam_leaf_at_end(leaf))
1409                         /* advance within leaf node */
1410                         iam_leaf_next(leaf);
1411                 /* multiple iterations may be necessary due to empty leaves. */
1412                 while (result == 0 && iam_leaf_at_end(leaf)) {
1413                         do_corr(schedule());
1414                         /* advance index portion of the path */
1415                         result = iam_index_next(iam_it_container(it), path);
1416                         assert_corr(iam_leaf_is_locked(leaf));
1417                         if (result == 1) {
1418                                 struct dynlock_handle *lh;
1419
1420                                 lh = iam_lock_htree(iam_it_container(it),
1421                                                     path->ip_frame->leaf,
1422                                                     DLT_WRITE);
1423                                 if (lh != NULL) {
1424                                         iam_leaf_fini(leaf);
1425                                         leaf->il_lock = lh;
1426                                         result = iam_leaf_load(path);
1427                                         if (result == 0)
1428                                                 iam_leaf_start(leaf);
1429                                 } else
1430                                         result = -ENOMEM;
1431                         } else if (result == 0)
1432                                 /* end of container reached */
1433                                 result = 1;
1434                         if (result != 0)
1435                                 iam_it_put(it);
1436                 }
1437                 if (result == 0)
1438                         it->ii_state = IAM_IT_ATTACHED;
1439         }
1440         assert_corr(ergo(result == 0, it_state(it) == IAM_IT_ATTACHED));
1441         assert_corr(ergo(result >  0, it_state(it) == IAM_IT_DETACHED));
1442         assert_corr(ergo(result == 0 && ik_orig != NULL,
1443                     it_ikeycmp(it, ik_orig) >= 0));
1444         return result;
1445 }
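
/*
 * A hedged sketch (illustration only, error handling and path-descriptor
 * setup elided; "pd" would be an iam_path_descr such as one laid out with
 * iam_ipd_alloc()) of how the iterator calls are typically combined,
 * following the return-value conventions documented at iam_it_get_at()
 * and iam_it_next() above:
 *
 *	iam_it_init(&it, c, IAM_IT_MOVE, pd);
 *	result = iam_it_get_at(&it, key);
 *	while (result >= 0) {
 *		rec = iam_it_rec_get(&it);
 *		... consume rec ...
 *		result = iam_it_next(&it);
 *		if (result > 0)			(end of container reached)
 *			break;
 *	}
 *	iam_it_put(&it);
 *	iam_it_fini(&it);
 */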
1446
1447 /*
1448  * Return pointer to the record under iterator.
1449  *
1450  * precondition:  it_state(it) == IAM_IT_ATTACHED && it_at_rec(it)
1451  * postcondition: it_state(it) == IAM_IT_ATTACHED
1452  */
1453 struct iam_rec *iam_it_rec_get(const struct iam_iterator *it)
1454 {
1455         assert_corr(it_state(it) == IAM_IT_ATTACHED);
1456         assert_corr(it_at_rec(it));
1457         return iam_leaf_rec(&it->ii_path.ip_leaf);
1458 }
1459
1460 static void iam_it_reccpy(struct iam_iterator *it, const struct iam_rec *r)
1461 {
1462         struct iam_leaf *folio;
1463
1464         folio = &it->ii_path.ip_leaf;
1465         iam_leaf_ops(folio)->rec_set(folio, r);
1466 }
1467
1468 /*
1469  * Replace contents of record under iterator.
1470  *
1471  * precondition:  it_state(it) == IAM_IT_ATTACHED &&
1472  *                it->ii_flags&IAM_IT_WRITE
1473  * postcondition: it_state(it) == IAM_IT_ATTACHED &&
1474  *                ergo(result == 0, !memcmp(iam_it_rec_get(it), r, ...))
1475  */
1476 int iam_it_rec_set(handle_t *h,
1477                    struct iam_iterator *it, const struct iam_rec *r)
1478 {
1479         int result;
1480         struct iam_path *path;
1481         struct buffer_head *bh;
1482
1483         assert_corr(it_state(it) == IAM_IT_ATTACHED &&
1484                     it->ii_flags&IAM_IT_WRITE);
1485         assert_corr(it_at_rec(it));
1486
1487         path = &it->ii_path;
1488         bh = path->ip_leaf.il_bh;
1489         result = iam_txn_add(h, path, bh);
1490         if (result == 0) {
1491                 iam_it_reccpy(it, r);
1492                 result = iam_txn_dirty(h, path, bh);
1493         }
1494         return result;
1495 }
1496
1497 /*
1498  * Return pointer to the index key under iterator.
1499  *
1500  * precondition:  it_state(it) == IAM_IT_ATTACHED ||
1501  *                it_state(it) == IAM_IT_SKEWED
1502  */
1503 static struct iam_ikey *iam_it_ikey_get(const struct iam_iterator *it,
1504                                         struct iam_ikey *ikey)
1505 {
1506         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1507                     it_state(it) == IAM_IT_SKEWED);
1508         assert_corr(it_at_rec(it));
1509         return iam_leaf_ikey(&it->ii_path.ip_leaf, ikey);
1510 }
1511
1512 /*
1513  * Return pointer to the key under iterator.
1514  *
1515  * precondition:  it_state(it) == IAM_IT_ATTACHED ||
1516  *                it_state(it) == IAM_IT_SKEWED
1517  */
1518 struct iam_key *iam_it_key_get(const struct iam_iterator *it)
1519 {
1520         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1521                     it_state(it) == IAM_IT_SKEWED);
1522         assert_corr(it_at_rec(it));
1523         return iam_leaf_key(&it->ii_path.ip_leaf);
1524 }
1525
1526 /*
1527  * Return size of key under iterator (in bytes)
1528  *
1529  * precondition:  it_state(it) == IAM_IT_ATTACHED ||
1530  *                it_state(it) == IAM_IT_SKEWED
1531  */
1532 int iam_it_key_size(const struct iam_iterator *it)
1533 {
1534         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1535                     it_state(it) == IAM_IT_SKEWED);
1536         assert_corr(it_at_rec(it));
1537         return iam_leaf_key_size(&it->ii_path.ip_leaf);
1538 }
1539
1540 static struct buffer_head *
1541 iam_new_node(handle_t *h, struct iam_container *c, iam_ptr_t *b, int *e)
1542 {
1543         struct inode *inode = c->ic_object;
1544         struct buffer_head *bh = NULL;
1545         struct iam_idle_head *head;
1546         struct buffer_head *idle;
1547         __u32 *idle_blocks;
1548         __u16 count;
1549
1550         if (c->ic_idle_bh == NULL)
1551                 goto newblock;
1552
1553         mutex_lock(&c->ic_idle_mutex);
1554         if (unlikely(c->ic_idle_bh == NULL)) {
1555                 mutex_unlock(&c->ic_idle_mutex);
1556                 goto newblock;
1557         }
1558
1559         head = (struct iam_idle_head *)(c->ic_idle_bh->b_data);
1560         count = le16_to_cpu(head->iih_count);
1561         if (count > 0) {
1562                 *e = osd_ldiskfs_journal_get_write_access(h, inode->i_sb,
1563                                                           c->ic_idle_bh,
1564                                                           LDISKFS_JTR_NONE);
1565                 if (*e != 0)
1566                         goto fail;
1567
1568                 --count;
1569                 *b = le32_to_cpu(head->iih_blks[count]);
1570                 head->iih_count = cpu_to_le16(count);
1571                 *e = ldiskfs_handle_dirty_metadata(h, inode, c->ic_idle_bh);
1572                 if (*e != 0)
1573                         goto fail;
1574
1575                 mutex_unlock(&c->ic_idle_mutex);
1576                 bh = __ldiskfs_bread(NULL, inode, *b, 0);
1577                 if (IS_ERR_OR_NULL(bh)) {
1578                         if (IS_ERR(bh))
1579                                 *e = PTR_ERR(bh);
1580                         else
1581                                 *e = -EIO;
1582                         return NULL;
1583                 }
1584                 goto got;
1585         }
1586
1587         /* The block itself which contains the iam_idle_head is
1588          * also an idle block, and can be used as the new node.
1589          */
1590         idle_blocks = (__u32 *)(c->ic_root_bh->b_data +
1591                                 c->ic_descr->id_root_gap +
1592                                 sizeof(struct dx_countlimit));
1593         *e = osd_ldiskfs_journal_get_write_access(h, inode->i_sb,
1594                                                   c->ic_root_bh,
1595                                                   LDISKFS_JTR_NONE);
1596         if (*e != 0)
1597                 goto fail;
1598
1599         *b = le32_to_cpu(*idle_blocks);
1600         iam_lock_bh(c->ic_root_bh);
1601         *idle_blocks = head->iih_next;
1602         iam_unlock_bh(c->ic_root_bh);
1603         *e = ldiskfs_handle_dirty_metadata(h, inode, c->ic_root_bh);
1604         if (*e != 0) {
1605                 iam_lock_bh(c->ic_root_bh);
1606                 *idle_blocks = cpu_to_le32(*b);
1607                 iam_unlock_bh(c->ic_root_bh);
1608                 goto fail;
1609         }
1610
1611         bh = c->ic_idle_bh;
1612         idle = iam_load_idle_blocks(c, le32_to_cpu(*idle_blocks));
1613         if (idle != NULL && IS_ERR(idle)) {
1614                 *e = PTR_ERR(idle);
1615                 c->ic_idle_bh = NULL;
1616                 brelse(bh);
1617                 goto fail;
1618         }
1619
1620         c->ic_idle_bh = idle;
1621         mutex_unlock(&c->ic_idle_mutex);
1622
1623 got:
1624         /* get write access for the found buffer head */
1625         *e = osd_ldiskfs_journal_get_write_access(h, inode->i_sb, bh,
1626                                                   LDISKFS_JTR_NONE);
1627         if (*e != 0) {
1628                 brelse(bh);
1629                 bh = NULL;
1630                 ldiskfs_std_error(inode->i_sb, *e);
1631         } else {
1632                 /* Clear the reused node so it looks like a newly allocated one. */
1633                 memset(bh->b_data, 0, inode->i_sb->s_blocksize);
1634                 set_buffer_uptodate(bh);
1635         }
1636         return bh;
1637
1638 newblock:
1639         bh = osd_ldiskfs_append(h, inode, b);
1640         if (IS_ERR(bh)) {
1641                 *e = PTR_ERR(bh);
1642                 bh = NULL;
1643         }
1644
1645         return bh;
1646
1647 fail:
1648         mutex_unlock(&c->ic_idle_mutex);
1649         ldiskfs_std_error(inode->i_sb, *e);
1650         return NULL;
1651 }
1652
1653 /*
1654  * Insertion of a new record. Interaction with jbd in the non-trivial case
1655  * (when a split happens) is as follows:
1656  *
1657  *  - new leaf node is involved into transaction by iam_new_node();
1658  *
1659  *  - old leaf node is involved into transaction by iam_add_rec();
1660  *
1661  *  - the leaf the insertion point ends up in is marked dirty by iam_add_rec();
1662  *
1663  *  - leaf without insertion point is marked dirty (as @new_leaf) by
1664  *  iam_new_leaf();
1665  *
1666  *  - split index nodes are involved into transaction and marked dirty by
1667  *  split_index_node().
1668  *
1669  *  - the "safe" index node, which is not split but receives the new pointer,
1670  *  is involved into transaction and marked dirty by split_index_node().
1671  *
1672  *  - index node where pointer to new leaf is inserted is involved into
1673  *  transaction by split_index_node() and marked dirty by iam_add_rec().
1674  *
1675  *  - inode is marked dirty by iam_add_rec().
1676  *
1677  */
1678
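/*
 * Editorial note, illustrative only: each step listed above follows the
 * same two-phase jbd pattern that iam_txn_add() and iam_txn_dirty() wrap:
 * the buffer is first added to the running transaction, then modified,
 * then marked dirty.  A minimal sketch of that pattern, with @h, @path and
 * @bh as used by the functions below:
 *
 *	err = iam_txn_add(h, path, bh);
 *	if (err == 0) {
 *		... modify bh->b_data ...
 *		err = iam_txn_dirty(h, path, bh);
 *	}
 */
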
1679 static int iam_new_leaf(handle_t *handle, struct iam_leaf *leaf)
1680 {
1681         int err;
1682         iam_ptr_t blknr;
1683         struct buffer_head *new_leaf;
1684         struct buffer_head *old_leaf;
1685         struct iam_container *c;
1686         struct inode *obj;
1687         struct iam_path *path;
1688
1689         c = iam_leaf_container(leaf);
1690         path = leaf->il_path;
1691
1692         obj = c->ic_object;
1693         new_leaf = iam_new_node(handle, c, &blknr, &err);
1694         do_corr(schedule());
1695         if (new_leaf != NULL) {
1696                 struct dynlock_handle *lh;
1697
1698                 lh = iam_lock_htree(c, blknr, DLT_WRITE);
1699                 do_corr(schedule());
1700                 if (lh != NULL) {
1701                         iam_leaf_ops(leaf)->init_new(c, new_leaf);
1702                         do_corr(schedule());
1703                         old_leaf = leaf->il_bh;
1704                         iam_leaf_split(leaf, &new_leaf, blknr);
1705                         if (old_leaf != leaf->il_bh) {
1706                                 /* Switched to the new leaf.  */
1707                                 iam_leaf_unlock(leaf);
1708                                 leaf->il_lock = lh;
1709                                 path->ip_frame->leaf = blknr;
1710                         } else
1711                                 iam_unlock_htree(path->ip_container, lh);
1712                         do_corr(schedule());
1713                         err = iam_txn_dirty(handle, path, new_leaf);
1714                         if (err == 0)
1715                                 err = ldiskfs_mark_inode_dirty(handle, obj);
1716                         do_corr(schedule());
1717                 } else
1718                         err = -ENOMEM;
1719                 brelse(new_leaf);
1720         }
1721         assert_inv(iam_path_check(iam_leaf_path(leaf)));
1722         return err;
1723 }
1724
1725 static inline void dx_set_limit(struct iam_entry *entries, unsigned int value)
1726 {
1727         ((struct dx_countlimit *) entries)->limit = cpu_to_le16(value);
1728 }
1729
1730 static int iam_shift_entries(struct iam_path *path,
1731                          struct iam_frame *frame, unsigned int count,
1732                          struct iam_entry *entries, struct iam_entry *entries2,
1733                          u32 newblock)
1734 {
1735         unsigned int count1;
1736         unsigned int count2;
1737         int delta;
1738
1739         struct iam_frame *parent = frame - 1;
1740         struct iam_ikey *pivot = iam_path_ikey(path, 3);
1741
1742         delta = dx_index_is_compat(path) ? 0 : +1;
1743
1744         count1 = count/2 + delta;
1745         count2 = count - count1;
1746         dx_get_ikey(path, iam_entry_shift(path, entries, count1), pivot);
1747
1748         dxtrace(pr_info("Split index %d/%d\n", count1, count2));
1749
1750         memcpy((char *) iam_entry_shift(path, entries2, delta),
1751                (char *) iam_entry_shift(path, entries, count1),
1752                count2 * iam_entry_size(path));
1753
1754         dx_set_count(entries2, count2 + delta);
1755         dx_set_limit(entries2, dx_node_limit(path));
1756
1757         /*
1758          * NOTE: very subtle piece of code.  A competing dx_probe() may find the
1759          * 2nd level index in the root index, then we insert the new entry here
1760          * and set the new count in that 2nd level index, so dx_probe() may see
1761          * the 2nd level index without the hash it looks for.  The solution is to
1762          * check the root index after locking the just-found 2nd level index -bzzz
1763          */
1764         iam_insert_key_lock(path, parent, pivot, newblock);
1765
1766         /*
1767          * now old and new 2nd level index blocks contain all pointers, so
1768          * dx_probe() may find it in either of them.  That is OK -bzzz
1769          */
1770         iam_lock_bh(frame->bh);
1771         dx_set_count(entries, count1);
1772         iam_unlock_bh(frame->bh);
1773
1774         /*
1775          * Now the old 2nd level index block points to the first half of the
1776          * leaves.  It is important that dx_probe() checks the root index block
1777          * for changes under dx_lock_bh(frame->bh) -bzzz
1778          */
1779
1780         return count1;
1781 }
1782
1783
1784 int split_index_node(handle_t *handle, struct iam_path *path,
1785                      struct dynlock_handle **lh)
1786 {
1787         struct iam_entry *entries;   /* old block contents */
1788         struct iam_entry *entries2;  /* new block contents */
1789         struct iam_frame *frame, *safe;
1790         struct buffer_head *bh_new[DX_MAX_TREE_HEIGHT] = {NULL};
1791         u32 newblock[DX_MAX_TREE_HEIGHT] = {0};
1792         struct dynlock_handle *lock[DX_MAX_TREE_HEIGHT] = {NULL,};
1793         struct dynlock_handle *new_lock[DX_MAX_TREE_HEIGHT] = {NULL,};
1794         struct inode *dir = iam_path_obj(path);
1795         struct iam_descr *descr;
1796         int nr_splet;
1797         int i, err;
1798
1799         descr = iam_path_descr(path);
1800         /*
1801          * Algorithm below depends on this.
1802          */
1803         assert_corr(dx_root_limit(path) < dx_node_limit(path));
1804
1805         frame = path->ip_frame;
1806         entries = frame->entries;
1807
1808         /*
1809          * Tall-tree handling: we might have to split multiple index blocks
1810          * all the way up to tree root. Tricky point here is error handling:
1811          * to avoid complicated undo/rollback we
1812          *
1813          *   - first allocate all necessary blocks
1814          *
1815          *   - insert pointers into them atomically.
1816          */
1817
1818         /*
1819          * Locking: leaf is already locked. htree-locks are acquired on all
1820          * index nodes that require split bottom-to-top, on the "safe" node,
1821          * and on all new nodes
1822          */
1823
1824         dxtrace(printk("using %u of %u node entries\n",
1825                        dx_get_count(entries), dx_get_limit(entries)));
1826
1827         /* What levels need split? */
1828         for (nr_splet = 0; frame >= path->ip_frames &&
1829              dx_get_count(frame->entries) == dx_get_limit(frame->entries);
1830              --frame, ++nr_splet) {
1831                 do_corr(schedule());
1832                 if (nr_splet == DX_MAX_TREE_HEIGHT) {
1833                         /*
1834                          * CWARN(dir->i_sb, __FUNCTION__,
1835                          * "Directory index full!\n");
1836                          */
1837                         err = -ENOSPC;
1838                         goto cleanup;
1839                 }
1840         }
1841
1842         safe = frame;
1843
1844         /* Lock all nodes, bottom to top.  */
1845         for (frame = path->ip_frame, i = nr_splet; i >= 0; --i, --frame) {
1846                 do_corr(schedule());
1847                 lock[i] = iam_lock_htree(path->ip_container, frame->curidx,
1848                                          DLT_WRITE);
1849                 if (lock[i] == NULL) {
1850                         err = -ENOMEM;
1851                         goto cleanup;
1852                 }
1853         }
1854
1855         /*
1856          * Check for concurrent index modification.
1857          */
1858         err = iam_check_full_path(path, 1);
1859         if (err)
1860                 goto cleanup;
1861         /* And check that the same number of nodes is to be split.  */
1862         for (i = 0, frame = path->ip_frame; frame >= path->ip_frames &&
1863              dx_get_count(frame->entries) == dx_get_limit(frame->entries);
1864              --frame, ++i) {
1865                 ;
1866         }
1867         if (i != nr_splet) {
1868                 err = -EAGAIN;
1869                 goto cleanup;
1870         }
1871
1872         /* Go back down, allocate blocks, lock them, and add to transaction */
1873         for (frame = safe + 1, i = 0; i < nr_splet; ++i, ++frame) {
1874                 bh_new[i] = iam_new_node(handle, path->ip_container,
1875                                          &newblock[i], &err);
1876                 do_corr(schedule());
1877                 if (!bh_new[i] ||
1878                     descr->id_ops->id_node_init(path->ip_container,
1879                                                 bh_new[i], 0) != 0)
1880                         goto cleanup;
1881
1882                 new_lock[i] = iam_lock_htree(path->ip_container, newblock[i],
1883                                              DLT_WRITE);
1884                 if (new_lock[i] == NULL) {
1885                         err = -ENOMEM;
1886                         goto cleanup;
1887                 }
1888                 do_corr(schedule());
1889                 BUFFER_TRACE(frame->bh, "get_write_access");
1890                 err = osd_ldiskfs_journal_get_write_access(handle,
1891                                                            dir->i_sb,
1892                                                            frame->bh,
1893                                                            LDISKFS_JTR_NONE);
1894                 if (err)
1895                         goto journal_error;
1896         }
1897         /* Add "safe" node to transaction too */
1898         if (safe + 1 != path->ip_frames) {
1899                 do_corr(schedule());
1900                 err = osd_ldiskfs_journal_get_write_access(handle,
1901                                                            dir->i_sb,
1902                                                            safe->bh,
1903                                                            LDISKFS_JTR_NONE);
1904                 if (err)
1905                         goto journal_error;
1906         }
1907
1908         /* Go through nodes once more, inserting pointers */
1909         for (frame = safe + 1, i = 0; i < nr_splet; ++i, ++frame) {
1910                 unsigned int count;
1911                 int idx;
1912                 struct buffer_head *bh2;
1913                 struct buffer_head *bh;
1914
1915                 entries = frame->entries;
1916                 count = dx_get_count(entries);
1917                 idx = iam_entry_diff(path, frame->at, entries);
1918
1919                 bh2 = bh_new[i];
1920                 entries2 = dx_get_entries(path, bh2->b_data, 0);
1921
1922                 bh = frame->bh;
1923                 if (frame == path->ip_frames) {
1924                         /* splitting root node. Tricky point:
1925                          *
1926                          * In the "normal" B-tree we'd split root *and* add
1927                          * new root to the tree with pointers to the old root
1928                          * and its sibling (thus introducing two new nodes).
1929                          *
1930                          * In htree it's enough to add one node, because
1931                          * capacity of the root node is smaller than that of
1932                          * non-root one.
1933                          */
1934                         struct iam_frame *frames;
1935                         struct iam_entry *next;
1936
1937                         assert_corr(i == 0);
1938
1939                         do_corr(schedule());
1940
1941                         frames = path->ip_frames;
1942                         memcpy((char *) entries2, (char *) entries,
1943                                count * iam_entry_size(path));
1944                         dx_set_limit(entries2, dx_node_limit(path));
1945
1946                         /* Set up root */
1947                         iam_lock_bh(frame->bh);
1948                         next = descr->id_ops->id_root_inc(path->ip_container,
1949                                                           path, frame);
1950                         dx_set_block(path, next, newblock[0]);
1951                         iam_unlock_bh(frame->bh);
1952
1953                         do_corr(schedule());
1954                         /* Shift frames in the path */
1955                         memmove(frames + 2, frames + 1,
1956                                 (sizeof(path->ip_frames)) -
1957                                  2 * sizeof(frames[0]));
1958                         /* Add new access path frame */
1959                         frames[1].at = iam_entry_shift(path, entries2, idx);
1960                         frames[1].entries = entries = entries2;
1961                         frames[1].bh = bh2;
1962                         assert_inv(dx_node_check(path, frame));
1963                         ++path->ip_frame;
1964                         ++frame;
1965                         assert_inv(dx_node_check(path, frame));
1966                         bh_new[0] = NULL; /* buffer head is "consumed" */
1967                         err = ldiskfs_handle_dirty_metadata(handle, NULL, bh2);
1968                         if (err)
1969                                 goto journal_error;
1970                         do_corr(schedule());
1971                 } else {
1972                         /* splitting non-root index node. */
1973                         struct iam_frame *parent = frame - 1;
1974
1975                         do_corr(schedule());
1976                         count = iam_shift_entries(path, frame, count,
1977                                                 entries, entries2, newblock[i]);
1978                         /* Which index block gets the new entry? */
1979                         if (idx >= count) {
1980                                 int d = dx_index_is_compat(path) ? 0 : +1;
1981
1982                                 frame->at = iam_entry_shift(path, entries2,
1983                                                             idx - count + d);
1984                                 frame->entries = entries = entries2;
1985                                 frame->curidx = newblock[i];
1986                                 swap(frame->bh, bh2);
1987                                 assert_corr(lock[i + 1] != NULL);
1988                                 assert_corr(new_lock[i] != NULL);
1989                                 swap(lock[i + 1], new_lock[i]);
1990                                 bh_new[i] = bh2;
1991                                 parent->at = iam_entry_shift(path,
1992                                                              parent->at, +1);
1993                         }
1994                         assert_inv(dx_node_check(path, frame));
1995                         assert_inv(dx_node_check(path, parent));
1996                         dxtrace(dx_show_index("node", frame->entries));
1997                         dxtrace(dx_show_index("node",
1998                                 ((struct dx_node *) bh2->b_data)->entries));
1999                         err = ldiskfs_handle_dirty_metadata(handle, NULL, bh2);
2000                         if (err)
2001                                 goto journal_error;
2002                         do_corr(schedule());
2003                         err = ldiskfs_handle_dirty_metadata(handle, NULL,
2004                                                             parent->bh);
2005                         if (err)
2006                                 goto journal_error;
2007                 }
2008                 do_corr(schedule());
2009                 err = ldiskfs_handle_dirty_metadata(handle, NULL, bh);
2010                 if (err)
2011                         goto journal_error;
2012         }
2013         /*
2014          * This function was called to make insertion of new leaf possible.
2015          * Check that it fulfilled its obligations.
2016          */
2017         assert_corr(dx_get_count(path->ip_frame->entries) <
2018                     dx_get_limit(path->ip_frame->entries));
2019         assert_corr(lock[nr_splet] != NULL);
2020         *lh = lock[nr_splet];
2021         lock[nr_splet] = NULL;
2022         if (nr_splet > 0) {
2023                 /* Log ->i_size modification. */
2024                 err = ldiskfs_mark_inode_dirty(handle, dir);
2025                 if (err)
2026                         goto journal_error;
2027         }
2028         goto cleanup;
2029 journal_error:
2030         ldiskfs_std_error(dir->i_sb, err);
2031
2032 cleanup:
2033         iam_unlock_array(path->ip_container, lock);
2034         iam_unlock_array(path->ip_container, new_lock);
2035
2036         assert_corr(err || iam_frame_is_locked(path, path->ip_frame));
2037
2038         do_corr(schedule());
2039         for (i = 0; i < ARRAY_SIZE(bh_new); ++i) {
2040                 if (bh_new[i] != NULL)
2041                         brelse(bh_new[i]);
2042         }
2043         return err;
2044 }
2045
2046 static int iam_add_rec(handle_t *handle, struct iam_iterator *it,
2047                        struct iam_path *path,
2048                        const struct iam_key *k, const struct iam_rec *r)
2049 {
2050         int err;
2051         struct iam_leaf *leaf;
2052
2053         leaf = &path->ip_leaf;
2054         assert_inv(iam_path_check(path));
2055         err = iam_txn_add(handle, path, leaf->il_bh);
2056         if (err == 0) {
2057                 do_corr(schedule());
2058                 if (!iam_leaf_can_add(leaf, k, r)) {
2059                         struct dynlock_handle *lh = NULL;
2060
2061                         do {
2062                                 assert_corr(lh == NULL);
2063                                 do_corr(schedule());
2064                                 err = split_index_node(handle, path, &lh);
2065                                 if (err == -EAGAIN) {
2066                                         assert_corr(lh == NULL);
2067
2068                                         iam_path_fini(path);
2069                                         it->ii_state = IAM_IT_DETACHED;
2070
2071                                         do_corr(schedule());
2072                                         err = iam_it_get_exact(it, k);
2073                                         if (err == -ENOENT)
2074                                                 err = 1; /* repeat split */
2075                                         else if (err == 0)
2076                                                 err = -EEXIST;
2077                                 }
2078                         } while (err > 0);
2079                         assert_inv(iam_path_check(path));
2080                         if (err == 0) {
2081                                 assert_corr(lh != NULL);
2082                                 do_corr(schedule());
2083                                 err = iam_new_leaf(handle, leaf);
2084                                 if (err == 0)
2085                                         err = iam_txn_dirty(handle, path,
2086                                                             path->ip_frame->bh);
2087                         }
2088                         iam_unlock_htree(path->ip_container, lh);
2089                         do_corr(schedule());
2090                 }
2091                 if (err == 0) {
2092                         iam_leaf_rec_add(leaf, k, r);
2093                         err = iam_txn_dirty(handle, path, leaf->il_bh);
2094                 }
2095         }
2096         assert_inv(iam_path_check(path));
2097         return err;
2098 }
2099
2100 /*
2101  * Insert new record with key @k and contents from @r, shifting records to the
2102  * right. On success, iterator is positioned on the newly inserted record.
2103  *
2104  * precondition: it->ii_flags&IAM_IT_WRITE &&
2105  *               (it_state(it) == IAM_IT_ATTACHED ||
2106  *                it_state(it) == IAM_IT_SKEWED) &&
2107  *               ergo(it_state(it) == IAM_IT_ATTACHED,
2108  *                    it_keycmp(it, k) <= 0) &&
2109  *               ergo(it_before(it), it_keycmp(it, k) > 0));
2110  * postcondition: ergo(result == 0,
2111  *                     it_state(it) == IAM_IT_ATTACHED &&
2112  *                     it_keycmp(it, k) == 0 &&
2113  *                     !memcmp(iam_it_rec_get(it), r, ...))
2114  */
2115 int iam_it_rec_insert(handle_t *h, struct iam_iterator *it,
2116                       const struct iam_key *k, const struct iam_rec *r)
2117 {
2118         int result;
2119         struct iam_path *path;
2120
2121         path = &it->ii_path;
2122
2123         assert_corr(it->ii_flags&IAM_IT_WRITE);
2124         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
2125                     it_state(it) == IAM_IT_SKEWED);
2126         assert_corr(ergo(it_state(it) == IAM_IT_ATTACHED,
2127                     it_keycmp(it, k) <= 0));
2128         assert_corr(ergo(it_before(it), it_keycmp(it, k) > 0));
2129         result = iam_add_rec(h, it, path, k, r);
2130         if (result == 0)
2131                 it->ii_state = IAM_IT_ATTACHED;
2132         assert_corr(ergo(result == 0,
2133                          it_state(it) == IAM_IT_ATTACHED &&
2134                          it_keycmp(it, k) == 0));
2135         return result;
2136 }
2137
2138 static inline int iam_idle_blocks_limit(struct inode *inode)
2139 {
2140         return (inode->i_sb->s_blocksize - sizeof(struct iam_idle_head)) >> 2;
2141 }
2142
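/*
 * Worked example (editorial): assuming sizeof(struct iam_idle_head) is
 * 8 bytes (two __u16 fields plus the __u32 iih_next link) and a 4096-byte
 * block size, iam_idle_blocks_limit() yields (4096 - 8) >> 2 = 1022, i.e.
 * one tracking block can record up to 1022 idle blocks before a new
 * tracking block has to be installed by iam_install_idle_blocks().
 */
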
2143 /*
2144  * If the leaf cannot be recycled, we will lose one block for reuse.
2145  * This is not a serious issue; it costs about the same as not recycling.
2146  */
2147 static iam_ptr_t iam_index_shrink(handle_t *h, struct iam_path *p,
2148                                   struct iam_leaf *l, struct buffer_head **bh)
2149 {
2150         struct iam_container *c = p->ip_container;
2151         struct inode *inode = c->ic_object;
2152         struct iam_frame *frame = p->ip_frame;
2153         struct iam_entry *entries;
2154         struct iam_entry *pos;
2155         struct dynlock_handle *lh;
2156         int count;
2157         int rc;
2158
2159         if (c->ic_idle_failed)
2160                 return 0;
2161
2162         if (unlikely(frame == NULL))
2163                 return 0;
2164
2165         if (!iam_leaf_empty(l))
2166                 return 0;
2167
2168         lh = iam_lock_htree(c, frame->curidx, DLT_WRITE);
2169         if (lh == NULL) {
2170                 CWARN("%s: No memory to recycle idle blocks\n",
2171                       osd_ino2name(inode));
2172                 return 0;
2173         }
2174
2175         rc = iam_txn_add(h, p, frame->bh);
2176         if (rc != 0) {
2177                 iam_unlock_htree(c, lh);
2178                 return 0;
2179         }
2180
2181         iam_lock_bh(frame->bh);
2182         entries = frame->entries;
2183         count = dx_get_count(entries);
2184         /*
2185          * Do NOT shrink the last entry in the index node; it can be reused
2186          * directly by the next new node.
2187          */
2188         if (count == 2) {
2189                 iam_unlock_bh(frame->bh);
2190                 iam_unlock_htree(c, lh);
2191                 return 0;
2192         }
2193
2194         pos = iam_find_position(p, frame);
2195         /*
2196          * New leaf nodes may have been added, or empty leaf nodes shrunk,
2197          * while our delete operation was in progress.
2198          *
2199          * If the empty leaf is no longer under the current index node because
2200          * the index node has been split, just skip the empty leaf; this is rare.
2201          */
2202         if (unlikely(frame->leaf != dx_get_block(p, pos))) {
2203                 iam_unlock_bh(frame->bh);
2204                 iam_unlock_htree(c, lh);
2205                 return 0;
2206         }
2207
2208         frame->at = pos;
2209         if (frame->at < iam_entry_shift(p, entries, count - 1)) {
2210                 struct iam_entry *n = iam_entry_shift(p, frame->at, 1);
2211
2212                 memmove(frame->at, n,
2213                         (char *)iam_entry_shift(p, entries, count) - (char *)n);
2214                 frame->at_shifted = 1;
2215         }
2216         dx_set_count(entries, count - 1);
2217         iam_unlock_bh(frame->bh);
2218         rc = iam_txn_dirty(h, p, frame->bh);
2219         iam_unlock_htree(c, lh);
2220         if (rc != 0)
2221                 return 0;
2222
2223         get_bh(l->il_bh);
2224         *bh = l->il_bh;
2225         return frame->leaf;
2226 }
2227
2228 static int
2229 iam_install_idle_blocks(handle_t *h, struct iam_path *p, struct buffer_head *bh,
2230                         __u32 *idle_blocks, iam_ptr_t blk)
2231 {
2232         struct iam_container *c = p->ip_container;
2233         struct buffer_head *old = c->ic_idle_bh;
2234         struct iam_idle_head *head;
2235         int rc;
2236
2237         head = (struct iam_idle_head *)(bh->b_data);
2238         head->iih_magic = cpu_to_le16(IAM_IDLE_HEADER_MAGIC);
2239         head->iih_count = 0;
2240         head->iih_next = *idle_blocks;
2241         /* The bh has already been journal write-accessed. */
2242         rc = iam_txn_dirty(h, p, bh);
2243         if (rc != 0)
2244                 return rc;
2245
2246         rc = iam_txn_add(h, p, c->ic_root_bh);
2247         if (rc != 0)
2248                 return rc;
2249
2250         iam_lock_bh(c->ic_root_bh);
2251         *idle_blocks = cpu_to_le32(blk);
2252         iam_unlock_bh(c->ic_root_bh);
2253         rc = iam_txn_dirty(h, p, c->ic_root_bh);
2254         if (rc == 0) {
2255                 /* Do NOT release the old bh before the new one is assigned. */
2256                 get_bh(bh);
2257                 c->ic_idle_bh = bh;
2258                 brelse(old);
2259         } else {
2260                 iam_lock_bh(c->ic_root_bh);
2261                 *idle_blocks = head->iih_next;
2262                 iam_unlock_bh(c->ic_root_bh);
2263         }
2264         return rc;
2265 }
2266
2267 /*
2268  * If the leaf cannot be recycled, we will lose one block for reuse.
2269  * This is not a serious issue; it costs about the same as not recycling.
2270  */
2271 static void iam_recycle_leaf(handle_t *h, struct iam_path *p,
2272                              struct buffer_head *bh, iam_ptr_t blk)
2273 {
2274         struct iam_container *c = p->ip_container;
2275         struct inode *inode = c->ic_object;
2276         struct iam_idle_head *head;
2277         __u32 *idle_blocks;
2278         int count;
2279         int rc;
2280
2281         mutex_lock(&c->ic_idle_mutex);
2282         if (unlikely(c->ic_idle_failed)) {
2283                 rc = -EFAULT;
2284                 goto unlock;
2285         }
2286
2287         idle_blocks = (__u32 *)(c->ic_root_bh->b_data +
2288                                 c->ic_descr->id_root_gap +
2289                                 sizeof(struct dx_countlimit));
2290         /* It is the first idle block. */
2291         if (c->ic_idle_bh == NULL) {
2292                 rc = iam_install_idle_blocks(h, p, bh, idle_blocks, blk);
2293                 goto unlock;
2294         }
2295
2296         head = (struct iam_idle_head *)(c->ic_idle_bh->b_data);
2297         count = le16_to_cpu(head->iih_count);
2298         /* Current ic_idle_bh is full, to be replaced by the leaf. */
2299         if (count == iam_idle_blocks_limit(inode)) {
2300                 rc = iam_install_idle_blocks(h, p, bh, idle_blocks, blk);
2301                 goto unlock;
2302         }
2303
2304         /* Just add to ic_idle_bh. */
2305         rc = iam_txn_add(h, p, c->ic_idle_bh);
2306         if (rc != 0)
2307                 goto unlock;
2308
2309         head->iih_blks[count] = cpu_to_le32(blk);
2310         head->iih_count = cpu_to_le16(count + 1);
2311         rc = iam_txn_dirty(h, p, c->ic_idle_bh);
2312
2313 unlock:
2314         mutex_unlock(&c->ic_idle_mutex);
2315         if (rc != 0)
2316                 CWARN("%s: idle blocks failed, will lose the blk %u\n",
2317                       osd_ino2name(inode), blk);
2318 }
2319
2320 /*
2321  * Delete record under iterator.
2322  *
2323  * precondition:  it_state(it) == IAM_IT_ATTACHED &&
2324  *                it->ii_flags&IAM_IT_WRITE &&
2325  *                it_at_rec(it)
2326  * postcondition: it_state(it) == IAM_IT_ATTACHED ||
2327  *                it_state(it) == IAM_IT_DETACHED
2328  */
2329 int iam_it_rec_delete(handle_t *h, struct iam_iterator *it)
2330 {
2331         int result;
2332         struct iam_leaf *leaf;
2333         struct iam_path *path;
2334
2335         assert_corr(it_state(it) == IAM_IT_ATTACHED &&
2336                     it->ii_flags&IAM_IT_WRITE);
2337         assert_corr(it_at_rec(it));
2338
2339         path = &it->ii_path;
2340         leaf = &path->ip_leaf;
2341
2342         assert_inv(iam_path_check(path));
2343
2344         result = iam_txn_add(h, path, leaf->il_bh);
2345         /* no compaction for now. */
2346         if (result == 0) {
2347                 iam_rec_del(leaf, it->ii_flags&IAM_IT_MOVE);
2348                 result = iam_txn_dirty(h, path, leaf->il_bh);
2349                 if (result == 0 && iam_leaf_at_end(leaf)) {
2350                         struct buffer_head *bh = NULL;
2351                         iam_ptr_t blk;
2352
2353                         blk = iam_index_shrink(h, path, leaf, &bh);
2354                         if (it->ii_flags & IAM_IT_MOVE) {
2355                                 result = iam_it_next(it);
2356                                 if (result > 0)
2357                                         result = 0;
2358                         }
2359
2360                         if (bh != NULL) {
2361                                 iam_recycle_leaf(h, path, bh, blk);
2362                                 brelse(bh);
2363                         }
2364                 }
2365         }
2366         assert_inv(iam_path_check(path));
2367         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
2368                     it_state(it) == IAM_IT_DETACHED);
2369         return result;
2370 }
2371
2372 /*
2373  * Convert iterator to cookie.
2374  *
2375  * precondition:  it_state(it) == IAM_IT_ATTACHED &&
2376  *                iam_path_descr(it->ii_path)->id_key_size <= sizeof(iam_pos_t)
2377  * postcondition: it_state(it) == IAM_IT_ATTACHED
2378  */
2379 iam_pos_t iam_it_store(const struct iam_iterator *it)
2380 {
2381         iam_pos_t result;
2382
2383         assert_corr(it_state(it) == IAM_IT_ATTACHED);
2384         assert_corr(it_at_rec(it));
2385         assert_corr(iam_it_container(it)->ic_descr->id_ikey_size <=
2386                     sizeof(result));
2387
2388         result = 0;
2389         return *(iam_pos_t *)iam_it_ikey_get(it, (void *)&result);
2390 }
2391
2392 /*
2393  * Restore iterator from cookie.
2394  *
2395  * precondition:  it_state(it) == IAM_IT_DETACHED && it->ii_flags&IAM_IT_MOVE &&
2396  *                iam_path_descr(it->ii_path)->id_key_size <= sizeof(iam_pos_t)
2397  * postcondition: ergo(result == 0, it_state(it) == IAM_IT_ATTACHED &&
2398  *                                  iam_it_store(it) == pos)
2399  */
2400 int iam_it_load(struct iam_iterator *it, iam_pos_t pos)
2401 {
2402         assert_corr(it_state(it) == IAM_IT_DETACHED &&
2403                 it->ii_flags&IAM_IT_MOVE);
2404         assert_corr(iam_it_container(it)->ic_descr->id_ikey_size <=
2405                     sizeof(pos));
2406         return iam_it_iget(it, (struct iam_ikey *)&pos);
2407 }
2408
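/*
 * Editorial sketch: the store/load pair above lets a caller suspend a scan
 * and resume it later.  It assumes an iterator opened with IAM_IT_MOVE and
 * an index key small enough to fit into iam_pos_t, as the preconditions
 * require:
 *
 *	iam_pos_t cookie;
 *
 *	cookie = iam_it_store(&it);
 *	iam_it_put(&it);
 *	iam_it_fini(&it);
 *
 *	... later ...
 *
 *	iam_it_init(&it, c, IAM_IT_MOVE, pd);
 *	if (iam_it_load(&it, cookie) == 0)
 *		... iterator is attached at or after the saved position ...
 */
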
2409 /***********************************************************************/
2410 /* invariants                                                          */
2411 /***********************************************************************/
2412 static inline int ptr_inside(void *base, size_t size, void *ptr)
2413 {
2414         return (base <= ptr) && (ptr < base + size);
2415 }
2416
2417 static int iam_frame_invariant(struct iam_frame *f)
2418 {
2419         return
2420                 (f->bh != NULL &&
2421                 f->bh->b_data != NULL &&
2422                 ptr_inside(f->bh->b_data, f->bh->b_size, f->entries) &&
2423                 ptr_inside(f->bh->b_data, f->bh->b_size, f->at) &&
2424                 f->entries <= f->at);
2425 }
2426
2427 static int iam_leaf_invariant(struct iam_leaf *l)
2428 {
2429         return
2430                 l->il_bh != NULL &&
2431                 l->il_bh->b_data != NULL &&
2432                 ptr_inside(l->il_bh->b_data, l->il_bh->b_size, l->il_entries) &&
2433                 ptr_inside(l->il_bh->b_data, l->il_bh->b_size, l->il_at) &&
2434                 l->il_entries <= l->il_at;
2435 }
2436
2437 static int iam_path_invariant(struct iam_path *p)
2438 {
2439         int i;
2440
2441         if (p->ip_container == NULL ||
2442             p->ip_indirect < 0 || p->ip_indirect > DX_MAX_TREE_HEIGHT - 1 ||
2443             p->ip_frame != p->ip_frames + p->ip_indirect ||
2444             !iam_leaf_invariant(&p->ip_leaf))
2445                 return 0;
2446         for (i = 0; i < ARRAY_SIZE(p->ip_frames); ++i) {
2447                 if (i <= p->ip_indirect) {
2448                         if (!iam_frame_invariant(&p->ip_frames[i]))
2449                                 return 0;
2450                 }
2451         }
2452         return 1;
2453 }
2454
2455 int iam_it_invariant(struct iam_iterator *it)
2456 {
2457         return
2458                 (it->ii_state == IAM_IT_DETACHED ||
2459                 it->ii_state == IAM_IT_ATTACHED ||
2460                 it->ii_state == IAM_IT_SKEWED) &&
2461                 !(it->ii_flags & ~(IAM_IT_MOVE | IAM_IT_WRITE)) &&
2462                 ergo(it->ii_state == IAM_IT_ATTACHED ||
2463                 it->ii_state == IAM_IT_SKEWED,
2464                 iam_path_invariant(&it->ii_path) &&
2465                 equi(it_at_rec(it), it->ii_state == IAM_IT_SKEWED));
2466 }
2467
2468 /*
2469  * Search container @c for record with key @k. If record is found, its data
2470  * are moved into @r.
2471  *
2472  * Return values: 0: found, -ENOENT: not-found, -ve: error
2473  */
2474 int iam_lookup(struct iam_container *c, const struct iam_key *k,
2475                struct iam_rec *r, struct iam_path_descr *pd)
2476 {
2477         struct iam_iterator it;
2478         int result;
2479
2480         iam_it_init(&it, c, 0, pd);
2481
2482         result = iam_it_get_exact(&it, k);
2483         if (result == 0)
2484                 /* record with required key found, copy it into user buffer */
2485                 iam_reccpy(&it.ii_path.ip_leaf, r);
2486         iam_it_put(&it);
2487         iam_it_fini(&it);
2488         return result;
2489 }
2490
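/*
 * Editorial sketch: a typical read-only lookup.  It assumes a container @c
 * set up elsewhere, a key @k, a record buffer @rec sized for this
 * container's records, and a path descriptor @pd:
 *
 *	rc = iam_lookup(c, k, (struct iam_rec *)rec, pd);
 *	if (rc == 0)
 *		... @rec now holds the record stored under @k ...
 *	else if (rc == -ENOENT)
 *		... no such key ...
 *	else
 *		... error ...
 */
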
2491 /*
2492  * Insert new record @r with key @k into container @c (within context of
2493  * transaction @h).
2494  *
2495  * Return values: 0: success, -ve: error, including -EEXIST when record with
2496  * given key is already present.
2497  *
2498  * postcondition: ergo(result == 0 || result == -EEXIST,
2499  *                                  iam_lookup(c, k, r2) == 0)
2500  */
2501 int iam_insert(handle_t *h, struct iam_container *c, const struct iam_key *k,
2502                const struct iam_rec *r, struct iam_path_descr *pd)
2503 {
2504         struct iam_iterator it;
2505         int result;
2506
2507         iam_it_init(&it, c, IAM_IT_WRITE, pd);
2508
2509         result = iam_it_get_exact(&it, k);
2510         if (result == -ENOENT)
2511                 result = iam_it_rec_insert(h, &it, k, r);
2512         else if (result == 0)
2513                 result = -EEXIST;
2514         iam_it_put(&it);
2515         iam_it_fini(&it);
2516         return result;
2517 }
2518
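/*
 * Editorial sketch: inserting under a journal handle.  It assumes the
 * caller has already started handle @h with enough credits for a possible
 * leaf and index split (credit estimation is done outside this file):
 *
 *	rc = iam_insert(h, c, k, r, pd);
 *	if (rc == -EEXIST)
 *		... key already present, perhaps iam_update() instead ...
 *	else if (rc != 0)
 *		... error ...
 */
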
2519 /*
2520  * Update record with the key @k in container @c (within context of
2521  * transaction @h); the new record is given by @r.
2522  *
2523  * Return values: +1: skipped because the record already has this value,
2524  * 0: success, -ve: error (including -ENOENT if no record with @k is found).
2525  */
2526 int iam_update(handle_t *h, struct iam_container *c, const struct iam_key *k,
2527                const struct iam_rec *r, struct iam_path_descr *pd)
2528 {
2529         struct iam_iterator it;
2530         struct iam_leaf *folio;
2531         int result;
2532
2533         iam_it_init(&it, c, IAM_IT_WRITE, pd);
2534
2535         result = iam_it_get_exact(&it, k);
2536         if (result == 0) {
2537                 folio = &it.ii_path.ip_leaf;
2538                 result = iam_leaf_ops(folio)->rec_eq(folio, r);
2539                 if (result == 0)
2540                         iam_it_rec_set(h, &it, r);
2541                 else
2542                         result = 1;
2543         }
2544         iam_it_put(&it);
2545         iam_it_fini(&it);
2546         return result;
2547 }
2548
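/*
 * Editorial sketch: note the +1 return, meaning the stored record already
 * matched @r and nothing was written:
 *
 *	rc = iam_update(h, c, k, r, pd);
 *	if (rc == 1)
 *		rc = 0;		(treat "unchanged" as success)
 *	else if (rc == -ENOENT)
 *		... no record with @k, perhaps iam_insert() instead ...
 */
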
2549 /*
2550  * Delete existing record with key @k.
2551  *
2552  * Return values: 0: success, -ENOENT: not-found, -ve: other error.
2553  *
2554  * postcondition: ergo(result == 0 || result == -ENOENT,
2555  *                                 iam_lookup(c, k, *) == -ENOENT);
2556  */
2557 int iam_delete(handle_t *h, struct iam_container *c, const struct iam_key *k,
2558                struct iam_path_descr *pd)
2559 {
2560         struct iam_iterator it;
2561         int result;
2562
2563         iam_it_init(&it, c, IAM_IT_WRITE, pd);
2564
2565         result = iam_it_get_exact(&it, k);
2566         if (result == 0)
2567                 iam_it_rec_delete(h, &it);
2568         iam_it_put(&it);
2569         iam_it_fini(&it);
2570         return result;
2571 }
2572
2573 int iam_root_limit(int rootgap, int blocksize, int size)
2574 {
2575         int limit;
2576         int nlimit;
2577
2578         limit = (blocksize - rootgap) / size;
2579         nlimit = blocksize / size;
2580         if (limit == nlimit)
2581                 limit--;
2582         return limit;
2583 }
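
/*
 * Worked example (editorial): with blocksize = 4096, rootgap = 32 and an
 * 8-byte entry size, limit = (4096 - 32) / 8 = 508 while nlimit = 512, so
 * the root keeps 508 entries.  If the gap were smaller than one entry,
 * limit would equal nlimit and is then decremented, guaranteeing that the
 * root always holds strictly fewer entries than a non-root node -- the
 * assumption split_index_node() relies on via
 * assert_corr(dx_root_limit(path) < dx_node_limit(path)).
 */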