1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2011, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  *
32  * iam.c
33  * Top-level entry points into iam module
34  *
35  * Author: Wang Di <wangdi@clusterfs.com>
36  * Author: Nikita Danilov <nikita@clusterfs.com>
37  */
38
39 /*
40  * iam: big theory statement.
41  *
42  * iam (Index Access Module) is a module providing abstraction of persistent
43  * transactional container on top of generalized ldiskfs htree.
44  *
45  * iam supports:
46  *
47  *     - key, pointer, and record size specifiable per container.
48  *
49  *     - trees taller than 2 index levels.
50  *
51  *     - read/write to existing ldiskfs htree directories as iam containers.
52  *
53  * iam container is a tree, consisting of leaf nodes containing keys and
54  * records stored in this container, and index nodes, containing keys and
55  * pointers to leaf or index nodes.
56  *
57  * iam does not work with keys directly, instead it calls user-supplied key
58  * comparison function (->dpo_keycmp()).
59  *
60  * Pointers are (currently) interpreted as logical offsets (measured in
61  * blocks) within the underlying flat file on top of which the iam tree lives.
62  *
63  * On-disk format:
64  *
65  * iam mostly tries to reuse existing htree formats.
66  *
67  * Format of index node:
68  *
69  * +-----+-------+-------+-------+------+-------+------------+
70  * |     | count |       |       |      |       |            |
71  * | gap |   /   | entry | entry | .... | entry | free space |
72  * |     | limit |       |       |      |       |            |
73  * +-----+-------+-------+-------+------+-------+------------+
74  *
75  *       gap           this part of node is never accessed by iam code. It
76  *                     exists for binary compatibility with ldiskfs htree (that,
77  *                     in turn, stores fake struct ext2_dirent for ext2
78  *                     compatibility), and to keep some unspecified per-node
79  *                     data. Gap can be different for root and non-root index
80  *                     nodes. Gap size can be specified for each container
81  *                     (gap of 0 is allowed).
82  *
83  *       count/limit   current number of entries in this node, and the maximal
84  *                     number of entries that can fit into node. count/limit
85  *                     has the same size as entry, and is itself counted in
86  *                     count.
87  *
88  *       entry         index entry: consists of a key immediately followed by
89  *                     a pointer to a child node. Size of a key and size of a
90  *                     pointer depend on the container. Entry has neither
91  *                     alignment nor padding.
92  *
93  *       free space    portion of the node where new entries are added
94  *
95  * Entries in index node are sorted by their key value.
96  *
97  * Format of a leaf node is not specified. Generic iam code accesses leaf
98  * nodes through ->id_leaf methods in struct iam_descr.
99  *
100  * The IAM root block is a special node, which contains the IAM descriptor.
101  * Its on-disk format is:
102  *
103  * +---------+-------+--------+---------+-------+------+-------+------------+
104  * |IAM desc | count |  idle  |         |       |      |       |            |
105  * |(fix/var)|   /   | blocks | padding | entry | .... | entry | free space |
106  * |         | limit |        |         |       |      |       |            |
107  * +---------+-------+--------+---------+-------+------+-------+------------+
108  *
109  * The padding length is calculated from the parameters in the IAM descriptor.
110  *
111  * The field "idle_blocks" is used to record empty leaf nodes, which have not
112  * been released but all contained entries in them have been removed. Usually,
113  * the idle blocks in the IAM should be reused when need to allocate new leaf
114  * nodes for new entries, it depends on the IAM hash functions to map the new
115  * entries to these idle blocks. Unfortunately, it is not easy to design some
116  * hash functions for such clever mapping, especially considering the insert/
117  * lookup performance.
118  *
119  * So the IAM recycles the empty leaf nodes, and put them into a per-file based
120  * idle blocks pool. If need some new leaf node, it will try to take idle block
121  * from such pool with priority, in spite of how the IAM hash functions to map
122  * the entry.
123  *
124  * The idle blocks pool is organized as a series of tables, and each table
125  * can be described as following (on-disk format):
126  *
127  * +---------+---------+---------+---------+------+---------+-------+
128  * |  magic  |  count  |  next   | logical |      | logical | free  |
129  * |(16 bits)|(16 bits)|  table  |  blk #  | .... |  blk #  | space |
130  * |         |         |(32 bits)|(32 bits)|      |(32 bits)|       |
131  * +---------+---------+---------+---------+------+---------+-------+
132  *
133  * The logical blk# of the first table is stored in the root node "idle_blocks".
134  *
135  */
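
/*
 * Documentation-only sketch (not built): how an index entry from the layout
 * above is addressed.  An entry is a key immediately followed by a block
 * pointer, with no alignment or padding, so entry i starts at
 * entries + i * (ikey size + pointer size).  The helper names below are
 * hypothetical; the real code uses iam_entry_shift(), dx_get_ikey() and
 * dx_get_block() for this.
 */
#if 0
static inline size_t iam_entry_stride(const struct iam_descr *d)
{
        /* index entries are packed: key bytes, then pointer bytes */
        return d->id_ikey_size + d->id_ptr_size;
}

static inline void *iam_entry_nth(const struct iam_descr *d,
                                  void *entries, int i)
{
        return (char *)entries + i * iam_entry_stride(d);
}
#endif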
136
137 #include <linux/module.h>
138 #include <linux/fs.h>
139 #include <linux/pagemap.h>
140 #include <linux/time.h>
141 #include <linux/fcntl.h>
142 #include <linux/stat.h>
143 #include <linux/string.h>
144 #include <linux/quotaops.h>
145 #include <linux/buffer_head.h>
146
147 #include <ldiskfs/ldiskfs.h>
148 #include <ldiskfs/xattr.h>
149 #undef ENTRY
150
151 #include "osd_internal.h"
152
153 #include <ldiskfs/acl.h>
154
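/*
 * Documentation-only sketch (not built): locating the "idle blocks" slot in
 * the IAM root block, following the root-node layout described in the header
 * comment above (root gap, then the count/limit pair, then idle blocks).
 * The helper name is hypothetical; the same computation is open-coded below
 * in iam_format_guess() and iam_new_node().
 */
#if 0
static inline __u32 *iam_root_idle_blocks_ptr(struct iam_container *c)
{
        return (__u32 *)(c->ic_root_bh->b_data +
                         c->ic_descr->id_root_gap +
                         sizeof(struct dx_countlimit));
}
#endif
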
155 static struct buffer_head *
156 iam_load_idle_blocks(struct iam_container *c, iam_ptr_t blk)
157 {
158         struct inode *inode = c->ic_object;
159         struct iam_idle_head *head;
160         struct buffer_head *bh;
161
162         LASSERT(mutex_is_locked(&c->ic_idle_mutex));
163
164         if (blk == 0)
165                 return NULL;
166
167         bh = __ldiskfs_bread(NULL, inode, blk, 0);
168         if (IS_ERR_OR_NULL(bh)) {
169                 CERROR("%s: cannot load idle blocks, blk = %u, err = %ld\n",
170                        osd_ino2name(inode), blk, bh ? PTR_ERR(bh) : -EIO);
171                 c->ic_idle_failed = 1;
172                 if (bh == NULL)
173                         bh = ERR_PTR(-EIO);
174                 return bh;
175         }
176
177         head = (struct iam_idle_head *)(bh->b_data);
178         if (le16_to_cpu(head->iih_magic) != IAM_IDLE_HEADER_MAGIC) {
179                 CERROR("%s: invalid idle block head, blk = %u, magic = %d\n",
180                        osd_ino2name(inode), blk, le16_to_cpu(head->iih_magic));
181                 brelse(bh);
182                 c->ic_idle_failed = 1;
183                 return ERR_PTR(-EBADF);
184         }
185
186         return bh;
187 }
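
/*
 * Documentation-only sketch (not built): walking the chain of idle-block
 * tables described in the header comment.  Each table starts with a struct
 * iam_idle_head (magic, count, next table block) followed by "count" logical
 * block numbers.  The helper name is hypothetical and error handling is
 * simplified.
 */
#if 0
static int iam_count_idle_blocks(struct iam_container *c, iam_ptr_t blk)
{
        struct inode *inode = c->ic_object;
        int total = 0;

        while (blk != 0) {
                struct iam_idle_head *head;
                struct buffer_head *bh;

                bh = __ldiskfs_bread(NULL, inode, blk, 0);
                if (IS_ERR_OR_NULL(bh))
                        return bh == NULL ? -EIO : PTR_ERR(bh);

                head = (struct iam_idle_head *)bh->b_data;
                if (le16_to_cpu(head->iih_magic) != IAM_IDLE_HEADER_MAGIC) {
                        brelse(bh);
                        return -EBADF;
                }
                /* count the table block itself plus the blocks it records */
                total += 1 + le16_to_cpu(head->iih_count);
                blk = le32_to_cpu(head->iih_next);
                brelse(bh);
        }
        return total;
}
#endif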
188
189 /*
190  * Determine the format of the given container. This is done by trying
191  * each supported format's guess function in turn (lvar, then lfix).
192  */
193 static int iam_format_guess(struct iam_container *c)
194 {
195         int result;
196
197         result = iam_lvar_guess(c);
198         if (result)
199                 result = iam_lfix_guess(c);
200
201         if (result == 0) {
202                 struct buffer_head *bh;
203                 __u32 *idle_blocks;
204
205                 LASSERT(c->ic_root_bh != NULL);
206
207                 idle_blocks = (__u32 *)(c->ic_root_bh->b_data +
208                                         c->ic_descr->id_root_gap +
209                                         sizeof(struct dx_countlimit));
210                 mutex_lock(&c->ic_idle_mutex);
211                 bh = iam_load_idle_blocks(c, le32_to_cpu(*idle_blocks));
212                 if (bh != NULL && IS_ERR(bh))
213                         result = PTR_ERR(bh);
214                 else
215                         c->ic_idle_bh = bh;
216                 mutex_unlock(&c->ic_idle_mutex);
217         }
218
219         return result;
220 }
221
222 /*
223  * Initialize container @c.
224  */
225 int iam_container_init(struct iam_container *c,
226                        struct iam_descr *descr, struct inode *inode)
227 {
228         memset(c, 0, sizeof *c);
229         c->ic_descr = descr;
230         c->ic_object = inode;
231         init_rwsem(&c->ic_sem);
232         dynlock_init(&c->ic_tree_lock);
233         mutex_init(&c->ic_idle_mutex);
234         return 0;
235 }
236
237 /*
238  * Determine container format.
239  */
240 int iam_container_setup(struct iam_container *c)
241 {
242         return iam_format_guess(c);
243 }
244
245 /*
246  * Finalize container @c, release all resources.
247  */
248 void iam_container_fini(struct iam_container *c)
249 {
250         brelse(c->ic_idle_bh);
251         c->ic_idle_bh = NULL;
252         brelse(c->ic_root_bh);
253         c->ic_root_bh = NULL;
254 }
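
/*
 * Documentation-only sketch (not built): the usual container life cycle.
 * The function name is hypothetical; real callers live elsewhere in
 * osd-ldiskfs.  iam_container_setup() guesses the on-disk format and loads
 * the idle-blocks buffer, so it must run before any lookups.
 */
#if 0
static int iam_container_example(struct inode *inode, struct iam_descr *descr)
{
        struct iam_container c;
        int rc;

        rc = iam_container_init(&c, descr, inode);
        if (rc != 0)
                return rc;

        rc = iam_container_setup(&c);
        if (rc == 0) {
                /* ... access the index through iterators ... */
        }

        iam_container_fini(&c);
        return rc;
}
#endif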
255
256 void iam_path_init(struct iam_path *path, struct iam_container *c,
257                    struct iam_path_descr *pd)
258 {
259         memset(path, 0, sizeof *path);
260         path->ip_container = c;
261         path->ip_frame = path->ip_frames;
262         path->ip_data = pd;
263         path->ip_leaf.il_path = path;
264 }
265
266 static void iam_leaf_fini(struct iam_leaf *leaf);
267
268 void iam_path_release(struct iam_path *path)
269 {
270         int i;
271
272         for (i = 0; i < ARRAY_SIZE(path->ip_frames); i++) {
273                 if (path->ip_frames[i].bh != NULL) {
274                         path->ip_frames[i].at_shifted = 0;
275                         brelse(path->ip_frames[i].bh);
276                         path->ip_frames[i].bh = NULL;
277                 }
278         }
279 }
280
281 void iam_path_fini(struct iam_path *path)
282 {
283         iam_leaf_fini(&path->ip_leaf);
284         iam_path_release(path);
285 }
286
287
288 void iam_path_compat_init(struct iam_path_compat *path, struct inode *inode)
289 {
290         int i;
291
292         path->ipc_hinfo = &path->ipc_hinfo_area;
293         for (i = 0; i < ARRAY_SIZE(path->ipc_scratch); ++i)
294                 path->ipc_descr.ipd_key_scratch[i] =
295                         (struct iam_ikey *)&path->ipc_scratch[i];
296
297         iam_path_init(&path->ipc_path, &path->ipc_container, &path->ipc_descr);
298 }
299
300 void iam_path_compat_fini(struct iam_path_compat *path)
301 {
302         iam_path_fini(&path->ipc_path);
303 }
304
305 /*
306  * Helper function initializing iam_path_descr and its key scratch area.
307  */
308 struct iam_path_descr *iam_ipd_alloc(void *area, int keysize)
309 {
310         struct iam_path_descr *ipd;
311         void *karea;
312         int i;
313
314         ipd = area;
315         karea = ipd + 1;
316         for (i = 0; i < ARRAY_SIZE(ipd->ipd_key_scratch); ++i, karea += keysize)
317                 ipd->ipd_key_scratch[i] = karea;
318         return ipd;
319 }
320
321 void iam_ipd_free(struct iam_path_descr *ipd)
322 {
323 }
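
/*
 * Documentation-only sketch (not built): iam_ipd_alloc() expects @area to
 * hold the descriptor itself followed by one key-sized scratch slot per
 * ipd_key_scratch[] entry.  The key size of 8 bytes below is illustrative
 * only, and the function name is hypothetical.
 */
#if 0
static void iam_ipd_example(void)
{
        char area[sizeof(struct iam_path_descr) + DX_SCRATCH_KEYS * 8];
        struct iam_path_descr *ipd;

        ipd = iam_ipd_alloc(area, 8);
        /* ... pass ipd to iam_path_init() / iam_it_init() ... */
        iam_ipd_free(ipd);
}
#endif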
324
325 int iam_node_read(struct iam_container *c, iam_ptr_t ptr,
326                   handle_t *h, struct buffer_head **bh)
327 {
328         /*
329          * NB: it can be called by iam_lfix_guess() which is still at
330          * very early stage, c->ic_root_bh and c->ic_descr->id_ops
331          * haven't been initialized yet.
332          * Also, we don't have this for IAM dir.
333          */
334         if (c->ic_root_bh != NULL &&
335             c->ic_descr->id_ops->id_root_ptr(c) == ptr) {
336                 get_bh(c->ic_root_bh);
337                 *bh = c->ic_root_bh;
338                 return 0;
339         }
340
341         *bh = __ldiskfs_bread(h, c->ic_object, (int)ptr, 0);
342         if (IS_ERR(*bh))
343                 return PTR_ERR(*bh);
344
345         if (*bh == NULL)
346                 return -EIO;
347
348         return 0;
349 }
350
351 /*
352  * Return pointer to current leaf record. Pointer is valid while corresponding
353  * leaf node is locked and pinned.
354  */
355 static struct iam_rec *iam_leaf_rec(const struct iam_leaf *leaf)
356 {
357         return iam_leaf_ops(leaf)->rec(leaf);
358 }
359
360 /*
361  * Return pointer to the current leaf key. This function returns pointer to
362  * the key stored in node.
363  *
364  * Caller should assume that returned pointer is only valid while leaf node is
365  * pinned and locked.
366  */
367 static struct iam_key *iam_leaf_key(const struct iam_leaf *leaf)
368 {
369         return iam_leaf_ops(leaf)->key(leaf);
370 }
371
372 static int iam_leaf_key_size(const struct iam_leaf *leaf)
373 {
374         return iam_leaf_ops(leaf)->key_size(leaf);
375 }
376
377 static struct iam_ikey *iam_leaf_ikey(const struct iam_leaf *leaf,
378                                       struct iam_ikey *key)
379 {
380         return iam_leaf_ops(leaf)->ikey(leaf, key);
381 }
382
383 static int iam_leaf_keycmp(const struct iam_leaf *leaf,
384                            const struct iam_key *key)
385 {
386         return iam_leaf_ops(leaf)->key_cmp(leaf, key);
387 }
388
389 static int iam_leaf_keyeq(const struct iam_leaf *leaf,
390                           const struct iam_key *key)
391 {
392         return iam_leaf_ops(leaf)->key_eq(leaf, key);
393 }
394
395 #if LDISKFS_INVARIANT_ON
396 static int iam_path_check(struct iam_path *p)
397 {
398         int i;
399         int result;
400         struct iam_frame *f;
401         struct iam_descr *param;
402
403         result = 1;
404         param = iam_path_descr(p);
405         for (i = 0; result && i < ARRAY_SIZE(p->ip_frames); ++i) {
406                 f = &p->ip_frames[i];
407                 if (f->bh != NULL) {
408                         result = dx_node_check(p, f);
409                         if (result)
410                                 result = !param->id_ops->id_node_check(p, f);
411                 }
412         }
413         if (result && p->ip_leaf.il_bh != NULL)
414                 result = 1;
415         if (result == 0)
416                 ldiskfs_std_error(iam_path_obj(p)->i_sb, result);
417
418         return result;
419 }
420 #endif
421
422 static int iam_leaf_load(struct iam_path *path)
423 {
424         iam_ptr_t block;
425         int err;
426         struct iam_container *c;
427         struct buffer_head *bh;
428         struct iam_leaf *leaf;
429         struct iam_descr *descr;
430
431         c     = path->ip_container;
432         leaf  = &path->ip_leaf;
433         descr = iam_path_descr(path);
434         block = path->ip_frame->leaf;
435         if (block == 0) {
436                 /* XXX bug 11027 */
437                 printk(KERN_EMERG "wrong leaf: %lu %d [%p %p %p]\n",
438                        (long unsigned)path->ip_frame->leaf,
439                        dx_get_count(dx_node_get_entries(path, path->ip_frame)),
440                        path->ip_frames[0].bh, path->ip_frames[1].bh,
441                        path->ip_frames[2].bh);
442         }
443         err = descr->id_ops->id_node_read(c, block, NULL, &bh);
444         if (err == 0) {
445                 leaf->il_bh = bh;
446                 leaf->il_curidx = block;
447                 err = iam_leaf_ops(leaf)->init(leaf);
448         }
449         return err;
450 }
451
452 static void iam_unlock_htree(struct iam_container *ic,
453                              struct dynlock_handle *lh)
454 {
455         if (lh != NULL)
456                 dynlock_unlock(&ic->ic_tree_lock, lh);
457 }
458
459
460 static void iam_leaf_unlock(struct iam_leaf *leaf)
461 {
462         if (leaf->il_lock != NULL) {
463                 iam_unlock_htree(iam_leaf_container(leaf),
464                                  leaf->il_lock);
465                 do_corr(schedule());
466                 leaf->il_lock = NULL;
467         }
468 }
469
470 static void iam_leaf_fini(struct iam_leaf *leaf)
471 {
472         if (leaf->il_path != NULL) {
473                 iam_leaf_unlock(leaf);
474                 iam_leaf_ops(leaf)->fini(leaf);
475                 if (leaf->il_bh) {
476                         brelse(leaf->il_bh);
477                         leaf->il_bh = NULL;
478                         leaf->il_curidx = 0;
479                 }
480         }
481 }
482
483 static void iam_leaf_start(struct iam_leaf *folio)
484 {
485         iam_leaf_ops(folio)->start(folio);
486 }
487
488 void iam_leaf_next(struct iam_leaf *folio)
489 {
490         iam_leaf_ops(folio)->next(folio);
491 }
492
493 static void iam_leaf_rec_add(struct iam_leaf *leaf, const struct iam_key *key,
494                              const struct iam_rec *rec)
495 {
496         iam_leaf_ops(leaf)->rec_add(leaf, key, rec);
497 }
498
499 static void iam_rec_del(struct iam_leaf *leaf, int shift)
500 {
501         iam_leaf_ops(leaf)->rec_del(leaf, shift);
502 }
503
504 int iam_leaf_at_end(const struct iam_leaf *leaf)
505 {
506         return iam_leaf_ops(leaf)->at_end(leaf);
507 }
508
509 static void iam_leaf_split(struct iam_leaf *l, struct buffer_head **bh,
510                            iam_ptr_t nr)
511 {
512         iam_leaf_ops(l)->split(l, bh, nr);
513 }
514
515 static inline int iam_leaf_empty(struct iam_leaf *l)
516 {
517         return iam_leaf_ops(l)->leaf_empty(l);
518 }
519
520 int iam_leaf_can_add(const struct iam_leaf *l,
521                      const struct iam_key *k, const struct iam_rec *r)
522 {
523         return iam_leaf_ops(l)->can_add(l, k, r);
524 }
525
526 static int iam_txn_dirty(handle_t *handle,
527                          struct iam_path *path, struct buffer_head *bh)
528 {
529         int result;
530
531         result = ldiskfs_handle_dirty_metadata(handle, NULL, bh);
532         if (result != 0)
533                 ldiskfs_std_error(iam_path_obj(path)->i_sb, result);
534         return result;
535 }
536
537 static int iam_txn_add(handle_t *handle,
538                        struct iam_path *path, struct buffer_head *bh)
539 {
540         int result;
541
542         result = ldiskfs_journal_get_write_access(handle, bh);
543         if (result != 0)
544                 ldiskfs_std_error(iam_path_obj(path)->i_sb, result);
545         return result;
546 }
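
/*
 * Documentation-only sketch (not built): the journalling pattern used
 * throughout this file: declare write intent on a buffer with iam_txn_add(),
 * modify it, then mark it dirty in the same handle with iam_txn_dirty()
 * (see iam_it_rec_set() below for a real user).  The function name is
 * hypothetical.
 */
#if 0
static int iam_txn_example(handle_t *h, struct iam_path *path,
                           struct buffer_head *bh)
{
        int rc;

        rc = iam_txn_add(h, path, bh);
        if (rc == 0) {
                /* ... modify bh->b_data ... */
                rc = iam_txn_dirty(h, path, bh);
        }
        return rc;
}
#endif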
547
548 /***********************************************************************/
549 /* iterator interface                                                  */
550 /***********************************************************************/
551
552 static enum iam_it_state it_state(const struct iam_iterator *it)
553 {
554         return it->ii_state;
555 }
556
557 /*
558  * Helper function returning the container an iterator is attached to.
559  */
560 static struct iam_container *iam_it_container(const struct iam_iterator *it)
561 {
562         return it->ii_path.ip_container;
563 }
564
565 static inline int it_keycmp(const struct iam_iterator *it,
566                             const struct iam_key *k)
567 {
568         return iam_leaf_keycmp(&it->ii_path.ip_leaf, k);
569 }
570
571 static inline int it_keyeq(const struct iam_iterator *it,
572                            const struct iam_key *k)
573 {
574         return iam_leaf_keyeq(&it->ii_path.ip_leaf, k);
575 }
576
577 static int it_ikeycmp(const struct iam_iterator *it, const struct iam_ikey *ik)
578 {
579         return iam_ikeycmp(it->ii_path.ip_container,
580                            iam_leaf_ikey(&it->ii_path.ip_leaf,
581                                         iam_path_ikey(&it->ii_path, 0)), ik);
582 }
583
584 static inline int it_at_rec(const struct iam_iterator *it)
585 {
586         return !iam_leaf_at_end(&it->ii_path.ip_leaf);
587 }
588
589 static inline int it_before(const struct iam_iterator *it)
590 {
591         return it_state(it) == IAM_IT_SKEWED && it_at_rec(it);
592 }
593
594 /*
595  * Helper wrapper around iam_it_get(): returns 0 (success) only when a
596  * record with exactly the requested key is found.
597  */
598 static int iam_it_get_exact(struct iam_iterator *it, const struct iam_key *k)
599 {
600         int result;
601
602         result = iam_it_get(it, k);
603         if (result > 0)
604                 result = 0;
605         else if (result == 0)
606                 /*
607                  * Return -ENOENT if cursor is located above record with a key
608                  * different from one specified, or in the empty leaf.
609                  *
610                  * XXX returning -ENOENT only works if iam_it_get() never
611                  * returns -ENOENT as a legitimate error.
612                  */
613                 result = -ENOENT;
614         return result;
615 }
616
617 void iam_container_write_lock(struct iam_container *ic)
618 {
619         down_write(&ic->ic_sem);
620 }
621
622 void iam_container_write_unlock(struct iam_container *ic)
623 {
624         up_write(&ic->ic_sem);
625 }
626
627 void iam_container_read_lock(struct iam_container *ic)
628 {
629         down_read(&ic->ic_sem);
630 }
631
632 void iam_container_read_unlock(struct iam_container *ic)
633 {
634         up_read(&ic->ic_sem);
635 }
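
/*
 * Documentation-only sketch (not built): the container rw-semaphore is
 * taken shared around read-only access and exclusive around modifications,
 * outside the per-block locking done during path lookup.  The function name
 * and the exact placement of the locks are illustrative only.
 */
#if 0
static void iam_container_lock_example(struct iam_container *c)
{
        iam_container_read_lock(c);
        /* ... read-only lookups and iteration ... */
        iam_container_read_unlock(c);

        iam_container_write_lock(c);
        /* ... inserts, deletes, record updates ... */
        iam_container_write_unlock(c);
}
#endif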
636
637 /*
638  * Initialize iterator to IAM_IT_DETACHED state.
639  *
640  * postcondition: it_state(it) == IAM_IT_DETACHED
641  */
642 int  iam_it_init(struct iam_iterator *it, struct iam_container *c, __u32 flags,
643                  struct iam_path_descr *pd)
644 {
645         memset(it, 0, sizeof *it);
646         it->ii_flags  = flags;
647         it->ii_state  = IAM_IT_DETACHED;
648         iam_path_init(&it->ii_path, c, pd);
649         return 0;
650 }
651
652 /*
653  * Finalize iterator and release all resources.
654  *
655  * precondition: it_state(it) == IAM_IT_DETACHED
656  */
657 void iam_it_fini(struct iam_iterator *it)
658 {
659         assert_corr(it_state(it) == IAM_IT_DETACHED);
660         iam_path_fini(&it->ii_path);
661 }
662
663 /*
664  * These locking primitives are used to protect parts of the directory's
665  * htree. The protection unit is a block: either a leaf or an index node.
666  */
667 static struct dynlock_handle *iam_lock_htree(struct iam_container *ic,
668                                              unsigned long value,
669                                              enum dynlock_type lt)
670 {
671         return dynlock_lock(&ic->ic_tree_lock, value, lt, GFP_NOFS);
672 }
673
674 static int iam_index_lock(struct iam_path *path, struct dynlock_handle **lh)
675 {
676         struct iam_frame *f;
677
678         for (f = path->ip_frame; f >= path->ip_frames; --f, ++lh) {
679                 do_corr(schedule());
680                 *lh = iam_lock_htree(path->ip_container, f->curidx, DLT_READ);
681                 if (*lh == NULL)
682                         return -ENOMEM;
683         }
684         return 0;
685 }
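
/*
 * Documentation-only sketch (not built): the per-block dynlock pattern used
 * by the code above and below.  A block (leaf or index) is locked by its
 * block number, in DLT_READ or DLT_WRITE mode, and released through
 * iam_unlock_htree().  The function name is hypothetical.
 */
#if 0
static int iam_leaf_lock_example(struct iam_path *path)
{
        struct dynlock_handle *dlh;

        dlh = iam_lock_htree(path->ip_container, path->ip_frame->leaf,
                             DLT_READ);
        if (dlh == NULL)
                return -ENOMEM;
        /* ... read the leaf block ... */
        iam_unlock_htree(path->ip_container, dlh);
        return 0;
}
#endif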
686
687 /*
688  * Fast check for frame consistency.
689  */
690 static int iam_check_fast(struct iam_path *path, struct iam_frame *frame)
691 {
692         struct iam_container *bag;
693         struct iam_entry *next;
694         struct iam_entry *last;
695         struct iam_entry *entries;
696         struct iam_entry *at;
697
698         bag = path->ip_container;
699         at = frame->at;
700         entries = frame->entries;
701         last = iam_entry_shift(path, entries, dx_get_count(entries) - 1);
702
703         if (unlikely(at > last))
704                 return -EAGAIN;
705
706         if (unlikely(dx_get_block(path, at) != frame->leaf))
707                 return -EAGAIN;
708
709         if (unlikely(iam_ikeycmp(bag, iam_ikey_at(path, at),
710                      path->ip_ikey_target) > 0))
711                 return -EAGAIN;
712
713         next = iam_entry_shift(path, at, +1);
714         if (next <= last) {
715                 if (unlikely(iam_ikeycmp(bag, iam_ikey_at(path, next),
716                                          path->ip_ikey_target) <= 0))
717                         return -EAGAIN;
718         }
719         return 0;
720 }
721
722 int dx_index_is_compat(struct iam_path *path)
723 {
724         return iam_path_descr(path) == NULL;
725 }
726
727 /*
728  * iam_find_position
729  *
730  * Binary-search an index node for the specified hash key: returns the
731  * last entry whose key is not greater than the target.
732  */
733
734 static struct iam_entry *iam_find_position(struct iam_path *path,
735                                            struct iam_frame *frame)
736 {
737         int count;
738         struct iam_entry *p;
739         struct iam_entry *q;
740         struct iam_entry *m;
741
742         count = dx_get_count(frame->entries);
743         assert_corr(count && count <= dx_get_limit(frame->entries));
744         p = iam_entry_shift(path, frame->entries,
745                             dx_index_is_compat(path) ? 1 : 2);
746         q = iam_entry_shift(path, frame->entries, count - 1);
747         while (p <= q) {
748                 m = iam_entry_shift(path, p, iam_entry_diff(path, q, p) / 2);
749                 if (iam_ikeycmp(path->ip_container, iam_ikey_at(path, m),
750                                 path->ip_ikey_target) > 0)
751                         q = iam_entry_shift(path, m, -1);
752                 else
753                         p = iam_entry_shift(path, m, +1);
754         }
755         return iam_entry_shift(path, p, -1);
756 }
757
758
759
760 static iam_ptr_t iam_find_ptr(struct iam_path *path, struct iam_frame *frame)
761 {
762         return dx_get_block(path, iam_find_position(path, frame));
763 }
764
765 void iam_insert_key(struct iam_path *path, struct iam_frame *frame,
766                     const struct iam_ikey *key, iam_ptr_t ptr)
767 {
768         struct iam_entry *entries = frame->entries;
769         struct iam_entry *new = iam_entry_shift(path, frame->at, +1);
770         int count = dx_get_count(entries);
771
772         /*
773          * Unfortunately we cannot assert this, as this function is sometimes
774          * called by VFS under i_sem and without pdirops lock.
775          */
776         assert_corr(1 || iam_frame_is_locked(path, frame));
777         assert_corr(count < dx_get_limit(entries));
778         assert_corr(frame->at < iam_entry_shift(path, entries, count));
779         assert_inv(dx_node_check(path, frame));
780
781         memmove(iam_entry_shift(path, new, 1), new,
782                 (char *)iam_entry_shift(path, entries, count) - (char *)new);
783         dx_set_ikey(path, new, key);
784         dx_set_block(path, new, ptr);
785         dx_set_count(entries, count + 1);
786         assert_inv(dx_node_check(path, frame));
787 }
788
789 void iam_insert_key_lock(struct iam_path *path, struct iam_frame *frame,
790                          const struct iam_ikey *key, iam_ptr_t ptr)
791 {
792         iam_lock_bh(frame->bh);
793         iam_insert_key(path, frame, key, ptr);
794         iam_unlock_bh(frame->bh);
795 }
796 /*
797  * returns 0 if path was unchanged, -EAGAIN otherwise.
798  */
799 static int iam_check_path(struct iam_path *path, struct iam_frame *frame)
800 {
801         int equal;
802
803         iam_lock_bh(frame->bh);
804         equal = iam_check_fast(path, frame) == 0 ||
805                 frame->leaf == iam_find_ptr(path, frame);
806         DX_DEVAL(iam_lock_stats.dls_bh_again += !equal);
807         iam_unlock_bh(frame->bh);
808
809         return equal ? 0 : -EAGAIN;
810 }
811
812 static int iam_lookup_try(struct iam_path *path)
813 {
814         u32 ptr;
815         int err = 0;
816         int i;
817
818         struct iam_descr *param;
819         struct iam_frame *frame;
820         struct iam_container *c;
821
822         param = iam_path_descr(path);
823         c = path->ip_container;
824
825         ptr = param->id_ops->id_root_ptr(c);
826         for (frame = path->ip_frames, i = 0; i <= path->ip_indirect;
827              ++frame, ++i) {
828                 err = param->id_ops->id_node_read(c, (iam_ptr_t)ptr, NULL,
829                                                   &frame->bh);
830                 do_corr(schedule());
831
832                 iam_lock_bh(frame->bh);
833                 /*
834                  * node must be initialized under bh lock because concurrent
835                  * creation procedure may change it and iam_lookup_try() will
836                  * see obsolete tree height. -bzzz
837                  */
838                 if (err != 0)
839                         break;
840
841                 if (LDISKFS_INVARIANT_ON) {
842                         err = param->id_ops->id_node_check(path, frame);
843                         if (err != 0)
844                                 break;
845                 }
846
847                 err = param->id_ops->id_node_load(path, frame);
848                 if (err != 0)
849                         break;
850
851                 assert_inv(dx_node_check(path, frame));
852                 /*
853                  * splitting may change the root index block and move the hash
854                  * we're looking for into another index block, so we have to
855                  * check for this situation and repeat from the beginning if the
856                  * path got changed -bzzz
857                  */
858                 if (i > 0) {
859                         err = iam_check_path(path, frame - 1);
860                         if (err != 0)
861                                 break;
862                 }
863
864                 frame->at = iam_find_position(path, frame);
865                 frame->curidx = ptr;
866                 frame->leaf = ptr = dx_get_block(path, frame->at);
867
868                 iam_unlock_bh(frame->bh);
869                 do_corr(schedule());
870         }
871         if (err != 0)
872                 iam_unlock_bh(frame->bh);
873         path->ip_frame = --frame;
874         return err;
875 }
876
877 static int __iam_path_lookup(struct iam_path *path)
878 {
879         int err;
880         int i;
881
882         for (i = 0; i < DX_MAX_TREE_HEIGHT; ++ i)
883                 assert(path->ip_frames[i].bh == NULL);
884
885         do {
886                 err = iam_lookup_try(path);
887                 do_corr(schedule());
888                 if (err != 0)
889                         iam_path_fini(path);
890         } while (err == -EAGAIN);
891
892         return err;
893 }
894
895 /*
896  * returns 0 if path was unchanged, -EAGAIN otherwise.
897  */
898 static int iam_check_full_path(struct iam_path *path, int search)
899 {
900         struct iam_frame *bottom;
901         struct iam_frame *scan;
902         int i;
903         int result;
904
905         do_corr(schedule());
906
907         for (bottom = path->ip_frames, i = 0;
908              i < DX_MAX_TREE_HEIGHT && bottom->bh != NULL; ++bottom, ++i) {
909                 ; /* find last filled in frame */
910         }
911
912         /*
913          * Lock frames, bottom to top.
914          */
915         for (scan = bottom - 1; scan >= path->ip_frames; --scan)
916                 iam_lock_bh(scan->bh);
917         /*
918          * Check them top to bottom.
919          */
920         result = 0;
921         for (scan = path->ip_frames; scan < bottom; ++scan) {
922                 struct iam_entry *pos;
923
924                 if (search) {
925                         if (iam_check_fast(path, scan) == 0)
926                                 continue;
927
928                         pos = iam_find_position(path, scan);
929                         if (scan->leaf != dx_get_block(path, pos)) {
930                                 result = -EAGAIN;
931                                 break;
932                         }
933                         scan->at = pos;
934                 } else {
935                         pos = iam_entry_shift(path, scan->entries,
936                                               dx_get_count(scan->entries) - 1);
937                         if (scan->at > pos ||
938                             scan->leaf != dx_get_block(path, scan->at)) {
939                                 result = -EAGAIN;
940                                 break;
941                         }
942                 }
943         }
944
945         /*
946          * Unlock top to bottom.
947          */
948         for (scan = path->ip_frames; scan < bottom; ++scan)
949                 iam_unlock_bh(scan->bh);
950         DX_DEVAL(iam_lock_stats.dls_bh_full_again += !!result);
951         do_corr(schedule());
952
953         return result;
954 }
955
956
957 /*
958  * Performs path lookup and returns with found leaf (if any) locked by htree
959  * lock.
960  */
961 static int iam_lookup_lock(struct iam_path *path,
962                            struct dynlock_handle **dl, enum dynlock_type lt)
963 {
964         int result;
965
966         while ((result = __iam_path_lookup(path)) == 0) {
967                 do_corr(schedule());
968                 *dl = iam_lock_htree(path->ip_container, path->ip_frame->leaf,
969                                      lt);
970                 if (*dl == NULL) {
971                         iam_path_fini(path);
972                         result = -ENOMEM;
973                         break;
974                 }
975                 do_corr(schedule());
976                 /*
977                  * while we were taking the lock, the leaf we just found may
978                  * have been split, so we need to check for this -bzzz
979                  */
980                 if (iam_check_full_path(path, 1) == 0)
981                         break;
982                 iam_unlock_htree(path->ip_container, *dl);
983                 *dl = NULL;
984                 iam_path_fini(path);
985         }
986         return result;
987 }
988 /*
989  * Performs tree top-to-bottom traversal starting from root, and loads leaf
990  * node.
991  */
992 static int iam_path_lookup(struct iam_path *path, int index)
993 {
994         struct iam_leaf  *leaf;
995         int result;
996
997         leaf = &path->ip_leaf;
998         result = iam_lookup_lock(path, &leaf->il_lock, DLT_WRITE);
999         assert_inv(iam_path_check(path));
1000         do_corr(schedule());
1001         if (result == 0) {
1002                 result = iam_leaf_load(path);
1003                 if (result == 0) {
1004                         do_corr(schedule());
1005                         if (index)
1006                                 result = iam_leaf_ops(leaf)->
1007                                         ilookup(leaf, path->ip_ikey_target);
1008                         else
1009                                 result = iam_leaf_ops(leaf)->
1010                                         lookup(leaf, path->ip_key_target);
1011                         do_corr(schedule());
1012                 }
1013                 if (result < 0)
1014                         iam_leaf_unlock(leaf);
1015         }
1016         return result;
1017 }
1018
1019 /*
1020  * Common part of iam_it_{i,}get().
1021  */
1022 static int __iam_it_get(struct iam_iterator *it, int index)
1023 {
1024         int result;
1025
1026         assert_corr(it_state(it) == IAM_IT_DETACHED);
1027
1028         result = iam_path_lookup(&it->ii_path, index);
1029         if (result >= 0) {
1030                 int collision;
1031
1032                 collision = result & IAM_LOOKUP_LAST;
1033                 switch (result & ~IAM_LOOKUP_LAST) {
1034                 case IAM_LOOKUP_EXACT:
1035                         result = +1;
1036                         it->ii_state = IAM_IT_ATTACHED;
1037                         break;
1038                 case IAM_LOOKUP_OK:
1039                         result = 0;
1040                         it->ii_state = IAM_IT_ATTACHED;
1041                         break;
1042                 case IAM_LOOKUP_BEFORE:
1043                 case IAM_LOOKUP_EMPTY:
1044                         result = 0;
1045                         it->ii_state = IAM_IT_SKEWED;
1046                         break;
1047                 default:
1048                         assert(0);
1049                 }
1050                 result |= collision;
1051         }
1052         /*
1053          * See iam_it_get_exact() for explanation.
1054          */
1055         assert_corr(result != -ENOENT);
1056         return result;
1057 }
1058
1059 /*
1060  * The correct hash was found, but not the same key; iterate through the
1061  * hash collision chain, looking for the correct record.
1062  */
1063 static int iam_it_collision(struct iam_iterator *it)
1064 {
1065         int result;
1066
1067         assert(ergo(it_at_rec(it), !it_keyeq(it, it->ii_path.ip_key_target)));
1068
1069         while ((result = iam_it_next(it)) == 0) {
1070                 do_corr(schedule());
1071                 if (it_ikeycmp(it, it->ii_path.ip_ikey_target) != 0)
1072                         return -ENOENT;
1073                 if (it_keyeq(it, it->ii_path.ip_key_target))
1074                         return 0;
1075         }
1076         return result;
1077 }
1078
1079 /*
1080  * Attach iterator. After successful completion, @it points to the record
1081  * with the largest key not larger than @k.
1082  *
1083  * Return value: 0: positioned on existing record,
1084  *             +ve: exact position found,
1085  *             -ve: error.
1086  *
1087  * precondition:  it_state(it) == IAM_IT_DETACHED
1088  * postcondition: ergo(result == 0 && it_state(it) == IAM_IT_ATTACHED,
1089  *                     it_keycmp(it, k) <= 0)
1090  */
1091 int iam_it_get(struct iam_iterator *it, const struct iam_key *k)
1092 {
1093         int result;
1094
1095         assert_corr(it_state(it) == IAM_IT_DETACHED);
1096
1097         it->ii_path.ip_ikey_target = NULL;
1098         it->ii_path.ip_key_target  = k;
1099
1100         result = __iam_it_get(it, 0);
1101
1102         if (result == IAM_LOOKUP_LAST) {
1103                 result = iam_it_collision(it);
1104                 if (result != 0) {
1105                         iam_it_put(it);
1106                         iam_it_fini(it);
1107                         result = __iam_it_get(it, 0);
1108                 } else
1109                         result = +1;
1110         }
1111         if (result > 0)
1112                 result &= ~IAM_LOOKUP_LAST;
1113
1114         assert_corr(ergo(result > 0, it_keycmp(it, k) == 0));
1115         assert_corr(ergo(result == 0 && it_state(it) == IAM_IT_ATTACHED,
1116                     it_keycmp(it, k) <= 0));
1117         return result;
1118 }
1119
1120 /*
1121  * Attach iterator by index key.
1122  */
1123 static int iam_it_iget(struct iam_iterator *it, const struct iam_ikey *k)
1124 {
1125         assert_corr(it_state(it) == IAM_IT_DETACHED);
1126
1127         it->ii_path.ip_ikey_target = k;
1128         return __iam_it_get(it, 1) & ~IAM_LOOKUP_LAST;
1129 }
1130
1131 /*
1132  * Attach iterator, and ensure it points to a record (not skewed).
1133  *
1134  * Return value: 0: positioned on existing record,
1135  *             +ve: exact position found,
1136  *             -ve: error.
1137  *
1138  * precondition:  it_state(it) == IAM_IT_DETACHED &&
1139  *                !(it->ii_flags&IAM_IT_WRITE)
1140  * postcondition: ergo(result == 0, it_state(it) == IAM_IT_ATTACHED)
1141  */
1142 int iam_it_get_at(struct iam_iterator *it, const struct iam_key *k)
1143 {
1144         int result;
1145
1146         assert_corr(it_state(it) == IAM_IT_DETACHED &&
1147                     !(it->ii_flags&IAM_IT_WRITE));
1148         result = iam_it_get(it, k);
1149         if (result == 0) {
1150                 if (it_state(it) != IAM_IT_ATTACHED) {
1151                         assert_corr(it_state(it) == IAM_IT_SKEWED);
1152                         result = iam_it_next(it);
1153                 }
1154         }
1155         assert_corr(ergo(result >= 0, it_state(it) == IAM_IT_ATTACHED));
1156         return result;
1157 }
1158
1159 /*
1160  * Duplicates iterator.
1161  *
1162  * postcondition: it_state(dst) == it_state(src) &&
1163  *                iam_it_container(dst) == iam_it_container(src) &&
1164  *                dst->ii_flags == src->ii_flags &&
1165  *                ergo(it_state(src) == IAM_IT_ATTACHED,
1166  *                     iam_it_rec_get(dst) == iam_it_rec_get(src) &&
1167  *                     iam_it_key_get(dst) == iam_it_key_get(src))
1168  */
1169 void iam_it_dup(struct iam_iterator *dst, const struct iam_iterator *src)
1170 {
1171         dst->ii_flags = src->ii_flags;
1172         dst->ii_state = src->ii_state;
1173         /* XXX not yet. iam_path_dup(&dst->ii_path, &src->ii_path); */
1174         /*
1175          * XXX: duplicate lock.
1176          */
1177         assert_corr(it_state(dst) == it_state(src));
1178         assert_corr(iam_it_container(dst) == iam_it_container(src));
1179         assert_corr(dst->ii_flags == src->ii_flags);
1180         assert_corr(ergo(it_state(src) == IAM_IT_ATTACHED,
1181                     iam_it_rec_get(dst) == iam_it_rec_get(src) &&
1182                     iam_it_key_get(dst) == iam_it_key_get(src)));
1183 }
1184
1185 /*
1186  * Detach iterator. Does nothing in detached state.
1187  *
1188  * postcondition: it_state(it) == IAM_IT_DETACHED
1189  */
1190 void iam_it_put(struct iam_iterator *it)
1191 {
1192         if (it->ii_state != IAM_IT_DETACHED) {
1193                 it->ii_state = IAM_IT_DETACHED;
1194                 iam_leaf_fini(&it->ii_path.ip_leaf);
1195         }
1196 }
1197
1198 static struct iam_ikey *iam_it_ikey_get(const struct iam_iterator *it,
1199                                         struct iam_ikey *ikey);
1200
1201
1202 /*
1203  * This function increments the frame pointer to search the next leaf
1204  * block, and reads in the necessary intervening nodes if the search
1205  * should be necessary.  Whether or not the search is necessary is
1206  * controlled by the hash parameter.  If the hash value is even, then
1207  * the search is only continued if the next block starts with that
1208  * hash value.  This is used if we are searching for a specific file.
1209  *
1210  * If the hash value is HASH_NB_ALWAYS, then always go to the next block.
1211  *
1212  * This function returns 1 if the caller should continue to search,
1213  * or 0 if it should not.  If there is an error reading one of the
1214  * index blocks, it will return a negative error code.
1215  *
1216  * If start_hash is non-null, it will be filled in with the starting
1217  * hash of the next page.
1218  */
1219 static int iam_htree_advance(struct inode *dir, __u32 hash,
1220                               struct iam_path *path, __u32 *start_hash,
1221                               int compat)
1222 {
1223         struct iam_frame *p;
1224         struct buffer_head *bh;
1225         int err, num_frames = 0;
1226         __u32 bhash;
1227
1228         p = path->ip_frame;
1229         /*
1230          * Find the next leaf page by incrementing the frame pointer.
1231          * If we run out of entries in the interior node, loop around and
1232          * increment pointer in the parent node.  When we break out of
1233          * this loop, num_frames indicates the number of interior
1234          * nodes that need to be read.
1235          */
1236         while (1) {
1237                 do_corr(schedule());
1238                 iam_lock_bh(p->bh);
1239                 if (p->at_shifted)
1240                         p->at_shifted = 0;
1241                 else
1242                         p->at = iam_entry_shift(path, p->at, +1);
1243                 if (p->at < iam_entry_shift(path, p->entries,
1244                                             dx_get_count(p->entries))) {
1245                         p->leaf = dx_get_block(path, p->at);
1246                         iam_unlock_bh(p->bh);
1247                         break;
1248                 }
1249                 iam_unlock_bh(p->bh);
1250                 if (p == path->ip_frames)
1251                         return 0;
1252                 num_frames++;
1253                 --p;
1254         }
1255
1256         if (compat) {
1257                 /*
1258                  * Htree hash magic.
1259                  */
1260
1261                 /*
1262                  * If the hash is 1, then continue only if the next page has a
1263                  * continuation hash of any value.  This is used for readdir
1264                  * handling.  Otherwise, check to see if the hash matches the
1265                  * desired continuation hash.  If it doesn't, return, since
1266                  * there's no point in reading the successive index pages.
1267                  */
1268                 dx_get_ikey(path, p->at, (struct iam_ikey *)&bhash);
1269                 if (start_hash)
1270                         *start_hash = bhash;
1271                 if ((hash & 1) == 0) {
1272                         if ((bhash & ~1) != hash)
1273                                 return 0;
1274                 }
1275         }
1276         /*
1277          * If the hash is HASH_NB_ALWAYS, we always go to the next
1278          * block so no check is necessary
1279          */
1280         while (num_frames--) {
1281                 iam_ptr_t idx;
1282
1283                 do_corr(schedule());
1284                 iam_lock_bh(p->bh);
1285                 idx = p->leaf = dx_get_block(path, p->at);
1286                 iam_unlock_bh(p->bh);
1287                 err = iam_path_descr(path)->id_ops->
1288                         id_node_read(path->ip_container, idx, NULL, &bh);
1289                 if (err != 0)
1290                         return err; /* Failure */
1291                 ++p;
1292                 brelse(p->bh);
1293                 assert_corr(p->bh != bh);
1294                 p->bh = bh;
1295                 p->entries = dx_node_get_entries(path, p);
1296                 p->at = iam_entry_shift(path, p->entries, !compat);
1297                 assert_corr(p->curidx != idx);
1298                 p->curidx = idx;
1299                 iam_lock_bh(p->bh);
1300                 assert_corr(p->leaf != dx_get_block(path, p->at));
1301                 p->leaf = dx_get_block(path, p->at);
1302                 iam_unlock_bh(p->bh);
1303                 assert_inv(dx_node_check(path, p));
1304         }
1305         return 1;
1306 }
1307
1308 static inline int iam_index_advance(struct iam_path *path)
1309 {
1310         return iam_htree_advance(iam_path_obj(path), 0, path, NULL, 0);
1311 }
1312
1313 static void iam_unlock_array(struct iam_container *ic,
1314                              struct dynlock_handle **lh)
1315 {
1316         int i;
1317
1318         for (i = 0; i < DX_MAX_TREE_HEIGHT; ++i, ++lh) {
1319                 if (*lh != NULL) {
1320                         iam_unlock_htree(ic, *lh);
1321                         *lh = NULL;
1322                 }
1323         }
1324 }
1325 /*
1326  * Advance index part of @path to point to the next leaf. Returns 1 on
1327  * success, 0 when the end of the container was reached. Leaf node is locked.
1328  */
1329 int iam_index_next(struct iam_container *c, struct iam_path *path)
1330 {
1331         iam_ptr_t cursor;
1332         struct dynlock_handle *lh[DX_MAX_TREE_HEIGHT] = { NULL, };
1333         int result;
1334
1335         /*
1336          * Locking for iam_index_next()... is to be described.
1337          */
1338
1339         cursor = path->ip_frame->leaf;
1340
1341         while (1) {
1342                 result = iam_index_lock(path, lh);
1343                 do_corr(schedule());
1344                 if (result < 0)
1345                         break;
1346
1347                 result = iam_check_full_path(path, 0);
1348                 if (result == 0 && cursor == path->ip_frame->leaf) {
1349                         result = iam_index_advance(path);
1350
1351                         assert_corr(result == 0 ||
1352                                     cursor != path->ip_frame->leaf);
1353                         break;
1354                 }
1355                 do {
1356                         iam_unlock_array(c, lh);
1357
1358                         iam_path_release(path);
1359                         do_corr(schedule());
1360
1361                         result = __iam_path_lookup(path);
1362                         if (result < 0)
1363                                 break;
1364
1365                         while (path->ip_frame->leaf != cursor) {
1366                                 do_corr(schedule());
1367
1368                                 result = iam_index_lock(path, lh);
1369                                 do_corr(schedule());
1370                                 if (result < 0)
1371                                         break;
1372
1373                                 result = iam_check_full_path(path, 0);
1374                                 if (result != 0)
1375                                         break;
1376
1377                                 result = iam_index_advance(path);
1378                                 if (result == 0) {
1379                                         CERROR("cannot find cursor : %u\n",
1380                                                 cursor);
1381                                         result = -EIO;
1382                                 }
1383                                 if (result < 0)
1384                                         break;
1385                                 result = iam_check_full_path(path, 0);
1386                                 if (result != 0)
1387                                         break;
1388                                 iam_unlock_array(c, lh);
1389                         }
1390                 } while (result == -EAGAIN);
1391                 if (result < 0)
1392                         break;
1393         }
1394         iam_unlock_array(c, lh);
1395         return result;
1396 }
1397
1398 /*
1399  * Move iterator one record right.
1400  *
1401  * Return value: 0: success,
1402  *              +1: end of container reached
1403  *             -ve: error
1404  *
1405  * precondition:  (it_state(it) == IAM_IT_ATTACHED ||
1406  *                 it_state(it) == IAM_IT_SKEWED) && it->ii_flags&IAM_IT_MOVE
1407  * postcondition: ergo(result == 0, it_state(it) == IAM_IT_ATTACHED) &&
1408  *                ergo(result >  0, it_state(it) == IAM_IT_DETACHED)
1409  */
1410 int iam_it_next(struct iam_iterator *it)
1411 {
1412         int result;
1413         struct iam_path *path;
1414         struct iam_leaf *leaf;
1415
1416         do_corr(struct iam_ikey *ik_orig);
1417
1418         /* assert_corr(it->ii_flags&IAM_IT_MOVE); */
1419         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1420                     it_state(it) == IAM_IT_SKEWED);
1421
1422         path = &it->ii_path;
1423         leaf = &path->ip_leaf;
1424
1425         assert_corr(iam_leaf_is_locked(leaf));
1426
1427         result = 0;
1428         do_corr(ik_orig = it_at_rec(it) ?
1429                 iam_it_ikey_get(it, iam_path_ikey(path, 2)) : NULL);
1430         if (it_before(it)) {
1431                 assert_corr(!iam_leaf_at_end(leaf));
1432                 it->ii_state = IAM_IT_ATTACHED;
1433         } else {
1434                 if (!iam_leaf_at_end(leaf))
1435                         /* advance within leaf node */
1436                         iam_leaf_next(leaf);
1437                 /*
1438                  * multiple iterations may be necessary due to empty leaves.
1439                  */
1440                 while (result == 0 && iam_leaf_at_end(leaf)) {
1441                         do_corr(schedule());
1442                         /* advance index portion of the path */
1443                         result = iam_index_next(iam_it_container(it), path);
1444                         assert_corr(iam_leaf_is_locked(leaf));
1445                         if (result == 1) {
1446                                 struct dynlock_handle *lh;
1447                                 lh = iam_lock_htree(iam_it_container(it),
1448                                                     path->ip_frame->leaf,
1449                                                     DLT_WRITE);
1450                                 if (lh != NULL) {
1451                                         iam_leaf_fini(leaf);
1452                                         leaf->il_lock = lh;
1453                                         result = iam_leaf_load(path);
1454                                         if (result == 0)
1455                                                 iam_leaf_start(leaf);
1456                                 } else
1457                                         result = -ENOMEM;
1458                         } else if (result == 0)
1459                                 /* end of container reached */
1460                                 result = +1;
1461                         if (result != 0)
1462                                 iam_it_put(it);
1463                 }
1464                 if (result == 0)
1465                         it->ii_state = IAM_IT_ATTACHED;
1466         }
1467         assert_corr(ergo(result == 0, it_state(it) == IAM_IT_ATTACHED));
1468         assert_corr(ergo(result >  0, it_state(it) == IAM_IT_DETACHED));
1469         assert_corr(ergo(result == 0 && ik_orig != NULL,
1470                     it_ikeycmp(it, ik_orig) >= 0));
1471         return result;
1472 }
1473
1474 /*
1475  * Return pointer to the record under iterator.
1476  *
1477  * precondition:  it_state(it) == IAM_IT_ATTACHED && it_at_rec(it)
1478  * postcondition: it_state(it) == IAM_IT_ATTACHED
1479  */
1480 struct iam_rec *iam_it_rec_get(const struct iam_iterator *it)
1481 {
1482         assert_corr(it_state(it) == IAM_IT_ATTACHED);
1483         assert_corr(it_at_rec(it));
1484         return iam_leaf_rec(&it->ii_path.ip_leaf);
1485 }
1486
1487 static void iam_it_reccpy(struct iam_iterator *it, const struct iam_rec *r)
1488 {
1489         struct iam_leaf *folio;
1490
1491         folio = &it->ii_path.ip_leaf;
1492         iam_leaf_ops(folio)->rec_set(folio, r);
1493 }
1494
1495 /*
1496  * Replace contents of record under iterator.
1497  *
1498  * precondition:  it_state(it) == IAM_IT_ATTACHED &&
1499  *                it->ii_flags&IAM_IT_WRITE
1500  * postcondition: it_state(it) == IAM_IT_ATTACHED &&
1501  *                ergo(result == 0, !memcmp(iam_it_rec_get(it), r, ...))
1502  */
1503 int iam_it_rec_set(handle_t *h,
1504                    struct iam_iterator *it, const struct iam_rec *r)
1505 {
1506         int result;
1507         struct iam_path *path;
1508         struct buffer_head *bh;
1509
1510         assert_corr(it_state(it) == IAM_IT_ATTACHED &&
1511                     it->ii_flags&IAM_IT_WRITE);
1512         assert_corr(it_at_rec(it));
1513
1514         path = &it->ii_path;
1515         bh = path->ip_leaf.il_bh;
1516         result = iam_txn_add(h, path, bh);
1517         if (result == 0) {
1518                 iam_it_reccpy(it, r);
1519                 result = iam_txn_dirty(h, path, bh);
1520         }
1521         return result;
1522 }
1523
1524 /*
1525  * Return pointer to the index key under iterator.
1526  *
1527  * precondition:  it_state(it) == IAM_IT_ATTACHED ||
1528  *                it_state(it) == IAM_IT_SKEWED
1529  */
1530 static struct iam_ikey *iam_it_ikey_get(const struct iam_iterator *it,
1531                                         struct iam_ikey *ikey)
1532 {
1533         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1534                     it_state(it) == IAM_IT_SKEWED);
1535         assert_corr(it_at_rec(it));
1536         return iam_leaf_ikey(&it->ii_path.ip_leaf, ikey);
1537 }
1538
1539 /*
1540  * Return pointer to the key under iterator.
1541  *
1542  * precondition:  it_state(it) == IAM_IT_ATTACHED ||
1543  *                it_state(it) == IAM_IT_SKEWED
1544  */
1545 struct iam_key *iam_it_key_get(const struct iam_iterator *it)
1546 {
1547         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1548                     it_state(it) == IAM_IT_SKEWED);
1549         assert_corr(it_at_rec(it));
1550         return iam_leaf_key(&it->ii_path.ip_leaf);
1551 }
1552
1553 /*
1554  * Return size of key under iterator (in bytes)
1555  *
1556  * precondition:  it_state(it) == IAM_IT_ATTACHED ||
1557  *                it_state(it) == IAM_IT_SKEWED
1558  */
1559 int iam_it_key_size(const struct iam_iterator *it)
1560 {
1561         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1562                     it_state(it) == IAM_IT_SKEWED);
1563         assert_corr(it_at_rec(it));
1564         return iam_leaf_key_size(&it->ii_path.ip_leaf);
1565 }
1566
1567 static struct buffer_head *
1568 iam_new_node(handle_t *h, struct iam_container *c, iam_ptr_t *b, int *e)
1569 {
1570         struct inode *inode = c->ic_object;
1571         struct buffer_head *bh = NULL;
1572         struct iam_idle_head *head;
1573         struct buffer_head *idle;
1574         __u32 *idle_blocks;
1575         __u16 count;
1576
1577         if (c->ic_idle_bh == NULL)
1578                 goto newblock;
1579
1580         mutex_lock(&c->ic_idle_mutex);
1581         if (unlikely(c->ic_idle_bh == NULL)) {
1582                 mutex_unlock(&c->ic_idle_mutex);
1583                 goto newblock;
1584         }
1585
1586         head = (struct iam_idle_head *)(c->ic_idle_bh->b_data);
1587         count = le16_to_cpu(head->iih_count);
1588         if (count > 0) {
1589                 *e = ldiskfs_journal_get_write_access(h, c->ic_idle_bh);
1590                 if (*e != 0)
1591                         goto fail;
1592
1593                 --count;
1594                 *b = le32_to_cpu(head->iih_blks[count]);
1595                 head->iih_count = cpu_to_le16(count);
1596                 *e = ldiskfs_handle_dirty_metadata(h, inode, c->ic_idle_bh);
1597                 if (*e != 0)
1598                         goto fail;
1599
1600                 mutex_unlock(&c->ic_idle_mutex);
1601                 bh = __ldiskfs_bread(NULL, inode, *b, 0);
1602                 if (IS_ERR_OR_NULL(bh)) {
1603                         if (IS_ERR(bh))
1604                                 *e = PTR_ERR(bh);
1605                         else
1606                                 *e = -EIO;
1607                         return NULL;
1608                 }
1609                 goto got;
1610         }
1611
1612         /* The block that contains the iam_idle_head is itself an idle
1613          * block, and can be used as the new node. */
1614         idle_blocks = (__u32 *)(c->ic_root_bh->b_data +
1615                                 c->ic_descr->id_root_gap +
1616                                 sizeof(struct dx_countlimit));
1617         *e = ldiskfs_journal_get_write_access(h, c->ic_root_bh);
1618         if (*e != 0)
1619                 goto fail;
1620
1621         *b = le32_to_cpu(*idle_blocks);
1622         iam_lock_bh(c->ic_root_bh);
1623         *idle_blocks = head->iih_next;
1624         iam_unlock_bh(c->ic_root_bh);
1625         *e = ldiskfs_handle_dirty_metadata(h, inode, c->ic_root_bh);
1626         if (*e != 0) {
1627                 iam_lock_bh(c->ic_root_bh);
1628                 *idle_blocks = cpu_to_le32(*b);
1629                 iam_unlock_bh(c->ic_root_bh);
1630                 goto fail;
1631         }
1632
1633         bh = c->ic_idle_bh;
1634         idle = iam_load_idle_blocks(c, le32_to_cpu(*idle_blocks));
1635         if (idle != NULL && IS_ERR(idle)) {
1636                 *e = PTR_ERR(idle);
1637                 c->ic_idle_bh = NULL;
1638                 brelse(bh);
1639                 goto fail;
1640         }
1641
1642         c->ic_idle_bh = idle;
1643         mutex_unlock(&c->ic_idle_mutex);
1644
1645 got:
1646         /* get write access for the found buffer head */
1647         *e = ldiskfs_journal_get_write_access(h, bh);
1648         if (*e != 0) {
1649                 brelse(bh);
1650                 bh = NULL;
1651                 ldiskfs_std_error(inode->i_sb, *e);
1652         } else {
1653                 /* Clear the reused block so it looks like a new node. */
1654                 memset(bh->b_data, 0, inode->i_sb->s_blocksize);
1655                 set_buffer_uptodate(bh);
1656         }
1657         return bh;
1658
1659 newblock:
1660         bh = osd_ldiskfs_append(h, inode, b);
1661         if (IS_ERR(bh)) {
1662                 *e = PTR_ERR(bh);
1663                 bh = NULL;
1664         }
1665
1666         return bh;
1667
1668 fail:
1669         mutex_unlock(&c->ic_idle_mutex);
1670         ldiskfs_std_error(inode->i_sb, *e);
1671         return NULL;
1672 }
1673
1674 /*
1675  * Insertion of a new record. Interaction with jbd in the non-trivial case
1676  * (when a split happens) is as follows:
1677  *
1678  *  - the new leaf node is added to the transaction by iam_new_node();
1679  *
1680  *  - the old leaf node is added to the transaction by iam_add_rec();
1681  *
1682  *  - the leaf where the insertion point lands is marked dirty by iam_add_rec();
1683  *
1684  *  - the leaf without the insertion point is marked dirty (as @new_leaf) by
1685  *  iam_new_leaf();
1686  *
1687  *  - split index nodes are added to the transaction and marked dirty by
1688  *  split_index_node();
1689  *
1690  *  - the "safe" index node, which is not split but where the new pointer is
1691  *  inserted, is added to the transaction and marked dirty by split_index_node();
1692  *
1693  *  - the index node where the pointer to the new leaf is inserted is added to
1694  *  the transaction by split_index_node() and marked dirty by iam_add_rec();
1695  *
1696  *  - the inode is marked dirty by iam_add_rec().
1697  *
1698  */
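
/*
 * The jbd discipline behind the steps above, as used throughout this file
 * via the iam_txn_add()/iam_txn_dirty() helpers: a buffer must be added to
 * the transaction before its contents are modified, and marked dirty
 * afterwards (a sketch of the invariant ordering, not a separate code path):
 *
 *      err = iam_txn_add(handle, path, bh);
 *      if (err == 0) {
 *              ... modify bh->b_data ...
 *              err = iam_txn_dirty(handle, path, bh);
 *      }
 */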
1699
1700 static int iam_new_leaf(handle_t *handle, struct iam_leaf *leaf)
1701 {
1702         int err;
1703         iam_ptr_t blknr;
1704         struct buffer_head *new_leaf;
1705         struct buffer_head *old_leaf;
1706         struct iam_container *c;
1707         struct inode *obj;
1708         struct iam_path *path;
1709
1710         c = iam_leaf_container(leaf);
1711         path = leaf->il_path;
1712
1713         obj = c->ic_object;
1714         new_leaf = iam_new_node(handle, c, &blknr, &err);
1715         do_corr(schedule());
1716         if (new_leaf != NULL) {
1717                 struct dynlock_handle *lh;
1718
1719                 lh = iam_lock_htree(c, blknr, DLT_WRITE);
1720                 do_corr(schedule());
1721                 if (lh != NULL) {
1722                         iam_leaf_ops(leaf)->init_new(c, new_leaf);
1723                         do_corr(schedule());
1724                         old_leaf = leaf->il_bh;
1725                         iam_leaf_split(leaf, &new_leaf, blknr);
1726                         if (old_leaf != leaf->il_bh) {
1727                                 /*
1728                                  * Switched to the new leaf.
1729                                  */
1730                                 iam_leaf_unlock(leaf);
1731                                 leaf->il_lock = lh;
1732                                 path->ip_frame->leaf = blknr;
1733                         } else
1734                                 iam_unlock_htree(path->ip_container, lh);
1735                         do_corr(schedule());
1736                         err = iam_txn_dirty(handle, path, new_leaf);
1737                         if (err == 0)
1738                                 err = ldiskfs_mark_inode_dirty(handle, obj);
1739                         do_corr(schedule());
1740                 } else
1741                         err = -ENOMEM;
1742                 brelse(new_leaf);
1743         }
1744         assert_inv(iam_path_check(iam_leaf_path(leaf)));
1745         return err;
1746 }
1747
1748 static inline void dx_set_limit(struct iam_entry *entries, unsigned value)
1749 {
1750         ((struct dx_countlimit *) entries)->limit = cpu_to_le16(value);
1751 }
1752
1753 static int iam_shift_entries(struct iam_path *path,
1754                          struct iam_frame *frame, unsigned count,
1755                          struct iam_entry *entries, struct iam_entry *entries2,
1756                          u32 newblock)
1757 {
1758         unsigned count1;
1759         unsigned count2;
1760         int delta;
1761
1762         struct iam_frame *parent = frame - 1;
1763         struct iam_ikey *pivot = iam_path_ikey(path, 3);
1764
1765         delta = dx_index_is_compat(path) ? 0 : +1;
1766
1767         count1 = count/2 + delta;
1768         count2 = count - count1;
1769         dx_get_ikey(path, iam_entry_shift(path, entries, count1), pivot);
1770
1771         dxtrace(printk("Split index %d/%d\n", count1, count2));
1772
1773         memcpy((char *) iam_entry_shift(path, entries2, delta),
1774                (char *) iam_entry_shift(path, entries, count1),
1775                count2 * iam_entry_size(path));
1776
1777         dx_set_count(entries2, count2 + delta);
1778         dx_set_limit(entries2, dx_node_limit(path));
1779
1780         /*
1781          * NOTE: very subtle race: a competing dx_probe() may find the 2nd level
1782          * index in the root index, then we insert the new key here and set the
1783          * new count in that 2nd level index, so dx_probe() may see a 2nd level
1784          * index without the hash it looks for. The solution is to check the
1785          * root index again after locking the just-found 2nd level index -bzzz
1786          */
1787         iam_insert_key_lock(path, parent, pivot, newblock);
1788
1789         /*
1790          * now the old and new 2nd level index blocks together contain all
1791          * pointers, so dx_probe() may find a pointer in either. It's OK -bzzz
1792          */
1793         iam_lock_bh(frame->bh);
1794         dx_set_count(entries, count1);
1795         iam_unlock_bh(frame->bh);
1796
1797         /*
1798          * now the old 2nd level index block points to the first half of the
1799          * leaves. It is important that dx_probe() checks the root index block
1800          * for changes under dx_lock_bh(frame->bh) -bzzz
1801          */
1802
1803         return count1;
1804 }
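
/*
 * A worked example of the arithmetic above (illustrative numbers; non-compat
 * format, so delta == +1): with count == 8, count1 = 8/2 + 1 = 5 and
 * count2 = 3. Entries 5..7 are copied into the new block starting at slot
 * delta, the new block's count becomes count2 + delta = 4, the old block is
 * trimmed down to count1 = 5, and the pivot (the ikey of entry 5) is
 * inserted into the parent frame pointing at @newblock.
 */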
1805
1806
1807 int split_index_node(handle_t *handle, struct iam_path *path,
1808                      struct dynlock_handle **lh)
1809 {
1810         struct iam_entry *entries;   /* old block contents */
1811         struct iam_entry *entries2;  /* new block contents */
1812         struct iam_frame *frame, *safe;
1813         struct buffer_head *bh_new[DX_MAX_TREE_HEIGHT] = {NULL};
1814         u32 newblock[DX_MAX_TREE_HEIGHT] = {0};
1815         struct dynlock_handle *lock[DX_MAX_TREE_HEIGHT] = {NULL,};
1816         struct dynlock_handle *new_lock[DX_MAX_TREE_HEIGHT] = {NULL,};
1817         struct inode *dir = iam_path_obj(path);
1818         struct iam_descr *descr;
1819         int nr_splet;
1820         int i, err;
1821
1822         descr = iam_path_descr(path);
1823         /*
1824          * Algorithm below depends on this.
1825          */
1826         assert_corr(dx_root_limit(path) < dx_node_limit(path));
1827
1828         frame = path->ip_frame;
1829         entries = frame->entries;
1830
1831         /*
1832          * Tall-tree handling: we might have to split multiple index blocks
1833          * all the way up to tree root. Tricky point here is error handling:
1834          * to avoid complicated undo/rollback we
1835          *
1836          *   - first allocate all necessary blocks
1837          *
1838          *   - insert pointers into them atomically.
1839          */
1840
1841         /*
1842          * Locking: leaf is already locked. htree-locks are acquired on all
1843          * index nodes that require split bottom-to-top, on the "safe" node,
1844          * and on all new nodes
1845          */
1846
1847         dxtrace(printk("using %u of %u node entries\n",
1848                        dx_get_count(entries), dx_get_limit(entries)));
1849
1850         /* Which levels need to be split? */
1851         for (nr_splet = 0; frame >= path->ip_frames &&
1852              dx_get_count(frame->entries) == dx_get_limit(frame->entries);
1853              --frame, ++nr_splet) {
1854                 do_corr(schedule());
1855                 if (nr_splet == DX_MAX_TREE_HEIGHT) {
1856                         /*
1857                          * CWARN(dir->i_sb, __FUNCTION__,
1858                          * "Directory index full!\n");
1859                          */
1860                         err = -ENOSPC;
1861                         goto cleanup;
1862                 }
1863         }
1864
1865         safe = frame;
1866
1867         /*
1868          * Lock all nodes, bottom to top.
1869          */
1870         for (frame = path->ip_frame, i = nr_splet; i >= 0; --i, --frame) {
1871                 do_corr(schedule());
1872                 lock[i] = iam_lock_htree(path->ip_container, frame->curidx,
1873                                          DLT_WRITE);
1874                 if (lock[i] == NULL) {
1875                         err = -ENOMEM;
1876                         goto cleanup;
1877                 }
1878         }
1879
1880         /*
1881          * Check for concurrent index modification.
1882          */
1883         err = iam_check_full_path(path, 1);
1884         if (err)
1885                 goto cleanup;
1886         /*
1887          * And check that the same number of nodes is to be split.
1888          */
1889         for (i = 0, frame = path->ip_frame; frame >= path->ip_frames &&
1890              dx_get_count(frame->entries) == dx_get_limit(frame->entries);
1891              --frame, ++i) {
1892                 ;
1893         }
1894         if (i != nr_splet) {
1895                 err = -EAGAIN;
1896                 goto cleanup;
1897         }
1898
1899         /*
1900          * Go back down, allocating blocks, locking them, and adding them to
1901          * the transaction...
1902          */
1903         for (frame = safe + 1, i = 0; i < nr_splet; ++i, ++frame) {
1904                 bh_new[i] = iam_new_node(handle, path->ip_container,
1905                                          &newblock[i], &err);
1906                 do_corr(schedule());
1907                 if (!bh_new[i] ||
1908                     descr->id_ops->id_node_init(path->ip_container,
1909                                                 bh_new[i], 0) != 0)
1910                         goto cleanup;
1911
1912                 new_lock[i] = iam_lock_htree(path->ip_container, newblock[i],
1913                                              DLT_WRITE);
1914                 if (new_lock[i] == NULL) {
1915                         err = -ENOMEM;
1916                         goto cleanup;
1917                 }
1918                 do_corr(schedule());
1919                 BUFFER_TRACE(frame->bh, "get_write_access");
1920                 err = ldiskfs_journal_get_write_access(handle, frame->bh);
1921                 if (err)
1922                         goto journal_error;
1923         }
1924         /* Add "safe" node to transaction too */
1925         if (safe + 1 != path->ip_frames) {
1926                 do_corr(schedule());
1927                 err = ldiskfs_journal_get_write_access(handle, safe->bh);
1928                 if (err)
1929                         goto journal_error;
1930         }
1931
1932         /* Go through nodes once more, inserting pointers */
1933         for (frame = safe + 1, i = 0; i < nr_splet; ++i, ++frame) {
1934                 unsigned count;
1935                 int idx;
1936                 struct buffer_head *bh2;
1937                 struct buffer_head *bh;
1938
1939                 entries = frame->entries;
1940                 count = dx_get_count(entries);
1941                 idx = iam_entry_diff(path, frame->at, entries);
1942
1943                 bh2 = bh_new[i];
1944                 entries2 = dx_get_entries(path, bh2->b_data, 0);
1945
1946                 bh = frame->bh;
1947                 if (frame == path->ip_frames) {
1948                         /* splitting root node. Tricky point:
1949                          *
1950                          * In the "normal" B-tree we'd split root *and* add
1951                          * new root to the tree with pointers to the old root
1952                          * and its sibling (thus introducing two new nodes).
1953                          *
1954                          * In htree it's enough to add one node, because
1955                          * capacity of the root node is smaller than that of
1956                          * non-root one.
1957                          */
1958                         struct iam_frame *frames;
1959                         struct iam_entry *next;
1960
1961                         assert_corr(i == 0);
1962
1963                         do_corr(schedule());
1964
1965                         frames = path->ip_frames;
1966                         memcpy((char *) entries2, (char *) entries,
1967                                count * iam_entry_size(path));
1968                         dx_set_limit(entries2, dx_node_limit(path));
1969
1970                         /* Set up root */
1971                         iam_lock_bh(frame->bh);
1972                         next = descr->id_ops->id_root_inc(path->ip_container,
1973                                                           path, frame);
1974                         dx_set_block(path, next, newblock[0]);
1975                         iam_unlock_bh(frame->bh);
1976
1977                         do_corr(schedule());
1978                         /* Shift frames in the path */
1979                         memmove(frames + 2, frames + 1,
1980                                (sizeof path->ip_frames) - 2 * sizeof frames[0]);
1981                         /* Add new access path frame */
1982                         frames[1].at = iam_entry_shift(path, entries2, idx);
1983                         frames[1].entries = entries = entries2;
1984                         frames[1].bh = bh2;
1985                         assert_inv(dx_node_check(path, frame));
1986                         ++ path->ip_frame;
1987                         ++ frame;
1988                         assert_inv(dx_node_check(path, frame));
1989                         bh_new[0] = NULL; /* buffer head is "consumed" */
1990                         err = ldiskfs_handle_dirty_metadata(handle, NULL, bh2);
1991                         if (err)
1992                                 goto journal_error;
1993                         do_corr(schedule());
1994                 } else {
1995                         /* splitting non-root index node. */
1996                         struct iam_frame *parent = frame - 1;
1997
1998                         do_corr(schedule());
1999                         count = iam_shift_entries(path, frame, count,
2000                                                 entries, entries2, newblock[i]);
2001                         /* Which index block gets the new entry? */
2002                         if (idx >= count) {
2003                                 int d = dx_index_is_compat(path) ? 0 : +1;
2004
2005                                 frame->at = iam_entry_shift(path, entries2,
2006                                                             idx - count + d);
2007                                 frame->entries = entries = entries2;
2008                                 frame->curidx = newblock[i];
2009                                 swap(frame->bh, bh2);
2010                                 assert_corr(lock[i + 1] != NULL);
2011                                 assert_corr(new_lock[i] != NULL);
2012                                 swap(lock[i + 1], new_lock[i]);
2013                                 bh_new[i] = bh2;
2014                                 parent->at = iam_entry_shift(path,
2015                                                              parent->at, +1);
2016                         }
2017                         assert_inv(dx_node_check(path, frame));
2018                         assert_inv(dx_node_check(path, parent));
2019                         dxtrace(dx_show_index("node", frame->entries));
2020                         dxtrace(dx_show_index("node",
2021                                 ((struct dx_node *) bh2->b_data)->entries));
2022                         err = ldiskfs_handle_dirty_metadata(handle, NULL, bh2);
2023                         if (err)
2024                                 goto journal_error;
2025                         do_corr(schedule());
2026                         err = ldiskfs_handle_dirty_metadata(handle, NULL,
2027                                                             parent->bh);
2028                         if (err)
2029                                 goto journal_error;
2030                 }
2031                 do_corr(schedule());
2032                 err = ldiskfs_handle_dirty_metadata(handle, NULL, bh);
2033                 if (err)
2034                         goto journal_error;
2035         }
2036         /*
2037          * This function was called to make insertion of a new leaf
2038          * possible. Check that it fulfilled its obligations.
2039          */
2040         assert_corr(dx_get_count(path->ip_frame->entries) <
2041                     dx_get_limit(path->ip_frame->entries));
2042         assert_corr(lock[nr_splet] != NULL);
2043         *lh = lock[nr_splet];
2044         lock[nr_splet] = NULL;
2045         if (nr_splet > 0) {
2046                 /*
2047                  * Log ->i_size modification.
2048                  */
2049                 err = ldiskfs_mark_inode_dirty(handle, dir);
2050                 if (err)
2051                         goto journal_error;
2052         }
2053         goto cleanup;
2054 journal_error:
2055         ldiskfs_std_error(dir->i_sb, err);
2056
2057 cleanup:
2058         iam_unlock_array(path->ip_container, lock);
2059         iam_unlock_array(path->ip_container, new_lock);
2060
2061         assert_corr(err || iam_frame_is_locked(path, path->ip_frame));
2062
2063         do_corr(schedule());
2064         for (i = 0; i < ARRAY_SIZE(bh_new); ++i) {
2065                 if (bh_new[i] != NULL)
2066                         brelse(bh_new[i]);
2067         }
2068         return err;
2069 }
2070
2071 static int iam_add_rec(handle_t *handle, struct iam_iterator *it,
2072                        struct iam_path *path,
2073                        const struct iam_key *k, const struct iam_rec *r)
2074 {
2075         int err;
2076         struct iam_leaf *leaf;
2077
2078         leaf = &path->ip_leaf;
2079         assert_inv(iam_path_check(path));
2080         err = iam_txn_add(handle, path, leaf->il_bh);
2081         if (err == 0) {
2082                 do_corr(schedule());
2083                 if (!iam_leaf_can_add(leaf, k, r)) {
2084                         struct dynlock_handle *lh = NULL;
2085
2086                         do {
2087                                 assert_corr(lh == NULL);
2088                                 do_corr(schedule());
2089                                 err = split_index_node(handle, path, &lh);
2090                                 if (err == -EAGAIN) {
2091                                         assert_corr(lh == NULL);
2092
2093                                         iam_path_fini(path);
2094                                         it->ii_state = IAM_IT_DETACHED;
2095
2096                                         do_corr(schedule());
2097                                         err = iam_it_get_exact(it, k);
2098                                         if (err == -ENOENT)
2099                                                 err = +1; /* repeat split */
2100                                         else if (err == 0)
2101                                                 err = -EEXIST;
2102                                 }
2103                         } while (err > 0);
2104                         assert_inv(iam_path_check(path));
2105                         if (err == 0) {
2106                                 assert_corr(lh != NULL);
2107                                 do_corr(schedule());
2108                                 err = iam_new_leaf(handle, leaf);
2109                                 if (err == 0)
2110                                         err = iam_txn_dirty(handle, path,
2111                                                             path->ip_frame->bh);
2112                         }
2113                         iam_unlock_htree(path->ip_container, lh);
2114                         do_corr(schedule());
2115                 }
2116                 if (err == 0) {
2117                         iam_leaf_rec_add(leaf, k, r);
2118                         err = iam_txn_dirty(handle, path, leaf->il_bh);
2119                 }
2120         }
2121         assert_inv(iam_path_check(path));
2122         return err;
2123 }
2124
2125 /*
2126  * Insert new record with key @k and contents from @r, shifting records to the
2127  * right. On success, iterator is positioned on the newly inserted record.
2128  *
2129  * precondition: it->ii_flags&IAM_IT_WRITE &&
2130  *               (it_state(it) == IAM_IT_ATTACHED ||
2131  *                it_state(it) == IAM_IT_SKEWED) &&
2132  *               ergo(it_state(it) == IAM_IT_ATTACHED,
2133  *                    it_keycmp(it, k) <= 0) &&
2134  *               ergo(it_before(it), it_keycmp(it, k) > 0));
2135  * postcondition: ergo(result == 0,
2136  *                     it_state(it) == IAM_IT_ATTACHED &&
2137  *                     it_keycmp(it, k) == 0 &&
2138  *                     !memcmp(iam_it_rec_get(it), r, ...))
2139  */
2140 int iam_it_rec_insert(handle_t *h, struct iam_iterator *it,
2141                       const struct iam_key *k, const struct iam_rec *r)
2142 {
2143         int result;
2144         struct iam_path *path;
2145
2146         path = &it->ii_path;
2147
2148         assert_corr(it->ii_flags&IAM_IT_WRITE);
2149         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
2150                     it_state(it) == IAM_IT_SKEWED);
2151         assert_corr(ergo(it_state(it) == IAM_IT_ATTACHED,
2152                     it_keycmp(it, k) <= 0));
2153         assert_corr(ergo(it_before(it), it_keycmp(it, k) > 0));
2154         result = iam_add_rec(h, it, path, k, r);
2155         if (result == 0)
2156                 it->ii_state = IAM_IT_ATTACHED;
2157         assert_corr(ergo(result == 0,
2158                          it_state(it) == IAM_IT_ATTACHED &&
2159                          it_keycmp(it, k) == 0));
2160         return result;
2161 }
2162
2163 static inline int iam_idle_blocks_limit(struct inode *inode)
2164 {
2165         return (inode->i_sb->s_blocksize - sizeof(struct iam_idle_head)) >> 2;
2166 }
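
/*
 * For example, with a 4096-byte block size and assuming an 8-byte
 * struct iam_idle_head (magic, count, next), this yields
 * (4096 - 8) >> 2 = 1022 slots, since each entry of iih_blks[] is a 32-bit
 * block number.
 */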
2167
2168 /*
2169  * If the leaf cannot be recycled, we lose one block that could have been
2170  * reused. This is not a serious issue; it is almost the same as not recycling.
2171  */
2172 static iam_ptr_t iam_index_shrink(handle_t *h, struct iam_path *p,
2173                                   struct iam_leaf *l, struct buffer_head **bh)
2174 {
2175         struct iam_container *c = p->ip_container;
2176         struct inode *inode = c->ic_object;
2177         struct iam_frame *frame = p->ip_frame;
2178         struct iam_entry *entries;
2179         struct iam_entry *pos;
2180         struct dynlock_handle *lh;
2181         int count;
2182         int rc;
2183
2184         if (c->ic_idle_failed)
2185                 return 0;
2186
2187         if (unlikely(frame == NULL))
2188                 return 0;
2189
2190         if (!iam_leaf_empty(l))
2191                 return 0;
2192
2193         lh = iam_lock_htree(c, frame->curidx, DLT_WRITE);
2194         if (lh == NULL) {
2195                 CWARN("%s: No memory to recycle idle blocks\n",
2196                       osd_ino2name(inode));
2197                 return 0;
2198         }
2199
2200         rc = iam_txn_add(h, p, frame->bh);
2201         if (rc != 0) {
2202                 iam_unlock_htree(c, lh);
2203                 return 0;
2204         }
2205
2206         iam_lock_bh(frame->bh);
2207         entries = frame->entries;
2208         count = dx_get_count(entries);
2209         /*
2210          * Do NOT shrink the last entry in the index node; it can be reused
2211          * directly by the next new node.
2212          */
2213         if (count == 2) {
2214                 iam_unlock_bh(frame->bh);
2215                 iam_unlock_htree(c, lh);
2216                 return 0;
2217         }
2218
2219         pos = iam_find_position(p, frame);
2220         /*
2221          * Some new leaf nodes may have been added, or empty leaf nodes may
2222          * have been shrunk, during our delete operation.
2223          *
2224          * If the empty leaf is no longer under the current index node because
2225          * the index node has been split, just skip the empty leaf; this is rare.
2226          */
2227         if (unlikely(frame->leaf != dx_get_block(p, pos))) {
2228                 iam_unlock_bh(frame->bh);
2229                 iam_unlock_htree(c, lh);
2230                 return 0;
2231         }
2232
2233         frame->at = pos;
2234         if (frame->at < iam_entry_shift(p, entries, count - 1)) {
2235                 struct iam_entry *n = iam_entry_shift(p, frame->at, 1);
2236
2237                 memmove(frame->at, n,
2238                         (char *)iam_entry_shift(p, entries, count) - (char *)n);
2239                 frame->at_shifted = 1;
2240         }
2241         dx_set_count(entries, count - 1);
2242         iam_unlock_bh(frame->bh);
2243         rc = iam_txn_dirty(h, p, frame->bh);
2244         iam_unlock_htree(c, lh);
2245         if (rc != 0)
2246                 return 0;
2247
2248         get_bh(l->il_bh);
2249         *bh = l->il_bh;
2250         return frame->leaf;
2251 }
2252
2253 static int
2254 iam_install_idle_blocks(handle_t *h, struct iam_path *p, struct buffer_head *bh,
2255                         __u32 *idle_blocks, iam_ptr_t blk)
2256 {
2257         struct iam_container *c = p->ip_container;
2258         struct buffer_head *old = c->ic_idle_bh;
2259         struct iam_idle_head *head;
2260         int rc;
2261
2262         head = (struct iam_idle_head *)(bh->b_data);
2263         head->iih_magic = cpu_to_le16(IAM_IDLE_HEADER_MAGIC);
2264         head->iih_count = 0;
2265         head->iih_next = *idle_blocks;
2266         /* The caller already obtained journal write access to this bh. */
2267         rc = iam_txn_dirty(h, p, bh);
2268         if (rc != 0)
2269                 return rc;
2270
2271         rc = iam_txn_add(h, p, c->ic_root_bh);
2272         if (rc != 0)
2273                 return rc;
2274
2275         iam_lock_bh(c->ic_root_bh);
2276         *idle_blocks = cpu_to_le32(blk);
2277         iam_unlock_bh(c->ic_root_bh);
2278         rc = iam_txn_dirty(h, p, c->ic_root_bh);
2279         if (rc == 0) {
2280                 /* Do NOT release the old bh before the new one is assigned. */
2281                 get_bh(bh);
2282                 c->ic_idle_bh = bh;
2283                 brelse(old);
2284         } else {
2285                 iam_lock_bh(c->ic_root_bh);
2286                 *idle_blocks = head->iih_next;
2287                 iam_unlock_bh(c->ic_root_bh);
2288         }
2289         return rc;
2290 }
2291
2292 /*
2293  * If the leaf cannot be recycled, we lose one block that could have been
2294  * reused. This is not a serious issue; it is almost the same as not recycling.
2295  */
2296 static void iam_recycle_leaf(handle_t *h, struct iam_path *p,
2297                              struct buffer_head *bh, iam_ptr_t blk)
2298 {
2299         struct iam_container *c = p->ip_container;
2300         struct inode *inode = c->ic_object;
2301         struct iam_idle_head *head;
2302         __u32 *idle_blocks;
2303         int count;
2304         int rc;
2305
2306         mutex_lock(&c->ic_idle_mutex);
2307         if (unlikely(c->ic_idle_failed)) {
2308                 rc = -EFAULT;
2309                 goto unlock;
2310         }
2311
2312         idle_blocks = (__u32 *)(c->ic_root_bh->b_data +
2313                                 c->ic_descr->id_root_gap +
2314                                 sizeof(struct dx_countlimit));
2315         /* It is the first idle block. */
2316         if (c->ic_idle_bh == NULL) {
2317                 rc = iam_install_idle_blocks(h, p, bh, idle_blocks, blk);
2318                 goto unlock;
2319         }
2320
2321         head = (struct iam_idle_head *)(c->ic_idle_bh->b_data);
2322         count = le16_to_cpu(head->iih_count);
2323         /* Current ic_idle_bh is full, to be replaced by the leaf. */
2324         if (count == iam_idle_blocks_limit(inode)) {
2325                 rc = iam_install_idle_blocks(h, p, bh, idle_blocks, blk);
2326                 goto unlock;
2327         }
2328
2329         /* Just add to ic_idle_bh. */
2330         rc = iam_txn_add(h, p, c->ic_idle_bh);
2331         if (rc != 0)
2332                 goto unlock;
2333
2334         head->iih_blks[count] = cpu_to_le32(blk);
2335         head->iih_count = cpu_to_le16(count + 1);
2336         rc = iam_txn_dirty(h, p, c->ic_idle_bh);
2337
2338 unlock:
2339         mutex_unlock(&c->ic_idle_mutex);
2340         if (rc != 0)
2341                 CWARN("%s: idle blocks failed, will lose the blk %u\n",
2342                       osd_ino2name(inode), blk);
2343 }
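
/*
 * Shape of the idle-block list maintained above and consumed by
 * iam_new_node() (a sketch derived from the code; the root block keeps a
 * single __u32 "idle_blocks" head after the root gap and dx_countlimit,
 * and each tracking block chains to the previous head through iih_next):
 *
 *   root block           ic_idle_bh (current head)     previous head
 *   +--------------+     +-----------------------+     +--------------+
 *   | ...          |     | iih_magic             |     | iih_magic    |
 *   | idle_blocks -+---->| iih_count = N         |  +->| iih_count    |
 *   | ...          |     | iih_next -------------+--+  | ...          |
 *   +--------------+     | iih_blks[0 .. N - 1]  |     +--------------+
 *                        +-----------------------+
 */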
2344
2345 /*
2346  * Delete record under iterator.
2347  *
2348  * precondition:  it_state(it) == IAM_IT_ATTACHED &&
2349  *                it->ii_flags&IAM_IT_WRITE &&
2350  *                it_at_rec(it)
2351  * postcondition: it_state(it) == IAM_IT_ATTACHED ||
2352  *                it_state(it) == IAM_IT_DETACHED
2353  */
2354 int iam_it_rec_delete(handle_t *h, struct iam_iterator *it)
2355 {
2356         int result;
2357         struct iam_leaf *leaf;
2358         struct iam_path *path;
2359
2360         assert_corr(it_state(it) == IAM_IT_ATTACHED &&
2361                     it->ii_flags&IAM_IT_WRITE);
2362         assert_corr(it_at_rec(it));
2363
2364         path = &it->ii_path;
2365         leaf = &path->ip_leaf;
2366
2367         assert_inv(iam_path_check(path));
2368
2369         result = iam_txn_add(h, path, leaf->il_bh);
2370         /*
2371          * no compaction for now.
2372          */
2373         if (result == 0) {
2374                 iam_rec_del(leaf, it->ii_flags&IAM_IT_MOVE);
2375                 result = iam_txn_dirty(h, path, leaf->il_bh);
2376                 if (result == 0 && iam_leaf_at_end(leaf)) {
2377                         struct buffer_head *bh = NULL;
2378                         iam_ptr_t blk;
2379
2380                         blk = iam_index_shrink(h, path, leaf, &bh);
2381                         if (it->ii_flags & IAM_IT_MOVE) {
2382                                 result = iam_it_next(it);
2383                                 if (result > 0)
2384                                         result = 0;
2385                         }
2386
2387                         if (bh != NULL) {
2388                                 iam_recycle_leaf(h, path, bh, blk);
2389                                 brelse(bh);
2390                         }
2391                 }
2392         }
2393         assert_inv(iam_path_check(path));
2394         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
2395                     it_state(it) == IAM_IT_DETACHED);
2396         return result;
2397 }
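
/*
 * A minimal caller-side sketch (hypothetical identifiers, error handling
 * trimmed): delete the record at a key using an IAM_IT_MOVE iterator, in
 * which case the iterator advances to the next record (or detaches at the
 * end of the container) as implemented above:
 *
 *      iam_it_init(&it, c, IAM_IT_WRITE | IAM_IT_MOVE, pd);
 *      if (iam_it_get_exact(&it, k) == 0)
 *              rc = iam_it_rec_delete(h, &it);
 *      iam_it_put(&it);
 *      iam_it_fini(&it);
 */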
2398
2399 /*
2400  * Convert iterator to cookie.
2401  *
2402  * precondition:  it_state(it) == IAM_IT_ATTACHED &&
2403  *                iam_path_descr(it->ii_path)->id_ikey_size <= sizeof(iam_pos_t)
2404  * postcondition: it_state(it) == IAM_IT_ATTACHED
2405  */
2406 iam_pos_t iam_it_store(const struct iam_iterator *it)
2407 {
2408         iam_pos_t result;
2409
2410         assert_corr(it_state(it) == IAM_IT_ATTACHED);
2411         assert_corr(it_at_rec(it));
2412         assert_corr(iam_it_container(it)->ic_descr->id_ikey_size <=
2413                     sizeof result);
2414
2415         result = 0;
2416         return *(iam_pos_t *)iam_it_ikey_get(it, (void *)&result);
2417 }
2418
2419 /*
2420  * Restore iterator from cookie.
2421  *
2422  * precondition:  it_state(it) == IAM_IT_DETACHED && it->ii_flags&IAM_IT_MOVE &&
2423  *                iam_path_descr(it->ii_path)->id_ikey_size <= sizeof(iam_pos_t)
2424  * postcondition: ergo(result == 0, it_state(it) == IAM_IT_ATTACHED &&
2425  *                                  iam_it_store(it) == pos)
2426  */
2427 int iam_it_load(struct iam_iterator *it, iam_pos_t pos)
2428 {
2429         assert_corr(it_state(it) == IAM_IT_DETACHED &&
2430                 it->ii_flags&IAM_IT_MOVE);
2431         assert_corr(iam_it_container(it)->ic_descr->id_ikey_size <= sizeof pos);
2432         return iam_it_iget(it, (struct iam_ikey *)&pos);
2433 }
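
/*
 * A minimal caller-side sketch (hypothetical identifiers, error handling
 * trimmed): save the position of an IAM_IT_MOVE iterator as a cookie and
 * resume from it later, e.g. across separate calls:
 *
 *      iam_pos_t cookie;
 *
 *      iam_it_init(&it, c, IAM_IT_MOVE, pd);
 *      if (iam_it_get_exact(&it, k) == 0) {
 *              cookie = iam_it_store(&it);
 *              iam_it_put(&it);
 *      }
 *      ...
 *      if (iam_it_load(&it, cookie) == 0)
 *              ... process iam_it_rec_get(&it) ...
 *      iam_it_put(&it);
 *      iam_it_fini(&it);
 */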
2434
2435 /***********************************************************************/
2436 /* invariants                                                          */
2437 /***********************************************************************/
2438
2439 static inline int ptr_inside(void *base, size_t size, void *ptr)
2440 {
2441         return (base <= ptr) && (ptr < base + size);
2442 }
2443
2444 static int iam_frame_invariant(struct iam_frame *f)
2445 {
2446         return
2447                 (f->bh != NULL &&
2448                 f->bh->b_data != NULL &&
2449                 ptr_inside(f->bh->b_data, f->bh->b_size, f->entries) &&
2450                 ptr_inside(f->bh->b_data, f->bh->b_size, f->at) &&
2451                 f->entries <= f->at);
2452 }
2453
2454 static int iam_leaf_invariant(struct iam_leaf *l)
2455 {
2456         return
2457                 l->il_bh != NULL &&
2458                 l->il_bh->b_data != NULL &&
2459                 ptr_inside(l->il_bh->b_data, l->il_bh->b_size, l->il_entries) &&
2460                 ptr_inside(l->il_bh->b_data, l->il_bh->b_size, l->il_at) &&
2461                 l->il_entries <= l->il_at;
2462 }
2463
2464 static int iam_path_invariant(struct iam_path *p)
2465 {
2466         int i;
2467
2468         if (p->ip_container == NULL ||
2469             p->ip_indirect < 0 || p->ip_indirect > DX_MAX_TREE_HEIGHT - 1 ||
2470             p->ip_frame != p->ip_frames + p->ip_indirect ||
2471             !iam_leaf_invariant(&p->ip_leaf))
2472                 return 0;
2473         for (i = 0; i < ARRAY_SIZE(p->ip_frames); ++i) {
2474                 if (i <= p->ip_indirect) {
2475                         if (!iam_frame_invariant(&p->ip_frames[i]))
2476                                 return 0;
2477                 }
2478         }
2479         return 1;
2480 }
2481
2482 int iam_it_invariant(struct iam_iterator *it)
2483 {
2484         return
2485                 (it->ii_state == IAM_IT_DETACHED ||
2486                 it->ii_state == IAM_IT_ATTACHED ||
2487                 it->ii_state == IAM_IT_SKEWED) &&
2488                 !(it->ii_flags & ~(IAM_IT_MOVE | IAM_IT_WRITE)) &&
2489                 ergo(it->ii_state == IAM_IT_ATTACHED ||
2490                 it->ii_state == IAM_IT_SKEWED,
2491                 iam_path_invariant(&it->ii_path) &&
2492                 equi(it_at_rec(it), it->ii_state == IAM_IT_SKEWED));
2493 }
2494
2495 /*
2496  * Search container @c for a record with key @k. If the record is found, its
2497  * data is copied into @r.
2498  *
2499  * Return values: 0: found, -ENOENT: not-found, -ve: error
2500  */
2501 int iam_lookup(struct iam_container *c, const struct iam_key *k,
2502                struct iam_rec *r, struct iam_path_descr *pd)
2503 {
2504         struct iam_iterator it;
2505         int result;
2506
2507         iam_it_init(&it, c, 0, pd);
2508
2509         result = iam_it_get_exact(&it, k);
2510         if (result == 0)
2511                 /*
2512                  * record with required key found, copy it into user buffer
2513                  */
2514                 iam_reccpy(&it.ii_path.ip_leaf, r);
2515         iam_it_put(&it);
2516         iam_it_fini(&it);
2517         return result;
2518 }
2519
2520 /*
2521  * Insert new record @r with key @k into container @c (within context of
2522  * transaction @h).
2523  *
2524  * Return values: 0: success, -ve: error, including -EEXIST when record with
2525  * given key is already present.
2526  *
2527  * postcondition: ergo(result == 0 || result == -EEXIST,
2528  *                     iam_lookup(c, k, r2) == 0)
2529  */
2530 int iam_insert(handle_t *h, struct iam_container *c, const struct iam_key *k,
2531                const struct iam_rec *r, struct iam_path_descr *pd)
2532 {
2533         struct iam_iterator it;
2534         int result;
2535
2536         iam_it_init(&it, c, IAM_IT_WRITE, pd);
2537
2538         result = iam_it_get_exact(&it, k);
2539         if (result == -ENOENT)
2540                 result = iam_it_rec_insert(h, &it, k, r);
2541         else if (result == 0)
2542                 result = -EEXIST;
2543         iam_it_put(&it);
2544         iam_it_fini(&it);
2545         return result;
2546 }
2547
2548 /*
2549  * Update the record with key @k in container @c (within the context of
2550  * transaction @h); the new record is given by @r.
2551  *
2552  * Return values: +1: skipped, the record already has the same value,
2553  * 0: success, -ve: error, including -ENOENT if no matching record was found.
2554  */
2555 int iam_update(handle_t *h, struct iam_container *c, const struct iam_key *k,
2556                const struct iam_rec *r, struct iam_path_descr *pd)
2557 {
2558         struct iam_iterator it;
2559         struct iam_leaf *folio;
2560         int result;
2561
2562         iam_it_init(&it, c, IAM_IT_WRITE, pd);
2563
2564         result = iam_it_get_exact(&it, k);
2565         if (result == 0) {
2566                 folio = &it.ii_path.ip_leaf;
2567                 result = iam_leaf_ops(folio)->rec_eq(folio, r);
2568                 if (result == 0)
2569                         iam_it_rec_set(h, &it, r);
2570                 else
2571                         result = 1;
2572         }
2573         iam_it_put(&it);
2574         iam_it_fini(&it);
2575         return result;
2576 }
2577
2578 /*
2579  * Delete existing record with key @k.
2580  *
2581  * Return values: 0: success, -ENOENT: not-found, -ve: other error.
2582  *
2583  * postcondition: ergo(result == 0 || result == -ENOENT,
2584  *                                 !iam_lookup(c, k, *));
2585  */
2586 int iam_delete(handle_t *h, struct iam_container *c, const struct iam_key *k,
2587                struct iam_path_descr *pd)
2588 {
2589         struct iam_iterator it;
2590         int result;
2591
2592         iam_it_init(&it, c, IAM_IT_WRITE, pd);
2593
2594         result = iam_it_get_exact(&it, k);
2595         if (result == 0)
2596                 iam_it_rec_delete(h, &it);
2597         iam_it_put(&it);
2598         iam_it_fini(&it);
2599         return result;
2600 }
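
/*
 * A minimal caller-side sketch tying the four wrappers above together
 * (hypothetical identifiers; "h" is a journal handle opened by the caller,
 * "c" an initialized container, "pd" a path descriptor; error handling
 * trimmed):
 *
 *      rc = iam_insert(h, c, k, r, pd);      0, or -EEXIST if already there
 *      rc = iam_lookup(c, k, r_out, pd);     0 and @r_out filled when found
 *      rc = iam_update(h, c, k, r_new, pd);  +1 if unchanged, 0 on update
 *      rc = iam_delete(h, c, k, pd);         0, or -ENOENT if absent
 */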
2601
2602 int iam_root_limit(int rootgap, int blocksize, int size)
2603 {
2604         int limit;
2605         int nlimit;
2606
2607         limit = (blocksize - rootgap) / size;
2608         nlimit = blocksize / size;
2609         if (limit == nlimit)
2610                 limit--;
2611         return limit;
2612 }
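
/*
 * Worked example (illustrative numbers): with blocksize = 4096, size = 40
 * and rootgap = 16, limit = (4096 - 16) / 40 = 102 equals
 * nlimit = 4096 / 40 = 102, so the root limit is decremented to 101 to keep
 * it strictly below the non-root node limit (cf. the assertion in
 * split_index_node()); with rootgap = 80 the division already gives
 * 100 < 102 and no adjustment is needed.
 */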