1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2011, 2016, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  *
32  * iam.c
33  * Top-level entry points into iam module
34  *
35  * Author: Wang Di <wangdi@clusterfs.com>
36  * Author: Nikita Danilov <nikita@clusterfs.com>
37  */
38
39 /*
40  * iam: big theory statement.
41  *
42  * iam (Index Access Module) is a module providing an abstraction of a
43  * persistent transactional container on top of a generalized ldiskfs htree.
44  *
45  * iam supports:
46  *
47  *     - key, pointer, and record size specifiable per container.
48  *
49  *     - trees taller than 2 index levels.
50  *
51  *     - read/write to existing ldiskfs htree directories as iam containers.
52  *
53  * An iam container is a tree consisting of leaf nodes, which contain the keys
54  * and records stored in this container, and index nodes, which contain keys
55  * and pointers to leaf or index nodes.
56  *
57  * iam does not work with keys directly; instead it calls a user-supplied key
58  * comparison function (->dpo_keycmp()).
59  *
60  * Pointers are (currently) interpreted as logical offsets (measured in
61  * blocks) within the underlying flat file on top of which the iam tree lives.
62  *
63  * On-disk format:
64  *
65  * iam mostly tries to reuse existing htree formats.
66  *
67  * Format of index node:
68  *
69  * +-----+-------+-------+-------+------+-------+------------+
70  * |     | count |       |       |      |       |            |
71  * | gap |   /   | entry | entry | .... | entry | free space |
72  * |     | limit |       |       |      |       |            |
73  * +-----+-------+-------+-------+------+-------+------------+
74  *
75  *       gap           this part of node is never accessed by iam code. It
76  *                     exists for binary compatibility with ldiskfs htree (that,
77  *                     in turn, stores fake struct ext2_dirent for ext2
78  *                     compatibility), and to keep some unspecified per-node
79  *                     data. Gap can be different for root and non-root index
80  *                     nodes. Gap size can be specified for each container
81  *                     (gap of 0 is allowed).
82  *
83  *       count/limit   current number of entries in this node, and the maximal
84  *                     number of entries that can fit into node. count/limit
85  *                     has the same size as entry, and is itself counted in
86  *                     count.
87  *
88  *       entry         index entry: consists of a key immediately followed by
89  *                     a pointer to a child node. Size of a key and size of a
90  *                     pointer depends on container. Entry has neither
91  *                     alignment nor padding.
92  *
93  *       free space    portion of the node to which new entries are added
94  *
95  * Entries in index node are sorted by their key value.
96  *
97  * Format of a leaf node is not specified. Generic iam code accesses leaf
98  * nodes through ->id_leaf methods in struct iam_descr.
99  *
100  * The IAM root block is a special node, which contains the IAM descriptor.
101  * Its on-disk format is:
102  *
103  * +---------+-------+--------+---------+-------+------+-------+------------+
104  * |IAM desc | count |  idle  |         |       |      |       |            |
105  * |(fix/var)|   /   | blocks | padding | entry | .... | entry | free space |
106  * |         | limit |        |         |       |      |       |            |
107  * +---------+-------+--------+---------+-------+------+-------+------------+
108  *
109  * The padding length is calculated with the parameters in the IAM descriptor.
110  *
111  * The field "idle_blocks" is used to record empty leaf nodes that have not
112  * been released even though all entries in them have been removed. Ideally,
113  * such idle blocks would be reused when new leaf nodes are needed for new
114  * entries, but that would require the IAM hash functions to map the new
115  * entries to these idle blocks. Unfortunately, it is not easy to design
116  * hash functions for such a clever mapping, especially without hurting
117  * insert/lookup performance.
118  *
119  * So the IAM recycles the empty leaf nodes and puts them into a per-file
120  * pool of idle blocks. When a new leaf node is needed, a block is taken
121  * from this pool first, regardless of where the IAM hash functions would
122  * map the entry.
123  *
124  * The idle blocks pool is organized as a series of tables, and each table
125  * can be described as following (on-disk format):
126  *
127  * +---------+---------+---------+---------+------+---------+-------+
128  * |  magic  |  count  |  next   |  logic  |      |  logic  | free  |
129  * |(16 bits)|(16 bits)|  table  |  blk #  | .... |  blk #  | space |
130  * |         |         |(32 bits)|(32 bits)|      |(32 bits)|       |
131  * +---------+---------+---------+---------+------+---------+-------+
132  *
133  * The logical blk# for the first table is stored in the root node "idle_blocks".
134  *
135  */
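/*
 * As a rough illustration of the idle-blocks table layout pictured above, the
 * header of each table corresponds to a structure like the sketch below. This
 * is only a sketch matching the fields used later in this file (iih_magic,
 * iih_count, iih_next, iih_blks); the authoritative definition of
 * struct iam_idle_head lives elsewhere in osd-ldiskfs, not in this file.
 *
 *      struct iam_idle_head {
 *              __le16 iih_magic;       // IAM_IDLE_HEADER_MAGIC
 *              __le16 iih_count;       // number of idle blocks in iih_blks[]
 *              __le32 iih_next;        // logical blk# of next table, 0 if none
 *              __le32 iih_blks[0];     // logical blk# of each idle leaf block
 *      };
 */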
136
137 #include <linux/module.h>
138 #include <linux/fs.h>
139 #include <linux/pagemap.h>
140 #include <linux/time.h>
141 #include <linux/fcntl.h>
142 #include <linux/stat.h>
143 #include <linux/string.h>
144 #include <linux/quotaops.h>
145 #include <linux/buffer_head.h>
146
147 #include <ldiskfs/ldiskfs.h>
148 #include <ldiskfs/xattr.h>
149 #undef ENTRY
150
151 #include "osd_internal.h"
152
153 #include <ldiskfs/acl.h>
154
155 /*
156  * List of all registered formats.
157  *
158  * No locking. Callers synchronize.
159  */
160 static struct list_head iam_formats = LIST_HEAD_INIT(iam_formats);
161
162 void iam_format_register(struct iam_format *fmt)
163 {
164         list_add(&fmt->if_linkage, &iam_formats);
165 }
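/*
 * A format plugs into iam by filling a struct iam_format with an ->if_guess()
 * callback and registering it. A minimal sketch for a hypothetical "foo"
 * format is shown below; the real users are the lfix and lvar formats, whose
 * init functions are invoked from iam_format_guess():
 *
 *      static int iam_foo_guess(struct iam_container *c)
 *      {
 *              // return 0 (and set up c->ic_descr) if the root block of @c
 *              // matches this format, non-zero otherwise
 *      }
 *
 *      static struct iam_format iam_foo_format = {
 *              .if_guess = iam_foo_guess,
 *      };
 *
 *      void iam_foo_format_init(void)
 *      {
 *              iam_format_register(&iam_foo_format);
 *      }
 */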
166
167 static struct buffer_head *
168 iam_load_idle_blocks(struct iam_container *c, iam_ptr_t blk)
169 {
170         struct inode *inode = c->ic_object;
171         struct iam_idle_head *head;
172         struct buffer_head *bh;
173
174         LASSERT(mutex_is_locked(&c->ic_idle_mutex));
175
176         if (blk == 0)
177                 return NULL;
178
179         bh = __ldiskfs_bread(NULL, inode, blk, 0);
180         if (IS_ERR_OR_NULL(bh)) {
181                 CERROR("%s: cannot load idle blocks, blk = %u, err = %ld\n",
182                        osd_ino2name(inode), blk, bh ? PTR_ERR(bh) : -EIO);
183                 c->ic_idle_failed = 1;
184                 if (bh == NULL)
185                         bh = ERR_PTR(-EIO);
186                 return bh;
187         }
188
189         head = (struct iam_idle_head *)(bh->b_data);
190         if (le16_to_cpu(head->iih_magic) != IAM_IDLE_HEADER_MAGIC) {
191                 CERROR("%s: invalid idle block head, blk = %u, magic = %d\n",
192                        osd_ino2name(inode), blk, le16_to_cpu(head->iih_magic));
193                 brelse(bh);
194                 c->ic_idle_failed = 1;
195                 return ERR_PTR(-EBADF);
196         }
197
198         return bh;
199 }
200
201 /*
202  * Determine the format of the given container: scan the list of registered
203  * formats and call the ->if_guess() method of each in turn.
204  */
205 static int iam_format_guess(struct iam_container *c)
206 {
207         int result;
208         struct iam_format *fmt;
209
210         /*
211          * XXX temporary initialization hook.
212          */
213         {
214                 static int initialized = 0;
215
216                 if (!initialized) {
217                         iam_lvar_format_init();
218                         iam_lfix_format_init();
219                         initialized = 1;
220                 }
221         }
222
223         result = -ENOENT;
224         list_for_each_entry(fmt, &iam_formats, if_linkage) {
225                 result = fmt->if_guess(c);
226                 if (result == 0)
227                         break;
228         }
229
230         if (result == 0) {
231                 struct buffer_head *bh;
232                 __u32 *idle_blocks;
233
234                 LASSERT(c->ic_root_bh != NULL);
235
236                 idle_blocks = (__u32 *)(c->ic_root_bh->b_data +
237                                         c->ic_descr->id_root_gap +
238                                         sizeof(struct dx_countlimit));
239                 mutex_lock(&c->ic_idle_mutex);
240                 bh = iam_load_idle_blocks(c, le32_to_cpu(*idle_blocks));
241                 if (bh != NULL && IS_ERR(bh))
242                         result = PTR_ERR(bh);
243                 else
244                         c->ic_idle_bh = bh;
245                 mutex_unlock(&c->ic_idle_mutex);
246         }
247
248         return result;
249 }
250
251 /*
252  * Initialize container @c.
253  */
254 int iam_container_init(struct iam_container *c,
255                        struct iam_descr *descr, struct inode *inode)
256 {
257         memset(c, 0, sizeof *c);
258         c->ic_descr  = descr;
259         c->ic_object = inode;
260         init_rwsem(&c->ic_sem);
261         dynlock_init(&c->ic_tree_lock);
262         mutex_init(&c->ic_idle_mutex);
263         return 0;
264 }
265
266 /*
267  * Determine container format.
268  */
269 int iam_container_setup(struct iam_container *c)
270 {
271         return iam_format_guess(c);
272 }
273
274 /*
275  * Finalize container @c, release all resources.
276  */
277 void iam_container_fini(struct iam_container *c)
278 {
279         brelse(c->ic_idle_bh);
280         c->ic_idle_bh = NULL;
281         brelse(c->ic_root_bh);
282         c->ic_root_bh = NULL;
283 }
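/*
 * Typical container life cycle, as a sketch of how the functions above fit
 * together (error handling trimmed; @descr and @inode are assumed to be
 * supplied by the caller):
 *
 *      struct iam_container c;
 *      int rc;
 *
 *      rc = iam_container_init(&c, descr, inode);  // attach descriptor/inode
 *      if (rc == 0)
 *              rc = iam_container_setup(&c);       // guess the on-disk format
 *      if (rc == 0) {
 *              ... operate on the container through iterators (iam_it_*) ...
 *              iam_container_fini(&c);             // release root/idle buffers
 *      }
 */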
284
285 void iam_path_init(struct iam_path *path, struct iam_container *c,
286                    struct iam_path_descr *pd)
287 {
288         memset(path, 0, sizeof *path);
289         path->ip_container = c;
290         path->ip_frame = path->ip_frames;
291         path->ip_data = pd;
292         path->ip_leaf.il_path = path;
293 }
294
295 static void iam_leaf_fini(struct iam_leaf *leaf);
296
297 void iam_path_release(struct iam_path *path)
298 {
299         int i;
300
301         for (i = 0; i < ARRAY_SIZE(path->ip_frames); i++) {
302                 if (path->ip_frames[i].bh != NULL) {
303                         path->ip_frames[i].at_shifted = 0;
304                         brelse(path->ip_frames[i].bh);
305                         path->ip_frames[i].bh = NULL;
306                 }
307         }
308 }
309
310 void iam_path_fini(struct iam_path *path)
311 {
312         iam_leaf_fini(&path->ip_leaf);
313         iam_path_release(path);
314 }
315
316
317 void iam_path_compat_init(struct iam_path_compat *path, struct inode *inode)
318 {
319         int i;
320
321         path->ipc_hinfo = &path->ipc_hinfo_area;
322         for (i = 0; i < ARRAY_SIZE(path->ipc_scratch); ++i)
323                 path->ipc_descr.ipd_key_scratch[i] =
324                         (struct iam_ikey *)&path->ipc_scratch[i];
325
326         iam_path_init(&path->ipc_path, &path->ipc_container, &path->ipc_descr);
327 }
328
329 void iam_path_compat_fini(struct iam_path_compat *path)
330 {
331         iam_path_fini(&path->ipc_path);
332 }
333
334 /*
335  * Helper function initializing iam_path_descr and its key scratch area.
336  */
337 struct iam_path_descr *iam_ipd_alloc(void *area, int keysize)
338 {
339         struct iam_path_descr *ipd;
340         void *karea;
341         int i;
342
343         ipd = area;
344         karea = ipd + 1;
345         for (i = 0; i < ARRAY_SIZE(ipd->ipd_key_scratch); ++i, karea += keysize)
346                 ipd->ipd_key_scratch[i] = karea;
347         return ipd;
348 }
349
350 void iam_ipd_free(struct iam_path_descr *ipd)
351 {
352 }
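/*
 * The caller of iam_ipd_alloc() owns @area and must make it large enough for
 * the descriptor itself plus one scratch key per slot. A sketch of the sizing
 * implied by the loop above (the actual callers use their own helpers to
 * allocate the area):
 *
 *      size = sizeof(struct iam_path_descr) +
 *             keysize * ARRAY_SIZE(((struct iam_path_descr *)NULL)->ipd_key_scratch);
 *      area = kmalloc(size, GFP_NOFS);
 *      if (area != NULL) {
 *              ipd = iam_ipd_alloc(area, keysize);
 *              ... use ipd with iam_path_init()/iam_it_init() ...
 *              iam_ipd_free(ipd);
 *              kfree(area);
 *      }
 */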
353
354 int iam_node_read(struct iam_container *c, iam_ptr_t ptr,
355                   handle_t *h, struct buffer_head **bh)
356 {
357         /* NB: it can be called by iam_lfix_guess() which is still at a
358          * very early stage, when c->ic_root_bh and c->ic_descr->id_ops
359          * haven't been initialized yet.
360          * Also, we don't have this for IAM dir.
361          */
362         if (c->ic_root_bh != NULL &&
363             c->ic_descr->id_ops->id_root_ptr(c) == ptr) {
364                 get_bh(c->ic_root_bh);
365                 *bh = c->ic_root_bh;
366                 return 0;
367         }
368
369         *bh = __ldiskfs_bread(h, c->ic_object, (int)ptr, 0);
370         if (IS_ERR(*bh))
371                 return PTR_ERR(*bh);
372
373         if (*bh == NULL)
374                 return -EIO;
375
376         return 0;
377 }
378
379 /*
380  * Return pointer to current leaf record. Pointer is valid while corresponding
381  * leaf node is locked and pinned.
382  */
383 static struct iam_rec *iam_leaf_rec(const struct iam_leaf *leaf)
384 {
385         return iam_leaf_ops(leaf)->rec(leaf);
386 }
387
388 /*
389  * Return pointer to the current leaf key. This function returns pointer to
390  * the key stored in node.
391  *
392  * Caller should assume that returned pointer is only valid while leaf node is
393  * pinned and locked.
394  */
395 static struct iam_key *iam_leaf_key(const struct iam_leaf *leaf)
396 {
397         return iam_leaf_ops(leaf)->key(leaf);
398 }
399
400 static int iam_leaf_key_size(const struct iam_leaf *leaf)
401 {
402         return iam_leaf_ops(leaf)->key_size(leaf);
403 }
404
405 static struct iam_ikey *iam_leaf_ikey(const struct iam_leaf *leaf,
406                                       struct iam_ikey *key)
407 {
408         return iam_leaf_ops(leaf)->ikey(leaf, key);
409 }
410
411 static int iam_leaf_keycmp(const struct iam_leaf *leaf,
412                            const struct iam_key *key)
413 {
414         return iam_leaf_ops(leaf)->key_cmp(leaf, key);
415 }
416
417 static int iam_leaf_keyeq(const struct iam_leaf *leaf,
418                           const struct iam_key *key)
419 {
420         return iam_leaf_ops(leaf)->key_eq(leaf, key);
421 }
422
423 #if LDISKFS_INVARIANT_ON
424 extern int dx_node_check(struct iam_path *p, struct iam_frame *f);
425
426 static int iam_path_check(struct iam_path *p)
427 {
428         int i;
429         int result;
430         struct iam_frame *f;
431         struct iam_descr *param;
432
433         result = 1;
434         param = iam_path_descr(p);
435         for (i = 0; result && i < ARRAY_SIZE(p->ip_frames); ++i) {
436                 f = &p->ip_frames[i];
437                 if (f->bh != NULL) {
438                         result = dx_node_check(p, f);
439                         if (result)
440                                 result = !param->id_ops->id_node_check(p, f);
441                 }
442         }
443         if (result && p->ip_leaf.il_bh != NULL)
444                 result = 1;
445         if (result == 0)
446                 ldiskfs_std_error(iam_path_obj(p)->i_sb, result);
447
448         return result;
449 }
450 #endif
451
452 static int iam_leaf_load(struct iam_path *path)
453 {
454         iam_ptr_t block;
455         int err;
456         struct iam_container *c;
457         struct buffer_head   *bh;
458         struct iam_leaf      *leaf;
459         struct iam_descr     *descr;
460
461         c     = path->ip_container;
462         leaf  = &path->ip_leaf;
463         descr = iam_path_descr(path);
464         block = path->ip_frame->leaf;
465         if (block == 0) {
466                 /* XXX bug 11027 */
467                 printk(KERN_EMERG "wrong leaf: %lu %d [%p %p %p]\n",
468                        (long unsigned)path->ip_frame->leaf,
469                        dx_get_count(dx_node_get_entries(path, path->ip_frame)),
470                        path->ip_frames[0].bh, path->ip_frames[1].bh,
471                        path->ip_frames[2].bh);
472         }
473         err = descr->id_ops->id_node_read(c, block, NULL, &bh);
474         if (err == 0) {
475                 leaf->il_bh = bh;
476                 leaf->il_curidx = block;
477                 err = iam_leaf_ops(leaf)->init(leaf);
478         }
479         return err;
480 }
481
482 static void iam_unlock_htree(struct iam_container *ic,
483                              struct dynlock_handle *lh)
484 {
485         if (lh != NULL)
486                 dynlock_unlock(&ic->ic_tree_lock, lh);
487 }
488
489
490 static void iam_leaf_unlock(struct iam_leaf *leaf)
491 {
492         if (leaf->il_lock != NULL) {
493                 iam_unlock_htree(iam_leaf_container(leaf),
494                                  leaf->il_lock);
495                 do_corr(schedule());
496                 leaf->il_lock = NULL;
497         }
498 }
499
500 static void iam_leaf_fini(struct iam_leaf *leaf)
501 {
502         if (leaf->il_path != NULL) {
503                 iam_leaf_unlock(leaf);
504                 iam_leaf_ops(leaf)->fini(leaf);
505                 if (leaf->il_bh) {
506                         brelse(leaf->il_bh);
507                         leaf->il_bh = NULL;
508                         leaf->il_curidx = 0;
509                 }
510         }
511 }
512
513 static void iam_leaf_start(struct iam_leaf *folio)
514 {
515         iam_leaf_ops(folio)->start(folio);
516 }
517
518 void iam_leaf_next(struct iam_leaf *folio)
519 {
520         iam_leaf_ops(folio)->next(folio);
521 }
522
523 static void iam_leaf_rec_add(struct iam_leaf *leaf, const struct iam_key *key,
524                              const struct iam_rec *rec)
525 {
526         iam_leaf_ops(leaf)->rec_add(leaf, key, rec);
527 }
528
529 static void iam_rec_del(struct iam_leaf *leaf, int shift)
530 {
531         iam_leaf_ops(leaf)->rec_del(leaf, shift);
532 }
533
534 int iam_leaf_at_end(const struct iam_leaf *leaf)
535 {
536         return iam_leaf_ops(leaf)->at_end(leaf);
537 }
538
539 static void iam_leaf_split(struct iam_leaf *l, struct buffer_head **bh,
540                            iam_ptr_t nr)
541 {
542         iam_leaf_ops(l)->split(l, bh, nr);
543 }
544
545 static inline int iam_leaf_empty(struct iam_leaf *l)
546 {
547         return iam_leaf_ops(l)->leaf_empty(l);
548 }
549
550 int iam_leaf_can_add(const struct iam_leaf *l,
551                      const struct iam_key *k, const struct iam_rec *r)
552 {
553         return iam_leaf_ops(l)->can_add(l, k, r);
554 }
555
556 static int iam_txn_dirty(handle_t *handle,
557                          struct iam_path *path, struct buffer_head *bh)
558 {
559         int result;
560
561         result = ldiskfs_handle_dirty_metadata(handle, NULL, bh);
562         if (result != 0)
563                 ldiskfs_std_error(iam_path_obj(path)->i_sb, result);
564         return result;
565 }
566
567 static int iam_txn_add(handle_t *handle,
568                        struct iam_path *path, struct buffer_head *bh)
569 {
570         int result;
571
572         result = ldiskfs_journal_get_write_access(handle, bh);
573         if (result != 0)
574                 ldiskfs_std_error(iam_path_obj(path)->i_sb, result);
575         return result;
576 }
577
578 /***********************************************************************/
579 /* iterator interface                                                  */
580 /***********************************************************************/
581
582 static enum iam_it_state it_state(const struct iam_iterator *it)
583 {
584         return it->ii_state;
585 }
586
587 /*
588  * Helper function returning the container an iterator is attached to.
589  */
590 static struct iam_container *iam_it_container(const struct iam_iterator *it)
591 {
592         return it->ii_path.ip_container;
593 }
594
595 static inline int it_keycmp(const struct iam_iterator *it,
596                             const struct iam_key *k)
597 {
598         return iam_leaf_keycmp(&it->ii_path.ip_leaf, k);
599 }
600
601 static inline int it_keyeq(const struct iam_iterator *it,
602                            const struct iam_key *k)
603 {
604         return iam_leaf_keyeq(&it->ii_path.ip_leaf, k);
605 }
606
607 static int it_ikeycmp(const struct iam_iterator *it, const struct iam_ikey *ik)
608 {
609         return iam_ikeycmp(it->ii_path.ip_container,
610                            iam_leaf_ikey(&it->ii_path.ip_leaf,
611                                          iam_path_ikey(&it->ii_path, 0)), ik);
612 }
613
614 static inline int it_at_rec(const struct iam_iterator *it)
615 {
616         return !iam_leaf_at_end(&it->ii_path.ip_leaf);
617 }
618
619 static inline int it_before(const struct iam_iterator *it)
620 {
621         return it_state(it) == IAM_IT_SKEWED && it_at_rec(it);
622 }
623
624 /*
625  * Helper wrapper around iam_it_get(): returns 0 (success) only when a record
626  * with exactly the requested key is found.
627  */
628 static int iam_it_get_exact(struct iam_iterator *it, const struct iam_key *k)
629 {
630         int result;
631
632         result = iam_it_get(it, k);
633         if (result > 0)
634                 result = 0;
635         else if (result == 0)
636                 /*
637                  * Return -ENOENT if cursor is located above record with a key
638                  * different from one specified, or in the empty leaf.
639                  *
640                  * XXX returning -ENOENT only works if iam_it_get() never
641                  * returns -ENOENT as a legitimate error.
642                  */
643                 result = -ENOENT;
644         return result;
645 }
646
647 void iam_container_write_lock(struct iam_container *ic)
648 {
649         down_write(&ic->ic_sem);
650 }
651
652 void iam_container_write_unlock(struct iam_container *ic)
653 {
654         up_write(&ic->ic_sem);
655 }
656
657 void iam_container_read_lock(struct iam_container *ic)
658 {
659         down_read(&ic->ic_sem);
660 }
661
662 void iam_container_read_unlock(struct iam_container *ic)
663 {
664         up_read(&ic->ic_sem);
665 }
666
667 /*
668  * Initialize iterator to IAM_IT_DETACHED state.
669  *
670  * postcondition: it_state(it) == IAM_IT_DETACHED
671  */
672 int  iam_it_init(struct iam_iterator *it, struct iam_container *c, __u32 flags,
673                  struct iam_path_descr *pd)
674 {
675         memset(it, 0, sizeof *it);
676         it->ii_flags  = flags;
677         it->ii_state  = IAM_IT_DETACHED;
678         iam_path_init(&it->ii_path, c, pd);
679         return 0;
680 }
681
682 /*
683  * Finalize iterator and release all resources.
684  *
685  * precondition: it_state(it) == IAM_IT_DETACHED
686  */
687 void iam_it_fini(struct iam_iterator *it)
688 {
689         assert_corr(it_state(it) == IAM_IT_DETACHED);
690         iam_path_fini(&it->ii_path);
691 }
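/*
 * Sketch of the basic iterator life cycle built from the functions above and
 * the lookup entry points below (error handling trimmed; @c, @pd and @key are
 * assumed to be prepared by the caller):
 *
 *      struct iam_iterator it;
 *      int rc;
 *
 *      iam_it_init(&it, c, IAM_IT_MOVE, pd);           // IAM_IT_DETACHED
 *      rc = iam_it_get(&it, key);                      // attach at/near @key
 *      if (rc >= 0 && it_state(&it) == IAM_IT_ATTACHED) {
 *              ... iam_it_key_get(&it) / iam_it_rec_get(&it) ...
 *      }
 *      iam_it_put(&it);                                // back to IAM_IT_DETACHED
 *      iam_it_fini(&it);
 */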
692
693 /*
694  * These locking primitives are used to protect parts of the directory's
695  * htree. The unit of protection is a block: leaf or index.
696  */
697 static struct dynlock_handle *iam_lock_htree(struct iam_container *ic,
698                                              unsigned long value,
699                                              enum dynlock_type lt)
700 {
701         return dynlock_lock(&ic->ic_tree_lock, value, lt, GFP_NOFS);
702 }
703
704 static int iam_index_lock(struct iam_path *path, struct dynlock_handle **lh)
705 {
706         struct iam_frame *f;
707
708         for (f = path->ip_frame; f >= path->ip_frames; --f, ++lh) {
709                 do_corr(schedule());
710                 *lh = iam_lock_htree(path->ip_container, f->curidx, DLT_READ);
711                 if (*lh == NULL)
712                         return -ENOMEM;
713         }
714         return 0;
715 }
716
717 /*
718  * Fast check for frame consistency.
719  */
720 static int iam_check_fast(struct iam_path *path, struct iam_frame *frame)
721 {
722         struct iam_container *bag;
723         struct iam_entry *next;
724         struct iam_entry *last;
725         struct iam_entry *entries;
726         struct iam_entry *at;
727
728         bag     = path->ip_container;
729         at      = frame->at;
730         entries = frame->entries;
731         last    = iam_entry_shift(path, entries, dx_get_count(entries) - 1);
732
733         if (unlikely(at > last))
734                 return -EAGAIN;
735
736         if (unlikely(dx_get_block(path, at) != frame->leaf))
737                 return -EAGAIN;
738
739         if (unlikely(iam_ikeycmp(bag, iam_ikey_at(path, at),
740                                  path->ip_ikey_target) > 0))
741                 return -EAGAIN;
742
743         next = iam_entry_shift(path, at, +1);
744         if (next <= last) {
745                 if (unlikely(iam_ikeycmp(bag, iam_ikey_at(path, next),
746                                          path->ip_ikey_target) <= 0))
747                         return -EAGAIN;
748         }
749         return 0;
750 }
751
752 int dx_index_is_compat(struct iam_path *path)
753 {
754         return iam_path_descr(path) == NULL;
755 }
756
757 /*
758  * dx_find_position
759  * iam_find_position
760  *
761  * Search the position of the specified hash in an index node.
762  */
763
764 static struct iam_entry *iam_find_position(struct iam_path *path,
765                                            struct iam_frame *frame)
766 {
767         int count;
768         struct iam_entry *p;
769         struct iam_entry *q;
770         struct iam_entry *m;
771
772         count = dx_get_count(frame->entries);
773         assert_corr(count && count <= dx_get_limit(frame->entries));
774         p = iam_entry_shift(path, frame->entries,
775                             dx_index_is_compat(path) ? 1 : 2);
776         q = iam_entry_shift(path, frame->entries, count - 1);
777         while (p <= q) {
778                 m = iam_entry_shift(path, p, iam_entry_diff(path, q, p) / 2);
779                 if (iam_ikeycmp(path->ip_container, iam_ikey_at(path, m),
780                                 path->ip_ikey_target) > 0)
781                         q = iam_entry_shift(path, m, -1);
782                 else
783                         p = iam_entry_shift(path, m, +1);
784         }
785         return iam_entry_shift(path, p, -1);
786 }
787
788
789
790 static iam_ptr_t iam_find_ptr(struct iam_path *path, struct iam_frame *frame)
791 {
792         return dx_get_block(path, iam_find_position(path, frame));
793 }
794
795 void iam_insert_key(struct iam_path *path, struct iam_frame *frame,
796                     const struct iam_ikey *key, iam_ptr_t ptr)
797 {
798         struct iam_entry *entries = frame->entries;
799         struct iam_entry *new = iam_entry_shift(path, frame->at, +1);
800         int count = dx_get_count(entries);
801
802         /*
803          * Unfortunately we cannot assert this, as this function is sometimes
804          * called by VFS under i_sem and without pdirops lock.
805          */
806         assert_corr(1 || iam_frame_is_locked(path, frame));
807         assert_corr(count < dx_get_limit(entries));
808         assert_corr(frame->at < iam_entry_shift(path, entries, count));
809         assert_inv(dx_node_check(path, frame));
810
811         memmove(iam_entry_shift(path, new, 1), new,
812                 (char *)iam_entry_shift(path, entries, count) - (char *)new);
813         dx_set_ikey(path, new, key);
814         dx_set_block(path, new, ptr);
815         dx_set_count(entries, count + 1);
816         assert_inv(dx_node_check(path, frame));
817 }
818
819 void iam_insert_key_lock(struct iam_path *path, struct iam_frame *frame,
820                          const struct iam_ikey *key, iam_ptr_t ptr)
821 {
822         iam_lock_bh(frame->bh);
823         iam_insert_key(path, frame, key, ptr);
824         iam_unlock_bh(frame->bh);
825 }
826 /*
827  * returns 0 if path was unchanged, -EAGAIN otherwise.
828  */
829 static int iam_check_path(struct iam_path *path, struct iam_frame *frame)
830 {
831         int equal;
832
833         iam_lock_bh(frame->bh);
834         equal = iam_check_fast(path, frame) == 0 ||
835                 frame->leaf == iam_find_ptr(path, frame);
836         DX_DEVAL(iam_lock_stats.dls_bh_again += !equal);
837         iam_unlock_bh(frame->bh);
838
839         return equal ? 0 : -EAGAIN;
840 }
841
842 static int iam_lookup_try(struct iam_path *path)
843 {
844         u32 ptr;
845         int err = 0;
846         int i;
847
848         struct iam_descr *param;
849         struct iam_frame *frame;
850         struct iam_container *c;
851
852         param = iam_path_descr(path);
853         c = path->ip_container;
854
855         ptr = param->id_ops->id_root_ptr(c);
856         for (frame = path->ip_frames, i = 0; i <= path->ip_indirect;
857              ++frame, ++i) {
858                 err = param->id_ops->id_node_read(c, (iam_ptr_t)ptr, NULL,
859                                                   &frame->bh);
860                 do_corr(schedule());
861
862                 iam_lock_bh(frame->bh);
863                 /*
864                  * node must be initialized under bh lock because concurrent
865                  * creation procedure may change it and iam_lookup_try() will
866                  * see obsolete tree height. -bzzz
867                  */
868                 if (err != 0)
869                         break;
870
871                 if (LDISKFS_INVARIANT_ON) {
872                         err = param->id_ops->id_node_check(path, frame);
873                         if (err != 0)
874                                 break;
875                 }
876
877                 err = param->id_ops->id_node_load(path, frame);
878                 if (err != 0)
879                         break;
880
881                 assert_inv(dx_node_check(path, frame));
882                 /*
883                  * splitting may change the root index block and move the
884                  * hash we're looking for into another index block, so we
885                  * have to check for this situation and repeat from the
886                  * beginning if the path got changed -bzzz
887                  */
888                 if (i > 0) {
889                         err = iam_check_path(path, frame - 1);
890                         if (err != 0)
891                                 break;
892                 }
893
894                 frame->at = iam_find_position(path, frame);
895                 frame->curidx = ptr;
896                 frame->leaf = ptr = dx_get_block(path, frame->at);
897
898                 iam_unlock_bh(frame->bh);
899                 do_corr(schedule());
900         }
901         if (err != 0)
902                 iam_unlock_bh(frame->bh);
903         path->ip_frame = --frame;
904         return err;
905 }
906
907 static int __iam_path_lookup(struct iam_path *path)
908 {
909         int err;
910         int i;
911
912         for (i = 0; i < DX_MAX_TREE_HEIGHT; ++ i)
913                 assert(path->ip_frames[i].bh == NULL);
914
915         do {
916                 err = iam_lookup_try(path);
917                 do_corr(schedule());
918                 if (err != 0)
919                         iam_path_fini(path);
920         } while (err == -EAGAIN);
921
922         return err;
923 }
924
925 /*
926  * returns 0 if path was unchanged, -EAGAIN otherwise.
927  */
928 static int iam_check_full_path(struct iam_path *path, int search)
929 {
930         struct iam_frame *bottom;
931         struct iam_frame *scan;
932         int i;
933         int result;
934
935         do_corr(schedule());
936
937         for (bottom = path->ip_frames, i = 0;
938              i < DX_MAX_TREE_HEIGHT && bottom->bh != NULL; ++bottom, ++i) {
939                 ; /* find last filled in frame */
940         }
941
942         /*
943          * Lock frames, bottom to top.
944          */
945         for (scan = bottom - 1; scan >= path->ip_frames; --scan)
946                 iam_lock_bh(scan->bh);
947         /*
948          * Check them top to bottom.
949          */
950         result = 0;
951         for (scan = path->ip_frames; scan < bottom; ++scan) {
952                 struct iam_entry *pos;
953
954                 if (search) {
955                         if (iam_check_fast(path, scan) == 0)
956                                 continue;
957
958                         pos = iam_find_position(path, scan);
959                         if (scan->leaf != dx_get_block(path, pos)) {
960                                 result = -EAGAIN;
961                                 break;
962                         }
963                         scan->at = pos;
964                 } else {
965                         pos = iam_entry_shift(path, scan->entries,
966                                               dx_get_count(scan->entries) - 1);
967                         if (scan->at > pos ||
968                             scan->leaf != dx_get_block(path, scan->at)) {
969                                 result = -EAGAIN;
970                                 break;
971                         }
972                 }
973         }
974
975         /*
976          * Unlock top to bottom.
977          */
978         for (scan = path->ip_frames; scan < bottom; ++scan)
979                 iam_unlock_bh(scan->bh);
980         DX_DEVAL(iam_lock_stats.dls_bh_full_again += !!result);
981         do_corr(schedule());
982
983         return result;
984 }
985
986
987 /*
988  * Performs path lookup and returns with found leaf (if any) locked by htree
989  * lock.
990  */
991 static int iam_lookup_lock(struct iam_path *path,
992                            struct dynlock_handle **dl, enum dynlock_type lt)
993 {
994         int result;
995
996         while ((result = __iam_path_lookup(path)) == 0) {
997                 do_corr(schedule());
998                 *dl = iam_lock_htree(path->ip_container, path->ip_frame->leaf,
999                                      lt);
1000                 if (*dl == NULL) {
1001                         iam_path_fini(path);
1002                         result = -ENOMEM;
1003                         break;
1004                 }
1005                 do_corr(schedule());
1006                 /*
1007                  * while we were locking it, the leaf we just found may
1008                  * have been split, so we need to check for this -bzzz
1009                  */
1010                 if (iam_check_full_path(path, 1) == 0)
1011                         break;
1012                 iam_unlock_htree(path->ip_container, *dl);
1013                 *dl = NULL;
1014                 iam_path_fini(path);
1015         }
1016         return result;
1017 }
1018 /*
1019  * Performs tree top-to-bottom traversal starting from root, and loads leaf
1020  * node.
1021  */
1022 static int iam_path_lookup(struct iam_path *path, int index)
1023 {
1024         struct iam_leaf  *leaf;
1025         int result;
1026
1027         leaf = &path->ip_leaf;
1028         result = iam_lookup_lock(path, &leaf->il_lock, DLT_WRITE);
1029         assert_inv(iam_path_check(path));
1030         do_corr(schedule());
1031         if (result == 0) {
1032                 result = iam_leaf_load(path);
1033                 if (result == 0) {
1034                         do_corr(schedule());
1035                         if (index)
1036                                 result = iam_leaf_ops(leaf)->
1037                                         ilookup(leaf, path->ip_ikey_target);
1038                         else
1039                                 result = iam_leaf_ops(leaf)->
1040                                         lookup(leaf, path->ip_key_target);
1041                         do_corr(schedule());
1042                 }
1043                 if (result < 0)
1044                         iam_leaf_unlock(leaf);
1045         }
1046         return result;
1047 }
1048
1049 /*
1050  * Common part of iam_it_{i,}get().
1051  */
1052 static int __iam_it_get(struct iam_iterator *it, int index)
1053 {
1054         int result;
1055         assert_corr(it_state(it) == IAM_IT_DETACHED);
1056
1057         result = iam_path_lookup(&it->ii_path, index);
1058         if (result >= 0) {
1059                 int collision;
1060
1061                 collision = result & IAM_LOOKUP_LAST;
1062                 switch (result & ~IAM_LOOKUP_LAST) {
1063                 case IAM_LOOKUP_EXACT:
1064                         result = +1;
1065                         it->ii_state = IAM_IT_ATTACHED;
1066                         break;
1067                 case IAM_LOOKUP_OK:
1068                         result = 0;
1069                         it->ii_state = IAM_IT_ATTACHED;
1070                         break;
1071                 case IAM_LOOKUP_BEFORE:
1072                 case IAM_LOOKUP_EMPTY:
1073                         result = 0;
1074                         it->ii_state = IAM_IT_SKEWED;
1075                         break;
1076                 default:
1077                         assert(0);
1078                 }
1079                 result |= collision;
1080         }
1081         /*
1082          * See iam_it_get_exact() for explanation.
1083          */
1084         assert_corr(result != -ENOENT);
1085         return result;
1086 }
1087
1088 /*
1089  * A record with the correct hash but a different key was found: iterate
1090  * through the hash collision chain, looking for the correct record.
1091  */
1092 static int iam_it_collision(struct iam_iterator *it)
1093 {
1094         int result;
1095
1096         assert(ergo(it_at_rec(it), !it_keyeq(it, it->ii_path.ip_key_target)));
1097
1098         while ((result = iam_it_next(it)) == 0) {
1099                 do_corr(schedule());
1100                 if (it_ikeycmp(it, it->ii_path.ip_ikey_target) != 0)
1101                         return -ENOENT;
1102                 if (it_keyeq(it, it->ii_path.ip_key_target))
1103                         return 0;
1104         }
1105         return result;
1106 }
1107
1108 /*
1109  * Attach iterator. After successful completion, @it points to record with
1110  * least key not larger than @k.
1111  *
1112  * Return value: 0: positioned on existing record,
1113  *             +ve: exact position found,
1114  *             -ve: error.
1115  *
1116  * precondition:  it_state(it) == IAM_IT_DETACHED
1117  * postcondition: ergo(result == 0 && it_state(it) == IAM_IT_ATTACHED,
1118  *                     it_keycmp(it, k) <= 0)
1119  */
1120 int iam_it_get(struct iam_iterator *it, const struct iam_key *k)
1121 {
1122         int result;
1123         assert_corr(it_state(it) == IAM_IT_DETACHED);
1124
1125         it->ii_path.ip_ikey_target = NULL;
1126         it->ii_path.ip_key_target  = k;
1127
1128         result = __iam_it_get(it, 0);
1129
1130         if (result == IAM_LOOKUP_LAST) {
1131                 result = iam_it_collision(it);
1132                 if (result != 0) {
1133                         iam_it_put(it);
1134                         iam_it_fini(it);
1135                         result = __iam_it_get(it, 0);
1136                 } else
1137                         result = +1;
1138         }
1139         if (result > 0)
1140                 result &= ~IAM_LOOKUP_LAST;
1141
1142         assert_corr(ergo(result > 0, it_keycmp(it, k) == 0));
1143         assert_corr(ergo(result == 0 && it_state(it) == IAM_IT_ATTACHED,
1144                          it_keycmp(it, k) <= 0));
1145         return result;
1146 }
1147
1148 /*
1149  * Attach iterator by index key.
1150  */
1151 static int iam_it_iget(struct iam_iterator *it, const struct iam_ikey *k)
1152 {
1153         assert_corr(it_state(it) == IAM_IT_DETACHED);
1154
1155         it->ii_path.ip_ikey_target = k;
1156         return __iam_it_get(it, 1) & ~IAM_LOOKUP_LAST;
1157 }
1158
1159 /*
1160  * Attach iterator, and assure it points to the record (not skewed).
1161  *
1162  * Return value: 0: positioned on existing record,
1163  *             +ve: exact position found,
1164  *             -ve: error.
1165  *
1166  * precondition:  it_state(it) == IAM_IT_DETACHED &&
1167  *                !(it->ii_flags&IAM_IT_WRITE)
1168  * postcondition: ergo(result == 0, it_state(it) == IAM_IT_ATTACHED)
1169  */
1170 int iam_it_get_at(struct iam_iterator *it, const struct iam_key *k)
1171 {
1172         int result;
1173         assert_corr(it_state(it) == IAM_IT_DETACHED &&
1174                     !(it->ii_flags&IAM_IT_WRITE));
1175         result = iam_it_get(it, k);
1176         if (result == 0) {
1177                 if (it_state(it) != IAM_IT_ATTACHED) {
1178                         assert_corr(it_state(it) == IAM_IT_SKEWED);
1179                         result = iam_it_next(it);
1180                 }
1181         }
1182         assert_corr(ergo(result >= 0, it_state(it) == IAM_IT_ATTACHED));
1183         return result;
1184 }
1185
1186 /*
1187  * Duplicates iterator.
1188  *
1189  * postcondition: it_state(dst) == it_state(src) &&
1190  *                iam_it_container(dst) == iam_it_container(src) &&
1191  *                dst->ii_flags = src->ii_flags &&
1192  *                ergo(it_state(src) == IAM_IT_ATTACHED,
1193  *                     iam_it_rec_get(dst) == iam_it_rec_get(src) &&
1194  *                     iam_it_key_get(dst) == iam_it_key_get(src))
1195  */
1196 void iam_it_dup(struct iam_iterator *dst, const struct iam_iterator *src)
1197 {
1198         dst->ii_flags     = src->ii_flags;
1199         dst->ii_state     = src->ii_state;
1200         /* XXX not yet. iam_path_dup(&dst->ii_path, &src->ii_path); */
1201         /*
1202          * XXX: duplicate lock.
1203          */
1204         assert_corr(it_state(dst) == it_state(src));
1205         assert_corr(iam_it_container(dst) == iam_it_container(src));
1206         assert_corr(dst->ii_flags = src->ii_flags);
1207         assert_corr(ergo(it_state(src) == IAM_IT_ATTACHED,
1208                     iam_it_rec_get(dst) == iam_it_rec_get(src) &&
1209                     iam_it_key_get(dst) == iam_it_key_get(src)));
1210
1211 }
1212
1213 /*
1214  * Detach iterator. Does nothing in detached state.
1215  *
1216  * postcondition: it_state(it) == IAM_IT_DETACHED
1217  */
1218 void iam_it_put(struct iam_iterator *it)
1219 {
1220         if (it->ii_state != IAM_IT_DETACHED) {
1221                 it->ii_state = IAM_IT_DETACHED;
1222                 iam_leaf_fini(&it->ii_path.ip_leaf);
1223         }
1224 }
1225
1226 static struct iam_ikey *iam_it_ikey_get(const struct iam_iterator *it,
1227                                         struct iam_ikey *ikey);
1228
1229
1230 /*
1231  * This function increments the frame pointer to search the next leaf
1232  * block, and reads in the necessary intervening nodes if the search
1233  * should be necessary.  Whether or not the search is necessary is
1234  * controlled by the hash parameter.  If the hash value is even, then
1235  * the search is only continued if the next block starts with that
1236  * hash value.  This is used if we are searching for a specific file.
1237  *
1238  * If the hash value is HASH_NB_ALWAYS, then always go to the next block.
1239  *
1240  * This function returns 1 if the caller should continue to search,
1241  * or 0 if it should not.  If there is an error reading one of the
1242  * index blocks, it will return a negative error code.
1243  *
1244  * If start_hash is non-null, it will be filled in with the starting
1245  * hash of the next page.
1246  */
1247 static int iam_htree_advance(struct inode *dir, __u32 hash,
1248                               struct iam_path *path, __u32 *start_hash,
1249                               int compat)
1250 {
1251         struct iam_frame *p;
1252         struct buffer_head *bh;
1253         int err, num_frames = 0;
1254         __u32 bhash;
1255
1256         p = path->ip_frame;
1257         /*
1258          * Find the next leaf page by incrementing the frame pointer.
1259          * If we run out of entries in the interior node, loop around and
1260          * increment pointer in the parent node.  When we break out of
1261          * this loop, num_frames indicates the number of interior
1262          * nodes that need to be read.
1263          */
1264         while (1) {
1265                 do_corr(schedule());
1266                 iam_lock_bh(p->bh);
1267                 if (p->at_shifted)
1268                         p->at_shifted = 0;
1269                 else
1270                         p->at = iam_entry_shift(path, p->at, +1);
1271                 if (p->at < iam_entry_shift(path, p->entries,
1272                                             dx_get_count(p->entries))) {
1273                         p->leaf = dx_get_block(path, p->at);
1274                         iam_unlock_bh(p->bh);
1275                         break;
1276                 }
1277                 iam_unlock_bh(p->bh);
1278                 if (p == path->ip_frames)
1279                         return 0;
1280                 num_frames++;
1281                 --p;
1282         }
1283
1284         if (compat) {
1285                 /*
1286                  * Htree hash magic.
1287                  */
1288                 /*
1289                  * If the hash is 1, then continue only if the next page has a
1290                  * continuation hash of any value.  This is used for readdir
1291                  * handling.  Otherwise, check to see if the hash matches the
1292                  * desired continuation hash.  If it doesn't, return since
1293                  * there's no point to read in the successive index pages.
1294                  */
1295                 dx_get_ikey(path, p->at, (struct iam_ikey *)&bhash);
1296                 if (start_hash)
1297                         *start_hash = bhash;
1298                 if ((hash & 1) == 0) {
1299                         if ((bhash & ~1) != hash)
1300                                 return 0;
1301                 }
1302         }
1303         /*
1304          * If the hash is HASH_NB_ALWAYS, we always go to the next
1305          * block so no check is necessary
1306          */
1307         while (num_frames--) {
1308                 iam_ptr_t idx;
1309
1310                 do_corr(schedule());
1311                 iam_lock_bh(p->bh);
1312                 idx = p->leaf = dx_get_block(path, p->at);
1313                 iam_unlock_bh(p->bh);
1314                 err = iam_path_descr(path)->id_ops->
1315                         id_node_read(path->ip_container, idx, NULL, &bh);
1316                 if (err != 0)
1317                         return err; /* Failure */
1318                 ++p;
1319                 brelse(p->bh);
1320                 assert_corr(p->bh != bh);
1321                 p->bh = bh;
1322                 p->entries = dx_node_get_entries(path, p);
1323                 p->at = iam_entry_shift(path, p->entries, !compat);
1324                 assert_corr(p->curidx != idx);
1325                 p->curidx = idx;
1326                 iam_lock_bh(p->bh);
1327                 assert_corr(p->leaf != dx_get_block(path, p->at));
1328                 p->leaf = dx_get_block(path, p->at);
1329                 iam_unlock_bh(p->bh);
1330                 assert_inv(dx_node_check(path, p));
1331         }
1332         return 1;
1333 }
1334
1335
1336 static inline int iam_index_advance(struct iam_path *path)
1337 {
1338         return iam_htree_advance(iam_path_obj(path), 0, path, NULL, 0);
1339 }
1340
1341 static void iam_unlock_array(struct iam_container *ic,
1342                              struct dynlock_handle **lh)
1343 {
1344         int i;
1345
1346         for (i = 0; i < DX_MAX_TREE_HEIGHT; ++i, ++lh) {
1347                 if (*lh != NULL) {
1348                         iam_unlock_htree(ic, *lh);
1349                         *lh = NULL;
1350                 }
1351         }
1352 }
1353 /*
1354  * Advance index part of @path to point to the next leaf. Returns 1 on
1355  * success, 0 when the end of the container was reached. Leaf node is locked.
1356  */
1357 int iam_index_next(struct iam_container *c, struct iam_path *path)
1358 {
1359         iam_ptr_t cursor;
1360         struct dynlock_handle *lh[DX_MAX_TREE_HEIGHT] = { NULL, };
1361         int result;
1362
1363         /*
1364          * Locking for iam_index_next()... is to be described.
1365          */
1366
1367         cursor = path->ip_frame->leaf;
1368
1369         while (1) {
1370                 result = iam_index_lock(path, lh);
1371                 do_corr(schedule());
1372                 if (result < 0)
1373                         break;
1374
1375                 result = iam_check_full_path(path, 0);
1376                 if (result == 0 && cursor == path->ip_frame->leaf) {
1377                         result = iam_index_advance(path);
1378
1379                         assert_corr(result == 0 ||
1380                                     cursor != path->ip_frame->leaf);
1381                         break;
1382                 }
1383                 do {
1384                         iam_unlock_array(c, lh);
1385
1386                         iam_path_release(path);
1387                         do_corr(schedule());
1388
1389                         result = __iam_path_lookup(path);
1390                         if (result < 0)
1391                                 break;
1392
1393                         while (path->ip_frame->leaf != cursor) {
1394                                 do_corr(schedule());
1395
1396                                 result = iam_index_lock(path, lh);
1397                                 do_corr(schedule());
1398                                 if (result < 0)
1399                                         break;
1400
1401                                 result = iam_check_full_path(path, 0);
1402                                 if (result != 0)
1403                                         break;
1404
1405                                 result = iam_index_advance(path);
1406                                 if (result == 0) {
1407                                         CERROR("cannot find cursor : %u\n",
1408                                                 cursor);
1409                                         result = -EIO;
1410                                 }
1411                                 if (result < 0)
1412                                         break;
1413                                 result = iam_check_full_path(path, 0);
1414                                 if (result != 0)
1415                                         break;
1416                                 iam_unlock_array(c, lh);
1417                         }
1418                 } while (result == -EAGAIN);
1419                 if (result < 0)
1420                         break;
1421         }
1422         iam_unlock_array(c, lh);
1423         return result;
1424 }
1425
1426 /*
1427  * Move iterator one record right.
1428  *
1429  * Return value: 0: success,
1430  *              +1: end of container reached
1431  *             -ve: error
1432  *
1433  * precondition:  (it_state(it) == IAM_IT_ATTACHED ||
1434  *                 it_state(it) == IAM_IT_SKEWED) && it->ii_flags&IAM_IT_MOVE
1435  * postcondition: ergo(result == 0, it_state(it) == IAM_IT_ATTACHED) &&
1436  *                ergo(result >  0, it_state(it) == IAM_IT_DETACHED)
1437  */
1438 int iam_it_next(struct iam_iterator *it)
1439 {
1440         int result;
1441         struct iam_path      *path;
1442         struct iam_leaf      *leaf;
1443         do_corr(struct iam_ikey *ik_orig);
1444
1445         /* assert_corr(it->ii_flags&IAM_IT_MOVE); */
1446         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1447                     it_state(it) == IAM_IT_SKEWED);
1448
1449         path = &it->ii_path;
1450         leaf = &path->ip_leaf;
1451
1452         assert_corr(iam_leaf_is_locked(leaf));
1453
1454         result = 0;
1455         do_corr(ik_orig = it_at_rec(it) ?
1456                 iam_it_ikey_get(it, iam_path_ikey(path, 2)) : NULL);
1457         if (it_before(it)) {
1458                 assert_corr(!iam_leaf_at_end(leaf));
1459                 it->ii_state = IAM_IT_ATTACHED;
1460         } else {
1461                 if (!iam_leaf_at_end(leaf))
1462                         /* advance within leaf node */
1463                         iam_leaf_next(leaf);
1464                 /*
1465                  * multiple iterations may be necessary due to empty leaves.
1466                  */
1467                 while (result == 0 && iam_leaf_at_end(leaf)) {
1468                         do_corr(schedule());
1469                         /* advance index portion of the path */
1470                         result = iam_index_next(iam_it_container(it), path);
1471                         assert_corr(iam_leaf_is_locked(leaf));
1472                         if (result == 1) {
1473                                 struct dynlock_handle *lh;
1474                                 lh = iam_lock_htree(iam_it_container(it),
1475                                                     path->ip_frame->leaf,
1476                                                     DLT_WRITE);
1477                                 if (lh != NULL) {
1478                                         iam_leaf_fini(leaf);
1479                                         leaf->il_lock = lh;
1480                                         result = iam_leaf_load(path);
1481                                         if (result == 0)
1482                                                 iam_leaf_start(leaf);
1483                                 } else
1484                                         result = -ENOMEM;
1485                         } else if (result == 0)
1486                                 /* end of container reached */
1487                                 result = +1;
1488                         if (result != 0)
1489                                 iam_it_put(it);
1490                 }
1491                 if (result == 0)
1492                         it->ii_state = IAM_IT_ATTACHED;
1493         }
1494         assert_corr(ergo(result == 0, it_state(it) == IAM_IT_ATTACHED));
1495         assert_corr(ergo(result >  0, it_state(it) == IAM_IT_DETACHED));
1496         assert_corr(ergo(result == 0 && ik_orig != NULL,
1497                          it_ikeycmp(it, ik_orig) >= 0));
1498         return result;
1499 }
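/*
 * Sketch of a forward scan over the container using iam_it_next() (the
 * iterator is assumed to have been initialized with IAM_IT_MOVE; error
 * handling trimmed):
 *
 *      rc = iam_it_get_at(&it, start_key);     // 0 or +1: positioned on a record
 *      while (rc >= 0 && it_state(&it) == IAM_IT_ATTACHED) {
 *              ... consume iam_it_key_get(&it) / iam_it_rec_get(&it) ...
 *              rc = iam_it_next(&it);          // +1: end of container reached
 *      }
 *      iam_it_put(&it);
 */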
1500
1501 /*
1502  * Return pointer to the record under iterator.
1503  *
1504  * precondition:  it_state(it) == IAM_IT_ATTACHED && it_at_rec(it)
1505  * postcondition: it_state(it) == IAM_IT_ATTACHED
1506  */
1507 struct iam_rec *iam_it_rec_get(const struct iam_iterator *it)
1508 {
1509         assert_corr(it_state(it) == IAM_IT_ATTACHED);
1510         assert_corr(it_at_rec(it));
1511         return iam_leaf_rec(&it->ii_path.ip_leaf);
1512 }
1513
1514 static void iam_it_reccpy(struct iam_iterator *it, const struct iam_rec *r)
1515 {
1516         struct iam_leaf *folio;
1517
1518         folio = &it->ii_path.ip_leaf;
1519         iam_leaf_ops(folio)->rec_set(folio, r);
1520 }
1521
1522 /*
1523  * Replace contents of record under iterator.
1524  *
1525  * precondition:  it_state(it) == IAM_IT_ATTACHED &&
1526  *                it->ii_flags&IAM_IT_WRITE
1527  * postcondition: it_state(it) == IAM_IT_ATTACHED &&
1528  *                ergo(result == 0, !memcmp(iam_it_rec_get(it), r, ...))
1529  */
1530 int iam_it_rec_set(handle_t *h,
1531                    struct iam_iterator *it, const struct iam_rec *r)
1532 {
1533         int result;
1534         struct iam_path *path;
1535         struct buffer_head *bh;
1536
1537         assert_corr(it_state(it) == IAM_IT_ATTACHED &&
1538                     it->ii_flags&IAM_IT_WRITE);
1539         assert_corr(it_at_rec(it));
1540
1541         path = &it->ii_path;
1542         bh   = path->ip_leaf.il_bh;
1543         result = iam_txn_add(h, path, bh);
1544         if (result == 0) {
1545                 iam_it_reccpy(it, r);
1546                 result = iam_txn_dirty(h, path, bh);
1547         }
1548         return result;
1549 }
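/*
 * Sketch of an in-place record update through the iterator (the iterator must
 * have been initialized with IAM_IT_WRITE; @h is a journal handle already
 * started by the caller; error handling trimmed):
 *
 *      rc = iam_it_get(&it, key);
 *      if (rc > 0)                             // exact match found
 *              rc = iam_it_rec_set(h, &it, new_rec);
 *      iam_it_put(&it);
 */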
1550
1551 /*
1552  * Return pointer to the index key under iterator.
1553  *
1554  * precondition:  it_state(it) == IAM_IT_ATTACHED ||
1555  *                it_state(it) == IAM_IT_SKEWED
1556  */
1557 static struct iam_ikey *iam_it_ikey_get(const struct iam_iterator *it,
1558                                         struct iam_ikey *ikey)
1559 {
1560         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1561                     it_state(it) == IAM_IT_SKEWED);
1562         assert_corr(it_at_rec(it));
1563         return iam_leaf_ikey(&it->ii_path.ip_leaf, ikey);
1564 }
1565
1566 /*
1567  * Return pointer to the key under iterator.
1568  *
1569  * precondition:  it_state(it) == IAM_IT_ATTACHED ||
1570  *                it_state(it) == IAM_IT_SKEWED
1571  */
1572 struct iam_key *iam_it_key_get(const struct iam_iterator *it)
1573 {
1574         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1575                     it_state(it) == IAM_IT_SKEWED);
1576         assert_corr(it_at_rec(it));
1577         return iam_leaf_key(&it->ii_path.ip_leaf);
1578 }
1579
1580 /*
1581  * Return size of key under iterator (in bytes)
1582  *
1583  * precondition:  it_state(it) == IAM_IT_ATTACHED ||
1584  *                it_state(it) == IAM_IT_SKEWED
1585  */
1586 int iam_it_key_size(const struct iam_iterator *it)
1587 {
1588         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1589                     it_state(it) == IAM_IT_SKEWED);
1590         assert_corr(it_at_rec(it));
1591         return iam_leaf_key_size(&it->ii_path.ip_leaf);
1592 }
1593
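/*
 * Allocate a block for a new tree node: prefer a block from the container's
 * idle-blocks list, and fall back to appending a fresh block to the
 * underlying object. Returns the buffer head (or NULL on failure), stores
 * the block number in *b and any error in *e; a reused idle block is zeroed
 * so that it looks like a newly allocated one.
 */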
1594 static struct buffer_head *
1595 iam_new_node(handle_t *h, struct iam_container *c, iam_ptr_t *b, int *e)
1596 {
1597         struct inode *inode = c->ic_object;
1598         struct buffer_head *bh = NULL;
1599         struct iam_idle_head *head;
1600         struct buffer_head *idle;
1601         __u32 *idle_blocks;
1602         __u16 count;
1603
1604         if (c->ic_idle_bh == NULL)
1605                 goto newblock;
1606
1607         mutex_lock(&c->ic_idle_mutex);
1608         if (unlikely(c->ic_idle_bh == NULL)) {
1609                 mutex_unlock(&c->ic_idle_mutex);
1610                 goto newblock;
1611         }
1612
1613         head = (struct iam_idle_head *)(c->ic_idle_bh->b_data);
1614         count = le16_to_cpu(head->iih_count);
1615         if (count > 0) {
1616                 *e = ldiskfs_journal_get_write_access(h, c->ic_idle_bh);
1617                 if (*e != 0)
1618                         goto fail;
1619
1620                 --count;
1621                 *b = le32_to_cpu(head->iih_blks[count]);
1622                 head->iih_count = cpu_to_le16(count);
1623                 *e = ldiskfs_handle_dirty_metadata(h, inode, c->ic_idle_bh);
1624                 if (*e != 0)
1625                         goto fail;
1626
1627                 mutex_unlock(&c->ic_idle_mutex);
1628                 bh = __ldiskfs_bread(NULL, inode, *b, 0);
1629                 if (IS_ERR_OR_NULL(bh)) {
1630                         if (IS_ERR(bh))
1631                                 *e = PTR_ERR(bh);
1632                         else
1633                                 *e = -EIO;
1634                         return NULL;
1635                 }
1636                 goto got;
1637         }
1638
1639         /* The block that itself contains the iam_idle_head is also an
1640          * idle block, and can be used as the new node. */
1641         idle_blocks = (__u32 *)(c->ic_root_bh->b_data +
1642                                 c->ic_descr->id_root_gap +
1643                                 sizeof(struct dx_countlimit));
1644         *e = ldiskfs_journal_get_write_access(h, c->ic_root_bh);
1645         if (*e != 0)
1646                 goto fail;
1647
1648         *b = le32_to_cpu(*idle_blocks);
1649         iam_lock_bh(c->ic_root_bh);
1650         *idle_blocks = head->iih_next;
1651         iam_unlock_bh(c->ic_root_bh);
1652         *e = ldiskfs_handle_dirty_metadata(h, inode, c->ic_root_bh);
1653         if (*e != 0) {
1654                 iam_lock_bh(c->ic_root_bh);
1655                 *idle_blocks = cpu_to_le32(*b);
1656                 iam_unlock_bh(c->ic_root_bh);
1657                 goto fail;
1658         }
1659
1660         bh = c->ic_idle_bh;
1661         idle = iam_load_idle_blocks(c, le32_to_cpu(*idle_blocks));
1662         if (idle != NULL && IS_ERR(idle)) {
1663                 *e = PTR_ERR(idle);
1664                 c->ic_idle_bh = NULL;
1665                 brelse(bh);
1666                 goto fail;
1667         }
1668
1669         c->ic_idle_bh = idle;
1670         mutex_unlock(&c->ic_idle_mutex);
1671
1672 got:
1673         /* get write access for the found buffer head */
1674         *e = ldiskfs_journal_get_write_access(h, bh);
1675         if (*e != 0) {
1676                 brelse(bh);
1677                 bh = NULL;
1678                 ldiskfs_std_error(inode->i_sb, *e);
1679         } else {
1680                 /* Clear the reused node, as for a freshly allocated node. */
1681                 memset(bh->b_data, 0, inode->i_sb->s_blocksize);
1682                 set_buffer_uptodate(bh);
1683         }
1684         return bh;
1685
1686 newblock:
1687         bh = osd_ldiskfs_append(h, inode, b);
1688         if (IS_ERR(bh)) {
1689                 *e = PTR_ERR(bh);
1690                 bh = NULL;
1691         }
1692
1693         return bh;
1694
1695 fail:
1696         mutex_unlock(&c->ic_idle_mutex);
1697         ldiskfs_std_error(inode->i_sb, *e);
1698         return NULL;
1699 }
1700
1701 /*
1702  * Insertion of a new record. Interaction with jbd during the non-trivial
1703  * case (when a split happens) is as follows:
1704  *
1705  *  - the new leaf node is added to the transaction by iam_new_node();
1706  *
1707  *  - the old leaf node is added to the transaction by iam_add_rec();
1708  *
1709  *  - the leaf holding the insertion point is marked dirty by iam_add_rec();
1710  *
1711  *  - the leaf without the insertion point is marked dirty (as @new_leaf) by
1712  *  iam_new_leaf();
1713  *
1714  *  - split index nodes are added to the transaction and marked dirty by
1715  *  split_index_node();
1716  *
1717  *  - the "safe" index node (not split itself, but receiving the new pointer)
1718  *  is added to the transaction and marked dirty by split_index_node();
1719  *
1720  *  - the index node where the pointer to the new leaf is inserted is added
1721  *  to the transaction by split_index_node() and marked dirty by iam_add_rec();
1722  *
1723  *  - the inode is marked dirty by iam_add_rec().
1724  *
1725  */
1726
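/*
 * Allocate a new leaf node and split the current (full) leaf @leaf into it.
 * If the insertion point ends up in the new node, the leaf and its htree
 * lock are switched to it and the parent frame is updated to point at the
 * new block.
 */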
1727 static int iam_new_leaf(handle_t *handle, struct iam_leaf *leaf)
1728 {
1729         int err;
1730         iam_ptr_t blknr;
1731         struct buffer_head   *new_leaf;
1732         struct buffer_head   *old_leaf;
1733         struct iam_container *c;
1734         struct inode         *obj;
1735         struct iam_path      *path;
1736
1737         c = iam_leaf_container(leaf);
1738         path = leaf->il_path;
1739
1740         obj = c->ic_object;
1741         new_leaf = iam_new_node(handle, c, &blknr, &err);
1742         do_corr(schedule());
1743         if (new_leaf != NULL) {
1744                 struct dynlock_handle *lh;
1745
1746                 lh = iam_lock_htree(c, blknr, DLT_WRITE);
1747                 do_corr(schedule());
1748                 if (lh != NULL) {
1749                         iam_leaf_ops(leaf)->init_new(c, new_leaf);
1750                         do_corr(schedule());
1751                         old_leaf = leaf->il_bh;
1752                         iam_leaf_split(leaf, &new_leaf, blknr);
1753                         if (old_leaf != leaf->il_bh) {
1754                                 /*
1755                                  * Switched to the new leaf.
1756                                  */
1757                                 iam_leaf_unlock(leaf);
1758                                 leaf->il_lock = lh;
1759                                 path->ip_frame->leaf = blknr;
1760                         } else
1761                                 iam_unlock_htree(path->ip_container, lh);
1762                         do_corr(schedule());
1763                         err = iam_txn_dirty(handle, path, new_leaf);
1764                         if (err == 0)
1765                                 err = ldiskfs_mark_inode_dirty(handle, obj);
1766                         do_corr(schedule());
1767                 } else
1768                         err = -ENOMEM;
1769                 brelse(new_leaf);
1770         }
1771         assert_inv(iam_path_check(iam_leaf_path(leaf)));
1772         return err;
1773 }
1774
1775 static inline void dx_set_limit(struct iam_entry *entries, unsigned value)
1776 {
1777         ((struct dx_countlimit *) entries)->limit = cpu_to_le16(value);
1778 }
1779
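/*
 * Move the upper half of the entries of the index node in @frame into the
 * new index block @entries2 (block @newblock) and insert the pivot key into
 * the parent frame. Returns the number of entries left in the old node.
 */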
1780 static int iam_shift_entries(struct iam_path *path,
1781                          struct iam_frame *frame, unsigned count,
1782                          struct iam_entry *entries, struct iam_entry *entries2,
1783                          u32 newblock)
1784 {
1785         unsigned count1;
1786         unsigned count2;
1787         int delta;
1788
1789         struct iam_frame *parent = frame - 1;
1790         struct iam_ikey *pivot = iam_path_ikey(path, 3);
1791
1792         delta = dx_index_is_compat(path) ? 0 : +1;
1793
1794         count1 = count/2 + delta;
1795         count2 = count - count1;
1796         dx_get_ikey(path, iam_entry_shift(path, entries, count1), pivot);
1797
1798         dxtrace(printk("Split index %d/%d\n", count1, count2));
1799
1800         memcpy((char *) iam_entry_shift(path, entries2, delta),
1801                (char *) iam_entry_shift(path, entries, count1),
1802                count2 * iam_entry_size(path));
1803
1804         dx_set_count(entries2, count2 + delta);
1805         dx_set_limit(entries2, dx_node_limit(path));
1806
1807         /*
1808          * NOTE: very subtle. A competing dx_probe() may find the 2nd level
1809          * index in the root index; we then insert a new index here and set
1810          * a new count in that 2nd level index, so dx_probe() may see a 2nd
1811          * level index without the hash it looks for. The fix is to re-check
1812          * the root index after locking the just-found 2nd level index -bzzz
1813          */
1814         iam_insert_key_lock(path, parent, pivot, newblock);
1815
1816         /*
1817          * now both the old and the new 2nd level index blocks contain all
1818          * pointers, so dx_probe() may find it in either of them. That's OK -bzzz
1819          */
1820         iam_lock_bh(frame->bh);
1821         dx_set_count(entries, count1);
1822         iam_unlock_bh(frame->bh);
1823
1824         /*
1825          * now the old 2nd level index block points to the first half of the
1826          * leaves. It is important that dx_probe() checks the root index block
1827          * for changes under dx_lock_bh(frame->bh) -bzzz
1828          */
1829
1830         return count1;
1831 }
1832
1833
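/*
 * Split all full index nodes on the path, bottom to top, allocating new
 * index blocks and inserting the corresponding pointers, so that a pointer
 * to a new leaf can subsequently be added. On success, *lh holds the htree
 * write lock on path->ip_frame, the index node that will receive the new
 * leaf pointer.
 */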
1834 int split_index_node(handle_t *handle, struct iam_path *path,
1835                      struct dynlock_handle **lh)
1836 {
1837
1838         struct iam_entry *entries;   /* old block contents */
1839         struct iam_entry *entries2;  /* new block contents */
1840         struct iam_frame *frame, *safe;
1841         struct buffer_head *bh_new[DX_MAX_TREE_HEIGHT] = {NULL};
1842         u32 newblock[DX_MAX_TREE_HEIGHT] = {0};
1843         struct dynlock_handle *lock[DX_MAX_TREE_HEIGHT] = {NULL,};
1844         struct dynlock_handle *new_lock[DX_MAX_TREE_HEIGHT] = {NULL,};
1845         struct inode *dir = iam_path_obj(path);
1846         struct iam_descr *descr;
1847         int nr_splet;
1848         int i, err;
1849
1850         descr = iam_path_descr(path);
1851         /*
1852          * Algorithm below depends on this.
1853          */
1854         assert_corr(dx_root_limit(path) < dx_node_limit(path));
1855
1856         frame = path->ip_frame;
1857         entries = frame->entries;
1858
1859         /*
1860          * Tall-tree handling: we might have to split multiple index blocks
1861          * all the way up to tree root. Tricky point here is error handling:
1862          * to avoid complicated undo/rollback we
1863          *
1864          *   - first allocate all necessary blocks
1865          *
1866          *   - insert pointers into them atomically.
1867          */
1868
1869         /*
1870          * Locking: leaf is already locked. htree-locks are acquired on all
1871          * index nodes that require split bottom-to-top, on the "safe" node,
1872          * and on all new nodes
1873          */
1874
1875         dxtrace(printk("using %u of %u node entries\n",
1876                        dx_get_count(entries), dx_get_limit(entries)));
1877
1878         /* What levels need split? */
1879         for (nr_splet = 0; frame >= path->ip_frames &&
1880              dx_get_count(frame->entries) == dx_get_limit(frame->entries);
1881              --frame, ++nr_splet) {
1882                 do_corr(schedule());
1883                 if (nr_splet == DX_MAX_TREE_HEIGHT) {
1884                         /*
1885                         CWARN(dir->i_sb, __FUNCTION__,
1886                                      "Directory index full!\n");
1887                                      */
1888                         err = -ENOSPC;
1889                         goto cleanup;
1890                 }
1891         }
1892
1893         safe = frame;
1894
1895         /*
1896          * Lock all nodes, bottom to top.
1897          */
1898         for (frame = path->ip_frame, i = nr_splet; i >= 0; --i, --frame) {
1899                 do_corr(schedule());
1900                 lock[i] = iam_lock_htree(path->ip_container, frame->curidx,
1901                                          DLT_WRITE);
1902                 if (lock[i] == NULL) {
1903                         err = -ENOMEM;
1904                         goto cleanup;
1905                 }
1906         }
1907
1908         /*
1909          * Check for concurrent index modification.
1910          */
1911         err = iam_check_full_path(path, 1);
1912         if (err)
1913                 goto cleanup;
1914         /*
1915          * And check that the same number of nodes is to be split.
1916          */
1917         for (i = 0, frame = path->ip_frame; frame >= path->ip_frames &&
1918              dx_get_count(frame->entries) == dx_get_limit(frame->entries);
1919              --frame, ++i) {
1920                 ;
1921         }
1922         if (i != nr_splet) {
1923                 err = -EAGAIN;
1924                 goto cleanup;
1925         }
1926
1927         /* Go back down, allocating blocks, locking them, and adding them
1928          * to the transaction... */
1929         for (frame = safe + 1, i = 0; i < nr_splet; ++i, ++frame) {
1930                 bh_new[i] = iam_new_node(handle, path->ip_container,
1931                                          &newblock[i], &err);
1932                 do_corr(schedule());
1933                 if (!bh_new[i] ||
1934                     descr->id_ops->id_node_init(path->ip_container,
1935                                                 bh_new[i], 0) != 0)
1936                         goto cleanup;
1937                 new_lock[i] = iam_lock_htree(path->ip_container, newblock[i],
1938                                              DLT_WRITE);
1939                 if (new_lock[i] == NULL) {
1940                         err = -ENOMEM;
1941                         goto cleanup;
1942                 }
1943                 do_corr(schedule());
1944                 BUFFER_TRACE(frame->bh, "get_write_access");
1945                 err = ldiskfs_journal_get_write_access(handle, frame->bh);
1946                 if (err)
1947                         goto journal_error;
1948         }
1949         /* Add "safe" node to transaction too */
1950         if (safe + 1 != path->ip_frames) {
1951                 do_corr(schedule());
1952                 err = ldiskfs_journal_get_write_access(handle, safe->bh);
1953                 if (err)
1954                         goto journal_error;
1955         }
1956
1957         /* Go through nodes once more, inserting pointers */
1958         for (frame = safe + 1, i = 0; i < nr_splet; ++i, ++frame) {
1959                 unsigned count;
1960                 int idx;
1961                 struct buffer_head *bh2;
1962                 struct buffer_head *bh;
1963
1964                 entries = frame->entries;
1965                 count = dx_get_count(entries);
1966                 idx = iam_entry_diff(path, frame->at, entries);
1967
1968                 bh2 = bh_new[i];
1969                 entries2 = dx_get_entries(path, bh2->b_data, 0);
1970
1971                 bh = frame->bh;
1972                 if (frame == path->ip_frames) {
1973                         /* splitting root node. Tricky point:
1974                          *
1975                          * In the "normal" B-tree we'd split root *and* add
1976                          * new root to the tree with pointers to the old root
1977                          * and its sibling (thus introducing two new nodes).
1978                          *
1979                          * In htree it's enough to add one node, because
1980                          * capacity of the root node is smaller than that of
1981                          * non-root one.
1982                          */
1983                         struct iam_frame *frames;
1984                         struct iam_entry *next;
1985
1986                         assert_corr(i == 0);
1987
1988                         do_corr(schedule());
1989
1990                         frames = path->ip_frames;
1991                         memcpy((char *) entries2, (char *) entries,
1992                                count * iam_entry_size(path));
1993                         dx_set_limit(entries2, dx_node_limit(path));
1994
1995                         /* Set up root */
1996                         iam_lock_bh(frame->bh);
1997                         next = descr->id_ops->id_root_inc(path->ip_container,
1998                                                           path, frame);
1999                         dx_set_block(path, next, newblock[0]);
2000                         iam_unlock_bh(frame->bh);
2001
2002                         do_corr(schedule());
2003                         /* Shift frames in the path */
2004                         memmove(frames + 2, frames + 1,
2005                                 (sizeof path->ip_frames) - 2 * sizeof frames[0]);
2006                         /* Add new access path frame */
2007                         frames[1].at = iam_entry_shift(path, entries2, idx);
2008                         frames[1].entries = entries = entries2;
2009                         frames[1].bh = bh2;
2010                         assert_inv(dx_node_check(path, frame));
2011                         ++ path->ip_frame;
2012                         ++ frame;
2013                         assert_inv(dx_node_check(path, frame));
2014                         bh_new[0] = NULL; /* buffer head is "consumed" */
2015                         err = ldiskfs_handle_dirty_metadata(handle, NULL, bh2);
2016                         if (err)
2017                                 goto journal_error;
2018                         do_corr(schedule());
2019                 } else {
2020                         /* splitting non-root index node. */
2021                         struct iam_frame *parent = frame - 1;
2022
2023                         do_corr(schedule());
2024                         count = iam_shift_entries(path, frame, count,
2025                                               entries, entries2, newblock[i]);
2026                         /* Which index block gets the new entry? */
2027                         if (idx >= count) {
2028                                 int d = dx_index_is_compat(path) ? 0 : +1;
2029
2030                                 frame->at = iam_entry_shift(path, entries2,
2031                                                             idx - count + d);
2032                                 frame->entries = entries = entries2;
2033                                 frame->curidx = newblock[i];
2034                                 swap(frame->bh, bh2);
2035                                 assert_corr(lock[i + 1] != NULL);
2036                                 assert_corr(new_lock[i] != NULL);
2037                                 swap(lock[i + 1], new_lock[i]);
2038                                 bh_new[i] = bh2;
2039                                 parent->at = iam_entry_shift(path,
2040                                                              parent->at, +1);
2041                         }
2042                         assert_inv(dx_node_check(path, frame));
2043                         assert_inv(dx_node_check(path, parent));
2044                         dxtrace(dx_show_index ("node", frame->entries));
2045                         dxtrace(dx_show_index ("node",
2046                                ((struct dx_node *) bh2->b_data)->entries));
2047                         err = ldiskfs_handle_dirty_metadata(handle, NULL, bh2);
2048                         if (err)
2049                                 goto journal_error;
2050                         do_corr(schedule());
2051                         err = ldiskfs_handle_dirty_metadata(handle, NULL,
2052                                                             parent->bh);
2053                         if (err)
2054                                 goto journal_error;
2055                 }
2056                 do_corr(schedule());
2057                 err = ldiskfs_handle_dirty_metadata(handle, NULL, bh);
2058                 if (err)
2059                         goto journal_error;
2060         }
2061         /*
2062          * This function was called to make insertion of new leaf
2063          * possible. Check that it fulfilled its obligations.
2064          */
2065         assert_corr(dx_get_count(path->ip_frame->entries) <
2066                     dx_get_limit(path->ip_frame->entries));
2067         assert_corr(lock[nr_splet] != NULL);
2068         *lh = lock[nr_splet];
2069         lock[nr_splet] = NULL;
2070         if (nr_splet > 0) {
2071                 /*
2072                  * Log ->i_size modification.
2073                  */
2074                 err = ldiskfs_mark_inode_dirty(handle, dir);
2075                 if (err)
2076                         goto journal_error;
2077         }
2078         goto cleanup;
2079 journal_error:
2080         ldiskfs_std_error(dir->i_sb, err);
2081
2082 cleanup:
2083         iam_unlock_array(path->ip_container, lock);
2084         iam_unlock_array(path->ip_container, new_lock);
2085
2086         assert_corr(err || iam_frame_is_locked(path, path->ip_frame));
2087
2088         do_corr(schedule());
2089         for (i = 0; i < ARRAY_SIZE(bh_new); ++i) {
2090                 if (bh_new[i] != NULL)
2091                         brelse(bh_new[i]);
2092         }
2093         return err;
2094 }
2095
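/*
 * Insert the (@k, @r) pair into the leaf under @path, splitting the leaf
 * (and as many index nodes as necessary) when it is full. If a concurrent
 * index change is detected (-EAGAIN from split_index_node()), the path is
 * looked up again and the split retried.
 */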
2096 static int iam_add_rec(handle_t *handle, struct iam_iterator *it,
2097                        struct iam_path *path,
2098                        const struct iam_key *k, const struct iam_rec *r)
2099 {
2100         int err;
2101         struct iam_leaf *leaf;
2102
2103         leaf = &path->ip_leaf;
2104         assert_inv(iam_path_check(path));
2105         err = iam_txn_add(handle, path, leaf->il_bh);
2106         if (err == 0) {
2107                 do_corr(schedule());
2108                 if (!iam_leaf_can_add(leaf, k, r)) {
2109                         struct dynlock_handle *lh = NULL;
2110
2111                         do {
2112                                 assert_corr(lh == NULL);
2113                                 do_corr(schedule());
2114                                 err = split_index_node(handle, path, &lh);
2115                                 if (err == -EAGAIN) {
2116                                         assert_corr(lh == NULL);
2117
2118                                         iam_path_fini(path);
2119                                         it->ii_state = IAM_IT_DETACHED;
2120
2121                                         do_corr(schedule());
2122                                         err = iam_it_get_exact(it, k);
2123                                         if (err == -ENOENT)
2124                                                 err = +1; /* repeat split */
2125                                         else if (err == 0)
2126                                                 err = -EEXIST;
2127                                 }
2128                         } while (err > 0);
2129                         assert_inv(iam_path_check(path));
2130                         if (err == 0) {
2131                                 assert_corr(lh != NULL);
2132                                 do_corr(schedule());
2133                                 err = iam_new_leaf(handle, leaf);
2134                                 if (err == 0)
2135                                         err = iam_txn_dirty(handle, path,
2136                                                             path->ip_frame->bh);
2137                         }
2138                         iam_unlock_htree(path->ip_container, lh);
2139                         do_corr(schedule());
2140                 }
2141                 if (err == 0) {
2142                         iam_leaf_rec_add(leaf, k, r);
2143                         err = iam_txn_dirty(handle, path, leaf->il_bh);
2144                 }
2145         }
2146         assert_inv(iam_path_check(path));
2147         return err;
2148 }
2149
2150 /*
2151  * Insert new record with key @k and contents from @r, shifting records to the
2152  * right. On success, iterator is positioned on the newly inserted record.
2153  *
2154  * precondition: it->ii_flags&IAM_IT_WRITE &&
2155  *               (it_state(it) == IAM_IT_ATTACHED ||
2156  *                it_state(it) == IAM_IT_SKEWED) &&
2157  *               ergo(it_state(it) == IAM_IT_ATTACHED,
2158  *                    it_keycmp(it, k) <= 0) &&
2159  *               ergo(it_before(it), it_keycmp(it, k) > 0));
2160  * postcondition: ergo(result == 0,
2161  *                     it_state(it) == IAM_IT_ATTACHED &&
2162  *                     it_keycmp(it, k) == 0 &&
2163  *                     !memcmp(iam_it_rec_get(it), r, ...))
2164  */
2165 int iam_it_rec_insert(handle_t *h, struct iam_iterator *it,
2166                       const struct iam_key *k, const struct iam_rec *r)
2167 {
2168         int result;
2169         struct iam_path *path;
2170
2171         path = &it->ii_path;
2172
2173         assert_corr(it->ii_flags&IAM_IT_WRITE);
2174         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
2175                     it_state(it) == IAM_IT_SKEWED);
2176         assert_corr(ergo(it_state(it) == IAM_IT_ATTACHED,
2177                          it_keycmp(it, k) <= 0));
2178         assert_corr(ergo(it_before(it), it_keycmp(it, k) > 0));
2179         result = iam_add_rec(h, it, path, k, r);
2180         if (result == 0)
2181                 it->ii_state = IAM_IT_ATTACHED;
2182         assert_corr(ergo(result == 0,
2183                          it_state(it) == IAM_IT_ATTACHED &&
2184                          it_keycmp(it, k) == 0));
2185         return result;
2186 }
2187
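/* Number of idle block pointers (__u32) that fit into one idle-blocks head
 * block after the struct iam_idle_head. */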
2188 static inline int iam_idle_blocks_limit(struct inode *inode)
2189 {
2190         return (inode->i_sb->s_blocksize - sizeof(struct iam_idle_head)) >> 2;
2191 }
2192
2193 /*
2194  * If the leaf cannot be recycled, we will lose one block for reuse. This is
2195  * not a serious issue, because the cost is about the same as not recycling.
2196  */
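/*
 * If the leaf @l has become empty, remove its entry from the index node in
 * p->ip_frame and return the leaf's block number, with an extra reference
 * on its buffer head in *bh, so that the caller can recycle the block.
 * Returns 0 when no shrinking is done.
 */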
2197 static iam_ptr_t iam_index_shrink(handle_t *h, struct iam_path *p,
2198                                   struct iam_leaf *l, struct buffer_head **bh)
2199 {
2200         struct iam_container *c = p->ip_container;
2201         struct inode *inode = c->ic_object;
2202         struct iam_frame *frame = p->ip_frame;
2203         struct iam_entry *entries;
2204         struct iam_entry *pos;
2205         struct dynlock_handle *lh;
2206         int count;
2207         int rc;
2208
2209         if (c->ic_idle_failed)
2210                 return 0;
2211
2212         if (unlikely(frame == NULL))
2213                 return 0;
2214
2215         if (!iam_leaf_empty(l))
2216                 return 0;
2217
2218         lh = iam_lock_htree(c, frame->curidx, DLT_WRITE);
2219         if (lh == NULL) {
2220                 CWARN("%s: No memory to recycle idle blocks\n",
2221                       osd_ino2name(inode));
2222                 return 0;
2223         }
2224
2225         rc = iam_txn_add(h, p, frame->bh);
2226         if (rc != 0) {
2227                 iam_unlock_htree(c, lh);
2228                 return 0;
2229         }
2230
2231         iam_lock_bh(frame->bh);
2232         entries = frame->entries;
2233         count = dx_get_count(entries);
2234         /* Do NOT shrink the last entry in the index node; it can be reused
2235          * directly by the next new node. */
2236         if (count == 2) {
2237                 iam_unlock_bh(frame->bh);
2238                 iam_unlock_htree(c, lh);
2239                 return 0;
2240         }
2241
2242         pos = iam_find_position(p, frame);
2243         /* Some new leaf nodes may have been added, or empty leaf nodes
2244          * shrunk, during our delete operation.
2245          *
2246          * If the empty leaf is no longer under the current index node because
2247          * the index node has been split, just skip the empty leaf; this is rare. */
2248         if (unlikely(frame->leaf != dx_get_block(p, pos))) {
2249                 iam_unlock_bh(frame->bh);
2250                 iam_unlock_htree(c, lh);
2251                 return 0;
2252         }
2253
2254         frame->at = pos;
2255         if (frame->at < iam_entry_shift(p, entries, count - 1)) {
2256                 struct iam_entry *n = iam_entry_shift(p, frame->at, 1);
2257
2258                 memmove(frame->at, n,
2259                         (char *)iam_entry_shift(p, entries, count) - (char *)n);
2260                 frame->at_shifted = 1;
2261         }
2262         dx_set_count(entries, count - 1);
2263         iam_unlock_bh(frame->bh);
2264         rc = iam_txn_dirty(h, p, frame->bh);
2265         iam_unlock_htree(c, lh);
2266         if (rc != 0)
2267                 return 0;
2268
2269         get_bh(l->il_bh);
2270         *bh = l->il_bh;
2271         return frame->leaf;
2272 }
2273
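/*
 * Make the freed leaf block @bh (block number @blk) the new head of the
 * container's idle-blocks list: initialize its iam_idle_head, chain the
 * previous head behind it and record the new head block in the root block.
 */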
2274 static int
2275 iam_install_idle_blocks(handle_t *h, struct iam_path *p, struct buffer_head *bh,
2276                         __u32 *idle_blocks, iam_ptr_t blk)
2277 {
2278         struct iam_container *c = p->ip_container;
2279         struct buffer_head *old = c->ic_idle_bh;
2280         struct iam_idle_head *head;
2281         int rc;
2282
2283         head = (struct iam_idle_head *)(bh->b_data);
2284         head->iih_magic = cpu_to_le16(IAM_IDLE_HEADER_MAGIC);
2285         head->iih_count = 0;
2286         head->iih_next = *idle_blocks;
2287         /* The bh has already been added to the transaction by the caller. */
2288         rc = iam_txn_dirty(h, p, bh);
2289         if (rc != 0)
2290                 return rc;
2291
2292         rc = iam_txn_add(h, p, c->ic_root_bh);
2293         if (rc != 0)
2294                 return rc;
2295
2296         iam_lock_bh(c->ic_root_bh);
2297         *idle_blocks = cpu_to_le32(blk);
2298         iam_unlock_bh(c->ic_root_bh);
2299         rc = iam_txn_dirty(h, p, c->ic_root_bh);
2300         if (rc == 0) {
2301                 /* Do NOT release the old bh before the new one is assigned. */
2302                 get_bh(bh);
2303                 c->ic_idle_bh = bh;
2304                 brelse(old);
2305         } else {
2306                 iam_lock_bh(c->ic_root_bh);
2307                 *idle_blocks = head->iih_next;
2308                 iam_unlock_bh(c->ic_root_bh);
2309         }
2310         return rc;
2311 }
2312
2313 /*
2314  * If the leaf cannot be recycled, we will lose one block for reuse. This is
2315  * not a serious issue, because the cost is about the same as not recycling.
2316  */
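/*
 * Add the freed leaf block @blk (buffer @bh) to the container's idle-blocks
 * list: append it to the current idle-blocks head, or make @bh the new head
 * via iam_install_idle_blocks() when there is no head yet or the current
 * head is full.
 */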
2317 static void iam_recycle_leaf(handle_t *h, struct iam_path *p,
2318                              struct buffer_head *bh, iam_ptr_t blk)
2319 {
2320         struct iam_container *c = p->ip_container;
2321         struct inode *inode = c->ic_object;
2322         struct iam_idle_head *head;
2323         __u32 *idle_blocks;
2324         int count;
2325         int rc;
2326
2327         mutex_lock(&c->ic_idle_mutex);
2328         if (unlikely(c->ic_idle_failed)) {
2329                 rc = -EFAULT;
2330                 goto unlock;
2331         }
2332
2333         idle_blocks = (__u32 *)(c->ic_root_bh->b_data +
2334                                 c->ic_descr->id_root_gap +
2335                                 sizeof(struct dx_countlimit));
2336         /* It is the first idle block. */
2337         if (c->ic_idle_bh == NULL) {
2338                 rc = iam_install_idle_blocks(h, p, bh, idle_blocks, blk);
2339                 goto unlock;
2340         }
2341
2342         head = (struct iam_idle_head *)(c->ic_idle_bh->b_data);
2343         count = le16_to_cpu(head->iih_count);
2344         /* Current ic_idle_bh is full, to be replaced by the leaf. */
2345         if (count == iam_idle_blocks_limit(inode)) {
2346                 rc = iam_install_idle_blocks(h, p, bh, idle_blocks, blk);
2347                 goto unlock;
2348         }
2349
2350         /* Just add to ic_idle_bh. */
2351         rc = iam_txn_add(h, p, c->ic_idle_bh);
2352         if (rc != 0)
2353                 goto unlock;
2354
2355         head->iih_blks[count] = cpu_to_le32(blk);
2356         head->iih_count = cpu_to_le16(count + 1);
2357         rc = iam_txn_dirty(h, p, c->ic_idle_bh);
2358
2359 unlock:
2360         mutex_unlock(&c->ic_idle_mutex);
2361         if (rc != 0)
2362                 CWARN("%s: idle blocks failed, will lose the blk %u\n",
2363                       osd_ino2name(inode), blk);
2364 }
2365
2366 /*
2367  * Delete record under iterator.
2368  *
2369  * precondition:  it_state(it) == IAM_IT_ATTACHED &&
2370  *                it->ii_flags&IAM_IT_WRITE &&
2371  *                it_at_rec(it)
2372  * postcondition: it_state(it) == IAM_IT_ATTACHED ||
2373  *                it_state(it) == IAM_IT_DETACHED
2374  */
2375 int iam_it_rec_delete(handle_t *h, struct iam_iterator *it)
2376 {
2377         int result;
2378         struct iam_leaf *leaf;
2379         struct iam_path *path;
2380
2381         assert_corr(it_state(it) == IAM_IT_ATTACHED &&
2382                     it->ii_flags&IAM_IT_WRITE);
2383         assert_corr(it_at_rec(it));
2384
2385         path = &it->ii_path;
2386         leaf = &path->ip_leaf;
2387
2388         assert_inv(iam_path_check(path));
2389
2390         result = iam_txn_add(h, path, leaf->il_bh);
2391         /*
2392          * no compaction for now.
2393          */
2394         if (result == 0) {
2395                 iam_rec_del(leaf, it->ii_flags&IAM_IT_MOVE);
2396                 result = iam_txn_dirty(h, path, leaf->il_bh);
2397                 if (result == 0 && iam_leaf_at_end(leaf)) {
2398                         struct buffer_head *bh = NULL;
2399                         iam_ptr_t blk;
2400
2401                         blk = iam_index_shrink(h, path, leaf, &bh);
2402                         if (it->ii_flags & IAM_IT_MOVE) {
2403                                 result = iam_it_next(it);
2404                                 if (result > 0)
2405                                         result = 0;
2406                         }
2407
2408                         if (bh != NULL) {
2409                                 iam_recycle_leaf(h, path, bh, blk);
2410                                 brelse(bh);
2411                         }
2412                 }
2413         }
2414         assert_inv(iam_path_check(path));
2415         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
2416                     it_state(it) == IAM_IT_DETACHED);
2417         return result;
2418 }
2419
2420 /*
2421  * Convert iterator to cookie.
2422  *
2423  * precondition:  it_state(it) == IAM_IT_ATTACHED &&
2424  *                iam_path_descr(it->ii_path)->id_ikey_size <= sizeof(iam_pos_t)
2425  * postcondition: it_state(it) == IAM_IT_ATTACHED
2426  */
2427 iam_pos_t iam_it_store(const struct iam_iterator *it)
2428 {
2429         iam_pos_t result;
2430
2431         assert_corr(it_state(it) == IAM_IT_ATTACHED);
2432         assert_corr(it_at_rec(it));
2433         assert_corr(iam_it_container(it)->ic_descr->id_ikey_size <=
2434                     sizeof result);
2435
2436         result = 0;
2437         return *(iam_pos_t *)iam_it_ikey_get(it, (void *)&result);
2438 }
2439
2440 /*
2441  * Restore iterator from cookie.
2442  *
2443  * precondition:  it_state(it) == IAM_IT_DETACHED && it->ii_flags&IAM_IT_MOVE &&
2444  *                iam_path_descr(it->ii_path)->id_ikey_size <= sizeof(iam_pos_t)
2445  * postcondition: ergo(result == 0, it_state(it) == IAM_IT_ATTACHED &&
2446  *                                  iam_it_store(it) == pos)
2447  */
2448 int iam_it_load(struct iam_iterator *it, iam_pos_t pos)
2449 {
2450         assert_corr(it_state(it) == IAM_IT_DETACHED &&
2451                     it->ii_flags&IAM_IT_MOVE);
2452         assert_corr(iam_it_container(it)->ic_descr->id_ikey_size <= sizeof pos);
2453         return iam_it_iget(it, (struct iam_ikey *)&pos);
2454 }
2455
2456 /***********************************************************************/
2457 /* invariants                                                          */
2458 /***********************************************************************/
2459
2460 static inline int ptr_inside(void *base, size_t size, void *ptr)
2461 {
2462         return (base <= ptr) && (ptr < base + size);
2463 }
2464
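/* Sanity check: the frame has a buffer, and its ->entries and ->at pointers
 * lie inside that buffer with ->entries <= ->at. */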
2465 static int iam_frame_invariant(struct iam_frame *f)
2466 {
2467         return
2468                 (f->bh != NULL &&
2469                 f->bh->b_data != NULL &&
2470                 ptr_inside(f->bh->b_data, f->bh->b_size, f->entries) &&
2471                 ptr_inside(f->bh->b_data, f->bh->b_size, f->at) &&
2472                 f->entries <= f->at);
2473 }
2474
2475 static int iam_leaf_invariant(struct iam_leaf *l)
2476 {
2477         return
2478                 l->il_bh != NULL &&
2479                 l->il_bh->b_data != NULL &&
2480                 ptr_inside(l->il_bh->b_data, l->il_bh->b_size, l->il_entries) &&
2481                 ptr_inside(l->il_bh->b_data, l->il_bh->b_size, l->il_at) &&
2482                 l->il_entries <= l->il_at;
2483 }
2484
2485 static int iam_path_invariant(struct iam_path *p)
2486 {
2487         int i;
2488
2489         if (p->ip_container == NULL ||
2490             p->ip_indirect < 0 || p->ip_indirect > DX_MAX_TREE_HEIGHT - 1 ||
2491             p->ip_frame != p->ip_frames + p->ip_indirect ||
2492             !iam_leaf_invariant(&p->ip_leaf))
2493                 return 0;
2494         for (i = 0; i < ARRAY_SIZE(p->ip_frames); ++i) {
2495                 if (i <= p->ip_indirect) {
2496                         if (!iam_frame_invariant(&p->ip_frames[i]))
2497                                 return 0;
2498                 }
2499         }
2500         return 1;
2501 }
2502
2503 int iam_it_invariant(struct iam_iterator *it)
2504 {
2505         return
2506                 (it->ii_state == IAM_IT_DETACHED ||
2507                  it->ii_state == IAM_IT_ATTACHED ||
2508                  it->ii_state == IAM_IT_SKEWED) &&
2509                 !(it->ii_flags & ~(IAM_IT_MOVE | IAM_IT_WRITE)) &&
2510                 ergo(it->ii_state == IAM_IT_ATTACHED ||
2511                      it->ii_state == IAM_IT_SKEWED,
2512                      iam_path_invariant(&it->ii_path) &&
2513                      equi(it_at_rec(it), it->ii_state == IAM_IT_SKEWED));
2514 }
2515
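/*
 * The functions below form the top-level container API. A rough usage
 * sketch (error handling and transaction setup omitted; @pd is a path
 * descriptor obtained from the container's format code):
 *
 *	rc = iam_insert(handle, c, key, rec, pd);
 *	if (rc == -EEXIST)
 *		rc = iam_update(handle, c, key, rec, pd);
 *	if (rc >= 0)
 *		rc = iam_lookup(c, key, rec, pd);
 */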
2516 /*
2517  * Search container @c for record with key @k. If record is found, its data
2518  * are moved into @r.
2519  *
2520  * Return values: 0: found, -ENOENT: not-found, -ve: error
2521  */
2522 int iam_lookup(struct iam_container *c, const struct iam_key *k,
2523                struct iam_rec *r, struct iam_path_descr *pd)
2524 {
2525         struct iam_iterator it;
2526         int result;
2527
2528         iam_it_init(&it, c, 0, pd);
2529
2530         result = iam_it_get_exact(&it, k);
2531         if (result == 0)
2532                 /*
2533                  * record with required key found, copy it into user buffer
2534                  */
2535                 iam_reccpy(&it.ii_path.ip_leaf, r);
2536         iam_it_put(&it);
2537         iam_it_fini(&it);
2538         return result;
2539 }
2540
2541 /*
2542  * Insert new record @r with key @k into container @c (within context of
2543  * transaction @h).
2544  *
2545  * Return values: 0: success, -ve: error, including -EEXIST when record with
2546  * given key is already present.
2547  *
2548  * postcondition: ergo(result == 0 || result == -EEXIST,
2549  *                     iam_lookup(c, k, r2) == 0);
2550  */
2551 int iam_insert(handle_t *h, struct iam_container *c, const struct iam_key *k,
2552                const struct iam_rec *r, struct iam_path_descr *pd)
2553 {
2554         struct iam_iterator it;
2555         int result;
2556
2557         iam_it_init(&it, c, IAM_IT_WRITE, pd);
2558
2559         result = iam_it_get_exact(&it, k);
2560         if (result == -ENOENT)
2561                 result = iam_it_rec_insert(h, &it, k, r);
2562         else if (result == 0)
2563                 result = -EEXIST;
2564         iam_it_put(&it);
2565         iam_it_fini(&it);
2566         return result;
2567 }
2568
2569 /*
2570  * Update record with the key @k in container @c (within context of
2571  * transaction @h), new record is given by @r.
2572  *
2573  * Return values: +1: skip because of the same rec value, 0: success,
2574  * -ve: error, including -ENOENT if no record with the given key found.
2575  */
2576 int iam_update(handle_t *h, struct iam_container *c, const struct iam_key *k,
2577                const struct iam_rec *r, struct iam_path_descr *pd)
2578 {
2579         struct iam_iterator it;
2580         struct iam_leaf *folio;
2581         int result;
2582
2583         iam_it_init(&it, c, IAM_IT_WRITE, pd);
2584
2585         result = iam_it_get_exact(&it, k);
2586         if (result == 0) {
2587                 folio = &it.ii_path.ip_leaf;
2588                 result = iam_leaf_ops(folio)->rec_eq(folio, r);
2589                 if (result == 0)
2590                         iam_it_rec_set(h, &it, r);
2591                 else
2592                         result = 1;
2593         }
2594         iam_it_put(&it);
2595         iam_it_fini(&it);
2596         return result;
2597 }
2598
2599 /*
2600  * Delete existing record with key @k.
2601  *
2602  * Return values: 0: success, -ENOENT: not-found, -ve: other error.
2603  *
2604  * postcondition: ergo(result == 0 || result == -ENOENT,
2605  *                                 !iam_lookup(c, k, *));
2606  */
2607 int iam_delete(handle_t *h, struct iam_container *c, const struct iam_key *k,
2608                struct iam_path_descr *pd)
2609 {
2610         struct iam_iterator it;
2611         int result;
2612
2613         iam_it_init(&it, c, IAM_IT_WRITE, pd);
2614
2615         result = iam_it_get_exact(&it, k);
2616         if (result == 0)
2617                 iam_it_rec_delete(h, &it);
2618         iam_it_put(&it);
2619         iam_it_fini(&it);
2620         return result;
2621 }
2622
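/*
 * Entry limit for the root index node: the root loses @rootgap bytes to its
 * header, and the limit is decremented when it would otherwise equal the
 * non-root limit, so that the root limit is always strictly smaller than
 * the non-root node limit (split_index_node() depends on this).
 */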
2623 int iam_root_limit(int rootgap, int blocksize, int size)
2624 {
2625         int limit;
2626         int nlimit;
2627
2628         limit = (blocksize - rootgap) / size;
2629         nlimit = blocksize / size;
2630         if (limit == nlimit)
2631                 limit--;
2632         return limit;
2633 }