1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2011, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  *
32  * iam.c
33  * Top-level entry points into iam module
34  *
35  * Author: Wang Di <wangdi@clusterfs.com>
36  * Author: Nikita Danilov <nikita@clusterfs.com>
37  */
38
39 /*
40  * iam: big theory statement.
41  *
42  * iam (Index Access Module) is a module providing an abstraction of a
43  * persistent transactional container on top of a generalized ldiskfs htree.
44  *
45  * iam supports:
46  *
47  *     - key, pointer, and record size specifiable per container.
48  *
49  *     - trees taller than 2 index levels.
50  *
51  *     - read/write to existing ldiskfs htree directories as iam containers.
52  *
53  * iam container is a tree, consisting of leaf nodes containing keys and
54  * records stored in this container, and index nodes, containing keys and
55  * pointers to leaf or index nodes.
56  *
57  * iam does not work with keys directly; instead it calls a user-supplied key
58  * comparison function (->dpo_keycmp()).
59  *
60  * Pointers are (currently) interpreted as logical offsets (measured in
61  * blocks) within the underlying flat file on top of which the iam tree lives.
62  *
63  * On-disk format:
64  *
65  * iam mostly tries to reuse existing htree formats.
66  *
67  * Format of index node:
68  *
69  * +-----+-------+-------+-------+------+-------+------------+
70  * |     | count |       |       |      |       |            |
71  * | gap |   /   | entry | entry | .... | entry | free space |
72  * |     | limit |       |       |      |       |            |
73  * +-----+-------+-------+-------+------+-------+------------+
74  *
75  *       gap           this part of node is never accessed by iam code. It
76  *                     exists for binary compatibility with ldiskfs htree (that,
77  *                     in turn, stores fake struct ext2_dirent for ext2
78  *                     compatibility), and to keep some unspecified per-node
79  *                     data. Gap can be different for root and non-root index
80  *                     nodes. Gap size can be specified for each container
81  *                     (gap of 0 is allowed).
82  *
83  *       count/limit   current number of entries in this node, and the maximal
84  *                     number of entries that can fit into node. count/limit
85  *                     has the same size as entry, and is itself counted in
86  *                     count.
87  *
88  *       entry         index entry: consists of a key immediately followed by
89  *                     a pointer to a child node. Size of a key and size of a
90  *                     pointer depends on container. Entry has neither
91  *                     alignment nor padding.
92  *
93  *       free space    portion of the node that new entries are added to
94  *
95  * Entries in index node are sorted by their key value.
96  *
97  * Format of a leaf node is not specified. Generic iam code accesses leaf
98  * nodes through ->id_leaf methods in struct iam_descr.
99  *
100  * The IAM root block is a special node, which contains the IAM descriptor.
101  * Its on-disk format is:
102  *
103  * +---------+-------+--------+---------+-------+------+-------+------------+
104  * |IAM desc | count |  idle  |         |       |      |       |            |
105  * |(fix/var)|   /   | blocks | padding | entry | .... | entry | free space |
106  * |         | limit |        |         |       |      |       |            |
107  * +---------+-------+--------+---------+-------+------+-------+------------+
108  *
109  * The padding length is calculated with the parameters in the IAM descriptor.
110  *
111  * The field "idle_blocks" records empty leaf nodes: nodes that have not
112  * been released although all entries in them have been removed. Ideally,
113  * these idle blocks would be reused when new leaf nodes are needed for new
114  * entries, but that requires the IAM hash functions to map the new
115  * entries onto the idle blocks. Unfortunately, it is not easy to design
116  * hash functions for such clever mapping, especially considering the insert/
117  * lookup performance.
118  *
119  * Instead, the IAM recycles the empty leaf nodes into a per-file pool of
120  * idle blocks. Whenever a new leaf node is needed, a block is taken from
121  * this pool first, regardless of how the IAM hash functions would map the
122  * new entry.
123  *
124  * The idle blocks pool is organized as a series of tables, and each table
125  * can be described as follows (on-disk format):
126  *
127  * +---------+---------+---------+---------+------+---------+-------+
128  * |  magic  |  count  |  next   |  logic  |      |  logic  | free  |
129  * |(16 bits)|(16 bits)|  table  |  blk #  | .... |  blk #  | space |
130  * |         |         |(32 bits)|(32 bits)|      |(32 bits)|       |
131  * +---------+---------+---------+---------+------+---------+-------+
132  *
133  * The logic blk# for the first table is stored in the root node "idle_blocks".
134  *
135  */
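
/*
 * For reference, a sketch of the idle-table header described above, with
 * field names taken from their uses later in this file (iih_magic,
 * iih_count, iih_next, iih_blks).  This is illustrative only; the
 * authoritative definition of struct iam_idle_head lives in the
 * osd-ldiskfs headers.
 */
#if 0
struct iam_idle_head {
	__le16 iih_magic;	/* IAM_IDLE_HEADER_MAGIC */
	__le16 iih_count;	/* number of idle block slots currently filled */
	__le32 iih_next;	/* logical blk# of the next idle table, 0 if none */
	__le32 iih_blks[];	/* logical blk# of each idle leaf block */
};
#endif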
136
137 #include <linux/module.h>
138 #include <linux/fs.h>
139 #include <linux/pagemap.h>
140 #include <linux/time.h>
141 #include <linux/fcntl.h>
142 #include <linux/stat.h>
143 #include <linux/string.h>
144 #include <linux/quotaops.h>
145 #include <linux/buffer_head.h>
146
147 #include <ldiskfs/ldiskfs.h>
148 #include <ldiskfs/xattr.h>
149 #undef ENTRY
150
151 #include "osd_internal.h"
152
153 #include <ldiskfs/acl.h>
154
155 /*
156  * List of all registered formats.
157  *
158  * No locking. Callers synchronize.
159  */
160 static LIST_HEAD(iam_formats);
161
162 void iam_format_register(struct iam_format *fmt)
163 {
164         list_add(&fmt->if_linkage, &iam_formats);
165 }
166
167 static struct buffer_head *
168 iam_load_idle_blocks(struct iam_container *c, iam_ptr_t blk)
169 {
170         struct inode *inode = c->ic_object;
171         struct iam_idle_head *head;
172         struct buffer_head *bh;
173
174         LASSERT(mutex_is_locked(&c->ic_idle_mutex));
175
176         if (blk == 0)
177                 return NULL;
178
179         bh = __ldiskfs_bread(NULL, inode, blk, 0);
180         if (IS_ERR_OR_NULL(bh)) {
181                 CERROR("%s: cannot load idle blocks, blk = %u, err = %ld\n",
182                        osd_ino2name(inode), blk, bh ? PTR_ERR(bh) : -EIO);
183                 c->ic_idle_failed = 1;
184                 if (bh == NULL)
185                         bh = ERR_PTR(-EIO);
186                 return bh;
187         }
188
189         head = (struct iam_idle_head *)(bh->b_data);
190         if (le16_to_cpu(head->iih_magic) != IAM_IDLE_HEADER_MAGIC) {
191                 CERROR("%s: invalid idle block head, blk = %u, magic = %d\n",
192                        osd_ino2name(inode), blk, le16_to_cpu(head->iih_magic));
193                 brelse(bh);
194                 c->ic_idle_failed = 1;
195                 return ERR_PTR(-EBADF);
196         }
197
198         return bh;
199 }
200
201 /*
202  * Determine format of given container. This is done by scanning list of
203  * registered formats and calling ->if_guess() method of each in turn.
204  */
205 static int iam_format_guess(struct iam_container *c)
206 {
207         int result;
208         struct iam_format *fmt;
209
210         /*
211          * XXX temporary initialization hook.
212          */
213         {
214                 static int initialized = 0;
215
216                 if (!initialized) {
217                         iam_lvar_format_init();
218                         iam_lfix_format_init();
219                         initialized = 1;
220                 }
221         }
222
223         result = -ENOENT;
224         list_for_each_entry(fmt, &iam_formats, if_linkage) {
225                 result = fmt->if_guess(c);
226                 if (result == 0)
227                         break;
228         }
229
230         if (result == 0) {
231                 struct buffer_head *bh;
232                 __u32 *idle_blocks;
233
234                 LASSERT(c->ic_root_bh != NULL);
235
236                 idle_blocks = (__u32 *)(c->ic_root_bh->b_data +
237                                         c->ic_descr->id_root_gap +
238                                         sizeof(struct dx_countlimit));
239                 mutex_lock(&c->ic_idle_mutex);
240                 bh = iam_load_idle_blocks(c, le32_to_cpu(*idle_blocks));
241                 if (bh != NULL && IS_ERR(bh))
242                         result = PTR_ERR(bh);
243                 else
244                         c->ic_idle_bh = bh;
245                 mutex_unlock(&c->ic_idle_mutex);
246         }
247
248         return result;
249 }
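
/*
 * Hypothetical sketch of how a format driver plugs into the probing above:
 * it supplies an ->if_guess() method and registers itself with
 * iam_format_register(), in the same way iam_lvar_format_init() and
 * iam_lfix_format_init() do.  The my_fmt* names below are made up for
 * illustration; struct iam_format comes from the osd headers.
 */
#if 0
static int my_fmt_guess(struct iam_container *c)
{
	/* inspect the root block of c->ic_object; return 0 only on a match */
	return -ENOENT;
}

static struct iam_format my_fmt = {
	.if_guess = my_fmt_guess,
};

static void my_fmt_init(void)
{
	iam_format_register(&my_fmt);
}
#endif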
250
251 /*
252  * Initialize container @c.
253  */
254 int iam_container_init(struct iam_container *c,
255                        struct iam_descr *descr, struct inode *inode)
256 {
257         memset(c, 0, sizeof *c);
258         c->ic_descr = descr;
259         c->ic_object = inode;
260         init_rwsem(&c->ic_sem);
261         dynlock_init(&c->ic_tree_lock);
262         mutex_init(&c->ic_idle_mutex);
263         return 0;
264 }
265
266 /*
267  * Determine container format.
268  */
269 int iam_container_setup(struct iam_container *c)
270 {
271         return iam_format_guess(c);
272 }
273
274 /*
275  * Finalize container @c, release all resources.
276  */
277 void iam_container_fini(struct iam_container *c)
278 {
279         brelse(c->ic_idle_bh);
280         c->ic_idle_bh = NULL;
281         brelse(c->ic_root_bh);
282         c->ic_root_bh = NULL;
283 }
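
/*
 * Hypothetical sketch of the container life-cycle built from the three
 * functions above (real callers live elsewhere in osd-ldiskfs; the helper
 * name is made up):
 */
#if 0
static int example_container_open(struct iam_container *c,
				  struct iam_descr *descr,
				  struct inode *inode)
{
	int rc;

	rc = iam_container_init(c, descr, inode);	/* fields, locks */
	if (rc != 0)
		return rc;

	rc = iam_container_setup(c);			/* guess on-disk format */
	if (rc != 0)
		iam_container_fini(c);			/* drop root/idle buffers */
	return rc;
}
#endif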
284
285 void iam_path_init(struct iam_path *path, struct iam_container *c,
286                    struct iam_path_descr *pd)
287 {
288         memset(path, 0, sizeof *path);
289         path->ip_container = c;
290         path->ip_frame = path->ip_frames;
291         path->ip_data = pd;
292         path->ip_leaf.il_path = path;
293 }
294
295 static void iam_leaf_fini(struct iam_leaf *leaf);
296
297 void iam_path_release(struct iam_path *path)
298 {
299         int i;
300
301         for (i = 0; i < ARRAY_SIZE(path->ip_frames); i++) {
302                 if (path->ip_frames[i].bh != NULL) {
303                         path->ip_frames[i].at_shifted = 0;
304                         brelse(path->ip_frames[i].bh);
305                         path->ip_frames[i].bh = NULL;
306                 }
307         }
308 }
309
310 void iam_path_fini(struct iam_path *path)
311 {
312         iam_leaf_fini(&path->ip_leaf);
313         iam_path_release(path);
314 }
315
316
317 void iam_path_compat_init(struct iam_path_compat *path, struct inode *inode)
318 {
319         int i;
320
321         path->ipc_hinfo = &path->ipc_hinfo_area;
322         for (i = 0; i < ARRAY_SIZE(path->ipc_scratch); ++i)
323                 path->ipc_descr.ipd_key_scratch[i] =
324                         (struct iam_ikey *)&path->ipc_scratch[i];
325
326         iam_path_init(&path->ipc_path, &path->ipc_container, &path->ipc_descr);
327 }
328
329 void iam_path_compat_fini(struct iam_path_compat *path)
330 {
331         iam_path_fini(&path->ipc_path);
332 }
333
334 /*
335  * Helper function initializing iam_path_descr and its key scratch area.
336  */
337 struct iam_path_descr *iam_ipd_alloc(void *area, int keysize)
338 {
339         struct iam_path_descr *ipd;
340         void *karea;
341         int i;
342
343         ipd = area;
344         karea = ipd + 1;
345         for (i = 0; i < ARRAY_SIZE(ipd->ipd_key_scratch); ++i, karea += keysize)
346                 ipd->ipd_key_scratch[i] = karea;
347         return ipd;
348 }
349
350 void iam_ipd_free(struct iam_path_descr *ipd)
351 {
352 }
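
/*
 * Hypothetical usage sketch for iam_ipd_alloc(): the caller must provide an
 * area large enough for the descriptor itself plus one scratch key per
 * ipd_key_scratch slot.  The helper below is made up for illustration, and
 * kmalloc()/GFP_NOFS is only this sketch's assumption about how the area is
 * obtained.
 */
#if 0
static struct iam_path_descr *example_ipd_get(int keysize)
{
	struct iam_path_descr *ipd = NULL;
	void *area;

	/* scratch keys are laid out immediately after the descriptor */
	area = kmalloc(sizeof(*ipd) +
		       keysize * ARRAY_SIZE(ipd->ipd_key_scratch), GFP_NOFS);
	if (area == NULL)
		return NULL;
	return iam_ipd_alloc(area, keysize);
}
#endif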
353
354 int iam_node_read(struct iam_container *c, iam_ptr_t ptr,
355                   handle_t *h, struct buffer_head **bh)
356 {
357         /*
358          * NB: it can be called by iam_lfix_guess() which is still at
359          * very early stage, c->ic_root_bh and c->ic_descr->id_ops
360          * haven't been intialized yet.
361          * Also, we don't have this for IAM dir.
362          */
363         if (c->ic_root_bh != NULL &&
364             c->ic_descr->id_ops->id_root_ptr(c) == ptr) {
365                 get_bh(c->ic_root_bh);
366                 *bh = c->ic_root_bh;
367                 return 0;
368         }
369
370         *bh = __ldiskfs_bread(h, c->ic_object, (int)ptr, 0);
371         if (IS_ERR(*bh))
372                 return PTR_ERR(*bh);
373
374         if (*bh == NULL)
375                 return -EIO;
376
377         return 0;
378 }
379
380 /*
381  * Return pointer to current leaf record. Pointer is valid while corresponding
382  * leaf node is locked and pinned.
383  */
384 static struct iam_rec *iam_leaf_rec(const struct iam_leaf *leaf)
385 {
386         return iam_leaf_ops(leaf)->rec(leaf);
387 }
388
389 /*
390  * Return pointer to the current leaf key. This function returns pointer to
391  * the key stored in node.
392  *
393  * Caller should assume that returned pointer is only valid while leaf node is
394  * pinned and locked.
395  */
396 static struct iam_key *iam_leaf_key(const struct iam_leaf *leaf)
397 {
398         return iam_leaf_ops(leaf)->key(leaf);
399 }
400
401 static int iam_leaf_key_size(const struct iam_leaf *leaf)
402 {
403         return iam_leaf_ops(leaf)->key_size(leaf);
404 }
405
406 static struct iam_ikey *iam_leaf_ikey(const struct iam_leaf *leaf,
407                                       struct iam_ikey *key)
408 {
409         return iam_leaf_ops(leaf)->ikey(leaf, key);
410 }
411
412 static int iam_leaf_keycmp(const struct iam_leaf *leaf,
413                            const struct iam_key *key)
414 {
415         return iam_leaf_ops(leaf)->key_cmp(leaf, key);
416 }
417
418 static int iam_leaf_keyeq(const struct iam_leaf *leaf,
419                           const struct iam_key *key)
420 {
421         return iam_leaf_ops(leaf)->key_eq(leaf, key);
422 }
423
424 #if LDISKFS_INVARIANT_ON
425 static int iam_path_check(struct iam_path *p)
426 {
427         int i;
428         int result;
429         struct iam_frame *f;
430         struct iam_descr *param;
431
432         result = 1;
433         param = iam_path_descr(p);
434         for (i = 0; result && i < ARRAY_SIZE(p->ip_frames); ++i) {
435                 f = &p->ip_frames[i];
436                 if (f->bh != NULL) {
437                         result = dx_node_check(p, f);
438                         if (result)
439                                 result = !param->id_ops->id_node_check(p, f);
440                 }
441         }
442         if (result && p->ip_leaf.il_bh != NULL)
443                 result = 1;
444         if (result == 0)
445                 ldiskfs_std_error(iam_path_obj(p)->i_sb, result);
446
447         return result;
448 }
449 #endif
450
451 static int iam_leaf_load(struct iam_path *path)
452 {
453         iam_ptr_t block;
454         int err;
455         struct iam_container *c;
456         struct buffer_head *bh;
457         struct iam_leaf *leaf;
458         struct iam_descr *descr;
459
460         c     = path->ip_container;
461         leaf  = &path->ip_leaf;
462         descr = iam_path_descr(path);
463         block = path->ip_frame->leaf;
464         if (block == 0) {
465                 /* XXX bug 11027 */
466                 printk(KERN_EMERG "wrong leaf: %lu %d [%p %p %p]\n",
467                        (long unsigned)path->ip_frame->leaf,
468                        dx_get_count(dx_node_get_entries(path, path->ip_frame)),
469                        path->ip_frames[0].bh, path->ip_frames[1].bh,
470                        path->ip_frames[2].bh);
471         }
472         err = descr->id_ops->id_node_read(c, block, NULL, &bh);
473         if (err == 0) {
474                 leaf->il_bh = bh;
475                 leaf->il_curidx = block;
476                 err = iam_leaf_ops(leaf)->init(leaf);
477         }
478         return err;
479 }
480
481 static void iam_unlock_htree(struct iam_container *ic,
482                              struct dynlock_handle *lh)
483 {
484         if (lh != NULL)
485                 dynlock_unlock(&ic->ic_tree_lock, lh);
486 }
487
488
489 static void iam_leaf_unlock(struct iam_leaf *leaf)
490 {
491         if (leaf->il_lock != NULL) {
492                 iam_unlock_htree(iam_leaf_container(leaf),
493                                  leaf->il_lock);
494                 do_corr(schedule());
495                 leaf->il_lock = NULL;
496         }
497 }
498
499 static void iam_leaf_fini(struct iam_leaf *leaf)
500 {
501         if (leaf->il_path != NULL) {
502                 iam_leaf_unlock(leaf);
503                 iam_leaf_ops(leaf)->fini(leaf);
504                 if (leaf->il_bh) {
505                         brelse(leaf->il_bh);
506                         leaf->il_bh = NULL;
507                         leaf->il_curidx = 0;
508                 }
509         }
510 }
511
512 static void iam_leaf_start(struct iam_leaf *folio)
513 {
514         iam_leaf_ops(folio)->start(folio);
515 }
516
517 void iam_leaf_next(struct iam_leaf *folio)
518 {
519         iam_leaf_ops(folio)->next(folio);
520 }
521
522 static void iam_leaf_rec_add(struct iam_leaf *leaf, const struct iam_key *key,
523                              const struct iam_rec *rec)
524 {
525         iam_leaf_ops(leaf)->rec_add(leaf, key, rec);
526 }
527
528 static void iam_rec_del(struct iam_leaf *leaf, int shift)
529 {
530         iam_leaf_ops(leaf)->rec_del(leaf, shift);
531 }
532
533 int iam_leaf_at_end(const struct iam_leaf *leaf)
534 {
535         return iam_leaf_ops(leaf)->at_end(leaf);
536 }
537
538 static void iam_leaf_split(struct iam_leaf *l, struct buffer_head **bh,
539                            iam_ptr_t nr)
540 {
541         iam_leaf_ops(l)->split(l, bh, nr);
542 }
543
544 static inline int iam_leaf_empty(struct iam_leaf *l)
545 {
546         return iam_leaf_ops(l)->leaf_empty(l);
547 }
548
549 int iam_leaf_can_add(const struct iam_leaf *l,
550                      const struct iam_key *k, const struct iam_rec *r)
551 {
552         return iam_leaf_ops(l)->can_add(l, k, r);
553 }
554
555 static int iam_txn_dirty(handle_t *handle,
556                          struct iam_path *path, struct buffer_head *bh)
557 {
558         int result;
559
560         result = ldiskfs_handle_dirty_metadata(handle, NULL, bh);
561         if (result != 0)
562                 ldiskfs_std_error(iam_path_obj(path)->i_sb, result);
563         return result;
564 }
565
566 static int iam_txn_add(handle_t *handle,
567                        struct iam_path *path, struct buffer_head *bh)
568 {
569         int result;
570
571         result = ldiskfs_journal_get_write_access(handle, bh);
572         if (result != 0)
573                 ldiskfs_std_error(iam_path_obj(path)->i_sb, result);
574         return result;
575 }
576
577 /***********************************************************************/
578 /* iterator interface                                                  */
579 /***********************************************************************/
580
581 static enum iam_it_state it_state(const struct iam_iterator *it)
582 {
583         return it->ii_state;
584 }
585
586 /*
587  * Helper function returning the container an iterator is attached to.
588  */
589 static struct iam_container *iam_it_container(const struct iam_iterator *it)
590 {
591         return it->ii_path.ip_container;
592 }
593
594 static inline int it_keycmp(const struct iam_iterator *it,
595                             const struct iam_key *k)
596 {
597         return iam_leaf_keycmp(&it->ii_path.ip_leaf, k);
598 }
599
600 static inline int it_keyeq(const struct iam_iterator *it,
601                            const struct iam_key *k)
602 {
603         return iam_leaf_keyeq(&it->ii_path.ip_leaf, k);
604 }
605
606 static int it_ikeycmp(const struct iam_iterator *it, const struct iam_ikey *ik)
607 {
608         return iam_ikeycmp(it->ii_path.ip_container,
609                            iam_leaf_ikey(&it->ii_path.ip_leaf,
610                                         iam_path_ikey(&it->ii_path, 0)), ik);
611 }
612
613 static inline int it_at_rec(const struct iam_iterator *it)
614 {
615         return !iam_leaf_at_end(&it->ii_path.ip_leaf);
616 }
617
618 static inline int it_before(const struct iam_iterator *it)
619 {
620         return it_state(it) == IAM_IT_SKEWED && it_at_rec(it);
621 }
622
623 /*
624  * Helper wrapper around iam_it_get(): returns 0 (success) only when record
625  * with exactly the same key as the one asked for is found.
626  */
627 static int iam_it_get_exact(struct iam_iterator *it, const struct iam_key *k)
628 {
629         int result;
630
631         result = iam_it_get(it, k);
632         if (result > 0)
633                 result = 0;
634         else if (result == 0)
635                 /*
636                  * Return -ENOENT if cursor is located above record with a key
637                  * different from one specified, or in the empty leaf.
638                  *
639                  * XXX returning -ENOENT only works if iam_it_get() never
640                  * returns -ENOENT as a legitimate error.
641                  */
642                 result = -ENOENT;
643         return result;
644 }
645
646 void iam_container_write_lock(struct iam_container *ic)
647 {
648         down_write(&ic->ic_sem);
649 }
650
651 void iam_container_write_unlock(struct iam_container *ic)
652 {
653         up_write(&ic->ic_sem);
654 }
655
656 void iam_container_read_lock(struct iam_container *ic)
657 {
658         down_read(&ic->ic_sem);
659 }
660
661 void iam_container_read_unlock(struct iam_container *ic)
662 {
663         up_read(&ic->ic_sem);
664 }
665
666 /*
667  * Initialize iterator to IAM_IT_DETACHED state.
668  *
669  * postcondition: it_state(it) == IAM_IT_DETACHED
670  */
671 int  iam_it_init(struct iam_iterator *it, struct iam_container *c, __u32 flags,
672                  struct iam_path_descr *pd)
673 {
674         memset(it, 0, sizeof *it);
675         it->ii_flags  = flags;
676         it->ii_state  = IAM_IT_DETACHED;
677         iam_path_init(&it->ii_path, c, pd);
678         return 0;
679 }
680
681 /*
682  * Finalize iterator and release all resources.
683  *
684  * precondition: it_state(it) == IAM_IT_DETACHED
685  */
686 void iam_it_fini(struct iam_iterator *it)
687 {
688         assert_corr(it_state(it) == IAM_IT_DETACHED);
689         iam_path_fini(&it->ii_path);
690 }
691
692 /*
693  * These locking primitives are used to protect parts of the dir's
694  * htree. The protection unit is a block: leaf or index.
695  */
696 static struct dynlock_handle *iam_lock_htree(struct iam_container *ic,
697                                              unsigned long value,
698                                              enum dynlock_type lt)
699 {
700         return dynlock_lock(&ic->ic_tree_lock, value, lt, GFP_NOFS);
701 }
702
703 static int iam_index_lock(struct iam_path *path, struct dynlock_handle **lh)
704 {
705         struct iam_frame *f;
706
707         for (f = path->ip_frame; f >= path->ip_frames; --f, ++lh) {
708                 do_corr(schedule());
709                 *lh = iam_lock_htree(path->ip_container, f->curidx, DLT_READ);
710                 if (*lh == NULL)
711                         return -ENOMEM;
712         }
713         return 0;
714 }
715
716 /*
717  * Fast check for frame consistency.
718  */
719 static int iam_check_fast(struct iam_path *path, struct iam_frame *frame)
720 {
721         struct iam_container *bag;
722         struct iam_entry *next;
723         struct iam_entry *last;
724         struct iam_entry *entries;
725         struct iam_entry *at;
726
727         bag = path->ip_container;
728         at = frame->at;
729         entries = frame->entries;
730         last = iam_entry_shift(path, entries, dx_get_count(entries) - 1);
731
732         if (unlikely(at > last))
733                 return -EAGAIN;
734
735         if (unlikely(dx_get_block(path, at) != frame->leaf))
736                 return -EAGAIN;
737
738         if (unlikely(iam_ikeycmp(bag, iam_ikey_at(path, at),
739                      path->ip_ikey_target) > 0))
740                 return -EAGAIN;
741
742         next = iam_entry_shift(path, at, +1);
743         if (next <= last) {
744                 if (unlikely(iam_ikeycmp(bag, iam_ikey_at(path, next),
745                                          path->ip_ikey_target) <= 0))
746                         return -EAGAIN;
747         }
748         return 0;
749 }
750
751 int dx_index_is_compat(struct iam_path *path)
752 {
753         return iam_path_descr(path) == NULL;
754 }
755
756 /*
757  * iam_find_position
758  *
759  * Binary search for the position of the specified hash in an index node.
760  *
761  */
762
763 static struct iam_entry *iam_find_position(struct iam_path *path,
764                                            struct iam_frame *frame)
765 {
766         int count;
767         struct iam_entry *p;
768         struct iam_entry *q;
769         struct iam_entry *m;
770
771         count = dx_get_count(frame->entries);
772         assert_corr(count && count <= dx_get_limit(frame->entries));
773         p = iam_entry_shift(path, frame->entries,
774                             dx_index_is_compat(path) ? 1 : 2);
775         q = iam_entry_shift(path, frame->entries, count - 1);
776         while (p <= q) {
777                 m = iam_entry_shift(path, p, iam_entry_diff(path, q, p) / 2);
778                 if (iam_ikeycmp(path->ip_container, iam_ikey_at(path, m),
779                                 path->ip_ikey_target) > 0)
780                         q = iam_entry_shift(path, m, -1);
781                 else
782                         p = iam_entry_shift(path, m, +1);
783         }
784         return iam_entry_shift(path, p, -1);
785 }
786
787
788
789 static iam_ptr_t iam_find_ptr(struct iam_path *path, struct iam_frame *frame)
790 {
791         return dx_get_block(path, iam_find_position(path, frame));
792 }
793
794 void iam_insert_key(struct iam_path *path, struct iam_frame *frame,
795                     const struct iam_ikey *key, iam_ptr_t ptr)
796 {
797         struct iam_entry *entries = frame->entries;
798         struct iam_entry *new = iam_entry_shift(path, frame->at, +1);
799         int count = dx_get_count(entries);
800
801         /*
802          * Unfortunately we cannot assert this, as this function is sometimes
803          * called by VFS under i_sem and without pdirops lock.
804          */
805         assert_corr(1 || iam_frame_is_locked(path, frame));
806         assert_corr(count < dx_get_limit(entries));
807         assert_corr(frame->at < iam_entry_shift(path, entries, count));
808         assert_inv(dx_node_check(path, frame));
809
810         memmove(iam_entry_shift(path, new, 1), new,
811                 (char *)iam_entry_shift(path, entries, count) - (char *)new);
812         dx_set_ikey(path, new, key);
813         dx_set_block(path, new, ptr);
814         dx_set_count(entries, count + 1);
815         assert_inv(dx_node_check(path, frame));
816 }
817
818 void iam_insert_key_lock(struct iam_path *path, struct iam_frame *frame,
819                          const struct iam_ikey *key, iam_ptr_t ptr)
820 {
821         iam_lock_bh(frame->bh);
822         iam_insert_key(path, frame, key, ptr);
823         iam_unlock_bh(frame->bh);
824 }
825 /*
826  * returns 0 if path was unchanged, -EAGAIN otherwise.
827  */
828 static int iam_check_path(struct iam_path *path, struct iam_frame *frame)
829 {
830         int equal;
831
832         iam_lock_bh(frame->bh);
833         equal = iam_check_fast(path, frame) == 0 ||
834                 frame->leaf == iam_find_ptr(path, frame);
835         DX_DEVAL(iam_lock_stats.dls_bh_again += !equal);
836         iam_unlock_bh(frame->bh);
837
838         return equal ? 0 : -EAGAIN;
839 }
840
841 static int iam_lookup_try(struct iam_path *path)
842 {
843         u32 ptr;
844         int err = 0;
845         int i;
846
847         struct iam_descr *param;
848         struct iam_frame *frame;
849         struct iam_container *c;
850
851         param = iam_path_descr(path);
852         c = path->ip_container;
853
854         ptr = param->id_ops->id_root_ptr(c);
855         for (frame = path->ip_frames, i = 0; i <= path->ip_indirect;
856              ++frame, ++i) {
857                 err = param->id_ops->id_node_read(c, (iam_ptr_t)ptr, NULL,
858                                                   &frame->bh);
859                 do_corr(schedule());
860
861                 iam_lock_bh(frame->bh);
862                 /*
863                  * node must be initialized under bh lock because concurrent
864                  * creation procedure may change it and iam_lookup_try() will
865                  * see obsolete tree height. -bzzz
866                  */
867                 if (err != 0)
868                         break;
869
870                 if (LDISKFS_INVARIANT_ON) {
871                         err = param->id_ops->id_node_check(path, frame);
872                         if (err != 0)
873                                 break;
874                 }
875
876                 err = param->id_ops->id_node_load(path, frame);
877                 if (err != 0)
878                         break;
879
880                 assert_inv(dx_node_check(path, frame));
881                 /*
882                  * splitting may change root index block and move hash we're
883                  * looking for into another index block, so we have to check
884                  * this situation and repeat from the beginning if the path changed
885                  * -bzzz
886                  */
887                 if (i > 0) {
888                         err = iam_check_path(path, frame - 1);
889                         if (err != 0)
890                                 break;
891                 }
892
893                 frame->at = iam_find_position(path, frame);
894                 frame->curidx = ptr;
895                 frame->leaf = ptr = dx_get_block(path, frame->at);
896
897                 iam_unlock_bh(frame->bh);
898                 do_corr(schedule());
899         }
900         if (err != 0)
901                 iam_unlock_bh(frame->bh);
902         path->ip_frame = --frame;
903         return err;
904 }
905
906 static int __iam_path_lookup(struct iam_path *path)
907 {
908         int err;
909         int i;
910
911         for (i = 0; i < DX_MAX_TREE_HEIGHT; ++ i)
912                 assert(path->ip_frames[i].bh == NULL);
913
914         do {
915                 err = iam_lookup_try(path);
916                 do_corr(schedule());
917                 if (err != 0)
918                         iam_path_fini(path);
919         } while (err == -EAGAIN);
920
921         return err;
922 }
923
924 /*
925  * returns 0 if path was unchanged, -EAGAIN otherwise.
926  */
927 static int iam_check_full_path(struct iam_path *path, int search)
928 {
929         struct iam_frame *bottom;
930         struct iam_frame *scan;
931         int i;
932         int result;
933
934         do_corr(schedule());
935
936         for (bottom = path->ip_frames, i = 0;
937              i < DX_MAX_TREE_HEIGHT && bottom->bh != NULL; ++bottom, ++i) {
938                 ; /* find last filled in frame */
939         }
940
941         /*
942          * Lock frames, bottom to top.
943          */
944         for (scan = bottom - 1; scan >= path->ip_frames; --scan)
945                 iam_lock_bh(scan->bh);
946         /*
947          * Check them top to bottom.
948          */
949         result = 0;
950         for (scan = path->ip_frames; scan < bottom; ++scan) {
951                 struct iam_entry *pos;
952
953                 if (search) {
954                         if (iam_check_fast(path, scan) == 0)
955                                 continue;
956
957                         pos = iam_find_position(path, scan);
958                         if (scan->leaf != dx_get_block(path, pos)) {
959                                 result = -EAGAIN;
960                                 break;
961                         }
962                         scan->at = pos;
963                 } else {
964                         pos = iam_entry_shift(path, scan->entries,
965                                               dx_get_count(scan->entries) - 1);
966                         if (scan->at > pos ||
967                             scan->leaf != dx_get_block(path, scan->at)) {
968                                 result = -EAGAIN;
969                                 break;
970                         }
971                 }
972         }
973
974         /*
975          * Unlock top to bottom.
976          */
977         for (scan = path->ip_frames; scan < bottom; ++scan)
978                 iam_unlock_bh(scan->bh);
979         DX_DEVAL(iam_lock_stats.dls_bh_full_again += !!result);
980         do_corr(schedule());
981
982         return result;
983 }
984
985
986 /*
987  * Performs path lookup and returns with found leaf (if any) locked by htree
988  * lock.
989  */
990 static int iam_lookup_lock(struct iam_path *path,
991                            struct dynlock_handle **dl, enum dynlock_type lt)
992 {
993         int result;
994
995         while ((result = __iam_path_lookup(path)) == 0) {
996                 do_corr(schedule());
997                 *dl = iam_lock_htree(path->ip_container, path->ip_frame->leaf,
998                                      lt);
999                 if (*dl == NULL) {
1000                         iam_path_fini(path);
1001                         result = -ENOMEM;
1002                         break;
1003                 }
1004                 do_corr(schedule());
1005                 /*
1006                  * while locking leaf we just found may get split so we need
1007                  * to check this -bzzz
1008                  */
1009                 if (iam_check_full_path(path, 1) == 0)
1010                         break;
1011                 iam_unlock_htree(path->ip_container, *dl);
1012                 *dl = NULL;
1013                 iam_path_fini(path);
1014         }
1015         return result;
1016 }
1017 /*
1018  * Performs tree top-to-bottom traversal starting from root, and loads leaf
1019  * node.
1020  */
1021 static int iam_path_lookup(struct iam_path *path, int index)
1022 {
1023         struct iam_leaf  *leaf;
1024         int result;
1025
1026         leaf = &path->ip_leaf;
1027         result = iam_lookup_lock(path, &leaf->il_lock, DLT_WRITE);
1028         assert_inv(iam_path_check(path));
1029         do_corr(schedule());
1030         if (result == 0) {
1031                 result = iam_leaf_load(path);
1032                 if (result == 0) {
1033                         do_corr(schedule());
1034                         if (index)
1035                                 result = iam_leaf_ops(leaf)->
1036                                         ilookup(leaf, path->ip_ikey_target);
1037                         else
1038                                 result = iam_leaf_ops(leaf)->
1039                                         lookup(leaf, path->ip_key_target);
1040                         do_corr(schedule());
1041                 }
1042                 if (result < 0)
1043                         iam_leaf_unlock(leaf);
1044         }
1045         return result;
1046 }
1047
1048 /*
1049  * Common part of iam_it_{i,}get().
1050  */
1051 static int __iam_it_get(struct iam_iterator *it, int index)
1052 {
1053         int result;
1054
1055         assert_corr(it_state(it) == IAM_IT_DETACHED);
1056
1057         result = iam_path_lookup(&it->ii_path, index);
1058         if (result >= 0) {
1059                 int collision;
1060
1061                 collision = result & IAM_LOOKUP_LAST;
1062                 switch (result & ~IAM_LOOKUP_LAST) {
1063                 case IAM_LOOKUP_EXACT:
1064                         result = +1;
1065                         it->ii_state = IAM_IT_ATTACHED;
1066                         break;
1067                 case IAM_LOOKUP_OK:
1068                         result = 0;
1069                         it->ii_state = IAM_IT_ATTACHED;
1070                         break;
1071                 case IAM_LOOKUP_BEFORE:
1072                 case IAM_LOOKUP_EMPTY:
1073                         result = 0;
1074                         it->ii_state = IAM_IT_SKEWED;
1075                         break;
1076                 default:
1077                         assert(0);
1078                 }
1079                 result |= collision;
1080         }
1081         /*
1082          * See iam_it_get_exact() for explanation.
1083          */
1084         assert_corr(result != -ENOENT);
1085         return result;
1086 }
1087
1088 /*
1089  * The correct hash but not the same key was found; iterate through the
1090  * hash collision chain, looking for the correct record.
1091  */
1092 static int iam_it_collision(struct iam_iterator *it)
1093 {
1094         int result;
1095
1096         assert(ergo(it_at_rec(it), !it_keyeq(it, it->ii_path.ip_key_target)));
1097
1098         while ((result = iam_it_next(it)) == 0) {
1099                 do_corr(schedule());
1100                 if (it_ikeycmp(it, it->ii_path.ip_ikey_target) != 0)
1101                         return -ENOENT;
1102                 if (it_keyeq(it, it->ii_path.ip_key_target))
1103                         return 0;
1104         }
1105         return result;
1106 }
1107
1108 /*
1109  * Attach iterator. After successful completion, @it points to record with
1110  * least key not larger than @k.
1111  *
1112  * Return value: 0: positioned on existing record,
1113  *             +ve: exact position found,
1114  *             -ve: error.
1115  *
1116  * precondition:  it_state(it) == IAM_IT_DETACHED
1117  * postcondition: ergo(result == 0 && it_state(it) == IAM_IT_ATTACHED,
1118  *                     it_keycmp(it, k) <= 0)
1119  */
1120 int iam_it_get(struct iam_iterator *it, const struct iam_key *k)
1121 {
1122         int result;
1123
1124         assert_corr(it_state(it) == IAM_IT_DETACHED);
1125
1126         it->ii_path.ip_ikey_target = NULL;
1127         it->ii_path.ip_key_target  = k;
1128
1129         result = __iam_it_get(it, 0);
1130
1131         if (result == IAM_LOOKUP_LAST) {
1132                 result = iam_it_collision(it);
1133                 if (result != 0) {
1134                         iam_it_put(it);
1135                         iam_it_fini(it);
1136                         result = __iam_it_get(it, 0);
1137                 } else
1138                         result = +1;
1139         }
1140         if (result > 0)
1141                 result &= ~IAM_LOOKUP_LAST;
1142
1143         assert_corr(ergo(result > 0, it_keycmp(it, k) == 0));
1144         assert_corr(ergo(result == 0 && it_state(it) == IAM_IT_ATTACHED,
1145                     it_keycmp(it, k) <= 0));
1146         return result;
1147 }
1148
1149 /*
1150  * Attach iterator by index key.
1151  */
1152 static int iam_it_iget(struct iam_iterator *it, const struct iam_ikey *k)
1153 {
1154         assert_corr(it_state(it) == IAM_IT_DETACHED);
1155
1156         it->ii_path.ip_ikey_target = k;
1157         return __iam_it_get(it, 1) & ~IAM_LOOKUP_LAST;
1158 }
1159
1160 /*
1161  * Attach iterator, and assure it points to the record (not skewed).
1162  *
1163  * Return value: 0: positioned on existing record,
1164  *             +ve: exact position found,
1165  *             -ve: error.
1166  *
1167  * precondition:  it_state(it) == IAM_IT_DETACHED &&
1168  *                !(it->ii_flags&IAM_IT_WRITE)
1169  * postcondition: ergo(result == 0, it_state(it) == IAM_IT_ATTACHED)
1170  */
1171 int iam_it_get_at(struct iam_iterator *it, const struct iam_key *k)
1172 {
1173         int result;
1174
1175         assert_corr(it_state(it) == IAM_IT_DETACHED &&
1176                     !(it->ii_flags&IAM_IT_WRITE));
1177         result = iam_it_get(it, k);
1178         if (result == 0) {
1179                 if (it_state(it) != IAM_IT_ATTACHED) {
1180                         assert_corr(it_state(it) == IAM_IT_SKEWED);
1181                         result = iam_it_next(it);
1182                 }
1183         }
1184         assert_corr(ergo(result >= 0, it_state(it) == IAM_IT_ATTACHED));
1185         return result;
1186 }
1187
1188 /*
1189  * Duplicates iterator.
1190  *
1191  * postcondition: it_state(dst) == it_state(src) &&
1192  *                iam_it_container(dst) == iam_it_container(src) &&
1193  *                dst->ii_flags == src->ii_flags &&
1194  *                ergo(it_state(src) == IAM_IT_ATTACHED,
1195  *                     iam_it_rec_get(dst) == iam_it_rec_get(src) &&
1196  *                     iam_it_key_get(dst) == iam_it_key_get(src))
1197  */
1198 void iam_it_dup(struct iam_iterator *dst, const struct iam_iterator *src)
1199 {
1200         dst->ii_flags = src->ii_flags;
1201         dst->ii_state = src->ii_state;
1202         /* XXX not yet. iam_path_dup(&dst->ii_path, &src->ii_path); */
1203         /*
1204          * XXX: duplicate lock.
1205          */
1206         assert_corr(it_state(dst) == it_state(src));
1207         assert_corr(iam_it_container(dst) == iam_it_container(src));
1208         assert_corr(dst->ii_flags == src->ii_flags);
1209         assert_corr(ergo(it_state(src) == IAM_IT_ATTACHED,
1210                     iam_it_rec_get(dst) == iam_it_rec_get(src) &&
1211                     iam_it_key_get(dst) == iam_it_key_get(src)));
1212 }
1213
1214 /*
1215  * Detach iterator. Does nothing if the iterator is already in detached state.
1216  *
1217  * postcondition: it_state(it) == IAM_IT_DETACHED
1218  */
1219 void iam_it_put(struct iam_iterator *it)
1220 {
1221         if (it->ii_state != IAM_IT_DETACHED) {
1222                 it->ii_state = IAM_IT_DETACHED;
1223                 iam_leaf_fini(&it->ii_path.ip_leaf);
1224         }
1225 }
1226
1227 static struct iam_ikey *iam_it_ikey_get(const struct iam_iterator *it,
1228                                         struct iam_ikey *ikey);
1229
1230
1231 /*
1232  * This function increments the frame pointer to search the next leaf
1233  * block, and reads in the necessary intervening nodes if the search
1234  * should be necessary.  Whether or not the search is necessary is
1235  * controlled by the hash parameter.  If the hash value is even, then
1236  * the search is only continued if the next block starts with that
1237  * hash value.  This is used if we are searching for a specific file.
1238  *
1239  * If the hash value is HASH_NB_ALWAYS, then always go to the next block.
1240  *
1241  * This function returns 1 if the caller should continue to search,
1242  * or 0 if it should not.  If there is an error reading one of the
1243  * index blocks, it will return a negative error code.
1244  *
1245  * If start_hash is non-null, it will be filled in with the starting
1246  * hash of the next page.
1247  */
1248 static int iam_htree_advance(struct inode *dir, __u32 hash,
1249                               struct iam_path *path, __u32 *start_hash,
1250                               int compat)
1251 {
1252         struct iam_frame *p;
1253         struct buffer_head *bh;
1254         int err, num_frames = 0;
1255         __u32 bhash;
1256
1257         p = path->ip_frame;
1258         /*
1259          * Find the next leaf page by incrementing the frame pointer.
1260          * If we run out of entries in the interior node, loop around and
1261          * increment pointer in the parent node.  When we break out of
1262          * this loop, num_frames indicates the number of interior
1263          * nodes that need to be read.
1264          */
1265         while (1) {
1266                 do_corr(schedule());
1267                 iam_lock_bh(p->bh);
1268                 if (p->at_shifted)
1269                         p->at_shifted = 0;
1270                 else
1271                         p->at = iam_entry_shift(path, p->at, +1);
1272                 if (p->at < iam_entry_shift(path, p->entries,
1273                                             dx_get_count(p->entries))) {
1274                         p->leaf = dx_get_block(path, p->at);
1275                         iam_unlock_bh(p->bh);
1276                         break;
1277                 }
1278                 iam_unlock_bh(p->bh);
1279                 if (p == path->ip_frames)
1280                         return 0;
1281                 num_frames++;
1282                 --p;
1283         }
1284
1285         if (compat) {
1286                 /*
1287                  * Htree hash magic.
1288                  */
1289
1290                 /*
1291                  * If the hash is 1, then continue only if the next page has a
1292                  * continuation hash of any value.  This is used for readdir
1293                  * handling.  Otherwise, check to see if the hash matches the
1294                  * desired continuation hash.  If it doesn't, return since
1295                  * there's no point to read in the successive index pages.
1296                  */
1297                 dx_get_ikey(path, p->at, (struct iam_ikey *)&bhash);
1298                 if (start_hash)
1299                         *start_hash = bhash;
1300                 if ((hash & 1) == 0) {
1301                         if ((bhash & ~1) != hash)
1302                                 return 0;
1303                 }
1304         }
1305         /*
1306          * If the hash is HASH_NB_ALWAYS, we always go to the next
1307          * block so no check is necessary
1308          */
1309         while (num_frames--) {
1310                 iam_ptr_t idx;
1311
1312                 do_corr(schedule());
1313                 iam_lock_bh(p->bh);
1314                 idx = p->leaf = dx_get_block(path, p->at);
1315                 iam_unlock_bh(p->bh);
1316                 err = iam_path_descr(path)->id_ops->
1317                         id_node_read(path->ip_container, idx, NULL, &bh);
1318                 if (err != 0)
1319                         return err; /* Failure */
1320                 ++p;
1321                 brelse(p->bh);
1322                 assert_corr(p->bh != bh);
1323                 p->bh = bh;
1324                 p->entries = dx_node_get_entries(path, p);
1325                 p->at = iam_entry_shift(path, p->entries, !compat);
1326                 assert_corr(p->curidx != idx);
1327                 p->curidx = idx;
1328                 iam_lock_bh(p->bh);
1329                 assert_corr(p->leaf != dx_get_block(path, p->at));
1330                 p->leaf = dx_get_block(path, p->at);
1331                 iam_unlock_bh(p->bh);
1332                 assert_inv(dx_node_check(path, p));
1333         }
1334         return 1;
1335 }
1336
1337 static inline int iam_index_advance(struct iam_path *path)
1338 {
1339         return iam_htree_advance(iam_path_obj(path), 0, path, NULL, 0);
1340 }
1341
1342 static void iam_unlock_array(struct iam_container *ic,
1343                              struct dynlock_handle **lh)
1344 {
1345         int i;
1346
1347         for (i = 0; i < DX_MAX_TREE_HEIGHT; ++i, ++lh) {
1348                 if (*lh != NULL) {
1349                         iam_unlock_htree(ic, *lh);
1350                         *lh = NULL;
1351                 }
1352         }
1353 }
1354 /*
1355  * Advance index part of @path to point to the next leaf. Returns 1 on
1356  * success, 0 when the end of the container was reached. Leaf node is locked.
1357  */
1358 int iam_index_next(struct iam_container *c, struct iam_path *path)
1359 {
1360         iam_ptr_t cursor;
1361         struct dynlock_handle *lh[DX_MAX_TREE_HEIGHT] = { NULL, };
1362         int result;
1363
1364         /*
1365          * Locking for iam_index_next()... is to be described.
1366          */
1367
1368         cursor = path->ip_frame->leaf;
1369
1370         while (1) {
1371                 result = iam_index_lock(path, lh);
1372                 do_corr(schedule());
1373                 if (result < 0)
1374                         break;
1375
1376                 result = iam_check_full_path(path, 0);
1377                 if (result == 0 && cursor == path->ip_frame->leaf) {
1378                         result = iam_index_advance(path);
1379
1380                         assert_corr(result == 0 ||
1381                                     cursor != path->ip_frame->leaf);
1382                         break;
1383                 }
1384                 do {
1385                         iam_unlock_array(c, lh);
1386
1387                         iam_path_release(path);
1388                         do_corr(schedule());
1389
1390                         result = __iam_path_lookup(path);
1391                         if (result < 0)
1392                                 break;
1393
1394                         while (path->ip_frame->leaf != cursor) {
1395                                 do_corr(schedule());
1396
1397                                 result = iam_index_lock(path, lh);
1398                                 do_corr(schedule());
1399                                 if (result < 0)
1400                                         break;
1401
1402                                 result = iam_check_full_path(path, 0);
1403                                 if (result != 0)
1404                                         break;
1405
1406                                 result = iam_index_advance(path);
1407                                 if (result == 0) {
1408                                         CERROR("cannot find cursor : %u\n",
1409                                                 cursor);
1410                                         result = -EIO;
1411                                 }
1412                                 if (result < 0)
1413                                         break;
1414                                 result = iam_check_full_path(path, 0);
1415                                 if (result != 0)
1416                                         break;
1417                                 iam_unlock_array(c, lh);
1418                         }
1419                 } while (result == -EAGAIN);
1420                 if (result < 0)
1421                         break;
1422         }
1423         iam_unlock_array(c, lh);
1424         return result;
1425 }
1426
1427 /*
1428  * Move iterator one record right.
1429  *
1430  * Return value: 0: success,
1431  *              +1: end of container reached
1432  *             -ve: error
1433  *
1434  * precondition:  (it_state(it) == IAM_IT_ATTACHED ||
1435  *                 it_state(it) == IAM_IT_SKEWED) && it->ii_flags&IAM_IT_MOVE
1436  * postcondition: ergo(result == 0, it_state(it) == IAM_IT_ATTACHED) &&
1437  *                ergo(result >  0, it_state(it) == IAM_IT_DETACHED)
1438  */
1439 int iam_it_next(struct iam_iterator *it)
1440 {
1441         int result;
1442         struct iam_path *path;
1443         struct iam_leaf *leaf;
1444
1445         do_corr(struct iam_ikey *ik_orig);
1446
1447         /* assert_corr(it->ii_flags&IAM_IT_MOVE); */
1448         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1449                     it_state(it) == IAM_IT_SKEWED);
1450
1451         path = &it->ii_path;
1452         leaf = &path->ip_leaf;
1453
1454         assert_corr(iam_leaf_is_locked(leaf));
1455
1456         result = 0;
1457         do_corr(ik_orig = it_at_rec(it) ?
1458                 iam_it_ikey_get(it, iam_path_ikey(path, 2)) : NULL);
1459         if (it_before(it)) {
1460                 assert_corr(!iam_leaf_at_end(leaf));
1461                 it->ii_state = IAM_IT_ATTACHED;
1462         } else {
1463                 if (!iam_leaf_at_end(leaf))
1464                         /* advance within leaf node */
1465                         iam_leaf_next(leaf);
1466                 /*
1467                  * multiple iterations may be necessary due to empty leaves.
1468                  */
1469                 while (result == 0 && iam_leaf_at_end(leaf)) {
1470                         do_corr(schedule());
1471                         /* advance index portion of the path */
1472                         result = iam_index_next(iam_it_container(it), path);
1473                         assert_corr(iam_leaf_is_locked(leaf));
1474                         if (result == 1) {
1475                                 struct dynlock_handle *lh;
1476                                 lh = iam_lock_htree(iam_it_container(it),
1477                                                     path->ip_frame->leaf,
1478                                                     DLT_WRITE);
1479                                 if (lh != NULL) {
1480                                         iam_leaf_fini(leaf);
1481                                         leaf->il_lock = lh;
1482                                         result = iam_leaf_load(path);
1483                                         if (result == 0)
1484                                                 iam_leaf_start(leaf);
1485                                 } else
1486                                         result = -ENOMEM;
1487                         } else if (result == 0)
1488                                 /* end of container reached */
1489                                 result = +1;
1490                         if (result != 0)
1491                                 iam_it_put(it);
1492                 }
1493                 if (result == 0)
1494                         it->ii_state = IAM_IT_ATTACHED;
1495         }
1496         assert_corr(ergo(result == 0, it_state(it) == IAM_IT_ATTACHED));
1497         assert_corr(ergo(result >  0, it_state(it) == IAM_IT_DETACHED));
1498         assert_corr(ergo(result == 0 && ik_orig != NULL,
1499                     it_ikeycmp(it, ik_orig) >= 0));
1500         return result;
1501 }
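
/*
 * Hypothetical sketch of the iterator protocol implemented above: attach at
 * a key, walk records to the right, detach.  The helper name is made up, and
 * taking ic_sem for read around the scan is this sketch's assumption about
 * caller locking.
 */
#if 0
static int example_scan_from(struct iam_container *c,
			     struct iam_path_descr *pd,
			     const struct iam_key *k)
{
	struct iam_iterator it;
	int rc;

	iam_it_init(&it, c, IAM_IT_MOVE, pd);

	iam_container_read_lock(c);
	rc = iam_it_get_at(&it, k);	/* >= 0: positioned on a record */
	while (rc >= 0 && it_state(&it) == IAM_IT_ATTACHED) {
		struct iam_rec *rec = iam_it_rec_get(&it);

		(void)rec;		/* consume the record here */
		rc = iam_it_next(&it);	/* +1 means end of container */
		if (rc != 0)
			break;
	}
	iam_it_put(&it);
	iam_container_read_unlock(c);
	iam_it_fini(&it);
	return rc < 0 ? rc : 0;
}
#endif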
1502
1503 /*
1504  * Return pointer to the record under iterator.
1505  *
1506  * precondition:  it_state(it) == IAM_IT_ATTACHED && it_at_rec(it)
1507  * postcondition: it_state(it) == IAM_IT_ATTACHED
1508  */
1509 struct iam_rec *iam_it_rec_get(const struct iam_iterator *it)
1510 {
1511         assert_corr(it_state(it) == IAM_IT_ATTACHED);
1512         assert_corr(it_at_rec(it));
1513         return iam_leaf_rec(&it->ii_path.ip_leaf);
1514 }
1515
1516 static void iam_it_reccpy(struct iam_iterator *it, const struct iam_rec *r)
1517 {
1518         struct iam_leaf *folio;
1519
1520         folio = &it->ii_path.ip_leaf;
1521         iam_leaf_ops(folio)->rec_set(folio, r);
1522 }
1523
1524 /*
1525  * Replace contents of record under iterator.
1526  *
1527  * precondition:  it_state(it) == IAM_IT_ATTACHED &&
1528  *                it->ii_flags&IAM_IT_WRITE
1529  * postcondition: it_state(it) == IAM_IT_ATTACHED &&
1530  *                ergo(result == 0, !memcmp(iam_it_rec_get(it), r, ...))
1531  */
1532 int iam_it_rec_set(handle_t *h,
1533                    struct iam_iterator *it, const struct iam_rec *r)
1534 {
1535         int result;
1536         struct iam_path *path;
1537         struct buffer_head *bh;
1538
1539         assert_corr(it_state(it) == IAM_IT_ATTACHED &&
1540                     it->ii_flags&IAM_IT_WRITE);
1541         assert_corr(it_at_rec(it));
1542
1543         path = &it->ii_path;
1544         bh = path->ip_leaf.il_bh;
1545         result = iam_txn_add(h, path, bh);
1546         if (result == 0) {
1547                 iam_it_reccpy(it, r);
1548                 result = iam_txn_dirty(h, path, bh);
1549         }
1550         return result;
1551 }
1552
1553 /*
1554  * Return pointer to the index key under iterator.
1555  *
1556  * precondition:  it_state(it) == IAM_IT_ATTACHED ||
1557  *                it_state(it) == IAM_IT_SKEWED
1558  */
1559 static struct iam_ikey *iam_it_ikey_get(const struct iam_iterator *it,
1560                                         struct iam_ikey *ikey)
1561 {
1562         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1563                     it_state(it) == IAM_IT_SKEWED);
1564         assert_corr(it_at_rec(it));
1565         return iam_leaf_ikey(&it->ii_path.ip_leaf, ikey);
1566 }
1567
1568 /*
1569  * Return pointer to the key under iterator.
1570  *
1571  * precondition:  it_state(it) == IAM_IT_ATTACHED ||
1572  *                it_state(it) == IAM_IT_SKEWED
1573  */
1574 struct iam_key *iam_it_key_get(const struct iam_iterator *it)
1575 {
1576         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1577                     it_state(it) == IAM_IT_SKEWED);
1578         assert_corr(it_at_rec(it));
1579         return iam_leaf_key(&it->ii_path.ip_leaf);
1580 }
1581
1582 /*
1583  * Return size of key under iterator (in bytes)
1584  *
1585  * precondition:  it_state(it) == IAM_IT_ATTACHED ||
1586  *                it_state(it) == IAM_IT_SKEWED
1587  */
1588 int iam_it_key_size(const struct iam_iterator *it)
1589 {
1590         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1591                     it_state(it) == IAM_IT_SKEWED);
1592         assert_corr(it_at_rec(it));
1593         return iam_leaf_key_size(&it->ii_path.ip_leaf);
1594 }
1595
1596 static struct buffer_head *
1597 iam_new_node(handle_t *h, struct iam_container *c, iam_ptr_t *b, int *e)
1598 {
1599         struct inode *inode = c->ic_object;
1600         struct buffer_head *bh = NULL;
1601         struct iam_idle_head *head;
1602         struct buffer_head *idle;
1603         __u32 *idle_blocks;
1604         __u16 count;
1605
1606         if (c->ic_idle_bh == NULL)
1607                 goto newblock;
1608
1609         mutex_lock(&c->ic_idle_mutex);
1610         if (unlikely(c->ic_idle_bh == NULL)) {
1611                 mutex_unlock(&c->ic_idle_mutex);
1612                 goto newblock;
1613         }
1614
1615         head = (struct iam_idle_head *)(c->ic_idle_bh->b_data);
1616         count = le16_to_cpu(head->iih_count);
1617         if (count > 0) {
1618                 *e = ldiskfs_journal_get_write_access(h, c->ic_idle_bh);
1619                 if (*e != 0)
1620                         goto fail;
1621
1622                 --count;
1623                 *b = le32_to_cpu(head->iih_blks[count]);
1624                 head->iih_count = cpu_to_le16(count);
1625                 *e = ldiskfs_handle_dirty_metadata(h, inode, c->ic_idle_bh);
1626                 if (*e != 0)
1627                         goto fail;
1628
1629                 mutex_unlock(&c->ic_idle_mutex);
1630                 bh = __ldiskfs_bread(NULL, inode, *b, 0);
1631                 if (IS_ERR_OR_NULL(bh)) {
1632                         if (IS_ERR(bh))
1633                                 *e = PTR_ERR(bh);
1634                         else
1635                                 *e = -EIO;
1636                         return NULL;
1637                 }
1638                 goto got;
1639         }
1640
1641         /* The block that contains the iam_idle_head is itself an idle
1642          * block, and can be used as the new node. */
1643         idle_blocks = (__u32 *)(c->ic_root_bh->b_data +
1644                                 c->ic_descr->id_root_gap +
1645                                 sizeof(struct dx_countlimit));
1646         *e = ldiskfs_journal_get_write_access(h, c->ic_root_bh);
1647         if (*e != 0)
1648                 goto fail;
1649
1650         *b = le32_to_cpu(*idle_blocks);
1651         iam_lock_bh(c->ic_root_bh);
1652         *idle_blocks = head->iih_next;
1653         iam_unlock_bh(c->ic_root_bh);
1654         *e = ldiskfs_handle_dirty_metadata(h, inode, c->ic_root_bh);
1655         if (*e != 0) {
1656                 iam_lock_bh(c->ic_root_bh);
1657                 *idle_blocks = cpu_to_le32(*b);
1658                 iam_unlock_bh(c->ic_root_bh);
1659                 goto fail;
1660         }
1661
1662         bh = c->ic_idle_bh;
1663         idle = iam_load_idle_blocks(c, le32_to_cpu(*idle_blocks));
1664         if (idle != NULL && IS_ERR(idle)) {
1665                 *e = PTR_ERR(idle);
1666                 c->ic_idle_bh = NULL;
1667                 brelse(bh);
1668                 goto fail;
1669         }
1670
1671         c->ic_idle_bh = idle;
1672         mutex_unlock(&c->ic_idle_mutex);
1673
1674 got:
1675         /* get write access for the found buffer head */
1676         *e = ldiskfs_journal_get_write_access(h, bh);
1677         if (*e != 0) {
1678                 brelse(bh);
1679                 bh = NULL;
1680                 ldiskfs_std_error(inode->i_sb, *e);
1681         } else {
1682                 /* Zero the reused node so it looks like a new one. */
1683                 memset(bh->b_data, 0, inode->i_sb->s_blocksize);
1684                 set_buffer_uptodate(bh);
1685         }
1686         return bh;
1687
1688 newblock:
1689         bh = osd_ldiskfs_append(h, inode, b);
1690         if (IS_ERR(bh)) {
1691                 *e = PTR_ERR(bh);
1692                 bh = NULL;
1693         }
1694
1695         return bh;
1696
1697 fail:
1698         mutex_unlock(&c->ic_idle_mutex);
1699         ldiskfs_std_error(inode->i_sb, *e);
1700         return NULL;
1701 }
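
/*
 * Summary of the allocation order implemented by iam_new_node() above:
 *
 *   1. pop a block number from the tail of the current idle-blocks head
 *      (c->ic_idle_bh), if it holds any;
 *   2. otherwise reuse the idle-head block itself and advance the idle-list
 *      pointer kept in the root block;
 *   3. otherwise append a brand-new block with osd_ldiskfs_append().
 *
 * The unlocked check of c->ic_idle_bh followed by the re-check under
 * ic_idle_mutex is a double-checked fast path for containers that currently
 * have no idle blocks.
 */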
1702
1703 /*
1704  * Insertion of a new record. Interaction with jbd in the non-trivial case
1705  * (when a split happens) is as follows (see also the jbd2 sketch below):
1706  *
1707  *  - the new leaf node is added to the transaction by iam_new_node();
1708  *
1709  *  - the old leaf node is added to the transaction by iam_add_rec();
1710  *
1711  *  - the leaf where the insertion point ends up is marked dirty by
1712  *  iam_add_rec();
1713  *
1714  *  - the leaf without the insertion point is marked dirty (as @new_leaf)
1715  *  by iam_new_leaf();
1716  *
1717  *  - split index nodes are added to the transaction and marked dirty by
1718  *  split_index_node();
1719  *
1720  *  - the "safe" index node, which is not split but receives the new pointer,
1721  *  is added to the transaction and marked dirty by split_index_node();
1722  *
1723  *  - the index node where the pointer to the new leaf is inserted is added
1724  *  to the transaction by split_index_node() and marked dirty by iam_add_rec();
1725  *
1726  *  - the inode is marked dirty by iam_add_rec().
1727  */
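
/*
 * "Added to the transaction" and "marked dirty" above follow the standard
 * jbd2 pattern; iam_txn_add() and iam_txn_dirty() wrap these calls. A
 * minimal sketch (error handling elided):
 *
 *	err = ldiskfs_journal_get_write_access(handle, bh);
 *	if (err == 0) {
 *		... modify bh->b_data ...
 *		err = ldiskfs_handle_dirty_metadata(handle, inode, bh);
 *	}
 */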
1728
1729 static int iam_new_leaf(handle_t *handle, struct iam_leaf *leaf)
1730 {
1731         int err;
1732         iam_ptr_t blknr;
1733         struct buffer_head *new_leaf;
1734         struct buffer_head *old_leaf;
1735         struct iam_container *c;
1736         struct inode *obj;
1737         struct iam_path *path;
1738
1739         c = iam_leaf_container(leaf);
1740         path = leaf->il_path;
1741
1742         obj = c->ic_object;
1743         new_leaf = iam_new_node(handle, c, &blknr, &err);
1744         do_corr(schedule());
1745         if (new_leaf != NULL) {
1746                 struct dynlock_handle *lh;
1747
1748                 lh = iam_lock_htree(c, blknr, DLT_WRITE);
1749                 do_corr(schedule());
1750                 if (lh != NULL) {
1751                         iam_leaf_ops(leaf)->init_new(c, new_leaf);
1752                         do_corr(schedule());
1753                         old_leaf = leaf->il_bh;
1754                         iam_leaf_split(leaf, &new_leaf, blknr);
1755                         if (old_leaf != leaf->il_bh) {
1756                                 /*
1757                                  * Switched to the new leaf.
1758                                  */
1759                                 iam_leaf_unlock(leaf);
1760                                 leaf->il_lock = lh;
1761                                 path->ip_frame->leaf = blknr;
1762                         } else
1763                                 iam_unlock_htree(path->ip_container, lh);
1764                         do_corr(schedule());
1765                         err = iam_txn_dirty(handle, path, new_leaf);
1766                         if (err == 0)
1767                                 err = ldiskfs_mark_inode_dirty(handle, obj);
1768                         do_corr(schedule());
1769                 } else
1770                         err = -ENOMEM;
1771                 brelse(new_leaf);
1772         }
1773         assert_inv(iam_path_check(iam_leaf_path(leaf)));
1774         return err;
1775 }
1776
1777 static inline void dx_set_limit(struct iam_entry *entries, unsigned value)
1778 {
1779         ((struct dx_countlimit *) entries)->limit = cpu_to_le16(value);
1780 }
1781
1782 static int iam_shift_entries(struct iam_path *path,
1783                          struct iam_frame *frame, unsigned count,
1784                          struct iam_entry *entries, struct iam_entry *entries2,
1785                          u32 newblock)
1786 {
1787         unsigned count1;
1788         unsigned count2;
1789         int delta;
1790
1791         struct iam_frame *parent = frame - 1;
1792         struct iam_ikey *pivot = iam_path_ikey(path, 3);
1793
1794         delta = dx_index_is_compat(path) ? 0 : +1;
1795
1796         count1 = count/2 + delta;
1797         count2 = count - count1;
1798         dx_get_ikey(path, iam_entry_shift(path, entries, count1), pivot);
1799
1800         dxtrace(printk("Split index %d/%d\n", count1, count2));
1801
1802         memcpy((char *) iam_entry_shift(path, entries2, delta),
1803                (char *) iam_entry_shift(path, entries, count1),
1804                count2 * iam_entry_size(path));
1805
1806         dx_set_count(entries2, count2 + delta);
1807         dx_set_limit(entries2, dx_node_limit(path));
1808
1809         /*
1810          * NOTE: very subtle piece of code. A competing dx_probe() may find
1811          * the 2nd-level index in the root index; we then insert a new entry
1812          * here and set a new count in that 2nd-level index, so dx_probe() may
1813          * see a 2nd-level index without the hash it looks for. The fix is to
1814          * re-check the root index after locking the just-found 2nd-level index. -bzzz
1815          */
1816         iam_insert_key_lock(path, parent, pivot, newblock);
1817
1818         /*
1819          * Now both the old and the new 2nd-level index blocks contain all
1820          * pointers, so dx_probe() may find it in either of them. That is OK. -bzzz
1821          */
1822         iam_lock_bh(frame->bh);
1823         dx_set_count(entries, count1);
1824         iam_unlock_bh(frame->bh);
1825
1826         /*
1827          * Now the old 2nd-level index block points to the first half of the
1828          * leaves. It is important that dx_probe() checks the root index block
1829          * for changes under dx_lock_bh(frame->bh). -bzzz
1830          */
1831
1832         return count1;
1833 }
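
/*
 * Worked example of the split arithmetic above, for illustration only: with
 * count == 10 entries in the full node and delta == 1 (i.e. not an
 * ldiskfs-htree-compatible index), count1 = 10/2 + 1 = 6 entries stay in the
 * old block, count2 = 10 - 6 = 4 entries are copied into the new block
 * starting at slot delta, and the new block's count becomes
 * count2 + delta = 5. The pivot propagated into the parent is the ikey of
 * entry number count1, i.e. the first entry that moved.
 */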
1834
1835
1836 int split_index_node(handle_t *handle, struct iam_path *path,
1837                      struct dynlock_handle **lh)
1838 {
1839         struct iam_entry *entries;   /* old block contents */
1840         struct iam_entry *entries2;  /* new block contents */
1841         struct iam_frame *frame, *safe;
1842         struct buffer_head *bh_new[DX_MAX_TREE_HEIGHT] = {NULL};
1843         u32 newblock[DX_MAX_TREE_HEIGHT] = {0};
1844         struct dynlock_handle *lock[DX_MAX_TREE_HEIGHT] = {NULL,};
1845         struct dynlock_handle *new_lock[DX_MAX_TREE_HEIGHT] = {NULL,};
1846         struct inode *dir = iam_path_obj(path);
1847         struct iam_descr *descr;
1848         int nr_splet;
1849         int i, err;
1850
1851         descr = iam_path_descr(path);
1852         /*
1853          * Algorithm below depends on this.
1854          */
1855         assert_corr(dx_root_limit(path) < dx_node_limit(path));
1856
1857         frame = path->ip_frame;
1858         entries = frame->entries;
1859
1860         /*
1861          * Tall-tree handling: we might have to split multiple index blocks
1862          * all the way up to tree root. Tricky point here is error handling:
1863          * to avoid complicated undo/rollback we
1864          *
1865          *   - first allocate all necessary blocks
1866          *
1867          *   - insert pointers into them atomically.
1868          */
1869
1870         /*
1871          * Locking: leaf is already locked. htree-locks are acquired on all
1872          * index nodes that require split bottom-to-top, on the "safe" node,
1873          * index nodes that require a split, bottom-to-top, on the "safe" node,
1874          * and on all new nodes.
1875
1876         dxtrace(printk("using %u of %u node entries\n",
1877                        dx_get_count(entries), dx_get_limit(entries)));
1878
1879         /* Which levels need to be split? */
1880         for (nr_splet = 0; frame >= path->ip_frames &&
1881              dx_get_count(frame->entries) == dx_get_limit(frame->entries);
1882              --frame, ++nr_splet) {
1883                 do_corr(schedule());
1884                 if (nr_splet == DX_MAX_TREE_HEIGHT) {
1885                         /*
1886                          * CWARN(dir->i_sb, __FUNCTION__,
1887                          * "Directory index full!\n");
1888                          */
1889                         err = -ENOSPC;
1890                         goto cleanup;
1891                 }
1892         }
1893
1894         safe = frame;
1895
1896         /*
1897          * Lock all nodes, bottom to top.
1898          */
1899         for (frame = path->ip_frame, i = nr_splet; i >= 0; --i, --frame) {
1900                 do_corr(schedule());
1901                 lock[i] = iam_lock_htree(path->ip_container, frame->curidx,
1902                                          DLT_WRITE);
1903                 if (lock[i] == NULL) {
1904                         err = -ENOMEM;
1905                         goto cleanup;
1906                 }
1907         }
1908
1909         /*
1910          * Check for concurrent index modification.
1911          */
1912         err = iam_check_full_path(path, 1);
1913         if (err)
1914                 goto cleanup;
1915         /*
1916          * And check that the same number of nodes is to be split.
1917          */
1918         for (i = 0, frame = path->ip_frame; frame >= path->ip_frames &&
1919              dx_get_count(frame->entries) == dx_get_limit(frame->entries);
1920              --frame, ++i) {
1921                 ;
1922         }
1923         if (i != nr_splet) {
1924                 err = -EAGAIN;
1925                 goto cleanup;
1926         }
1927
1928         /*
1929          * Go back down, allocating blocks, locking them, and adding into
1930          * transaction...
1931          */
1932         for (frame = safe + 1, i = 0; i < nr_splet; ++i, ++frame) {
1933                 bh_new[i] = iam_new_node(handle, path->ip_container,
1934                                          &newblock[i], &err);
1935                 do_corr(schedule());
1936                 if (!bh_new[i] ||
1937                     descr->id_ops->id_node_init(path->ip_container,
1938                                                 bh_new[i], 0) != 0)
1939                         goto cleanup;
1940
1941                 new_lock[i] = iam_lock_htree(path->ip_container, newblock[i],
1942                                              DLT_WRITE);
1943                 if (new_lock[i] == NULL) {
1944                         err = -ENOMEM;
1945                         goto cleanup;
1946                 }
1947                 do_corr(schedule());
1948                 BUFFER_TRACE(frame->bh, "get_write_access");
1949                 err = ldiskfs_journal_get_write_access(handle, frame->bh);
1950                 if (err)
1951                         goto journal_error;
1952         }
1953         /* Add "safe" node to transaction too */
1954         if (safe + 1 != path->ip_frames) {
1955                 do_corr(schedule());
1956                 err = ldiskfs_journal_get_write_access(handle, safe->bh);
1957                 if (err)
1958                         goto journal_error;
1959         }
1960
1961         /* Go through nodes once more, inserting pointers */
1962         for (frame = safe + 1, i = 0; i < nr_splet; ++i, ++frame) {
1963                 unsigned count;
1964                 int idx;
1965                 struct buffer_head *bh2;
1966                 struct buffer_head *bh;
1967
1968                 entries = frame->entries;
1969                 count = dx_get_count(entries);
1970                 idx = iam_entry_diff(path, frame->at, entries);
1971
1972                 bh2 = bh_new[i];
1973                 entries2 = dx_get_entries(path, bh2->b_data, 0);
1974
1975                 bh = frame->bh;
1976                 if (frame == path->ip_frames) {
1977                         /* splitting root node. Tricky point:
1978                          *
1979                          * In the "normal" B-tree we'd split root *and* add
1980                          * new root to the tree with pointers to the old root
1981                          * and its sibling (thus introducing two new nodes).
1982                          *
1983                          * In htree it's enough to add one node, because
1984                          * capacity of the root node is smaller than that of
1985                          * a non-root one.
1986                          */
1987                         struct iam_frame *frames;
1988                         struct iam_entry *next;
1989
1990                         assert_corr(i == 0);
1991
1992                         do_corr(schedule());
1993
1994                         frames = path->ip_frames;
1995                         memcpy((char *) entries2, (char *) entries,
1996                                count * iam_entry_size(path));
1997                         dx_set_limit(entries2, dx_node_limit(path));
1998
1999                         /* Set up root */
2000                         iam_lock_bh(frame->bh);
2001                         next = descr->id_ops->id_root_inc(path->ip_container,
2002                                                           path, frame);
2003                         dx_set_block(path, next, newblock[0]);
2004                         iam_unlock_bh(frame->bh);
2005
2006                         do_corr(schedule());
2007                         /* Shift frames in the path */
2008                         memmove(frames + 2, frames + 1,
2009                                (sizeof path->ip_frames) - 2 * sizeof frames[0]);
2010                         /* Add new access path frame */
2011                         frames[1].at = iam_entry_shift(path, entries2, idx);
2012                         frames[1].entries = entries = entries2;
2013                         frames[1].bh = bh2;
2014                         assert_inv(dx_node_check(path, frame));
2015                         ++ path->ip_frame;
2016                         ++ frame;
2017                         assert_inv(dx_node_check(path, frame));
2018                         bh_new[0] = NULL; /* buffer head is "consumed" */
2019                         err = ldiskfs_handle_dirty_metadata(handle, NULL, bh2);
2020                         if (err)
2021                                 goto journal_error;
2022                         do_corr(schedule());
2023                 } else {
2024                         /* splitting non-root index node. */
2025                         struct iam_frame *parent = frame - 1;
2026
2027                         do_corr(schedule());
2028                         count = iam_shift_entries(path, frame, count,
2029                                                 entries, entries2, newblock[i]);
2030                         /* Which index block gets the new entry? */
2031                         if (idx >= count) {
2032                                 int d = dx_index_is_compat(path) ? 0 : +1;
2033
2034                                 frame->at = iam_entry_shift(path, entries2,
2035                                                             idx - count + d);
2036                                 frame->entries = entries = entries2;
2037                                 frame->curidx = newblock[i];
2038                                 swap(frame->bh, bh2);
2039                                 assert_corr(lock[i + 1] != NULL);
2040                                 assert_corr(new_lock[i] != NULL);
2041                                 swap(lock[i + 1], new_lock[i]);
2042                                 bh_new[i] = bh2;
2043                                 parent->at = iam_entry_shift(path,
2044                                                              parent->at, +1);
2045                         }
2046                         assert_inv(dx_node_check(path, frame));
2047                         assert_inv(dx_node_check(path, parent));
2048                         dxtrace(dx_show_index("node", frame->entries));
2049                         dxtrace(dx_show_index("node",
2050                                 ((struct dx_node *) bh2->b_data)->entries));
2051                         err = ldiskfs_handle_dirty_metadata(handle, NULL, bh2);
2052                         if (err)
2053                                 goto journal_error;
2054                         do_corr(schedule());
2055                         err = ldiskfs_handle_dirty_metadata(handle, NULL,
2056                                                             parent->bh);
2057                         if (err)
2058                                 goto journal_error;
2059                 }
2060                 do_corr(schedule());
2061                 err = ldiskfs_handle_dirty_metadata(handle, NULL, bh);
2062                 if (err)
2063                         goto journal_error;
2064         }
2065         /*
2066          * This function was called to make insertion of new leaf
2067          * possible. Check that it fulfilled its obligations.
2068          */
2069         assert_corr(dx_get_count(path->ip_frame->entries) <
2070                     dx_get_limit(path->ip_frame->entries));
2071         assert_corr(lock[nr_splet] != NULL);
2072         *lh = lock[nr_splet];
2073         lock[nr_splet] = NULL;
2074         if (nr_splet > 0) {
2075                 /*
2076                  * Log ->i_size modification.
2077                  */
2078                 err = ldiskfs_mark_inode_dirty(handle, dir);
2079                 if (err)
2080                         goto journal_error;
2081         }
2082         goto cleanup;
2083 journal_error:
2084         ldiskfs_std_error(dir->i_sb, err);
2085
2086 cleanup:
2087         iam_unlock_array(path->ip_container, lock);
2088         iam_unlock_array(path->ip_container, new_lock);
2089
2090         assert_corr(err || iam_frame_is_locked(path, path->ip_frame));
2091
2092         do_corr(schedule());
2093         for (i = 0; i < ARRAY_SIZE(bh_new); ++i) {
2094                 if (bh_new[i] != NULL)
2095                         brelse(bh_new[i]);
2096         }
2097         return err;
2098 }
2099
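/*
 * iam_add_rec() adds the leaf to the transaction and, when the leaf is full,
 * splits index nodes first. split_index_node() returns -EAGAIN when the set
 * of full frames changed while the locks were being taken; in that case the
 * path is dropped, the key is looked up again with iam_it_get_exact(), and
 * the split is retried (+1 means "repeat"), unless the key has appeared in
 * the meantime (-EEXIST).
 */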
2100 static int iam_add_rec(handle_t *handle, struct iam_iterator *it,
2101                        struct iam_path *path,
2102                        const struct iam_key *k, const struct iam_rec *r)
2103 {
2104         int err;
2105         struct iam_leaf *leaf;
2106
2107         leaf = &path->ip_leaf;
2108         assert_inv(iam_path_check(path));
2109         err = iam_txn_add(handle, path, leaf->il_bh);
2110         if (err == 0) {
2111                 do_corr(schedule());
2112                 if (!iam_leaf_can_add(leaf, k, r)) {
2113                         struct dynlock_handle *lh = NULL;
2114
2115                         do {
2116                                 assert_corr(lh == NULL);
2117                                 do_corr(schedule());
2118                                 err = split_index_node(handle, path, &lh);
2119                                 if (err == -EAGAIN) {
2120                                         assert_corr(lh == NULL);
2121
2122                                         iam_path_fini(path);
2123                                         it->ii_state = IAM_IT_DETACHED;
2124
2125                                         do_corr(schedule());
2126                                         err = iam_it_get_exact(it, k);
2127                                         if (err == -ENOENT)
2128                                                 err = +1; /* repeat split */
2129                                         else if (err == 0)
2130                                                 err = -EEXIST;
2131                                 }
2132                         } while (err > 0);
2133                         assert_inv(iam_path_check(path));
2134                         if (err == 0) {
2135                                 assert_corr(lh != NULL);
2136                                 do_corr(schedule());
2137                                 err = iam_new_leaf(handle, leaf);
2138                                 if (err == 0)
2139                                         err = iam_txn_dirty(handle, path,
2140                                                             path->ip_frame->bh);
2141                         }
2142                         iam_unlock_htree(path->ip_container, lh);
2143                         do_corr(schedule());
2144                 }
2145                 if (err == 0) {
2146                         iam_leaf_rec_add(leaf, k, r);
2147                         err = iam_txn_dirty(handle, path, leaf->il_bh);
2148                 }
2149         }
2150         assert_inv(iam_path_check(path));
2151         return err;
2152 }
2153
2154 /*
2155  * Insert new record with key @k and contents from @r, shifting records to the
2156  * right. On success, iterator is positioned on the newly inserted record.
2157  *
2158  * precondition: it->ii_flags&IAM_IT_WRITE &&
2159  *               (it_state(it) == IAM_IT_ATTACHED ||
2160  *                it_state(it) == IAM_IT_SKEWED) &&
2161  *               ergo(it_state(it) == IAM_IT_ATTACHED,
2162  *                    it_keycmp(it, k) <= 0) &&
2163  *               ergo(it_before(it), it_keycmp(it, k) > 0)
2164  * postcondition: ergo(result == 0,
2165  *                     it_state(it) == IAM_IT_ATTACHED &&
2166  *                     it_keycmp(it, k) == 0 &&
2167  *                     !memcmp(iam_it_rec_get(it), r, ...))
2168  */
2169 int iam_it_rec_insert(handle_t *h, struct iam_iterator *it,
2170                       const struct iam_key *k, const struct iam_rec *r)
2171 {
2172         int result;
2173         struct iam_path *path;
2174
2175         path = &it->ii_path;
2176
2177         assert_corr(it->ii_flags&IAM_IT_WRITE);
2178         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
2179                     it_state(it) == IAM_IT_SKEWED);
2180         assert_corr(ergo(it_state(it) == IAM_IT_ATTACHED,
2181                     it_keycmp(it, k) <= 0));
2182         assert_corr(ergo(it_before(it), it_keycmp(it, k) > 0));
2183         result = iam_add_rec(h, it, path, k, r);
2184         if (result == 0)
2185                 it->ii_state = IAM_IT_ATTACHED;
2186         assert_corr(ergo(result == 0,
2187                          it_state(it) == IAM_IT_ATTACHED &&
2188                          it_keycmp(it, k) == 0));
2189         return result;
2190 }
2191
2192 static inline int iam_idle_blocks_limit(struct inode *inode)
2193 {
2194         return (inode->i_sb->s_blocksize - sizeof(struct iam_idle_head)) >> 2;
2195 }
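
/*
 * For illustration: assuming struct iam_idle_head occupies 8 bytes (magic,
 * count and next fields), a 4096-byte block can track
 * (4096 - 8) >> 2 = 1022 idle block numbers, since each iih_blks[] slot is a
 * 32-bit block number.
 */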
2196
2197 /*
2198  * If the leaf cannot be recycled, we lose one block that could have been
2199  * reused. Not a serious issue: it is almost the same as not recycling at all.
2200  */
2201 static iam_ptr_t iam_index_shrink(handle_t *h, struct iam_path *p,
2202                                   struct iam_leaf *l, struct buffer_head **bh)
2203 {
2204         struct iam_container *c = p->ip_container;
2205         struct inode *inode = c->ic_object;
2206         struct iam_frame *frame = p->ip_frame;
2207         struct iam_entry *entries;
2208         struct iam_entry *pos;
2209         struct dynlock_handle *lh;
2210         int count;
2211         int rc;
2212
2213         if (c->ic_idle_failed)
2214                 return 0;
2215
2216         if (unlikely(frame == NULL))
2217                 return 0;
2218
2219         if (!iam_leaf_empty(l))
2220                 return 0;
2221
2222         lh = iam_lock_htree(c, frame->curidx, DLT_WRITE);
2223         if (lh == NULL) {
2224                 CWARN("%s: No memory to recycle idle blocks\n",
2225                       osd_ino2name(inode));
2226                 return 0;
2227         }
2228
2229         rc = iam_txn_add(h, p, frame->bh);
2230         if (rc != 0) {
2231                 iam_unlock_htree(c, lh);
2232                 return 0;
2233         }
2234
2235         iam_lock_bh(frame->bh);
2236         entries = frame->entries;
2237         count = dx_get_count(entries);
2238         /*
2239          * Do NOT shrink away the last entry in the index node; it can be
2240          * reused directly by the next new node.
2241          */
2242         if (count == 2) {
2243                 iam_unlock_bh(frame->bh);
2244                 iam_unlock_htree(c, lh);
2245                 return 0;
2246         }
2247
2248         pos = iam_find_position(p, frame);
2249         /*
2250          * Some new leaf nodes may have been added, or empty leaf nodes may
2251          * have been shrunk away, during this delete operation.
2252          *
2253          * If the empty leaf is no longer under the current index node
2254          * because the index node has been split, just skip it; this is rare.
2255          */
2256         if (unlikely(frame->leaf != dx_get_block(p, pos))) {
2257                 iam_unlock_bh(frame->bh);
2258                 iam_unlock_htree(c, lh);
2259                 return 0;
2260         }
2261
2262         frame->at = pos;
2263         if (frame->at < iam_entry_shift(p, entries, count - 1)) {
2264                 struct iam_entry *n = iam_entry_shift(p, frame->at, 1);
2265
2266                 memmove(frame->at, n,
2267                         (char *)iam_entry_shift(p, entries, count) - (char *)n);
2268                 frame->at_shifted = 1;
2269         }
2270         dx_set_count(entries, count - 1);
2271         iam_unlock_bh(frame->bh);
2272         rc = iam_txn_dirty(h, p, frame->bh);
2273         iam_unlock_htree(c, lh);
2274         if (rc != 0)
2275                 return 0;
2276
2277         get_bh(l->il_bh);
2278         *bh = l->il_bh;
2279         return frame->leaf;
2280 }
2281
2282 static int
2283 iam_install_idle_blocks(handle_t *h, struct iam_path *p, struct buffer_head *bh,
2284                         __u32 *idle_blocks, iam_ptr_t blk)
2285 {
2286         struct iam_container *c = p->ip_container;
2287         struct buffer_head *old = c->ic_idle_bh;
2288         struct iam_idle_head *head;
2289         int rc;
2290
2291         head = (struct iam_idle_head *)(bh->b_data);
2292         head->iih_magic = cpu_to_le16(IAM_IDLE_HEADER_MAGIC);
2293         head->iih_count = 0;
2294         head->iih_next = *idle_blocks;
2295         /* The bh already has journal write access; just dirty it. */
2296         rc = iam_txn_dirty(h, p, bh);
2297         if (rc != 0)
2298                 return rc;
2299
2300         rc = iam_txn_add(h, p, c->ic_root_bh);
2301         if (rc != 0)
2302                 return rc;
2303
2304         iam_lock_bh(c->ic_root_bh);
2305         *idle_blocks = cpu_to_le32(blk);
2306         iam_unlock_bh(c->ic_root_bh);
2307         rc = iam_txn_dirty(h, p, c->ic_root_bh);
2308         if (rc == 0) {
2309                 /* Do NOT release the old bh before the new one is assigned. */
2310                 get_bh(bh);
2311                 c->ic_idle_bh = bh;
2312                 brelse(old);
2313         } else {
2314                 iam_lock_bh(c->ic_root_bh);
2315                 *idle_blocks = head->iih_next;
2316                 iam_unlock_bh(c->ic_root_bh);
2317         }
2318         return rc;
2319 }
2320
2321 /*
2322  * If the leaf cannot be recycled, we lose one block that could have been
2323  * reused. Not a serious issue: it is almost the same as not recycling at all.
2324  */
2325 static void iam_recycle_leaf(handle_t *h, struct iam_path *p,
2326                              struct buffer_head *bh, iam_ptr_t blk)
2327 {
2328         struct iam_container *c = p->ip_container;
2329         struct inode *inode = c->ic_object;
2330         struct iam_idle_head *head;
2331         __u32 *idle_blocks;
2332         int count;
2333         int rc;
2334
2335         mutex_lock(&c->ic_idle_mutex);
2336         if (unlikely(c->ic_idle_failed)) {
2337                 rc = -EFAULT;
2338                 goto unlock;
2339         }
2340
2341         idle_blocks = (__u32 *)(c->ic_root_bh->b_data +
2342                                 c->ic_descr->id_root_gap +
2343                                 sizeof(struct dx_countlimit));
2344         /* It is the first idle block. */
2345         if (c->ic_idle_bh == NULL) {
2346                 rc = iam_install_idle_blocks(h, p, bh, idle_blocks, blk);
2347                 goto unlock;
2348         }
2349
2350         head = (struct iam_idle_head *)(c->ic_idle_bh->b_data);
2351         count = le16_to_cpu(head->iih_count);
2352         /* Current ic_idle_bh is full, to be replaced by the leaf. */
2353         if (count == iam_idle_blocks_limit(inode)) {
2354                 rc = iam_install_idle_blocks(h, p, bh, idle_blocks, blk);
2355                 goto unlock;
2356         }
2357
2358         /* Just add to ic_idle_bh. */
2359         rc = iam_txn_add(h, p, c->ic_idle_bh);
2360         if (rc != 0)
2361                 goto unlock;
2362
2363         head->iih_blks[count] = cpu_to_le32(blk);
2364         head->iih_count = cpu_to_le16(count + 1);
2365         rc = iam_txn_dirty(h, p, c->ic_idle_bh);
2366
2367 unlock:
2368         mutex_unlock(&c->ic_idle_mutex);
2369         if (rc != 0)
2370                 CWARN("%s: idle blocks failed, will lose the blk %u\n",
2371                       osd_ino2name(inode), blk);
2372 }
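
/*
 * A sketch of the idle-blocks list maintained by the functions above, based
 * on how the fields are used in this file:
 *
 *	root block: idle_blocks (at id_root_gap + sizeof(struct dx_countlimit))
 *	    --> idle head #1: { iih_magic, iih_count, iih_next, iih_blks[] }
 *	            --iih_next--> idle head #2 --> ...
 *
 * Each idle-head block stores up to iam_idle_blocks_limit() reusable block
 * numbers in iih_blks[]; when the current head fills up (or none exists yet),
 * iam_install_idle_blocks() makes the newly freed leaf the new list head.
 */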
2373
2374 /*
2375  * Delete record under iterator.
2376  *
2377  * precondition:  it_state(it) == IAM_IT_ATTACHED &&
2378  *                it->ii_flags&IAM_IT_WRITE &&
2379  *                it_at_rec(it)
2380  * postcondition: it_state(it) == IAM_IT_ATTACHED ||
2381  *                it_state(it) == IAM_IT_DETACHED
2382  */
2383 int iam_it_rec_delete(handle_t *h, struct iam_iterator *it)
2384 {
2385         int result;
2386         struct iam_leaf *leaf;
2387         struct iam_path *path;
2388
2389         assert_corr(it_state(it) == IAM_IT_ATTACHED &&
2390                     it->ii_flags&IAM_IT_WRITE);
2391         assert_corr(it_at_rec(it));
2392
2393         path = &it->ii_path;
2394         leaf = &path->ip_leaf;
2395
2396         assert_inv(iam_path_check(path));
2397
2398         result = iam_txn_add(h, path, leaf->il_bh);
2399         /*
2400          * no compaction for now.
2401          */
2402         if (result == 0) {
2403                 iam_rec_del(leaf, it->ii_flags&IAM_IT_MOVE);
2404                 result = iam_txn_dirty(h, path, leaf->il_bh);
2405                 if (result == 0 && iam_leaf_at_end(leaf)) {
2406                         struct buffer_head *bh = NULL;
2407                         iam_ptr_t blk;
2408
2409                         blk = iam_index_shrink(h, path, leaf, &bh);
2410                         if (it->ii_flags & IAM_IT_MOVE) {
2411                                 result = iam_it_next(it);
2412                                 if (result > 0)
2413                                         result = 0;
2414                         }
2415
2416                         if (bh != NULL) {
2417                                 iam_recycle_leaf(h, path, bh, blk);
2418                                 brelse(bh);
2419                         }
2420                 }
2421         }
2422         assert_inv(iam_path_check(path));
2423         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
2424                     it_state(it) == IAM_IT_DETACHED);
2425         return result;
2426 }
2427
2428 /*
2429  * Convert iterator to cookie.
2430  *
2431  * precondition:  it_state(it) == IAM_IT_ATTACHED &&
2432  *                iam_path_descr(it->ii_path)->id_key_size <= sizeof(iam_pos_t)
2433  * postcondition: it_state(it) == IAM_IT_ATTACHED
2434  */
2435 iam_pos_t iam_it_store(const struct iam_iterator *it)
2436 {
2437         iam_pos_t result;
2438
2439         assert_corr(it_state(it) == IAM_IT_ATTACHED);
2440         assert_corr(it_at_rec(it));
2441         assert_corr(iam_it_container(it)->ic_descr->id_ikey_size <=
2442                     sizeof result);
2443
2444         result = 0;
2445         return *(iam_pos_t *)iam_it_ikey_get(it, (void *)&result);
2446 }
2447
2448 /*
2449  * Restore iterator from cookie.
2450  *
2451  * precondition:  it_state(it) == IAM_IT_DETACHED && it->ii_flags&IAM_IT_MOVE &&
2452  *                iam_path_descr(it->ii_path)->id_key_size <= sizeof(iam_pos_t)
2453  * postcondition: ergo(result == 0, it_state(it) == IAM_IT_ATTACHED &&
2454  *                                  iam_it_store(it) == pos)
2455  */
2456 int iam_it_load(struct iam_iterator *it, iam_pos_t pos)
2457 {
2458         assert_corr(it_state(it) == IAM_IT_DETACHED &&
2459                 it->ii_flags&IAM_IT_MOVE);
2460         assert_corr(iam_it_container(it)->ic_descr->id_ikey_size <= sizeof pos);
2461         return iam_it_iget(it, (struct iam_ikey *)&pos);
2462 }
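
/*
 * A minimal store/load round trip (placeholders; assumes the container's
 * ikey fits into iam_pos_t, as the preconditions above require):
 *
 *	pos = iam_it_store(it);      (while attached at a record)
 *	... detach the iterator ...
 *	rc = iam_it_load(it, pos);   (re-attach at the saved position)
 */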
2463
2464 /***********************************************************************/
2465 /* invariants                                                          */
2466 /***********************************************************************/
2467
2468 static inline int ptr_inside(void *base, size_t size, void *ptr)
2469 {
2470         return (base <= ptr) && (ptr < base + size);
2471 }
2472
2473 static int iam_frame_invariant(struct iam_frame *f)
2474 {
2475         return
2476                 (f->bh != NULL &&
2477                 f->bh->b_data != NULL &&
2478                 ptr_inside(f->bh->b_data, f->bh->b_size, f->entries) &&
2479                 ptr_inside(f->bh->b_data, f->bh->b_size, f->at) &&
2480                 f->entries <= f->at);
2481 }
2482
2483 static int iam_leaf_invariant(struct iam_leaf *l)
2484 {
2485         return
2486                 l->il_bh != NULL &&
2487                 l->il_bh->b_data != NULL &&
2488                 ptr_inside(l->il_bh->b_data, l->il_bh->b_size, l->il_entries) &&
2489                 ptr_inside(l->il_bh->b_data, l->il_bh->b_size, l->il_at) &&
2490                 l->il_entries <= l->il_at;
2491 }
2492
2493 static int iam_path_invariant(struct iam_path *p)
2494 {
2495         int i;
2496
2497         if (p->ip_container == NULL ||
2498             p->ip_indirect < 0 || p->ip_indirect > DX_MAX_TREE_HEIGHT - 1 ||
2499             p->ip_frame != p->ip_frames + p->ip_indirect ||
2500             !iam_leaf_invariant(&p->ip_leaf))
2501                 return 0;
2502         for (i = 0; i < ARRAY_SIZE(p->ip_frames); ++i) {
2503                 if (i <= p->ip_indirect) {
2504                         if (!iam_frame_invariant(&p->ip_frames[i]))
2505                                 return 0;
2506                 }
2507         }
2508         return 1;
2509 }
2510
2511 int iam_it_invariant(struct iam_iterator *it)
2512 {
2513         return
2514                 (it->ii_state == IAM_IT_DETACHED ||
2515                 it->ii_state == IAM_IT_ATTACHED ||
2516                 it->ii_state == IAM_IT_SKEWED) &&
2517                 !(it->ii_flags & ~(IAM_IT_MOVE | IAM_IT_WRITE)) &&
2518                 ergo(it->ii_state == IAM_IT_ATTACHED ||
2519                 it->ii_state == IAM_IT_SKEWED,
2520                 iam_path_invariant(&it->ii_path) &&
2521                 equi(it_at_rec(it), it->ii_state == IAM_IT_SKEWED));
2522 }
2523
2524 /*
2525  * Search container @c for record with key @k. If record is found, its data
2526  * are moved into @r.
2527  *
2528  * Return values: 0: found, -ENOENT: not-found, -ve: error
2529  */
2530 int iam_lookup(struct iam_container *c, const struct iam_key *k,
2531                struct iam_rec *r, struct iam_path_descr *pd)
2532 {
2533         struct iam_iterator it;
2534         int result;
2535
2536         iam_it_init(&it, c, 0, pd);
2537
2538         result = iam_it_get_exact(&it, k);
2539         if (result == 0)
2540                 /*
2541                  * record with required key found, copy it into user buffer
2542                  */
2543                 iam_reccpy(&it.ii_path.ip_leaf, r);
2544         iam_it_put(&it);
2545         iam_it_fini(&it);
2546         return result;
2547 }
2548
2549 /*
2550  * Insert new record @r with key @k into container @c (within context of
2551  * transaction @h).
2552  *
2553  * Return values: 0: success, -ve: error, including -EEXIST when record with
2554  * given key is already present.
2555  *
2556  * postcondition: ergo(result == 0 || result == -EEXIST,
2557  *                                  iam_lookup(c, k, r2) == 0);
2558  */
2559 int iam_insert(handle_t *h, struct iam_container *c, const struct iam_key *k,
2560                const struct iam_rec *r, struct iam_path_descr *pd)
2561 {
2562         struct iam_iterator it;
2563         int result;
2564
2565         iam_it_init(&it, c, IAM_IT_WRITE, pd);
2566
2567         result = iam_it_get_exact(&it, k);
2568         if (result == -ENOENT)
2569                 result = iam_it_rec_insert(h, &it, k, r);
2570         else if (result == 0)
2571                 result = -EEXIST;
2572         iam_it_put(&it);
2573         iam_it_fini(&it);
2574         return result;
2575 }
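
/*
 * A minimal caller-side sketch of the flat container API (hypothetical
 * key/record buffers; a real caller supplies container-specific key and
 * record layouts, a journal handle and a path descriptor):
 *
 *	rc = iam_insert(handle, c, (const struct iam_key *)keybuf,
 *			(const struct iam_rec *)recbuf, pd);
 *	if (rc == -EEXIST)
 *		rc = iam_update(handle, c, (const struct iam_key *)keybuf,
 *				(const struct iam_rec *)recbuf, pd);
 */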
2576
2577 /*
2578  * Update record with the key @k in container @c (within context of
2579  * transaction @h), new record is given by @r.
2580  *
2581  * Return values: +1: skipped because the record value is unchanged, 0: success,
2582  * -ve: error, including -ENOENT if no record with the given key found.
2583  */
2584 int iam_update(handle_t *h, struct iam_container *c, const struct iam_key *k,
2585                const struct iam_rec *r, struct iam_path_descr *pd)
2586 {
2587         struct iam_iterator it;
2588         struct iam_leaf *folio;
2589         int result;
2590
2591         iam_it_init(&it, c, IAM_IT_WRITE, pd);
2592
2593         result = iam_it_get_exact(&it, k);
2594         if (result == 0) {
2595                 folio = &it.ii_path.ip_leaf;
2596                 result = iam_leaf_ops(folio)->rec_eq(folio, r);
2597                 if (result == 0)
2598                         iam_it_rec_set(h, &it, r);
2599                 else
2600                         result = 1;
2601         }
2602         iam_it_put(&it);
2603         iam_it_fini(&it);
2604         return result;
2605 }
2606
2607 /*
2608  * Delete existing record with key @k.
2609  *
2610  * Return values: 0: success, -ENOENT: not-found, -ve: other error.
2611  *
2612  * postcondition: ergo(result == 0 || result == -ENOENT,
2613  *                                 !iam_lookup(c, k, *));
2614  */
2615 int iam_delete(handle_t *h, struct iam_container *c, const struct iam_key *k,
2616                struct iam_path_descr *pd)
2617 {
2618         struct iam_iterator it;
2619         int result;
2620
2621         iam_it_init(&it, c, IAM_IT_WRITE, pd);
2622
2623         result = iam_it_get_exact(&it, k);
2624         if (result == 0)
2625                 iam_it_rec_delete(h, &it);
2626         iam_it_put(&it);
2627         iam_it_fini(&it);
2628         return result;
2629 }
2630
2631 int iam_root_limit(int rootgap, int blocksize, int size)
2632 {
2633         int limit;
2634         int nlimit;
2635
2636         limit = (blocksize - rootgap) / size;
2637         nlimit = blocksize / size;
2638         if (limit == nlimit)
2639                 limit--;
2640         return limit;
2641 }
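
/*
 * Worked example with hypothetical sizes: for blocksize = 4096, rootgap = 8
 * and size = 24, limit = (4096 - 8) / 24 = 170 equals nlimit = 4096 / 24 =
 * 170, so the root limit is decremented to 169. The decrement keeps the
 * root limit strictly below the plain blocksize/size limit, which is
 * consistent with the assert_corr(dx_root_limit(path) < dx_node_limit(path))
 * check in split_index_node().
 */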