LU-8581 osd: misuse of RCU in osd xattr cache
[fs/lustre-release.git] / lustre / osd-ldiskfs / osd_iam.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see [sun.com URL with a
18  * copy of GPLv2].
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2011, 2015, Intel Corporation.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * iam.c
37  * Top-level entry points into iam module
38  *
39  * Author: Wang Di <wangdi@clusterfs.com>
40  * Author: Nikita Danilov <nikita@clusterfs.com>
41  */
42
43 /*
44  * iam: big theory statement.
45  *
46  * iam (Index Access Module) is a module providing an abstraction of a
47  * persistent transactional container on top of the generalized ldiskfs htree.
48  *
49  * iam supports:
50  *
51  *     - key, pointer, and record size specifiable per container.
52  *
53  *     - trees taller than 2 index levels.
54  *
55  *     - read/write to existing ldiskfs htree directories as iam containers.
56  *
57  * iam container is a tree, consisting of leaf nodes containing keys and
58  * records stored in this container, and index nodes, containing keys and
59  * pointers to leaf or index nodes.
60  *
61  * iam does not work with keys directly; instead, it calls a user-supplied
62  * key comparison function (->dpo_keycmp()).
63  *
64  * Pointers are (currently) interpreted as logical offsets (measured in
65  * blocks) within the underlying flat file on top of which the iam tree lives.
66  *
67  * On-disk format:
68  *
69  * iam mostly tries to reuse existing htree formats.
70  *
71  * Format of index node:
72  *
73  * +-----+-------+-------+-------+------+-------+------------+
74  * |     | count |       |       |      |       |            |
75  * | gap |   /   | entry | entry | .... | entry | free space |
76  * |     | limit |       |       |      |       |            |
77  * +-----+-------+-------+-------+------+-------+------------+
78  *
79  *       gap           this part of the node is never accessed by iam code.
80  *                     It exists for binary compatibility with the ldiskfs
81  *                     htree (which, in turn, stores a fake struct ext2_dirent
82  *                     for ext2 compatibility), and to keep some unspecified
83  *                     per-node data. The gap can differ between root and
84  *                     non-root index nodes. The gap size can be specified per
85  *                     container (a gap of 0 is allowed).
86  *
87  *       count/limit   current number of entries in this node, and the maximal
88  *                     number of entries that can fit into the node. count/limit
89  *                     has the same size as an entry, and is itself counted in
90  *                     count.
91  *
92  *       entry         index entry: consists of a key immediately followed by
93  *                     a pointer to a child node. Key size and pointer size
94  *                     depend on the container. An entry has neither alignment
95  *                     nor padding.
96  *
97  *       free space    portion of the node to which new entries are added
98  *
99  * Entries in index node are sorted by their key value.
100  *
101  * Format of a leaf node is not specified. Generic iam code accesses leaf
102  * nodes through ->id_leaf methods in struct iam_descr.
103  *
104  * The IAM root block is a special node, which contains the IAM descriptor.
105  * Its on-disk format is:
106  *
107  * +---------+-------+--------+---------+-------+------+-------+------------+
108  * |IAM desc | count |  idle  |         |       |      |       |            |
109  * |(fix/var)|   /   | blocks | padding | entry | .... | entry | free space |
110  * |         | limit |        |         |       |      |       |            |
111  * +---------+-------+--------+---------+-------+------+-------+------------+
112  *
113  * The padding length is calculated from the parameters in the IAM descriptor.
114  *
115  * The field "idle_blocks" records empty leaf nodes that have not been
116  * released even though all of their entries have been removed. Ideally,
117  * these idle blocks would be reused whenever new leaf nodes are needed for
118  * new entries, but that would require the IAM hash functions to map new
119  * entries onto exactly those idle blocks. Unfortunately, it is hard to
120  * design hash functions for such a mapping, especially without hurting
121  * insert/lookup performance.
122  *
123  * Instead, the IAM recycles empty leaf nodes by putting them into a
124  * per-file pool of idle blocks. When a new leaf node is needed, a block is
125  * preferentially taken from this pool, regardless of how the IAM hash
126  * function would map the entry.
127  *
128  * The idle blocks pool is organized as a series of tables, and each table
129  * can be described as follows (on-disk format):
130  *
131  * +---------+---------+---------+---------+------+---------+-------+
132  * |  magic  |  count  |  next   |  logic  |      |  logic  | free  |
133  * |(16 bits)|(16 bits)|  table  |  blk #  | .... |  blk #  | space |
134  * |         |         |(32 bits)|(32 bits)|      |(32 bits)|       |
135  * +---------+---------+---------+---------+------+---------+-------+
136  *
137  * The logical blk# of the first table is stored in the "idle_blocks" field
138  * of the root node; a sketch of this table header follows this comment.
139  */
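
/*
 * Editor's illustrative sketch (not part of the original source): the idle
 * table header above maps naturally onto a small little-endian structure.
 * Only iih_magic, iih_count and iih_blks[] are taken from the accesses in
 * iam_load_idle_blocks() and iam_new_node() below; the name of the "next
 * table" field is an assumption, and the authoritative definition lives in
 * the osd headers.
 *
 *	struct iam_idle_head {
 *		__le16 iih_magic;	// IAM_IDLE_HEADER_MAGIC
 *		__le16 iih_count;	// number of idle blk#s that follow
 *		__le32 iih_next;	// logical blk# of the next table, 0 if none
 *		__le32 iih_blks[];	// idle leaf blk#s, up to the block end
 *	};
 */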
140
141 #include <linux/module.h>
142 #include <linux/fs.h>
143 #include <linux/pagemap.h>
144 #include <linux/time.h>
145 #include <linux/fcntl.h>
146 #include <linux/stat.h>
147 #include <linux/string.h>
148 #include <linux/quotaops.h>
149 #include <linux/buffer_head.h>
150
151 #include <ldiskfs/ldiskfs.h>
152 #include <ldiskfs/xattr.h>
153 #undef ENTRY
154
155 #include "osd_internal.h"
156
157 #include <ldiskfs/acl.h>
158
159 /*
160  * List of all registered formats.
161  *
162  * No locking. Callers synchronize.
163  */
164 static struct list_head iam_formats = LIST_HEAD_INIT(iam_formats);
165
166 void iam_format_register(struct iam_format *fmt)
167 {
168         list_add(&fmt->if_linkage, &iam_formats);
169 }
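
/*
 * Editor's illustrative sketch (not part of the original source): how a
 * format module is expected to plug into the registry above.  Only the
 * ->if_guess() method and ->if_linkage member are known from this file;
 * the "lfoo" names and the body of the guess callback are placeholders.
 */
static int iam_lfoo_guess(struct iam_container *c)
{
        /*
         * Inspect the root block of @c and, if it matches this format, set
         * up the container (descriptor, root buffer_head, ...) and return 0;
         * otherwise return a negative errno so that iam_format_guess() tries
         * the next registered format.
         */
        return -ENOENT;
}

static struct iam_format iam_lfoo_format = {
        .if_guess = iam_lfoo_guess,
};

static void __maybe_unused iam_lfoo_format_init(void)
{
        iam_format_register(&iam_lfoo_format);
}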
170
171 static struct buffer_head *
172 iam_load_idle_blocks(struct iam_container *c, iam_ptr_t blk)
173 {
174         struct inode *inode = c->ic_object;
175         struct iam_idle_head *head;
176         struct buffer_head *bh;
177
178         LASSERT(mutex_is_locked(&c->ic_idle_mutex));
179
180         if (blk == 0)
181                 return NULL;
182
183         bh = __ldiskfs_bread(NULL, inode, blk, 0);
184         if (IS_ERR_OR_NULL(bh)) {
185                 CERROR("%.16s: cannot load idle blocks, blk = %u, err = %ld\n",
186                        LDISKFS_SB(inode->i_sb)->s_es->s_volume_name, blk,
187                        bh ? PTR_ERR(bh) : -EIO);
188                 c->ic_idle_failed = 1;
189                 if (bh == NULL)
190                         bh = ERR_PTR(-EIO);
191                 return bh;
192         }
193
194         head = (struct iam_idle_head *)(bh->b_data);
195         if (le16_to_cpu(head->iih_magic) != IAM_IDLE_HEADER_MAGIC) {
196                 CERROR("%.16s: invalid idle block head, blk = %u, magic = %d\n",
197                        LDISKFS_SB(inode->i_sb)->s_es->s_volume_name, blk,
198                        le16_to_cpu(head->iih_magic));
199                 brelse(bh);
200                 c->ic_idle_failed = 1;
201                 return ERR_PTR(-EBADF);
202         }
203
204         return bh;
205 }
206
207 /*
208  * Determine format of given container. This is done by scanning list of
209  * registered formats and calling ->if_guess() method of each in turn.
210  */
211 static int iam_format_guess(struct iam_container *c)
212 {
213         int result;
214         struct iam_format *fmt;
215
216         /*
217          * XXX temporary initialization hook.
218          */
219         {
220                 static int initialized = 0;
221
222                 if (!initialized) {
223                         iam_lvar_format_init();
224                         iam_lfix_format_init();
225                         initialized = 1;
226                 }
227         }
228
229         result = -ENOENT;
230         list_for_each_entry(fmt, &iam_formats, if_linkage) {
231                 result = fmt->if_guess(c);
232                 if (result == 0)
233                         break;
234         }
235
236         if (result == 0) {
237                 struct buffer_head *bh;
238                 __u32 *idle_blocks;
239
240                 LASSERT(c->ic_root_bh != NULL);
241
242                 idle_blocks = (__u32 *)(c->ic_root_bh->b_data +
243                                         c->ic_descr->id_root_gap +
244                                         sizeof(struct dx_countlimit));
245                 mutex_lock(&c->ic_idle_mutex);
246                 bh = iam_load_idle_blocks(c, le32_to_cpu(*idle_blocks));
247                 if (bh != NULL && IS_ERR(bh))
248                         result = PTR_ERR(bh);
249                 else
250                         c->ic_idle_bh = bh;
251                 mutex_unlock(&c->ic_idle_mutex);
252         }
253
254         return result;
255 }
256
257 /*
258  * Initialize container @c.
259  */
260 int iam_container_init(struct iam_container *c,
261                        struct iam_descr *descr, struct inode *inode)
262 {
263         memset(c, 0, sizeof *c);
264         c->ic_descr  = descr;
265         c->ic_object = inode;
266         init_rwsem(&c->ic_sem);
267         dynlock_init(&c->ic_tree_lock);
268         mutex_init(&c->ic_idle_mutex);
269         return 0;
270 }
271
272 /*
273  * Determine container format.
274  */
275 int iam_container_setup(struct iam_container *c)
276 {
277         return iam_format_guess(c);
278 }
279
280 /*
281  * Finalize container @c, release all resources.
282  */
283 void iam_container_fini(struct iam_container *c)
284 {
285         brelse(c->ic_idle_bh);
286         c->ic_idle_bh = NULL;
287         brelse(c->ic_root_bh);
288         c->ic_root_bh = NULL;
289 }
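
/*
 * Editor's illustrative sketch (not part of the original source): the
 * container life cycle implied by the helpers above.  @descr and @inode are
 * assumed to be supplied by the caller, and error handling is kept minimal.
 */
static int __maybe_unused iam_container_lifecycle_example(struct iam_container *c,
                                                          struct iam_descr *descr,
                                                          struct inode *inode)
{
        int result;

        result = iam_container_init(c, descr, inode);   /* zero fields, init locks */
        if (result != 0)
                return result;

        result = iam_container_setup(c);        /* guess format, load root/idle blocks */
        if (result == 0) {
                /* ... use the iterator interface (iam_it_init() and friends) ... */
        }

        iam_container_fini(c);                  /* release root and idle buffer_heads */
        return result;
}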
290
291 void iam_path_init(struct iam_path *path, struct iam_container *c,
292                    struct iam_path_descr *pd)
293 {
294         memset(path, 0, sizeof *path);
295         path->ip_container = c;
296         path->ip_frame = path->ip_frames;
297         path->ip_data = pd;
298         path->ip_leaf.il_path = path;
299 }
300
301 static void iam_leaf_fini(struct iam_leaf *leaf);
302
303 void iam_path_release(struct iam_path *path)
304 {
305         int i;
306
307         for (i = 0; i < ARRAY_SIZE(path->ip_frames); i++) {
308                 if (path->ip_frames[i].bh != NULL) {
309                         path->ip_frames[i].at_shifted = 0;
310                         brelse(path->ip_frames[i].bh);
311                         path->ip_frames[i].bh = NULL;
312                 }
313         }
314 }
315
316 void iam_path_fini(struct iam_path *path)
317 {
318         iam_leaf_fini(&path->ip_leaf);
319         iam_path_release(path);
320 }
321
322
323 void iam_path_compat_init(struct iam_path_compat *path, struct inode *inode)
324 {
325         int i;
326
327         path->ipc_hinfo = &path->ipc_hinfo_area;
328         for (i = 0; i < ARRAY_SIZE(path->ipc_scratch); ++i)
329                 path->ipc_descr.ipd_key_scratch[i] =
330                         (struct iam_ikey *)&path->ipc_scratch[i];
331
332         iam_path_init(&path->ipc_path, &path->ipc_container, &path->ipc_descr);
333 }
334
335 void iam_path_compat_fini(struct iam_path_compat *path)
336 {
337         iam_path_fini(&path->ipc_path);
338 }
339
340 /*
341  * Helper function initializing iam_path_descr and its key scratch area.
342  */
343 struct iam_path_descr *iam_ipd_alloc(void *area, int keysize)
344 {
345         struct iam_path_descr *ipd;
346         void *karea;
347         int i;
348
349         ipd = area;
350         karea = ipd + 1;
351         for (i = 0; i < ARRAY_SIZE(ipd->ipd_key_scratch); ++i, karea += keysize)
352                 ipd->ipd_key_scratch[i] = karea;
353         return ipd;
354 }
355
356 void iam_ipd_free(struct iam_path_descr *ipd)
357 {
358 }
359
360 int iam_node_read(struct iam_container *c, iam_ptr_t ptr,
361                   handle_t *h, struct buffer_head **bh)
362 {
363         /* NB: this can be called by iam_lfix_guess() at a very early
364          * stage, when c->ic_root_bh and c->ic_descr->id_ops
365          * haven't been initialized yet.
366          * Also, we don't have this for an IAM dir.
367          */
368         if (c->ic_root_bh != NULL &&
369             c->ic_descr->id_ops->id_root_ptr(c) == ptr) {
370                 get_bh(c->ic_root_bh);
371                 *bh = c->ic_root_bh;
372                 return 0;
373         }
374
375         *bh = __ldiskfs_bread(h, c->ic_object, (int)ptr, 0);
376         if (IS_ERR(*bh))
377                 return PTR_ERR(*bh);
378
379         if (*bh == NULL)
380                 return -EIO;
381
382         return 0;
383 }
384
385 /*
386  * Return pointer to current leaf record. Pointer is valid while corresponding
387  * leaf node is locked and pinned.
388  */
389 static struct iam_rec *iam_leaf_rec(const struct iam_leaf *leaf)
390 {
391         return iam_leaf_ops(leaf)->rec(leaf);
392 }
393
394 /*
395  * Return pointer to the current leaf key. This function returns pointer to
396  * the key stored in node.
397  *
398  * Caller should assume that returned pointer is only valid while leaf node is
399  * pinned and locked.
400  */
401 static struct iam_key *iam_leaf_key(const struct iam_leaf *leaf)
402 {
403         return iam_leaf_ops(leaf)->key(leaf);
404 }
405
406 static int iam_leaf_key_size(const struct iam_leaf *leaf)
407 {
408         return iam_leaf_ops(leaf)->key_size(leaf);
409 }
410
411 static struct iam_ikey *iam_leaf_ikey(const struct iam_leaf *leaf,
412                                       struct iam_ikey *key)
413 {
414         return iam_leaf_ops(leaf)->ikey(leaf, key);
415 }
416
417 static int iam_leaf_keycmp(const struct iam_leaf *leaf,
418                            const struct iam_key *key)
419 {
420         return iam_leaf_ops(leaf)->key_cmp(leaf, key);
421 }
422
423 static int iam_leaf_keyeq(const struct iam_leaf *leaf,
424                           const struct iam_key *key)
425 {
426         return iam_leaf_ops(leaf)->key_eq(leaf, key);
427 }
428
429 #if LDISKFS_INVARIANT_ON
430 extern int dx_node_check(struct iam_path *p, struct iam_frame *f);
431
432 static int iam_path_check(struct iam_path *p)
433 {
434         int i;
435         int result;
436         struct iam_frame *f;
437         struct iam_descr *param;
438
439         result = 1;
440         param = iam_path_descr(p);
441         for (i = 0; result && i < ARRAY_SIZE(p->ip_frames); ++i) {
442                 f = &p->ip_frames[i];
443                 if (f->bh != NULL) {
444                         result = dx_node_check(p, f);
445                         if (result)
446                                 result = !param->id_ops->id_node_check(p, f);
447                 }
448         }
449         if (result && p->ip_leaf.il_bh != NULL)
450                 result = 1;
451         if (result == 0)
452                 ldiskfs_std_error(iam_path_obj(p)->i_sb, result);
453
454         return result;
455 }
456 #endif
457
458 static int iam_leaf_load(struct iam_path *path)
459 {
460         iam_ptr_t block;
461         int err;
462         struct iam_container *c;
463         struct buffer_head   *bh;
464         struct iam_leaf      *leaf;
465         struct iam_descr     *descr;
466
467         c     = path->ip_container;
468         leaf  = &path->ip_leaf;
469         descr = iam_path_descr(path);
470         block = path->ip_frame->leaf;
471         if (block == 0) {
472                 /* XXX bug 11027 */
473                 printk(KERN_EMERG "wrong leaf: %lu %d [%p %p %p]\n",
474                        (long unsigned)path->ip_frame->leaf,
475                        dx_get_count(dx_node_get_entries(path, path->ip_frame)),
476                        path->ip_frames[0].bh, path->ip_frames[1].bh,
477                        path->ip_frames[2].bh);
478         }
479         err = descr->id_ops->id_node_read(c, block, NULL, &bh);
480         if (err == 0) {
481                 leaf->il_bh = bh;
482                 leaf->il_curidx = block;
483                 err = iam_leaf_ops(leaf)->init(leaf);
484         }
485         return err;
486 }
487
488 static void iam_unlock_htree(struct iam_container *ic,
489                              struct dynlock_handle *lh)
490 {
491         if (lh != NULL)
492                 dynlock_unlock(&ic->ic_tree_lock, lh);
493 }
494
495
496 static void iam_leaf_unlock(struct iam_leaf *leaf)
497 {
498         if (leaf->il_lock != NULL) {
499                 iam_unlock_htree(iam_leaf_container(leaf),
500                                  leaf->il_lock);
501                 do_corr(schedule());
502                 leaf->il_lock = NULL;
503         }
504 }
505
506 static void iam_leaf_fini(struct iam_leaf *leaf)
507 {
508         if (leaf->il_path != NULL) {
509                 iam_leaf_unlock(leaf);
510                 iam_leaf_ops(leaf)->fini(leaf);
511                 if (leaf->il_bh) {
512                         brelse(leaf->il_bh);
513                         leaf->il_bh = NULL;
514                         leaf->il_curidx = 0;
515                 }
516         }
517 }
518
519 static void iam_leaf_start(struct iam_leaf *folio)
520 {
521         iam_leaf_ops(folio)->start(folio);
522 }
523
524 void iam_leaf_next(struct iam_leaf *folio)
525 {
526         iam_leaf_ops(folio)->next(folio);
527 }
528
529 static void iam_leaf_rec_add(struct iam_leaf *leaf, const struct iam_key *key,
530                              const struct iam_rec *rec)
531 {
532         iam_leaf_ops(leaf)->rec_add(leaf, key, rec);
533 }
534
535 static void iam_rec_del(struct iam_leaf *leaf, int shift)
536 {
537         iam_leaf_ops(leaf)->rec_del(leaf, shift);
538 }
539
540 int iam_leaf_at_end(const struct iam_leaf *leaf)
541 {
542         return iam_leaf_ops(leaf)->at_end(leaf);
543 }
544
545 static void iam_leaf_split(struct iam_leaf *l, struct buffer_head **bh,
546                            iam_ptr_t nr)
547 {
548         iam_leaf_ops(l)->split(l, bh, nr);
549 }
550
551 static inline int iam_leaf_empty(struct iam_leaf *l)
552 {
553         return iam_leaf_ops(l)->leaf_empty(l);
554 }
555
556 int iam_leaf_can_add(const struct iam_leaf *l,
557                      const struct iam_key *k, const struct iam_rec *r)
558 {
559         return iam_leaf_ops(l)->can_add(l, k, r);
560 }
561
562 static int iam_txn_dirty(handle_t *handle,
563                          struct iam_path *path, struct buffer_head *bh)
564 {
565         int result;
566
567         result = ldiskfs_handle_dirty_metadata(handle, NULL, bh);
568         if (result != 0)
569                 ldiskfs_std_error(iam_path_obj(path)->i_sb, result);
570         return result;
571 }
572
573 static int iam_txn_add(handle_t *handle,
574                        struct iam_path *path, struct buffer_head *bh)
575 {
576         int result;
577
578         result = ldiskfs_journal_get_write_access(handle, bh);
579         if (result != 0)
580                 ldiskfs_std_error(iam_path_obj(path)->i_sb, result);
581         return result;
582 }
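
/*
 * Editor's illustrative sketch (not part of the original source): the
 * journaled update pattern the two helpers above encapsulate - declare write
 * access to the buffer within the transaction handle, modify it in place,
 * then mark the metadata dirty.  iam_it_rec_set() below follows the same
 * sequence; @modify is a placeholder for the caller's change.
 */
static int __maybe_unused iam_txn_update_example(handle_t *h,
                                                 struct iam_path *path,
                                                 struct buffer_head *bh,
                                                 void (*modify)(struct buffer_head *))
{
        int result;

        result = iam_txn_add(h, path, bh);      /* ldiskfs_journal_get_write_access() */
        if (result == 0) {
                modify(bh);                     /* caller-supplied in-place change */
                result = iam_txn_dirty(h, path, bh);
        }
        return result;
}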
583
584 /***********************************************************************/
585 /* iterator interface                                                  */
586 /***********************************************************************/
587
588 static enum iam_it_state it_state(const struct iam_iterator *it)
589 {
590         return it->ii_state;
591 }
592
593 /*
594  * Helper function returning the container an iterator is attached to.
595  */
596 static struct iam_container *iam_it_container(const struct iam_iterator *it)
597 {
598         return it->ii_path.ip_container;
599 }
600
601 static inline int it_keycmp(const struct iam_iterator *it,
602                             const struct iam_key *k)
603 {
604         return iam_leaf_keycmp(&it->ii_path.ip_leaf, k);
605 }
606
607 static inline int it_keyeq(const struct iam_iterator *it,
608                            const struct iam_key *k)
609 {
610         return iam_leaf_keyeq(&it->ii_path.ip_leaf, k);
611 }
612
613 static int it_ikeycmp(const struct iam_iterator *it, const struct iam_ikey *ik)
614 {
615         return iam_ikeycmp(it->ii_path.ip_container,
616                            iam_leaf_ikey(&it->ii_path.ip_leaf,
617                                          iam_path_ikey(&it->ii_path, 0)), ik);
618 }
619
620 static inline int it_at_rec(const struct iam_iterator *it)
621 {
622         return !iam_leaf_at_end(&it->ii_path.ip_leaf);
623 }
624
625 static inline int it_before(const struct iam_iterator *it)
626 {
627         return it_state(it) == IAM_IT_SKEWED && it_at_rec(it);
628 }
629
630 /*
631  * Helper wrapper around iam_it_get(): returns 0 (success) only when a record
632  * with exactly the requested key is found.
633  */
634 static int iam_it_get_exact(struct iam_iterator *it, const struct iam_key *k)
635 {
636         int result;
637
638         result = iam_it_get(it, k);
639         if (result > 0)
640                 result = 0;
641         else if (result == 0)
642                 /*
643                  * Return -ENOENT if cursor is located above record with a key
644                  * different from one specified, or in the empty leaf.
645                  *
646                  * XXX returning -ENOENT only works if iam_it_get() never
647                  * returns -ENOENT as a legitimate error.
648                  */
649                 result = -ENOENT;
650         return result;
651 }
652
653 void iam_container_write_lock(struct iam_container *ic)
654 {
655         down_write(&ic->ic_sem);
656 }
657
658 void iam_container_write_unlock(struct iam_container *ic)
659 {
660         up_write(&ic->ic_sem);
661 }
662
663 void iam_container_read_lock(struct iam_container *ic)
664 {
665         down_read(&ic->ic_sem);
666 }
667
668 void iam_container_read_unlock(struct iam_container *ic)
669 {
670         up_read(&ic->ic_sem);
671 }
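
/*
 * Editor's illustrative sketch (not part of the original source): a
 * read-only "look up one record" pattern built from the iterator interface
 * and container locks above.  The iam_path_descr area is assumed to be set
 * up by the caller (see iam_ipd_alloc()), and taking the container read lock
 * here rather than in higher-level code is an assumption of this sketch.
 */
static int __maybe_unused iam_lookup_example(struct iam_container *c,
                                             const struct iam_key *k,
                                             struct iam_path_descr *ipd)
{
        struct iam_iterator it;
        int result;

        iam_it_init(&it, c, 0, ipd);

        iam_container_read_lock(c);
        result = iam_it_get_exact(&it, k);
        if (result == 0) {
                struct iam_rec *rec = iam_it_rec_get(&it);

                /* ... copy or inspect @rec while the leaf is pinned and locked ... */
                (void)rec;
        }
        iam_it_put(&it);
        iam_it_fini(&it);
        iam_container_read_unlock(c);

        return result;
}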
672
673 /*
674  * Initialize iterator to IAM_IT_DETACHED state.
675  *
676  * postcondition: it_state(it) == IAM_IT_DETACHED
677  */
678 int  iam_it_init(struct iam_iterator *it, struct iam_container *c, __u32 flags,
679                  struct iam_path_descr *pd)
680 {
681         memset(it, 0, sizeof *it);
682         it->ii_flags  = flags;
683         it->ii_state  = IAM_IT_DETACHED;
684         iam_path_init(&it->ii_path, c, pd);
685         return 0;
686 }
687
688 /*
689  * Finalize iterator and release all resources.
690  *
691  * precondition: it_state(it) == IAM_IT_DETACHED
692  */
693 void iam_it_fini(struct iam_iterator *it)
694 {
695         assert_corr(it_state(it) == IAM_IT_DETACHED);
696         iam_path_fini(&it->ii_path);
697 }
698
699 /*
700  * These locking primitives are used to protect parts of the directory's
701  * htree. The protection unit is a block: a leaf or an index node.
702  */
703 static struct dynlock_handle *iam_lock_htree(struct iam_container *ic,
704                                              unsigned long value,
705                                              enum dynlock_type lt)
706 {
707         return dynlock_lock(&ic->ic_tree_lock, value, lt, GFP_NOFS);
708 }
709
710 static int iam_index_lock(struct iam_path *path, struct dynlock_handle **lh)
711 {
712         struct iam_frame *f;
713
714         for (f = path->ip_frame; f >= path->ip_frames; --f, ++lh) {
715                 do_corr(schedule());
716                 *lh = iam_lock_htree(path->ip_container, f->curidx, DLT_READ);
717                 if (*lh == NULL)
718                         return -ENOMEM;
719         }
720         return 0;
721 }
722
723 /*
724  * Fast check for frame consistency.
725  */
726 static int iam_check_fast(struct iam_path *path, struct iam_frame *frame)
727 {
728         struct iam_container *bag;
729         struct iam_entry *next;
730         struct iam_entry *last;
731         struct iam_entry *entries;
732         struct iam_entry *at;
733
734         bag     = path->ip_container;
735         at      = frame->at;
736         entries = frame->entries;
737         last    = iam_entry_shift(path, entries, dx_get_count(entries) - 1);
738
739         if (unlikely(at > last))
740                 return -EAGAIN;
741
742         if (unlikely(dx_get_block(path, at) != frame->leaf))
743                 return -EAGAIN;
744
745         if (unlikely(iam_ikeycmp(bag, iam_ikey_at(path, at),
746                                  path->ip_ikey_target) > 0))
747                 return -EAGAIN;
748
749         next = iam_entry_shift(path, at, +1);
750         if (next <= last) {
751                 if (unlikely(iam_ikeycmp(bag, iam_ikey_at(path, next),
752                                          path->ip_ikey_target) <= 0))
753                         return -EAGAIN;
754         }
755         return 0;
756 }
757
758 int dx_index_is_compat(struct iam_path *path)
759 {
760         return iam_path_descr(path) == NULL;
761 }
762
763 /*
764  * iam_find_position
765  *
766  * Search for the position of the specified index key (hash) in an index node.
767  *
768  */
769
770 static struct iam_entry *iam_find_position(struct iam_path *path,
771                                            struct iam_frame *frame)
772 {
773         int count;
774         struct iam_entry *p;
775         struct iam_entry *q;
776         struct iam_entry *m;
777
778         count = dx_get_count(frame->entries);
779         assert_corr(count && count <= dx_get_limit(frame->entries));
780         p = iam_entry_shift(path, frame->entries,
781                             dx_index_is_compat(path) ? 1 : 2);
782         q = iam_entry_shift(path, frame->entries, count - 1);
783         while (p <= q) {
784                 m = iam_entry_shift(path, p, iam_entry_diff(path, q, p) / 2);
785                 if (iam_ikeycmp(path->ip_container, iam_ikey_at(path, m),
786                                 path->ip_ikey_target) > 0)
787                         q = iam_entry_shift(path, m, -1);
788                 else
789                         p = iam_entry_shift(path, m, +1);
790         }
791         return iam_entry_shift(path, p, -1);
792 }
793
794
795
796 static iam_ptr_t iam_find_ptr(struct iam_path *path, struct iam_frame *frame)
797 {
798         return dx_get_block(path, iam_find_position(path, frame));
799 }
800
801 void iam_insert_key(struct iam_path *path, struct iam_frame *frame,
802                     const struct iam_ikey *key, iam_ptr_t ptr)
803 {
804         struct iam_entry *entries = frame->entries;
805         struct iam_entry *new = iam_entry_shift(path, frame->at, +1);
806         int count = dx_get_count(entries);
807
808         /*
809          * Unfortunately we cannot assert this, as this function is sometimes
810          * called by VFS under i_sem and without pdirops lock.
811          */
812         assert_corr(1 || iam_frame_is_locked(path, frame));
813         assert_corr(count < dx_get_limit(entries));
814         assert_corr(frame->at < iam_entry_shift(path, entries, count));
815         assert_inv(dx_node_check(path, frame));
816
817         memmove(iam_entry_shift(path, new, 1), new,
818                 (char *)iam_entry_shift(path, entries, count) - (char *)new);
819         dx_set_ikey(path, new, key);
820         dx_set_block(path, new, ptr);
821         dx_set_count(entries, count + 1);
822         assert_inv(dx_node_check(path, frame));
823 }
824
825 void iam_insert_key_lock(struct iam_path *path, struct iam_frame *frame,
826                          const struct iam_ikey *key, iam_ptr_t ptr)
827 {
828         iam_lock_bh(frame->bh);
829         iam_insert_key(path, frame, key, ptr);
830         iam_unlock_bh(frame->bh);
831 }
832 /*
833  * returns 0 if path was unchanged, -EAGAIN otherwise.
834  */
835 static int iam_check_path(struct iam_path *path, struct iam_frame *frame)
836 {
837         int equal;
838
839         iam_lock_bh(frame->bh);
840         equal = iam_check_fast(path, frame) == 0 ||
841                 frame->leaf == iam_find_ptr(path, frame);
842         DX_DEVAL(iam_lock_stats.dls_bh_again += !equal);
843         iam_unlock_bh(frame->bh);
844
845         return equal ? 0 : -EAGAIN;
846 }
847
848 static int iam_lookup_try(struct iam_path *path)
849 {
850         u32 ptr;
851         int err = 0;
852         int i;
853
854         struct iam_descr *param;
855         struct iam_frame *frame;
856         struct iam_container *c;
857
858         param = iam_path_descr(path);
859         c = path->ip_container;
860
861         ptr = param->id_ops->id_root_ptr(c);
862         for (frame = path->ip_frames, i = 0; i <= path->ip_indirect;
863              ++frame, ++i) {
864                 err = param->id_ops->id_node_read(c, (iam_ptr_t)ptr, NULL,
865                                                   &frame->bh);
866                 do_corr(schedule());
867
868                 iam_lock_bh(frame->bh);
869                 /*
870                  * the node must be initialized under the bh lock because a
871                  * concurrent creation procedure may change it, and
872                  * iam_lookup_try() would see an obsolete tree height. -bzzz
873                  */
874                 if (err != 0)
875                         break;
876
877                 if (LDISKFS_INVARIANT_ON) {
878                         err = param->id_ops->id_node_check(path, frame);
879                         if (err != 0)
880                                 break;
881                 }
882
883                 err = param->id_ops->id_node_load(path, frame);
884                 if (err != 0)
885                         break;
886
887                 assert_inv(dx_node_check(path, frame));
888                 /*
889                  * splitting may change the root index block and move the hash
890                  * we're looking for into another index block, so we have to
891                  * check for this situation and repeat from the beginning if the
892                  * path got changed -bzzz
893                  */
894                 if (i > 0) {
895                         err = iam_check_path(path, frame - 1);
896                         if (err != 0)
897                                 break;
898                 }
899
900                 frame->at = iam_find_position(path, frame);
901                 frame->curidx = ptr;
902                 frame->leaf = ptr = dx_get_block(path, frame->at);
903
904                 iam_unlock_bh(frame->bh);
905                 do_corr(schedule());
906         }
907         if (err != 0)
908                 iam_unlock_bh(frame->bh);
909         path->ip_frame = --frame;
910         return err;
911 }
912
913 static int __iam_path_lookup(struct iam_path *path)
914 {
915         int err;
916         int i;
917
918         for (i = 0; i < DX_MAX_TREE_HEIGHT; ++ i)
919                 assert(path->ip_frames[i].bh == NULL);
920
921         do {
922                 err = iam_lookup_try(path);
923                 do_corr(schedule());
924                 if (err != 0)
925                         iam_path_fini(path);
926         } while (err == -EAGAIN);
927
928         return err;
929 }
930
931 /*
932  * returns 0 if path was unchanged, -EAGAIN otherwise.
933  */
934 static int iam_check_full_path(struct iam_path *path, int search)
935 {
936         struct iam_frame *bottom;
937         struct iam_frame *scan;
938         int i;
939         int result;
940
941         do_corr(schedule());
942
943         for (bottom = path->ip_frames, i = 0;
944              i < DX_MAX_TREE_HEIGHT && bottom->bh != NULL; ++bottom, ++i) {
945                 ; /* find last filled in frame */
946         }
947
948         /*
949          * Lock frames, bottom to top.
950          */
951         for (scan = bottom - 1; scan >= path->ip_frames; --scan)
952                 iam_lock_bh(scan->bh);
953         /*
954          * Check them top to bottom.
955          */
956         result = 0;
957         for (scan = path->ip_frames; scan < bottom; ++scan) {
958                 struct iam_entry *pos;
959
960                 if (search) {
961                         if (iam_check_fast(path, scan) == 0)
962                                 continue;
963
964                         pos = iam_find_position(path, scan);
965                         if (scan->leaf != dx_get_block(path, pos)) {
966                                 result = -EAGAIN;
967                                 break;
968                         }
969                         scan->at = pos;
970                 } else {
971                         pos = iam_entry_shift(path, scan->entries,
972                                               dx_get_count(scan->entries) - 1);
973                         if (scan->at > pos ||
974                             scan->leaf != dx_get_block(path, scan->at)) {
975                                 result = -EAGAIN;
976                                 break;
977                         }
978                 }
979         }
980
981         /*
982          * Unlock top to bottom.
983          */
984         for (scan = path->ip_frames; scan < bottom; ++scan)
985                 iam_unlock_bh(scan->bh);
986         DX_DEVAL(iam_lock_stats.dls_bh_full_again += !!result);
987         do_corr(schedule());
988
989         return result;
990 }
991
992
993 /*
994  * Performs path lookup and returns with found leaf (if any) locked by htree
995  * lock.
996  */
997 static int iam_lookup_lock(struct iam_path *path,
998                            struct dynlock_handle **dl, enum dynlock_type lt)
999 {
1000         int result;
1001
1002         while ((result = __iam_path_lookup(path)) == 0) {
1003                 do_corr(schedule());
1004                 *dl = iam_lock_htree(path->ip_container, path->ip_frame->leaf,
1005                                      lt);
1006                 if (*dl == NULL) {
1007                         iam_path_fini(path);
1008                         result = -ENOMEM;
1009                         break;
1010                 }
1011                 do_corr(schedule());
1012                 /*
1013                  * while we were locking it, the leaf we just found may have
1014                  * been split, so we need to check for this -bzzz
1015                  */
1016                 if (iam_check_full_path(path, 1) == 0)
1017                         break;
1018                 iam_unlock_htree(path->ip_container, *dl);
1019                 *dl = NULL;
1020                 iam_path_fini(path);
1021         }
1022         return result;
1023 }
1024 /*
1025  * Performs tree top-to-bottom traversal starting from root, and loads leaf
1026  * node.
1027  */
1028 static int iam_path_lookup(struct iam_path *path, int index)
1029 {
1030         struct iam_container *c;
1031         struct iam_leaf  *leaf;
1032         int result;
1033
1034         c = path->ip_container;
1035         leaf = &path->ip_leaf;
1036         result = iam_lookup_lock(path, &leaf->il_lock, DLT_WRITE);
1037         assert_inv(iam_path_check(path));
1038         do_corr(schedule());
1039         if (result == 0) {
1040                 result = iam_leaf_load(path);
1041                 if (result == 0) {
1042                         do_corr(schedule());
1043                         if (index)
1044                                 result = iam_leaf_ops(leaf)->
1045                                         ilookup(leaf, path->ip_ikey_target);
1046                         else
1047                                 result = iam_leaf_ops(leaf)->
1048                                         lookup(leaf, path->ip_key_target);
1049                         do_corr(schedule());
1050                 }
1051                 if (result < 0)
1052                         iam_leaf_unlock(leaf);
1053         }
1054         return result;
1055 }
1056
1057 /*
1058  * Common part of iam_it_{i,}get().
1059  */
1060 static int __iam_it_get(struct iam_iterator *it, int index)
1061 {
1062         int result;
1063         assert_corr(it_state(it) == IAM_IT_DETACHED);
1064
1065         result = iam_path_lookup(&it->ii_path, index);
1066         if (result >= 0) {
1067                 int collision;
1068
1069                 collision = result & IAM_LOOKUP_LAST;
1070                 switch (result & ~IAM_LOOKUP_LAST) {
1071                 case IAM_LOOKUP_EXACT:
1072                         result = +1;
1073                         it->ii_state = IAM_IT_ATTACHED;
1074                         break;
1075                 case IAM_LOOKUP_OK:
1076                         result = 0;
1077                         it->ii_state = IAM_IT_ATTACHED;
1078                         break;
1079                 case IAM_LOOKUP_BEFORE:
1080                 case IAM_LOOKUP_EMPTY:
1081                         result = 0;
1082                         it->ii_state = IAM_IT_SKEWED;
1083                         break;
1084                 default:
1085                         assert(0);
1086                 }
1087                 result |= collision;
1088         }
1089         /*
1090          * See iam_it_get_exact() for explanation.
1091          */
1092         assert_corr(result != -ENOENT);
1093         return result;
1094 }
1095
1096 /*
1097  * The correct hash was found but with a different key; iterate through the
1098  * hash collision chain, looking for the correct record.
1099  */
1100 static int iam_it_collision(struct iam_iterator *it)
1101 {
1102         int result;
1103
1104         assert(ergo(it_at_rec(it), !it_keyeq(it, it->ii_path.ip_key_target)));
1105
1106         while ((result = iam_it_next(it)) == 0) {
1107                 do_corr(schedule());
1108                 if (it_ikeycmp(it, it->ii_path.ip_ikey_target) != 0)
1109                         return -ENOENT;
1110                 if (it_keyeq(it, it->ii_path.ip_key_target))
1111                         return 0;
1112         }
1113         return result;
1114 }
1115
1116 /*
1117  * Attach iterator. After successful completion, @it points to record with
1118  * least key not larger than @k.
1119  *
1120  * Return value: 0: positioned on existing record,
1121  *             +ve: exact position found,
1122  *             -ve: error.
1123  *
1124  * precondition:  it_state(it) == IAM_IT_DETACHED
1125  * postcondition: ergo(result == 0 && it_state(it) == IAM_IT_ATTACHED,
1126  *                     it_keycmp(it, k) <= 0)
1127  */
1128 int iam_it_get(struct iam_iterator *it, const struct iam_key *k)
1129 {
1130         int result;
1131         assert_corr(it_state(it) == IAM_IT_DETACHED);
1132
1133         it->ii_path.ip_ikey_target = NULL;
1134         it->ii_path.ip_key_target  = k;
1135
1136         result = __iam_it_get(it, 0);
1137
1138         if (result == IAM_LOOKUP_LAST) {
1139                 result = iam_it_collision(it);
1140                 if (result != 0) {
1141                         iam_it_put(it);
1142                         iam_it_fini(it);
1143                         result = __iam_it_get(it, 0);
1144                 } else
1145                         result = +1;
1146         }
1147         if (result > 0)
1148                 result &= ~IAM_LOOKUP_LAST;
1149
1150         assert_corr(ergo(result > 0, it_keycmp(it, k) == 0));
1151         assert_corr(ergo(result == 0 && it_state(it) == IAM_IT_ATTACHED,
1152                          it_keycmp(it, k) <= 0));
1153         return result;
1154 }
1155
1156 /*
1157  * Attach iterator by index key.
1158  */
1159 static int iam_it_iget(struct iam_iterator *it, const struct iam_ikey *k)
1160 {
1161         assert_corr(it_state(it) == IAM_IT_DETACHED);
1162
1163         it->ii_path.ip_ikey_target = k;
1164         return __iam_it_get(it, 1) & ~IAM_LOOKUP_LAST;
1165 }
1166
1167 /*
1168  * Attach the iterator, and ensure it points to a record (not skewed).
1169  *
1170  * Return value: 0: positioned on existing record,
1171  *             +ve: exact position found,
1172  *             -ve: error.
1173  *
1174  * precondition:  it_state(it) == IAM_IT_DETACHED &&
1175  *                !(it->ii_flags&IAM_IT_WRITE)
1176  * postcondition: ergo(result == 0, it_state(it) == IAM_IT_ATTACHED)
1177  */
1178 int iam_it_get_at(struct iam_iterator *it, const struct iam_key *k)
1179 {
1180         int result;
1181         assert_corr(it_state(it) == IAM_IT_DETACHED &&
1182                     !(it->ii_flags&IAM_IT_WRITE));
1183         result = iam_it_get(it, k);
1184         if (result == 0) {
1185                 if (it_state(it) != IAM_IT_ATTACHED) {
1186                         assert_corr(it_state(it) == IAM_IT_SKEWED);
1187                         result = iam_it_next(it);
1188                 }
1189         }
1190         assert_corr(ergo(result >= 0, it_state(it) == IAM_IT_ATTACHED));
1191         return result;
1192 }
1193
1194 /*
1195  * Duplicates iterator.
1196  *
1197  * postcondition: it_state(dst) == it_state(src) &&
1198  *                iam_it_container(dst) == iam_it_container(src) &&
1199  *                dst->ii_flags == src->ii_flags &&
1200  *                ergo(it_state(src) == IAM_IT_ATTACHED,
1201  *                     iam_it_rec_get(dst) == iam_it_rec_get(src) &&
1202  *                     iam_it_key_get(dst) == iam_it_key_get(src))
1203  */
1204 void iam_it_dup(struct iam_iterator *dst, const struct iam_iterator *src)
1205 {
1206         dst->ii_flags     = src->ii_flags;
1207         dst->ii_state     = src->ii_state;
1208         /* XXX not yet. iam_path_dup(&dst->ii_path, &src->ii_path); */
1209         /*
1210          * XXX: duplicate lock.
1211          */
1212         assert_corr(it_state(dst) == it_state(src));
1213         assert_corr(iam_it_container(dst) == iam_it_container(src));
1214         assert_corr(dst->ii_flags == src->ii_flags);
1215         assert_corr(ergo(it_state(src) == IAM_IT_ATTACHED,
1216                     iam_it_rec_get(dst) == iam_it_rec_get(src) &&
1217                     iam_it_key_get(dst) == iam_it_key_get(src)));
1218
1219 }
1220
1221 /*
1222  * Detach iterator. Does nothing in the detached state.
1223  *
1224  * postcondition: it_state(it) == IAM_IT_DETACHED
1225  */
1226 void iam_it_put(struct iam_iterator *it)
1227 {
1228         if (it->ii_state != IAM_IT_DETACHED) {
1229                 it->ii_state = IAM_IT_DETACHED;
1230                 iam_leaf_fini(&it->ii_path.ip_leaf);
1231         }
1232 }
1233
1234 static struct iam_ikey *iam_it_ikey_get(const struct iam_iterator *it,
1235                                         struct iam_ikey *ikey);
1236
1237
1238 /*
1239  * This function increments the frame pointer to search the next leaf
1240  * block, and reads in the necessary intervening nodes if the search
1241  * should be necessary.  Whether or not the search is necessary is
1242  * controlled by the hash parameter.  If the hash value is even, then
1243  * the search is only continued if the next block starts with that
1244  * hash value.  This is used if we are searching for a specific file.
1245  *
1246  * If the hash value is HASH_NB_ALWAYS, then always go to the next block.
1247  *
1248  * This function returns 1 if the caller should continue to search,
1249  * or 0 if it should not.  If there is an error reading one of the
1250  * index blocks, it will return a negative error code.
1251  *
1252  * If start_hash is non-null, it will be filled in with the starting
1253  * hash of the next page.
1254  */
1255 static int iam_htree_advance(struct inode *dir, __u32 hash,
1256                               struct iam_path *path, __u32 *start_hash,
1257                               int compat)
1258 {
1259         struct iam_frame *p;
1260         struct buffer_head *bh;
1261         int err, num_frames = 0;
1262         __u32 bhash;
1263
1264         p = path->ip_frame;
1265         /*
1266          * Find the next leaf page by incrementing the frame pointer.
1267          * If we run out of entries in the interior node, loop around and
1268          * increment pointer in the parent node.  When we break out of
1269          * this loop, num_frames indicates the number of interior
1270          * nodes need to be read.
1271          */
1272         while (1) {
1273                 do_corr(schedule());
1274                 iam_lock_bh(p->bh);
1275                 if (p->at_shifted)
1276                         p->at_shifted = 0;
1277                 else
1278                         p->at = iam_entry_shift(path, p->at, +1);
1279                 if (p->at < iam_entry_shift(path, p->entries,
1280                                             dx_get_count(p->entries))) {
1281                         p->leaf = dx_get_block(path, p->at);
1282                         iam_unlock_bh(p->bh);
1283                         break;
1284                 }
1285                 iam_unlock_bh(p->bh);
1286                 if (p == path->ip_frames)
1287                         return 0;
1288                 num_frames++;
1289                 --p;
1290         }
1291
1292         if (compat) {
1293                 /*
1294                  * Htree hash magic.
1295                  */
1296                 /*
1297                  * If the hash is 1, then continue only if the next page has a
1298                  * continuation hash of any value.  This is used for readdir
1299                  * handling.  Otherwise, check to see if the hash matches the
1300                  * desired continuation hash.  If it doesn't, return since
1301                  * there's no point to read in the successive index pages.
1302                  */
1303                 dx_get_ikey(path, p->at, (struct iam_ikey *)&bhash);
1304                 if (start_hash)
1305                         *start_hash = bhash;
1306                 if ((hash & 1) == 0) {
1307                         if ((bhash & ~1) != hash)
1308                                 return 0;
1309                 }
1310         }
1311         /*
1312          * If the hash is HASH_NB_ALWAYS, we always go to the next
1313          * block so no check is necessary
1314          */
1315         while (num_frames--) {
1316                 iam_ptr_t idx;
1317
1318                 do_corr(schedule());
1319                 iam_lock_bh(p->bh);
1320                 idx = p->leaf = dx_get_block(path, p->at);
1321                 iam_unlock_bh(p->bh);
1322                 err = iam_path_descr(path)->id_ops->
1323                         id_node_read(path->ip_container, idx, NULL, &bh);
1324                 if (err != 0)
1325                         return err; /* Failure */
1326                 ++p;
1327                 brelse(p->bh);
1328                 assert_corr(p->bh != bh);
1329                 p->bh = bh;
1330                 p->entries = dx_node_get_entries(path, p);
1331                 p->at = iam_entry_shift(path, p->entries, !compat);
1332                 assert_corr(p->curidx != idx);
1333                 p->curidx = idx;
1334                 iam_lock_bh(p->bh);
1335                 assert_corr(p->leaf != dx_get_block(path, p->at));
1336                 p->leaf = dx_get_block(path, p->at);
1337                 iam_unlock_bh(p->bh);
1338                 assert_inv(dx_node_check(path, p));
1339         }
1340         return 1;
1341 }
1342
1343
1344 static inline int iam_index_advance(struct iam_path *path)
1345 {
1346         return iam_htree_advance(iam_path_obj(path), 0, path, NULL, 0);
1347 }
1348
1349 static void iam_unlock_array(struct iam_container *ic,
1350                              struct dynlock_handle **lh)
1351 {
1352         int i;
1353
1354         for (i = 0; i < DX_MAX_TREE_HEIGHT; ++i, ++lh) {
1355                 if (*lh != NULL) {
1356                         iam_unlock_htree(ic, *lh);
1357                         *lh = NULL;
1358                 }
1359         }
1360 }
1361 /*
1362  * Advance the index part of @path to point to the next leaf. Returns 1 on
1363  * success, 0 when the end of the container is reached. The leaf node is locked.
1364  */
1365 int iam_index_next(struct iam_container *c, struct iam_path *path)
1366 {
1367         iam_ptr_t cursor;
1368         struct dynlock_handle *lh[DX_MAX_TREE_HEIGHT] = { NULL, };
1369         int result;
1370         struct inode *object;
1371
1372         /*
1373          * Locking for iam_index_next()... is to be described.
1374          */
1375
1376         object = c->ic_object;
1377         cursor = path->ip_frame->leaf;
1378
1379         while (1) {
1380                 result = iam_index_lock(path, lh);
1381                 do_corr(schedule());
1382                 if (result < 0)
1383                         break;
1384
1385                 result = iam_check_full_path(path, 0);
1386                 if (result == 0 && cursor == path->ip_frame->leaf) {
1387                         result = iam_index_advance(path);
1388
1389                         assert_corr(result == 0 ||
1390                                     cursor != path->ip_frame->leaf);
1391                         break;
1392                 }
1393                 do {
1394                         iam_unlock_array(c, lh);
1395
1396                         iam_path_release(path);
1397                         do_corr(schedule());
1398
1399                         result = __iam_path_lookup(path);
1400                         if (result < 0)
1401                                 break;
1402
1403                         while (path->ip_frame->leaf != cursor) {
1404                                 do_corr(schedule());
1405
1406                                 result = iam_index_lock(path, lh);
1407                                 do_corr(schedule());
1408                                 if (result < 0)
1409                                         break;
1410
1411                                 result = iam_check_full_path(path, 0);
1412                                 if (result != 0)
1413                                         break;
1414
1415                                 result = iam_index_advance(path);
1416                                 if (result == 0) {
1417                                         CERROR("cannot find cursor : %u\n",
1418                                                 cursor);
1419                                         result = -EIO;
1420                                 }
1421                                 if (result < 0)
1422                                         break;
1423                                 result = iam_check_full_path(path, 0);
1424                                 if (result != 0)
1425                                         break;
1426                                 iam_unlock_array(c, lh);
1427                         }
1428                 } while (result == -EAGAIN);
1429                 if (result < 0)
1430                         break;
1431         }
1432         iam_unlock_array(c, lh);
1433         return result;
1434 }
1435
1436 /*
1437  * Move iterator one record right.
1438  *
1439  * Return value: 0: success,
1440  *              +1: end of container reached
1441  *             -ve: error
1442  *
1443  * precondition:  (it_state(it) == IAM_IT_ATTACHED ||
1444  *                 it_state(it) == IAM_IT_SKEWED) && it->ii_flags&IAM_IT_MOVE
1445  * postcondition: ergo(result == 0, it_state(it) == IAM_IT_ATTACHED) &&
1446  *                ergo(result >  0, it_state(it) == IAM_IT_DETACHED)
1447  */
1448 int iam_it_next(struct iam_iterator *it)
1449 {
1450         int result;
1451         struct iam_path      *path;
1452         struct iam_leaf      *leaf;
1453         do_corr(struct iam_ikey *ik_orig);
1454
1455         /* assert_corr(it->ii_flags&IAM_IT_MOVE); */
1456         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1457                     it_state(it) == IAM_IT_SKEWED);
1458
1459         path = &it->ii_path;
1460         leaf = &path->ip_leaf;
1461
1462         assert_corr(iam_leaf_is_locked(leaf));
1463
1464         result = 0;
1465         do_corr(ik_orig = it_at_rec(it) ?
1466                 iam_it_ikey_get(it, iam_path_ikey(path, 2)) : NULL);
1467         if (it_before(it)) {
1468                 assert_corr(!iam_leaf_at_end(leaf));
1469                 it->ii_state = IAM_IT_ATTACHED;
1470         } else {
1471                 if (!iam_leaf_at_end(leaf))
1472                         /* advance within leaf node */
1473                         iam_leaf_next(leaf);
1474                 /*
1475                  * multiple iterations may be necessary due to empty leaves.
1476                  */
1477                 while (result == 0 && iam_leaf_at_end(leaf)) {
1478                         do_corr(schedule());
1479                         /* advance index portion of the path */
1480                         result = iam_index_next(iam_it_container(it), path);
1481                         assert_corr(iam_leaf_is_locked(leaf));
1482                         if (result == 1) {
1483                                 struct dynlock_handle *lh;
1484                                 lh = iam_lock_htree(iam_it_container(it),
1485                                                     path->ip_frame->leaf,
1486                                                     DLT_WRITE);
1487                                 if (lh != NULL) {
1488                                         iam_leaf_fini(leaf);
1489                                         leaf->il_lock = lh;
1490                                         result = iam_leaf_load(path);
1491                                         if (result == 0)
1492                                                 iam_leaf_start(leaf);
1493                                 } else
1494                                         result = -ENOMEM;
1495                         } else if (result == 0)
1496                                 /* end of container reached */
1497                                 result = +1;
1498                         if (result != 0)
1499                                 iam_it_put(it);
1500                 }
1501                 if (result == 0)
1502                         it->ii_state = IAM_IT_ATTACHED;
1503         }
1504         assert_corr(ergo(result == 0, it_state(it) == IAM_IT_ATTACHED));
1505         assert_corr(ergo(result >  0, it_state(it) == IAM_IT_DETACHED));
1506         assert_corr(ergo(result == 0 && ik_orig != NULL,
1507                          it_ikeycmp(it, ik_orig) >= 0));
1508         return result;
1509 }
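
/*
 * Editor's illustrative sketch (not part of the original source): walk the
 * records to the right of the position at which iam_it_get_at(@k) attaches
 * the iterator, using the contracts of iam_it_get_at() and iam_it_next()
 * above.  @handle_rec is a placeholder callback; locking and path descriptor
 * setup are left to the caller.
 */
static int __maybe_unused iam_walk_example(struct iam_container *c,
                                           const struct iam_key *k,
                                           struct iam_path_descr *ipd,
                                           void (*handle_rec)(const struct iam_key *key,
                                                              const struct iam_rec *rec))
{
        struct iam_iterator it;
        int result;

        iam_it_init(&it, c, IAM_IT_MOVE, ipd);

        result = iam_it_get_at(&it, k);
        while (result >= 0 && it_state(&it) == IAM_IT_ATTACHED) {
                handle_rec(iam_it_key_get(&it), iam_it_rec_get(&it));
                result = iam_it_next(&it);      /* +1 means end of container */
                if (result != 0)
                        break;
        }
        iam_it_put(&it);
        iam_it_fini(&it);

        return result < 0 ? result : 0;
}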
1510
1511 /*
1512  * Return pointer to the record under iterator.
1513  *
1514  * precondition:  it_state(it) == IAM_IT_ATTACHED && it_at_rec(it)
1515  * postcondition: it_state(it) == IAM_IT_ATTACHED
1516  */
1517 struct iam_rec *iam_it_rec_get(const struct iam_iterator *it)
1518 {
1519         assert_corr(it_state(it) == IAM_IT_ATTACHED);
1520         assert_corr(it_at_rec(it));
1521         return iam_leaf_rec(&it->ii_path.ip_leaf);
1522 }
1523
1524 static void iam_it_reccpy(struct iam_iterator *it, const struct iam_rec *r)
1525 {
1526         struct iam_leaf *folio;
1527
1528         folio = &it->ii_path.ip_leaf;
1529         iam_leaf_ops(folio)->rec_set(folio, r);
1530 }
1531
1532 /*
1533  * Replace contents of record under iterator.
1534  *
1535  * precondition:  it_state(it) == IAM_IT_ATTACHED &&
1536  *                it->ii_flags&IAM_IT_WRITE
1537  * postcondition: it_state(it) == IAM_IT_ATTACHED &&
1538  *                ergo(result == 0, !memcmp(iam_it_rec_get(it), r, ...))
1539  */
1540 int iam_it_rec_set(handle_t *h,
1541                    struct iam_iterator *it, const struct iam_rec *r)
1542 {
1543         int result;
1544         struct iam_path *path;
1545         struct buffer_head *bh;
1546
1547         assert_corr(it_state(it) == IAM_IT_ATTACHED &&
1548                     it->ii_flags&IAM_IT_WRITE);
1549         assert_corr(it_at_rec(it));
1550
1551         path = &it->ii_path;
1552         bh   = path->ip_leaf.il_bh;
1553         result = iam_txn_add(h, path, bh);
1554         if (result == 0) {
1555                 iam_it_reccpy(it, r);
1556                 result = iam_txn_dirty(h, path, bh);
1557         }
1558         return result;
1559 }
1560
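/*
 * A minimal sketch of in-place record update through the iterator above;
 * it assumes the iterator was opened with IAM_IT_WRITE and positioned with
 * iam_it_get_exact(). "rec_size" stands for the container's record size and
 * is not a symbol defined in this file:
 *
 *	if (memcmp(iam_it_rec_get(it), new_rec, rec_size) != 0)
 *		rc = iam_it_rec_set(handle, it, new_rec);
 */
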
1561 /*
1562  * Return pointer to the index key under iterator.
1563  *
1564  * precondition:  it_state(it) == IAM_IT_ATTACHED ||
1565  *                it_state(it) == IAM_IT_SKEWED
1566  */
1567 static struct iam_ikey *iam_it_ikey_get(const struct iam_iterator *it,
1568                                         struct iam_ikey *ikey)
1569 {
1570         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1571                     it_state(it) == IAM_IT_SKEWED);
1572         assert_corr(it_at_rec(it));
1573         return iam_leaf_ikey(&it->ii_path.ip_leaf, ikey);
1574 }
1575
1576 /*
1577  * Return pointer to the key under iterator.
1578  *
1579  * precondition:  it_state(it) == IAM_IT_ATTACHED ||
1580  *                it_state(it) == IAM_IT_SKEWED
1581  */
1582 struct iam_key *iam_it_key_get(const struct iam_iterator *it)
1583 {
1584         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1585                     it_state(it) == IAM_IT_SKEWED);
1586         assert_corr(it_at_rec(it));
1587         return iam_leaf_key(&it->ii_path.ip_leaf);
1588 }
1589
1590 /*
1591  * Return size of key under iterator (in bytes)
1592  *
1593  * precondition:  it_state(it) == IAM_IT_ATTACHED ||
1594  *                it_state(it) == IAM_IT_SKEWED
1595  */
1596 int iam_it_key_size(const struct iam_iterator *it)
1597 {
1598         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1599                     it_state(it) == IAM_IT_SKEWED);
1600         assert_corr(it_at_rec(it));
1601         return iam_leaf_key_size(&it->ii_path.ip_leaf);
1602 }
1603
1604 static struct buffer_head *
1605 iam_new_node(handle_t *h, struct iam_container *c, iam_ptr_t *b, int *e)
1606 {
1607         struct inode *inode = c->ic_object;
1608         struct buffer_head *bh = NULL;
1609         struct iam_idle_head *head;
1610         struct buffer_head *idle;
1611         __u32 *idle_blocks;
1612         __u16 count;
1613
1614         if (c->ic_idle_bh == NULL)
1615                 goto newblock;
1616
1617         mutex_lock(&c->ic_idle_mutex);
1618         if (unlikely(c->ic_idle_bh == NULL)) {
1619                 mutex_unlock(&c->ic_idle_mutex);
1620                 goto newblock;
1621         }
1622
1623         head = (struct iam_idle_head *)(c->ic_idle_bh->b_data);
1624         count = le16_to_cpu(head->iih_count);
1625         if (count > 0) {
1626                 *e = ldiskfs_journal_get_write_access(h, c->ic_idle_bh);
1627                 if (*e != 0)
1628                         goto fail;
1629
1630                 --count;
1631                 *b = le32_to_cpu(head->iih_blks[count]);
1632                 head->iih_count = cpu_to_le16(count);
1633                 *e = ldiskfs_handle_dirty_metadata(h, inode, c->ic_idle_bh);
1634                 if (*e != 0)
1635                         goto fail;
1636
1637                 mutex_unlock(&c->ic_idle_mutex);
1638                 bh = __ldiskfs_bread(NULL, inode, *b, 0);
1639                 if (IS_ERR_OR_NULL(bh)) {
1640                         if (IS_ERR(bh))
1641                                 *e = PTR_ERR(bh);
1642                         else
1643                                 *e = -EIO;
1644                         return NULL;
1645                 }
1646                 goto got;
1647         }
1648
1649         /* The block that contains the iam_idle_head is itself an idle
1650          * block, and can be used as the new node. */
1651         idle_blocks = (__u32 *)(c->ic_root_bh->b_data +
1652                                 c->ic_descr->id_root_gap +
1653                                 sizeof(struct dx_countlimit));
1654         *e = ldiskfs_journal_get_write_access(h, c->ic_root_bh);
1655         if (*e != 0)
1656                 goto fail;
1657
1658         *b = le32_to_cpu(*idle_blocks);
1659         iam_lock_bh(c->ic_root_bh);
1660         *idle_blocks = head->iih_next;
1661         iam_unlock_bh(c->ic_root_bh);
1662         *e = ldiskfs_handle_dirty_metadata(h, inode, c->ic_root_bh);
1663         if (*e != 0) {
1664                 iam_lock_bh(c->ic_root_bh);
1665                 *idle_blocks = cpu_to_le32(*b);
1666                 iam_unlock_bh(c->ic_root_bh);
1667                 goto fail;
1668         }
1669
1670         bh = c->ic_idle_bh;
1671         idle = iam_load_idle_blocks(c, le32_to_cpu(*idle_blocks));
1672         if (idle != NULL && IS_ERR(idle)) {
1673                 *e = PTR_ERR(idle);
1674                 c->ic_idle_bh = NULL;
1675                 brelse(bh);
1676                 goto fail;
1677         }
1678
1679         c->ic_idle_bh = idle;
1680         mutex_unlock(&c->ic_idle_mutex);
1681
1682 got:
1683         /* get write access for the found buffer head */
1684         *e = ldiskfs_journal_get_write_access(h, bh);
1685         if (*e != 0) {
1686                 brelse(bh);
1687                 bh = NULL;
1688                 ldiskfs_std_error(inode->i_sb, *e);
1689         } else {
1690                 /* Clear the reused node so it looks like a new node. */
1691                 memset(bh->b_data, 0, inode->i_sb->s_blocksize);
1692                 set_buffer_uptodate(bh);
1693         }
1694         return bh;
1695
1696 newblock:
1697         bh = osd_ldiskfs_append(h, inode, b);
1698         if (IS_ERR(bh)) {
1699                 *e = PTR_ERR(bh);
1700                 bh = NULL;
1701         }
1702
1703         return bh;
1704
1705 fail:
1706         mutex_unlock(&c->ic_idle_mutex);
1707         ldiskfs_std_error(inode->i_sb, *e);
1708         return NULL;
1709 }
1710
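/*
 * A sketch of the idle-block bookkeeping consumed by iam_new_node() above
 * (field names are taken from the code in this file; the exact structure
 * declarations live elsewhere in the osd code, so treat the layout below as
 * illustrative rather than authoritative):
 *
 *   root block                        c->ic_idle_bh
 *   +--------------------+            +----------------------------------+
 *   | ... | idle_blocks -+----------->| iih_magic | iih_count | iih_next  |
 *   +--------------------+            | iih_blks[0 .. iih_count - 1]      |
 *                                     +-----------------+----------------+
 *                                                       | iih_next
 *                                                       v
 *                                          next idle-head block or 0
 *
 * Blocks are handed out from iih_blks[] first; once that array is empty the
 * head block itself is reused as the new node and ic_idle_bh advances to the
 * block referenced by iih_next (via iam_load_idle_blocks() above).
 */
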
1711 /*
1712  * Insertion of a new record. Interaction with jbd in the non-trivial case
1713  * (when a split happens) is as follows:
1714  *
1715  *  - the new leaf node is added to the transaction by iam_new_node();
1716  *
1717  *  - the old leaf node is added to the transaction by iam_add_rec();
1718  *
1719  *  - the leaf holding the insertion point is marked dirty by iam_add_rec();
1720  *
1721  *  - the leaf without the insertion point is marked dirty (as @new_leaf) by
1722  *  iam_new_leaf();
1723  *
1724  *  - split index nodes are added to the transaction and marked dirty by
1725  *  split_index_node();
1726  *
1727  *  - the "safe" index node, which is not split but receives the new pointer,
1728  *  is added to the transaction and marked dirty by split_index_node();
1729  *
1730  *  - the index node where the pointer to the new leaf is inserted is added to
1731  *  the transaction by split_index_node() and marked dirty by iam_add_rec();
1732  *
1733  *  - the inode is marked dirty by iam_add_rec().
1734  *
1735  */
1736
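/*
 * For reference, the journaling pattern that the functions below follow is
 * roughly the following (a minimal sketch; iam_txn_add() and iam_txn_dirty()
 * are assumed here to wrap the journal calls seen earlier in this file,
 * ldiskfs_journal_get_write_access() and ldiskfs_handle_dirty_metadata()):
 *
 *	err = iam_txn_add(handle, path, bh);	   - join bh to the handle
 *	if (err == 0) {
 *		... modify bh->b_data under the appropriate lock ...
 *		err = iam_txn_dirty(handle, path, bh); - log the change
 *	}
 */
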
1737 static int iam_new_leaf(handle_t *handle, struct iam_leaf *leaf)
1738 {
1739         int err;
1740         iam_ptr_t blknr;
1741         struct buffer_head   *new_leaf;
1742         struct buffer_head   *old_leaf;
1743         struct iam_container *c;
1744         struct inode         *obj;
1745         struct iam_path      *path;
1746
1747         c = iam_leaf_container(leaf);
1748         path = leaf->il_path;
1749
1750         obj = c->ic_object;
1751         new_leaf = iam_new_node(handle, c, &blknr, &err);
1752         do_corr(schedule());
1753         if (new_leaf != NULL) {
1754                 struct dynlock_handle *lh;
1755
1756                 lh = iam_lock_htree(c, blknr, DLT_WRITE);
1757                 do_corr(schedule());
1758                 if (lh != NULL) {
1759                         iam_leaf_ops(leaf)->init_new(c, new_leaf);
1760                         do_corr(schedule());
1761                         old_leaf = leaf->il_bh;
1762                         iam_leaf_split(leaf, &new_leaf, blknr);
1763                         if (old_leaf != leaf->il_bh) {
1764                                 /*
1765                                  * Switched to the new leaf.
1766                                  */
1767                                 iam_leaf_unlock(leaf);
1768                                 leaf->il_lock = lh;
1769                                 path->ip_frame->leaf = blknr;
1770                         } else
1771                                 iam_unlock_htree(path->ip_container, lh);
1772                         do_corr(schedule());
1773                         err = iam_txn_dirty(handle, path, new_leaf);
1774                         if (err == 0)
1775                                 err = ldiskfs_mark_inode_dirty(handle, obj);
1776                         do_corr(schedule());
1777                 } else
1778                         err = -ENOMEM;
1779                 brelse(new_leaf);
1780         }
1781         assert_inv(iam_path_check(iam_leaf_path(leaf)));
1782         return err;
1783 }
1784
1785 static inline void dx_set_limit(struct iam_entry *entries, unsigned value)
1786 {
1787         ((struct dx_countlimit *) entries)->limit = cpu_to_le16(value);
1788 }
1789
1790 static int iam_shift_entries(struct iam_path *path,
1791                          struct iam_frame *frame, unsigned count,
1792                          struct iam_entry *entries, struct iam_entry *entries2,
1793                          u32 newblock)
1794 {
1795         unsigned count1;
1796         unsigned count2;
1797         int delta;
1798
1799         struct iam_frame *parent = frame - 1;
1800         struct iam_ikey *pivot = iam_path_ikey(path, 3);
1801
1802         delta = dx_index_is_compat(path) ? 0 : +1;
1803
1804         count1 = count/2 + delta;
1805         count2 = count - count1;
1806         dx_get_ikey(path, iam_entry_shift(path, entries, count1), pivot);
1807
1808         dxtrace(printk("Split index %d/%d\n", count1, count2));
1809
1810         memcpy((char *) iam_entry_shift(path, entries2, delta),
1811                (char *) iam_entry_shift(path, entries, count1),
1812                count2 * iam_entry_size(path));
1813
1814         dx_set_count(entries2, count2 + delta);
1815         dx_set_limit(entries2, dx_node_limit(path));
1816
1817         /*
1818          * NOTE: very subtle piece of code. A competing dx_probe() may find
1819          * the 2nd-level index in the root index; we then insert the new index
1820          * here and set the new count in that 2nd-level index, so dx_probe()
1821          * may see a 2nd-level index without the hash it looks for. Re-check
1822          * the root index after locking the just-found 2nd-level index -bzzz
1823          */
1824         iam_insert_key_lock(path, parent, pivot, newblock);
1825
1826         /*
1827          * Now the old and new 2nd-level index blocks together contain all
1828          * pointers, so dx_probe() may find one in either; that is OK -bzzz
1829          */
1830         iam_lock_bh(frame->bh);
1831         dx_set_count(entries, count1);
1832         iam_unlock_bh(frame->bh);
1833
1834         /*
1835          * Now the old 2nd-level index block points to the first half of the
1836          * leaves. It is important that dx_probe() checks the root index block
1837          * for changes under dx_lock_bh(frame->bh) -bzzz
1838          */
1839
1840         return count1;
1841 }
1842
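/*
 * Worked example for iam_shift_entries() with illustrative numbers: for
 * count == 510 and a non-compat index (delta == +1), count1 == 510/2 + 1 ==
 * 256 and count2 == 510 - 256 == 254; the old block keeps the first 256
 * entries, the new block is set up with count2 + delta == 255 entries, and
 * the pivot key taken at offset count1 is inserted into the parent frame
 * next to the pointer to newblock.
 */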
1843
1844 int split_index_node(handle_t *handle, struct iam_path *path,
1845                      struct dynlock_handle **lh)
1846 {
1847
1848         struct iam_entry *entries;   /* old block contents */
1849         struct iam_entry *entries2;  /* new block contents */
1850         struct iam_frame *frame, *safe;
1851         struct buffer_head *bh_new[DX_MAX_TREE_HEIGHT] = {NULL};
1852         u32 newblock[DX_MAX_TREE_HEIGHT] = {0};
1853         struct dynlock_handle *lock[DX_MAX_TREE_HEIGHT] = {NULL,};
1854         struct dynlock_handle *new_lock[DX_MAX_TREE_HEIGHT] = {NULL,};
1855         struct inode *dir = iam_path_obj(path);
1856         struct iam_descr *descr;
1857         int nr_splet;
1858         int i, err;
1859
1860         descr = iam_path_descr(path);
1861         /*
1862          * Algorithm below depends on this.
1863          */
1864         assert_corr(dx_root_limit(path) < dx_node_limit(path));
1865
1866         frame = path->ip_frame;
1867         entries = frame->entries;
1868
1869         /*
1870          * Tall-tree handling: we might have to split multiple index blocks
1871          * all the way up to the tree root. The tricky point here is error
1872          * handling: to avoid complicated undo/rollback we
1873          *
1874          *   - first allocate all necessary blocks, and only then
1875          *
1876          *   - insert pointers into them atomically.
1877          */
1878
1879         /*
1880          * Locking: the leaf is already locked. htree locks are acquired on
1881          * all index nodes that require a split, bottom-to-top, on the "safe"
1882          * node, and on all new nodes.
1883          */
1884
1885         dxtrace(printk("using %u of %u node entries\n",
1886                        dx_get_count(entries), dx_get_limit(entries)));
1887
1888         /* What levels need split? */
1889         for (nr_splet = 0; frame >= path->ip_frames &&
1890              dx_get_count(frame->entries) == dx_get_limit(frame->entries);
1891              --frame, ++nr_splet) {
1892                 do_corr(schedule());
1893                 if (nr_splet == DX_MAX_TREE_HEIGHT) {
1894                         /*
1895                         CWARN(dir->i_sb, __FUNCTION__,
1896                                      "Directory index full!\n");
1897                                      */
1898                         err = -ENOSPC;
1899                         goto cleanup;
1900                 }
1901         }
1902
1903         safe = frame;
1904
1905         /*
1906          * Lock all nodes, bottom to top.
1907          */
1908         for (frame = path->ip_frame, i = nr_splet; i >= 0; --i, --frame) {
1909                 do_corr(schedule());
1910                 lock[i] = iam_lock_htree(path->ip_container, frame->curidx,
1911                                          DLT_WRITE);
1912                 if (lock[i] == NULL) {
1913                         err = -ENOMEM;
1914                         goto cleanup;
1915                 }
1916         }
1917
1918         /*
1919          * Check for concurrent index modification.
1920          */
1921         err = iam_check_full_path(path, 1);
1922         if (err)
1923                 goto cleanup;
1924         /*
1925          * And check that the same number of nodes is to be split.
1926          */
1927         for (i = 0, frame = path->ip_frame; frame >= path->ip_frames &&
1928              dx_get_count(frame->entries) == dx_get_limit(frame->entries);
1929              --frame, ++i) {
1930                 ;
1931         }
1932         if (i != nr_splet) {
1933                 err = -EAGAIN;
1934                 goto cleanup;
1935         }
1936
1937         /* Go back down, allocating blocks, locking them, and adding them
1938          * to the transaction... */
1939         for (frame = safe + 1, i = 0; i < nr_splet; ++i, ++frame) {
1940                 bh_new[i] = iam_new_node(handle, path->ip_container,
1941                                          &newblock[i], &err);
1942                 do_corr(schedule());
1943                 if (!bh_new[i] ||
1944                     descr->id_ops->id_node_init(path->ip_container,
1945                                                 bh_new[i], 0) != 0)
1946                         goto cleanup;
1947                 new_lock[i] = iam_lock_htree(path->ip_container, newblock[i],
1948                                              DLT_WRITE);
1949                 if (new_lock[i] == NULL) {
1950                         err = -ENOMEM;
1951                         goto cleanup;
1952                 }
1953                 do_corr(schedule());
1954                 BUFFER_TRACE(frame->bh, "get_write_access");
1955                 err = ldiskfs_journal_get_write_access(handle, frame->bh);
1956                 if (err)
1957                         goto journal_error;
1958         }
1959         /* Add "safe" node to transaction too */
1960         if (safe + 1 != path->ip_frames) {
1961                 do_corr(schedule());
1962                 err = ldiskfs_journal_get_write_access(handle, safe->bh);
1963                 if (err)
1964                         goto journal_error;
1965         }
1966
1967         /* Go through nodes once more, inserting pointers */
1968         for (frame = safe + 1, i = 0; i < nr_splet; ++i, ++frame) {
1969                 unsigned count;
1970                 int idx;
1971                 struct buffer_head *bh2;
1972                 struct buffer_head *bh;
1973
1974                 entries = frame->entries;
1975                 count = dx_get_count(entries);
1976                 idx = iam_entry_diff(path, frame->at, entries);
1977
1978                 bh2 = bh_new[i];
1979                 entries2 = dx_get_entries(path, bh2->b_data, 0);
1980
1981                 bh = frame->bh;
1982                 if (frame == path->ip_frames) {
1983                         /* splitting root node. Tricky point:
1984                          *
1985                          * In the "normal" B-tree we'd split root *and* add
1986                          * new root to the tree with pointers to the old root
1987                          * and its sibling (thus introducing two new nodes).
1988                          *
1989                          * In htree it's enough to add one node, because
1990                          * capacity of the root node is smaller than that of
1991                          * non-root one.
1992                          */
1993                         struct iam_frame *frames;
1994                         struct iam_entry *next;
1995
1996                         assert_corr(i == 0);
1997
1998                         do_corr(schedule());
1999
2000                         frames = path->ip_frames;
2001                         memcpy((char *) entries2, (char *) entries,
2002                                count * iam_entry_size(path));
2003                         dx_set_limit(entries2, dx_node_limit(path));
2004
2005                         /* Set up root */
2006                         iam_lock_bh(frame->bh);
2007                         next = descr->id_ops->id_root_inc(path->ip_container,
2008                                                           path, frame);
2009                         dx_set_block(path, next, newblock[0]);
2010                         iam_unlock_bh(frame->bh);
2011
2012                         do_corr(schedule());
2013                         /* Shift frames in the path */
2014                         memmove(frames + 2, frames + 1,
2015                                 (sizeof path->ip_frames) - 2 * sizeof frames[0]);
2016                         /* Add new access path frame */
2017                         frames[1].at = iam_entry_shift(path, entries2, idx);
2018                         frames[1].entries = entries = entries2;
2019                         frames[1].bh = bh2;
2020                         assert_inv(dx_node_check(path, frame));
2021                         ++ path->ip_frame;
2022                         ++ frame;
2023                         assert_inv(dx_node_check(path, frame));
2024                         bh_new[0] = NULL; /* buffer head is "consumed" */
2025                         err = ldiskfs_handle_dirty_metadata(handle, NULL, bh2);
2026                         if (err)
2027                                 goto journal_error;
2028                         do_corr(schedule());
2029                 } else {
2030                         /* splitting non-root index node. */
2031                         struct iam_frame *parent = frame - 1;
2032
2033                         do_corr(schedule());
2034                         count = iam_shift_entries(path, frame, count,
2035                                               entries, entries2, newblock[i]);
2036                         /* Which index block gets the new entry? */
2037                         if (idx >= count) {
2038                                 int d = dx_index_is_compat(path) ? 0 : +1;
2039
2040                                 frame->at = iam_entry_shift(path, entries2,
2041                                                             idx - count + d);
2042                                 frame->entries = entries = entries2;
2043                                 frame->curidx = newblock[i];
2044                                 swap(frame->bh, bh2);
2045                                 assert_corr(lock[i + 1] != NULL);
2046                                 assert_corr(new_lock[i] != NULL);
2047                                 swap(lock[i + 1], new_lock[i]);
2048                                 bh_new[i] = bh2;
2049                                 parent->at = iam_entry_shift(path,
2050                                                              parent->at, +1);
2051                         }
2052                         assert_inv(dx_node_check(path, frame));
2053                         assert_inv(dx_node_check(path, parent));
2054                         dxtrace(dx_show_index ("node", frame->entries));
2055                         dxtrace(dx_show_index ("node",
2056                                ((struct dx_node *) bh2->b_data)->entries));
2057                         err = ldiskfs_handle_dirty_metadata(handle, NULL, bh2);
2058                         if (err)
2059                                 goto journal_error;
2060                         do_corr(schedule());
2061                         err = ldiskfs_handle_dirty_metadata(handle, NULL,
2062                                                             parent->bh);
2063                         if (err)
2064                                 goto journal_error;
2065                 }
2066                 do_corr(schedule());
2067                 err = ldiskfs_handle_dirty_metadata(handle, NULL, bh);
2068                 if (err)
2069                         goto journal_error;
2070         }
2071         /*
2072          * This function was called to make insertion of a new leaf
2073          * possible. Check that it fulfilled its obligations.
2074          */
2075         assert_corr(dx_get_count(path->ip_frame->entries) <
2076                     dx_get_limit(path->ip_frame->entries));
2077         assert_corr(lock[nr_splet] != NULL);
2078         *lh = lock[nr_splet];
2079         lock[nr_splet] = NULL;
2080         if (nr_splet > 0) {
2081                 /*
2082                  * Log ->i_size modification.
2083                  */
2084                 err = ldiskfs_mark_inode_dirty(handle, dir);
2085                 if (err)
2086                         goto journal_error;
2087         }
2088         goto cleanup;
2089 journal_error:
2090         ldiskfs_std_error(dir->i_sb, err);
2091
2092 cleanup:
2093         iam_unlock_array(path->ip_container, lock);
2094         iam_unlock_array(path->ip_container, new_lock);
2095
2096         assert_corr(err || iam_frame_is_locked(path, path->ip_frame));
2097
2098         do_corr(schedule());
2099         for (i = 0; i < ARRAY_SIZE(bh_new); ++i) {
2100                 if (bh_new[i] != NULL)
2101                         brelse(bh_new[i]);
2102         }
2103         return err;
2104 }
2105
2106 static int iam_add_rec(handle_t *handle, struct iam_iterator *it,
2107                        struct iam_path *path,
2108                        const struct iam_key *k, const struct iam_rec *r)
2109 {
2110         int err;
2111         struct iam_leaf *leaf;
2112
2113         leaf = &path->ip_leaf;
2114         assert_inv(iam_path_check(path));
2115         err = iam_txn_add(handle, path, leaf->il_bh);
2116         if (err == 0) {
2117                 do_corr(schedule());
2118                 if (!iam_leaf_can_add(leaf, k, r)) {
2119                         struct dynlock_handle *lh = NULL;
2120
2121                         do {
2122                                 assert_corr(lh == NULL);
2123                                 do_corr(schedule());
2124                                 err = split_index_node(handle, path, &lh);
2125                                 if (err == -EAGAIN) {
2126                                         assert_corr(lh == NULL);
2127
2128                                         iam_path_fini(path);
2129                                         it->ii_state = IAM_IT_DETACHED;
2130
2131                                         do_corr(schedule());
2132                                         err = iam_it_get_exact(it, k);
2133                                         if (err == -ENOENT)
2134                                                 err = +1; /* repeat split */
2135                                         else if (err == 0)
2136                                                 err = -EEXIST;
2137                                 }
2138                         } while (err > 0);
2139                         assert_inv(iam_path_check(path));
2140                         if (err == 0) {
2141                                 assert_corr(lh != NULL);
2142                                 do_corr(schedule());
2143                                 err = iam_new_leaf(handle, leaf);
2144                                 if (err == 0)
2145                                         err = iam_txn_dirty(handle, path,
2146                                                             path->ip_frame->bh);
2147                         }
2148                         iam_unlock_htree(path->ip_container, lh);
2149                         do_corr(schedule());
2150                 }
2151                 if (err == 0) {
2152                         iam_leaf_rec_add(leaf, k, r);
2153                         err = iam_txn_dirty(handle, path, leaf->il_bh);
2154                 }
2155         }
2156         assert_inv(iam_path_check(path));
2157         return err;
2158 }
2159
2160 /*
2161  * Insert new record with key @k and contents from @r, shifting records to the
2162  * right. On success, iterator is positioned on the newly inserted record.
2163  *
2164  * precondition: it->ii_flags&IAM_IT_WRITE &&
2165  *               (it_state(it) == IAM_IT_ATTACHED ||
2166  *                it_state(it) == IAM_IT_SKEWED) &&
2167  *               ergo(it_state(it) == IAM_IT_ATTACHED,
2168  *                    it_keycmp(it, k) <= 0) &&
2169  *               ergo(it_before(it), it_keycmp(it, k) > 0));
2170  * postcondition: ergo(result == 0,
2171  *                     it_state(it) == IAM_IT_ATTACHED &&
2172  *                     it_keycmp(it, k) == 0 &&
2173  *                     !memcmp(iam_it_rec_get(it), r, ...))
2174  */
2175 int iam_it_rec_insert(handle_t *h, struct iam_iterator *it,
2176                       const struct iam_key *k, const struct iam_rec *r)
2177 {
2178         int result;
2179         struct iam_path *path;
2180
2181         path = &it->ii_path;
2182
2183         assert_corr(it->ii_flags&IAM_IT_WRITE);
2184         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
2185                     it_state(it) == IAM_IT_SKEWED);
2186         assert_corr(ergo(it_state(it) == IAM_IT_ATTACHED,
2187                          it_keycmp(it, k) <= 0));
2188         assert_corr(ergo(it_before(it), it_keycmp(it, k) > 0));
2189         result = iam_add_rec(h, it, path, k, r);
2190         if (result == 0)
2191                 it->ii_state = IAM_IT_ATTACHED;
2192         assert_corr(ergo(result == 0,
2193                          it_state(it) == IAM_IT_ATTACHED &&
2194                          it_keycmp(it, k) == 0));
2195         return result;
2196 }
2197
2198 static inline int iam_idle_blocks_limit(struct inode *inode)
2199 {
2200         return (inode->i_sb->s_blocksize - sizeof(struct iam_idle_head)) >> 2;
2201 }
2202
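/*
 * Worked example for iam_idle_blocks_limit(): with a 4096-byte block size
 * and assuming struct iam_idle_head occupies 8 bytes (two __u16 fields plus
 * one __u32, as used above), the limit is (4096 - 8) >> 2 == 1022 idle block
 * numbers per idle-head block.
 */
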
2203 /*
2204  * If the leaf cannot be recycled, we lose one block for reuse. This is not
2205  * a serious issue: the cost is almost the same as not recycling at all.
2206  */
2207 static iam_ptr_t iam_index_shrink(handle_t *h, struct iam_path *p,
2208                                   struct iam_leaf *l, struct buffer_head **bh)
2209 {
2210         struct iam_container *c = p->ip_container;
2211         struct inode *inode = c->ic_object;
2212         struct iam_frame *frame = p->ip_frame;
2213         struct iam_entry *entries;
2214         struct iam_entry *pos;
2215         struct dynlock_handle *lh;
2216         int count;
2217         int rc;
2218
2219         if (c->ic_idle_failed)
2220                 return 0;
2221
2222         if (unlikely(frame == NULL))
2223                 return 0;
2224
2225         if (!iam_leaf_empty(l))
2226                 return 0;
2227
2228         lh = iam_lock_htree(c, frame->curidx, DLT_WRITE);
2229         if (lh == NULL) {
2230                 CWARN("%.16s: No memory to recycle idle blocks\n",
2231                       LDISKFS_SB(inode->i_sb)->s_es->s_volume_name);
2232                 return 0;
2233         }
2234
2235         rc = iam_txn_add(h, p, frame->bh);
2236         if (rc != 0) {
2237                 iam_unlock_htree(c, lh);
2238                 return 0;
2239         }
2240
2241         iam_lock_bh(frame->bh);
2242         entries = frame->entries;
2243         count = dx_get_count(entries);
2244         /* Do NOT shrink the last entry in the index node; it can be reused
2245          * directly by the next new node. */
2246         if (count == 2) {
2247                 iam_unlock_bh(frame->bh);
2248                 iam_unlock_htree(c, lh);
2249                 return 0;
2250         }
2251
2252         pos = iam_find_position(p, frame);
2253         /* Some new leaf nodes may have been added, or empty leaf nodes may
2254          * have been shrunk, while this delete operation was in progress.
2255          *
2256          * If the empty leaf is no longer under the current index node because
2257          * the index node has been split, just skip the empty leaf; this is rare. */
2258         if (unlikely(frame->leaf != dx_get_block(p, pos))) {
2259                 iam_unlock_bh(frame->bh);
2260                 iam_unlock_htree(c, lh);
2261                 return 0;
2262         }
2263
2264         frame->at = pos;
2265         if (frame->at < iam_entry_shift(p, entries, count - 1)) {
2266                 struct iam_entry *n = iam_entry_shift(p, frame->at, 1);
2267
2268                 memmove(frame->at, n,
2269                         (char *)iam_entry_shift(p, entries, count) - (char *)n);
2270                 frame->at_shifted = 1;
2271         }
2272         dx_set_count(entries, count - 1);
2273         iam_unlock_bh(frame->bh);
2274         rc = iam_txn_dirty(h, p, frame->bh);
2275         iam_unlock_htree(c, lh);
2276         if (rc != 0)
2277                 return 0;
2278
2279         get_bh(l->il_bh);
2280         *bh = l->il_bh;
2281         return frame->leaf;
2282 }
2283
2284 static int
2285 iam_install_idle_blocks(handle_t *h, struct iam_path *p, struct buffer_head *bh,
2286                         __u32 *idle_blocks, iam_ptr_t blk)
2287 {
2288         struct iam_container *c = p->ip_container;
2289         struct buffer_head *old = c->ic_idle_bh;
2290         struct iam_idle_head *head;
2291         int rc;
2292
2293         head = (struct iam_idle_head *)(bh->b_data);
2294         head->iih_magic = cpu_to_le16(IAM_IDLE_HEADER_MAGIC);
2295         head->iih_count = 0;
2296         head->iih_next = *idle_blocks;
2297         /* Journal write access to the bh has already been obtained. */
2298         rc = iam_txn_dirty(h, p, bh);
2299         if (rc != 0)
2300                 return rc;
2301
2302         rc = iam_txn_add(h, p, c->ic_root_bh);
2303         if (rc != 0)
2304                 return rc;
2305
2306         iam_lock_bh(c->ic_root_bh);
2307         *idle_blocks = cpu_to_le32(blk);
2308         iam_unlock_bh(c->ic_root_bh);
2309         rc = iam_txn_dirty(h, p, c->ic_root_bh);
2310         if (rc == 0) {
2311                 /* Do NOT release the old bh before the new one is assigned. */
2312                 get_bh(bh);
2313                 c->ic_idle_bh = bh;
2314                 brelse(old);
2315         } else {
2316                 iam_lock_bh(c->ic_root_bh);
2317                 *idle_blocks = head->iih_next;
2318                 iam_unlock_bh(c->ic_root_bh);
2319         }
2320         return rc;
2321 }
2322
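/*
 * The idle chain after a successful iam_install_idle_blocks() (sketch):
 *
 *   root: idle_blocks --> blk (just-freed leaf, new head)
 *                           iih_next --> previous head
 *                                          iih_next --> ... --> 0
 *
 * i.e. freed leaves are pushed onto a singly linked list anchored in the
 * root block, with c->ic_idle_bh caching the buffer of the current head.
 */
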
2323 /*
2324  * If the leaf cannot be recycled, we lose one block for reuse. This is not
2325  * a serious issue: the cost is almost the same as not recycling at all.
2326  */
2327 static void iam_recycle_leaf(handle_t *h, struct iam_path *p,
2328                              struct buffer_head *bh, iam_ptr_t blk)
2329 {
2330         struct iam_container *c = p->ip_container;
2331         struct inode *inode = c->ic_object;
2332         struct iam_idle_head *head;
2333         __u32 *idle_blocks;
2334         int count;
2335         int rc;
2336
2337         mutex_lock(&c->ic_idle_mutex);
2338         if (unlikely(c->ic_idle_failed)) {
2339                 rc = -EFAULT;
2340                 goto unlock;
2341         }
2342
2343         idle_blocks = (__u32 *)(c->ic_root_bh->b_data +
2344                                 c->ic_descr->id_root_gap +
2345                                 sizeof(struct dx_countlimit));
2346         /* It is the first idle block. */
2347         if (c->ic_idle_bh == NULL) {
2348                 rc = iam_install_idle_blocks(h, p, bh, idle_blocks, blk);
2349                 goto unlock;
2350         }
2351
2352         head = (struct iam_idle_head *)(c->ic_idle_bh->b_data);
2353         count = le16_to_cpu(head->iih_count);
2354         /* The current ic_idle_bh is full; replace it with the leaf. */
2355         if (count == iam_idle_blocks_limit(inode)) {
2356                 rc = iam_install_idle_blocks(h, p, bh, idle_blocks, blk);
2357                 goto unlock;
2358         }
2359
2360         /* Just add to ic_idle_bh. */
2361         rc = iam_txn_add(h, p, c->ic_idle_bh);
2362         if (rc != 0)
2363                 goto unlock;
2364
2365         head->iih_blks[count] = cpu_to_le32(blk);
2366         head->iih_count = cpu_to_le16(count + 1);
2367         rc = iam_txn_dirty(h, p, c->ic_idle_bh);
2368
2369 unlock:
2370         mutex_unlock(&c->ic_idle_mutex);
2371         if (rc != 0)
2372                 CWARN("%.16s: idle blocks failed, will lose the blk %u\n",
2373                       LDISKFS_SB(inode->i_sb)->s_es->s_volume_name, blk);
2374 }
2375
2376 /*
2377  * Delete record under iterator.
2378  *
2379  * precondition:  it_state(it) == IAM_IT_ATTACHED &&
2380  *                it->ii_flags&IAM_IT_WRITE &&
2381  *                it_at_rec(it)
2382  * postcondition: it_state(it) == IAM_IT_ATTACHED ||
2383  *                it_state(it) == IAM_IT_DETACHED
2384  */
2385 int iam_it_rec_delete(handle_t *h, struct iam_iterator *it)
2386 {
2387         int result;
2388         struct iam_leaf *leaf;
2389         struct iam_path *path;
2390
2391         assert_corr(it_state(it) == IAM_IT_ATTACHED &&
2392                     it->ii_flags&IAM_IT_WRITE);
2393         assert_corr(it_at_rec(it));
2394
2395         path = &it->ii_path;
2396         leaf = &path->ip_leaf;
2397
2398         assert_inv(iam_path_check(path));
2399
2400         result = iam_txn_add(h, path, leaf->il_bh);
2401         /*
2402          * no compaction for now.
2403          */
2404         if (result == 0) {
2405                 iam_rec_del(leaf, it->ii_flags&IAM_IT_MOVE);
2406                 result = iam_txn_dirty(h, path, leaf->il_bh);
2407                 if (result == 0 && iam_leaf_at_end(leaf)) {
2408                         struct buffer_head *bh = NULL;
2409                         iam_ptr_t blk;
2410
2411                         blk = iam_index_shrink(h, path, leaf, &bh);
2412                         if (it->ii_flags & IAM_IT_MOVE) {
2413                                 result = iam_it_next(it);
2414                                 if (result > 0)
2415                                         result = 0;
2416                         }
2417
2418                         if (bh != NULL) {
2419                                 iam_recycle_leaf(h, path, bh, blk);
2420                                 brelse(bh);
2421                         }
2422                 }
2423         }
2424         assert_inv(iam_path_check(path));
2425         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
2426                     it_state(it) == IAM_IT_DETACHED);
2427         return result;
2428 }
2429
2430 /*
2431  * Convert iterator to cookie.
2432  *
2433  * precondition:  it_state(it) == IAM_IT_ATTACHED &&
2434  *                iam_path_descr(it->ii_path)->id_ikey_size <= sizeof(iam_pos_t)
2435  * postcondition: it_state(it) == IAM_IT_ATTACHED
2436  */
2437 iam_pos_t iam_it_store(const struct iam_iterator *it)
2438 {
2439         iam_pos_t result;
2440
2441         assert_corr(it_state(it) == IAM_IT_ATTACHED);
2442         assert_corr(it_at_rec(it));
2443         assert_corr(iam_it_container(it)->ic_descr->id_ikey_size <=
2444                     sizeof result);
2445
2446         result = 0;
2447         return *(iam_pos_t *)iam_it_ikey_get(it, (void *)&result);
2448 }
2449
2450 /*
2451  * Restore iterator from cookie.
2452  *
2453  * precondition:  it_state(it) == IAM_IT_DETACHED && it->ii_flags&IAM_IT_MOVE &&
2454  *                iam_path_descr(it->ii_path)->id_ikey_size <= sizeof(iam_pos_t)
2455  * postcondition: ergo(result == 0, it_state(it) == IAM_IT_ATTACHED &&
2456  *                                  iam_it_store(it) == pos)
2457  */
2458 int iam_it_load(struct iam_iterator *it, iam_pos_t pos)
2459 {
2460         assert_corr(it_state(it) == IAM_IT_DETACHED &&
2461                     it->ii_flags&IAM_IT_MOVE);
2462         assert_corr(iam_it_container(it)->ic_descr->id_ikey_size <= sizeof pos);
2463         return iam_it_iget(it, (struct iam_ikey *)&pos);
2464 }
2465
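/*
 * Typical save/resume pattern for the cookie interface above (a minimal,
 * hypothetical sketch; actual callers live in the osd index iteration code):
 *
 *	iam_pos_t pos;
 *
 *	pos = iam_it_store(it);		- while attached and at a record
 *	iam_it_put(it);
 *	... later, with the iterator detached and IAM_IT_MOVE set ...
 *	rc = iam_it_load(it, pos);	- re-attach at (or after) the position
 */
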
2466 /***********************************************************************/
2467 /* invariants                                                          */
2468 /***********************************************************************/
2469
2470 static inline int ptr_inside(void *base, size_t size, void *ptr)
2471 {
2472         return (base <= ptr) && (ptr < base + size);
2473 }
2474
2475 static int iam_frame_invariant(struct iam_frame *f)
2476 {
2477         return
2478                 (f->bh != NULL &&
2479                 f->bh->b_data != NULL &&
2480                 ptr_inside(f->bh->b_data, f->bh->b_size, f->entries) &&
2481                 ptr_inside(f->bh->b_data, f->bh->b_size, f->at) &&
2482                 f->entries <= f->at);
2483 }
2484
2485 static int iam_leaf_invariant(struct iam_leaf *l)
2486 {
2487         return
2488                 l->il_bh != NULL &&
2489                 l->il_bh->b_data != NULL &&
2490                 ptr_inside(l->il_bh->b_data, l->il_bh->b_size, l->il_entries) &&
2491                 ptr_inside(l->il_bh->b_data, l->il_bh->b_size, l->il_at) &&
2492                 l->il_entries <= l->il_at;
2493 }
2494
2495 static int iam_path_invariant(struct iam_path *p)
2496 {
2497         int i;
2498
2499         if (p->ip_container == NULL ||
2500             p->ip_indirect < 0 || p->ip_indirect > DX_MAX_TREE_HEIGHT - 1 ||
2501             p->ip_frame != p->ip_frames + p->ip_indirect ||
2502             !iam_leaf_invariant(&p->ip_leaf))
2503                 return 0;
2504         for (i = 0; i < ARRAY_SIZE(p->ip_frames); ++i) {
2505                 if (i <= p->ip_indirect) {
2506                         if (!iam_frame_invariant(&p->ip_frames[i]))
2507                                 return 0;
2508                 }
2509         }
2510         return 1;
2511 }
2512
2513 int iam_it_invariant(struct iam_iterator *it)
2514 {
2515         return
2516                 (it->ii_state == IAM_IT_DETACHED ||
2517                  it->ii_state == IAM_IT_ATTACHED ||
2518                  it->ii_state == IAM_IT_SKEWED) &&
2519                 !(it->ii_flags & ~(IAM_IT_MOVE | IAM_IT_WRITE)) &&
2520                 ergo(it->ii_state == IAM_IT_ATTACHED ||
2521                      it->ii_state == IAM_IT_SKEWED,
2522                      iam_path_invariant(&it->ii_path) &&
2523                      equi(it_at_rec(it), it->ii_state == IAM_IT_SKEWED));
2524 }
2525
2526 /*
2527  * Search container @c for a record with key @k. If the record is found,
2528  * its data are copied into @r.
2529  *
2530  * Return values: 0: found, -ENOENT: not-found, -ve: error
2531  */
2532 int iam_lookup(struct iam_container *c, const struct iam_key *k,
2533                struct iam_rec *r, struct iam_path_descr *pd)
2534 {
2535         struct iam_iterator it;
2536         int result;
2537
2538         iam_it_init(&it, c, 0, pd);
2539
2540         result = iam_it_get_exact(&it, k);
2541         if (result == 0)
2542                 /*
2543                  * record with required key found, copy it into user buffer
2544                  */
2545                 iam_reccpy(&it.ii_path.ip_leaf, r);
2546         iam_it_put(&it);
2547         iam_it_fini(&it);
2548         return result;
2549 }
2550
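/*
 * Typical use of the top-level wrappers (a minimal, hypothetical sketch: the
 * container @c, path descriptor @pd, transaction handle and key/record
 * buffers are assumed to have been set up by the caller, e.g. the osd index
 * code):
 *
 *	rc = iam_lookup(c, k, r, pd);
 *	if (rc == -ENOENT)
 *		rc = iam_insert(handle, c, k, r, pd);
 */
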
2551 /*
2552  * Insert new record @r with key @k into container @c (within context of
2553  * transaction @h).
2554  *
2555  * Return values: 0: success, -ve: error, including -EEXIST when a record
2556  * with the given key is already present.
2557  *
2558  * postcondition: ergo(result == 0 || result == -EEXIST,
2559  *                                  iam_lookup(c, k, r2) == 0);
2560  */
2561 int iam_insert(handle_t *h, struct iam_container *c, const struct iam_key *k,
2562                const struct iam_rec *r, struct iam_path_descr *pd)
2563 {
2564         struct iam_iterator it;
2565         int result;
2566
2567         iam_it_init(&it, c, IAM_IT_WRITE, pd);
2568
2569         result = iam_it_get_exact(&it, k);
2570         if (result == -ENOENT)
2571                 result = iam_it_rec_insert(h, &it, k, r);
2572         else if (result == 0)
2573                 result = -EEXIST;
2574         iam_it_put(&it);
2575         iam_it_fini(&it);
2576         return result;
2577 }
2578
2579 /*
2580  * Update the record with key @k in container @c (within the context of
2581  * transaction @h); the new record contents are given by @r.
2582  *
2583  * Return values: +1: skipped because the record already has the same value,
2584  * 0: success, -ve: error, including -ENOENT if no such record is found.
2585  */
2586 int iam_update(handle_t *h, struct iam_container *c, const struct iam_key *k,
2587                const struct iam_rec *r, struct iam_path_descr *pd)
2588 {
2589         struct iam_iterator it;
2590         struct iam_leaf *folio;
2591         int result;
2592
2593         iam_it_init(&it, c, IAM_IT_WRITE, pd);
2594
2595         result = iam_it_get_exact(&it, k);
2596         if (result == 0) {
2597                 folio = &it.ii_path.ip_leaf;
2598                 result = iam_leaf_ops(folio)->rec_eq(folio, r);
2599                 if (result == 0)
2600                         iam_it_rec_set(h, &it, r);
2601                 else
2602                         result = 1;
2603         }
2604         iam_it_put(&it);
2605         iam_it_fini(&it);
2606         return result;
2607 }
2608
2609 /*
2610  * Delete existing record with key @k.
2611  *
2612  * Return values: 0: success, -ENOENT: not-found, -ve: other error.
2613  *
2614  * postcondition: ergo(result == 0 || result == -ENOENT,
2615  *                                 !iam_lookup(c, k, *));
2616  */
2617 int iam_delete(handle_t *h, struct iam_container *c, const struct iam_key *k,
2618                struct iam_path_descr *pd)
2619 {
2620         struct iam_iterator it;
2621         int result;
2622
2623         iam_it_init(&it, c, IAM_IT_WRITE, pd);
2624
2625         result = iam_it_get_exact(&it, k);
2626         if (result == 0)
2627                 iam_it_rec_delete(h, &it);
2628         iam_it_put(&it);
2629         iam_it_fini(&it);
2630         return result;
2631 }
2632
2633 int iam_root_limit(int rootgap, int blocksize, int size)
2634 {
2635         int limit;
2636         int nlimit;
2637
2638         limit = (blocksize - rootgap) / size;
2639         nlimit = blocksize / size;
2640         if (limit == nlimit)
2641                 limit--;
2642         return limit;
2643 }
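
/*
 * Worked example for iam_root_limit() with illustrative numbers: for a
 * 4096-byte block, rootgap == 32 and size == 8, limit == (4096 - 32) / 8 ==
 * 508 while nlimit == 4096 / 8 == 512, so 508 is returned as-is. If both
 * quotients happened to be equal, the limit would be decremented so that the
 * root limit stays strictly below the full-node limit (split_index_node()
 * above asserts dx_root_limit() < dx_node_limit()).
 */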