Whamcloud - gitweb
LU-15154 kernel: kernel update SLES15 SP3 [5.3.18-59.27.1]
[fs/lustre-release.git] / lustre / osd-ldiskfs / osd_iam.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2011, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  *
31  * iam.c
32  * Top-level entry points into iam module
33  *
34  * Author: Wang Di <wangdi@clusterfs.com>
35  * Author: Nikita Danilov <nikita@clusterfs.com>
36  */
37
38 /*
39  * iam: big theory statement.
40  *
41  * iam (Index Access Module) is a module providing abstraction of persistent
42  * transactional container on top of generalized ldiskfs htree.
43  *
44  * iam supports:
45  *
46  *     - key, pointer, and record size specifiable per container.
47  *
48  *     - trees taller than 2 index levels.
49  *
50  *     - read/write to existing ldiskfs htree directories as iam containers.
51  *
52  * iam container is a tree, consisting of leaf nodes containing keys and
53  * records stored in this container, and index nodes, containing keys and
54  * pointers to leaf or index nodes.
55  *
56  * iam does not work with keys directly, instead it calls user-supplied key
57  * comparison function (->dpo_keycmp()).
58  *
59  * Pointers are (currently) interpreted as logical offsets (measured in
60  * blocksful) within underlying flat file on top of which iam tree lives.
61  *
62  * On-disk format:
63  *
64  * iam mostly tries to reuse existing htree formats.
65  *
66  * Format of index node:
67  *
68  * +-----+-------+-------+-------+------+-------+------------+
69  * |     | count |       |       |      |       |            |
70  * | gap |   /   | entry | entry | .... | entry | free space |
71  * |     | limit |       |       |      |       |            |
72  * +-----+-------+-------+-------+------+-------+------------+
73  *
74  *       gap           this part of node is never accessed by iam code. It
75  *                     exists for binary compatibility with ldiskfs htree (that,
76  *                     in turn, stores fake struct ext2_dirent for ext2
77  *                     compatibility), and to keep some unspecified per-node
78  *                     data. Gap can be different for root and non-root index
79  *                     nodes. Gap size can be specified for each container
80  *                     (gap of 0 is allowed).
81  *
82  *       count/limit   current number of entries in this node, and the maximal
83  *                     number of entries that can fit into node. count/limit
84  *                     has the same size as entry, and is itself counted in
85  *                     count.
86  *
87  *       entry         index entry: consists of a key immediately followed by
88  *                     a pointer to a child node. Size of a key and size of a
89  *                     pointer depends on container. Entry has neither
90  *                     alignment nor padding.
91  *
92  *       free space    portion of node new entries are added to
93  *
94  * Entries in index node are sorted by their key value.
95  *
96  * Format of a leaf node is not specified. Generic iam code accesses leaf
97  * nodes through ->id_leaf methods in struct iam_descr.
98  *
99  * The IAM root block is a special node, which contains the IAM descriptor.
100  * It is on disk format:
101  *
102  * +---------+-------+--------+---------+-------+------+-------+------------+
103  * |IAM desc | count |  idle  |         |       |      |       |            |
104  * |(fix/var)|   /   | blocks | padding | entry | .... | entry | free space |
105  * |         | limit |        |         |       |      |       |            |
106  * +---------+-------+--------+---------+-------+------+-------+------------+
107  *
108  * The padding length is calculated with the parameters in the IAM descriptor.
109  *
110  * The field "idle_blocks" is used to record empty leaf nodes, which have not
111  * been released but all contained entries in them have been removed. Usually,
112  * the idle blocks in the IAM should be reused when need to allocate new leaf
113  * nodes for new entries, it depends on the IAM hash functions to map the new
114  * entries to these idle blocks. Unfortunately, it is not easy to design some
115  * hash functions for such clever mapping, especially considering the insert/
116  * lookup performance.
117  *
118  * So the IAM recycles the empty leaf nodes, and put them into a per-file based
119  * idle blocks pool. If need some new leaf node, it will try to take idle block
120  * from such pool with priority, in spite of how the IAM hash functions to map
121  * the entry.
122  *
123  * The idle blocks pool is organized as a series of tables, and each table
124  * can be described as following (on-disk format):
125  *
126  * +---------+---------+---------+---------+------+---------+-------+
127  * |  magic  |  count  |  next   |  logic  |      |  logic  | free  |
128  * |(16 bits)|(16 bits)|  table  |  blk #  | .... |  blk #  | space |
129  * |         |         |(32 bits)|(32 bits)|      |(32 bits)|       |
130  * +---------+---------+---------+---------+------+---------+-------+
131  *
132  * The logic blk# for the first table is stored in the root node "idle_blocks".
133  *
134  */
135
136 #include <linux/module.h>
137 #include <linux/fs.h>
138 #include <linux/pagemap.h>
139 #include <linux/time.h>
140 #include <linux/fcntl.h>
141 #include <linux/stat.h>
142 #include <linux/string.h>
143 #include <linux/quotaops.h>
144 #include <linux/buffer_head.h>
145
146 #include <ldiskfs/ldiskfs.h>
147 #include <ldiskfs/xattr.h>
148 #undef ENTRY
149
150 #include "osd_internal.h"
151
152 #include <ldiskfs/acl.h>
153
154 static struct buffer_head *
155 iam_load_idle_blocks(struct iam_container *c, iam_ptr_t blk)
156 {
157         struct inode *inode = c->ic_object;
158         struct iam_idle_head *head;
159         struct buffer_head *bh;
160
161         LASSERT(mutex_is_locked(&c->ic_idle_mutex));
162
163         if (blk == 0)
164                 return NULL;
165
166         bh = __ldiskfs_bread(NULL, inode, blk, 0);
167         if (IS_ERR_OR_NULL(bh)) {
168                 CERROR("%s: cannot load idle blocks, blk = %u: rc = %ld\n",
169                        osd_ino2name(inode), blk, bh ? PTR_ERR(bh) : -EIO);
170                 c->ic_idle_failed = 1;
171                 if (bh == NULL)
172                         bh = ERR_PTR(-EIO);
173                 return bh;
174         }
175
176         head = (struct iam_idle_head *)(bh->b_data);
177         if (le16_to_cpu(head->iih_magic) != IAM_IDLE_HEADER_MAGIC) {
178                 int rc = -EBADF;
179
180                 CERROR("%s: invalid idle block head, blk = %u, magic = %x: rc = %d\n",
181                        osd_ino2name(inode), blk, le16_to_cpu(head->iih_magic),
182                        rc);
183                 brelse(bh);
184                 c->ic_idle_failed = 1;
185                 return ERR_PTR(rc);
186         }
187
188         return bh;
189 }
190
191 /*
192  * Determine format of given container. This is done by scanning list of
193  * registered formats and calling ->if_guess() method of each in turn.
194  */
195 static int iam_format_guess(struct iam_container *c)
196 {
197         int result;
198
199         result = iam_lvar_guess(c);
200         if (result)
201                 result = iam_lfix_guess(c);
202
203         if (result == 0) {
204                 struct buffer_head *bh;
205                 __u32 *idle_blocks;
206
207                 LASSERT(c->ic_root_bh != NULL);
208
209                 idle_blocks = (__u32 *)(c->ic_root_bh->b_data +
210                                         c->ic_descr->id_root_gap +
211                                         sizeof(struct dx_countlimit));
212                 mutex_lock(&c->ic_idle_mutex);
213                 bh = iam_load_idle_blocks(c, le32_to_cpu(*idle_blocks));
214                 if (bh != NULL && IS_ERR(bh))
215                         result = PTR_ERR(bh);
216                 else
217                         c->ic_idle_bh = bh;
218                 mutex_unlock(&c->ic_idle_mutex);
219         }
220
221         return result;
222 }
223
224 /*
225  * Initialize container @c.
226  */
227 int iam_container_init(struct iam_container *c,
228                        struct iam_descr *descr, struct inode *inode)
229 {
230         memset(c, 0, sizeof *c);
231         c->ic_descr = descr;
232         c->ic_object = inode;
233         init_rwsem(&c->ic_sem);
234         dynlock_init(&c->ic_tree_lock);
235         mutex_init(&c->ic_idle_mutex);
236         return 0;
237 }
238
239 /*
240  * Determine container format.
241  */
242 int iam_container_setup(struct iam_container *c)
243 {
244         return iam_format_guess(c);
245 }
246
247 /*
248  * Finalize container @c, release all resources.
249  */
250 void iam_container_fini(struct iam_container *c)
251 {
252         brelse(c->ic_idle_bh);
253         c->ic_idle_bh = NULL;
254         brelse(c->ic_root_bh);
255         c->ic_root_bh = NULL;
256 }
257
258 void iam_path_init(struct iam_path *path, struct iam_container *c,
259                    struct iam_path_descr *pd)
260 {
261         memset(path, 0, sizeof *path);
262         path->ip_container = c;
263         path->ip_frame = path->ip_frames;
264         path->ip_data = pd;
265         path->ip_leaf.il_path = path;
266 }
267
268 static void iam_leaf_fini(struct iam_leaf *leaf);
269
270 void iam_path_release(struct iam_path *path)
271 {
272         int i;
273
274         for (i = 0; i < ARRAY_SIZE(path->ip_frames); i++) {
275                 if (path->ip_frames[i].bh != NULL) {
276                         path->ip_frames[i].at_shifted = 0;
277                         brelse(path->ip_frames[i].bh);
278                         path->ip_frames[i].bh = NULL;
279                 }
280         }
281 }
282
283 void iam_path_fini(struct iam_path *path)
284 {
285         iam_leaf_fini(&path->ip_leaf);
286         iam_path_release(path);
287 }
288
289
290 void iam_path_compat_init(struct iam_path_compat *path, struct inode *inode)
291 {
292         int i;
293
294         path->ipc_hinfo = &path->ipc_hinfo_area;
295         for (i = 0; i < ARRAY_SIZE(path->ipc_scratch); ++i)
296                 path->ipc_descr.ipd_key_scratch[i] =
297                         (struct iam_ikey *)&path->ipc_scratch[i];
298
299         iam_path_init(&path->ipc_path, &path->ipc_container, &path->ipc_descr);
300 }
301
302 void iam_path_compat_fini(struct iam_path_compat *path)
303 {
304         iam_path_fini(&path->ipc_path);
305 }
306
307 /*
308  * Helper function initializing iam_path_descr and its key scratch area.
309  */
310 struct iam_path_descr *iam_ipd_alloc(void *area, int keysize)
311 {
312         struct iam_path_descr *ipd;
313         void *karea;
314         int i;
315
316         ipd = area;
317         karea = ipd + 1;
318         for (i = 0; i < ARRAY_SIZE(ipd->ipd_key_scratch); ++i, karea += keysize)
319                 ipd->ipd_key_scratch[i] = karea;
320         return ipd;
321 }
322
323 void iam_ipd_free(struct iam_path_descr *ipd)
324 {
325 }
326
327 int iam_node_read(struct iam_container *c, iam_ptr_t ptr,
328                   handle_t *h, struct buffer_head **bh)
329 {
330         /*
331          * NB: it can be called by iam_lfix_guess() which is still at
332          * very early stage, c->ic_root_bh and c->ic_descr->id_ops
333          * haven't been intialized yet.
334          * Also, we don't have this for IAM dir.
335          */
336         if (c->ic_root_bh != NULL &&
337             c->ic_descr->id_ops->id_root_ptr(c) == ptr) {
338                 get_bh(c->ic_root_bh);
339                 *bh = c->ic_root_bh;
340                 return 0;
341         }
342
343         *bh = __ldiskfs_bread(h, c->ic_object, (int)ptr, 0);
344         if (IS_ERR(*bh))
345                 return PTR_ERR(*bh);
346
347         if (*bh == NULL)
348                 return -EIO;
349
350         return 0;
351 }
352
353 /*
354  * Return pointer to current leaf record. Pointer is valid while corresponding
355  * leaf node is locked and pinned.
356  */
357 static struct iam_rec *iam_leaf_rec(const struct iam_leaf *leaf)
358 {
359         return iam_leaf_ops(leaf)->rec(leaf);
360 }
361
362 /*
363  * Return pointer to the current leaf key. This function returns pointer to
364  * the key stored in node.
365  *
366  * Caller should assume that returned pointer is only valid while leaf node is
367  * pinned and locked.
368  */
369 static struct iam_key *iam_leaf_key(const struct iam_leaf *leaf)
370 {
371         return iam_leaf_ops(leaf)->key(leaf);
372 }
373
374 static int iam_leaf_key_size(const struct iam_leaf *leaf)
375 {
376         return iam_leaf_ops(leaf)->key_size(leaf);
377 }
378
379 static struct iam_ikey *iam_leaf_ikey(const struct iam_leaf *leaf,
380                                       struct iam_ikey *key)
381 {
382         return iam_leaf_ops(leaf)->ikey(leaf, key);
383 }
384
385 static int iam_leaf_keycmp(const struct iam_leaf *leaf,
386                            const struct iam_key *key)
387 {
388         return iam_leaf_ops(leaf)->key_cmp(leaf, key);
389 }
390
391 static int iam_leaf_keyeq(const struct iam_leaf *leaf,
392                           const struct iam_key *key)
393 {
394         return iam_leaf_ops(leaf)->key_eq(leaf, key);
395 }
396
397 #if LDISKFS_INVARIANT_ON
398 static int iam_path_check(struct iam_path *p)
399 {
400         int i;
401         int result;
402         struct iam_frame *f;
403         struct iam_descr *param;
404
405         result = 1;
406         param = iam_path_descr(p);
407         for (i = 0; result && i < ARRAY_SIZE(p->ip_frames); ++i) {
408                 f = &p->ip_frames[i];
409                 if (f->bh != NULL) {
410                         result = dx_node_check(p, f);
411                         if (result)
412                                 result = !param->id_ops->id_node_check(p, f);
413                 }
414         }
415         if (result && p->ip_leaf.il_bh != NULL)
416                 result = 1;
417         if (result == 0)
418                 ldiskfs_std_error(iam_path_obj(p)->i_sb, result);
419
420         return result;
421 }
422 #endif
423
424 static int iam_leaf_load(struct iam_path *path)
425 {
426         iam_ptr_t block;
427         int err;
428         struct iam_container *c;
429         struct buffer_head *bh;
430         struct iam_leaf *leaf;
431         struct iam_descr *descr;
432
433         c     = path->ip_container;
434         leaf  = &path->ip_leaf;
435         descr = iam_path_descr(path);
436         block = path->ip_frame->leaf;
437         if (block == 0) {
438                 /* XXX bug 11027 */
439                 printk(KERN_EMERG "wrong leaf: %lu %d [%p %p %p]\n",
440                        (long unsigned)path->ip_frame->leaf,
441                        dx_get_count(dx_node_get_entries(path, path->ip_frame)),
442                        path->ip_frames[0].bh, path->ip_frames[1].bh,
443                        path->ip_frames[2].bh);
444         }
445         err = descr->id_ops->id_node_read(c, block, NULL, &bh);
446         if (err == 0) {
447                 leaf->il_bh = bh;
448                 leaf->il_curidx = block;
449                 err = iam_leaf_ops(leaf)->init(leaf);
450         }
451         return err;
452 }
453
454 static void iam_unlock_htree(struct iam_container *ic,
455                              struct dynlock_handle *lh)
456 {
457         if (lh != NULL)
458                 dynlock_unlock(&ic->ic_tree_lock, lh);
459 }
460
461
462 static void iam_leaf_unlock(struct iam_leaf *leaf)
463 {
464         if (leaf->il_lock != NULL) {
465                 iam_unlock_htree(iam_leaf_container(leaf),
466                                  leaf->il_lock);
467                 do_corr(schedule());
468                 leaf->il_lock = NULL;
469         }
470 }
471
472 static void iam_leaf_fini(struct iam_leaf *leaf)
473 {
474         if (leaf->il_path != NULL) {
475                 iam_leaf_unlock(leaf);
476                 iam_leaf_ops(leaf)->fini(leaf);
477                 if (leaf->il_bh) {
478                         brelse(leaf->il_bh);
479                         leaf->il_bh = NULL;
480                         leaf->il_curidx = 0;
481                 }
482         }
483 }
484
485 static void iam_leaf_start(struct iam_leaf *folio)
486 {
487         iam_leaf_ops(folio)->start(folio);
488 }
489
490 void iam_leaf_next(struct iam_leaf *folio)
491 {
492         iam_leaf_ops(folio)->next(folio);
493 }
494
495 static void iam_leaf_rec_add(struct iam_leaf *leaf, const struct iam_key *key,
496                              const struct iam_rec *rec)
497 {
498         iam_leaf_ops(leaf)->rec_add(leaf, key, rec);
499 }
500
501 static void iam_rec_del(struct iam_leaf *leaf, int shift)
502 {
503         iam_leaf_ops(leaf)->rec_del(leaf, shift);
504 }
505
506 int iam_leaf_at_end(const struct iam_leaf *leaf)
507 {
508         return iam_leaf_ops(leaf)->at_end(leaf);
509 }
510
511 static void iam_leaf_split(struct iam_leaf *l, struct buffer_head **bh,
512                            iam_ptr_t nr)
513 {
514         iam_leaf_ops(l)->split(l, bh, nr);
515 }
516
517 static inline int iam_leaf_empty(struct iam_leaf *l)
518 {
519         return iam_leaf_ops(l)->leaf_empty(l);
520 }
521
522 int iam_leaf_can_add(const struct iam_leaf *l,
523                      const struct iam_key *k, const struct iam_rec *r)
524 {
525         return iam_leaf_ops(l)->can_add(l, k, r);
526 }
527
528 static int iam_txn_dirty(handle_t *handle,
529                          struct iam_path *path, struct buffer_head *bh)
530 {
531         int result;
532
533         result = ldiskfs_handle_dirty_metadata(handle, NULL, bh);
534         if (result != 0)
535                 ldiskfs_std_error(iam_path_obj(path)->i_sb, result);
536         return result;
537 }
538
539 static int iam_txn_add(handle_t *handle,
540                        struct iam_path *path, struct buffer_head *bh)
541 {
542         int result;
543
544         result = ldiskfs_journal_get_write_access(handle, bh);
545         if (result != 0)
546                 ldiskfs_std_error(iam_path_obj(path)->i_sb, result);
547         return result;
548 }
549
550 /***********************************************************************/
551 /* iterator interface                                                  */
552 /***********************************************************************/
553
554 static enum iam_it_state it_state(const struct iam_iterator *it)
555 {
556         return it->ii_state;
557 }
558
559 /*
560  * Helper function returning scratch key.
561  */
562 static struct iam_container *iam_it_container(const struct iam_iterator *it)
563 {
564         return it->ii_path.ip_container;
565 }
566
567 static inline int it_keycmp(const struct iam_iterator *it,
568                             const struct iam_key *k)
569 {
570         return iam_leaf_keycmp(&it->ii_path.ip_leaf, k);
571 }
572
573 static inline int it_keyeq(const struct iam_iterator *it,
574                            const struct iam_key *k)
575 {
576         return iam_leaf_keyeq(&it->ii_path.ip_leaf, k);
577 }
578
579 static int it_ikeycmp(const struct iam_iterator *it, const struct iam_ikey *ik)
580 {
581         return iam_ikeycmp(it->ii_path.ip_container,
582                            iam_leaf_ikey(&it->ii_path.ip_leaf,
583                                         iam_path_ikey(&it->ii_path, 0)), ik);
584 }
585
586 static inline int it_at_rec(const struct iam_iterator *it)
587 {
588         return !iam_leaf_at_end(&it->ii_path.ip_leaf);
589 }
590
591 static inline int it_before(const struct iam_iterator *it)
592 {
593         return it_state(it) == IAM_IT_SKEWED && it_at_rec(it);
594 }
595
596 /*
597  * Helper wrapper around iam_it_get(): returns 0 (success) only when record
598  * with exactly the same key as asked is found.
599  */
600 static int iam_it_get_exact(struct iam_iterator *it, const struct iam_key *k)
601 {
602         int result;
603
604         result = iam_it_get(it, k);
605         if (result > 0)
606                 result = 0;
607         else if (result == 0)
608                 /*
609                  * Return -ENOENT if cursor is located above record with a key
610                  * different from one specified, or in the empty leaf.
611                  *
612                  * XXX returning -ENOENT only works if iam_it_get() never
613                  * returns -ENOENT as a legitimate error.
614                  */
615                 result = -ENOENT;
616         return result;
617 }
618
619 void iam_container_write_lock(struct iam_container *ic)
620 {
621         down_write(&ic->ic_sem);
622 }
623
624 void iam_container_write_unlock(struct iam_container *ic)
625 {
626         up_write(&ic->ic_sem);
627 }
628
629 void iam_container_read_lock(struct iam_container *ic)
630 {
631         down_read(&ic->ic_sem);
632 }
633
634 void iam_container_read_unlock(struct iam_container *ic)
635 {
636         up_read(&ic->ic_sem);
637 }
638
639 /*
640  * Initialize iterator to IAM_IT_DETACHED state.
641  *
642  * postcondition: it_state(it) == IAM_IT_DETACHED
643  */
644 int  iam_it_init(struct iam_iterator *it, struct iam_container *c, __u32 flags,
645                  struct iam_path_descr *pd)
646 {
647         memset(it, 0, sizeof *it);
648         it->ii_flags  = flags;
649         it->ii_state  = IAM_IT_DETACHED;
650         iam_path_init(&it->ii_path, c, pd);
651         return 0;
652 }
653
654 /*
655  * Finalize iterator and release all resources.
656  *
657  * precondition: it_state(it) == IAM_IT_DETACHED
658  */
659 void iam_it_fini(struct iam_iterator *it)
660 {
661         assert_corr(it_state(it) == IAM_IT_DETACHED);
662         iam_path_fini(&it->ii_path);
663 }
664
665 /*
666  * this locking primitives are used to protect parts
667  * of dir's htree. protection unit is block: leaf or index
668  */
669 static struct dynlock_handle *iam_lock_htree(struct iam_container *ic,
670                                              unsigned long value,
671                                              enum dynlock_type lt)
672 {
673         return dynlock_lock(&ic->ic_tree_lock, value, lt, GFP_NOFS);
674 }
675
676 static int iam_index_lock(struct iam_path *path, struct dynlock_handle **lh)
677 {
678         struct iam_frame *f;
679
680         for (f = path->ip_frame; f >= path->ip_frames; --f, ++lh) {
681                 do_corr(schedule());
682                 *lh = iam_lock_htree(path->ip_container, f->curidx, DLT_READ);
683                 if (*lh == NULL)
684                         return -ENOMEM;
685         }
686         return 0;
687 }
688
689 /*
690  * Fast check for frame consistency.
691  */
692 static int iam_check_fast(struct iam_path *path, struct iam_frame *frame)
693 {
694         struct iam_container *bag;
695         struct iam_entry *next;
696         struct iam_entry *last;
697         struct iam_entry *entries;
698         struct iam_entry *at;
699
700         bag = path->ip_container;
701         at = frame->at;
702         entries = frame->entries;
703         last = iam_entry_shift(path, entries, dx_get_count(entries) - 1);
704
705         if (unlikely(at > last))
706                 return -EAGAIN;
707
708         if (unlikely(dx_get_block(path, at) != frame->leaf))
709                 return -EAGAIN;
710
711         if (unlikely(iam_ikeycmp(bag, iam_ikey_at(path, at),
712                      path->ip_ikey_target) > 0))
713                 return -EAGAIN;
714
715         next = iam_entry_shift(path, at, +1);
716         if (next <= last) {
717                 if (unlikely(iam_ikeycmp(bag, iam_ikey_at(path, next),
718                                          path->ip_ikey_target) <= 0))
719                         return -EAGAIN;
720         }
721         return 0;
722 }
723
724 int dx_index_is_compat(struct iam_path *path)
725 {
726         return iam_path_descr(path) == NULL;
727 }
728
729 /*
730  * dx_find_position
731  *
732  * search position of specified hash in index
733  *
734  */
735
736 static struct iam_entry *iam_find_position(struct iam_path *path,
737                                            struct iam_frame *frame)
738 {
739         int count;
740         struct iam_entry *p;
741         struct iam_entry *q;
742         struct iam_entry *m;
743
744         count = dx_get_count(frame->entries);
745         assert_corr(count && count <= dx_get_limit(frame->entries));
746         p = iam_entry_shift(path, frame->entries,
747                             dx_index_is_compat(path) ? 1 : 2);
748         q = iam_entry_shift(path, frame->entries, count - 1);
749         while (p <= q) {
750                 m = iam_entry_shift(path, p, iam_entry_diff(path, q, p) / 2);
751                 if (iam_ikeycmp(path->ip_container, iam_ikey_at(path, m),
752                                 path->ip_ikey_target) > 0)
753                         q = iam_entry_shift(path, m, -1);
754                 else
755                         p = iam_entry_shift(path, m, +1);
756         }
757         return iam_entry_shift(path, p, -1);
758 }
759
760
761
762 static iam_ptr_t iam_find_ptr(struct iam_path *path, struct iam_frame *frame)
763 {
764         return dx_get_block(path, iam_find_position(path, frame));
765 }
766
767 void iam_insert_key(struct iam_path *path, struct iam_frame *frame,
768                     const struct iam_ikey *key, iam_ptr_t ptr)
769 {
770         struct iam_entry *entries = frame->entries;
771         struct iam_entry *new = iam_entry_shift(path, frame->at, +1);
772         int count = dx_get_count(entries);
773
774         /*
775          * Unfortunately we cannot assert this, as this function is sometimes
776          * called by VFS under i_sem and without pdirops lock.
777          */
778         assert_corr(1 || iam_frame_is_locked(path, frame));
779         assert_corr(count < dx_get_limit(entries));
780         assert_corr(frame->at < iam_entry_shift(path, entries, count));
781         assert_inv(dx_node_check(path, frame));
782         /* Prevent memory corruption outside of buffer_head */
783         BUG_ON(count >= dx_get_limit(entries));
784         BUG_ON((char *)iam_entry_shift(path, entries, count + 1) >
785                (frame->bh->b_data + frame->bh->b_size));
786
787         memmove(iam_entry_shift(path, new, 1), new,
788                 (char *)iam_entry_shift(path, entries, count) - (char *)new);
789         dx_set_ikey(path, new, key);
790         dx_set_block(path, new, ptr);
791         dx_set_count(entries, count + 1);
792
793         BUG_ON(count > dx_get_limit(entries));
794         assert_inv(dx_node_check(path, frame));
795 }
796
797 void iam_insert_key_lock(struct iam_path *path, struct iam_frame *frame,
798                          const struct iam_ikey *key, iam_ptr_t ptr)
799 {
800         iam_lock_bh(frame->bh);
801         iam_insert_key(path, frame, key, ptr);
802         iam_unlock_bh(frame->bh);
803 }
804 /*
805  * returns 0 if path was unchanged, -EAGAIN otherwise.
806  */
807 static int iam_check_path(struct iam_path *path, struct iam_frame *frame)
808 {
809         int equal;
810
811         iam_lock_bh(frame->bh);
812         equal = iam_check_fast(path, frame) == 0 ||
813                 frame->leaf == iam_find_ptr(path, frame);
814         DX_DEVAL(iam_lock_stats.dls_bh_again += !equal);
815         iam_unlock_bh(frame->bh);
816
817         return equal ? 0 : -EAGAIN;
818 }
819
820 static int iam_lookup_try(struct iam_path *path)
821 {
822         u32 ptr;
823         int err = 0;
824         int i;
825
826         struct iam_descr *param;
827         struct iam_frame *frame;
828         struct iam_container *c;
829
830         param = iam_path_descr(path);
831         c = path->ip_container;
832
833         ptr = param->id_ops->id_root_ptr(c);
834         for (frame = path->ip_frames, i = 0; i <= path->ip_indirect;
835              ++frame, ++i) {
836                 err = param->id_ops->id_node_read(c, (iam_ptr_t)ptr, NULL,
837                                                   &frame->bh);
838                 do_corr(schedule());
839
840                 iam_lock_bh(frame->bh);
841                 /*
842                  * node must be initialized under bh lock because concurrent
843                  * creation procedure may change it and iam_lookup_try() will
844                  * see obsolete tree height. -bzzz
845                  */
846                 if (err != 0)
847                         break;
848
849                 if (LDISKFS_INVARIANT_ON) {
850                         err = param->id_ops->id_node_check(path, frame);
851                         if (err != 0)
852                                 break;
853                 }
854
855                 err = param->id_ops->id_node_load(path, frame);
856                 if (err != 0)
857                         break;
858
859                 assert_inv(dx_node_check(path, frame));
860                 /*
861                  * splitting may change root index block and move hash we're
862                  * looking for into another index block so, we have to check
863                  * this situation and repeat from begining if path got changed
864                  * -bzzz
865                  */
866                 if (i > 0) {
867                         err = iam_check_path(path, frame - 1);
868                         if (err != 0)
869                                 break;
870                 }
871
872                 frame->at = iam_find_position(path, frame);
873                 frame->curidx = ptr;
874                 frame->leaf = ptr = dx_get_block(path, frame->at);
875
876                 iam_unlock_bh(frame->bh);
877                 do_corr(schedule());
878         }
879         if (err != 0)
880                 iam_unlock_bh(frame->bh);
881         path->ip_frame = --frame;
882         return err;
883 }
884
885 static int __iam_path_lookup(struct iam_path *path)
886 {
887         int err;
888         int i;
889
890         for (i = 0; i < DX_MAX_TREE_HEIGHT; ++ i)
891                 assert(path->ip_frames[i].bh == NULL);
892
893         do {
894                 err = iam_lookup_try(path);
895                 do_corr(schedule());
896                 if (err != 0)
897                         iam_path_fini(path);
898         } while (err == -EAGAIN);
899
900         return err;
901 }
902
903 /*
904  * returns 0 if path was unchanged, -EAGAIN otherwise.
905  */
906 static int iam_check_full_path(struct iam_path *path, int search)
907 {
908         struct iam_frame *bottom;
909         struct iam_frame *scan;
910         int i;
911         int result;
912
913         do_corr(schedule());
914
915         for (bottom = path->ip_frames, i = 0;
916              i < DX_MAX_TREE_HEIGHT && bottom->bh != NULL; ++bottom, ++i) {
917                 ; /* find last filled in frame */
918         }
919
920         /*
921          * Lock frames, bottom to top.
922          */
923         for (scan = bottom - 1; scan >= path->ip_frames; --scan)
924                 iam_lock_bh(scan->bh);
925         /*
926          * Check them top to bottom.
927          */
928         result = 0;
929         for (scan = path->ip_frames; scan < bottom; ++scan) {
930                 struct iam_entry *pos;
931
932                 if (search) {
933                         if (iam_check_fast(path, scan) == 0)
934                                 continue;
935
936                         pos = iam_find_position(path, scan);
937                         if (scan->leaf != dx_get_block(path, pos)) {
938                                 result = -EAGAIN;
939                                 break;
940                         }
941                         scan->at = pos;
942                 } else {
943                         pos = iam_entry_shift(path, scan->entries,
944                                               dx_get_count(scan->entries) - 1);
945                         if (scan->at > pos ||
946                             scan->leaf != dx_get_block(path, scan->at)) {
947                                 result = -EAGAIN;
948                                 break;
949                         }
950                 }
951         }
952
953         /*
954          * Unlock top to bottom.
955          */
956         for (scan = path->ip_frames; scan < bottom; ++scan)
957                 iam_unlock_bh(scan->bh);
958         DX_DEVAL(iam_lock_stats.dls_bh_full_again += !!result);
959         do_corr(schedule());
960
961         return result;
962 }
963
964
965 /*
966  * Performs path lookup and returns with found leaf (if any) locked by htree
967  * lock.
968  */
969 static int iam_lookup_lock(struct iam_path *path,
970                            struct dynlock_handle **dl, enum dynlock_type lt)
971 {
972         int result;
973
974         while ((result = __iam_path_lookup(path)) == 0) {
975                 do_corr(schedule());
976                 *dl = iam_lock_htree(path->ip_container, path->ip_frame->leaf,
977                                      lt);
978                 if (*dl == NULL) {
979                         iam_path_fini(path);
980                         result = -ENOMEM;
981                         break;
982                 }
983                 do_corr(schedule());
984                 /*
985                  * while locking leaf we just found may get split so we need
986                  * to check this -bzzz
987                  */
988                 if (iam_check_full_path(path, 1) == 0)
989                         break;
990                 iam_unlock_htree(path->ip_container, *dl);
991                 *dl = NULL;
992                 iam_path_fini(path);
993         }
994         return result;
995 }
996 /*
997  * Performs tree top-to-bottom traversal starting from root, and loads leaf
998  * node.
999  */
1000 static int iam_path_lookup(struct iam_path *path, int index)
1001 {
1002         struct iam_leaf  *leaf;
1003         int result;
1004
1005         leaf = &path->ip_leaf;
1006         result = iam_lookup_lock(path, &leaf->il_lock, DLT_WRITE);
1007         assert_inv(iam_path_check(path));
1008         do_corr(schedule());
1009         if (result == 0) {
1010                 result = iam_leaf_load(path);
1011                 if (result == 0) {
1012                         do_corr(schedule());
1013                         if (index)
1014                                 result = iam_leaf_ops(leaf)->
1015                                         ilookup(leaf, path->ip_ikey_target);
1016                         else
1017                                 result = iam_leaf_ops(leaf)->
1018                                         lookup(leaf, path->ip_key_target);
1019                         do_corr(schedule());
1020                 }
1021                 if (result < 0)
1022                         iam_leaf_unlock(leaf);
1023         }
1024         return result;
1025 }
1026
1027 /*
1028  * Common part of iam_it_{i,}get().
1029  */
1030 static int __iam_it_get(struct iam_iterator *it, int index)
1031 {
1032         int result;
1033
1034         assert_corr(it_state(it) == IAM_IT_DETACHED);
1035
1036         result = iam_path_lookup(&it->ii_path, index);
1037         if (result >= 0) {
1038                 int collision;
1039
1040                 collision = result & IAM_LOOKUP_LAST;
1041                 switch (result & ~IAM_LOOKUP_LAST) {
1042                 case IAM_LOOKUP_EXACT:
1043                         result = +1;
1044                         it->ii_state = IAM_IT_ATTACHED;
1045                         break;
1046                 case IAM_LOOKUP_OK:
1047                         result = 0;
1048                         it->ii_state = IAM_IT_ATTACHED;
1049                         break;
1050                 case IAM_LOOKUP_BEFORE:
1051                 case IAM_LOOKUP_EMPTY:
1052                         result = 0;
1053                         it->ii_state = IAM_IT_SKEWED;
1054                         break;
1055                 default:
1056                         assert(0);
1057                 }
1058                 result |= collision;
1059         }
1060         /*
1061          * See iam_it_get_exact() for explanation.
1062          */
1063         assert_corr(result != -ENOENT);
1064         return result;
1065 }
1066
1067 /*
1068  * Correct hash, but not the same key was found, iterate through hash
1069  * collision chain, looking for correct record.
1070  */
1071 static int iam_it_collision(struct iam_iterator *it)
1072 {
1073         int result;
1074
1075         assert(ergo(it_at_rec(it), !it_keyeq(it, it->ii_path.ip_key_target)));
1076
1077         while ((result = iam_it_next(it)) == 0) {
1078                 do_corr(schedule());
1079                 if (it_ikeycmp(it, it->ii_path.ip_ikey_target) != 0)
1080                         return -ENOENT;
1081                 if (it_keyeq(it, it->ii_path.ip_key_target))
1082                         return 0;
1083         }
1084         return result;
1085 }
1086
1087 /*
1088  * Attach iterator. After successful completion, @it points to record with
1089  * least key not larger than @k.
1090  *
1091  * Return value: 0: positioned on existing record,
1092  *             +ve: exact position found,
1093  *             -ve: error.
1094  *
1095  * precondition:  it_state(it) == IAM_IT_DETACHED
1096  * postcondition: ergo(result == 0 && it_state(it) == IAM_IT_ATTACHED,
1097  *                     it_keycmp(it, k) <= 0)
1098  */
1099 int iam_it_get(struct iam_iterator *it, const struct iam_key *k)
1100 {
1101         int result;
1102
1103         assert_corr(it_state(it) == IAM_IT_DETACHED);
1104
1105         it->ii_path.ip_ikey_target = NULL;
1106         it->ii_path.ip_key_target  = k;
1107
1108         result = __iam_it_get(it, 0);
1109
1110         if (result == IAM_LOOKUP_LAST) {
1111                 result = iam_it_collision(it);
1112                 if (result != 0) {
1113                         iam_it_put(it);
1114                         iam_it_fini(it);
1115                         result = __iam_it_get(it, 0);
1116                 } else
1117                         result = +1;
1118         }
1119         if (result > 0)
1120                 result &= ~IAM_LOOKUP_LAST;
1121
1122         assert_corr(ergo(result > 0, it_keycmp(it, k) == 0));
1123         assert_corr(ergo(result == 0 && it_state(it) == IAM_IT_ATTACHED,
1124                     it_keycmp(it, k) <= 0));
1125         return result;
1126 }
1127
1128 /*
1129  * Attach iterator by index key.
1130  */
1131 static int iam_it_iget(struct iam_iterator *it, const struct iam_ikey *k)
1132 {
1133         assert_corr(it_state(it) == IAM_IT_DETACHED);
1134
1135         it->ii_path.ip_ikey_target = k;
1136         return __iam_it_get(it, 1) & ~IAM_LOOKUP_LAST;
1137 }
1138
1139 /*
1140  * Attach iterator, and assure it points to the record (not skewed).
1141  *
1142  * Return value: 0: positioned on existing record,
1143  *             +ve: exact position found,
1144  *             -ve: error.
1145  *
1146  * precondition:  it_state(it) == IAM_IT_DETACHED &&
1147  *                !(it->ii_flags&IAM_IT_WRITE)
1148  * postcondition: ergo(result == 0, it_state(it) == IAM_IT_ATTACHED)
1149  */
1150 int iam_it_get_at(struct iam_iterator *it, const struct iam_key *k)
1151 {
1152         int result;
1153
1154         assert_corr(it_state(it) == IAM_IT_DETACHED &&
1155                     !(it->ii_flags&IAM_IT_WRITE));
1156         result = iam_it_get(it, k);
1157         if (result == 0) {
1158                 if (it_state(it) != IAM_IT_ATTACHED) {
1159                         assert_corr(it_state(it) == IAM_IT_SKEWED);
1160                         result = iam_it_next(it);
1161                 }
1162         }
1163         assert_corr(ergo(result >= 0, it_state(it) == IAM_IT_ATTACHED));
1164         return result;
1165 }
1166
1167 /*
1168  * Duplicates iterator.
1169  *
1170  * postcondition: it_state(dst) == it_state(src) &&
1171  *                iam_it_container(dst) == iam_it_container(src) &&
1172  *                dst->ii_flags = src->ii_flags &&
1173  *                ergo(it_state(src) == IAM_IT_ATTACHED,
1174  *                     iam_it_rec_get(dst) == iam_it_rec_get(src) &&
1175  *                     iam_it_key_get(dst) == iam_it_key_get(src))
1176  */
1177 void iam_it_dup(struct iam_iterator *dst, const struct iam_iterator *src)
1178 {
1179         dst->ii_flags = src->ii_flags;
1180         dst->ii_state = src->ii_state;
1181         /* XXX not yet. iam_path_dup(&dst->ii_path, &src->ii_path); */
1182         /*
1183          * XXX: duplicate lock.
1184          */
1185         assert_corr(it_state(dst) == it_state(src));
1186         assert_corr(iam_it_container(dst) == iam_it_container(src));
1187         assert_corr(dst->ii_flags = src->ii_flags);
1188         assert_corr(ergo(it_state(src) == IAM_IT_ATTACHED,
1189                     iam_it_rec_get(dst) == iam_it_rec_get(src) &&
1190                     iam_it_key_get(dst) == iam_it_key_get(src)));
1191 }
1192
1193 /*
1194  * Detach iterator. Does nothing it detached state.
1195  *
1196  * postcondition: it_state(it) == IAM_IT_DETACHED
1197  */
1198 void iam_it_put(struct iam_iterator *it)
1199 {
1200         if (it->ii_state != IAM_IT_DETACHED) {
1201                 it->ii_state = IAM_IT_DETACHED;
1202                 iam_leaf_fini(&it->ii_path.ip_leaf);
1203         }
1204 }
1205
1206 static struct iam_ikey *iam_it_ikey_get(const struct iam_iterator *it,
1207                                         struct iam_ikey *ikey);
1208
1209
1210 /*
1211  * This function increments the frame pointer to search the next leaf
1212  * block, and reads in the necessary intervening nodes if the search
1213  * should be necessary.  Whether or not the search is necessary is
1214  * controlled by the hash parameter.  If the hash value is even, then
1215  * the search is only continued if the next block starts with that
1216  * hash value.  This is used if we are searching for a specific file.
1217  *
1218  * If the hash value is HASH_NB_ALWAYS, then always go to the next block.
1219  *
1220  * This function returns 1 if the caller should continue to search,
1221  * or 0 if it should not.  If there is an error reading one of the
1222  * index blocks, it will a negative error code.
1223  *
1224  * If start_hash is non-null, it will be filled in with the starting
1225  * hash of the next page.
1226  */
1227 static int iam_htree_advance(struct inode *dir, __u32 hash,
1228                               struct iam_path *path, __u32 *start_hash,
1229                               int compat)
1230 {
1231         struct iam_frame *p;
1232         struct buffer_head *bh;
1233         int err, num_frames = 0;
1234         __u32 bhash;
1235
1236         p = path->ip_frame;
1237         /*
1238          * Find the next leaf page by incrementing the frame pointer.
1239          * If we run out of entries in the interior node, loop around and
1240          * increment pointer in the parent node.  When we break out of
1241          * this loop, num_frames indicates the number of interior
1242          * nodes need to be read.
1243          */
1244         while (1) {
1245                 do_corr(schedule());
1246                 iam_lock_bh(p->bh);
1247                 if (p->at_shifted)
1248                         p->at_shifted = 0;
1249                 else
1250                         p->at = iam_entry_shift(path, p->at, +1);
1251                 if (p->at < iam_entry_shift(path, p->entries,
1252                                             dx_get_count(p->entries))) {
1253                         p->leaf = dx_get_block(path, p->at);
1254                         iam_unlock_bh(p->bh);
1255                         break;
1256                 }
1257                 iam_unlock_bh(p->bh);
1258                 if (p == path->ip_frames)
1259                         return 0;
1260                 num_frames++;
1261                 --p;
1262         }
1263
1264         if (compat) {
1265                 /*
1266                  * Htree hash magic.
1267                  */
1268
1269                 /*
1270                  * If the hash is 1, then continue only if the next page has a
1271                  * continuation hash of any value.  This is used for readdir
1272                  * handling.  Otherwise, check to see if the hash matches the
1273                  * desired contiuation hash.  If it doesn't, return since
1274                  * there's no point to read in the successive index pages.
1275                  */
1276                 dx_get_ikey(path, p->at, (struct iam_ikey *)&bhash);
1277                 if (start_hash)
1278                         *start_hash = bhash;
1279                 if ((hash & 1) == 0) {
1280                         if ((bhash & ~1) != hash)
1281                                 return 0;
1282                 }
1283         }
1284         /*
1285          * If the hash is HASH_NB_ALWAYS, we always go to the next
1286          * block so no check is necessary
1287          */
1288         while (num_frames--) {
1289                 iam_ptr_t idx;
1290
1291                 do_corr(schedule());
1292                 iam_lock_bh(p->bh);
1293                 idx = p->leaf = dx_get_block(path, p->at);
1294                 iam_unlock_bh(p->bh);
1295                 err = iam_path_descr(path)->id_ops->
1296                         id_node_read(path->ip_container, idx, NULL, &bh);
1297                 if (err != 0)
1298                         return err; /* Failure */
1299                 ++p;
1300                 brelse(p->bh);
1301                 assert_corr(p->bh != bh);
1302                 p->bh = bh;
1303                 p->entries = dx_node_get_entries(path, p);
1304                 p->at = iam_entry_shift(path, p->entries, !compat);
1305                 assert_corr(p->curidx != idx);
1306                 p->curidx = idx;
1307                 iam_lock_bh(p->bh);
1308                 assert_corr(p->leaf != dx_get_block(path, p->at));
1309                 p->leaf = dx_get_block(path, p->at);
1310                 iam_unlock_bh(p->bh);
1311                 assert_inv(dx_node_check(path, p));
1312         }
1313         return 1;
1314 }
1315
1316 static inline int iam_index_advance(struct iam_path *path)
1317 {
1318         return iam_htree_advance(iam_path_obj(path), 0, path, NULL, 0);
1319 }
1320
1321 static void iam_unlock_array(struct iam_container *ic,
1322                              struct dynlock_handle **lh)
1323 {
1324         int i;
1325
1326         for (i = 0; i < DX_MAX_TREE_HEIGHT; ++i, ++lh) {
1327                 if (*lh != NULL) {
1328                         iam_unlock_htree(ic, *lh);
1329                         *lh = NULL;
1330                 }
1331         }
1332 }
1333 /*
1334  * Advance index part of @path to point to the next leaf. Returns 1 on
1335  * success, 0, when end of container was reached. Leaf node is locked.
1336  */
1337 int iam_index_next(struct iam_container *c, struct iam_path *path)
1338 {
1339         iam_ptr_t cursor;
1340         struct dynlock_handle *lh[DX_MAX_TREE_HEIGHT] = { NULL, };
1341         int result;
1342
1343         /*
1344          * Locking for iam_index_next()... is to be described.
1345          */
1346
1347         cursor = path->ip_frame->leaf;
1348
1349         while (1) {
1350                 result = iam_index_lock(path, lh);
1351                 do_corr(schedule());
1352                 if (result < 0)
1353                         break;
1354
1355                 result = iam_check_full_path(path, 0);
1356                 if (result == 0 && cursor == path->ip_frame->leaf) {
1357                         result = iam_index_advance(path);
1358
1359                         assert_corr(result == 0 ||
1360                                     cursor != path->ip_frame->leaf);
1361                         break;
1362                 }
1363                 do {
1364                         iam_unlock_array(c, lh);
1365
1366                         iam_path_release(path);
1367                         do_corr(schedule());
1368
1369                         result = __iam_path_lookup(path);
1370                         if (result < 0)
1371                                 break;
1372
1373                         while (path->ip_frame->leaf != cursor) {
1374                                 do_corr(schedule());
1375
1376                                 result = iam_index_lock(path, lh);
1377                                 do_corr(schedule());
1378                                 if (result < 0)
1379                                         break;
1380
1381                                 result = iam_check_full_path(path, 0);
1382                                 if (result != 0)
1383                                         break;
1384
1385                                 result = iam_index_advance(path);
1386                                 if (result == 0) {
1387                                         CERROR("cannot find cursor : %u\n",
1388                                                 cursor);
1389                                         result = -EIO;
1390                                 }
1391                                 if (result < 0)
1392                                         break;
1393                                 result = iam_check_full_path(path, 0);
1394                                 if (result != 0)
1395                                         break;
1396                                 iam_unlock_array(c, lh);
1397                         }
1398                 } while (result == -EAGAIN);
1399                 if (result < 0)
1400                         break;
1401         }
1402         iam_unlock_array(c, lh);
1403         return result;
1404 }
1405
1406 /*
1407  * Move iterator one record right.
1408  *
1409  * Return value: 0: success,
1410  *              +1: end of container reached
1411  *             -ve: error
1412  *
1413  * precondition:  (it_state(it) == IAM_IT_ATTACHED ||
1414  *                 it_state(it) == IAM_IT_SKEWED) && it->ii_flags&IAM_IT_MOVE
1415  * postcondition: ergo(result == 0, it_state(it) == IAM_IT_ATTACHED) &&
1416  *                ergo(result >  0, it_state(it) == IAM_IT_DETACHED)
1417  */
1418 int iam_it_next(struct iam_iterator *it)
1419 {
1420         int result;
1421         struct iam_path *path;
1422         struct iam_leaf *leaf;
1423
1424         do_corr(struct iam_ikey *ik_orig);
1425
1426         /* assert_corr(it->ii_flags&IAM_IT_MOVE); */
1427         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1428                     it_state(it) == IAM_IT_SKEWED);
1429
1430         path = &it->ii_path;
1431         leaf = &path->ip_leaf;
1432
1433         assert_corr(iam_leaf_is_locked(leaf));
1434
1435         result = 0;
1436         do_corr(ik_orig = it_at_rec(it) ?
1437                 iam_it_ikey_get(it, iam_path_ikey(path, 2)) : NULL);
1438         if (it_before(it)) {
1439                 assert_corr(!iam_leaf_at_end(leaf));
1440                 it->ii_state = IAM_IT_ATTACHED;
1441         } else {
1442                 if (!iam_leaf_at_end(leaf))
1443                         /* advance within leaf node */
1444                         iam_leaf_next(leaf);
1445                 /*
1446                  * multiple iterations may be necessary due to empty leaves.
1447                  */
1448                 while (result == 0 && iam_leaf_at_end(leaf)) {
1449                         do_corr(schedule());
1450                         /* advance index portion of the path */
1451                         result = iam_index_next(iam_it_container(it), path);
1452                         assert_corr(iam_leaf_is_locked(leaf));
1453                         if (result == 1) {
1454                                 struct dynlock_handle *lh;
1455                                 lh = iam_lock_htree(iam_it_container(it),
1456                                                     path->ip_frame->leaf,
1457                                                     DLT_WRITE);
1458                                 if (lh != NULL) {
1459                                         iam_leaf_fini(leaf);
1460                                         leaf->il_lock = lh;
1461                                         result = iam_leaf_load(path);
1462                                         if (result == 0)
1463                                                 iam_leaf_start(leaf);
1464                                 } else
1465                                         result = -ENOMEM;
1466                         } else if (result == 0)
1467                                 /* end of container reached */
1468                                 result = +1;
1469                         if (result != 0)
1470                                 iam_it_put(it);
1471                 }
1472                 if (result == 0)
1473                         it->ii_state = IAM_IT_ATTACHED;
1474         }
1475         assert_corr(ergo(result == 0, it_state(it) == IAM_IT_ATTACHED));
1476         assert_corr(ergo(result >  0, it_state(it) == IAM_IT_DETACHED));
1477         assert_corr(ergo(result == 0 && ik_orig != NULL,
1478                     it_ikeycmp(it, ik_orig) >= 0));
1479         return result;
1480 }
1481
1482 /*
1483  * Return pointer to the record under iterator.
1484  *
1485  * precondition:  it_state(it) == IAM_IT_ATTACHED && it_at_rec(it)
1486  * postcondition: it_state(it) == IAM_IT_ATTACHED
1487  */
1488 struct iam_rec *iam_it_rec_get(const struct iam_iterator *it)
1489 {
1490         assert_corr(it_state(it) == IAM_IT_ATTACHED);
1491         assert_corr(it_at_rec(it));
1492         return iam_leaf_rec(&it->ii_path.ip_leaf);
1493 }
1494
1495 static void iam_it_reccpy(struct iam_iterator *it, const struct iam_rec *r)
1496 {
1497         struct iam_leaf *folio;
1498
1499         folio = &it->ii_path.ip_leaf;
1500         iam_leaf_ops(folio)->rec_set(folio, r);
1501 }
1502
1503 /*
1504  * Replace contents of record under iterator.
1505  *
1506  * precondition:  it_state(it) == IAM_IT_ATTACHED &&
1507  *                it->ii_flags&IAM_IT_WRITE
1508  * postcondition: it_state(it) == IAM_IT_ATTACHED &&
1509  *                ergo(result == 0, !memcmp(iam_it_rec_get(it), r, ...))
1510  */
1511 int iam_it_rec_set(handle_t *h,
1512                    struct iam_iterator *it, const struct iam_rec *r)
1513 {
1514         int result;
1515         struct iam_path *path;
1516         struct buffer_head *bh;
1517
1518         assert_corr(it_state(it) == IAM_IT_ATTACHED &&
1519                     it->ii_flags&IAM_IT_WRITE);
1520         assert_corr(it_at_rec(it));
1521
1522         path = &it->ii_path;
1523         bh = path->ip_leaf.il_bh;
1524         result = iam_txn_add(h, path, bh);
1525         if (result == 0) {
1526                 iam_it_reccpy(it, r);
1527                 result = iam_txn_dirty(h, path, bh);
1528         }
1529         return result;
1530 }
1531
1532 /*
1533  * Return pointer to the index key under iterator.
1534  *
1535  * precondition:  it_state(it) == IAM_IT_ATTACHED ||
1536  *                it_state(it) == IAM_IT_SKEWED
1537  */
1538 static struct iam_ikey *iam_it_ikey_get(const struct iam_iterator *it,
1539                                         struct iam_ikey *ikey)
1540 {
1541         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1542                     it_state(it) == IAM_IT_SKEWED);
1543         assert_corr(it_at_rec(it));
1544         return iam_leaf_ikey(&it->ii_path.ip_leaf, ikey);
1545 }
1546
1547 /*
1548  * Return pointer to the key under iterator.
1549  *
1550  * precondition:  it_state(it) == IAM_IT_ATTACHED ||
1551  *                it_state(it) == IAM_IT_SKEWED
1552  */
1553 struct iam_key *iam_it_key_get(const struct iam_iterator *it)
1554 {
1555         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1556                     it_state(it) == IAM_IT_SKEWED);
1557         assert_corr(it_at_rec(it));
1558         return iam_leaf_key(&it->ii_path.ip_leaf);
1559 }
1560
1561 /*
1562  * Return size of key under iterator (in bytes)
1563  *
1564  * precondition:  it_state(it) == IAM_IT_ATTACHED ||
1565  *                it_state(it) == IAM_IT_SKEWED
1566  */
1567 int iam_it_key_size(const struct iam_iterator *it)
1568 {
1569         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
1570                     it_state(it) == IAM_IT_SKEWED);
1571         assert_corr(it_at_rec(it));
1572         return iam_leaf_key_size(&it->ii_path.ip_leaf);
1573 }
1574
1575 static struct buffer_head *
1576 iam_new_node(handle_t *h, struct iam_container *c, iam_ptr_t *b, int *e)
1577 {
1578         struct inode *inode = c->ic_object;
1579         struct buffer_head *bh = NULL;
1580         struct iam_idle_head *head;
1581         struct buffer_head *idle;
1582         __u32 *idle_blocks;
1583         __u16 count;
1584
1585         if (c->ic_idle_bh == NULL)
1586                 goto newblock;
1587
1588         mutex_lock(&c->ic_idle_mutex);
1589         if (unlikely(c->ic_idle_bh == NULL)) {
1590                 mutex_unlock(&c->ic_idle_mutex);
1591                 goto newblock;
1592         }
1593
1594         head = (struct iam_idle_head *)(c->ic_idle_bh->b_data);
1595         count = le16_to_cpu(head->iih_count);
1596         if (count > 0) {
1597                 *e = ldiskfs_journal_get_write_access(h, c->ic_idle_bh);
1598                 if (*e != 0)
1599                         goto fail;
1600
1601                 --count;
1602                 *b = le32_to_cpu(head->iih_blks[count]);
1603                 head->iih_count = cpu_to_le16(count);
1604                 *e = ldiskfs_handle_dirty_metadata(h, inode, c->ic_idle_bh);
1605                 if (*e != 0)
1606                         goto fail;
1607
1608                 mutex_unlock(&c->ic_idle_mutex);
1609                 bh = __ldiskfs_bread(NULL, inode, *b, 0);
1610                 if (IS_ERR_OR_NULL(bh)) {
1611                         if (IS_ERR(bh))
1612                                 *e = PTR_ERR(bh);
1613                         else
1614                                 *e = -EIO;
1615                         return NULL;
1616                 }
1617                 goto got;
1618         }
1619
1620         /* The block itself which contains the iam_idle_head is
1621          * also an idle block, and can be used as the new node. */
1622         idle_blocks = (__u32 *)(c->ic_root_bh->b_data +
1623                                 c->ic_descr->id_root_gap +
1624                                 sizeof(struct dx_countlimit));
1625         *e = ldiskfs_journal_get_write_access(h, c->ic_root_bh);
1626         if (*e != 0)
1627                 goto fail;
1628
1629         *b = le32_to_cpu(*idle_blocks);
1630         iam_lock_bh(c->ic_root_bh);
1631         *idle_blocks = head->iih_next;
1632         iam_unlock_bh(c->ic_root_bh);
1633         *e = ldiskfs_handle_dirty_metadata(h, inode, c->ic_root_bh);
1634         if (*e != 0) {
1635                 iam_lock_bh(c->ic_root_bh);
1636                 *idle_blocks = cpu_to_le32(*b);
1637                 iam_unlock_bh(c->ic_root_bh);
1638                 goto fail;
1639         }
1640
1641         bh = c->ic_idle_bh;
1642         idle = iam_load_idle_blocks(c, le32_to_cpu(*idle_blocks));
1643         if (idle != NULL && IS_ERR(idle)) {
1644                 *e = PTR_ERR(idle);
1645                 c->ic_idle_bh = NULL;
1646                 brelse(bh);
1647                 goto fail;
1648         }
1649
1650         c->ic_idle_bh = idle;
1651         mutex_unlock(&c->ic_idle_mutex);
1652
1653 got:
1654         /* get write access for the found buffer head */
1655         *e = ldiskfs_journal_get_write_access(h, bh);
1656         if (*e != 0) {
1657                 brelse(bh);
1658                 bh = NULL;
1659                 ldiskfs_std_error(inode->i_sb, *e);
1660         } else {
1661                 /* Clear the reused node as new node does. */
1662                 memset(bh->b_data, 0, inode->i_sb->s_blocksize);
1663                 set_buffer_uptodate(bh);
1664         }
1665         return bh;
1666
1667 newblock:
1668         bh = osd_ldiskfs_append(h, inode, b);
1669         if (IS_ERR(bh)) {
1670                 *e = PTR_ERR(bh);
1671                 bh = NULL;
1672         }
1673
1674         return bh;
1675
1676 fail:
1677         mutex_unlock(&c->ic_idle_mutex);
1678         ldiskfs_std_error(inode->i_sb, *e);
1679         return NULL;
1680 }
1681
1682 /*
1683  * Insertion of new record. Interaction with jbd during non-trivial case (when
1684  * split happens) is as following:
1685  *
1686  *  - new leaf node is involved into transaction by iam_new_node();
1687  *
1688  *  - old leaf node is involved into transaction by iam_add_rec();
1689  *
1690  *  - leaf where insertion point ends in, is marked dirty by iam_add_rec();
1691  *
1692  *  - leaf without insertion point is marked dirty (as @new_leaf) by
1693  *  iam_new_leaf();
1694  *
1695  *  - split index nodes are involved into transaction and marked dirty by
1696  *  split_index_node().
1697  *
1698  *  - "safe" index node, which is no split, but where new pointer is inserted
1699  *  is involved into transaction and marked dirty by split_index_node().
1700  *
1701  *  - index node where pointer to new leaf is inserted is involved into
1702  *  transaction by split_index_node() and marked dirty by iam_add_rec().
1703  *
1704  *  - inode is marked dirty by iam_add_rec().
1705  *
1706  */
1707
1708 static int iam_new_leaf(handle_t *handle, struct iam_leaf *leaf)
1709 {
1710         int err;
1711         iam_ptr_t blknr;
1712         struct buffer_head *new_leaf;
1713         struct buffer_head *old_leaf;
1714         struct iam_container *c;
1715         struct inode *obj;
1716         struct iam_path *path;
1717
1718         c = iam_leaf_container(leaf);
1719         path = leaf->il_path;
1720
1721         obj = c->ic_object;
1722         new_leaf = iam_new_node(handle, c, &blknr, &err);
1723         do_corr(schedule());
1724         if (new_leaf != NULL) {
1725                 struct dynlock_handle *lh;
1726
1727                 lh = iam_lock_htree(c, blknr, DLT_WRITE);
1728                 do_corr(schedule());
1729                 if (lh != NULL) {
1730                         iam_leaf_ops(leaf)->init_new(c, new_leaf);
1731                         do_corr(schedule());
1732                         old_leaf = leaf->il_bh;
1733                         iam_leaf_split(leaf, &new_leaf, blknr);
1734                         if (old_leaf != leaf->il_bh) {
1735                                 /*
1736                                  * Switched to the new leaf.
1737                                  */
1738                                 iam_leaf_unlock(leaf);
1739                                 leaf->il_lock = lh;
1740                                 path->ip_frame->leaf = blknr;
1741                         } else
1742                                 iam_unlock_htree(path->ip_container, lh);
1743                         do_corr(schedule());
1744                         err = iam_txn_dirty(handle, path, new_leaf);
1745                         if (err == 0)
1746                                 err = ldiskfs_mark_inode_dirty(handle, obj);
1747                         do_corr(schedule());
1748                 } else
1749                         err = -ENOMEM;
1750                 brelse(new_leaf);
1751         }
1752         assert_inv(iam_path_check(iam_leaf_path(leaf)));
1753         return err;
1754 }
1755
1756 static inline void dx_set_limit(struct iam_entry *entries, unsigned value)
1757 {
1758         ((struct dx_countlimit *) entries)->limit = cpu_to_le16(value);
1759 }
1760
1761 static int iam_shift_entries(struct iam_path *path,
1762                          struct iam_frame *frame, unsigned count,
1763                          struct iam_entry *entries, struct iam_entry *entries2,
1764                          u32 newblock)
1765 {
1766         unsigned count1;
1767         unsigned count2;
1768         int delta;
1769
1770         struct iam_frame *parent = frame - 1;
1771         struct iam_ikey *pivot = iam_path_ikey(path, 3);
1772
1773         delta = dx_index_is_compat(path) ? 0 : +1;
1774
1775         count1 = count/2 + delta;
1776         count2 = count - count1;
1777         dx_get_ikey(path, iam_entry_shift(path, entries, count1), pivot);
1778
1779         dxtrace(printk("Split index %d/%d\n", count1, count2));
1780
1781         memcpy((char *) iam_entry_shift(path, entries2, delta),
1782                (char *) iam_entry_shift(path, entries, count1),
1783                count2 * iam_entry_size(path));
1784
1785         dx_set_count(entries2, count2 + delta);
1786         dx_set_limit(entries2, dx_node_limit(path));
1787
1788         /*
1789          * NOTE: very subtle piece of code competing dx_probe() may find 2nd
1790          * level index in root index, then we insert new index here and set
1791          * new count in that 2nd level index. so, dx_probe() may see 2nd level
1792          * index w/o hash it looks for. the solution is to check root index
1793          * after we locked just founded 2nd level index -bzzz
1794          */
1795         iam_insert_key_lock(path, parent, pivot, newblock);
1796
1797         /*
1798          * now old and new 2nd level index blocks contain all pointers, so
1799          * dx_probe() may find it in the both.  it's OK -bzzz
1800          */
1801         iam_lock_bh(frame->bh);
1802         dx_set_count(entries, count1);
1803         iam_unlock_bh(frame->bh);
1804
1805         /*
1806          * now old 2nd level index block points to first half of leafs. it's
1807          * importand that dx_probe() must check root index block for changes
1808          * under dx_lock_bh(frame->bh) -bzzz
1809          */
1810
1811         return count1;
1812 }
1813
1814
1815 int split_index_node(handle_t *handle, struct iam_path *path,
1816                      struct dynlock_handle **lh)
1817 {
1818         struct iam_entry *entries;   /* old block contents */
1819         struct iam_entry *entries2;  /* new block contents */
1820         struct iam_frame *frame, *safe;
1821         struct buffer_head *bh_new[DX_MAX_TREE_HEIGHT] = {NULL};
1822         u32 newblock[DX_MAX_TREE_HEIGHT] = {0};
1823         struct dynlock_handle *lock[DX_MAX_TREE_HEIGHT] = {NULL,};
1824         struct dynlock_handle *new_lock[DX_MAX_TREE_HEIGHT] = {NULL,};
1825         struct inode *dir = iam_path_obj(path);
1826         struct iam_descr *descr;
1827         int nr_splet;
1828         int i, err;
1829
1830         descr = iam_path_descr(path);
1831         /*
1832          * Algorithm below depends on this.
1833          */
1834         assert_corr(dx_root_limit(path) < dx_node_limit(path));
1835
1836         frame = path->ip_frame;
1837         entries = frame->entries;
1838
1839         /*
1840          * Tall-tree handling: we might have to split multiple index blocks
1841          * all the way up to tree root. Tricky point here is error handling:
1842          * to avoid complicated undo/rollback we
1843          *
1844          *   - first allocate all necessary blocks
1845          *
1846          *   - insert pointers into them atomically.
1847          */
1848
1849         /*
1850          * Locking: leaf is already locked. htree-locks are acquired on all
1851          * index nodes that require split bottom-to-top, on the "safe" node,
1852          * and on all new nodes
1853          */
1854
1855         dxtrace(printk("using %u of %u node entries\n",
1856                        dx_get_count(entries), dx_get_limit(entries)));
1857
1858         /* What levels need split? */
1859         for (nr_splet = 0; frame >= path->ip_frames &&
1860              dx_get_count(frame->entries) == dx_get_limit(frame->entries);
1861              --frame, ++nr_splet) {
1862                 do_corr(schedule());
1863                 if (nr_splet == DX_MAX_TREE_HEIGHT) {
1864                         /*
1865                          * CWARN(dir->i_sb, __FUNCTION__,
1866                          * "Directory index full!\n");
1867                          */
1868                         err = -ENOSPC;
1869                         goto cleanup;
1870                 }
1871         }
1872
1873         safe = frame;
1874
1875         /*
1876          * Lock all nodes, bottom to top.
1877          */
1878         for (frame = path->ip_frame, i = nr_splet; i >= 0; --i, --frame) {
1879                 do_corr(schedule());
1880                 lock[i] = iam_lock_htree(path->ip_container, frame->curidx,
1881                                          DLT_WRITE);
1882                 if (lock[i] == NULL) {
1883                         err = -ENOMEM;
1884                         goto cleanup;
1885                 }
1886         }
1887
1888         /*
1889          * Check for concurrent index modification.
1890          */
1891         err = iam_check_full_path(path, 1);
1892         if (err)
1893                 goto cleanup;
1894         /*
1895          * And check that the same number of nodes is to be split.
1896          */
1897         for (i = 0, frame = path->ip_frame; frame >= path->ip_frames &&
1898              dx_get_count(frame->entries) == dx_get_limit(frame->entries);
1899              --frame, ++i) {
1900                 ;
1901         }
1902         if (i != nr_splet) {
1903                 err = -EAGAIN;
1904                 goto cleanup;
1905         }
1906
1907         /*
1908          * Go back down, allocating blocks, locking them, and adding into
1909          * transaction...
1910          */
1911         for (frame = safe + 1, i = 0; i < nr_splet; ++i, ++frame) {
1912                 bh_new[i] = iam_new_node(handle, path->ip_container,
1913                                          &newblock[i], &err);
1914                 do_corr(schedule());
1915                 if (!bh_new[i] ||
1916                     descr->id_ops->id_node_init(path->ip_container,
1917                                                 bh_new[i], 0) != 0)
1918                         goto cleanup;
1919
1920                 new_lock[i] = iam_lock_htree(path->ip_container, newblock[i],
1921                                              DLT_WRITE);
1922                 if (new_lock[i] == NULL) {
1923                         err = -ENOMEM;
1924                         goto cleanup;
1925                 }
1926                 do_corr(schedule());
1927                 BUFFER_TRACE(frame->bh, "get_write_access");
1928                 err = ldiskfs_journal_get_write_access(handle, frame->bh);
1929                 if (err)
1930                         goto journal_error;
1931         }
1932         /* Add "safe" node to transaction too */
1933         if (safe + 1 != path->ip_frames) {
1934                 do_corr(schedule());
1935                 err = ldiskfs_journal_get_write_access(handle, safe->bh);
1936                 if (err)
1937                         goto journal_error;
1938         }
1939
1940         /* Go through nodes once more, inserting pointers */
1941         for (frame = safe + 1, i = 0; i < nr_splet; ++i, ++frame) {
1942                 unsigned count;
1943                 int idx;
1944                 struct buffer_head *bh2;
1945                 struct buffer_head *bh;
1946
1947                 entries = frame->entries;
1948                 count = dx_get_count(entries);
1949                 idx = iam_entry_diff(path, frame->at, entries);
1950
1951                 bh2 = bh_new[i];
1952                 entries2 = dx_get_entries(path, bh2->b_data, 0);
1953
1954                 bh = frame->bh;
1955                 if (frame == path->ip_frames) {
1956                         /* splitting root node. Tricky point:
1957                          *
1958                          * In the "normal" B-tree we'd split root *and* add
1959                          * new root to the tree with pointers to the old root
1960                          * and its sibling (thus introducing two new nodes).
1961                          *
1962                          * In htree it's enough to add one node, because
1963                          * capacity of the root node is smaller than that of
1964                          * non-root one.
1965                          */
1966                         struct iam_frame *frames;
1967                         struct iam_entry *next;
1968
1969                         assert_corr(i == 0);
1970
1971                         do_corr(schedule());
1972
1973                         frames = path->ip_frames;
1974                         memcpy((char *) entries2, (char *) entries,
1975                                count * iam_entry_size(path));
1976                         dx_set_limit(entries2, dx_node_limit(path));
1977
1978                         /* Set up root */
1979                         iam_lock_bh(frame->bh);
1980                         next = descr->id_ops->id_root_inc(path->ip_container,
1981                                                           path, frame);
1982                         dx_set_block(path, next, newblock[0]);
1983                         iam_unlock_bh(frame->bh);
1984
1985                         do_corr(schedule());
1986                         /* Shift frames in the path */
1987                         memmove(frames + 2, frames + 1,
1988                                (sizeof path->ip_frames) - 2 * sizeof frames[0]);
1989                         /* Add new access path frame */
1990                         frames[1].at = iam_entry_shift(path, entries2, idx);
1991                         frames[1].entries = entries = entries2;
1992                         frames[1].bh = bh2;
1993                         assert_inv(dx_node_check(path, frame));
1994                         ++ path->ip_frame;
1995                         ++ frame;
1996                         assert_inv(dx_node_check(path, frame));
1997                         bh_new[0] = NULL; /* buffer head is "consumed" */
1998                         err = ldiskfs_handle_dirty_metadata(handle, NULL, bh2);
1999                         if (err)
2000                                 goto journal_error;
2001                         do_corr(schedule());
2002                 } else {
2003                         /* splitting non-root index node. */
2004                         struct iam_frame *parent = frame - 1;
2005
2006                         do_corr(schedule());
2007                         count = iam_shift_entries(path, frame, count,
2008                                                 entries, entries2, newblock[i]);
2009                         /* Which index block gets the new entry? */
2010                         if (idx >= count) {
2011                                 int d = dx_index_is_compat(path) ? 0 : +1;
2012
2013                                 frame->at = iam_entry_shift(path, entries2,
2014                                                             idx - count + d);
2015                                 frame->entries = entries = entries2;
2016                                 frame->curidx = newblock[i];
2017                                 swap(frame->bh, bh2);
2018                                 assert_corr(lock[i + 1] != NULL);
2019                                 assert_corr(new_lock[i] != NULL);
2020                                 swap(lock[i + 1], new_lock[i]);
2021                                 bh_new[i] = bh2;
2022                                 parent->at = iam_entry_shift(path,
2023                                                              parent->at, +1);
2024                         }
2025                         assert_inv(dx_node_check(path, frame));
2026                         assert_inv(dx_node_check(path, parent));
2027                         dxtrace(dx_show_index("node", frame->entries));
2028                         dxtrace(dx_show_index("node",
2029                                 ((struct dx_node *) bh2->b_data)->entries));
2030                         err = ldiskfs_handle_dirty_metadata(handle, NULL, bh2);
2031                         if (err)
2032                                 goto journal_error;
2033                         do_corr(schedule());
2034                         err = ldiskfs_handle_dirty_metadata(handle, NULL,
2035                                                             parent->bh);
2036                         if (err)
2037                                 goto journal_error;
2038                 }
2039                 do_corr(schedule());
2040                 err = ldiskfs_handle_dirty_metadata(handle, NULL, bh);
2041                 if (err)
2042                         goto journal_error;
2043         }
2044                 /*
2045                  * This function was called to make insertion of new leaf
2046                  * possible. Check that it fulfilled its obligations.
2047                  */
2048                 assert_corr(dx_get_count(path->ip_frame->entries) <
2049                             dx_get_limit(path->ip_frame->entries));
2050         assert_corr(lock[nr_splet] != NULL);
2051         *lh = lock[nr_splet];
2052         lock[nr_splet] = NULL;
2053         if (nr_splet > 0) {
2054                 /*
2055                  * Log ->i_size modification.
2056                  */
2057                 err = ldiskfs_mark_inode_dirty(handle, dir);
2058                 if (err)
2059                         goto journal_error;
2060         }
2061         goto cleanup;
2062 journal_error:
2063         ldiskfs_std_error(dir->i_sb, err);
2064
2065 cleanup:
2066         iam_unlock_array(path->ip_container, lock);
2067         iam_unlock_array(path->ip_container, new_lock);
2068
2069         assert_corr(err || iam_frame_is_locked(path, path->ip_frame));
2070
2071         do_corr(schedule());
2072         for (i = 0; i < ARRAY_SIZE(bh_new); ++i) {
2073                 if (bh_new[i] != NULL)
2074                         brelse(bh_new[i]);
2075         }
2076         return err;
2077 }
2078
2079 static int iam_add_rec(handle_t *handle, struct iam_iterator *it,
2080                        struct iam_path *path,
2081                        const struct iam_key *k, const struct iam_rec *r)
2082 {
2083         int err;
2084         struct iam_leaf *leaf;
2085
2086         leaf = &path->ip_leaf;
2087         assert_inv(iam_path_check(path));
2088         err = iam_txn_add(handle, path, leaf->il_bh);
2089         if (err == 0) {
2090                 do_corr(schedule());
2091                 if (!iam_leaf_can_add(leaf, k, r)) {
2092                         struct dynlock_handle *lh = NULL;
2093
2094                         do {
2095                                 assert_corr(lh == NULL);
2096                                 do_corr(schedule());
2097                                 err = split_index_node(handle, path, &lh);
2098                                 if (err == -EAGAIN) {
2099                                         assert_corr(lh == NULL);
2100
2101                                         iam_path_fini(path);
2102                                         it->ii_state = IAM_IT_DETACHED;
2103
2104                                         do_corr(schedule());
2105                                         err = iam_it_get_exact(it, k);
2106                                         if (err == -ENOENT)
2107                                                 err = +1; /* repeat split */
2108                                         else if (err == 0)
2109                                                 err = -EEXIST;
2110                                 }
2111                         } while (err > 0);
2112                         assert_inv(iam_path_check(path));
2113                         if (err == 0) {
2114                                 assert_corr(lh != NULL);
2115                                 do_corr(schedule());
2116                                 err = iam_new_leaf(handle, leaf);
2117                                 if (err == 0)
2118                                         err = iam_txn_dirty(handle, path,
2119                                                             path->ip_frame->bh);
2120                         }
2121                         iam_unlock_htree(path->ip_container, lh);
2122                         do_corr(schedule());
2123                 }
2124                 if (err == 0) {
2125                         iam_leaf_rec_add(leaf, k, r);
2126                         err = iam_txn_dirty(handle, path, leaf->il_bh);
2127                 }
2128         }
2129         assert_inv(iam_path_check(path));
2130         return err;
2131 }
2132
2133 /*
2134  * Insert new record with key @k and contents from @r, shifting records to the
2135  * right. On success, iterator is positioned on the newly inserted record.
2136  *
2137  * precondition: it->ii_flags&IAM_IT_WRITE &&
2138  *               (it_state(it) == IAM_IT_ATTACHED ||
2139  *                it_state(it) == IAM_IT_SKEWED) &&
2140  *               ergo(it_state(it) == IAM_IT_ATTACHED,
2141  *                    it_keycmp(it, k) <= 0) &&
2142  *               ergo(it_before(it), it_keycmp(it, k) > 0));
2143  * postcondition: ergo(result == 0,
2144  *                     it_state(it) == IAM_IT_ATTACHED &&
2145  *                     it_keycmp(it, k) == 0 &&
2146  *                     !memcmp(iam_it_rec_get(it), r, ...))
2147  */
2148 int iam_it_rec_insert(handle_t *h, struct iam_iterator *it,
2149                       const struct iam_key *k, const struct iam_rec *r)
2150 {
2151         int result;
2152         struct iam_path *path;
2153
2154         path = &it->ii_path;
2155
2156         assert_corr(it->ii_flags&IAM_IT_WRITE);
2157         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
2158                     it_state(it) == IAM_IT_SKEWED);
2159         assert_corr(ergo(it_state(it) == IAM_IT_ATTACHED,
2160                     it_keycmp(it, k) <= 0));
2161         assert_corr(ergo(it_before(it), it_keycmp(it, k) > 0));
2162         result = iam_add_rec(h, it, path, k, r);
2163         if (result == 0)
2164                 it->ii_state = IAM_IT_ATTACHED;
2165         assert_corr(ergo(result == 0,
2166                          it_state(it) == IAM_IT_ATTACHED &&
2167                          it_keycmp(it, k) == 0));
2168         return result;
2169 }
2170
2171 static inline int iam_idle_blocks_limit(struct inode *inode)
2172 {
2173         return (inode->i_sb->s_blocksize - sizeof(struct iam_idle_head)) >> 2;
2174 }
2175
2176 /*
2177  * If the leaf cannnot be recycled, we will lose one block for reusing.
2178  * It is not a serious issue because it almost the same of non-recycle.
2179  */
2180 static iam_ptr_t iam_index_shrink(handle_t *h, struct iam_path *p,
2181                                   struct iam_leaf *l, struct buffer_head **bh)
2182 {
2183         struct iam_container *c = p->ip_container;
2184         struct inode *inode = c->ic_object;
2185         struct iam_frame *frame = p->ip_frame;
2186         struct iam_entry *entries;
2187         struct iam_entry *pos;
2188         struct dynlock_handle *lh;
2189         int count;
2190         int rc;
2191
2192         if (c->ic_idle_failed)
2193                 return 0;
2194
2195         if (unlikely(frame == NULL))
2196                 return 0;
2197
2198         if (!iam_leaf_empty(l))
2199                 return 0;
2200
2201         lh = iam_lock_htree(c, frame->curidx, DLT_WRITE);
2202         if (lh == NULL) {
2203                 CWARN("%s: No memory to recycle idle blocks\n",
2204                       osd_ino2name(inode));
2205                 return 0;
2206         }
2207
2208         rc = iam_txn_add(h, p, frame->bh);
2209         if (rc != 0) {
2210                 iam_unlock_htree(c, lh);
2211                 return 0;
2212         }
2213
2214         iam_lock_bh(frame->bh);
2215         entries = frame->entries;
2216         count = dx_get_count(entries);
2217         /*
2218          * NOT shrink the last entry in the index node, which can be reused
2219          * directly by next new node.
2220          */
2221         if (count == 2) {
2222                 iam_unlock_bh(frame->bh);
2223                 iam_unlock_htree(c, lh);
2224                 return 0;
2225         }
2226
2227         pos = iam_find_position(p, frame);
2228         /*
2229          * There may be some new leaf nodes have been added or empty leaf nodes
2230          * have been shrinked during my delete operation.
2231          *
2232          * If the empty leaf is not under current index node because the index
2233          * node has been split, then just skip the empty leaf, which is rare.
2234          */
2235         if (unlikely(frame->leaf != dx_get_block(p, pos))) {
2236                 iam_unlock_bh(frame->bh);
2237                 iam_unlock_htree(c, lh);
2238                 return 0;
2239         }
2240
2241         frame->at = pos;
2242         if (frame->at < iam_entry_shift(p, entries, count - 1)) {
2243                 struct iam_entry *n = iam_entry_shift(p, frame->at, 1);
2244
2245                 memmove(frame->at, n,
2246                         (char *)iam_entry_shift(p, entries, count) - (char *)n);
2247                 frame->at_shifted = 1;
2248         }
2249         dx_set_count(entries, count - 1);
2250         iam_unlock_bh(frame->bh);
2251         rc = iam_txn_dirty(h, p, frame->bh);
2252         iam_unlock_htree(c, lh);
2253         if (rc != 0)
2254                 return 0;
2255
2256         get_bh(l->il_bh);
2257         *bh = l->il_bh;
2258         return frame->leaf;
2259 }
2260
2261 static int
2262 iam_install_idle_blocks(handle_t *h, struct iam_path *p, struct buffer_head *bh,
2263                         __u32 *idle_blocks, iam_ptr_t blk)
2264 {
2265         struct iam_container *c = p->ip_container;
2266         struct buffer_head *old = c->ic_idle_bh;
2267         struct iam_idle_head *head;
2268         int rc;
2269
2270         head = (struct iam_idle_head *)(bh->b_data);
2271         head->iih_magic = cpu_to_le16(IAM_IDLE_HEADER_MAGIC);
2272         head->iih_count = 0;
2273         head->iih_next = *idle_blocks;
2274         /* The bh already get_write_accessed. */
2275         rc = iam_txn_dirty(h, p, bh);
2276         if (rc != 0)
2277                 return rc;
2278
2279         rc = iam_txn_add(h, p, c->ic_root_bh);
2280         if (rc != 0)
2281                 return rc;
2282
2283         iam_lock_bh(c->ic_root_bh);
2284         *idle_blocks = cpu_to_le32(blk);
2285         iam_unlock_bh(c->ic_root_bh);
2286         rc = iam_txn_dirty(h, p, c->ic_root_bh);
2287         if (rc == 0) {
2288                 /* NOT release old before new assigned. */
2289                 get_bh(bh);
2290                 c->ic_idle_bh = bh;
2291                 brelse(old);
2292         } else {
2293                 iam_lock_bh(c->ic_root_bh);
2294                 *idle_blocks = head->iih_next;
2295                 iam_unlock_bh(c->ic_root_bh);
2296         }
2297         return rc;
2298 }
2299
2300 /*
2301  * If the leaf cannnot be recycled, we will lose one block for reusing.
2302  * It is not a serious issue because it almost the same of non-recycle.
2303  */
2304 static void iam_recycle_leaf(handle_t *h, struct iam_path *p,
2305                              struct buffer_head *bh, iam_ptr_t blk)
2306 {
2307         struct iam_container *c = p->ip_container;
2308         struct inode *inode = c->ic_object;
2309         struct iam_idle_head *head;
2310         __u32 *idle_blocks;
2311         int count;
2312         int rc;
2313
2314         mutex_lock(&c->ic_idle_mutex);
2315         if (unlikely(c->ic_idle_failed)) {
2316                 rc = -EFAULT;
2317                 goto unlock;
2318         }
2319
2320         idle_blocks = (__u32 *)(c->ic_root_bh->b_data +
2321                                 c->ic_descr->id_root_gap +
2322                                 sizeof(struct dx_countlimit));
2323         /* It is the first idle block. */
2324         if (c->ic_idle_bh == NULL) {
2325                 rc = iam_install_idle_blocks(h, p, bh, idle_blocks, blk);
2326                 goto unlock;
2327         }
2328
2329         head = (struct iam_idle_head *)(c->ic_idle_bh->b_data);
2330         count = le16_to_cpu(head->iih_count);
2331         /* Current ic_idle_bh is full, to be replaced by the leaf. */
2332         if (count == iam_idle_blocks_limit(inode)) {
2333                 rc = iam_install_idle_blocks(h, p, bh, idle_blocks, blk);
2334                 goto unlock;
2335         }
2336
2337         /* Just add to ic_idle_bh. */
2338         rc = iam_txn_add(h, p, c->ic_idle_bh);
2339         if (rc != 0)
2340                 goto unlock;
2341
2342         head->iih_blks[count] = cpu_to_le32(blk);
2343         head->iih_count = cpu_to_le16(count + 1);
2344         rc = iam_txn_dirty(h, p, c->ic_idle_bh);
2345
2346 unlock:
2347         mutex_unlock(&c->ic_idle_mutex);
2348         if (rc != 0)
2349                 CWARN("%s: idle blocks failed, will lose the blk %u\n",
2350                       osd_ino2name(inode), blk);
2351 }
2352
2353 /*
2354  * Delete record under iterator.
2355  *
2356  * precondition:  it_state(it) == IAM_IT_ATTACHED &&
2357  *                it->ii_flags&IAM_IT_WRITE &&
2358  *                it_at_rec(it)
2359  * postcondition: it_state(it) == IAM_IT_ATTACHED ||
2360  *                it_state(it) == IAM_IT_DETACHED
2361  */
2362 int iam_it_rec_delete(handle_t *h, struct iam_iterator *it)
2363 {
2364         int result;
2365         struct iam_leaf *leaf;
2366         struct iam_path *path;
2367
2368         assert_corr(it_state(it) == IAM_IT_ATTACHED &&
2369                     it->ii_flags&IAM_IT_WRITE);
2370         assert_corr(it_at_rec(it));
2371
2372         path = &it->ii_path;
2373         leaf = &path->ip_leaf;
2374
2375         assert_inv(iam_path_check(path));
2376
2377         result = iam_txn_add(h, path, leaf->il_bh);
2378         /*
2379          * no compaction for now.
2380          */
2381         if (result == 0) {
2382                 iam_rec_del(leaf, it->ii_flags&IAM_IT_MOVE);
2383                 result = iam_txn_dirty(h, path, leaf->il_bh);
2384                 if (result == 0 && iam_leaf_at_end(leaf)) {
2385                         struct buffer_head *bh = NULL;
2386                         iam_ptr_t blk;
2387
2388                         blk = iam_index_shrink(h, path, leaf, &bh);
2389                         if (it->ii_flags & IAM_IT_MOVE) {
2390                                 result = iam_it_next(it);
2391                                 if (result > 0)
2392                                         result = 0;
2393                         }
2394
2395                         if (bh != NULL) {
2396                                 iam_recycle_leaf(h, path, bh, blk);
2397                                 brelse(bh);
2398                         }
2399                 }
2400         }
2401         assert_inv(iam_path_check(path));
2402         assert_corr(it_state(it) == IAM_IT_ATTACHED ||
2403                     it_state(it) == IAM_IT_DETACHED);
2404         return result;
2405 }
2406
2407 /*
2408  * Convert iterator to cookie.
2409  *
2410  * precondition:  it_state(it) == IAM_IT_ATTACHED &&
2411  *                iam_path_descr(it->ii_path)->id_key_size <= sizeof(iam_pos_t)
2412  * postcondition: it_state(it) == IAM_IT_ATTACHED
2413  */
2414 iam_pos_t iam_it_store(const struct iam_iterator *it)
2415 {
2416         iam_pos_t result;
2417
2418         assert_corr(it_state(it) == IAM_IT_ATTACHED);
2419         assert_corr(it_at_rec(it));
2420         assert_corr(iam_it_container(it)->ic_descr->id_ikey_size <=
2421                     sizeof result);
2422
2423         result = 0;
2424         return *(iam_pos_t *)iam_it_ikey_get(it, (void *)&result);
2425 }
2426
2427 /*
2428  * Restore iterator from cookie.
2429  *
2430  * precondition:  it_state(it) == IAM_IT_DETACHED && it->ii_flags&IAM_IT_MOVE &&
2431  *                iam_path_descr(it->ii_path)->id_key_size <= sizeof(iam_pos_t)
2432  * postcondition: ergo(result == 0, it_state(it) == IAM_IT_ATTACHED &&
2433  *                                  iam_it_store(it) == pos)
2434  */
2435 int iam_it_load(struct iam_iterator *it, iam_pos_t pos)
2436 {
2437         assert_corr(it_state(it) == IAM_IT_DETACHED &&
2438                 it->ii_flags&IAM_IT_MOVE);
2439         assert_corr(iam_it_container(it)->ic_descr->id_ikey_size <= sizeof pos);
2440         return iam_it_iget(it, (struct iam_ikey *)&pos);
2441 }
2442
2443 /***********************************************************************/
2444 /* invariants                                                          */
2445 /***********************************************************************/
2446
2447 static inline int ptr_inside(void *base, size_t size, void *ptr)
2448 {
2449         return (base <= ptr) && (ptr < base + size);
2450 }
2451
2452 static int iam_frame_invariant(struct iam_frame *f)
2453 {
2454         return
2455                 (f->bh != NULL &&
2456                 f->bh->b_data != NULL &&
2457                 ptr_inside(f->bh->b_data, f->bh->b_size, f->entries) &&
2458                 ptr_inside(f->bh->b_data, f->bh->b_size, f->at) &&
2459                 f->entries <= f->at);
2460 }
2461
2462 static int iam_leaf_invariant(struct iam_leaf *l)
2463 {
2464         return
2465                 l->il_bh != NULL &&
2466                 l->il_bh->b_data != NULL &&
2467                 ptr_inside(l->il_bh->b_data, l->il_bh->b_size, l->il_entries) &&
2468                 ptr_inside(l->il_bh->b_data, l->il_bh->b_size, l->il_at) &&
2469                 l->il_entries <= l->il_at;
2470 }
2471
2472 static int iam_path_invariant(struct iam_path *p)
2473 {
2474         int i;
2475
2476         if (p->ip_container == NULL ||
2477             p->ip_indirect < 0 || p->ip_indirect > DX_MAX_TREE_HEIGHT - 1 ||
2478             p->ip_frame != p->ip_frames + p->ip_indirect ||
2479             !iam_leaf_invariant(&p->ip_leaf))
2480                 return 0;
2481         for (i = 0; i < ARRAY_SIZE(p->ip_frames); ++i) {
2482                 if (i <= p->ip_indirect) {
2483                         if (!iam_frame_invariant(&p->ip_frames[i]))
2484                                 return 0;
2485                 }
2486         }
2487         return 1;
2488 }
2489
2490 int iam_it_invariant(struct iam_iterator *it)
2491 {
2492         return
2493                 (it->ii_state == IAM_IT_DETACHED ||
2494                 it->ii_state == IAM_IT_ATTACHED ||
2495                 it->ii_state == IAM_IT_SKEWED) &&
2496                 !(it->ii_flags & ~(IAM_IT_MOVE | IAM_IT_WRITE)) &&
2497                 ergo(it->ii_state == IAM_IT_ATTACHED ||
2498                 it->ii_state == IAM_IT_SKEWED,
2499                 iam_path_invariant(&it->ii_path) &&
2500                 equi(it_at_rec(it), it->ii_state == IAM_IT_SKEWED));
2501 }
2502
2503 /*
2504  * Search container @c for record with key @k. If record is found, its data
2505  * are moved into @r.
2506  *
2507  * Return values: 0: found, -ENOENT: not-found, -ve: error
2508  */
2509 int iam_lookup(struct iam_container *c, const struct iam_key *k,
2510                struct iam_rec *r, struct iam_path_descr *pd)
2511 {
2512         struct iam_iterator it;
2513         int result;
2514
2515         iam_it_init(&it, c, 0, pd);
2516
2517         result = iam_it_get_exact(&it, k);
2518         if (result == 0)
2519                 /*
2520                  * record with required key found, copy it into user buffer
2521                  */
2522                 iam_reccpy(&it.ii_path.ip_leaf, r);
2523         iam_it_put(&it);
2524         iam_it_fini(&it);
2525         return result;
2526 }
2527
2528 /*
2529  * Insert new record @r with key @k into container @c (within context of
2530  * transaction @h).
2531  *
2532  * Return values: 0: success, -ve: error, including -EEXIST when record with
2533  * given key is already present.
2534  *
2535  * postcondition: ergo(result == 0 || result == -EEXIST,
2536  *                                  iam_lookup(c, k, r2) > 0;
2537  */
2538 int iam_insert(handle_t *h, struct iam_container *c, const struct iam_key *k,
2539                const struct iam_rec *r, struct iam_path_descr *pd)
2540 {
2541         struct iam_iterator it;
2542         int result;
2543
2544         iam_it_init(&it, c, IAM_IT_WRITE, pd);
2545
2546         result = iam_it_get_exact(&it, k);
2547         if (result == -ENOENT)
2548                 result = iam_it_rec_insert(h, &it, k, r);
2549         else if (result == 0)
2550                 result = -EEXIST;
2551         iam_it_put(&it);
2552         iam_it_fini(&it);
2553         return result;
2554 }
2555
2556 /*
2557  * Update record with the key @k in container @c (within context of
2558  * transaction @h), new record is given by @r.
2559  *
2560  * Return values: +1: skip because of the same rec value, 0: success,
2561  * -ve: error, including -ENOENT if no record with the given key found.
2562  */
2563 int iam_update(handle_t *h, struct iam_container *c, const struct iam_key *k,
2564                const struct iam_rec *r, struct iam_path_descr *pd)
2565 {
2566         struct iam_iterator it;
2567         struct iam_leaf *folio;
2568         int result;
2569
2570         iam_it_init(&it, c, IAM_IT_WRITE, pd);
2571
2572         result = iam_it_get_exact(&it, k);
2573         if (result == 0) {
2574                 folio = &it.ii_path.ip_leaf;
2575                 result = iam_leaf_ops(folio)->rec_eq(folio, r);
2576                 if (result == 0)
2577                         iam_it_rec_set(h, &it, r);
2578                 else
2579                         result = 1;
2580         }
2581         iam_it_put(&it);
2582         iam_it_fini(&it);
2583         return result;
2584 }
2585
2586 /*
2587  * Delete existing record with key @k.
2588  *
2589  * Return values: 0: success, -ENOENT: not-found, -ve: other error.
2590  *
2591  * postcondition: ergo(result == 0 || result == -ENOENT,
2592  *                                 !iam_lookup(c, k, *));
2593  */
2594 int iam_delete(handle_t *h, struct iam_container *c, const struct iam_key *k,
2595                struct iam_path_descr *pd)
2596 {
2597         struct iam_iterator it;
2598         int result;
2599
2600         iam_it_init(&it, c, IAM_IT_WRITE, pd);
2601
2602         result = iam_it_get_exact(&it, k);
2603         if (result == 0)
2604                 iam_it_rec_delete(h, &it);
2605         iam_it_put(&it);
2606         iam_it_fini(&it);
2607         return result;
2608 }
2609
2610 int iam_root_limit(int rootgap, int blocksize, int size)
2611 {
2612         int limit;
2613         int nlimit;
2614
2615         limit = (blocksize - rootgap) / size;
2616         nlimit = blocksize / size;
2617         if (limit == nlimit)
2618                 limit--;
2619         return limit;
2620 }