Index: iam/fs/ext3/Makefile
===================================================================
--- iam.orig/fs/ext3/Makefile 2006-09-28 22:11:14.000000000 +0400
-+++ iam/fs/ext3/Makefile 2006-10-03 00:15:55.000000000 +0400
++++ iam/fs/ext3/Makefile 2006-10-03 21:14:47.000000000 +0400
@@ -6,7 +6,7 @@ obj-$(CONFIG_EXT3_FS) += ext3.o
ext3-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o iopen.o \
Index: iam/fs/ext3/iam.c
===================================================================
--- iam.orig/fs/ext3/iam.c 2004-04-06 17:27:52.000000000 +0400
-+++ iam/fs/ext3/iam.c 2006-10-02 19:59:04.000000000 +0400
-@@ -0,0 +1,1326 @@
++++ iam/fs/ext3/iam.c 2006-10-03 21:13:35.000000000 +0400
+@@ -0,0 +1,1335 @@
+/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
+ * vim:expandtab:shiftwidth=8:tabstop=8:
+ *
+static int iam_leaf_check(struct iam_leaf *leaf);
+extern int dx_node_check(struct iam_path *p, struct iam_frame *f);
+
++#if EXT3_INVARIANT
+static int iam_path_check(struct iam_path *p)
+{
+ int i;
+ }
+ return result;
+}
++#endif
+
+static int iam_leaf_load(struct iam_path *path)
+{
+ leaf->il_bh = bh;
+ leaf->il_path = path;
+ err = iam_leaf_ops(leaf)->init(leaf);
-+ assert(ergo(err == 0, iam_leaf_check(leaf)));
++ assert_inv(ergo(err == 0, iam_leaf_check(leaf)));
+ }
+ return err;
+}
+static void iam_leaf_fini(struct iam_leaf *leaf)
+{
+ if (leaf->il_path != NULL) {
-+ assert(ergo(leaf->il_bh != NULL, iam_leaf_check(leaf)));
++ assert_inv(ergo(leaf->il_bh != NULL, iam_leaf_check(leaf)));
+ iam_leaf_ops(leaf)->fini(leaf);
+ if (leaf->il_bh) {
+ brelse(leaf->il_bh);
+ */
+void iam_it_fini(struct iam_iterator *it)
+{
-+ assert(it_state(it) == IAM_IT_DETACHED);
++ assert_corr(it_state(it) == IAM_IT_DETACHED);
+ iam_path_fini(&it->ii_path);
+}
+EXPORT_SYMBOL(iam_it_fini);
+ leaf = &path->ip_leaf;
+ descr = iam_path_descr(path);
+ result = dx_lookup(path);
-+ assert(iam_path_check(path));
++ assert_inv(iam_path_check(path));
+ if (result == 0) {
+ result = iam_leaf_load(path);
-+ assert(ergo(result == 0, iam_leaf_check(leaf)));
++ assert_inv(ergo(result == 0, iam_leaf_check(leaf)));
+ if (result == 0) {
+ if (index)
+ result = iam_leaf_ops(leaf)->
+static int __iam_it_get(struct iam_iterator *it, int index)
+{
+ int result;
-+ assert(it_state(it) == IAM_IT_DETACHED);
++ assert_corr(it_state(it) == IAM_IT_DETACHED);
+
+ iam_it_lock(it);
+ result = iam_path_lookup(&it->ii_path, index);
+ /*
+ * See iam_it_get_exact() for explanation.
+ */
-+ assert(result != -ENOENT);
++ assert_corr(result != -ENOENT);
+ return result;
+}
+
+int iam_it_get(struct iam_iterator *it, const struct iam_key *k)
+{
+ int result;
-+ assert(it_state(it) == IAM_IT_DETACHED);
++ assert_corr(it_state(it) == IAM_IT_DETACHED);
+
+ it->ii_path.ip_ikey_target = NULL;
+ it->ii_path.ip_key_target = k;
+
+ result = __iam_it_get(it, 0);
+
-+ assert(ergo(result > 0, it_keycmp(it, k) == 0));
-+ assert(ergo(result == 0 && it_state(it) == IAM_IT_ATTACHED,
-+ it_keycmp(it, k) <= 0));
++ assert_corr(ergo(result > 0, it_keycmp(it, k) == 0));
++ assert_corr(ergo(result == 0 && it_state(it) == IAM_IT_ATTACHED,
++ it_keycmp(it, k) <= 0));
+ return result;
+}
+EXPORT_SYMBOL(iam_it_get);
+ */
+static int iam_it_iget(struct iam_iterator *it, const struct iam_ikey *k)
+{
-+ assert(it_state(it) == IAM_IT_DETACHED);
++ assert_corr(it_state(it) == IAM_IT_DETACHED);
+
+ it->ii_path.ip_ikey_target = k;
+ return __iam_it_get(it, 1);
+int iam_it_get_at(struct iam_iterator *it, const struct iam_key *k)
+{
+ int result;
-+ assert(it_state(it) == IAM_IT_DETACHED && !(it->ii_flags&IAM_IT_WRITE));
++ assert_corr(it_state(it) == IAM_IT_DETACHED &&
++ !(it->ii_flags&IAM_IT_WRITE));
+ result = iam_it_get(it, k);
+ if (result == 0) {
+ if (it_state(it) != IAM_IT_ATTACHED) {
-+ assert(it_state(it) == IAM_IT_SKEWED);
++ assert_corr(it_state(it) == IAM_IT_SKEWED);
+ result = iam_it_next(it);
+ }
+ }
-+ assert(ergo(result >= 0, it_state(it) == IAM_IT_ATTACHED));
++ assert_corr(ergo(result >= 0, it_state(it) == IAM_IT_ATTACHED));
+ return result;
+}
+EXPORT_SYMBOL(iam_it_get_at);
+ /*
+ * XXX: duplicate lock.
+ */
-+ assert(it_state(dst) == it_state(src));
-+ assert(iam_it_container(dst) == iam_it_container(src));
-+ assert(dst->ii_flags = src->ii_flags);
-+ assert(ergo(it_state(src) == IAM_IT_ATTACHED,
++ assert_corr(it_state(dst) == it_state(src));
++ assert_corr(iam_it_container(dst) == iam_it_container(src));
++ assert_corr(dst->ii_flags == src->ii_flags);
++ assert_corr(ergo(it_state(src) == IAM_IT_ATTACHED,
+ iam_it_rec_get(dst) == iam_it_rec_get(src) &&
+ iam_it_key_get(dst) == iam_it_key_get(src)));
+
+ struct iam_path *path;
+ struct iam_leaf *leaf;
+
-+ assert(it->ii_flags&IAM_IT_MOVE);
-+ assert(it_state(it) == IAM_IT_ATTACHED ||
-+ it_state(it) == IAM_IT_SKEWED);
++ assert_corr(it->ii_flags&IAM_IT_MOVE);
++ assert_corr(it_state(it) == IAM_IT_ATTACHED ||
++ it_state(it) == IAM_IT_SKEWED);
+
+ path = &it->ii_path;
+ leaf = &path->ip_leaf;
+
+ result = 0;
+ if (it_before(it)) {
-+ assert(!iam_leaf_at_end(leaf));
++ assert_corr(!iam_leaf_at_end(leaf));
+ it->ii_state = IAM_IT_ATTACHED;
+ } else {
+ if (!iam_leaf_at_end(leaf))
+ iam_it_put(it);
+ }
+ }
-+ assert(ergo(result == 0, it_state(it) == IAM_IT_ATTACHED));
-+ assert(ergo(result > 0, it_state(it) == IAM_IT_DETACHED));
++ assert_corr(ergo(result == 0, it_state(it) == IAM_IT_ATTACHED));
++ assert_corr(ergo(result > 0, it_state(it) == IAM_IT_DETACHED));
+ return result;
+}
+EXPORT_SYMBOL(iam_it_next);
+ */
+struct iam_rec *iam_it_rec_get(const struct iam_iterator *it)
+{
-+ assert(it_state(it) == IAM_IT_ATTACHED);
-+ assert(it_at_rec(it));
++ assert_corr(it_state(it) == IAM_IT_ATTACHED);
++ assert_corr(it_at_rec(it));
+ return iam_leaf_rec(&it->ii_path.ip_leaf);
+}
+EXPORT_SYMBOL(iam_it_rec_get);
+ struct iam_path *path;
+ struct buffer_head *bh;
+
-+ assert(it_state(it) == IAM_IT_ATTACHED && it->ii_flags&IAM_IT_WRITE);
-+ assert(it_at_rec(it));
++ assert_corr(it_state(it) == IAM_IT_ATTACHED &&
++ it->ii_flags&IAM_IT_WRITE);
++ assert_corr(it_at_rec(it));
+
+ path = &it->ii_path;
+ bh = path->ip_leaf.il_bh;
+static struct iam_ikey *iam_it_ikey_get(const struct iam_iterator *it,
+ struct iam_ikey *ikey)
+{
-+ assert(it_state(it) == IAM_IT_ATTACHED ||
-+ it_state(it) == IAM_IT_SKEWED);
-+ assert(it_at_rec(it));
++ assert_corr(it_state(it) == IAM_IT_ATTACHED ||
++ it_state(it) == IAM_IT_SKEWED);
++ assert_corr(it_at_rec(it));
+ return iam_leaf_ikey(&it->ii_path.ip_leaf, ikey);
+}
+
+ */
+struct iam_key *iam_it_key_get(const struct iam_iterator *it)
+{
-+ assert(it_state(it) == IAM_IT_ATTACHED ||
-+ it_state(it) == IAM_IT_SKEWED);
-+ assert(it_at_rec(it));
++ assert_corr(it_state(it) == IAM_IT_ATTACHED ||
++ it_state(it) == IAM_IT_SKEWED);
++ assert_corr(it_at_rec(it));
+ return iam_leaf_key(&it->ii_path.ip_leaf);
+}
+EXPORT_SYMBOL(iam_it_key_get);
+ */
+int iam_it_key_size(const struct iam_iterator *it)
+{
-+ assert(it_state(it) == IAM_IT_ATTACHED ||
-+ it_state(it) == IAM_IT_SKEWED);
-+ assert(it_at_rec(it));
++ assert_corr(it_state(it) == IAM_IT_ATTACHED ||
++ it_state(it) == IAM_IT_SKEWED);
++ assert_corr(it_at_rec(it));
+ return iam_leaf_key_size(&it->ii_path.ip_leaf);
+}
+EXPORT_SYMBOL(iam_it_key_size);
+ struct iam_container *c;
+ struct inode *obj;
+
-+ assert(iam_leaf_check(leaf));
++ assert_inv(iam_leaf_check(leaf));
+
+ c = iam_leaf_container(leaf);
+
+ if (err == 0)
+ err = ext3_mark_inode_dirty(handle, c->ic_object);
+ }
-+ assert(iam_leaf_check(leaf));
-+ assert(iam_leaf_check(&iam_leaf_path(leaf)->ip_leaf));
-+ assert(iam_path_check(iam_leaf_path(leaf)));
++ assert_inv(iam_leaf_check(leaf));
++ assert_inv(iam_leaf_check(&iam_leaf_path(leaf)->ip_leaf));
++ assert_inv(iam_path_check(iam_leaf_path(leaf)));
+ return err;
+}
+
+ struct iam_leaf *leaf;
+
+ leaf = &path->ip_leaf;
-+ assert(iam_leaf_check(leaf));
-+ assert(iam_path_check(path));
++ assert_inv(iam_leaf_check(leaf));
++ assert_inv(iam_path_check(path));
+ err = iam_txn_add(handle, path, leaf->il_bh);
+ if (err == 0) {
+ if (!iam_leaf_can_add(leaf, k, r)) {
+ err = split_index_node(handle, path);
-+ assert(iam_path_check(path));
++ assert_inv(iam_path_check(path));
+ if (err == 0) {
+ err = iam_new_leaf(handle, leaf);
+ /*
+ err = iam_txn_dirty(handle, path, leaf->il_bh);
+ }
+ }
-+ assert(iam_leaf_check(leaf));
-+ assert(iam_leaf_check(&path->ip_leaf));
-+ assert(iam_path_check(path));
++ assert_inv(iam_leaf_check(leaf));
++ assert_inv(iam_leaf_check(&path->ip_leaf));
++ assert_inv(iam_path_check(path));
+ return err;
+}
+
+
+ path = &it->ii_path;
+
-+ assert(it->ii_flags&IAM_IT_WRITE);
-+ assert(it_state(it) == IAM_IT_ATTACHED ||
-+ it_state(it) == IAM_IT_SKEWED);
-+ assert(ergo(it_state(it) == IAM_IT_ATTACHED, it_keycmp(it, k) <= 0));
-+ assert(ergo(it_before(it), it_keycmp(it, k) > 0));
++ assert_corr(it->ii_flags&IAM_IT_WRITE);
++ assert_corr(it_state(it) == IAM_IT_ATTACHED ||
++ it_state(it) == IAM_IT_SKEWED);
++ assert_corr(ergo(it_state(it) == IAM_IT_ATTACHED,
++ it_keycmp(it, k) <= 0));
++ assert_corr(ergo(it_before(it), it_keycmp(it, k) > 0));
+ result = iam_add_rec(h, path, k, r);
+ if (result == 0)
+ it->ii_state = IAM_IT_ATTACHED;
-+ assert(ergo(result == 0,
-+ it_state(it) == IAM_IT_ATTACHED && it_keycmp(it, k) == 0 &&
-+ !memcmp(iam_it_rec_get(it), r,
-+ iam_it_container(it)->ic_descr->id_rec_size)));
++ assert_corr(ergo(result == 0,
++ it_state(it) == IAM_IT_ATTACHED &&
++ it_keycmp(it, k) == 0 &&
++ !memcmp(iam_it_rec_get(it), r,
++ iam_it_container(it)->ic_descr->id_rec_size)));
+ return result;
+}
+EXPORT_SYMBOL(iam_it_rec_insert);
+ struct iam_leaf *leaf;
+ struct iam_path *path;
+
-+ assert(it_state(it) == IAM_IT_ATTACHED && it->ii_flags&IAM_IT_WRITE);
-+ assert(it_at_rec(it));
++ assert_corr(it_state(it) == IAM_IT_ATTACHED &&
++ it->ii_flags&IAM_IT_WRITE);
++ assert_corr(it_at_rec(it));
+
+ path = &it->ii_path;
+ leaf = &path->ip_leaf;
+
-+ assert(iam_leaf_check(leaf));
-+ assert(iam_path_check(path));
++ assert_inv(iam_leaf_check(leaf));
++ assert_inv(iam_path_check(path));
+
+ result = iam_txn_add(h, path, leaf->il_bh);
+ /*
+ result = 0;
+ }
+ }
-+ assert(iam_leaf_check(leaf));
-+ assert(iam_path_check(path));
-+ assert(it_state(it) == IAM_IT_ATTACHED ||
-+ it_state(it) == IAM_IT_DETACHED);
++ assert_inv(iam_leaf_check(leaf));
++ assert_inv(iam_path_check(path));
++ assert_corr(it_state(it) == IAM_IT_ATTACHED ||
++ it_state(it) == IAM_IT_DETACHED);
+ return result;
+}
+EXPORT_SYMBOL(iam_it_rec_delete);
+{
+ iam_pos_t result;
+
-+ assert(it_state(it) == IAM_IT_ATTACHED);
-+ assert(it_at_rec(it));
-+ assert(iam_it_container(it)->ic_descr->id_ikey_size <= sizeof result);
++ assert_corr(it_state(it) == IAM_IT_ATTACHED);
++ assert_corr(it_at_rec(it));
++ assert_corr(iam_it_container(it)->ic_descr->id_ikey_size <=
++ sizeof result);
+
+ result = 0;
+ return *(iam_pos_t *)iam_it_ikey_get(it, (void *)&result);
+ */
+int iam_it_load(struct iam_iterator *it, iam_pos_t pos)
+{
-+ assert(it_state(it) == IAM_IT_DETACHED && it->ii_flags&IAM_IT_MOVE);
-+ assert(iam_it_container(it)->ic_descr->id_ikey_size <= sizeof pos);
++ assert_corr(it_state(it) == IAM_IT_DETACHED &&
++ it->ii_flags&IAM_IT_MOVE);
++ assert_corr(iam_it_container(it)->ic_descr->id_ikey_size <= sizeof pos);
+ return iam_it_iget(it, (struct iam_ikey *)&pos);
+}
+EXPORT_SYMBOL(iam_it_load);
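
The iam.c code above distinguishes three levels of checking: assert() for cheap, always-on checks, assert_corr() for heavier correctness checks on entry and exit conditions, and assert_inv() for expensive structural invariants such as iam_path_check() and iam_leaf_check(); ergo() is logical implication. The following stand-alone user-space sketch (not part of the patch; DEMO_ASSERT, the ergo() definition and the sorted-array invariant are illustrative stand-ins for J_ASSERT and the kernel code) shows how the three levels compile in and out:

#include <stdio.h>
#include <stdlib.h>

/* Debugging levels, mirroring the lustre_iam.h hunk further down. */
#define EXT3_ASSERT      (1)
#define EXT3_CORRECTNESS (0)
#define EXT3_INVARIANT   (0)

/* User-space stand-in for the kernel's J_ASSERT(). */
#define DEMO_ASSERT(test) \
        ((test) ? (void)0 : (fprintf(stderr, "failed: %s\n", #test), abort()))

#if EXT3_ASSERT
#define assert(test)      DEMO_ASSERT(test)
#else
#define assert(test)      ((void)(test))
#endif

#if EXT3_CORRECTNESS
#define assert_corr(test) DEMO_ASSERT(test)
#else
#define assert_corr(test) do {;} while (0)
#endif

#if EXT3_INVARIANT
#define assert_inv(test)  DEMO_ASSERT(test)
#else
#define assert_inv(test)  do {;} while (0)
#endif

/* Logical implication, as used throughout the assertions above. */
#define ergo(a, b) (!(a) || (b))

#if EXT3_INVARIANT
/* Expensive whole-structure check, compiled only when EXT3_INVARIANT is on. */
static int array_is_sorted(const int *a, int n)
{
        int i;

        for (i = 1; i < n; ++i)
                if (a[i - 1] > a[i])
                        return 0;
        return 1;
}
#endif

int main(void)
{
        int a[] = { 1, 2, 3 };
        int n = 3;

        assert(a[0] > 0 && n > 0);                  /* cheap, always wanted */
        assert_corr(ergo(n > 1, a[0] <= a[n - 1])); /* heavier, development */
        assert_inv(array_is_sorted(a, n));          /* full invariant check */
        printf("all enabled checks passed\n");
        return 0;
}

With EXT3_CORRECTNESS and EXT3_INVARIANT left at 0, the heavier checks cost nothing at run time; flipping either to 1 re-enables them without touching any call site, which is the point of converting assert() calls to assert_corr()/assert_inv() in this patch.
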
Index: iam/fs/ext3/iam_htree.c
===================================================================
--- iam.orig/fs/ext3/iam_htree.c 2004-04-06 17:27:52.000000000 +0400
-+++ iam/fs/ext3/iam_htree.c 2006-09-28 22:11:15.000000000 +0400
-@@ -0,0 +1,665 @@
++++ iam/fs/ext3/iam_htree.c 2006-10-03 21:14:41.000000000 +0400
+@@ -0,0 +1,668 @@
+/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
+ * vim:expandtab:shiftwidth=8:tabstop=8:
+ *
+ struct iam_path *path;
+
+ path = iam_leaf_path(folio);
-+ assert(dx_index_is_compat(path));
-+ assert(path->ip_data != NULL);
++ assert_corr(dx_index_is_compat(path));
++ assert_corr(path->ip_data != NULL);
+ return container_of(path->ip_data, struct iam_path_compat, ipc_descr);
+}
+
+ struct dx_hash_info *hinfo;
+
+ hinfo = getipc(folio)->ipc_hinfo;
-+ assert(hinfo != NULL);
++ assert_corr(hinfo != NULL);
+ result = ext3fs_dirhash(name, namelen, hinfo);
-+ assert(result == 0);
++ assert_corr(result == 0);
+ return hinfo->hash;
+}
+
+ free = le16_to_cpu(ent->rec_len);
+ if (ent_is_live(ent))
+ free -= recsize(ent->name_len);
-+ assert(free >= 0);
++ assert_corr(free >= 0);
+ return free;
+}
+
+ __u32 hash0;
+ __u32 hash1;
+
-+ assert(ent_is_live(e0));
-+ assert(ent_is_live(e1));
++ assert_corr(ent_is_live(e0));
++ assert_corr(ent_is_live(e1));
+
+ hash0 = gethash(folio, e0);
+ hash1 = gethash(folio, e1);
+ return 0;
+}
+
++#if EXT3_CORRECTNESS || EXT3_INVARIANT
+static int iam_leaf_at_rec(const struct iam_leaf *folio)
+{
+ struct ext3_dir_entry_2 *ent;
+ return getstart(folio) <= ent &&
+ ent < gettop(folio) && ent_is_live(ent);
+}
++#endif
+
+/*
+ * Leaf operations.
+ struct iam_ikey *key)
+{
+ __u32 *hash;
-+ assert(iam_leaf_at_rec(l));
++ assert_corr(iam_leaf_at_rec(l));
+
+ hash = (void *)key;
+ *hash = gethash(l, getent(l));
+
+static struct iam_key *iam_htree_key(const struct iam_leaf *l)
+{
-+ assert(iam_leaf_at_rec(l));
++ assert_corr(iam_leaf_at_rec(l));
+
+ return (struct iam_key *)&getent(l)->name;
+}
+
+static int iam_htree_key_size(const struct iam_leaf *l)
+{
-+ assert(iam_leaf_at_rec(l));
++ assert_corr(iam_leaf_at_rec(l));
+
+ return getent(l)->name_len;
+}
+
+static int iam_htree_init(struct iam_leaf *l)
+{
-+ assert(l->il_bh != NULL);
++ assert_corr(l->il_bh != NULL);
+
+ l->il_at = l->il_entries = (void *)getstart(l);
+ return 0;
+
+struct iam_rec *iam_htree_rec(const struct iam_leaf *l)
+{
-+ assert(iam_leaf_at_rec(l));
++ assert_corr(iam_leaf_at_rec(l));
+ return (void *)&getent(l)->inode;
+}
+
+ struct ext3_dir_entry_2 *scan;
+ struct ext3_dir_entry_2 *found;
+
-+ assert(iam_leaf_at_rec(l));
++ assert_corr(iam_leaf_at_rec(l));
+ found = NULL;
+ for (scan = getstart(l); scan < gettop(l); scan = entnext(scan)) {
+ if (scan != getent(l) && ent_is_live(scan) &&
+ (found == NULL || entcmp(l, scan, found) < 0))
+ found = scan;
+ }
-+ assert(ergo(found != NULL,
-+ gethash(l, getent(l)) <= gethash(l, found)));
++ assert_corr(ergo(found != NULL,
++ gethash(l, getent(l)) <= gethash(l, found)));
+ l->il_at = (void *)(found ? : gettop(l));
+}
+
+ result = IAM_LOOKUP_BEFORE;
+ } else {
+ l->il_at = (void *)found;
-+ assert(iam_leaf_at_rec(l));
++ assert_corr(iam_leaf_at_rec(l));
+ }
+ return result;
+}
+
+static void iam_htree_key_set(struct iam_leaf *l, const struct iam_key *k)
+{
-+ assert(iam_leaf_at_rec(l));
++ assert_corr(iam_leaf_at_rec(l));
+ assert(0);
+}
+
+
+ name = (const char *)k;
+
-+ assert(ent_is_live(getent(l)));
++ assert_corr(ent_is_live(getent(l)));
+
+ h0 = gethash(l, getent(l));
+ h1 = hashname(l, name, strlen(name));
+ __u32 *ino;
+ int namelen;
+
-+ assert(iam_leaf_can_add(leaf, k, r));
++ assert_corr(iam_leaf_can_add(leaf, k, r));
+
+ dir = iam_leaf_container(leaf)->ic_object;
+ ino = (void *)r;
+ namelen = strlen(name);
+
+ scan = find_insertion_point(dir, leaf->il_bh, name, namelen);
-+ assert(!IS_ERR(scan));
++ assert_corr(!IS_ERR(scan));
+ scan = split_entry(dir, scan, *ino, EXT3_FT_UNKNOWN, name, namelen);
+ leaf->il_at = (void *)scan;
+}
+ struct ext3_dir_entry_2 *scan;
+ struct ext3_dir_entry_2 *prev;
+
-+ assert(iam_leaf_at_rec(leaf));
++ assert_corr(iam_leaf_at_rec(leaf));
+
+ orig = getent(leaf);
+
+ prev = scan, scan = entnext(scan))
+ ;
+
-+ assert(scan == orig);
++ assert_corr(scan == orig);
+ if (prev != NULL) {
+ prev->rec_len = cpu_to_le16(le16_to_cpu(prev->rec_len) +
+ le16_to_cpu(scan->rec_len));
+ } else {
-+ assert(scan == getstart(leaf));
++ assert_corr(scan == getstart(leaf));
+ scan->inode = 0;
+ }
+ iam_leaf_container(leaf)->ic_object->i_version ++;
+ /*
+ * insertion point moves into new leaf.
+ */
-+ assert(delim_hash >= old_hash);
++ assert_corr(delim_hash >= old_hash);
+ iam_htree_lookup(l, (void *)&old_hash);
+ }
+}
+ int namelen;
+
+ root = data;
-+ assert(path->ip_data != NULL);
++ assert_corr(path->ip_data != NULL);
+ ipc = container_of(path->ip_data, struct iam_path_compat,
+ ipc_descr);
+
+ return check;
+ path->ip_indirect = root->info.indirect_levels;
+
-+ assert((char *)entries == (((char *)&root->info) +
-+ root->info.info_length));
-+ assert(dx_get_limit(entries) == dx_root_limit(path));
++ assert_corr((char *)entries == (((char *)&root->info) +
++ root->info.info_length));
++ assert_corr(dx_get_limit(entries) == dx_root_limit(path));
+
+ ipc->ipc_hinfo->hash_version = root->info.hash_version;
+ ipc->ipc_hinfo->seed = EXT3_SB(sb)->s_hash_seed;
+ }
+ } else {
+ /* non-root index */
-+ assert(entries == data + iam_path_descr(path)->id_node_gap);
-+ assert(dx_get_limit(entries) == dx_node_limit(path));
++ assert_corr(entries ==
++ data + iam_path_descr(path)->id_node_gap);
++ assert_corr(dx_get_limit(entries) == dx_node_limit(path));
+ }
+ frame->entries = frame->at = entries;
+ return 0;
+{
+ struct dx_node *node;
+
-+ assert(!root);
++ assert_corr(!root);
+
+ node = (void *)bh->b_data;
+ node->fake.rec_len = cpu_to_le16(c->ic_object->i_sb->s_blocksize);
+ struct buffer_head *bh;
+ const struct dx_root *root;
+
-+ assert(c->ic_object != NULL);
++ assert_corr(c->ic_object != NULL);
+
+ result = iam_node_read(c, iam_htree_root_ptr(c), NULL, &bh);
+ if (result == 0) {
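
iam_htree keeps the existing ext3 directory-entry format in its leaf blocks: an entry is live while its inode field is non-zero, and the reusable space in an entry is its rec_len minus the rounded size a live name actually needs, which is what the free-space computation above (rec_len, minus recsize(name_len) for live entries) relies on. A stand-alone sketch of that accounting (the struct layout, the 4-byte rounding in rec_size() and all demo_* names are assumptions for illustration, not the on-disk format):

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for struct ext3_dir_entry_2 (host byte order,
 * no name[] tail; the real entries are little-endian on disk). */
struct demo_dirent {
        uint32_t inode;          /* 0 means the entry was deleted ("dead") */
        uint16_t rec_len;        /* bytes this entry owns inside the block */
        uint8_t  name_len;
};

/* Bytes a live name of name_len characters actually needs, rounded the
 * way ext3 rounds directory records (8-byte header, 4-byte multiples);
 * the exact rounding is an assumption of this sketch. */
static unsigned rec_size(unsigned name_len)
{
        return (name_len + 8 + 3) & ~3U;
}

static int ent_is_live(const struct demo_dirent *ent)
{
        return ent->inode != 0;
}

/* Reusable bytes in an entry: everything for a dead entry, the slack
 * after the rounded name for a live one. */
static unsigned ent_free(const struct demo_dirent *ent)
{
        unsigned free = ent->rec_len;

        if (ent_is_live(ent))
                free -= rec_size(ent->name_len);
        return free;
}

int main(void)
{
        struct demo_dirent live = { .inode = 12, .rec_len = 40, .name_len = 9 };
        struct demo_dirent dead = { .inode = 0,  .rec_len = 40, .name_len = 9 };

        printf("live entry: %u reusable bytes\n", ent_free(&live));
        printf("dead entry: %u reusable bytes\n", ent_free(&dead));
        return 0;
}
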
Index: iam/fs/ext3/iam_lfix.c
===================================================================
--- iam.orig/fs/ext3/iam_lfix.c 2004-04-06 17:27:52.000000000 +0400
-+++ iam/fs/ext3/iam_lfix.c 2006-10-02 16:34:29.000000000 +0400
-@@ -0,0 +1,673 @@
++++ iam/fs/ext3/iam_lfix.c 2006-10-03 21:14:08.000000000 +0400
+@@ -0,0 +1,675 @@
+/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
+ * vim:expandtab:shiftwidth=8:tabstop=8:
+ *
+
+static void lentry_count_set(struct iam_leaf *leaf, unsigned count)
+{
-+ assert(0 <= count && count <= leaf_count_limit(leaf));
++ assert_corr(0 <= count && count <= leaf_count_limit(leaf));
+ iam_get_head(leaf)->ill_count = cpu_to_le16(count);
+}
+
+static struct iam_lentry *iam_lfix_get_end(const struct iam_leaf *l);
+
++#if EXT3_CORRECTNESS || EXT3_INVARIANT
+static int iam_leaf_at_rec(const struct iam_leaf *folio)
+{
+ return
+ iam_get_lentries(folio) <= folio->il_at &&
+ folio->il_at < iam_lfix_get_end(folio);
+}
++#endif
+
+static struct iam_ikey *iam_lfix_ikey(const struct iam_leaf *l,
+ struct iam_ikey *key)
+{
+ void *ie = l->il_at;
-+ assert(iam_leaf_at_rec(l));
++ assert_corr(iam_leaf_at_rec(l));
+ return (struct iam_ikey*)ie;
+}
+
+static struct iam_key *iam_lfix_key(const struct iam_leaf *l)
+{
+ void *ie = l->il_at;
-+ assert(iam_leaf_at_rec(l));
++ assert_corr(iam_leaf_at_rec(l));
+ return (struct iam_key*)ie;
+}
+
+
+ esize = iam_lfix_entry_size(l);
+ diff = (void *)e1 - (void *)e2;
-+ assert(diff / esize * esize == diff);
++ assert_corr(diff / esize * esize == diff);
+ return diff / esize;
+}
+
+ struct iam_leaf_head *ill;
+ int count;
+
-+ assert(l->il_bh != NULL);
++ assert_corr(l->il_bh != NULL);
+
+ ill = iam_get_head(l);
+ count = le16_to_cpu(ill->ill_count);
+struct iam_rec *iam_lfix_rec(const struct iam_leaf *l)
+{
+ void *e = l->il_at;
-+ assert(iam_leaf_at_rec(l));
++ assert_corr(iam_leaf_at_rec(l));
+ return e + iam_leaf_descr(l)->id_key_size;
+}
+
+static void iam_lfix_next(struct iam_leaf *l)
+{
-+ assert(iam_leaf_at_rec(l));
++ assert_corr(iam_leaf_at_rec(l));
+ l->il_at = iam_lfix_shift(l, l->il_at, 1);
+}
+
+ */
+ while (iam_lfix_shift(l, p, 1) != q) {
+ m = iam_lfix_shift(l, p, iam_lfix_diff(l, q, p) / 2);
-+ assert(p < m && m < q);
++ assert_corr(p < m && m < q);
+ (lfix_keycmp(c, iam_leaf_key_at(m), k) <= 0 ? p : q) = m;
+ }
-+ assert(lfix_keycmp(c, iam_leaf_key_at(p), k) <= 0 &&
-+ lfix_keycmp(c, k, iam_leaf_key_at(q)) < 0);
++ assert_corr(lfix_keycmp(c, iam_leaf_key_at(p), k) <= 0 &&
++ lfix_keycmp(c, k, iam_leaf_key_at(q)) < 0);
+ /*
+ * skip over records with duplicate keys.
+ */
+ }
+ l->il_at = p;
+ }
-+ assert(iam_leaf_at_rec(l));
++ assert_corr(iam_leaf_at_rec(l));
+
+ if (lfix_keycmp(c, iam_leaf_key_at(l->il_at), k) == 0)
+ result = IAM_LOOKUP_EXACT;
+
+static void iam_lfix_key_set(struct iam_leaf *l, const struct iam_key *k)
+{
-+ assert(iam_leaf_at_rec(l));
++ assert_corr(iam_leaf_at_rec(l));
+ memcpy(iam_leaf_key_at(l->il_at), k, iam_leaf_descr(l)->id_key_size);
+}
+
+
+static void iam_lfix_rec_set(struct iam_leaf *l, const struct iam_rec *r)
+{
-+ assert(iam_leaf_at_rec(l));
++ assert_corr(iam_leaf_at_rec(l));
+ iam_reccpy(iam_leaf_path(l), iam_lfix_rec(l), r);
+}
+
+ ptrdiff_t diff;
+ int count;
+
-+ assert(iam_leaf_can_add(leaf, k, r));
++ assert_corr(iam_leaf_can_add(leaf, k, r));
+
+ count = lentry_count_get(leaf);
+ /*
+ * Another exceptional case: insertion with the key
+ * less than least key in the leaf.
+ */
-+ assert(cur == leaf->il_entries);
++ assert_corr(cur == leaf->il_entries);
+
+ start = leaf->il_at;
+ diff = (void *)end - (void *)start;
-+ assert(diff >= 0);
++ assert_corr(diff >= 0);
+ memmove(iam_lfix_shift(leaf, start, 1), start, diff);
+ }
+ lentry_count_set(leaf, count + 1);
+ iam_lfix_key_set(leaf, k);
+ iam_lfix_rec_set(leaf, r);
-+ assert(iam_leaf_at_rec(leaf));
++ assert_corr(iam_leaf_at_rec(leaf));
+}
+
+static void iam_lfix_rec_del(struct iam_leaf *leaf, int shift)
+ int count;
+ ptrdiff_t diff;
+
-+ assert(iam_leaf_at_rec(leaf));
++ assert_corr(iam_leaf_at_rec(leaf));
+
+ count = lentry_count_get(leaf);
+ end = iam_lfix_get_end(leaf);
+ /*
+ * init cannot fail, as node was just initialized.
+ */
-+ assert(result == 0);
++ assert_corr(result == 0);
+ l->il_at = iam_lfix_shift(l, iam_get_lentries(l), shift);
+ }
+
+ entries = frame->entries;
+
+ dx_set_count(entries, 2);
-+ assert(dx_get_limit(entries) == dx_root_limit(path));
++ assert_corr(dx_get_limit(entries) == dx_root_limit(path));
+
+ root = (void *)frame->bh->b_data;
-+ assert(le64_to_cpu(root->ilr_magic) == IAM_LFIX_ROOT_MAGIC);
++ assert_corr(le64_to_cpu(root->ilr_magic) == IAM_LFIX_ROOT_MAGIC);
+ root->ilr_indirect_levels ++;
+ frame->at = entries = iam_entry_shift(path, entries, 1);
+ memset(iam_ikey_at(path, entries), 0,
+ struct buffer_head *bh;
+ const struct iam_lfix_root *root;
+
-+ assert(c->ic_object != NULL);
++ assert_corr(c->ic_object != NULL);
+
+ result = iam_node_read(c, iam_lfix_root_ptr(c), NULL, &bh);
+ if (result == 0) {
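
iam_lfix leaves hold fixed-size records: a small header carrying a count, then count entries of id_key_size key bytes immediately followed by id_rec_size record bytes, kept in key order; insertion shifts the tail of the array up by one entry with memmove(), as the add path above does. A stand-alone toy model of that layout (sizes, types and the demo_* names are illustrative; the real code binary-searches over the entries rather than scanning linearly):

#include <assert.h>
#include <stdio.h>
#include <string.h>

#define DEMO_LEAF_CAP 8

/* One fixed-size leaf entry: key bytes immediately followed by record
 * bytes; here both are given concrete toy types. */
struct demo_entry {
        int  key;
        char rec[4];
};

/* The leaf: a count up front, then count sorted entries. */
struct demo_leaf {
        int               count;
        struct demo_entry entries[DEMO_LEAF_CAP];
};

static int demo_can_add(const struct demo_leaf *l)
{
        return l->count < DEMO_LEAF_CAP;
}

/* Insert keeping key order: find the slot, shift the tail up by one
 * entry with memmove(), then store key and record. */
static void demo_add(struct demo_leaf *l, int key, const char *rec)
{
        int at;

        assert(demo_can_add(l));
        for (at = 0; at < l->count && l->entries[at].key <= key; ++at)
                ;
        memmove(&l->entries[at + 1], &l->entries[at],
                (l->count - at) * sizeof(struct demo_entry));
        l->entries[at].key = key;
        memcpy(l->entries[at].rec, rec, sizeof(l->entries[at].rec));
        l->count++;
}

int main(void)
{
        struct demo_leaf l = { .count = 0 };
        int i;

        demo_add(&l, 30, "abc");
        demo_add(&l, 10, "def");
        demo_add(&l, 20, "ghi");
        for (i = 0; i < l.count; ++i)
                printf("%d %.3s\n", l.entries[i].key, l.entries[i].rec);
        return 0;
}
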
Index: iam/fs/ext3/iam_lvar.c
===================================================================
--- iam.orig/fs/ext3/iam_lvar.c 2004-04-06 17:27:52.000000000 +0400
-+++ iam/fs/ext3/iam_lvar.c 2006-10-02 16:33:55.000000000 +0400
-@@ -0,0 +1,900 @@
++++ iam/fs/ext3/iam_lvar.c 2006-10-03 21:14:26.000000000 +0400
+@@ -0,0 +1,902 @@
+/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
+ * vim:expandtab:shiftwidth=8:tabstop=8:
+ *
+ int used;
+
+ used = h_used(hdr) + adj;
-+ assert(sizeof *hdr <= used && used <= blocksize(leaf));
++ assert_corr(sizeof *hdr <= used && used <= blocksize(leaf));
+ hdr->vlh_used = cpu_to_le16(used);
+}
+
+ return lentry_lvar(l->il_at);
+}
+
++#if EXT3_CORRECTNESS || EXT3_INVARIANT
+static int n_at_rec(const struct iam_leaf *folio)
+{
+ return
+ n_start(folio) <= lentry_lvar(folio->il_at) &&
+ lentry_lvar(folio->il_at) < n_end(folio);
+}
++#endif
+
+static struct iam_ikey *lvar_ikey(const struct iam_leaf *l,
+ struct iam_ikey *key)
+{
+ lvar_hash_t *hash;
+
-+ assert(n_at_rec(l));
++ assert_corr(n_at_rec(l));
+
+ hash = (void *)key;
+ *hash = e_hash(n_cur(l));
+ int used;
+ struct lvar_leaf_header *head;
+
-+ assert(l->il_bh != NULL);
++ assert_corr(l->il_bh != NULL);
+
+ head = n_head(l);
+ used = h_used(head);
+
+struct iam_rec *lvar_rec(const struct iam_leaf *l)
+{
-+ assert(n_at_rec(l));
++ assert_corr(n_at_rec(l));
+ return e_rec(n_cur(l));
+}
+
+static void lvar_next(struct iam_leaf *l)
+{
-+ assert(n_at_rec(l));
++ assert_corr(n_at_rec(l));
+ l->il_at = lvar_lentry(e_next(l, n_cur(l)));
+}
+
+ } else {
+ leaf->il_at = lvar_lentry(found);
+ result = IAM_LOOKUP_OK;
-+ assert(n_at_rec(leaf));
++ assert_corr(n_at_rec(leaf));
+ }
+ return result;
+}
+
+static void lvar_key_set(struct iam_leaf *l, const struct iam_key *k)
+{
-+ assert(n_at_rec(l));
-+ assert(strlen(kchar(k)) == e_keysize(n_cur(l)));
++ assert_corr(n_at_rec(l));
++ assert_corr(strlen(kchar(k)) == e_keysize(n_cur(l)));
+ memcpy(e_key(n_cur(l)), k, e_keysize(n_cur(l)));
+}
+
+
+static void lvar_rec_set(struct iam_leaf *l, const struct iam_rec *r)
+{
-+ assert(n_at_rec(l));
++ assert_corr(n_at_rec(l));
+ iam_reccpy(iam_leaf_path(l), e_rec(n_cur(l)), r);
+}
+
+ void *start;
+ ptrdiff_t diff;
+
-+ assert(lvar_can_add(leaf, k, r));
++ assert_corr(lvar_can_add(leaf, k, r));
+
+ key = kchar(k);
+ ksize = strlen(key);
+ * Another exceptional case: insertion with the key
+ * less than least key in the leaf.
+ */
-+ assert(leaf->il_at == leaf->il_entries);
++ assert_corr(leaf->il_at == leaf->il_entries);
+
+ start = leaf->il_at;
+ diff = PDIFF(end, start);
-+ assert(diff >= 0);
++ assert_corr(diff >= 0);
+ memmove(start + shift, start, diff);
+ }
+ h_used_adj(leaf, n_head(leaf), shift);
+ key, ksize));
+ lvar_key_set(leaf, k);
+ lvar_rec_set(leaf, r);
-+ assert(n_at_rec(leaf));
++ assert_corr(n_at_rec(leaf));
+}
+
+static void lvar_rec_del(struct iam_leaf *leaf, int shift)
+ void *end;
+ int nob;
+
-+ assert(n_at_rec(leaf));
++ assert_corr(n_at_rec(leaf));
+
+ end = n_end(leaf);
+ next = e_next(leaf, n_cur(leaf));
+ hdr = (void *)new_leaf->b_data;
+
+ first_to_move = find_pivot(leaf, &last_to_stay);
-+ assert(last_to_stay != NULL);
-+ assert(e_next(leaf, last_to_stay) == first_to_move);
++ assert_corr(last_to_stay != NULL);
++ assert_corr(e_next(leaf, last_to_stay) == first_to_move);
+
+ hash = e_hash(first_to_move);
+ if (hash == e_hash(last_to_stay))
+ h_used_adj(leaf, hdr, tomove);
+ h_used_adj(leaf, n_head(leaf), -tomove);
+
-+ assert(n_end(leaf) == first_to_move);
++ assert_corr(n_end(leaf) == first_to_move);
+
+ /*
+ * Insert pointer to the new node (together with the least key in
+ /*
+ * init cannot fail, as node was just initialized.
+ */
-+ assert(result == 0);
++ assert_corr(result == 0);
+ leaf->il_at = ((void *)leaf->il_at) + shift;
+ }
+}
+ entries = frame->entries;
+
+ dx_set_count(entries, 2);
-+ assert(dx_get_limit(entries) == dx_root_limit(path));
++ assert_corr(dx_get_limit(entries) == dx_root_limit(path));
+
+ root = (void *)frame->bh->b_data;
-+ assert(le64_to_cpu(root->vr_magic) == IAM_LVAR_ROOT_MAGIC);
++ assert_corr(le64_to_cpu(root->vr_magic) == IAM_LVAR_ROOT_MAGIC);
+ root->vr_indirect_levels ++;
+ frame->at = entries = iam_entry_shift(path, entries, 1);
+ memset(iam_ikey_at(path, entries), 0,
+ *
+ * XXX: this key is hard-coded to be a sequence of 0's.
+ */
-+ assert(*(lvar_hash_t *)entry == 0);
++ assert_corr(*(lvar_hash_t *)entry == 0);
+ entry += sizeof(lvar_hash_t);
+ /* now @entry points to <ptr> */
+ if (ptrsize == 4)
+ int result;
+ unsigned long bsize;
+
-+ assert(obj->i_size == 0);
++ assert_corr(obj->i_size == 0);
+
+ sb = obj->i_sb;
+ bsize = sb->s_blocksize;
+ struct buffer_head *bh;
+ const struct lvar_root *root;
+
-+ assert(c->ic_object != NULL);
++ assert_corr(c->ic_object != NULL);
+
+ result = iam_node_read(c, lvar_root_ptr(c), NULL, &bh);
+ if (result == 0) {
Index: iam/fs/ext3/namei.c
===================================================================
--- iam.orig/fs/ext3/namei.c 2006-09-28 22:11:15.000000000 +0400
-+++ iam/fs/ext3/namei.c 2006-10-02 22:39:52.000000000 +0400
++++ iam/fs/ext3/namei.c 2006-10-03 20:59:58.000000000 +0400
@@ -24,81 +24,6 @@
* Theodore Ts'o, 2002
*/
-}
-
-static inline unsigned dx_node_limit(struct iam_path *p)
-+int dx_index_is_compat(struct iam_path *path)
- {
+-{
- struct iam_descr *param = path_descr(p);
- unsigned entry_space = path_obj(p)->i_sb->s_blocksize -
- param->id_node_gap;
- return entry_space / (param->id_key_size + param->id_ptr_size);
+-}
+-
+-static inline int dx_index_is_compat(struct iam_path *path)
++int dx_index_is_compat(struct iam_path *path)
+ {
+- return path_descr(path) == &htree_compat_param;
+ return iam_path_descr(path) == &iam_htree_compat_param;
}
--static inline int dx_index_is_compat(struct iam_path *path)
--{
-- return path_descr(path) == &htree_compat_param;
--}
--
-static struct iam_entry *dx_get_entries(struct iam_path *path, void *data,
- int root)
-{
- (root ?
- path_descr(path)->id_root_gap : path_descr(path)->id_node_gap);
-}
--
+
-static struct iam_entry *dx_node_get_entries(struct iam_path *path,
- struct iam_frame *frame)
-{
- return dx_get_entries(path,
- frame->bh->b_data, frame == path->ip_frames);
-}
-
+-
-static int dx_node_check(struct iam_path *p, struct iam_frame *f)
+int dx_node_check(struct iam_path *p, struct iam_frame *f)
{
}
/*
-@@ -796,602 +228,120 @@ struct stats dx_show_entries(struct dx_h
- if (bcount)
- printk("%snames %u, fullness %u (%u%%)\n", levels?"":" ",
- names, space/bcount,(space/bcount)*100/blocksize);
-- return (struct stats) { names, space, bcount};
--}
--#endif /* DX_DEBUG */
--
+@@ -800,598 +232,116 @@ struct stats dx_show_entries(struct dx_h
+ }
+ #endif /* DX_DEBUG */
+
-static int dx_lookup(struct iam_path *path)
-{
- u32 ptr;
-}
-
-static int split_leaf_node(handle_t *handle, struct iam_path *path)
--{
++int dx_lookup(struct iam_path *path)
+ {
- struct inode *dir = path_obj(path);
- unsigned continued = 0;
- struct buffer_head *bh2;
- .hinfo = &hinfo
- };
- int err, i;
--
++ u32 ptr;
++ int err = 0;
++ int i;
+
- iam_path_init(path, c, &hc);
- for (i = 0; i < ARRAY_SIZE(path->ip_key_scratch); ++i)
- path->ip_key_scratch[i] =
- err = dx_lookup(path);
- if (err)
- goto errout;
--
++ struct iam_descr *param;
++ struct iam_frame *frame;
++ struct iam_container *c;
+
- err = iam_leaf_insert(handle, path, k, r);
--
++ param = iam_path_descr(path);
++ c = path->ip_container;
+
- if (err != -ENOSPC)
- goto errout;
--
++ for (frame = path->ip_frames, i = 0,
++ ptr = param->id_ops->id_root_ptr(c);
++ i <= path->ip_indirect;
++ ptr = dx_get_block(path, frame->at), ++frame, ++i) {
++ struct iam_entry *entries;
++ struct iam_entry *p;
++ struct iam_entry *q;
++ struct iam_entry *m;
++ unsigned count;
+
- err = split_index_node(handle, path);
- if (err)
- goto errout;
--
++ err = param->id_ops->id_node_read(c, (iam_ptr_t)ptr, NULL,
++ &frame->bh);
++ if (err != 0)
++ break;
+
- err = split_leaf_node(handle, path);
- if (err)
- goto errout;
-errout:
- iam_path_fini(path);
- return(err);
-+ return (struct stats) { names, space, bcount};
- }
-+#endif /* DX_DEBUG */
+-}
++ err = param->id_ops->id_node_check(path, frame);
++ if (err != 0)
++ break;
-EXPORT_SYMBOL(iam_insert);
-static int iam_leaf_delete(handle_t *handle, struct iam_path *path,
- struct iam_key *k)
-+int dx_lookup(struct iam_path *path)
- {
+-{
- struct iam_leaf leaf;
- struct iam_leaf_entry *p, *q;
- int err, count;
--
++ err = param->id_ops->id_node_load(path, frame);
++ if (err != 0)
++ break;
+
- err = iam_leaf_init(path, &leaf);
- if (err)
- goto errout;
- err = iam_leaf_lookup(path, &leaf, k);
- if (err)
- goto errout;
--
++ assert_inv(dx_node_check(path, frame));
+
- count = dx_get_count((struct iam_entry*)leaf.entries);
- /*delete the k to leaf entries*/
- p = iam_leaf_entry_shift(path, leaf.at, 1);
- p = iam_leaf_entry_shift(path, p, 1);
- }
- dx_set_count((struct iam_entry*)leaf.entries, count - 1);
-+ u32 ptr;
-+ int err = 0;
-+ int i;
++ entries = frame->entries;
++ count = dx_get_count(entries);
++ assert_corr(count && count <= dx_get_limit(entries));
++ p = iam_entry_shift(path, entries, 1);
++ q = iam_entry_shift(path, entries, count - 1);
++ while (p <= q) {
++ m = iam_entry_shift(path,
++ p, iam_entry_diff(path, q, p) / 2);
++ dxtrace(printk("."));
++ if (iam_ikeycmp(c, iam_ikey_at(path, m),
++ path->ip_ikey_target) > 0)
++ q = iam_entry_shift(path, m, -1);
++ else
++ p = iam_entry_shift(path, m, +1);
++ }
- err = ext3_journal_dirty_metadata(handle, leaf.bh);
- if (err)
- ext3_std_error(path_obj(path)->i_sb, err);
-errout:
- iam_leaf_fini(&leaf);
-- return err;
--}
-+ struct iam_descr *param;
-+ struct iam_frame *frame;
-+ struct iam_container *c;
++ frame->at = iam_entry_shift(path, p, -1);
++ if (1) { // linear search cross check
++ unsigned n = count - 1;
++ struct iam_entry *at;
++
++ at = entries;
++ while (n--) {
++ dxtrace(printk(","));
++ at = iam_entry_shift(path, at, +1);
++ if (iam_ikeycmp(c, iam_ikey_at(path, at),
++ path->ip_ikey_target) > 0) {
++ if (at != iam_entry_shift(path, frame->at, 1)) {
++ BREAKPOINT();
++ printk(KERN_EMERG "%i\n",
++ iam_ikeycmp(c, iam_ikey_at(path, at),
++ path->ip_ikey_target));
++ }
++ at = iam_entry_shift(path, at, -1);
++ break;
++ }
++ }
++ assert_corr(at == frame->at);
++ }
++ }
++ if (err != 0)
++ iam_path_fini(path);
++ path->ip_frame = --frame;
+ return err;
+ }
--/*
+ /*
- * Delete existing record with key @k.
- *
- * Return values: 0: success, -ENOENT: not-found, -ve: other error.
-- *
++ * Probe for a directory leaf block to search.
+ *
- * postcondition: ergo(result == 0 || result == -ENOENT,
- * !iam_lookup(c, k, *));
-- */
++ * dx_probe can return ERR_BAD_DX_DIR, which means there was a format
++ * error in the directory index, and the caller should fall back to
++ * searching the directory normally. The callers of dx_probe **MUST**
++ * check for this error code, and make sure it never gets reflected
++ * back to userspace.
+ */
-int iam_delete(handle_t *h, struct iam_container *c, struct iam_key *k)
-{
- struct dx_hash_info hinfo;
- .hinfo = &hinfo
- };
- int err, i;
-+ param = iam_path_descr(path);
-+ c = path->ip_container;
-+
-+ for (frame = path->ip_frames, i = 0,
-+ ptr = param->id_ops->id_root_ptr(c);
-+ i <= path->ip_indirect;
-+ ptr = dx_get_block(path, frame->at), ++frame, ++i) {
-+ struct iam_entry *entries;
-+ struct iam_entry *p;
-+ struct iam_entry *q;
-+ struct iam_entry *m;
-+ unsigned count;
-
+-
- iam_path_init(path, c, &hc);
- for (i = 0; i < ARRAY_SIZE(path->ip_key_scratch); ++i)
- path->ip_key_scratch[i] =
- err = dx_lookup(path);
- if (err)
- goto errout;
-+ err = param->id_ops->id_node_read(c, (iam_ptr_t)ptr, NULL,
-+ &frame->bh);
-+ if (err != 0)
-+ break;
-
+-
- err = iam_leaf_delete(h, path, k);
-errout:
- iam_path_fini(path);
- return err;
-}
-+ err = param->id_ops->id_node_check(path, frame);
-+ if (err != 0)
-+ break;
-
+-
-EXPORT_SYMBOL(iam_delete);
-+ err = param->id_ops->id_node_load(path, frame);
-+ if (err != 0)
-+ break;
-
+-
-static int iam_leaf_update(handle_t *handle, struct iam_path *path,
- struct iam_key *k, struct iam_rec *r)
--{
++static int dx_probe(struct dentry *dentry, struct inode *dir,
++ struct dx_hash_info *hinfo, struct iam_path *path)
+ {
- struct iam_leaf leaf;
-- int err;
-+ assert(dx_node_check(path, frame));
+ int err;
++ struct iam_path_compat *ipc;
- err = iam_leaf_init(path, &leaf);
- if (err)
- err = iam_leaf_lookup(path, &leaf, k);
- if (err)
- goto errout;
-+ entries = frame->entries;
-+ count = dx_get_count(entries);
-+ assert(count && count <= dx_get_limit(entries));
-+ p = iam_entry_shift(path, entries, 1);
-+ q = iam_entry_shift(path, entries, count - 1);
-+ while (p <= q) {
-+ m = iam_entry_shift(path,
-+ p, iam_entry_diff(path, q, p) / 2);
-+ dxtrace(printk("."));
-+ if (iam_ikeycmp(c, iam_ikey_at(path, m),
-+ path->ip_ikey_target) > 0)
-+ q = iam_entry_shift(path, m, -1);
-+ else
-+ p = iam_entry_shift(path, m, +1);
-+ }
-
+-
- memcpy(iam_leaf_entry_at(path, leaf.at), r, path_descr(path)->id_rec_size);
- memcpy(iam_leaf_key_at(path, leaf.at), k, path_descr(path)->id_key_size);
-+ frame->at = iam_entry_shift(path, p, -1);
-+ if (1) { // linear search cross check
-+ unsigned n = count - 1;
-+ struct iam_entry *at;
++ assert_corr(path->ip_data != NULL);
++ ipc = container_of(path->ip_data, struct iam_path_compat, ipc_descr);
++ ipc->ipc_dentry = dentry;
++ ipc->ipc_hinfo = hinfo;
- err = ext3_journal_dirty_metadata(handle, leaf.bh);
- if (err)
- ext3_std_error(path_obj(path)->i_sb, err);
-errout:
- iam_leaf_fini(&leaf);
-+ at = entries;
-+ while (n--) {
-+ dxtrace(printk(","));
-+ at = iam_entry_shift(path, at, +1);
-+ if (iam_ikeycmp(c, iam_ikey_at(path, at),
-+ path->ip_ikey_target) > 0) {
-+ if (at != iam_entry_shift(path, frame->at, 1)) {
-+ BREAKPOINT();
-+ printk(KERN_EMERG "%i\n",
-+ iam_ikeycmp(c, iam_ikey_at(path, at),
-+ path->ip_ikey_target));
-+ }
-+ at = iam_entry_shift(path, at, -1);
-+ break;
-+ }
-+ }
-+ assert(at == frame->at);
-+ }
-+ }
-+ if (err != 0)
-+ iam_path_fini(path);
-+ path->ip_frame = --frame;
- return err;
- }
-+
- /*
+- return err;
+-}
+-/*
- * Replace existing record with key @k, or insert new one. New record data are
- * in @r.
- *
- * Return values: 0: success, -ve: error.
-+ * Probe for a directory leaf block to search.
- *
+- *
- * postcondition: ergo(result == 0, iam_lookup(c, k, r2) > 0 &&
- * !memcmp(r, r2, c->ic_descr->id_rec_size));
-+ * dx_probe can return ERR_BAD_DX_DIR, which means there was a format
-+ * error in the directory index, and the caller should fall back to
-+ * searching the directory normally. The callers of dx_probe **MUST**
-+ * check for this error code, and make sure it never gets reflected
-+ * back to userspace.
- */
+- */
-int iam_update(handle_t *h, struct iam_container *c,
- struct iam_key *k, struct iam_rec *r)
-+static int dx_probe(struct dentry *dentry, struct inode *dir,
-+ struct dx_hash_info *hinfo, struct iam_path *path)
- {
+-{
- struct dx_hash_info hinfo;
- struct iam_path_compat cpath;
- struct iam_path *path = &cpath.ipc_path;
- for (i = 0; i < ARRAY_SIZE(path->ip_key_scratch); ++i)
- path->ip_key_scratch[i] =
- (struct iam_key *)&cpath.ipc_scrach[i];
-- err = dx_lookup(path);
++ assert_corr(dx_index_is_compat(path));
+ err = dx_lookup(path);
- if (err)
- goto errout;
-+ int err;
-+ struct iam_path_compat *ipc;
-
+-
- err = iam_leaf_update(h, path, k, r);
-errout:
- iam_path_fini(path);
-+ assert(path->ip_data != NULL);
-+ ipc = container_of(path->ip_data, struct iam_path_compat, ipc_descr);
-+ ipc->ipc_dentry = dentry;
-+ ipc->ipc_hinfo = hinfo;
-+
-+ assert(dx_index_is_compat(path));
-+ err = dx_lookup(path);
-+ assert(err != 0 || path->ip_frames[path->ip_indirect].bh != NULL);
++ assert_corr(err != 0 || path->ip_frames[path->ip_indirect].bh != NULL);
return err;
}
p = path->ip_frame;
/*
* Find the next leaf page by incrementing the frame pointer.
-@@ -1438,28 +387,34 @@ static int ext3_htree_next_block(struct
+@@ -1438,39 +387,55 @@ static int ext3_htree_next_block(struct
--p;
}
if (err != 0)
return err; /* Failure */
++p;
-@@ -1471,6 +426,16 @@ static int ext3_htree_next_block(struct
+ brelse (p->bh);
+ p->bh = bh;
+ p->at = p->entries = dx_node_get_entries(path, p);
+- assert(dx_node_check(path, p));
++ assert_inv(dx_node_check(path, p));
+ }
return 1;
}
+ struct iam_entry *new = iam_entry_shift(path, frame->at, +1);
int count = dx_get_count(entries);
- assert(count < dx_get_limit(entries));
+- assert(count < dx_get_limit(entries));
- assert(old < iam_entry_shift(path, entries, count));
-+ assert(frame->at < iam_entry_shift(path, entries, count));
++ assert_corr(count < dx_get_limit(entries));
++ assert_corr(frame->at < iam_entry_shift(path, entries, count));
+
memmove(iam_entry_shift(path, new, 1), new,
(char *)iam_entry_shift(path, entries, count) - (char *)new);
+void dx_insert_block(struct iam_path *path, struct iam_frame *frame,
+ u32 hash, u32 block)
+{
-+ assert(dx_index_is_compat(path));
++ assert_corr(dx_index_is_compat(path));
+ iam_insert_key(path, frame, (struct iam_ikey *)&hash, block);
+}
+
+ /*
+ * Algorithm below depends on this.
+ */
-+ assert(dx_root_limit(path) < dx_node_limit(path));
++ assert_corr(dx_root_limit(path) < dx_node_limit(path));
+
frame = path->ip_frame;
entries = frame->entries;
struct iam_frame *frames;
+ struct iam_entry *next;
+
-+ assert(i == 0);
++ assert_corr(i == 0);
frames = path->ip_frames;
- root = (struct dx_root *) frames->bh->b_data;
/* Shift frames in the path */
memmove(frames + 2, frames + 1,
-@@ -2537,48 +1603,60 @@ static int split_index_node(handle_t *ha
+@@ -2536,49 +1602,61 @@ static int split_index_node(handle_t *ha
+ frames[1].at = iam_entry_shift(path, entries2, idx);
frames[1].entries = entries = entries2;
frames[1].bh = bh2;
- assert(dx_node_check(path, frame));
+- assert(dx_node_check(path, frame));
++ assert_inv(dx_node_check(path, frame));
+ ++ path->ip_frame;
++ frame;
- assert(dx_node_check(path, frame));
+- assert(dx_node_check(path, frame));
- bh_new[i] = NULL; /* buffer head is "consumed" */
++ assert_inv(dx_node_check(path, frame));
+ bh_new[0] = NULL; /* buffer head is "consumed" */
err = ext3_journal_get_write_access(handle, bh2);
if (err)
+ parent->at, +1);
}
- dx_insert_block(path, frame - 1, hash2, newblock[i]);
- assert(dx_node_check(path, frame));
+- assert(dx_node_check(path, frame));
- assert(dx_node_check(path, frame - 1));
-+ assert(dx_node_check(path, parent));
++ assert_inv(dx_node_check(path, frame));
++ assert_inv(dx_node_check(path, parent));
dxtrace(dx_show_index ("node", frame->entries));
dxtrace(dx_show_index ("node",
((struct dx_node *) bh2->b_data)->entries));
+ * This function was called to make insertion of new leaf
+ * possible. Check that it fulfilled its obligations.
+ */
-+ assert(dx_get_count(path->ip_frame->entries) <
-+ dx_get_limit(path->ip_frame->entries));
++ assert_corr(dx_get_count(path->ip_frame->entries) <
++ dx_get_limit(path->ip_frame->entries));
+ }
+ if (nr_splet > 0) {
+ /*
if (err != 0)
goto cleanup;
-@@ -2641,7 +1720,7 @@ static int ext3_dx_add_entry(handle_t *h
+@@ -2641,11 +1720,11 @@ static int ext3_dx_add_entry(handle_t *h
goto cleanup;
/*copy split inode too*/
if (!de)
goto cleanup;
+- assert(dx_node_check(path, frame));
++ assert_inv(dx_node_check(path, frame));
+ err = add_dirent_to_buf(handle, dentry, inode, de, bh);
+ goto cleanup2;
+
@@ -2758,12 +1837,12 @@ static struct inode * ext3_new_inode_wan
* is so far negative - it has no inode.
*
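
dx_lookup() above descends one index node per tree level; inside each node it binary-searches for the last entry whose key is not greater than the target and, as the "linear search cross check" block shows, can verify the answer against a plain scan. A stand-alone sketch of that per-node step over plain integers (the demo names and int keys are illustrative stand-ins for iam_entry/iam_ikey, not the kernel interfaces):

#include <assert.h>
#include <stdio.h>

/* Slot of the last element of keys[1..count-1] that is <= target;
 * slot 0 plays the role of the leftmost "minimal key" entry. */
static int dx_pick(const int *keys, int count, int target)
{
        int p = 1;
        int q = count - 1;

        while (p <= q) {
                int m = p + (q - p) / 2;

                if (keys[m] > target)
                        q = m - 1;
                else
                        p = m + 1;
        }
        return p - 1;
}

/* The same answer by a left-to-right scan, used as a cross check. */
static int dx_pick_linear(const int *keys, int count, int target)
{
        int at = 0;
        int i;

        for (i = 1; i < count; ++i) {
                if (keys[i] > target)
                        break;
                at = i;
        }
        return at;
}

int main(void)
{
        /* keys[0] stands in for the implicit smallest-possible-key slot. */
        int keys[] = { 0, 10, 20, 20, 30 };
        int count = sizeof(keys) / sizeof(keys[0]);
        int t;

        for (t = 0; t <= 35; t += 5) {
                int at = dx_pick(keys, count, t);

                assert(at == dx_pick_linear(keys, count, t));
                printf("target %2d -> slot %d (key %d)\n", t, at, keys[at]);
        }
        return 0;
}
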
Index: iam/include/linux/lustre_iam.h
===================================================================
--- iam.orig/include/linux/lustre_iam.h 2006-09-28 22:11:15.000000000 +0400
-+++ iam/include/linux/lustre_iam.h 2006-10-03 00:15:55.000000000 +0400
++++ iam/include/linux/lustre_iam.h 2006-10-03 21:14:47.000000000 +0400
@@ -1,9 +1,68 @@
+/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
+ * vim:expandtab:shiftwidth=8:tabstop=8:
Index: iam/fs/ext3/Makefile
===================================================================
---- iam.orig/fs/ext3/Makefile 2006-10-03 00:15:55.000000000 +0400
-+++ iam/fs/ext3/Makefile 2006-10-03 00:15:56.000000000 +0400
+--- iam.orig/fs/ext3/Makefile 2006-10-03 21:14:47.000000000 +0400
++++ iam/fs/ext3/Makefile 2006-10-03 21:14:47.000000000 +0400
@@ -6,7 +6,7 @@ obj-$(CONFIG_EXT3_FS) += ext3.o
ext3-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o iopen.o \
ext3-$(CONFIG_EXT3_FS_POSIX_ACL) += acl.o
Index: iam/fs/ext3/dir.c
===================================================================
---- iam.orig/fs/ext3/dir.c 2006-10-03 00:15:55.000000000 +0400
-+++ iam/fs/ext3/dir.c 2006-10-03 00:15:56.000000000 +0400
+--- iam.orig/fs/ext3/dir.c 2006-10-03 21:14:47.000000000 +0400
++++ iam/fs/ext3/dir.c 2006-10-03 21:14:47.000000000 +0400
@@ -28,6 +28,7 @@
#include <linux/smp_lock.h>
#include <linux/slab.h>
(filp->f_version != inode->i_version)) {
Index: iam/fs/ext3/file.c
===================================================================
---- iam.orig/fs/ext3/file.c 2006-10-03 00:15:55.000000000 +0400
-+++ iam/fs/ext3/file.c 2006-10-03 00:15:56.000000000 +0400
+--- iam.orig/fs/ext3/file.c 2006-10-03 21:14:47.000000000 +0400
++++ iam/fs/ext3/file.c 2006-10-03 21:14:47.000000000 +0400
@@ -23,6 +23,7 @@
#include <linux/jbd.h>
#include <linux/ext3_fs.h>
Index: iam/fs/ext3/iam-uapi.c
===================================================================
--- iam.orig/fs/ext3/iam-uapi.c 2004-04-06 17:27:52.000000000 +0400
-+++ iam/fs/ext3/iam-uapi.c 2006-10-03 00:15:56.000000000 +0400
++++ iam/fs/ext3/iam-uapi.c 2006-10-03 21:14:47.000000000 +0400
@@ -0,0 +1,368 @@
+/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
+ * vim:expandtab:shiftwidth=8:tabstop=8:
+}
Index: iam/fs/ext3/ioctl.c
===================================================================
---- iam.orig/fs/ext3/ioctl.c 2006-10-03 00:15:55.000000000 +0400
-+++ iam/fs/ext3/ioctl.c 2006-10-03 00:15:56.000000000 +0400
+--- iam.orig/fs/ext3/ioctl.c 2006-10-03 21:14:47.000000000 +0400
++++ iam/fs/ext3/ioctl.c 2006-10-03 21:14:47.000000000 +0400
@@ -250,6 +250,6 @@ flags_err:
}
Index: iam/include/linux/lustre_iam.h
===================================================================
---- iam.orig/include/linux/lustre_iam.h 2006-10-03 00:15:55.000000000 +0400
-+++ iam/include/linux/lustre_iam.h 2006-10-03 00:15:56.000000000 +0400
+--- iam.orig/include/linux/lustre_iam.h 2006-10-03 21:14:47.000000000 +0400
++++ iam/include/linux/lustre_iam.h 2006-10-03 21:14:47.000000000 +0400
@@ -30,9 +30,6 @@
#ifndef __LINUX_LUSTRE_IAM_H__
#define __LINUX_LUSTRE_IAM_H__
/*
* linux/include/linux/lustre_iam.h
*/
-@@ -57,14 +54,21 @@ enum {
+@@ -57,14 +54,64 @@ enum {
* [2] reserved for leaf node operations.
*
* [3] reserved for index operations.
+/* handle_t, journal_start(), journal_stop() */
+#include <linux/jbd.h>
+
++/*
++ * Debugging.
++ *
++ * Various debugging levels.
++ */
++
++/*
++ * Compile basic assertions in. You want this most of the time.
++ */
++#define EXT3_ASSERT (1)
++
++/*
++ * Compile heavier correctness checks in. You want this during the
++ * development cycle.
++ */
++#define EXT3_CORRECTNESS (0)
++
++/*
++ * Compile heavy invariant checking in. You want this early during development
++ * or when chasing a bug.
++ */
++#define EXT3_INVARIANT (0)
++
++#ifndef assert
++#if EXT3_ASSERT
++#define assert(test) J_ASSERT(test)
++#else
++#define assert(test) ((void)(test))
++#endif
++#endif
++
++#if EXT3_CORRECTNESS
++#define assert_corr(test) J_ASSERT(test)
++#else
++#define assert_corr(test) do {;} while (0)
++#endif
++
++#if EXT3_INVARIANT
++#define assert_inv(test) J_ASSERT(test)
++#else
++#define assert_inv(test) do {;} while (0)
++#endif
++
/*
* Entry within index tree node. Consists of a key immediately followed
* (without padding) by a pointer to the child node.
-@@ -86,14 +90,21 @@ struct iam_entry_compat {
+@@ -86,14 +133,21 @@ struct iam_entry_compat {
*/
struct iam_key;
typedef __u64 iam_ptr_t;
/*
-@@ -123,6 +134,31 @@ struct iam_leaf {
+@@ -123,6 +177,31 @@ struct iam_leaf {
void *il_descr_data;
};
struct iam_operations {
/*
* Returns pointer (in the same sense as pointer in index entry) to
-@@ -131,11 +167,15 @@ struct iam_operations {
+@@ -131,11 +210,15 @@ struct iam_operations {
__u32 (*id_root_ptr)(struct iam_container *c);
/*
* Initialize new node (stored in @bh) that is going to be added into
* tree.
*/
-@@ -144,23 +184,33 @@ struct iam_operations {
+@@ -144,23 +227,33 @@ struct iam_operations {
int (*id_node_read)(struct iam_container *c, iam_ptr_t ptr,
handle_t *h, struct buffer_head **bh);
/*
struct iam_leaf_operations {
/*
* leaf operations.
-@@ -186,7 +236,8 @@ struct iam_leaf_operations {
+@@ -186,7 +279,8 @@ struct iam_leaf_operations {
void (*start)(struct iam_leaf *l);
/* more leaf to the next entry. */
void (*next)(struct iam_leaf *l);
* either pointer to the key stored in node, or copy key into
* @k buffer supplied by caller and return pointer to this
* buffer. The latter approach is used when keys in nodes are
-@@ -194,8 +245,10 @@ struct iam_leaf_operations {
+@@ -194,8 +288,10 @@ struct iam_leaf_operations {
* all).
*
* Caller should assume that returned pointer is only valid
/* return pointer to entry body. Pointer is valid while
corresponding leaf node is locked and pinned. */
struct iam_rec *(*rec)(const struct iam_leaf *l);
-@@ -203,6 +256,9 @@ struct iam_leaf_operations {
+@@ -203,6 +299,9 @@ struct iam_leaf_operations {
void (*key_set)(struct iam_leaf *l, const struct iam_key *k);
void (*rec_set)(struct iam_leaf *l, const struct iam_rec *r);
/*
* Search leaf @l for a record with key @k or for a place
* where such record is to be inserted.
-@@ -210,6 +266,7 @@ struct iam_leaf_operations {
+@@ -210,6 +309,7 @@ struct iam_leaf_operations {
* Scratch keys from @path can be used.
*/
int (*lookup)(struct iam_leaf *l, const struct iam_key *k);
int (*can_add)(const struct iam_leaf *l,
const struct iam_key *k, const struct iam_rec *r);
-@@ -221,12 +278,13 @@ struct iam_leaf_operations {
+@@ -221,12 +321,13 @@ struct iam_leaf_operations {
/*
* remove rec for a leaf
*/
};
struct iam_path *iam_leaf_path(const struct iam_leaf *leaf);
-@@ -241,6 +299,10 @@ struct iam_descr {
+@@ -241,6 +342,10 @@ struct iam_descr {
*/
size_t id_key_size;
/*
* Size of a pointer to the next level (stored in index nodes), in
* bytes.
*/
-@@ -264,6 +326,9 @@ struct iam_descr {
+@@ -264,6 +369,9 @@ struct iam_descr {
struct iam_leaf_operations *id_leaf_ops;
};
struct iam_container {
/*
* Underlying flat file. IO against this object is issued to
-@@ -284,7 +349,7 @@ struct iam_path_descr {
+@@ -284,7 +392,7 @@ struct iam_path_descr {
/*
* Scratch-pad area for temporary keys.
*/
};
/*
-@@ -316,6 +381,7 @@ struct iam_path {
+@@ -316,6 +424,7 @@ struct iam_path {
* Key searched for.
*/
const struct iam_key *ip_key_target;
/*
* Description-specific data.
*/
-@@ -334,6 +400,7 @@ struct iam_path_compat {
+@@ -334,6 +443,7 @@ struct iam_path_compat {
struct dx_hash_info *ipc_hinfo;
struct dentry *ipc_dentry;
struct iam_path_descr ipc_descr;
};
/*
-@@ -347,7 +414,9 @@ enum iam_it_state {
+@@ -347,7 +457,9 @@ enum iam_it_state {
/* initial state */
IAM_IT_DETACHED,
/* iterator is above particular record in the container */
};
/*
-@@ -355,7 +424,7 @@ enum iam_it_state {
+@@ -355,7 +467,7 @@ enum iam_it_state {
*/
enum iam_it_flags {
/*
*/
IAM_IT_MOVE = (1 << 0),
/*
-@@ -372,15 +441,26 @@ enum iam_it_flags {
+@@ -372,15 +484,26 @@ enum iam_it_flags {
* doesn't point to any particular record in this container.
*
* After successful call to iam_it_get() and until corresponding call to
*
*/
struct iam_iterator {
-@@ -390,7 +470,8 @@ struct iam_iterator {
+@@ -390,7 +513,8 @@ struct iam_iterator {
__u32 ii_flags;
enum iam_it_state ii_state;
/*
*/
struct iam_path ii_path;
};
-@@ -405,133 +486,26 @@ void iam_path_compat_fini(struct iam_pat
+@@ -405,133 +529,26 @@ void iam_path_compat_fini(struct iam_pat
struct iam_path_descr *iam_ipd_alloc(int keysize);
void iam_ipd_free(struct iam_path_descr *ipd);
int iam_it_load(struct iam_iterator *it, iam_pos_t pos);
int iam_lookup(struct iam_container *c, const struct iam_key *k,
-@@ -539,10 +513,10 @@ int iam_lookup(struct iam_container *c,
+@@ -539,10 +556,10 @@ int iam_lookup(struct iam_container *c,
int iam_delete(handle_t *h, struct iam_container *c, const struct iam_key *k,
struct iam_path_descr *pd);
int iam_update(handle_t *h, struct iam_container *c, const struct iam_key *k,
/*
* Initialize container @c.
*/
-@@ -577,16 +551,65 @@ static inline struct inode *iam_path_obj
+@@ -558,10 +575,6 @@ void iam_container_fini(struct iam_conta
+ */
+ int iam_container_setup(struct iam_container *c);
+
+-#ifndef assert
+-#define assert(test) J_ASSERT(test)
+-#endif
+-
+ static inline struct iam_descr *iam_container_descr(struct iam_container *c)
+ {
+ return c->ic_descr;
+@@ -577,16 +590,65 @@ static inline struct inode *iam_path_obj
return p->ip_container->ic_object;
}
+static inline struct iam_entry *iam_entry_shift(struct iam_path *p,
+ struct iam_entry *entry,
+ int shift)
- {
-- memcpy(k1, k2, c->ic_descr->id_key_size);
++{
+ void *e = entry;
+ return e + shift * iam_entry_size(p);
- }
-
--static inline int iam_keycmp(const struct iam_container *c,
-- const struct iam_key *k1, const struct iam_key *k2)
++}
++
+static inline struct iam_ikey *iam_get_ikey(struct iam_path *p,
+ struct iam_entry *entry,
+ struct iam_ikey *key)
- {
-- return c->ic_descr->id_ops->id_keycmp(c, k1, k2);
++{
+ return memcpy(key, entry, iam_path_descr(p)->id_ikey_size);
+}
+
+ ptrdiff_t diff;
+
+ diff = (void *)e1 - (void *)e2;
-+ assert(diff / iam_entry_size(p) * iam_entry_size(p) == diff);
++ assert_corr(diff / iam_entry_size(p) * iam_entry_size(p) == diff);
+ return diff / iam_entry_size(p);
+}
+
+ */
+static inline void iam_ikeycpy0(const struct iam_container *c,
+ struct iam_ikey *k1, const struct iam_ikey *k2)
-+{
+ {
+- memcpy(k1, k2, c->ic_descr->id_key_size);
+ if (k1 != k2)
+ iam_ikeycpy(c, k1, k2);
-+}
-+
+ }
+
+-static inline int iam_keycmp(const struct iam_container *c,
+- const struct iam_key *k1, const struct iam_key *k2)
+static inline int iam_ikeycmp(const struct iam_container *c,
+ const struct iam_ikey *k1,
+ const struct iam_ikey *k2)
-+{
+ {
+- return c->ic_descr->id_ops->id_keycmp(c, k1, k2);
+ return c->ic_descr->id_ops->id_ikeycmp(c, k1, k2);
}
static inline void iam_reccpy(const struct iam_path *p, struct iam_rec *rec_dst,
-@@ -604,7 +627,7 @@ static inline void *iam_entry_off(struct
+@@ -604,7 +666,7 @@ static inline void *iam_entry_off(struct
static inline unsigned dx_get_block(struct iam_path *p, struct iam_entry *entry)
{
return le32_to_cpu(*(u32*)iam_entry_off(entry,
& 0x00ffffff;
}
-@@ -612,21 +635,64 @@ static inline void dx_set_block(struct i
+@@ -612,21 +674,64 @@ static inline void dx_set_block(struct i
struct iam_entry *entry, unsigned value)
{
*(u32*)iam_entry_off(entry,
static inline unsigned dx_get_count(struct iam_entry *entries)
{
return le16_to_cpu(((struct dx_countlimit *) entries)->count);
-@@ -647,9 +713,21 @@ static inline unsigned dx_node_limit(str
+@@ -647,9 +752,21 @@ static inline unsigned dx_node_limit(str
struct iam_descr *param = iam_path_descr(p);
unsigned entry_space = iam_path_obj(p)->i_sb->s_blocksize -
param->id_node_gap;
static inline struct iam_entry *dx_get_entries(struct iam_path *path,
void *data, int root)
{
-@@ -665,7 +743,8 @@ static inline struct iam_entry *dx_node_
+@@ -665,7 +782,8 @@ static inline struct iam_entry *dx_node_
frame->bh->b_data, frame == path->ip_frames);
}
{
assert(0 <= nr && nr < ARRAY_SIZE(path->ip_data->ipd_key_scratch));
return path->ip_data->ipd_key_scratch[nr];
-@@ -674,6 +753,7 @@ static inline struct iam_key *iam_path_k
+@@ -674,6 +792,7 @@ static inline struct iam_key *iam_path_k
int dx_lookup(struct iam_path *path);
void dx_insert_block(struct iam_path *path, struct iam_frame *frame,
u32 hash, u32 block);
int ext3_htree_next_block(struct inode *dir, __u32 hash,
struct iam_path *path, __u32 *start_hash);
-@@ -681,6 +761,20 @@ int ext3_htree_next_block(struct inode *
+@@ -681,6 +800,20 @@ int ext3_htree_next_block(struct inode *
struct buffer_head *ext3_append(handle_t *handle, struct inode *inode,
u32 *block, int *err);
int split_index_node(handle_t *handle, struct iam_path *path);
/*
* external
-@@ -698,10 +792,12 @@ int iam_node_read(struct iam_container *
+@@ -698,10 +831,12 @@ int iam_node_read(struct iam_container *
handle_t *handle, struct buffer_head **bh);
void iam_insert_key(struct iam_path *path, struct iam_frame *frame,
struct iam_path *iam_leaf_path(const struct iam_leaf *leaf);
struct iam_container *iam_leaf_container(const struct iam_leaf *leaf);
-@@ -709,14 +805,79 @@ struct iam_descr *iam_leaf_descr(const s
+@@ -709,14 +844,79 @@ struct iam_descr *iam_leaf_descr(const s
struct iam_leaf_operations *iam_leaf_ops(const struct iam_leaf *leaf);