===================================================================
--- iam.orig/fs/ext3/iam.c
+++ iam/fs/ext3/iam.c
-@@ -0,0 +1,1407 @@
+@@ -0,0 +1,1402 @@
+/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
+ * vim:expandtab:shiftwidth=8:tabstop=8:
+ *
+ assert_corr(lh != NULL);
+ do_corr(schedule());
+ err = iam_new_leaf(handle, leaf);
-+ /*
-+ * refresh @leaf, as split may retarget path
-+ * to the new leaf node.
-+ */
-+ leaf = &path->ip_leaf;
+ if (err == 0)
+ err = iam_txn_dirty(handle, path,
+ path->ip_frame->bh);
===================================================================
--- iam.orig/fs/ext3/iam_htree.c
+++ iam/fs/ext3/iam_htree.c
-@@ -0,0 +1,683 @@
+@@ -0,0 +1,682 @@
+/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
+ * vim:expandtab:shiftwidth=8:tabstop=8:
+ *
+ return NULL;
+}
+
-+static void iam_htree_ipd_free(const struct iam_container *c,
-+ struct iam_path_descr *ipd)
++static void iam_htree_ipd_free(struct iam_path_descr *ipd)
+{
+ struct iam_path_compat *ipc;
+
===================================================================
--- iam.orig/fs/ext3/iam_lfix.c
+++ iam/fs/ext3/iam_lfix.c
-@@ -0,0 +1,682 @@
+@@ -0,0 +1,676 @@
+/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
+ * vim:expandtab:shiftwidth=8:tabstop=8:
+ *
+ return iam_ipd_alloc(c->ic_descr->id_ikey_size);
+}
+
-+static void iam_lfix_ipd_free(const struct iam_container *c,
-+ struct iam_path_descr *ipd)
-+{
-+ iam_ipd_free(ipd);
-+}
-+
+static struct iam_operations iam_lfix_ops = {
+ .id_root_ptr = iam_lfix_root_ptr,
+ .id_node_read = iam_node_read,
+ .id_ikeycmp = iam_lfix_ikeycmp,
+ .id_root_inc = iam_lfix_root_inc,
+ .id_ipd_alloc = iam_lfix_ipd_alloc,
-+ .id_ipd_free = iam_lfix_ipd_free,
++ .id_ipd_free = iam_ipd_free,
+ .id_name = "lfix"
+};
+
===================================================================
--- iam.orig/fs/ext3/iam_lvar.c
+++ iam/fs/ext3/iam_lvar.c
-@@ -0,0 +1,1006 @@
+@@ -0,0 +1,1011 @@
+/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
+ * vim:expandtab:shiftwidth=8:tabstop=8:
+ *
+ return ((void *)ent) + e_size(leaf, ent);
+}
+
-+#define LVAR_HASH_TEA (1)
-+#define LVAR_HASH_R5 (0)
++#define LVAR_HASH_TEA (0)
++#define LVAR_HASH_R5 (1)
+#define LVAR_HASH_PREFIX (0)
+
+static inline lvar_hash_t get_hash(const struct iam_container *bag,
+#if EXT3_INVARIANT_ON
+static int n_invariant(const struct iam_leaf *leaf)
+{
++ struct iam_path *path;
+ struct lvar_leaf_entry *scan;
+ struct lvar_leaf_entry *end;
+ lvar_hash_t hash;
+ lvar_hash_t nexthash;
++ lvar_hash_t starthash;
+
-+ end = n_end(leaf);
++ end = n_end(leaf);
+ hash = 0;
++ path = leaf->il_path;
+
+ if (h_used(n_head(leaf)) > blocksize(leaf))
+ return 0;
+
++ starthash = *(lvar_hash_t *)iam_ikey_at(path, path->ip_frame->at);
+ for (scan = n_start(leaf); scan < end; scan = e_next(leaf, scan)) {
+ nexthash = e_hash(scan);
+ if (nexthash != get_hash(iam_leaf_container(leaf),
+ BREAKPOINT();
+ return 0;
+ }
++ if (nexthash < starthash) {
++ n_print(leaf);
++ printk(KERN_ERR "%#x < %#x\n", nexthash, starthash);
++ dump_stack();
++ BREAKPOINT();
++ return 0;
++ }
+ if (nexthash < hash) {
+ BREAKPOINT();
+ return 0;
+ return iam_ipd_alloc(c->ic_descr->id_ikey_size);
+}
+
-+static void lvar_ipd_free(const struct iam_container *c,
-+ struct iam_path_descr *ipd)
-+{
-+ iam_ipd_free(ipd);
-+}
-+
+static int root_limit(int rootgap, int blocksize, int size)
+{
+ int limit;
+ .id_ikeycmp = lvar_ikeycmp,
+ .id_root_inc = lvar_root_inc,
+ .id_ipd_alloc = lvar_ipd_alloc,
-+ .id_ipd_free = lvar_ipd_free,
++ .id_ipd_free = iam_ipd_free,
+ .id_name = "lvar"
+};
+
};
/*
-@@ -331,6 +349,7 @@ struct iam_leaf_operations {
+@@ -271,8 +289,7 @@ struct iam_operations {
+ struct iam_frame *frame);
+
+ struct iam_path_descr *(*id_ipd_alloc)(const struct iam_container *c);
+- void (*id_ipd_free)(const struct iam_container *c,
+- struct iam_path_descr *ipd);
++ void (*id_ipd_free)(struct iam_path_descr *ipd);
+ /*
+ * Format name.
+ */
+@@ -331,6 +348,7 @@ struct iam_leaf_operations {
void (*rec_set)(struct iam_leaf *l, const struct iam_rec *r);
int (*key_cmp)(const struct iam_leaf *l, const struct iam_key *k);
int (*key_size)(const struct iam_leaf *l);
/*
-@@ -473,7 +492,7 @@ struct iam_path_compat {
+@@ -473,7 +491,7 @@ struct iam_path_compat {
struct iam_container ipc_container;
__u32 ipc_scratch[DX_SCRATCH_KEYS];
struct dx_hash_info *ipc_hinfo;
struct iam_path_descr ipc_descr;
struct dx_hash_info ipc_hinfo_area;
};
-@@ -848,7 +867,9 @@ static inline struct iam_ikey *iam_path_
+@@ -848,7 +866,9 @@ static inline struct iam_ikey *iam_path_
return path->ip_data->ipd_key_scratch[nr];
}
void dx_insert_block(struct iam_path *path, struct iam_frame *frame,
u32 hash, u32 block);
int dx_index_is_compat(struct iam_path *path);
-@@ -858,7 +879,8 @@ int ext3_htree_next_block(struct inode *
+@@ -858,7 +878,8 @@ int ext3_htree_next_block(struct inode *
struct buffer_head *ext3_append(handle_t *handle, struct inode *inode,
u32 *block, int *err);
struct ext3_dir_entry_2 *split_entry(struct inode *dir,
struct ext3_dir_entry_2 *de,
unsigned long ino, mode_t mode,
-@@ -874,6 +896,10 @@ struct ext3_dir_entry_2 *move_entries(st
+@@ -874,6 +895,10 @@ struct ext3_dir_entry_2 *move_entries(st
extern struct iam_descr iam_htree_compat_param;
#include "osd_igif.h"
struct osd_object {
- /*
- * Mutable fields (like ->do_index_ops) are protected by ->oo_guard.
- */
struct dt_object oo_dt;
/*
* Inode for file system object represented by this osd_object. This
struct rw_semaphore oo_sem;
struct iam_container oo_container;
struct iam_descr oo_descr;
- /*
- * Protected by ->oo_guard.
- */
- struct iam_path_descr *oo_ipd;
- spinlock_t oo_guard;
const struct lu_env *oo_owner;
};
mo->oo_dt.do_ops = &osd_obj_ops;
l->lo_ops = &osd_lu_obj_ops;
init_rwsem(&mo->oo_sem);
- spin_lock_init(&mo->oo_guard);
return l;
} else
return NULL;
OBD_FREE_PTR(obj);
}
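+/*
+ * Helpers that obtain and release a transient iam_path_descr for a single
+ * index operation, via the container's format-specific ->id_ipd_alloc()
+ * and ->id_ipd_free() methods; the env parameter is currently unused.
+ */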
+static struct iam_path_descr *osd_ipd_get(const struct lu_env *env,
+ const struct iam_container *bag)
+{
+ return bag->ic_descr->id_ops->id_ipd_alloc(bag);
+}
+
+static void osd_ipd_put(const struct lu_env *env,
+ const struct iam_container *bag,
+ struct iam_path_descr *ipd)
+{
+ bag->ic_descr->id_ops->id_ipd_free(ipd);
+}
+
/*
* Concurrency: no concurrent access is possible that late in object
* life-cycle.
struct iam_container *bag;
bag = &o->oo_container;
- if (o->oo_ipd != NULL) {
- LASSERT(bag->ic_descr->id_ops->id_ipd_free != NULL);
- bag->ic_descr->id_ops->id_ipd_free(&o->oo_container, o->oo_ipd);
- o->oo_ipd = NULL;
- }
if (o->oo_inode != NULL) {
- if (o->oo_container.ic_object == o->oo_inode)
- iam_container_fini(&o->oo_container);
+ if (bag->ic_object == o->oo_inode)
+ iam_container_fini(bag);
}
}
* writable */);
}
-static int osd_index_setup(const struct lu_env *env, struct osd_object *obj,
- struct iam_container *bag)
-{
- struct iam_path_descr *ipd;
- int result;
-
- ipd = bag->ic_descr->id_ops->id_ipd_alloc(bag);
- if (ipd != NULL) {
- spin_lock(&obj->oo_guard);
- if (obj->oo_ipd == NULL) {
- obj->oo_ipd = ipd;
- obj->oo_dt.do_index_ops = &osd_index_ops;
- } else {
- /*
- * Oops, index was setup concurrently.
- */
- LASSERT(obj->oo_dt.do_index_ops == &osd_index_ops);
- LASSERT(bag->ic_descr->id_ops->id_ipd_free != NULL);
- bag->ic_descr->id_ops->id_ipd_free(bag, ipd);
- }
- spin_unlock(&obj->oo_guard);
- result = 0;
- } else
- result = -ENOMEM;
- return result;
-}
-
/*
* Concurrency: no external locking is necessary.
*/
if (result == 0) {
result = iam_container_setup(bag);
if (result == 0)
- result = osd_index_setup(env, obj, bag);
+ obj->oo_dt.do_index_ops = &osd_index_ops;
}
} else
result = 0;
{
struct osd_object *obj = osd_dt_obj(dt);
struct osd_thandle *oh;
+ struct iam_path_descr *ipd;
+ struct iam_container *bag = &obj->oo_container;
int rc;
ENTRY;
LASSERT(osd_invariant(obj));
LASSERT(dt_object_exists(dt));
- LASSERT(obj->oo_container.ic_object == obj->oo_inode);
- LASSERT(obj->oo_ipd != NULL);
+ LASSERT(bag->ic_object == obj->oo_inode);
LASSERT(handle != NULL);
if (osd_object_auth(env, dt, capa, CAPA_OPC_INDEX_DELETE))
RETURN(-EACCES);
+ ipd = osd_ipd_get(env, bag);
+ if (ipd == NULL)
+ RETURN(-ENOMEM);
+
oh = container_of0(handle, struct osd_thandle, ot_super);
LASSERT(oh->ot_handle != NULL);
- rc = iam_delete(oh->ot_handle, &obj->oo_container,
- (const struct iam_key *)key, obj->oo_ipd);
-
+ rc = iam_delete(oh->ot_handle, bag, (const struct iam_key *)key, ipd);
+ osd_ipd_put(env, bag, ipd);
LASSERT(osd_invariant(obj));
RETURN(rc);
}
struct dt_rec *rec, const struct dt_key *key,
struct lustre_capa *capa)
{
- struct osd_object *obj = osd_dt_obj(dt);
+ struct osd_object *obj = osd_dt_obj(dt);
+ struct iam_path_descr *ipd;
+ struct iam_container *bag = &obj->oo_container;
int rc;
ENTRY;
LASSERT(osd_invariant(obj));
LASSERT(dt_object_exists(dt));
- LASSERT(obj->oo_container.ic_object == obj->oo_inode);
- LASSERT(obj->oo_ipd != NULL);
+ LASSERT(bag->ic_object == obj->oo_inode);
if (osd_object_auth(env, dt, capa, CAPA_OPC_INDEX_LOOKUP))
return -EACCES;
- rc = iam_lookup(&obj->oo_container, (const struct iam_key *)key,
- (struct iam_rec *)rec, obj->oo_ipd);
+ ipd = osd_ipd_get(env, bag);
+ if (ipd == NULL)
+ RETURN(-ENOMEM);
+ rc = iam_lookup(bag, (const struct iam_key *)key,
+ (struct iam_rec *)rec, ipd);
+ osd_ipd_put(env, bag, ipd);
LASSERT(osd_invariant(obj));
RETURN(rc);
struct thandle *th, struct lustre_capa *capa)
{
struct osd_object *obj = osd_dt_obj(dt);
-
+ struct iam_path_descr *ipd;
struct osd_thandle *oh;
+ struct iam_container *bag = &obj->oo_container;
int rc;
ENTRY;
LASSERT(osd_invariant(obj));
LASSERT(dt_object_exists(dt));
- LASSERT(obj->oo_container.ic_object == obj->oo_inode);
- LASSERT(obj->oo_ipd != NULL);
+ LASSERT(bag->ic_object == obj->oo_inode);
LASSERT(th != NULL);
if (osd_object_auth(env, dt, capa, CAPA_OPC_INDEX_INSERT))
return -EACCES;
+ ipd = osd_ipd_get(env, bag);
+ if (ipd == NULL)
+ RETURN(-ENOMEM);
+
oh = container_of0(th, struct osd_thandle, ot_super);
LASSERT(oh->ot_handle != NULL);
- rc = iam_insert(oh->ot_handle, &obj->oo_container,
- (const struct iam_key *)key,
- (struct iam_rec *)rec, obj->oo_ipd);
-
+ rc = iam_insert(oh->ot_handle, bag, (const struct iam_key *)key,
+ (struct iam_rec *)rec, ipd);
+ osd_ipd_put(env, bag, ipd);
LASSERT(osd_invariant(obj));
RETURN(rc);
}
struct dt_object *dt, int writable,
struct lustre_capa *capa)
{
- struct osd_it *it;
- struct osd_object *obj = osd_dt_obj(dt);
- struct lu_object *lo = &dt->do_lu;
- __u32 flags;
+ struct osd_it *it;
+ struct osd_object *obj = osd_dt_obj(dt);
+ struct lu_object *lo = &dt->do_lu;
+ struct iam_path_descr *ipd;
+ struct iam_container *bag = &obj->oo_container;
+ __u32 flags;
LASSERT(lu_object_exists(lo));
- LASSERT(obj->oo_ipd != NULL);
if (osd_object_auth(env, dt, capa, writable ? CAPA_OPC_BODY_WRITE :
CAPA_OPC_BODY_READ))
return ERR_PTR(-EACCES);
+ ipd = osd_ipd_get(env, bag);
+ if (ipd == NULL)
+ return ERR_PTR(-ENOMEM);
+
flags = writable ? IAM_IT_MOVE|IAM_IT_WRITE : IAM_IT_MOVE;
OBD_ALLOC_PTR(it);
if (it != NULL) {
it->oi_obj = obj;
lu_object_get(lo);
- iam_it_init(&it->oi_it, &obj->oo_container, flags, obj->oo_ipd);
+ iam_it_init(&it->oi_it, bag, flags, ipd);
}
+ osd_ipd_put(env, bag, ipd);
return (struct dt_it *)it;
}