#include <libcfs/libcfs.h>
#include <libcfs/linux/linux-mem.h>
+#include <libcfs/linux/linux-hash.h>
#include <obd_class.h>
#include <obd_support.h>
#include <lustre_disk.h>
ENTRY;
+ /* FID is from disk or network, zero FID is meaningless, return error
+ * early to avoid assertion in lu_object_put. If a zero FID is wanted,
+ * it should be allocated via lu_object_anon().
+ */
+ if (fid_is_zero(f))
+ RETURN(ERR_PTR(-EINVAL));
+
/*
* This uses standard index maintenance protocol:
*
}
EXPORT_SYMBOL(lu_device_put);
+enum { /* Maximal number of tld slots. */
+ LU_CONTEXT_KEY_NR = 40
+};
+/* Slot table of registered context keys; a NULL entry is a free slot. */
+static struct lu_context_key *lu_keys[LU_CONTEXT_KEY_NR] = { NULL, };
+/*
+ * rwsem serializing key registration/quiescing against keys_fill()
+ * (see comment in lu_context_key_quiesce()).  Hoisted ahead of
+ * lu_device_init() so device start can take the write lock too.
+ */
+static DECLARE_RWSEM(lu_key_initing);
+
/**
* Initialize device \a d of type \a t.
*/
int lu_device_init(struct lu_device *d, struct lu_device_type *t)
{
-	if (atomic_inc_return(&t->ldt_device_nr) == 1 &&
-	    t->ldt_ops->ldto_start != NULL)
-		t->ldt_ops->ldto_start(t);
+	/*
+	 * atomic_add_unless() increments only if the count is non-zero,
+	 * so a zero return means this may be the first device of the type.
+	 * Take lu_key_initing (write) so that ldto_start() cannot race
+	 * with lu_context_key_quiesce(), which inspects ldt_device_nr
+	 * under the same lock; re-check the count here for the same
+	 * reason before starting the type.
+	 */
+ if (atomic_add_unless(&t->ldt_device_nr, 1, 0) == 0) {
+ down_write(&lu_key_initing);
+ if (t->ldt_ops->ldto_start &&
+ atomic_read(&t->ldt_device_nr) == 0)
+ t->ldt_ops->ldto_start(t);
+ atomic_inc(&t->ldt_device_nr);
+ up_write(&lu_key_initing);
+ }
 memset(d, 0, sizeof *d);
 d->ld_type = t;
EXPORT_SYMBOL(lu_object_header_fini);
/**
+ * Free lu_object_header with proper RCU handling
+ *
+ * Finalize \a h, then hand it to kfree_rcu() so the memory is not
+ * reused until an RCU grace period elapses (loh_rcu is the embedded
+ * rcu_head used for the deferred free).  OBD_FREE_PRE() is invoked
+ * here, before the deferred kfree — NOTE(review): presumably this
+ * keeps OBD memory accounting/tracing balanced since the real free
+ * happens later in the RCU callback; confirm against OBD_FREE_PRE().
+ */
+void lu_object_header_free(struct lu_object_header *h)
+{
+ lu_object_header_fini(h);
+ OBD_FREE_PRE(h, sizeof(*h), "kfreed");
+ kfree_rcu(h, loh_rcu);
+}
+EXPORT_SYMBOL(lu_object_header_free);
+
+/**
* Given a compound object, find its slice, corresponding to the device type
* \a dtype.
*/
}
}
-enum {
- /**
- * Maximal number of tld slots.
- */
- LU_CONTEXT_KEY_NR = 40
-};
-
-static struct lu_context_key *lu_keys[LU_CONTEXT_KEY_NR] = { NULL, };
-
-static DECLARE_RWSEM(lu_key_initing);
-
/**
* Global counter incremented whenever key is registered, unregistered,
* revived or quiesced. This is used to void unnecessary calls to
LASSERT(atomic_read(&key->lct_used) >= 1);
LINVRNT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys));
-	lu_context_key_quiesce(key);
+	/* NULL device type forces quiescing regardless of remaining devices
+	 * (see lu_context_key_quiesce()). */
+ lu_context_key_quiesce(NULL, key);
key_fini(&lu_shrink_env.le_ctx, key->lct_index);
/**
* Quiescent a number of keys.
*/
-void lu_context_key_quiesce_many(struct lu_context_key *k, ...)
+void lu_context_key_quiesce_many(struct lu_device_type *t,
+ struct lu_context_key *k, ...)
{
-	va_list args;
+	va_list args;
-	va_start(args, k);
-	do {
-		lu_context_key_quiesce(k);
-		k = va_arg(args, struct lu_context_key*);
-	} while (k != NULL);
-	va_end(args);
+ va_start(args, k);
+	/* Varargs list is NULL-terminated; quiesce each key for type \a t. */
+ do {
+ lu_context_key_quiesce(t, k);
+ k = va_arg(args, struct lu_context_key*);
+ } while (k != NULL);
+ va_end(args);
}
EXPORT_SYMBOL(lu_context_key_quiesce_many);
* values in "shared" contexts (like service threads), when a module owning
* the key is about to be unloaded.
*/
-void lu_context_key_quiesce(struct lu_context_key *key)
+void lu_context_key_quiesce(struct lu_device_type *t,
+ struct lu_context_key *key)
{
struct lu_context *ctx;
+	/* Fast path: key already quiesced, nothing to do. */
+ if (key->lct_tags & LCT_QUIESCENT)
+ return;
+ /*
+ * The write-lock on lu_key_initing will ensure that any
+ * keys_fill() which didn't see LCT_QUIESCENT will have
+ * finished before we call key_fini().
+ */
+ down_write(&lu_key_initing);
if (!(key->lct_tags & LCT_QUIESCENT)) {
-	/*
-	 * The write-lock on lu_key_initing will ensure that any
-	 * keys_fill() which didn't see LCT_QUIESCENT will have
-	 * finished before we call key_fini().
-	 */
-	down_write(&lu_key_initing);
-	key->lct_tags |= LCT_QUIESCENT;
+	/* Mark quiescent only when forced (t == NULL) or no devices
+	 * of this type remain; an active device type keeps its keys. */
+ if (t == NULL || atomic_read(&t->ldt_device_nr) == 0)
+ key->lct_tags |= LCT_QUIESCENT;
up_write(&lu_key_initing);
spin_lock(&lu_context_remembered_guard);
+	/* NOTE(review): this hunk elides interior context lines —
+	 * presumably a walk over lu_context_remembered that binds ctx;
+	 * each context is spun past LCS_LEAVING before its slot is
+	 * finalized.  Confirm against the full source. */
spin_until_cond(READ_ONCE(ctx->lc_state) != LCS_LEAVING);
key_fini(ctx, key->lct_index);
}
-
spin_unlock(&lu_context_remembered_guard);
+
+ return;
}
+ up_write(&lu_key_initing);
}
void lu_context_key_revive(struct lu_context_key *key)