static struct lu_context_key *lu_keys[LU_CONTEXT_KEY_NR] = { NULL, };
DEFINE_RWLOCK(lu_keys_guard);
-static atomic_t lu_key_initing_cnt = ATOMIC_INIT(0);
+static DECLARE_RWSEM(lu_key_initing);
/**
* Global counter incremented whenever key is registered, unregistered,
if (!(key->lct_tags & LCT_QUIESCENT)) {
/*
- * XXX memory barrier has to go here.
+ * The write-lock on lu_key_initing will ensure that any
+ * keys_fill() which didn't see LCT_QUIESCENT will have
+ * finished before we call key_fini().
*/
- write_lock(&lu_keys_guard);
+ down_write(&lu_key_initing);
key->lct_tags |= LCT_QUIESCENT;
+ up_write(&lu_key_initing);
- /**
- * Wait until all lu_context_key::lct_init() methods
- * have completed.
- */
- while (atomic_read(&lu_key_initing_cnt) > 0) {
- write_unlock(&lu_keys_guard);
- CDEBUG(D_INFO, "lu_context_key_quiesce: \"%s\""
- " %p, %d (%d)\n",
- key->lct_owner ? key->lct_owner->name : "",
- key, atomic_read(&key->lct_used),
- atomic_read(&lu_key_initing_cnt));
- schedule();
- write_lock(&lu_keys_guard);
- }
-
- list_for_each_entry(ctx, &lu_context_remembered,
- lc_remember)
+ write_lock(&lu_keys_guard);
+ list_for_each_entry(ctx, &lu_context_remembered, lc_remember)
key_fini(ctx, key->lct_index);
-
write_unlock(&lu_keys_guard);
}
}
void lu_context_key_revive(struct lu_context_key *key)
{
- write_lock(&lu_keys_guard);
+ /*
+  * NOTE(review): lct_tags is now cleared without holding
+  * lu_keys_guard. This assumes revive cannot race with
+  * lu_context_key_quiesce()/keys_fill() on the same key (e.g. it is
+  * serialised by key registration) — confirm against the callers.
+  */
key->lct_tags &= ~LCT_QUIESCENT;
atomic_inc(&key_set_version);
- write_unlock(&lu_keys_guard);
}
static void keys_fini(struct lu_context *ctx)
static int keys_fill(struct lu_context *ctx)
{
unsigned int i;
+ int rc = 0;
/*
- * A serialisation with lu_context_key_quiesce() is needed, but some
- * "key->lct_init()" are calling kernel memory allocation routine and
- * can't be called while holding a spin_lock.
- * "lu_keys_guard" is held while incrementing "lu_key_initing_cnt"
- * to ensure the start of the serialisation.
- * An atomic_t variable is still used, in order not to reacquire the
- * lock when decrementing the counter.
+ * Serialisation with lu_context_key_quiesce() is needed to ensure
+ * that we see LCT_QUIESCENT and do not allocate a new value after
+ * lu_context_key_quiesce() has freed the old one. The rwsem
+ * provides this ordering. As down_read() spins optimistically
+ * while the writer is active, this path is unlikely to ever sleep.
*/
- read_lock(&lu_keys_guard);
- atomic_inc(&lu_key_initing_cnt);
- read_unlock(&lu_keys_guard);
+ down_read(&lu_key_initing);
ctx->lc_version = atomic_read(&key_set_version);
LINVRNT(ctx->lc_value);
value = key->lct_init(ctx, key);
if (unlikely(IS_ERR(value))) {
- atomic_dec(&lu_key_initing_cnt);
- return PTR_ERR(value);
+ rc = PTR_ERR(value);
+ break;
}
lu_ref_add_atomic(&key->lct_reference, "ctx", ctx);
}
}
- atomic_dec(&lu_key_initing_cnt);
- return 0;
+ up_read(&lu_key_initing);
+ return rc;
}
static int keys_init(struct lu_context *ctx)