From 96d773d993cd48a069da4098b87da7d9ef0dd52e Mon Sep 17 00:00:00 2001
From: Li Xi
Date: Fri, 26 Jun 2015 11:13:08 +0800
Subject: [PATCH] LU-6800 obdclass: change spinlock of key to rwlock

Keys are rarely changed once registered, so an rwlock should give
better concurrency for key readers than a spinlock.

Signed-off-by: Li Xi
Change-Id: I5c86c7333884a44ec8e9017d532d6eea5a119cfa
Signed-off-by: Gu Zheng
Reviewed-on: http://review.whamcloud.com/15558
Tested-by: Jenkins
Tested-by: Maloo
Reviewed-by: Faccini Bruno
Reviewed-by: James Simmons
Reviewed-by: Oleg Drokin
---
 lustre/obdclass/lu_object.c | 54 +++++++++++++++++++++++++++---------------------------
 1 file changed, 27 insertions(+), 27 deletions(-)

diff --git a/lustre/obdclass/lu_object.c b/lustre/obdclass/lu_object.c
index a51dc41..fe9b5b5 100644
--- a/lustre/obdclass/lu_object.c
+++ b/lustre/obdclass/lu_object.c
@@ -1364,7 +1364,7 @@ enum {
 
 static struct lu_context_key *lu_keys[LU_CONTEXT_KEY_NR] = { NULL, };
 
-static DEFINE_SPINLOCK(lu_keys_guard);
+DEFINE_RWLOCK(lu_keys_guard);
 static atomic_t lu_key_initing_cnt = ATOMIC_INIT(0);
 
 /**
@@ -1389,7 +1389,7 @@ int lu_context_key_register(struct lu_context_key *key)
 	LASSERT(key->lct_owner != NULL);
 
 	result = -ENFILE;
-	spin_lock(&lu_keys_guard);
+	write_lock(&lu_keys_guard);
 	for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
 		if (lu_keys[i] == NULL) {
 			key->lct_index = i;
@@ -1401,7 +1401,7 @@ int lu_context_key_register(struct lu_context_key *key)
 			break;
 		}
 	}
-	spin_unlock(&lu_keys_guard);
+	write_unlock(&lu_keys_guard);
 	return result;
 }
 EXPORT_SYMBOL(lu_context_key_register);
@@ -1440,7 +1440,7 @@ void lu_context_key_degister(struct lu_context_key *key)
 	lu_context_key_quiesce(key);
 
 	++key_set_version;
-	spin_lock(&lu_keys_guard);
+	write_lock(&lu_keys_guard);
 	key_fini(&lu_shrink_env.le_ctx, key->lct_index);
 
 	/**
@@ -1448,18 +1448,18 @@ void lu_context_key_degister(struct lu_context_key *key)
 	 * run lu_context_key::lct_fini() method.
 	 */
 	while (atomic_read(&key->lct_used) > 1) {
-		spin_unlock(&lu_keys_guard);
+		write_unlock(&lu_keys_guard);
 		CDEBUG(D_INFO, "lu_context_key_degister: \"%s\" %p, %d\n",
 		       key->lct_owner ? key->lct_owner->name : "", key,
 		       atomic_read(&key->lct_used));
 		schedule();
-		spin_lock(&lu_keys_guard);
+		write_lock(&lu_keys_guard);
 	}
 	if (lu_keys[key->lct_index]) {
 		lu_keys[key->lct_index] = NULL;
 		lu_ref_fini(&key->lct_reference);
 	}
-	spin_unlock(&lu_keys_guard);
+	write_unlock(&lu_keys_guard);
 
 	LASSERTF(atomic_read(&key->lct_used) == 1,
 		 "key has instances: %d\n",
@@ -1584,7 +1584,7 @@ void lu_context_key_quiesce(struct lu_context_key *key)
 		/*
 		 * XXX memory barrier has to go here.
 		 */
-		spin_lock(&lu_keys_guard);
+		write_lock(&lu_keys_guard);
 		key->lct_tags |= LCT_QUIESCENT;
 
 		/**
@@ -1592,20 +1592,20 @@ void lu_context_key_quiesce(struct lu_context_key *key)
 		 * have completed.
 		 */
 		while (atomic_read(&lu_key_initing_cnt) > 0) {
-			spin_unlock(&lu_keys_guard);
+			write_unlock(&lu_keys_guard);
 			CDEBUG(D_INFO, "lu_context_key_quiesce: \"%s\""
 			       " %p, %d (%d)\n",
 			       key->lct_owner ? key->lct_owner->name : "",
 			       key, atomic_read(&key->lct_used),
 			       atomic_read(&lu_key_initing_cnt));
 			schedule();
-			spin_lock(&lu_keys_guard);
+			write_lock(&lu_keys_guard);
 		}
 
 		list_for_each_entry(ctx, &lu_context_remembered,
 				    lc_remember)
 			key_fini(ctx, key->lct_index);
-		spin_unlock(&lu_keys_guard);
+		write_unlock(&lu_keys_guard);
 		++key_set_version;
 	}
 }
@@ -1643,9 +1643,9 @@ static int keys_fill(struct lu_context *ctx)
 	 * An atomic_t variable is still used, in order not to reacquire the
 	 * lock when decrementing the counter.
 	 */
-	spin_lock(&lu_keys_guard);
+	read_lock(&lu_keys_guard);
 	atomic_inc(&lu_key_initing_cnt);
-	spin_unlock(&lu_keys_guard);
+	read_unlock(&lu_keys_guard);
 
 	LINVRNT(ctx->lc_value != NULL);
 	for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
@@ -1714,9 +1714,9 @@ int lu_context_init(struct lu_context *ctx, __u32 tags)
 	ctx->lc_state = LCS_INITIALIZED;
 	ctx->lc_tags = tags;
 	if (tags & LCT_REMEMBER) {
-		spin_lock(&lu_keys_guard);
+		write_lock(&lu_keys_guard);
 		list_add(&ctx->lc_remember, &lu_context_remembered);
-		spin_unlock(&lu_keys_guard);
+		write_unlock(&lu_keys_guard);
 	} else {
 		INIT_LIST_HEAD(&ctx->lc_remember);
 	}
@@ -1742,10 +1742,10 @@ void lu_context_fini(struct lu_context *ctx)
 		keys_fini(ctx);
 
 	} else { /* could race with key degister */
-		spin_lock(&lu_keys_guard);
+		write_lock(&lu_keys_guard);
 		keys_fini(ctx);
 		list_del_init(&ctx->lc_remember);
-		spin_unlock(&lu_keys_guard);
+		write_unlock(&lu_keys_guard);
 	}
 }
 EXPORT_SYMBOL(lu_context_fini);
@@ -1773,7 +1773,7 @@ void lu_context_exit(struct lu_context *ctx)
 		for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
 			/* could race with key quiescency */
 			if (ctx->lc_tags & LCT_REMEMBER)
-				spin_lock(&lu_keys_guard);
+				read_lock(&lu_keys_guard);
 			if (ctx->lc_value[i] != NULL) {
 				struct lu_context_key *key;
 
@@ -1784,7 +1784,7 @@ void lu_context_exit(struct lu_context *ctx)
 					      key, ctx->lc_value[i]);
 			}
 			if (ctx->lc_tags & LCT_REMEMBER)
-				spin_unlock(&lu_keys_guard);
+				read_unlock(&lu_keys_guard);
 		}
 	}
 }
@@ -1812,37 +1812,37 @@ __u32 lu_session_tags_default = 0;
 
 void lu_context_tags_update(__u32 tags)
 {
-	spin_lock(&lu_keys_guard);
+	write_lock(&lu_keys_guard);
 	lu_context_tags_default |= tags;
 	key_set_version++;
-	spin_unlock(&lu_keys_guard);
+	write_unlock(&lu_keys_guard);
 }
 EXPORT_SYMBOL(lu_context_tags_update);
 
 void lu_context_tags_clear(__u32 tags)
 {
-	spin_lock(&lu_keys_guard);
+	write_lock(&lu_keys_guard);
 	lu_context_tags_default &= ~tags;
 	key_set_version++;
-	spin_unlock(&lu_keys_guard);
+	write_unlock(&lu_keys_guard);
 }
 EXPORT_SYMBOL(lu_context_tags_clear);
 
 void lu_session_tags_update(__u32 tags)
 {
-	spin_lock(&lu_keys_guard);
+	write_lock(&lu_keys_guard);
 	lu_session_tags_default |= tags;
 	key_set_version++;
-	spin_unlock(&lu_keys_guard);
+	write_unlock(&lu_keys_guard);
 }
 EXPORT_SYMBOL(lu_session_tags_update);
 
 void lu_session_tags_clear(__u32 tags)
 {
-	spin_lock(&lu_keys_guard);
+	write_lock(&lu_keys_guard);
 	lu_session_tags_default &= ~tags;
 	key_set_version++;
-	spin_unlock(&lu_keys_guard);
+	write_unlock(&lu_keys_guard);
 }
 EXPORT_SYMBOL(lu_session_tags_clear);
 
-- 
1.8.3.1
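Note for reviewers (not part of the patch): the change adopts the standard Linux kernel rwlock_t idiom, where paths that only read the lu_keys table take the guard shared via read_lock() while paths that modify it take it exclusively via write_lock(), so concurrent readers no longer serialize against each other. The sketch below illustrates that idiom in isolation; the example_keys table, EXAMPLE_KEY_NR, example_key_register() and example_key_lookup() are hypothetical names used only for illustration, not Lustre code.

/*
 * Minimal sketch of the rwlock pattern the patch adopts, on a
 * hypothetical key table. Writers take the lock exclusively,
 * readers take it shared and can run concurrently.
 */
#include <linux/spinlock.h>	/* DEFINE_RWLOCK, read_lock, write_lock */
#include <linux/errno.h>	/* ENFILE */

#define EXAMPLE_KEY_NR 40

static void *example_keys[EXAMPLE_KEY_NR];	/* hypothetical key table */
static DEFINE_RWLOCK(example_keys_guard);	/* replaces a spinlock */

/* Writer path: find a free slot and install a new key (exclusive lock). */
static int example_key_register(void *key)
{
	int i;
	int rc = -ENFILE;

	write_lock(&example_keys_guard);
	for (i = 0; i < EXAMPLE_KEY_NR; i++) {
		if (!example_keys[i]) {
			example_keys[i] = key;
			rc = i;
			break;
		}
	}
	write_unlock(&example_keys_guard);
	return rc;
}

/* Reader path: look up a key without blocking other readers (shared lock). */
static void *example_key_lookup(int index)
{
	void *key;

	read_lock(&example_keys_guard);
	key = example_keys[index];
	read_unlock(&example_keys_guard);
	return key;
}

An rwlock keeps the same exclusion guarantees for writers that the old spinlock provided, but lets read-mostly paths such as keys_fill() and lu_context_exit() proceed in parallel, which is the motivation stated in the commit message.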