From 60e052d06f7f82e13fc551b96d1ca7585ac505df Mon Sep 17 00:00:00 2001
From: Jinshan Xiong
Date: Mon, 13 Aug 2012 16:57:49 -0700
Subject: [PATCH 1/1] LU-1666 obdclass: reduce lock contention on coh_page_guard

Define a per-page spinlock to get and put a cl_page instead of
grabbing the per-object lock coh_page_guard.

Signed-off-by: Jinshan Xiong
Change-Id: Iecf5e6840ab1a28edcf2c4bcde6a72c2f9b5bdae
Reviewed-on: http://review.whamcloud.com/3627
Reviewed-by: Brian Behlendorf
Reviewed-by: James Simmons
Reviewed-by: Ned Bass
Reviewed-by: Andreas Dilger
Tested-by: Hudson
Tested-by: Maloo
Reviewed-by: Oleg Drokin
---
 lustre/include/cl_object.h |  2 ++
 lustre/obdclass/cl_page.c  | 40 ++++++++++++++++++++++------------------
 2 files changed, 24 insertions(+), 18 deletions(-)

diff --git a/lustre/include/cl_object.h b/lustre/include/cl_object.h
index b0c11e3..78f0b49 100644
--- a/lustre/include/cl_object.h
+++ b/lustre/include/cl_object.h
@@ -723,6 +723,8 @@ struct cl_page {
          * modified only internally within cl_page.c. Protected by a VM lock.
          */
         const enum cl_page_state cp_state;
+        /** Protects page get and put; see cl_page_put and cl_vmpage_page */
+        cfs_spinlock_t           cp_lock;
         /**
          * Linkage of pages within some group. Protected by
          * cl_page::cp_mutex. */
diff --git a/lustre/obdclass/cl_page.c b/lustre/obdclass/cl_page.c
index b2cecd8..ea160eb 100644
--- a/lustre/obdclass/cl_page.c
+++ b/lustre/obdclass/cl_page.c
@@ -92,7 +92,7 @@ static struct lu_kmem_descr cl_page_caches[] = {
 
 /**
  * Internal version of cl_page_top, it should be called with page referenced,
- * or coh_page_guard held.
+ * or cp_lock held.
  */
 static struct cl_page *cl_page_top_trusted(struct cl_page *page)
 {
@@ -134,10 +134,8 @@ cl_page_at_trusted(const struct cl_page *page,
         const struct cl_page_slice *slice;
 
 #ifdef INVARIANT_CHECK
-        struct cl_object_header *ch = cl_object_header(page->cp_obj);
-
         if (!cfs_atomic_read(&page->cp_ref))
-                LASSERT_SPIN_LOCKED(&ch->coh_page_guard);
+                LASSERT_SPIN_LOCKED(&page->cp_lock);
 #endif
         ENTRY;
 
@@ -344,6 +342,7 @@ static int cl_page_alloc(const struct lu_env *env, struct cl_object *o,
                                                      "cl_page", page);
                 page->cp_index = ind;
                 cl_page_state_set_trust(page, CPS_CACHED);
+                cfs_spin_lock_init(&page->cp_lock);
                 page->cp_type = type;
                 CFS_INIT_LIST_HEAD(&page->cp_layers);
                 CFS_INIT_LIST_HEAD(&page->cp_batch);
@@ -415,6 +414,11 @@ static struct cl_page *cl_page_find0(const struct lu_env *env,
                idx, PFID(&hdr->coh_lu.loh_fid), vmpage, vmpage->private, type);
         /* fast path. */
         if (type == CPT_CACHEABLE) {
+                /* cl_page::cp_lock is used to protect the page state and
+                 * refcount, but an external lock is needed to protect the
+                 * child/parent relationship, so the vmpage lock must be
+                 * held for this purpose. */
+                KLASSERT(PageLocked(vmpage));
                 /*
                  * cl_vmpage_page() can be called here without any locks as
                  *
@@ -648,7 +652,6 @@ EXPORT_SYMBOL(cl_page_get);
  */
 void cl_page_put(const struct lu_env *env, struct cl_page *page)
 {
-        struct cl_object_header *hdr;
         struct cl_site *site = cl_object_site(page->cp_obj);
 
         PASSERT(env, page, cfs_atomic_read(&page->cp_ref) > !!page->cp_parent);
@@ -657,19 +660,18 @@ void cl_page_put(const struct lu_env *env, struct cl_page *page)
         CL_PAGE_HEADER(D_TRACE, env, page, "%d\n",
                        cfs_atomic_read(&page->cp_ref));
 
-        hdr = cl_object_header(cl_object_top(page->cp_obj));
-        if (cfs_atomic_dec_and_lock(&page->cp_ref, &hdr->coh_page_guard)) {
+        if (cfs_atomic_dec_and_lock(&page->cp_ref, &page->cp_lock)) {
                 cfs_atomic_dec(&site->cs_pages.cs_busy);
                 /* We're going to access the page w/o a reference, but it's
-                 * ok because we have grabbed the lock coh_page_guard, which
+                 * ok because we have grabbed the lock cp_lock, which
                  * means nobody is able to free this page behind us.
                  */
                 if (page->cp_state == CPS_FREEING) {
                         /* We drop the page reference and check the page state
-                         * inside the coh_page_guard. So that if it gets here,
+                         * inside the cp_lock. So that if it gets here,
                          * it is the REALLY last reference to this page.
                          */
-                        cfs_spin_unlock(&hdr->coh_page_guard);
+                        cfs_spin_unlock(&page->cp_lock);
 
                         LASSERT(cfs_atomic_read(&page->cp_ref) == 0);
                         PASSERT(env, page, page->cp_owner == NULL);
@@ -683,7 +685,7 @@ void cl_page_put(const struct lu_env *env, struct cl_page *page)
                         EXIT;
                         return;
                 }
-                cfs_spin_unlock(&hdr->coh_page_guard);
+                cfs_spin_unlock(&page->cp_lock);
         }
 
         EXIT;
@@ -718,8 +720,8 @@ EXPORT_SYMBOL(cl_page_vmpage);
  */
 struct cl_page *cl_vmpage_page(cfs_page_t *vmpage, struct cl_object *obj)
 {
-        struct cl_page *page;
-        struct cl_object_header *hdr;
+        struct cl_page *top;
+        struct cl_page *page;
 
         ENTRY;
         KLASSERT(PageLocked(vmpage));
@@ -734,16 +736,18 @@ struct cl_page *cl_vmpage_page(cfs_page_t *vmpage, struct cl_object *obj)
          * This loop assumes that ->private points to the top-most page. This
          * can be rectified easily.
          */
-        hdr = cl_object_header(cl_object_top(obj));
-        cfs_spin_lock(&hdr->coh_page_guard);
-        for (page = (void *)vmpage->private;
-             page != NULL; page = page->cp_child) {
+        top = (struct cl_page *)vmpage->private;
+        if (top == NULL)
+                RETURN(NULL);
+
+        cfs_spin_lock(&top->cp_lock);
+        for (page = top; page != NULL; page = page->cp_child) {
                 if (cl_object_same(page->cp_obj, obj)) {
                         cl_page_get_trust(page);
                         break;
                 }
         }
-        cfs_spin_unlock(&hdr->coh_page_guard);
+        cfs_spin_unlock(&top->cp_lock);
         LASSERT(ergo(page, page->cp_type == CPT_CACHEABLE));
         RETURN(page);
 }
-- 
1.8.3.1
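
A minimal, self-contained sketch of the put-path pattern this patch moves to:
the final reference drop takes a per-page spinlock via atomic_dec_and_lock()
rather than the shared per-object coh_page_guard, so puts on different pages
of the same object no longer contend on one lock. This is an illustration
only, not Lustre code: it uses the stock Linux spinlock/atomic primitives
instead of the cfs_* wrappers, and the my_page structure and my_page_free()
helper are hypothetical names invented for the example.

#include <linux/atomic.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* Hypothetical stand-in for cl_page: a refcount plus a per-page lock. */
struct my_page {
        atomic_t        mp_ref;         /* like cl_page::cp_ref */
        spinlock_t      mp_lock;        /* like cl_page::cp_lock */
        bool            mp_freeing;     /* like cp_state == CPS_FREEING */
};

/* Hypothetical teardown, called once the last reference is gone. */
static void my_page_free(struct my_page *pg)
{
        kfree(pg);
}

static void my_page_put(struct my_page *pg)
{
        /* atomic_dec_and_lock() takes pg->mp_lock only when the count
         * actually reaches zero, so the common put path stays lock-free
         * and the zero-crossing serializes on this one page, not on a
         * lock shared by every page of the object. */
        if (atomic_dec_and_lock(&pg->mp_ref, &pg->mp_lock)) {
                bool freeing = pg->mp_freeing;

                spin_unlock(&pg->mp_lock);
                if (freeing)
                        my_page_free(pg);
        }
}

The trade-off is one spinlock per page instead of one per object, which the
patch accepts in exchange for removing a single contended lock from
cl_page_put() and cl_vmpage_page().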