X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;ds=inline;f=lustre%2Fosc%2Fosc_page.c;h=9662b16f82c13126d34e12a84a402ccd47c1da40;hb=63d146df45660b9dc9d448b8d3ca750ab5c5f0c8;hp=ce870823c2da71cb943fe08f55d42ab40149ae1e;hpb=7af958a556ef2be0219ba279631b05a7b2a58cee;p=fs%2Flustre-release.git

diff --git a/lustre/osc/osc_page.c b/lustre/osc/osc_page.c
index ce87082..9662b16 100644
--- a/lustre/osc/osc_page.c
+++ b/lustre/osc/osc_page.c
@@ -53,115 +53,7 @@ static int osc_lru_alloc(const struct lu_env *env, struct client_obd *cli,
  */
 
 /*
- * Comment out osc_page_protected because it may sleep inside the
- * the client_obd_list_lock.
- * client_obd_list_lock -> osc_ap_completion -> osc_completion ->
- * -> osc_page_protected -> osc_page_is_dlocked -> osc_match_base
- * -> ldlm_lock_match -> sptlrpc_import_check_ctx -> sleep.
- */
-#if 0
-static int osc_page_is_dlocked(const struct lu_env *env,
- const struct osc_page *opg,
- enum cl_lock_mode mode, int pending, int unref)
-{
- struct cl_page *page;
- struct osc_object *obj;
- struct osc_thread_info *info;
- struct ldlm_res_id *resname;
- struct lustre_handle *lockh;
- union ldlm_policy_data *policy;
- enum ldlm_mode dlmmode;
- __u64 flags;
-
- might_sleep();
-
- info = osc_env_info(env);
- resname = &info->oti_resname;
- policy = &info->oti_policy;
- lockh = &info->oti_handle;
- page = opg->ops_cl.cpl_page;
- obj = cl2osc(opg->ops_cl.cpl_obj);
-
- flags = LDLM_FL_TEST_LOCK | LDLM_FL_BLOCK_GRANTED;
- if (pending)
- flags |= LDLM_FL_CBPENDING;
-
- dlmmode = osc_cl_lock2ldlm(mode) | LCK_PW;
- osc_lock_build_res(env, obj, resname);
- osc_index2policy(policy, page->cp_obj, osc_index(opg), osc_index(opg));
- return osc_match_base(osc_export(obj), resname, LDLM_EXTENT, policy,
- dlmmode, &flags, NULL, lockh, unref);
-}
-
-/**
- * Checks an invariant that a page in the cache is covered by a lock, as
- * needed.
- */
-static int osc_page_protected(const struct lu_env *env,
- const struct osc_page *opg,
- enum cl_lock_mode mode, int unref)
-{
- struct cl_object_header *hdr;
- struct cl_lock *scan;
- struct cl_page *page;
- struct cl_lock_descr *descr;
- int result;
-
- LINVRNT(!opg->ops_temp);
-
- page = opg->ops_cl.cpl_page;
- if (page->cp_owner != NULL &&
- cl_io_top(page->cp_owner)->ci_lockreq == CILR_NEVER)
- /*
- * If IO is done without locks (liblustre, or lloop), lock is
- * not required.
- */
- result = 1;
- else
- /* otherwise check for a DLM lock */
- result = osc_page_is_dlocked(env, opg, mode, 1, unref);
- if (result == 0) {
- /* maybe this page is a part of a lockless io? */
- hdr = cl_object_header(opg->ops_cl.cpl_obj);
- descr = &osc_env_info(env)->oti_descr;
- descr->cld_mode = mode;
- descr->cld_start = osc_index(opg);
- descr->cld_end = osc_index(opg);
- spin_lock(&hdr->coh_lock_guard);
- list_for_each_entry(scan, &hdr->coh_locks, cll_linkage) {
- /*
- * Lock-less sub-lock has to be either in HELD state
- * (when io is actively going on), or in CACHED state,
- * when top-lock is being unlocked:
- * cl_io_unlock()->cl_unuse()->...->lov_lock_unuse().
- */
- if ((scan->cll_state == CLS_HELD ||
- scan->cll_state == CLS_CACHED) &&
- cl_lock_ext_match(&scan->cll_descr, descr)) {
- struct osc_lock *olck;
-
- olck = osc_lock_at(scan);
- result = osc_lock_is_lockless(olck);
- break;
- }
- }
- spin_unlock(&hdr->coh_lock_guard);
- }
- return result;
-}
-#else
-static int osc_page_protected(const struct lu_env *env,
- const struct osc_page *opg,
- enum cl_lock_mode mode, int unref)
-{
- return 1;
-}
-#endif
-
-/*****************************************************************************
- *
  * Page operations.
- *
  */
 static void osc_page_transfer_get(struct osc_page *opg, const char *label)
 {
@@ -207,8 +99,6 @@ int osc_page_cache_add(const struct lu_env *env,
 int result;
 ENTRY;
 
- LINVRNT(osc_page_protected(env, opg, CLM_WRITE, 0));
-
 osc_page_transfer_get(opg, "transfer\0cache");
 result = osc_queue_async_io(env, io, opg);
 if (result != 0)
@@ -290,17 +180,15 @@ static int osc_page_print(const struct lu_env *env,
 }
 
 static void osc_page_delete(const struct lu_env *env,
- const struct cl_page_slice *slice)
+ const struct cl_page_slice *slice)
 {
 struct osc_page *opg = cl2osc_page(slice);
 struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);
- int rc;
-
- LINVRNT(opg->ops_temp || osc_page_protected(env, opg, CLM_READ, 1));
+ int rc;
 
- ENTRY;
- CDEBUG(D_TRACE, "%p\n", opg);
- osc_page_transfer_put(env, opg);
+ ENTRY;
+ CDEBUG(D_TRACE, "%p\n", opg);
+ osc_page_transfer_put(env, opg);
 rc = osc_teardown_async_page(env, obj, opg);
 if (rc) {
 CL_PAGE_DEBUG(D_ERROR, env, slice->cpl_page,
@@ -329,33 +217,29 @@ static void osc_page_clip(const struct lu_env *env,
 const struct cl_page_slice *slice,
 int from, int to)
 {
- struct osc_page *opg = cl2osc_page(slice);
- struct osc_async_page *oap = &opg->ops_oap;
-
- LINVRNT(osc_page_protected(env, opg, CLM_READ, 0));
+ struct osc_page *opg = cl2osc_page(slice);
+ struct osc_async_page *oap = &opg->ops_oap;
 
- opg->ops_from = from;
- opg->ops_to = to;
+ opg->ops_from = from;
+ opg->ops_to = to;
 spin_lock(&oap->oap_lock);
 oap->oap_async_flags |= ASYNC_COUNT_STABLE;
 spin_unlock(&oap->oap_lock);
 }
 
 static int osc_page_cancel(const struct lu_env *env,
- const struct cl_page_slice *slice)
+ const struct cl_page_slice *slice)
 {
 struct osc_page *opg = cl2osc_page(slice);
- int rc = 0;
-
- LINVRNT(osc_page_protected(env, opg, CLM_READ, 0));
+ int rc = 0;
 
- /* Check if the transferring against this page
- * is completed, or not even queued. */
- if (opg->ops_transfer_pinned)
- /* FIXME: may not be interrupted.. */
+ /* Check if the transferring against this page
+ * is completed, or not even queued. */
+ if (opg->ops_transfer_pinned)
+ /* FIXME: may not be interrupted.. */
 rc = osc_cancel_async_page(env, opg);
- LASSERT(ergo(rc == 0, opg->ops_transfer_pinned == 0));
- return rc;
+ LASSERT(ergo(rc == 0, opg->ops_transfer_pinned == 0));
+ return rc;
 }
 
 static int osc_page_flush(const struct lu_env *env,
@@ -395,13 +279,6 @@ int osc_page_init(const struct lu_env *env, struct cl_object *obj,
 cl_page_slice_add(page, &opg->ops_cl, obj, index,
 &osc_page_ops);
 }
- /*
- * Cannot assert osc_page_protected() here as read-ahead
- * creates temporary pages outside of a lock.
- */
-#ifdef CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK
- opg->ops_temp = !osc_page_protected(env, opg, CLM_READ, 1);
-#endif
 INIT_LIST_HEAD(&opg->ops_lru);
 
 /* reserve an LRU space for this page */
@@ -429,9 +306,6 @@ void osc_page_submit(const struct lu_env *env, struct osc_page *opg,
 {
 struct osc_async_page *oap = &opg->ops_oap;
 
- LINVRNT(osc_page_protected(env, opg,
- crt == CRT_WRITE ? CLM_WRITE : CLM_READ, 1));
-
 LASSERTF(oap->oap_magic == OAP_MAGIC, "Bad oap magic: oap %p, "
 "magic 0x%x\n", oap, oap->oap_magic);
 LASSERT(oap->oap_async_flags & ASYNC_READY);
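
Note on the first hunk above, which deletes the long-disabled osc_page_protected() invariant together with its LINVRNT() call sites: the removed comment records the call chain client_obd_list_lock -> osc_ap_completion -> osc_completion -> osc_page_protected -> osc_page_is_dlocked -> osc_match_base -> ldlm_lock_match -> sptlrpc_import_check_ctx -> sleep, i.e. the check could end up sleeping while a spinlock was held, which is why it had already been stubbed out under #if 0 before this patch dropped it outright. The sketch below is illustrative only and is not part of the patch; all names in it (demo_lock, demo_page_check, demo_completion) are hypothetical stand-ins used to show the generic sleep-under-spinlock pattern in plain kernel C, the kind of bug that might_sleep() and CONFIG_DEBUG_ATOMIC_SLEEP report.

/* Hypothetical stand-alone module sketch; none of these symbols exist in Lustre. */
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/delay.h>

static DEFINE_SPINLOCK(demo_lock);      /* stands in for client_obd_list_lock */

/* Stands in for the osc_page_protected() path: ldlm_lock_match() may reach
 * sptlrpc_import_check_ctx(), which can block waiting for a security
 * context, i.e. it may sleep. */
static int demo_page_check(void)
{
        might_sleep();          /* documents "may sleep"; warns if called atomically */
        msleep(1);              /* pretend blocking work */
        return 1;
}

/* Stands in for osc_ap_completion(): runs with the spinlock held, so any
 * callee that can sleep triggers "BUG: sleeping function called from
 * invalid context" when CONFIG_DEBUG_ATOMIC_SLEEP is enabled. */
static void demo_completion(void)
{
        spin_lock(&demo_lock);  /* atomic context begins here */
        demo_page_check();      /* the problem: may sleep under demo_lock */
        spin_unlock(&demo_lock);
}

static int __init demo_init(void)
{
        demo_completion();
        return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

With the checker gone, the remaining hunks are mechanical: they drop the corresponding LINVRNT() calls in osc_page_cache_add(), osc_page_delete(), osc_page_clip(), osc_page_cancel() and osc_page_submit(), and the opg->ops_temp setup under CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK in osc_page_init(); the paired '-'/'+' lines that look identical are whitespace-only re-indentation of the surrounding code.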