* Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2010, 2011, Whamcloud, Inc.
+ * Copyright (c) 2010, 2013, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
if (lock->l_conn_export == NULL) {
static cfs_time_t next_dump = 0, last_dump = 0;
- if (ptlrpc_check_suspend())
- RETURN(0);
-
- LDLM_ERROR(lock, "lock timed out (enqueued at "CFS_TIME_T", "
- CFS_DURATION_T"s ago); not entering recovery in "
- "server code, just going back to sleep",
- lock->l_last_activity,
- cfs_time_sub(cfs_time_current_sec(),
- lock->l_last_activity));
+ LCONSOLE_WARN("lock timed out (enqueued at "CFS_TIME_T", "
+ CFS_DURATION_T"s ago)\n",
+ lock->l_last_activity,
+ cfs_time_sub(cfs_time_current_sec(),
+ lock->l_last_activity));
+ LDLM_DEBUG(lock, "lock timed out (enqueued at "CFS_TIME_T", "
+ CFS_DURATION_T"s ago); not entering recovery in "
+ "server code, just going back to sleep",
+ lock->l_last_activity,
+ cfs_time_sub(cfs_time_current_sec(),
+ lock->l_last_activity));
if (cfs_time_after(cfs_time_current(), next_dump)) {
last_dump = next_dump;
next_dump = cfs_time_shift(300);
*/
static int ldlm_completion_tail(struct ldlm_lock *lock)
{
- long delay;
- int result;
-
- if (lock->l_destroyed || lock->l_flags & LDLM_FL_FAILED) {
- LDLM_DEBUG(lock, "client-side enqueue: destroyed");
- result = -EIO;
- } else {
- delay = cfs_time_sub(cfs_time_current_sec(),
- lock->l_last_activity);
- LDLM_DEBUG(lock, "client-side enqueue: granted after "
- CFS_DURATION_T"s", delay);
-
- /* Update our time estimate */
- at_measured(ldlm_lock_to_ns_at(lock),
- delay);
- result = 0;
- }
- return result;
+ long delay;
+ int result;
+
+ if (ldlm_is_destroyed(lock) || ldlm_is_failed(lock)) {
+ LDLM_DEBUG(lock, "client-side enqueue: destroyed");
+ result = -EIO;
+ } else {
+ delay = cfs_time_sub(cfs_time_current_sec(),
+ lock->l_last_activity);
+ LDLM_DEBUG(lock, "client-side enqueue: granted after "
+ CFS_DURATION_T"s", delay);
+
+ /* Update our time estimate */
+ at_measured(ldlm_lock_to_ns_at(lock),
+ delay);
+ result = 0;
+ }
+ return result;
}
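/* The predicates used below (ldlm_is_destroyed(), ldlm_is_failed(),
 * ldlm_set_cbpending(), ...) replace open-coded tests on lock->l_flags.
 * A minimal sketch of the wrapper style, assuming the generated helpers
 * in lustre_dlm_flags.h keep roughly this shape:
 *
 *	#define LDLM_TEST_FLAG(_l, _b)  (((_l)->l_flags & (_b)) != 0)
 *	#define LDLM_SET_FLAG(_l, _b)   ((_l)->l_flags |= (_b))
 *	#define LDLM_CLEAR_FLAG(_l, _b) ((_l)->l_flags &= ~(_b))
 *
 *	#define ldlm_is_failed(_l)      LDLM_TEST_FLAG((_l), LDLM_FL_FAILED)
 *	#define ldlm_set_cbpending(_l)  LDLM_SET_FLAG((_l), LDLM_FL_CBPENDING)
 */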
/**
*/
int ldlm_completion_ast_async(struct ldlm_lock *lock, __u64 flags, void *data)
{
- ENTRY;
+ ENTRY;
- if (flags == LDLM_FL_WAIT_NOREPROC) {
- LDLM_DEBUG(lock, "client-side enqueue waiting on pending lock");
- RETURN(0);
- }
+ if (flags == LDLM_FL_WAIT_NOREPROC) {
+ LDLM_DEBUG(lock, "client-side enqueue waiting on pending lock");
+ RETURN(0);
+ }
- if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
- LDLM_FL_BLOCK_CONV))) {
- cfs_waitq_signal(&lock->l_waitq);
- RETURN(ldlm_completion_tail(lock));
- }
+ if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
+ LDLM_FL_BLOCK_CONV))) {
+ wake_up(&lock->l_waitq);
+ RETURN(ldlm_completion_tail(lock));
+ }
- LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, "
- "going forward");
- ldlm_reprocess_all(lock->l_resource);
- RETURN(0);
+ LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, "
+ "going forward");
+ ldlm_reprocess_all(lock->l_resource);
+ RETURN(0);
}
EXPORT_SYMBOL(ldlm_completion_ast_async);
goto noreproc;
}
- if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
- LDLM_FL_BLOCK_CONV))) {
- cfs_waitq_signal(&lock->l_waitq);
- RETURN(0);
- }
+ if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
+ LDLM_FL_BLOCK_CONV))) {
+ wake_up(&lock->l_waitq);
+ RETURN(0);
+ }
LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, "
"sleeping");
lwd.lwd_lock = lock;
- if (lock->l_flags & LDLM_FL_NO_TIMEOUT) {
+ if (ldlm_is_no_timeout(lock)) {
LDLM_DEBUG(lock, "waiting indefinitely because of NO_TIMEOUT");
lwi = LWI_INTR(interrupted_completion_wait, &lwd);
} else {
if (ns_is_client(ldlm_lock_to_ns(lock)) &&
OBD_FAIL_CHECK_RESET(OBD_FAIL_LDLM_INTR_CP_AST,
OBD_FAIL_LDLM_CP_BL_RACE | OBD_FAIL_ONCE)) {
- lock->l_flags |= LDLM_FL_FAIL_LOC;
+ ldlm_set_fail_loc(lock);
rc = -EINTR;
} else {
/* Go to sleep until the lock is granted or cancelled. */
int do_ast;
ENTRY;
- lock->l_flags |= LDLM_FL_CBPENDING;
+ ldlm_set_cbpending(lock);
do_ast = (!lock->l_readers && !lock->l_writers);
unlock_res_and_lock(lock);
LDLM_DEBUG(lock, "already unused, calling ldlm_cli_cancel");
ldlm_lock2handle(lock, &lockh);
- rc = ldlm_cli_cancel(&lockh);
+ rc = ldlm_cli_cancel(&lockh, LCF_ASYNC);
if (rc < 0)
CERROR("ldlm_cli_cancel: %d\n", rc);
} else {
lock = ldlm_lock_create(ns, res_id, type, mode, &cbs, data, lvb_len,
lvb_type);
- if (unlikely(!lock))
- GOTO(out_nolock, err = -ENOMEM);
+ if (IS_ERR(lock))
+ GOTO(out_nolock, err = PTR_ERR(lock));
ldlm_lock2handle(lock, lockh);
/* NB: we don't have any lock now (lock_res_and_lock)
* because it's a new lock */
ldlm_lock_addref_internal_nolock(lock, mode);
- lock->l_flags |= LDLM_FL_LOCAL;
+ ldlm_set_local(lock);
if (*flags & LDLM_FL_ATOMIC_CB)
- lock->l_flags |= LDLM_FL_ATOMIC_CB;
+ ldlm_set_atomic_cb(lock);
if (policy != NULL)
lock->l_policy_data = *policy;
if (client_cookie != NULL)
lock->l_client_cookie = *client_cookie;
- if (type == LDLM_EXTENT)
- lock->l_req_extent = policy->l_extent;
+ if (type == LDLM_EXTENT) {
+ /* extent lock without policy is a bug */
+ if (policy == NULL)
+ LBUG();
+
+ lock->l_req_extent = policy->l_extent;
+ }
err = ldlm_lock_enqueue(ns, &lock, policy, flags);
if (unlikely(err != ELDLM_OK))
lock_res_and_lock(lock);
/* Check that lock is not granted or failed, we might race. */
if ((lock->l_req_mode != lock->l_granted_mode) &&
- !(lock->l_flags & LDLM_FL_FAILED)) {
- /* Make sure that this lock will not be found by raced
- * bl_ast and -EINVAL reply is sent to server anyways.
- * bug 17645 */
- lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_FAILED |
- LDLM_FL_ATOMIC_CB | LDLM_FL_CBPENDING;
- need_cancel = 1;
+ !ldlm_is_failed(lock)) {
+ /* Make sure that this lock will not be found by a raced bl_ast
+ * and that an -EINVAL reply is sent to the server anyway.
+ * b=17645 */
+ lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_FAILED |
+ LDLM_FL_ATOMIC_CB | LDLM_FL_CBPENDING;
+ need_cancel = 1;
}
unlock_res_and_lock(lock);
else
LDLM_DEBUG(lock, "lock was granted or failed in race");
- ldlm_lock_decref_internal(lock, mode);
-
- /* XXX - HACK because we shouldn't call ldlm_lock_destroy()
- * from llite/file.c/ll_file_flock(). */
- /* This code makes for the fact that we do not have blocking handler on
- * a client for flock locks. As such this is the place where we must
- * completely kill failed locks. (interrupted and those that
- * were waiting to be granted when server evicted us. */
- if (lock->l_resource->lr_type == LDLM_FLOCK) {
- lock_res_and_lock(lock);
- ldlm_resource_unlink_lock(lock);
- ldlm_lock_destroy_nolock(lock);
- unlock_res_and_lock(lock);
- }
+ /* XXX - HACK because we shouldn't call ldlm_lock_destroy()
+ * from llite/file.c/ll_file_flock(). */
+ /* This code deals with the fact that we do not have a blocking handler
+ * on a client for flock locks. As such, this is the place where we must
+ * completely kill failed locks (interrupted, and those that were
+ * waiting to be granted when the server evicted us). */
+ if (lock->l_resource->lr_type == LDLM_FLOCK) {
+ lock_res_and_lock(lock);
+ if (!ldlm_is_destroyed(lock)) {
+ ldlm_resource_unlink_lock(lock);
+ ldlm_lock_decref_internal_nolock(lock, mode);
+ ldlm_lock_destroy_nolock(lock);
+ }
+ unlock_res_and_lock(lock);
+ } else {
+ ldlm_lock_decref_internal(lock, mode);
+ }
}
/**
*flags = ldlm_flags_from_wire(reply->lock_flags);
lock->l_flags |= ldlm_flags_from_wire(reply->lock_flags &
- LDLM_INHERIT_FLAGS);
- /* move NO_TIMEOUT flag to the lock to force ldlm_lock_match()
- * to wait with no timeout as well */
- lock->l_flags |= ldlm_flags_from_wire(reply->lock_flags &
- LDLM_FL_NO_TIMEOUT);
+ LDLM_FL_INHERIT_MASK);
unlock_res_and_lock(lock);
- CDEBUG(D_INFO, "local: %p, remote cookie: "LPX64", flags: 0x%llx\n",
- lock, reply->lock_handle.cookie, *flags);
-
- /* If enqueue returned a blocked lock but the completion handler has
- * already run, then it fixed up the resource and we don't need to do it
- * again. */
- if ((*flags) & LDLM_FL_LOCK_CHANGED) {
- int newmode = reply->lock_desc.l_req_mode;
- LASSERT(!is_replay);
- if (newmode && newmode != lock->l_req_mode) {
- LDLM_DEBUG(lock, "server returned different mode %s",
- ldlm_lockname[newmode]);
- lock->l_req_mode = newmode;
- }
+ CDEBUG(D_INFO, "local: %p, remote cookie: "LPX64", flags: "LPX64"\n",
+ lock, reply->lock_handle.cookie, *flags);
+
+ /* If enqueue returned a blocked lock but the completion handler has
+ * already run, then it fixed up the resource and we don't need to do it
+ * again. */
+ if ((*flags) & LDLM_FL_LOCK_CHANGED) {
+ int newmode = reply->lock_desc.l_req_mode;
+ LASSERT(!is_replay);
+ if (newmode && newmode != lock->l_req_mode) {
+ LDLM_DEBUG(lock, "server returned different mode %s",
+ ldlm_lockname[newmode]);
+ lock->l_req_mode = newmode;
+ }
- if (memcmp(reply->lock_desc.l_resource.lr_name.name,
- lock->l_resource->lr_name.name,
- sizeof(struct ldlm_res_id))) {
- CDEBUG(D_INFO, "remote intent success, locking "
- "(%ld,%ld,%ld) instead of "
- "(%ld,%ld,%ld)\n",
- (long)reply->lock_desc.l_resource.lr_name.name[0],
- (long)reply->lock_desc.l_resource.lr_name.name[1],
- (long)reply->lock_desc.l_resource.lr_name.name[2],
- (long)lock->l_resource->lr_name.name[0],
- (long)lock->l_resource->lr_name.name[1],
- (long)lock->l_resource->lr_name.name[2]);
-
- rc = ldlm_lock_change_resource(ns, lock,
- &reply->lock_desc.l_resource.lr_name);
- if (rc || lock->l_resource == NULL)
- GOTO(cleanup, rc = -ENOMEM);
- LDLM_DEBUG(lock, "client-side enqueue, new resource");
- }
- if (with_policy)
- if (!(type == LDLM_IBITS && !(exp->exp_connect_flags &
- OBD_CONNECT_IBITS)))
- /* We assume lock type cannot change on server*/
- ldlm_convert_policy_to_local(exp,
- lock->l_resource->lr_type,
- &reply->lock_desc.l_policy_data,
- &lock->l_policy_data);
+ if (!ldlm_res_eq(&reply->lock_desc.l_resource.lr_name,
+ &lock->l_resource->lr_name)) {
+ CDEBUG(D_INFO, "remote intent success, locking "DLDLMRES
+ " instead of "DLDLMRES"\n",
+ PLDLMRES(&reply->lock_desc.l_resource),
+ PLDLMRES(lock->l_resource));
+
+ rc = ldlm_lock_change_resource(ns, lock,
+ &reply->lock_desc.l_resource.lr_name);
+ if (rc || lock->l_resource == NULL)
+ GOTO(cleanup, rc = -ENOMEM);
+ LDLM_DEBUG(lock, "client-side enqueue, new resource");
+ }
+ if (with_policy)
+ if (!(type == LDLM_IBITS &&
+ !(exp_connect_flags(exp) & OBD_CONNECT_IBITS)))
+ /* We assume the lock type cannot change on the server */
+ ldlm_convert_policy_to_local(exp,
+ lock->l_resource->lr_type,
+ &reply->lock_desc.l_policy_data,
+ &lock->l_policy_data);
if (type != LDLM_PLAIN)
LDLM_DEBUG(lock,"client-side enqueue, new policy data");
}
* bug 7311). */
(LIBLUSTRE_CLIENT && type == LDLM_EXTENT)) {
lock_res_and_lock(lock);
- lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_BL_AST;
+ lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_BL_AST;
unlock_res_and_lock(lock);
LDLM_DEBUG(lock, "enqueue reply includes blocking AST");
}
rc = ldlm_fill_lvb(lock, &req->rq_pill, RCL_SERVER,
lock->l_lvb_data, size);
unlock_res_and_lock(lock);
- if (rc < 0)
+ if (rc < 0) {
+ cleanup_phase = 1;
GOTO(cleanup, rc);
+ }
}
if (!is_replay) {
{
int avail;
- avail = min_t(int, LDLM_MAXREQSIZE, CFS_PAGE_SIZE - 512) - req_size;
+ avail = min_t(int, LDLM_MAXREQSIZE, PAGE_CACHE_SIZE - 512) - req_size;
if (likely(avail >= 0))
avail /= (int)sizeof(struct lustre_handle);
else
* that needs to be performed.
*/
int ldlm_prep_elc_req(struct obd_export *exp, struct ptlrpc_request *req,
- int version, int opc, int canceloff,
- cfs_list_t *cancels, int count)
-{
- struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
- struct req_capsule *pill = &req->rq_pill;
- struct ldlm_request *dlm = NULL;
- int flags, avail, to_free, pack = 0;
- CFS_LIST_HEAD(head);
- int rc;
- ENTRY;
+ int version, int opc, int canceloff,
+ struct list_head *cancels, int count)
+{
+ struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
+ struct req_capsule *pill = &req->rq_pill;
+ struct ldlm_request *dlm = NULL;
+ struct list_head head = LIST_HEAD_INIT(head);
+ int flags, avail, to_free, pack = 0;
+ int rc;
+ ENTRY;
- if (cancels == NULL)
- cancels = &head;
+ if (cancels == NULL)
+ cancels = &head;
if (ns_connect_cancelset(ns)) {
/* Estimate the amount of available space in the request. */
req_capsule_filled_sizes(pill, RCL_CLIENT);
}
EXPORT_SYMBOL(ldlm_prep_enqueue_req);
+struct ptlrpc_request *ldlm_enqueue_pack(struct obd_export *exp, int lvb_len)
+{
+ struct ptlrpc_request *req;
+ int rc;
+ ENTRY;
+
+ req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LDLM_ENQUEUE);
+ if (req == NULL)
+ RETURN(ERR_PTR(-ENOMEM));
+
+ rc = ldlm_prep_enqueue_req(exp, req, NULL, 0);
+ if (rc) {
+ ptlrpc_request_free(req);
+ RETURN(ERR_PTR(rc));
+ }
+
+ req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER, lvb_len);
+ ptlrpc_request_set_replen(req);
+ RETURN(req);
+}
+EXPORT_SYMBOL(ldlm_enqueue_pack);
+
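/* A hypothetical caller sketch for ldlm_enqueue_pack() above: allocate
 * and pack an enqueue request sized for an lvb_len-byte LVB reply, then
 * queue it; req and rc are assumed to be declared by the caller.
 *
 *	req = ldlm_enqueue_pack(exp, lvb_len);
 *	if (IS_ERR(req))
 *		RETURN(PTR_ERR(req));
 *	rc = ptlrpc_queue_wait(req);
 *	ptlrpc_req_finished(req);
 */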
/**
* Client-side lock enqueue.
*
void *lvb, __u32 lvb_len, enum lvb_type lvb_type,
struct lustre_handle *lockh, int async)
{
- struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
+ struct ldlm_namespace *ns;
struct ldlm_lock *lock;
struct ldlm_request *body;
int is_replay = *flags & LDLM_FL_REPLAY;
LASSERT(exp != NULL);
+ ns = exp->exp_obd->obd_namespace;
+
/* If we're replaying this lock, just check some invariants.
* If we're creating a new lock, get everything all setup nice. */
if (is_replay) {
LDLM_DEBUG(lock, "client-side enqueue START");
LASSERT(exp == lock->l_conn_export);
} else {
- const struct ldlm_callback_suite cbs = {
- .lcs_completion = einfo->ei_cb_cp,
- .lcs_blocking = einfo->ei_cb_bl,
- .lcs_glimpse = einfo->ei_cb_gl,
- .lcs_weigh = einfo->ei_cb_wg
- };
- lock = ldlm_lock_create(ns, res_id, einfo->ei_type,
- einfo->ei_mode, &cbs, einfo->ei_cbdata,
+ const struct ldlm_callback_suite cbs = {
+ .lcs_completion = einfo->ei_cb_cp,
+ .lcs_blocking = einfo->ei_cb_bl,
+ .lcs_glimpse = einfo->ei_cb_gl
+ };
+ lock = ldlm_lock_create(ns, res_id, einfo->ei_type,
+ einfo->ei_mode, &cbs, einfo->ei_cbdata,
lvb_len, lvb_type);
- if (lock == NULL)
- RETURN(-ENOMEM);
+ if (IS_ERR(lock))
+ RETURN(PTR_ERR(lock));
/* for the local lock, add the reference */
ldlm_lock_addref_internal(lock, einfo->ei_mode);
ldlm_lock2handle(lock, lockh);
- if (policy != NULL) {
- /* INODEBITS_INTEROP: If the server does not support
- * inodebits, we will request a plain lock in the
- * descriptor (ldlm_lock2desc() below) but use an
- * inodebits lock internally with both bits set.
- */
- if (einfo->ei_type == LDLM_IBITS &&
- !(exp->exp_connect_flags & OBD_CONNECT_IBITS))
- lock->l_policy_data.l_inodebits.bits =
- MDS_INODELOCK_LOOKUP |
- MDS_INODELOCK_UPDATE;
- else
- lock->l_policy_data = *policy;
- }
+ if (policy != NULL)
+ lock->l_policy_data = *policy;
- if (einfo->ei_type == LDLM_EXTENT)
- lock->l_req_extent = policy->l_extent;
- LDLM_DEBUG(lock, "client-side enqueue START, flags %llx\n",
+ if (einfo->ei_type == LDLM_EXTENT) {
+ /* extent lock without policy is a bug */
+ if (policy == NULL)
+ LBUG();
+
+ lock->l_req_extent = policy->l_extent;
+ }
+ LDLM_DEBUG(lock, "client-side enqueue START, flags "LPX64"\n",
*flags);
- }
+ }
lock->l_conn_export = exp;
lock->l_export = NULL;
lock->l_blocking_ast = einfo->ei_cb_bl;
- lock->l_flags |= (*flags & LDLM_FL_NO_LRU);
+ lock->l_flags |= (*flags & (LDLM_FL_NO_LRU | LDLM_FL_EXCL));
/* lock not sent to server yet */
ldlm_reprocess_all(res);
rc = 0;
} else {
- rc = EDEADLOCK;
+ rc = LUSTRE_EDEADLK;
}
LDLM_DEBUG(lock, "client-side local convert handler END");
LDLM_LOCK_PUT(lock);
GOTO(out, rc);
}
} else {
- rc = EDEADLOCK;
+ rc = LUSTRE_EDEADLK;
}
EXIT;
out:
LDLM_DEBUG(lock, "client-side cancel");
/* Set this flag to prevent others from getting new references*/
lock_res_and_lock(lock);
- lock->l_flags |= LDLM_FL_CBPENDING;
+ ldlm_set_cbpending(lock);
local_only = !!(lock->l_flags &
(LDLM_FL_LOCAL_ONLY|LDLM_FL_CANCEL_ON_BLOCK));
- ldlm_cancel_callback(lock);
- rc = (lock->l_flags & LDLM_FL_BL_AST) ?
- LDLM_FL_BL_AST : LDLM_FL_CANCELING;
- unlock_res_and_lock(lock);
+ ldlm_cancel_callback(lock);
+ rc = (ldlm_is_bl_ast(lock)) ?
+ LDLM_FL_BL_AST : LDLM_FL_CANCELING;
+ unlock_res_and_lock(lock);
if (local_only) {
CDEBUG(D_DLMTRACE, "not sending request (at caller's "
} else {
rc = ptlrpc_queue_wait(req);
}
- if (rc == ESTALE) {
+ if (rc == LUSTRE_ESTALE) {
CDEBUG(D_DLMTRACE, "client/server (nid %s) "
"out of sync -- not fatal\n",
libcfs_nid2str(req->rq_import->
ptlrpc_req_finished(req);
continue;
} else if (rc != ELDLM_OK) {
- if (rc != -ESHUTDOWN)
- CERROR("Got rc %d from cancel RPC: canceling "
- "anyway\n", rc);
- break;
+ /* -ESHUTDOWN is common on umount */
+ CDEBUG_LIMIT(rc == -ESHUTDOWN ? D_DLMTRACE : D_ERROR,
+ "Got rc %d from cancel RPC: "
+ "canceling anyway\n", rc);
+ break;
}
sent = count;
break;
*
* Lock must not have any readers or writers by this time.
*/
-int ldlm_cli_cancel(struct lustre_handle *lockh)
+int ldlm_cli_cancel(struct lustre_handle *lockh,
+ ldlm_cancel_flags_t cancel_flags)
{
- struct obd_export *exp;
+ struct obd_export *exp;
int avail, flags, count = 1;
__u64 rc = 0;
- struct ldlm_namespace *ns;
- struct ldlm_lock *lock;
- CFS_LIST_HEAD(cancels);
- ENTRY;
+ struct ldlm_namespace *ns;
+ struct ldlm_lock *lock;
+ struct list_head cancels = LIST_HEAD_INIT(cancels);
+ ENTRY;
/* concurrent cancels on the same handle can happen */
lock = ldlm_handle2lock_long(lockh, LDLM_FL_CANCELING);
- if (lock == NULL) {
- LDLM_DEBUG_NOLOCK("lock is already being destroyed\n");
- RETURN(0);
- }
+ if (lock == NULL) {
+ LDLM_DEBUG_NOLOCK("lock is already being destroyed");
+ RETURN(0);
+ }
- rc = ldlm_cli_cancel_local(lock);
- if (rc == LDLM_FL_LOCAL_ONLY) {
- LDLM_LOCK_RELEASE(lock);
+ rc = ldlm_cli_cancel_local(lock);
+ if (rc == LDLM_FL_LOCAL_ONLY || cancel_flags & LCF_LOCAL) {
+ LDLM_LOCK_RELEASE(lock);
RETURN(0);
- }
+ }
/* Even if the lock is marked as LDLM_FL_BL_AST, this is a LDLM_CANCEL
* RPC which goes to canceld portal, so we can cancel other LRU locks
* here and send them all as one LDLM_CANCEL RPC. */
count += ldlm_cancel_lru_local(ns, &cancels, 0, avail - 1,
LCF_BL_AST, flags);
}
- ldlm_cli_cancel_list(&cancels, count, NULL, 0);
+ ldlm_cli_cancel_list(&cancels, count, NULL, cancel_flags);
RETURN(0);
}
EXPORT_SYMBOL(ldlm_cli_cancel);
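/* Sketch of the updated calling convention (illustrative, not from this
 * patch):
 *
 *	rc = ldlm_cli_cancel(&lockh, 0);          (send the cancel RPC)
 *	rc = ldlm_cli_cancel(&lockh, LCF_ASYNC);  (defer it to a thread)
 *
 * and, per the LCF_LOCAL test above, passing LCF_LOCAL skips the RPC
 * entirely. */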
* Return the number of cancelled locks.
*/
int ldlm_cli_cancel_list_local(cfs_list_t *cancels, int count,
- ldlm_cancel_flags_t flags)
+ ldlm_cancel_flags_t flags)
{
- CFS_LIST_HEAD(head);
- struct ldlm_lock *lock, *next;
+ struct list_head head = LIST_HEAD_INIT(head);
+ struct ldlm_lock *lock, *next;
int left = 0, bl_ast = 0;
__u64 rc;
int unused, int added,
int count)
{
- ldlm_policy_res_t result = LDLM_POLICY_CANCEL_LOCK;
- ldlm_cancel_for_recovery cb = ns->ns_cancel_for_recovery;
- lock_res_and_lock(lock);
-
- /* don't check added & count since we want to process all locks
- * from unused list */
- switch (lock->l_resource->lr_type) {
- case LDLM_EXTENT:
- case LDLM_IBITS:
- if (cb && cb(lock))
- break;
- default:
- result = LDLM_POLICY_SKIP_LOCK;
- lock->l_flags |= LDLM_FL_SKIPPED;
- break;
- }
+ ldlm_policy_res_t result = LDLM_POLICY_CANCEL_LOCK;
+
+ /* don't check added & count since we want to process all locks
+ * from the unused list.
+ * It's fine not to take the resource lock here to access
+ * lock->l_resource, since the lock has already been granted and
+ * so it won't change. */
+ switch (lock->l_resource->lr_type) {
+ case LDLM_EXTENT:
+ case LDLM_IBITS:
+ if (ns->ns_cancel != NULL && ns->ns_cancel(lock) != 0)
+ break;
+ default:
+ result = LDLM_POLICY_SKIP_LOCK;
+ lock_res_and_lock(lock);
+ ldlm_set_skipped(lock);
+ unlock_res_and_lock(lock);
+ break;
+ }
- unlock_res_and_lock(lock);
- RETURN(result);
+ RETURN(result);
}
/**
int unused, int added,
int count)
{
- cfs_time_t cur = cfs_time_current();
- struct ldlm_pool *pl = &ns->ns_pool;
- __u64 slv, lvf, lv;
- cfs_time_t la;
+ cfs_time_t cur = cfs_time_current();
+ struct ldlm_pool *pl = &ns->ns_pool;
+ __u64 slv, lvf, lv;
+ cfs_time_t la;
/* Stop LRU processing when we reach past @count or have checked all
* locks in LRU. */
- if (count && added >= count)
- return LDLM_POLICY_KEEP_LOCK;
+ if (count && added >= count)
+ return LDLM_POLICY_KEEP_LOCK;
- slv = ldlm_pool_get_slv(pl);
- lvf = ldlm_pool_get_lvf(pl);
- la = cfs_duration_sec(cfs_time_sub(cur,
- lock->l_last_used));
+ slv = ldlm_pool_get_slv(pl);
+ lvf = ldlm_pool_get_lvf(pl);
+ la = cfs_duration_sec(cfs_time_sub(cur,
+ lock->l_last_used));
lv = lvf * la * unused;
/* Inform pool about current CLV to see it via proc. */
* Stop when the server has not yet sent an SLV, or when lv is still
* smaller than the SLV. */
- return (slv == 0 || lv < slv) ?
- LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
+ if (slv == 0 || lv < slv)
+ return LDLM_POLICY_KEEP_LOCK;
+
+ if (ns->ns_cancel != NULL && ns->ns_cancel(lock) == 0)
+ return LDLM_POLICY_KEEP_LOCK;
+
+ return LDLM_POLICY_CANCEL_LOCK;
}
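/* Worked example of the SLV check above, with illustrative numbers:
 * given slv = 10000 and lvf = 100 from the pool, a lock last used
 * la = 5 seconds ago while unused = 4 locks sit in the LRU gives
 * lv = 100 * 5 * 4 = 2000 < slv, so the lock is kept. Cancellation
 * starts once la * unused reaches slv / lvf = 100, and even then
 * ns->ns_cancel may veto it by returning 0. */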
/**
int unused, int added,
int count)
{
- /* Stop LRU processing if young lock is found and we reach past count */
- return ((added >= count) &&
- cfs_time_before(cfs_time_current(),
- cfs_time_add(lock->l_last_used,
- ns->ns_max_age))) ?
- LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
+ if (added >= count)
+ return LDLM_POLICY_KEEP_LOCK;
+
+ if (cfs_time_before(cfs_time_current(),
+ cfs_time_add(lock->l_last_used, ns->ns_max_age)))
+ return LDLM_POLICY_KEEP_LOCK;
+
+ if (ns->ns_cancel != NULL && ns->ns_cancel(lock) == 0)
+ return LDLM_POLICY_KEEP_LOCK;
+
+ return LDLM_POLICY_CANCEL_LOCK;
}
/**
cfs_list_for_each_entry_safe(lock, next, &ns->ns_unused_list,
l_lru) {
/* No locks which got blocking requests. */
- LASSERT(!(lock->l_flags & LDLM_FL_BL_AST));
+ LASSERT(!ldlm_is_bl_ast(lock));
- if (flags & LDLM_CANCEL_NO_WAIT &&
- lock->l_flags & LDLM_FL_SKIPPED)
- /* already processed */
- continue;
+ if (flags & LDLM_CANCEL_NO_WAIT &&
+ ldlm_is_skipped(lock))
+ /* already processed */
+ continue;
/* Somebody is already doing CANCEL. No need for this
* lock in LRU, do not traverse it again. */
- if (!(lock->l_flags & LDLM_FL_CANCELING))
+ if (!ldlm_is_canceling(lock))
break;
- ldlm_lock_remove_from_lru_nolock(lock);
- }
- if (&lock->l_lru == &ns->ns_unused_list)
- break;
+ ldlm_lock_remove_from_lru_nolock(lock);
+ }
+ if (&lock->l_lru == &ns->ns_unused_list)
+ break;
- LDLM_LOCK_GET(lock);
+ LDLM_LOCK_GET(lock);
spin_unlock(&ns->ns_lock);
- lu_ref_add(&lock->l_reference, __FUNCTION__, cfs_current());
+ lu_ref_add(&lock->l_reference, __FUNCTION__, current);
/* Pass the lock through the policy filter and see if it
* should stay in LRU.
* old locks, but additionally choose them by
* their weight. Big extent locks will stay in
* the cache. */
- result = pf(ns, lock, unused, added, count);
- if (result == LDLM_POLICY_KEEP_LOCK) {
- lu_ref_del(&lock->l_reference,
- __FUNCTION__, cfs_current());
- LDLM_LOCK_RELEASE(lock);
+ result = pf(ns, lock, unused, added, count);
+ if (result == LDLM_POLICY_KEEP_LOCK) {
+ lu_ref_del(&lock->l_reference,
+ __FUNCTION__, current);
+ LDLM_LOCK_RELEASE(lock);
spin_lock(&ns->ns_lock);
break;
}
if (result == LDLM_POLICY_SKIP_LOCK) {
lu_ref_del(&lock->l_reference,
- __func__, cfs_current());
+ __func__, current);
LDLM_LOCK_RELEASE(lock);
spin_lock(&ns->ns_lock);
- continue;
- }
+ continue;
+ }
- lock_res_and_lock(lock);
- /* Check flags again under the lock. */
- if ((lock->l_flags & LDLM_FL_CANCELING) ||
- (ldlm_lock_remove_from_lru(lock) == 0)) {
+ lock_res_and_lock(lock);
+ /* Check flags again under the lock. */
+ if (ldlm_is_canceling(lock) ||
+ (ldlm_lock_remove_from_lru(lock) == 0)) {
/* Another thread is removing lock from LRU, or
* somebody is already doing CANCEL, or there
* is a blocking request which will send cancel
* by itself, or the lock is no longer unused. */
- unlock_res_and_lock(lock);
- lu_ref_del(&lock->l_reference,
- __FUNCTION__, cfs_current());
- LDLM_LOCK_RELEASE(lock);
+ unlock_res_and_lock(lock);
+ lu_ref_del(&lock->l_reference, __FUNCTION__, current);
+ LDLM_LOCK_RELEASE(lock);
spin_lock(&ns->ns_lock);
- continue;
- }
- LASSERT(!lock->l_readers && !lock->l_writers);
-
- /* If we have chosen to cancel this lock voluntarily, we
- * better send cancel notification to server, so that it
- * frees appropriate state. This might lead to a race
- * where while we are doing cancel here, server is also
- * silently cancelling this lock. */
- lock->l_flags &= ~LDLM_FL_CANCEL_ON_BLOCK;
-
- /* Setting the CBPENDING flag is a little misleading,
- * but prevents an important race; namely, once
- * CBPENDING is set, the lock can accumulate no more
- * readers/writers. Since readers and writers are
- * already zero here, ldlm_lock_decref() won't see
- * this flag and call l_blocking_ast */
- lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_CANCELING;
-
- /* We can't re-add to l_lru as it confuses the
- * refcounting in ldlm_lock_remove_from_lru() if an AST
- * arrives after we drop lr_lock below. We use l_bl_ast
- * and can't use l_pending_chain as it is used both on
- * server and client nevertheless bug 5666 says it is
- * used only on server */
- LASSERT(cfs_list_empty(&lock->l_bl_ast));
- cfs_list_add(&lock->l_bl_ast, cancels);
- unlock_res_and_lock(lock);
- lu_ref_del(&lock->l_reference, __FUNCTION__, cfs_current());
+ continue;
+ }
+ LASSERT(!lock->l_readers && !lock->l_writers);
+
+ /* If we have chosen to cancel this lock voluntarily, we
+ * had better send a cancel notification to the server, so
+ * that it frees the appropriate state. This might lead to
+ * a race where, while we cancel here, the server is also
+ * silently cancelling the same lock. */
+ ldlm_clear_cancel_on_block(lock);
+
+ /* Setting the CBPENDING flag is a little misleading,
+ * but prevents an important race; namely, once
+ * CBPENDING is set, the lock can accumulate no more
+ * readers/writers. Since readers and writers are
+ * already zero here, ldlm_lock_decref() won't see
+ * this flag and call l_blocking_ast */
+ lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_CANCELING;
+
+ /* We can't re-add to l_lru as it confuses the
+ * refcounting in ldlm_lock_remove_from_lru() if an AST
+ * arrives after we drop lr_lock below. We use l_bl_ast
+ * and can't use l_pending_chain, as the latter is used
+ * on both server and client; nevertheless, bug 5666 says
+ * it is used only on the server. */
+ LASSERT(cfs_list_empty(&lock->l_bl_ast));
+ cfs_list_add(&lock->l_bl_ast, cancels);
+ unlock_res_and_lock(lock);
+ lu_ref_del(&lock->l_reference, __FUNCTION__, current);
spin_lock(&ns->ns_lock);
added++;
unused--;
/**
* Cancel at least \a nr locks from given namespace LRU.
*
- * When called with LDLM_ASYNC the blocking callback will be handled
+ * When called with LCF_ASYNC the blocking callback will be handled
* in a thread and this function will return after the thread has been
- * asked to call the callback. When called with LDLM_SYNC the blocking
+ * asked to call the callback. When called without LCF_ASYNC the blocking
* callback will be performed in this function.
*/
-int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr, ldlm_sync_t mode,
- int flags)
+int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr,
+ ldlm_cancel_flags_t cancel_flags,
+ int flags)
{
- CFS_LIST_HEAD(cancels);
- int count, rc;
- ENTRY;
+ struct list_head cancels = LIST_HEAD_INIT(cancels);
+ int count, rc;
+ ENTRY;
#ifndef __KERNEL__
- mode = LDLM_SYNC; /* force to be sync in user space */
+ cancel_flags &= ~LCF_ASYNC; /* force to be sync in user space */
#endif
- /* Just prepare the list of locks, do not actually cancel them yet.
- * Locks are cancelled later in a separate thread. */
- count = ldlm_prepare_lru_list(ns, &cancels, nr, 0, flags);
- rc = ldlm_bl_to_thread_list(ns, NULL, &cancels, count, mode);
- if (rc == 0)
- RETURN(count);
-
- RETURN(0);
+ /* Just prepare the list of locks, do not actually cancel them yet.
+ * Locks are cancelled later in a separate thread. */
+ count = ldlm_prepare_lru_list(ns, &cancels, nr, 0, flags);
+ rc = ldlm_bl_to_thread_list(ns, NULL, &cancels, count, cancel_flags);
+ if (rc == 0)
+ RETURN(count);
+
+ RETURN(0);
}
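/* Caller sketch (illustrative): shrink the namespace LRU by nr locks,
 * handing the prepared list to the blocking thread:
 *
 *	canceled = ldlm_cancel_lru(ns, nr, LCF_ASYNC, 0);
 *
 * Without LCF_ASYNC the same call cancels the locks in this context. */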
/**
* list.
*/
int ldlm_cancel_resource_local(struct ldlm_resource *res,
- cfs_list_t *cancels,
- ldlm_policy_data_t *policy,
- ldlm_mode_t mode, int lock_flags,
- ldlm_cancel_flags_t cancel_flags, void *opaque)
+ cfs_list_t *cancels,
+ ldlm_policy_data_t *policy,
+ ldlm_mode_t mode, __u64 lock_flags,
+ ldlm_cancel_flags_t cancel_flags, void *opaque)
{
struct ldlm_lock *lock;
int count = 0;
/* If somebody is already doing CANCEL, or blocking AST came,
* skip this lock. */
- if (lock->l_flags & LDLM_FL_BL_AST ||
- lock->l_flags & LDLM_FL_CANCELING)
- continue;
+ if (ldlm_is_bl_ast(lock) || ldlm_is_canceling(lock))
+ continue;
if (lockmode_compat(lock->l_granted_mode, mode))
continue;
policy->l_inodebits.bits))
continue;
- /* See CBPENDING comment in ldlm_cancel_lru */
- lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_CANCELING |
- lock_flags;
+ /* See CBPENDING comment in ldlm_cancel_lru */
+ lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_CANCELING |
+ lock_flags;
LASSERT(cfs_list_empty(&lock->l_bl_ast));
cfs_list_add(&lock->l_bl_ast, cancels);
cancels, 1, flags);
}
- if (res < 0) {
- if (res != -ESHUTDOWN)
- CERROR("ldlm_cli_cancel_list: %d\n", res);
- res = count;
- }
+ if (res < 0) {
+ CDEBUG_LIMIT(res == -ESHUTDOWN ? D_DLMTRACE : D_ERROR,
+ "ldlm_cli_cancel_list: %d\n", res);
+ res = count;
+ }
count -= res;
ldlm_lock_list_put(cancels, l_bl_ast, res);
* If flags & LDLM_FL_LOCAL_ONLY, throw the locks away without trying
* to notify the server. */
int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns,
- const struct ldlm_res_id *res_id,
- ldlm_policy_data_t *policy,
- ldlm_mode_t mode,
- ldlm_cancel_flags_t flags,
- void *opaque)
+ const struct ldlm_res_id *res_id,
+ ldlm_policy_data_t *policy,
+ ldlm_mode_t mode,
+ ldlm_cancel_flags_t flags,
+ void *opaque)
{
- struct ldlm_resource *res;
- CFS_LIST_HEAD(cancels);
- int count;
- int rc;
- ENTRY;
-
- res = ldlm_resource_get(ns, NULL, res_id, 0, 0);
- if (res == NULL) {
- /* This is not a problem. */
- CDEBUG(D_INFO, "No resource "LPU64"\n", res_id->name[0]);
- RETURN(0);
- }
+ struct ldlm_resource *res;
+ struct list_head cancels = LIST_HEAD_INIT(cancels);
+ int count;
+ int rc;
+ ENTRY;
- LDLM_RESOURCE_ADDREF(res);
- count = ldlm_cancel_resource_local(res, &cancels, policy, mode,
- 0, flags | LCF_BL_AST, opaque);
- rc = ldlm_cli_cancel_list(&cancels, count, NULL, flags);
- if (rc != ELDLM_OK)
- CERROR("ldlm_cli_cancel_unused_resource: %d\n", rc);
+ res = ldlm_resource_get(ns, NULL, res_id, 0, 0);
+ if (IS_ERR(res)) {
+ /* This is not a problem. */
+ CDEBUG(D_INFO, "No resource "LPU64"\n", res_id->name[0]);
+ RETURN(0);
+ }
- LDLM_RESOURCE_DELREF(res);
- ldlm_resource_putref(res);
- RETURN(0);
+ LDLM_RESOURCE_ADDREF(res);
+ count = ldlm_cancel_resource_local(res, &cancels, policy, mode,
+ 0, flags | LCF_BL_AST, opaque);
+ rc = ldlm_cli_cancel_list(&cancels, count, NULL, flags);
+ if (rc != ELDLM_OK)
+ CERROR("canceling unused lock "DLDLMRES": rc = %d\n",
+ PLDLMRES(res), rc);
+
+ LDLM_RESOURCE_DELREF(res);
+ ldlm_resource_putref(res);
+ RETURN(0);
}
EXPORT_SYMBOL(ldlm_cli_cancel_unused_resource);
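/* Hypothetical use: cancel every unused lock cached on one resource,
 * batching the resulting cancel RPC (argument values illustrative):
 *
 *	rc = ldlm_cli_cancel_unused_resource(ns, &res_id, NULL,
 *					     LCK_MINMODE, 0, NULL);
 */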
};
static int ldlm_cli_hash_cancel_unused(cfs_hash_t *hs, cfs_hash_bd_t *bd,
- cfs_hlist_node_t *hnode, void *arg)
+ cfs_hlist_node_t *hnode, void *arg)
{
- struct ldlm_resource *res = cfs_hash_object(hs, hnode);
- struct ldlm_cli_cancel_arg *lc = arg;
- int rc;
-
- rc = ldlm_cli_cancel_unused_resource(ldlm_res_to_ns(res), &res->lr_name,
- NULL, LCK_MINMODE,
- lc->lc_flags, lc->lc_opaque);
- if (rc != 0) {
- CERROR("ldlm_cli_cancel_unused ("LPU64"): %d\n",
- res->lr_name.name[0], rc);
- }
- /* must return 0 for hash iteration */
- return 0;
+ struct ldlm_resource *res = cfs_hash_object(hs, hnode);
+ struct ldlm_cli_cancel_arg *lc = arg;
+
+ ldlm_cli_cancel_unused_resource(ldlm_res_to_ns(res), &res->lr_name,
+ NULL, LCK_MINMODE, lc->lc_flags,
+ lc->lc_opaque);
+ /* must return 0 for hash iteration */
+ return 0;
}
/**
ldlm_iterator_t iter, void *closure)
{
- struct iter_helper_data helper = { iter: iter, closure: closure };
+ struct iter_helper_data helper = { .iter = iter, .closure = closure };
cfs_hash_for_each_nolock(ns->ns_rs_hash,
ldlm_res_iter_helper, &helper);
* < 0: errors
*/
int ldlm_resource_iterate(struct ldlm_namespace *ns,
- const struct ldlm_res_id *res_id,
- ldlm_iterator_t iter, void *data)
+ const struct ldlm_res_id *res_id,
+ ldlm_iterator_t iter, void *data)
{
- struct ldlm_resource *res;
- int rc;
- ENTRY;
+ struct ldlm_resource *res;
+ int rc;
+ ENTRY;
- if (ns == NULL) {
- CERROR("must pass in namespace\n");
- LBUG();
- }
+ LASSERTF(ns != NULL, "must pass in namespace\n");
- res = ldlm_resource_get(ns, NULL, res_id, 0, 0);
- if (res == NULL)
- RETURN(0);
+ res = ldlm_resource_get(ns, NULL, res_id, 0, 0);
+ if (IS_ERR(res))
+ RETURN(0);
- LDLM_RESOURCE_ADDREF(res);
- rc = ldlm_resource_foreach(res, iter, data);
- LDLM_RESOURCE_DELREF(res);
- ldlm_resource_putref(res);
- RETURN(rc);
+ LDLM_RESOURCE_ADDREF(res);
+ rc = ldlm_resource_foreach(res, iter, data);
+ LDLM_RESOURCE_DELREF(res);
+ ldlm_resource_putref(res);
+ RETURN(rc);
}
EXPORT_SYMBOL(ldlm_resource_iterate);
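/* A minimal iterator sketch, assuming the usual ldlm_iterator_t contract
 * (LDLM_ITER_CONTINUE keeps walking, LDLM_ITER_STOP ends the walk early);
 * the callback and counter names are hypothetical:
 *
 *	static int count_lock_cb(struct ldlm_lock *lock, void *closure)
 *	{
 *		(*(int *)closure)++;
 *		return LDLM_ITER_CONTINUE;
 *	}
 *
 *	n = 0;
 *	rc = ldlm_resource_iterate(ns, &res_id, count_lock_cb, &n);
 */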
}
static int replay_lock_interpret(const struct lu_env *env,
- struct ptlrpc_request *req,
- struct ldlm_async_args *aa, int rc)
+ struct ptlrpc_request *req,
+ struct ldlm_async_args *aa, int rc)
{
- struct ldlm_lock *lock;
- struct ldlm_reply *reply;
- struct obd_export *exp;
-
- ENTRY;
- cfs_atomic_dec(&req->rq_import->imp_replay_inflight);
- if (rc != ELDLM_OK)
- GOTO(out, rc);
+ struct ldlm_lock *lock;
+ struct ldlm_reply *reply;
+ struct obd_export *exp;
+ ENTRY;
+ atomic_dec(&req->rq_import->imp_replay_inflight);
+ if (rc != ELDLM_OK)
+ GOTO(out, rc);
reply = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
if (reply == NULL)
/* Bug 11974: Do not replay a lock which is actively being canceled */
- if (lock->l_flags & LDLM_FL_CANCELING) {
+ if (ldlm_is_canceling(lock)) {
LDLM_DEBUG(lock, "Not replaying canceled lock:");
RETURN(0);
}
/* If this is reply-less callback lock, we cannot replay it, since
* server might have long dropped it, but notification of that event was
* lost by network. (and server granted conflicting lock already) */
- if (lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK) {
+ if (ldlm_is_cancel_on_block(lock)) {
LDLM_DEBUG(lock, "Not replaying reply-less lock:");
ldlm_lock_cancel(lock);
RETURN(0);
* also, we mark the request to be put on a dedicated
* queue to be processed after all request replays.
* bug 6063 */
- lustre_msg_set_flags(req->rq_reqmsg, MSG_REQ_REPLAY_DONE);
+ lustre_msg_set_flags(req->rq_reqmsg, MSG_REQ_REPLAY_DONE);
- LDLM_DEBUG(lock, "replaying lock:");
+ LDLM_DEBUG(lock, "replaying lock:");
- cfs_atomic_inc(&req->rq_import->imp_replay_inflight);
- CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
- aa = ptlrpc_req_async_args(req);
- aa->lock_handle = body->lock_handle[0];
- req->rq_interpret_reply = (ptlrpc_interpterer_t)replay_lock_interpret;
- ptlrpcd_add_req(req, PDL_POLICY_LOCAL, -1);
+ atomic_inc(&req->rq_import->imp_replay_inflight);
+ CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
+ aa = ptlrpc_req_async_args(req);
+ aa->lock_handle = body->lock_handle[0];
+ req->rq_interpret_reply = (ptlrpc_interpterer_t)replay_lock_interpret;
+ ptlrpcd_add_req(req, PDL_POLICY_LOCAL, -1);
- RETURN(0);
+ RETURN(0);
}
/**
*/
static void ldlm_cancel_unused_locks_for_replay(struct ldlm_namespace *ns)
{
- int canceled;
- CFS_LIST_HEAD(cancels);
+ int canceled;
+ struct list_head cancels = LIST_HEAD_INIT(cancels);
- CDEBUG(D_DLMTRACE, "Dropping as many unused locks as possible before"
- "replay for namespace %s (%d)\n",
- ldlm_ns_name(ns), ns->ns_nr_unused);
+ CDEBUG(D_DLMTRACE, "Dropping as many unused locks as possible before"
+ "replay for namespace %s (%d)\n",
+ ldlm_ns_name(ns), ns->ns_nr_unused);
- /* We don't need to care whether or not LRU resize is enabled
- * because the LDLM_CANCEL_NO_WAIT policy doesn't use the
- * count parameter */
- canceled = ldlm_cancel_lru_local(ns, &cancels, ns->ns_nr_unused, 0,
- LCF_LOCAL, LDLM_CANCEL_NO_WAIT);
+ /* We don't need to care whether or not LRU resize is enabled
+ * because the LDLM_CANCEL_NO_WAIT policy doesn't use the
+ * count parameter */
+ canceled = ldlm_cancel_lru_local(ns, &cancels, ns->ns_nr_unused, 0,
+ LCF_LOCAL, LDLM_CANCEL_NO_WAIT);
- CDEBUG(D_DLMTRACE, "Canceled %d unused locks from namespace %s\n",
- canceled, ldlm_ns_name(ns));
+ CDEBUG(D_DLMTRACE, "Canceled %d unused locks from namespace %s\n",
+ canceled, ldlm_ns_name(ns));
}
int ldlm_replay_locks(struct obd_import *imp)
{
- struct ldlm_namespace *ns = imp->imp_obd->obd_namespace;
- CFS_LIST_HEAD(list);
- struct ldlm_lock *lock, *next;
- int rc = 0;
+ struct ldlm_namespace *ns = imp->imp_obd->obd_namespace;
+ struct list_head list = LIST_HEAD_INIT(list);
+ struct ldlm_lock *lock, *next;
+ int rc = 0;
- ENTRY;
+ ENTRY;
- LASSERT(cfs_atomic_read(&imp->imp_replay_inflight) == 0);
+ LASSERT(atomic_read(&imp->imp_replay_inflight) == 0);
- /* don't replay locks if import failed recovery */
- if (imp->imp_vbr_failed)
- RETURN(0);
+ /* don't replay locks if import failed recovery */
+ if (imp->imp_vbr_failed)
+ RETURN(0);
- /* ensure this doesn't fall to 0 before all have been queued */
- cfs_atomic_inc(&imp->imp_replay_inflight);
+ /* ensure this doesn't fall to 0 before all have been queued */
+ atomic_inc(&imp->imp_replay_inflight);
- if (ldlm_cancel_unused_locks_before_replay)
- ldlm_cancel_unused_locks_for_replay(ns);
+ if (ldlm_cancel_unused_locks_before_replay)
+ ldlm_cancel_unused_locks_for_replay(ns);
- ldlm_namespace_foreach(ns, ldlm_chain_lock_for_replay, &list);
+ ldlm_namespace_foreach(ns, ldlm_chain_lock_for_replay, &list);
- cfs_list_for_each_entry_safe(lock, next, &list, l_pending_chain) {
- cfs_list_del_init(&lock->l_pending_chain);
- if (rc) {
- LDLM_LOCK_RELEASE(lock);
- continue; /* or try to do the rest? */
- }
- rc = replay_one_lock(imp, lock);
- LDLM_LOCK_RELEASE(lock);
- }
+ cfs_list_for_each_entry_safe(lock, next, &list, l_pending_chain) {
+ cfs_list_del_init(&lock->l_pending_chain);
+ if (rc) {
+ LDLM_LOCK_RELEASE(lock);
+ continue; /* or try to do the rest? */
+ }
+ rc = replay_one_lock(imp, lock);
+ LDLM_LOCK_RELEASE(lock);
+ }
- cfs_atomic_dec(&imp->imp_replay_inflight);
+ atomic_dec(&imp->imp_replay_inflight);
- RETURN(rc);
+ RETURN(rc);
}
EXPORT_SYMBOL(ldlm_replay_locks);