RETURN(0);
LDLM_ERROR(lock, "lock timed out (enqueued at "CFS_TIME_T", "
- CFS_DURATION_T"s ago); not entering recovery in "
+ CFS_DURATION_T"s ago); not entering recovery in "
"server code, just going back to sleep",
- lock->l_enqueued_time.tv_sec,
+ lock->l_last_activity,
cfs_time_sub(cfs_time_current_sec(),
- lock->l_enqueued_time.tv_sec));
+ lock->l_last_activity));
if (cfs_time_after(cfs_time_current(), next_dump)) {
last_dump = next_dump;
next_dump = cfs_time_shift(300);
ptlrpc_fail_import(imp, lwd->lwd_conn_cnt);
LDLM_ERROR(lock, "lock timed out (enqueued at "CFS_TIME_T", "
CFS_DURATION_T"s ago), entering recovery for %s@%s",
- lock->l_enqueued_time.tv_sec,
- cfs_time_sub(cfs_time_current_sec(),
- lock->l_enqueued_time.tv_sec),
+ lock->l_last_activity,
+ cfs_time_sub(cfs_time_current_sec(), lock->l_last_activity),
obd2cli_tgt(obd), imp->imp_connection->c_remote_uuid.uuid);
RETURN(0);
/* Since these are non-updating timeouts, we should be conservative.
It would be nice to have some kind of "early reply" mechanism for
lock callbacks too... */
- timeout = timeout + (timeout >> 1); /* 150% */
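+ /* Cap the inflated value at at_max so the computed enqueue timeout
+ * stays within the adaptive timeout bounds. */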
+ timeout = min_t(int, at_max, timeout + (timeout >> 1)); /* 150% */
return max(timeout, ldlm_enqueue_min);
}
-
-static int is_granted_or_cancelled(struct ldlm_lock *lock)
-{
- int ret = 0;
-
- lock_res_and_lock(lock);
- if (((lock->l_req_mode == lock->l_granted_mode) &&
- !(lock->l_flags & LDLM_FL_CP_REQD)) ||
- (lock->l_flags & LDLM_FL_FAILED))
- ret = 1;
- unlock_res_and_lock(lock);
-
- return ret;
-}
+EXPORT_SYMBOL(ldlm_get_enq_timeout);
/**
* Helper function for ldlm_completion_ast(), updating timings when lock is
result = -EIO;
} else {
delay = cfs_time_sub(cfs_time_current_sec(),
- lock->l_enqueued_time.tv_sec);
+ lock->l_last_activity);
LDLM_DEBUG(lock, "client-side enqueue: granted after "
CFS_DURATION_T"s", delay);
}
/**
- * Implementation of ->l_completion_ast() for a client that doesn't wait
- * until lock is granted. Suitable for locks enqueued through ptlrpcd or
+ * Implementation of ->l_completion_ast() for a client that doesn't wait
+ * until lock is granted. Suitable for locks enqueued through ptlrpcd or
* other threads that cannot block for long.
*/
int ldlm_completion_ast_async(struct ldlm_lock *lock, int flags, void *data)
LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, "
"going forward");
ldlm_lock_dump(D_OTHER, lock, 0);
+ ldlm_reprocess_all(lock->l_resource);
RETURN(0);
}
spin_unlock(&imp->imp_lock);
}
- /* Go to sleep until the lock is granted or cancelled. */
- rc = l_wait_event(lock->l_waitq, is_granted_or_cancelled(lock), &lwi);
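+ /* Fail-injection point: on a client namespace an armed
+ * OBD_FAIL_LDLM_INTR_CP_AST fail_loc makes the wait appear
+ * interrupted (-EINTR) so the completion/blocking AST race
+ * can be exercised in testing. */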
+ if (ns_is_client(lock->l_resource->lr_namespace) &&
+ OBD_FAIL_CHECK_RESET(OBD_FAIL_LDLM_INTR_CP_AST,
+ OBD_FAIL_LDLM_CP_BL_RACE | OBD_FAIL_ONCE)) {
+ lock->l_flags |= LDLM_FL_FAIL_LOC;
+ rc = -EINTR;
+ } else {
+ /* Go to sleep until the lock is granted or cancelled. */
+ rc = l_wait_event(lock->l_waitq,
+ is_granted_or_cancelled(lock), &lwi);
+ }
if (rc) {
LDLM_DEBUG(lock, "client-side enqueue waking up: failed (%d)",
RETURN(ldlm_completion_tail(lock));
}
-/*
- * ->l_blocking_ast() callback for LDLM locks acquired by server-side OBDs.
+/**
+ * A helper to build a blocking ast function
+ *
+ * Perform a common operation for blocking asts:
+ * deferred lock cancellation.
+ *
+ * \param lock the lock the blocking or cancelling ast was called on
+ * \retval 0
+ * \see mdt_blocking_ast
+ * \see ldlm_blocking_ast
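+ * \pre the resource lock is held by the caller (lock_res_and_lock());
+ * it is released before this function returns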
*/
-int ldlm_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
- void *data, int flag)
+int ldlm_blocking_ast_nocheck(struct ldlm_lock *lock)
{
int do_ast;
ENTRY;
- if (flag == LDLM_CB_CANCELING) {
- /* Don't need to do anything here. */
- RETURN(0);
- }
-
- lock_res_and_lock(lock);
- /* Get this: if ldlm_blocking_ast is racing with intent_policy, such
- * that ldlm_blocking_ast is called just before intent_policy method
- * takes the ns_lock, then by the time we get the lock, we might not
- * be the correct blocking function anymore. So check, and return
- * early, if so. */
- if (lock->l_blocking_ast != ldlm_blocking_ast) {
- unlock_res_and_lock(lock);
- RETURN(0);
- }
-
lock->l_flags |= LDLM_FL_CBPENDING;
do_ast = (!lock->l_readers && !lock->l_writers);
unlock_res_and_lock(lock);
RETURN(0);
}
+/**
+ * Server blocking AST
+ *
+ * ->l_blocking_ast() callback for LDLM locks acquired by server-side
+ * OBDs.
+ *
+ * \param lock the lock that blocks a request or the lock being cancelled
+ * \param desc unused
+ * \param data unused
+ * \param flag indicates whether this is a cancelling or a blocking callback
+ * \retval 0
+ * \see ldlm_blocking_ast_nocheck
+ */
+int ldlm_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
+ void *data, int flag)
+{
+ ENTRY;
+
+ if (flag == LDLM_CB_CANCELING) {
+ /* Don't need to do anything here. */
+ RETURN(0);
+ }
+
+ lock_res_and_lock(lock);
+ /* Get this: if ldlm_blocking_ast is racing with intent_policy, such
+ * that ldlm_blocking_ast is called just before intent_policy method
+ * takes the ns_lock, then by the time we get the lock, we might not
+ * be the correct blocking function anymore. So check, and return
+ * early, if so. */
+ if (lock->l_blocking_ast != ldlm_blocking_ast) {
+ unlock_res_and_lock(lock);
+ RETURN(0);
+ }
+ RETURN(ldlm_blocking_ast_nocheck(lock));
+}
+
/*
* ->l_glimpse_ast() for DLM extent locks acquired on the server-side. See
* comment in filter_intent_policy() on why you may need this.
ldlm_blocking_callback blocking,
ldlm_completion_callback completion,
ldlm_glimpse_callback glimpse,
- void *data, __u32 lvb_len, void *lvb_swabber,
+ void *data, __u32 lvb_len,
+ const __u64 *client_cookie,
struct lustre_handle *lockh)
{
struct ldlm_lock *lock;
lock = ldlm_lock_create(ns, res_id, type, mode, &cbs, data, lvb_len);
if (unlikely(!lock))
GOTO(out_nolock, err = -ENOMEM);
- LDLM_DEBUG(lock, "client-side local enqueue handler, new lock created");
ldlm_lock_addref_internal(lock, mode);
ldlm_lock2handle(lock, lockh);
lock->l_flags |= LDLM_FL_LOCAL;
if (*flags & LDLM_FL_ATOMIC_CB)
lock->l_flags |= LDLM_FL_ATOMIC_CB;
- lock->l_lvb_swabber = lvb_swabber;
unlock_res_and_lock(lock);
if (policy != NULL)
lock->l_policy_data = *policy;
+ if (client_cookie != NULL)
+ lock->l_client_cookie = *client_cookie;
if (type == LDLM_EXTENT)
lock->l_req_extent = policy->l_extent;
if (policy != NULL)
*policy = lock->l_policy_data;
- LDLM_DEBUG_NOLOCK("client-side local enqueue handler END (lock %p)",
- lock);
-
if (lock->l_completion_ast)
lock->l_completion_ast(lock, *flags, NULL);
- LDLM_DEBUG(lock, "client-side local enqueue END");
+ LDLM_DEBUG(lock, "client-side local enqueue handler, new lock created");
EXIT;
out:
- LDLM_LOCK_PUT(lock);
+ LDLM_LOCK_RELEASE(lock);
out_nolock:
return err;
}
struct ldlm_lock *lock,
struct lustre_handle *lockh, int mode)
{
+ int need_cancel = 0;
+
/* Set a flag to prevent us from sending a CANCEL (bug 407) */
lock_res_and_lock(lock);
- lock->l_flags |= LDLM_FL_LOCAL_ONLY;
+ /* Check that the lock is not granted or failed; we might race. */
+ if ((lock->l_req_mode != lock->l_granted_mode) &&
+ !(lock->l_flags & LDLM_FL_FAILED)) {
+ /* Make sure that this lock will not be found by a racing
+ * bl_ast and that an -EINVAL reply is sent to the server
+ * anyway. bug 17645 */
+ lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_FAILED |
+ LDLM_FL_ATOMIC_CB;
+ need_cancel = 1;
+ }
unlock_res_and_lock(lock);
- LDLM_DEBUG(lock, "setting FL_LOCAL_ONLY");
- ldlm_lock_decref_and_cancel(lockh, mode);
+ if (need_cancel) {
+ LDLM_DEBUG(lock,
+ "setting FL_LOCAL_ONLY | LDLM_FL_FAILED | "
+ "LDLM_FL_ATOMIC_CB");
+ ldlm_lock_decref_and_cancel(lockh, mode);
+ } else {
+ LDLM_DEBUG(lock, "lock was granted or failed in race");
+ ldlm_lock_decref(lockh, mode);
+ }
/* XXX - HACK because we shouldn't call ldlm_lock_destroy()
* from llite/file.c/ll_file_flock(). */
int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
ldlm_type_t type, __u8 with_policy, ldlm_mode_t mode,
int *flags, void *lvb, __u32 lvb_len,
- void *lvb_swabber, struct lustre_handle *lockh,int rc)
+ struct lustre_handle *lockh, int rc)
{
struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
int is_replay = *flags & LDLM_FL_REPLAY;
req_capsule_set_size(&req->rq_pill,
&RMF_DLM_LVB, RCL_SERVER,
lvb_len);
- tmplvb = req_capsule_server_swab_get(&req->rq_pill,
- &RMF_DLM_LVB,
- lvb_swabber);
+ tmplvb = req_capsule_server_get(&req->rq_pill,
+ &RMF_DLM_LVB);
if (tmplvb == NULL)
GOTO(cleanup, rc = -EPROTO);
if (lvb != NULL)
/* Key change rehash lock in per-export hash with new key */
if (exp->exp_lock_hash)
- lustre_hash_rehash_key(exp->exp_lock_hash, &old_hash_key,
- &lock->l_remote_handle,
- &lock->l_exp_hash);
+ cfs_hash_rehash_key(exp->exp_lock_hash, &old_hash_key,
+ &lock->l_remote_handle,
+ &lock->l_exp_hash);
*flags = reply->lock_flags;
lock->l_flags |= reply->lock_flags & LDLM_INHERIT_FLAGS;
req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER,
lvb_len);
- tmplvb = req_capsule_server_swab_get(&req->rq_pill,
- &RMF_DLM_LVB,
- lvb_swabber);
+ tmplvb = req_capsule_server_get(&req->rq_pill,
+ &RMF_DLM_LVB);
if (tmplvb == NULL)
GOTO(cleanup, rc = -EPROTO);
memcpy(lock->l_lvb_data, tmplvb, lvb_len);
failed_lock_cleanup(ns, lock, lockh, mode);
/* Put lock 2 times, the second reference is held by ldlm_cli_enqueue */
LDLM_LOCK_PUT(lock);
- LDLM_LOCK_PUT(lock);
+ LDLM_LOCK_RELEASE(lock);
return rc;
}
int avail;
avail = min_t(int, LDLM_MAXREQSIZE, CFS_PAGE_SIZE - 512) - req_size;
- avail /= sizeof(struct lustre_handle);
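+ /* If req_size already exceeds the size budget, count room for
+ * zero extra handles instead of a negative number. */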
+ if (likely(avail >= 0))
+ avail /= (int)sizeof(struct lustre_handle);
+ else
+ avail = 0;
avail += LDLM_LOCKREQ_HANDLES - off;
return avail;
bufcount = req_capsule_filled_sizes(pill, RCL_CLIENT);
avail = ldlm_capsule_handles_avail(pill, RCL_CLIENT, canceloff);
- flags = ns_connect_lru_resize(ns) ?
+ flags = ns_connect_lru_resize(ns) ?
LDLM_CANCEL_LRUR : LDLM_CANCEL_AGED;
to_free = !ns_connect_lru_resize(ns) &&
opc == LDLM_ENQUEUE ? 1 : 0;
- /* Cancel lru locks here _only_ if the server supports
+ /* Cancel lru locks here _only_ if the server supports
* EARLY_CANCEL. Otherwise we have to send extra CANCEL
* rpc, what will make us slower. */
if (avail > count)
struct ldlm_enqueue_info *einfo,
const struct ldlm_res_id *res_id,
ldlm_policy_data_t *policy, int *flags,
- void *lvb, __u32 lvb_len, void *lvb_swabber,
- struct lustre_handle *lockh, int async)
+ void *lvb, __u32 lvb_len, struct lustre_handle *lockh,
+ int async)
{
struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
struct ldlm_lock *lock;
/* for the local lock, add the reference */
ldlm_lock_addref_internal(lock, einfo->ei_mode);
ldlm_lock2handle(lock, lockh);
- lock->l_lvb_swabber = lvb_swabber;
if (policy != NULL) {
/* INODEBITS_INTEROP: If the server does not support
* inodebits, we will request a plain lock in the
LDLM_ENQUEUE);
if (req == NULL) {
failed_lock_cleanup(ns, lock, lockh, einfo->ei_mode);
- LDLM_LOCK_PUT(lock);
+ LDLM_LOCK_RELEASE(lock);
RETURN(-ENOMEM);
}
req_passed_in = 0;
}
LDLM_DEBUG(lock, "sending request");
+
rc = ptlrpc_queue_wait(req);
+
err = ldlm_cli_enqueue_fini(exp, req, einfo->ei_type, policy ? 1 : 0,
einfo->ei_mode, flags, lvb, lvb_len,
- lvb_swabber, lockh, rc);
+ lockh, rc);
/* If ldlm_cli_enqueue_fini did not find the lock, we need to free
* one reference that we took */
if (err == -ENOLCK)
- LDLM_LOCK_PUT(lock);
+ LDLM_LOCK_RELEASE(lock);
else
rc = err;
{
int rc = LDLM_FL_LOCAL_ONLY;
ENTRY;
-
+
if (lock->l_conn_export) {
int local_only;
LDLM_DEBUG(lock, "server-side local cancel");
ldlm_lock_cancel(lock);
ldlm_reprocess_all(lock->l_resource);
- LDLM_DEBUG(lock, "server-side local cancel handler END");
}
RETURN(rc);
LASSERT(dlm != NULL);
/* Check the room in the request buffer. */
- max = req_capsule_get_size(&req->rq_pill, &RMF_DLM_REQ, RCL_CLIENT) -
+ max = req_capsule_get_size(&req->rq_pill, &RMF_DLM_REQ, RCL_CLIENT) -
sizeof(struct ldlm_request);
max /= sizeof(struct lustre_handle);
max += LDLM_LOCKREQ_HANDLES;
ptlrpc_request_set_replen(req);
if (flags & LDLM_FL_ASYNC) {
- ptlrpcd_add_req(req);
+ ptlrpcd_add_req(req, PSCOPE_OTHER);
sent = count;
GOTO(out, 0);
} else {
__u64 old_slv, new_slv;
__u32 new_limit;
ENTRY;
-
- if (unlikely(!req->rq_import || !req->rq_import->imp_obd ||
+ if (unlikely(!req->rq_import || !req->rq_import->imp_obd ||
!imp_connect_lru_resize(req->rq_import)))
{
- /*
- * Do nothing for corner cases.
+ /*
+ * Do nothing for corner cases.
*/
RETURN(0);
}
- /*
- * In some cases RPC may contain slv and limit zeroed out. This is
+ /*
+ * In some cases RPC may contain slv and limit zeroed out. This is
* the case when server does not support lru resize feature. This is
* also possible in some recovery cases when server side reqs have no
- * ref to obd export and thus access to server side namespace is no
- * possible.
+ * ref to obd export and thus access to server side namespace is not
+ * possible.
*/
- if (lustre_msg_get_slv(req->rq_repmsg) == 0 ||
+ if (lustre_msg_get_slv(req->rq_repmsg) == 0 ||
lustre_msg_get_limit(req->rq_repmsg) == 0) {
DEBUG_REQ(D_HA, req, "Zero SLV or Limit found "
- "(SLV: "LPU64", Limit: %u)",
- lustre_msg_get_slv(req->rq_repmsg),
+ "(SLV: "LPU64", Limit: %u)",
+ lustre_msg_get_slv(req->rq_repmsg),
lustre_msg_get_limit(req->rq_repmsg));
RETURN(0);
}
new_slv = lustre_msg_get_slv(req->rq_repmsg);
obd = req->rq_import->imp_obd;
- /*
- * Set new SLV and Limit to obd fields to make accessible for pool
+ /*
+ * Set new SLV and Limit to obd fields to make them accessible to the pool
* thread. We do not access obd_namespace and pool directly here
* as there is no reliable way to make sure that they are still
* alive in cleanup time. Evil races are possible which may cause
- * oops in that time.
+ * oops in that time.
*/
write_lock(&obd->obd_pool_lock);
old_slv = obd->obd_pool_slv;
ENTRY;
/* concurrent cancels on the same handle can happen */
- lock = __ldlm_handle2lock(lockh, LDLM_FL_CANCELING);
+ lock = ldlm_handle2lock_long(lockh, LDLM_FL_CANCELING);
if (lock == NULL) {
LDLM_DEBUG_NOLOCK("lock is already being destroyed\n");
RETURN(0);
rc = ldlm_cli_cancel_local(lock);
if (rc < 0 || rc == LDLM_FL_LOCAL_ONLY) {
- LDLM_LOCK_PUT(lock);
+ LDLM_LOCK_RELEASE(lock);
RETURN(rc < 0 ? rc : 0);
}
/* Even if the lock is marked as LDLM_FL_BL_AST, this is a LDLM_CANCEL
if (rc == LDLM_FL_LOCAL_ONLY) {
/* CANCEL RPC should not be sent to server. */
list_del_init(&lock->l_bl_ast);
- LDLM_LOCK_PUT(lock);
+ LDLM_LOCK_RELEASE(lock);
count--;
}
RETURN(count);
}
-/**
- * Callback function for shrink policy. Makes decision whether to keep
- * \a lock in LRU for current \a LRU size \a unused, added in current scan
- * \a added and number of locks to be preferably canceled \a count.
- *
- * \retval LDLM_POLICY_KEEP_LOCK keep lock in LRU in stop scanning
- *
- * \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
- */
-static ldlm_policy_res_t ldlm_cancel_shrink_policy(struct ldlm_namespace *ns,
- struct ldlm_lock *lock,
- int unused, int added,
- int count)
-{
- int lock_cost;
- __u64 page_nr;
-
- /*
- * Stop lru processing when we reached passed @count or checked all
- * locks in lru.
- */
- if (count && added >= count)
- return LDLM_POLICY_KEEP_LOCK;
-
- if (lock->l_resource->lr_type == LDLM_EXTENT) {
- struct ldlm_extent *l_extent;
-
- /*
- * For all extent locks cost is 1 + number of pages in
- * their extent.
- */
- l_extent = &lock->l_policy_data.l_extent;
- page_nr = (l_extent->end - l_extent->start);
- do_div(page_nr, CFS_PAGE_SIZE);
-
-#ifdef __KERNEL__
- /*
- * XXX: In fact this is evil hack, we can't access inode
- * here. For doing it right we need somehow to have number
- * of covered by lock. This should be fixed later when 10718
- * is landed.
- */
- if (lock->l_ast_data != NULL) {
- struct inode *inode = lock->l_ast_data;
- if (page_nr > inode->i_mapping->nrpages)
- page_nr = inode->i_mapping->nrpages;
- }
-#endif
- lock_cost = 1 + page_nr;
- } else {
- /*
- * For all locks which are not extent ones cost is 1
- */
- lock_cost = 1;
- }
-
- /*
- * Keep all expensive locks in lru for the memory pressure time
- * cancel policy. They anyways may be canceled by lru resize
- * pplicy if they have not small enough CLV.
- */
- return lock_cost > ns->ns_shrink_thumb ?
- LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
-}
-
/**
* Callback function for lru-resize policy. Makes decision whether to keep
* \a lock in LRU for current \a LRU size \a unused, added in current scan
* \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
*/
static ldlm_policy_res_t ldlm_cancel_lrur_policy(struct ldlm_namespace *ns,
- struct ldlm_lock *lock,
- int unused, int added,
+ struct ldlm_lock *lock,
+ int unused, int added,
int count)
{
cfs_time_t cur = cfs_time_current();
__u64 slv, lvf, lv;
cfs_time_t la;
- /*
- * Stop lru processing when we reached passed @count or checked all
+ /*
+ * Stop lru processing when we reached passed @count or checked all
* locks in lru.
*/
if (count && added >= count)
slv = ldlm_pool_get_slv(pl);
lvf = ldlm_pool_get_lvf(pl);
- la = cfs_duration_sec(cfs_time_sub(cur,
+ la = cfs_duration_sec(cfs_time_sub(cur,
lock->l_last_used));
- /*
- * Stop when slv is not yet come from server or lv is smaller than
+ /*
+ * Stop when slv is not yet come from server or lv is smaller than
* it is.
*/
lv = lvf * la * unused;
-
- /*
- * Inform pool about current CLV to see it via proc.
+
+ /*
+ * Inform pool about current CLV to see it via proc.
*/
ldlm_pool_set_clv(pl, lv);
- return (slv == 1 || lv < slv) ?
+ return (slv == 1 || lv < slv) ?
LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
}
* \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
*/
static ldlm_policy_res_t ldlm_cancel_passed_policy(struct ldlm_namespace *ns,
- struct ldlm_lock *lock,
+ struct ldlm_lock *lock,
int unused, int added,
int count)
{
- /*
- * Stop lru processing when we reached passed @count or checked all
- * locks in lru.
+ /*
+ * Stop lru processing when we reached passed @count or checked all
+ * locks in lru.
*/
- return (added >= count) ?
+ return (added >= count) ?
LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
}
* \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
*/
static ldlm_policy_res_t ldlm_cancel_aged_policy(struct ldlm_namespace *ns,
- struct ldlm_lock *lock,
+ struct ldlm_lock *lock,
int unused, int added,
int count)
{
- /*
- * Stop lru processing if young lock is found and we reached passed
- * @count.
+ /*
+ * Stop lru processing if young lock is found and we reached passed
+ * @count.
*/
- return ((added >= count) &&
+ return ((added >= count) &&
cfs_time_before(cfs_time_current(),
cfs_time_add(lock->l_last_used,
- ns->ns_max_age))) ?
+ ns->ns_max_age))) ?
LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
}
* \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
*/
static ldlm_policy_res_t ldlm_cancel_default_policy(struct ldlm_namespace *ns,
- struct ldlm_lock *lock,
+ struct ldlm_lock *lock,
int unused, int added,
int count)
{
- /*
- * Stop lru processing when we reached passed @count or checked all
- * locks in lru.
+ /*
+ * Stop lru processing when we reached passed @count or checked all
+ * locks in lru.
*/
- return (added >= count) ?
+ return (added >= count) ?
LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
}
-typedef ldlm_policy_res_t (*ldlm_cancel_lru_policy_t)(struct ldlm_namespace *,
- struct ldlm_lock *, int,
+typedef ldlm_policy_res_t (*ldlm_cancel_lru_policy_t)(struct ldlm_namespace *,
+ struct ldlm_lock *, int,
int, int);
static ldlm_cancel_lru_policy_t
{
if (ns_connect_lru_resize(ns)) {
if (flags & LDLM_CANCEL_SHRINK)
- return ldlm_cancel_shrink_policy;
+ /* We cancel the passed number of old locks. */
+ return ldlm_cancel_passed_policy;
else if (flags & LDLM_CANCEL_LRUR)
return ldlm_cancel_lrur_policy;
else if (flags & LDLM_CANCEL_PASSED)
if (flags & LDLM_CANCEL_AGED)
return ldlm_cancel_aged_policy;
}
-
+
return ldlm_cancel_default_policy;
}
-
+
/* - Free space in lru for @count new locks,
* redundant unused locks are canceled locally;
* - also cancel locally unused aged locks;
pf = ldlm_cancel_lru_policy(ns, flags);
LASSERT(pf != NULL);
-
+
while (!list_empty(&ns->ns_unused_list)) {
/* For any flags, stop scanning if @max is reached. */
if (max && added >= max)
if (&lock->l_lru == &ns->ns_unused_list)
break;
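+ /* Pin the lock and drop ns_unused_lock so the lock cannot be
+ * freed while the policy function examines it. */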
+ LDLM_LOCK_GET(lock);
+ spin_unlock(&ns->ns_unused_lock);
+ lu_ref_add(&lock->l_reference, __FUNCTION__, cfs_current());
+
/* Pass the lock through the policy filter and see if it
* should stay in lru.
*
* we find a lock that should stay in the cache.
* We should take into account lock age anyway
* as new lock even if it is small of weight is
- * valuable resource.
+ * valuable resource.
*
* That is, for shrinker policy we drop only
* old locks, but additionally chose them by
- * their weight. Big extent locks will stay in
+ * their weight. Big extent locks will stay in
* the cache. */
- if (pf(ns, lock, unused, added, count) == LDLM_POLICY_KEEP_LOCK)
+ if (pf(ns, lock, unused, added, count) ==
+ LDLM_POLICY_KEEP_LOCK) {
+ lu_ref_del(&lock->l_reference,
+ __FUNCTION__, cfs_current());
+ LDLM_LOCK_RELEASE(lock);
+ spin_lock(&ns->ns_unused_lock);
break;
-
- LDLM_LOCK_GET(lock); /* dropped by bl thread */
- spin_unlock(&ns->ns_unused_lock);
+ }
lock_res_and_lock(lock);
/* Check flags again under the lock. */
* cancel by itseft or the lock is matched
* is already not unused. */
unlock_res_and_lock(lock);
- LDLM_LOCK_PUT(lock);
+ lu_ref_del(&lock->l_reference,
+ __FUNCTION__, cfs_current());
+ LDLM_LOCK_RELEASE(lock);
spin_lock(&ns->ns_unused_lock);
continue;
}
/* If we have chosen to cancel this lock voluntarily, we
* better send cancel notification to server, so that it
- * frees appropriate state. This might lead to a race
- * where while we are doing cancel here, server is also
+ * frees appropriate state. This might lead to a race
+ * where while we are doing cancel here, server is also
* silently cancelling this lock. */
lock->l_flags &= ~LDLM_FL_CANCEL_ON_BLOCK;
LASSERT(list_empty(&lock->l_bl_ast));
list_add(&lock->l_bl_ast, cancels);
unlock_res_and_lock(lock);
+ lu_ref_del(&lock->l_reference, __FUNCTION__, cfs_current());
spin_lock(&ns->ns_unused_lock);
added++;
unused--;
RETURN(ldlm_cancel_list(cancels, added, cancel_flags));
}
-/* Returns number of locks which could be canceled next time when
- * ldlm_cancel_lru() is called. Used from locks pool shrinker. */
-int ldlm_cancel_lru_estimate(struct ldlm_namespace *ns,
- int count, int max, int flags)
-{
- ldlm_cancel_lru_policy_t pf;
- struct ldlm_lock *lock;
- int added = 0, unused;
- ENTRY;
-
- pf = ldlm_cancel_lru_policy(ns, flags);
- LASSERT(pf != NULL);
- spin_lock(&ns->ns_unused_lock);
- unused = ns->ns_nr_unused;
-
- list_for_each_entry(lock, &ns->ns_unused_list, l_lru) {
- /* For any flags, stop scanning if @max is reached. */
- if (max && added >= max)
- break;
-
- /* Somebody is already doing CANCEL or there is a
- * blocking request will send cancel. Let's not count
- * this lock. */
- if ((lock->l_flags & LDLM_FL_CANCELING) ||
- (lock->l_flags & LDLM_FL_BL_AST))
- continue;
-
- /* Pass the lock through the policy filter and see if it
- * should stay in lru. */
- if (pf(ns, lock, unused, added, count) == LDLM_POLICY_KEEP_LOCK)
- break;
-
- added++;
- unused--;
- }
- spin_unlock(&ns->ns_unused_lock);
- RETURN(added);
-}
-
/* when called with LDLM_ASYNC the blocking callback will be handled
* in a thread and this function will return after the thread has been
* asked to call the callback. when called with LDLM_SYNC the blocking
* callback will be performed in this function. */
-int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr, ldlm_sync_t sync,
+int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr, ldlm_sync_t sync,
int flags)
{
CFS_LIST_HEAD(cancels);
RETURN(count);
}
- /* If an error occured in ASYNC mode, or
- * this is SYNC mode, cancel the list. */
+ /* If an error occurred in ASYNC mode, or this is SYNC mode,
+ * cancel the list. */
ldlm_cli_cancel_list(&cancels, count, NULL, 0);
RETURN(count);
}
/* If somebody is already doing CANCEL, or blocking ast came,
* skip this lock. */
- if (lock->l_flags & LDLM_FL_BL_AST ||
+ if (lock->l_flags & LDLM_FL_BL_AST ||
lock->l_flags & LDLM_FL_CANCELING)
continue;
RETURN(ldlm_cancel_list(cancels, count, cancel_flags));
}
-/* If @req is NULL, send CANCEL request to server with handles of locks
- * in the @cancels. If EARLY_CANCEL is not supported, send CANCEL requests
+/* If @req is NULL, send CANCEL request to server with handles of locks
+ * in the @cancels. If EARLY_CANCEL is not supported, send CANCEL requests
* separately per lock.
- * If @req is not NULL, put handles of locks in @cancels into the request
+ * If @req is not NULL, put handles of locks in @cancels into the request
* buffer at the offset @off.
* Destroy @cancels at the end. */
int ldlm_cli_cancel_list(struct list_head *cancels, int count,
if (list_empty(cancels) || count == 0)
RETURN(0);
-
- /* XXX: requests (both batched and not) could be sent in parallel.
+
+ /* XXX: requests (both batched and not) could be sent in parallel.
* Usually it is enough to have just 1 RPC, but it is possible that
* there are to many locks to be cancelled in LRU or on a resource.
* It would also speed up the case when the server does not support
RETURN(0);
}
+ LDLM_RESOURCE_ADDREF(res);
count = ldlm_cancel_resource_local(res, &cancels, policy, mode,
0, flags, opaque);
rc = ldlm_cli_cancel_list(&cancels, count, NULL, flags);
if (rc != ELDLM_OK)
CERROR("ldlm_cli_cancel_unused_resource: %d\n", rc);
+ LDLM_RESOURCE_DELREF(res);
ldlm_resource_putref(res);
RETURN(0);
}
ldlm_resource_getref(res);
spin_unlock(&ns->ns_hash_lock);
+ LDLM_RESOURCE_ADDREF(res);
rc = ldlm_cli_cancel_unused_resource(ns, &res->lr_name,
NULL, LCK_MINMODE,
flags, opaque);
CERROR("ldlm_cli_cancel_unused ("LPU64"): %d\n",
res->lr_name.name[0], rc);
+ LDLM_RESOURCE_DELREF(res);
spin_lock(&ns->ns_hash_lock);
tmp = tmp->next;
ldlm_resource_putref_locked(res);
res = list_entry(tmp, struct ldlm_resource, lr_hash);
ldlm_resource_getref(res);
spin_unlock(&ns->ns_hash_lock);
+ LDLM_RESOURCE_ADDREF(res);
rc = iter(res, closure);
+ LDLM_RESOURCE_DELREF(res);
spin_lock(&ns->ns_hash_lock);
tmp = tmp->next;
ldlm_resource_putref_locked(res);
return;
}
+ LDLM_RESOURCE_ADDREF(res);
ldlm_resource_foreach(res, iter, data);
+ LDLM_RESOURCE_DELREF(res);
ldlm_resource_putref(res);
EXIT;
}
/* we use l_pending_chain here, because it's unused on clients. */
LASSERTF(list_empty(&lock->l_pending_chain),"lock %p next %p prev %p\n",
lock, &lock->l_pending_chain.next,&lock->l_pending_chain.prev);
- /* bug 9573: don't replay locks left after eviction */
- if (!(lock->l_flags & LDLM_FL_FAILED))
+ /* bug 9573: don't replay locks left after eviction, or
+ * bug 17614: locks being actively cancelled. Get a reference
+ * on a lock so that it does not disappear under us (e.g. due to cancel)
+ */
+ if (!(lock->l_flags & (LDLM_FL_FAILED|LDLM_FL_CANCELING))) {
list_add(&lock->l_pending_chain, list);
+ LDLM_LOCK_GET(lock);
+ }
+
return LDLM_ITER_CONTINUE;
}
-static int replay_lock_interpret(struct ptlrpc_request *req,
+static int replay_lock_interpret(const struct lu_env *env,
+ struct ptlrpc_request *req,
struct ldlm_async_args *aa, int rc)
{
struct lustre_handle old_hash_key;
lock->l_remote_handle = reply->lock_handle;
/* Key change rehash lock in per-export hash with new key */
- exp = req->rq_export;
+ exp = req->rq_export;
if (exp && exp->exp_lock_hash)
- lustre_hash_rehash_key(exp->exp_lock_hash, &old_hash_key,
- &lock->l_remote_handle,
- &lock->l_exp_hash);
+ cfs_hash_rehash_key(exp->exp_lock_hash, &old_hash_key,
+ &lock->l_remote_handle,
+ &lock->l_exp_hash);
LDLM_DEBUG(lock, "replayed lock:");
ptlrpc_import_recovery_state_machine(req->rq_import);
CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
aa = ptlrpc_req_async_args(req);
aa->lock_handle = body->lock_handle[0];
- req->rq_interpret_reply = replay_lock_interpret;
- ptlrpcd_add_req(req);
+ req->rq_interpret_reply = (ptlrpc_interpterer_t)replay_lock_interpret;
+ ptlrpcd_add_req(req, PSCOPE_OTHER);
RETURN(0);
}
list_for_each_entry_safe(lock, next, &list, l_pending_chain) {
list_del_init(&lock->l_pending_chain);
- if (rc)
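+ /* Drop the reference taken in ldlm_chain_lock_for_replay(). */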
+ if (rc) {
+ LDLM_LOCK_PUT(lock);
continue; /* or try to do the rest? */
+ }
rc = replay_one_lock(imp, lock);
+ LDLM_LOCK_PUT(lock);
}
atomic_dec(&imp->imp_replay_inflight);