RETURN(0);
LDLM_ERROR(lock, "lock timed out (enqueued at "CFS_TIME_T", "
- CFS_DURATION_T"s ago); not entering recovery in "
+ CFS_DURATION_T"s ago); not entering recovery in "
"server code, just going back to sleep",
- lock->l_enqueued_time.tv_sec,
+ lock->l_last_activity,
cfs_time_sub(cfs_time_current_sec(),
- lock->l_enqueued_time.tv_sec));
+ lock->l_last_activity));
if (cfs_time_after(cfs_time_current(), next_dump)) {
last_dump = next_dump;
next_dump = cfs_time_shift(300);
ptlrpc_fail_import(imp, lwd->lwd_conn_cnt);
LDLM_ERROR(lock, "lock timed out (enqueued at "CFS_TIME_T", "
CFS_DURATION_T"s ago), entering recovery for %s@%s",
- lock->l_enqueued_time.tv_sec,
- cfs_time_sub(cfs_time_current_sec(),
- lock->l_enqueued_time.tv_sec),
+ lock->l_last_activity,
+ cfs_time_sub(cfs_time_current_sec(), lock->l_last_activity),
obd2cli_tgt(obd), imp->imp_connection->c_remote_uuid.uuid);
RETURN(0);
/* Since these are non-updating timeouts, we should be conservative.
It would be nice to have some kind of "early reply" mechanism for
lock callbacks too... */
- timeout = timeout + (timeout >> 1); /* 150% */
+ timeout = min_t(int, at_max, timeout + (timeout >> 1)); /* 150% */
return max(timeout, ldlm_enqueue_min);
}
EXPORT_SYMBOL(ldlm_get_enq_timeout);
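The clamp added above is worth spelling out. A minimal sketch of the resulting computation, assuming the usual kernel min_t/max macros and using at_max and ldlm_enqueue_min as stand-ins for the Lustre tunables of the same names (this is not the actual Lustre function):

/* Sketch: pad a base timeout by 50%, clamp it to the adaptive-timeout
 * ceiling, and never return less than the configured enqueue minimum. */
static int enq_timeout_sketch(int timeout, int at_max, int enq_min)
{
        timeout = min_t(int, at_max, timeout + (timeout >> 1)); /* 150% */
        return max(timeout, enq_min);
}

With example values at_max = 600 and enq_min = 100, a base timeout of 500 would previously pad to 750; after this change it is capped at 600.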
-static int is_granted_or_cancelled(struct ldlm_lock *lock)
-{
- int ret = 0;
-
- lock_res_and_lock(lock);
- if (((lock->l_req_mode == lock->l_granted_mode) &&
- !(lock->l_flags & LDLM_FL_CP_REQD)) ||
- (lock->l_flags & LDLM_FL_FAILED))
- ret = 1;
- unlock_res_and_lock(lock);
-
- return ret;
-}
-
/**
* Helper function for ldlm_completion_ast(), updating timings when lock is
* actually granted.
result = -EIO;
} else {
delay = cfs_time_sub(cfs_time_current_sec(),
- lock->l_enqueued_time.tv_sec);
+ lock->l_last_activity);
LDLM_DEBUG(lock, "client-side enqueue: granted after "
CFS_DURATION_T"s", delay);
spin_unlock(&imp->imp_lock);
}
- if (ns_is_client(lock->l_resource->lr_namespace) &&
+ if (ns_is_client(lock->l_resource->lr_namespace) &&
OBD_FAIL_CHECK_RESET(OBD_FAIL_LDLM_INTR_CP_AST,
OBD_FAIL_LDLM_CP_BL_RACE | OBD_FAIL_ONCE)) {
lock->l_flags |= LDLM_FL_FAIL_LOC;
rc = -EINTR;
} else {
/* Go to sleep until the lock is granted or cancelled. */
- rc = l_wait_event(lock->l_waitq,
+ rc = l_wait_event(lock->l_waitq,
is_granted_or_cancelled(lock), &lwi);
}
ldlm_blocking_callback blocking,
ldlm_completion_callback completion,
ldlm_glimpse_callback glimpse,
- void *data, __u32 lvb_len, void *lvb_swabber,
+ void *data, __u32 lvb_len,
const __u64 *client_cookie,
struct lustre_handle *lockh)
{
lock = ldlm_lock_create(ns, res_id, type, mode, &cbs, data, lvb_len);
if (unlikely(!lock))
GOTO(out_nolock, err = -ENOMEM);
- LDLM_DEBUG(lock, "client-side local enqueue handler, new lock created");
ldlm_lock_addref_internal(lock, mode);
ldlm_lock2handle(lock, lockh);
lock->l_flags |= LDLM_FL_LOCAL;
if (*flags & LDLM_FL_ATOMIC_CB)
lock->l_flags |= LDLM_FL_ATOMIC_CB;
- lock->l_lvb_swabber = lvb_swabber;
unlock_res_and_lock(lock);
if (policy != NULL)
lock->l_policy_data = *policy;
if (policy != NULL)
*policy = lock->l_policy_data;
- LDLM_DEBUG_NOLOCK("client-side local enqueue handler END (lock %p)",
- lock);
-
if (lock->l_completion_ast)
lock->l_completion_ast(lock, *flags, NULL);
- LDLM_DEBUG(lock, "client-side local enqueue END");
+ LDLM_DEBUG(lock, "client-side local enqueue handler, new lock created");
EXIT;
out:
LDLM_LOCK_RELEASE(lock);
/* Set a flag to prevent us from sending a CANCEL (bug 407) */
lock_res_and_lock(lock);
/* Check that lock is not granted or failed, we might race. */
- if ((lock->l_req_mode != lock->l_granted_mode) &&
+ if ((lock->l_req_mode != lock->l_granted_mode) &&
!(lock->l_flags & LDLM_FL_FAILED)) {
/* Make sure that this lock will not be found by raced
- * bl_ast and -EINVAL reply is sent to server anyways.
+ * bl_ast and an -EINVAL reply is sent to the server anyway.
* bug 17645 */
- lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_FAILED |
+ lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_FAILED |
LDLM_FL_ATOMIC_CB;
need_cancel = 1;
}
unlock_res_and_lock(lock);
-
+
if (need_cancel) {
- LDLM_DEBUG(lock,
- "setting FL_LOCAL_ONLY | LDLM_FL_FAILED | "
+ LDLM_DEBUG(lock,
+ "setting FL_LOCAL_ONLY | LDLM_FL_FAILED | "
"LDLM_FL_ATOMIC_CB");
ldlm_lock_decref_and_cancel(lockh, mode);
} else {
int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
ldlm_type_t type, __u8 with_policy, ldlm_mode_t mode,
int *flags, void *lvb, __u32 lvb_len,
- void *lvb_swabber, struct lustre_handle *lockh,int rc)
+ struct lustre_handle *lockh, int rc)
{
struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
int is_replay = *flags & LDLM_FL_REPLAY;
req_capsule_set_size(&req->rq_pill,
&RMF_DLM_LVB, RCL_SERVER,
lvb_len);
- tmplvb = req_capsule_server_swab_get(&req->rq_pill,
- &RMF_DLM_LVB,
- lvb_swabber);
+ tmplvb = req_capsule_server_get(&req->rq_pill,
+ &RMF_DLM_LVB);
if (tmplvb == NULL)
GOTO(cleanup, rc = -EPROTO);
if (lvb != NULL)
/* Key change rehash lock in per-export hash with new key */
if (exp->exp_lock_hash)
- lustre_hash_rehash_key(exp->exp_lock_hash, &old_hash_key,
- &lock->l_remote_handle,
- &lock->l_exp_hash);
+ cfs_hash_rehash_key(exp->exp_lock_hash, &old_hash_key,
+ &lock->l_remote_handle,
+ &lock->l_exp_hash);
*flags = reply->lock_flags;
lock->l_flags |= reply->lock_flags & LDLM_INHERIT_FLAGS;
req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER,
lvb_len);
- tmplvb = req_capsule_server_swab_get(&req->rq_pill,
- &RMF_DLM_LVB,
- lvb_swabber);
+ tmplvb = req_capsule_server_get(&req->rq_pill,
+ &RMF_DLM_LVB);
if (tmplvb == NULL)
GOTO(cleanup, rc = -EPROTO);
memcpy(lock->l_lvb_data, tmplvb, lvb_len);
struct ldlm_enqueue_info *einfo,
const struct ldlm_res_id *res_id,
ldlm_policy_data_t *policy, int *flags,
- void *lvb, __u32 lvb_len, void *lvb_swabber,
- struct lustre_handle *lockh, int async)
+ void *lvb, __u32 lvb_len, struct lustre_handle *lockh,
+ int async)
{
struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
struct ldlm_lock *lock;
/* for the local lock, add the reference */
ldlm_lock_addref_internal(lock, einfo->ei_mode);
ldlm_lock2handle(lock, lockh);
- lock->l_lvb_swabber = lvb_swabber;
if (policy != NULL) {
/* INODEBITS_INTEROP: If the server does not support
* inodebits, we will request a plain lock in the
err = ldlm_cli_enqueue_fini(exp, req, einfo->ei_type, policy ? 1 : 0,
einfo->ei_mode, flags, lvb, lvb_len,
- lvb_swabber, lockh, rc);
+ lockh, rc);
/* If ldlm_cli_enqueue_fini did not find the lock, we need to free
* one reference that we took */
LDLM_DEBUG(lock, "server-side local cancel");
ldlm_lock_cancel(lock);
ldlm_reprocess_all(lock->l_resource);
- LDLM_DEBUG(lock, "server-side local cancel handler END");
}
RETURN(rc);
}
/**
- * Callback function for shrink policy. Makes decision whether to keep
- * \a lock in LRU for current \a LRU size \a unused, added in current scan
- * \a added and number of locks to be preferably canceled \a count.
- *
- * \retval LDLM_POLICY_KEEP_LOCK keep lock in LRU in stop scanning
- *
- * \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
- */
-static ldlm_policy_res_t ldlm_cancel_shrink_policy(struct ldlm_namespace *ns,
- struct ldlm_lock *lock,
- int unused, int added,
- int count)
-{
- int lock_cost;
- __u64 page_nr;
-
- /*
- * Stop lru processing when we reached passed @count or checked all
- * locks in lru.
- */
- if (count && added >= count)
- return LDLM_POLICY_KEEP_LOCK;
-
- if (lock->l_resource->lr_type == LDLM_EXTENT) {
- if (lock->l_weigh_ast) {
- /*
- * For liblustre, l_weigh_ast should return 0 since it
- * don't cache pages
- */
- page_nr = lock->l_weigh_ast(lock);
- } else {
- struct ldlm_extent *l_extent;
-
- /*
- * For all extent locks cost is 1 + number of pages in
- * their extent.
- */
- l_extent = &lock->l_policy_data.l_extent;
- page_nr = l_extent->end - l_extent->start;
- do_div(page_nr, CFS_PAGE_SIZE);
- }
- lock_cost = 1 + page_nr;
- } else {
- /*
- * For all locks which are not extent ones cost is 1
- */
- lock_cost = 1;
- }
-
- /*
- * Keep all expensive locks in lru for the memory pressure time
- * cancel policy. They anyways may be canceled by lru resize
- * pplicy if they have not small enough CLV.
- */
- return lock_cost > ns->ns_shrink_thumb ?
- LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
-}
-
-/**
* Callback function for lru-resize policy. Makes decision whether to keep
* \a lock in LRU for current \a LRU size \a unused, added in current scan
* \a added and number of locks to be preferably canceled \a count.
{
if (ns_connect_lru_resize(ns)) {
if (flags & LDLM_CANCEL_SHRINK)
- return ldlm_cancel_shrink_policy;
+ /* We kill the passed number of old locks. */
+ return ldlm_cancel_passed_policy;
else if (flags & LDLM_CANCEL_LRUR)
return ldlm_cancel_lrur_policy;
else if (flags & LDLM_CANCEL_PASSED)
RETURN(ldlm_cancel_list(cancels, added, cancel_flags));
}
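For context on the policy switch above: with the shrink policy removed earlier in this patch, LDLM_CANCEL_SHRINK now maps to the passed-count policy. A hedged sketch of what such a policy looks like under the callback contract documented above (the real ldlm_cancel_passed_policy is defined elsewhere in this file):

/* Sketch of a passed-count policy: cancel old locks from the LRU until
 * the requested number has been gathered, then stop scanning. */
static ldlm_policy_res_t cancel_passed_sketch(struct ldlm_namespace *ns,
                                              struct ldlm_lock *lock,
                                              int unused, int added,
                                              int count)
{
        /* Stop LRU processing once @count locks have been added. */
        if (count && added >= count)
                return LDLM_POLICY_KEEP_LOCK;

        return LDLM_POLICY_CANCEL_LOCK;
}

Unlike the removed shrink policy, this makes no per-lock cost estimate; under memory pressure the oldest unused locks are simply cancelled until the requested count is reached.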
-/* Returns number of locks which could be canceled next time when
- * ldlm_cancel_lru() is called. Used from locks pool shrinker. */
-int ldlm_cancel_lru_estimate(struct ldlm_namespace *ns,
- int count, int max, int flags)
-{
- struct list_head disp = CFS_LIST_HEAD_INIT(disp);
- ldlm_cancel_lru_policy_t pf;
- struct ldlm_lock *lock;
- int added = 0, unused;
- int loop_stop = 0;
- ENTRY;
-
- pf = ldlm_cancel_lru_policy(ns, flags);
- LASSERT(pf != NULL);
- spin_lock(&ns->ns_unused_lock);
- unused = ns->ns_nr_unused;
- list_splice_init(&ns->ns_unused_list, &disp);
- while (!list_empty(&disp)) {
- lock = list_entry(disp.next, struct ldlm_lock, l_lru);
- list_move_tail(&lock->l_lru, &ns->ns_unused_list);
-
- /* For any flags, stop scanning if @max is reached. */
- if (max && added >= max)
- break;
-
- /* Somebody is already doing CANCEL or there is a
- * blocking request will send cancel. Let's not count
- * this lock. */
- if ((lock->l_flags & LDLM_FL_CANCELING) ||
- (lock->l_flags & LDLM_FL_BL_AST))
- continue;
-
- LDLM_LOCK_GET(lock);
- spin_unlock(&ns->ns_unused_lock);
- lu_ref_add(&lock->l_reference, __FUNCTION__, cfs_current());
-
- /* Pass the lock through the policy filter and see if it
- * should stay in lru. */
- if (pf(ns, lock, unused, added, count) == LDLM_POLICY_KEEP_LOCK)
- loop_stop = 1;
-
- lu_ref_del(&lock->l_reference, __FUNCTION__, cfs_current());
- LDLM_LOCK_RELEASE(lock);
- spin_lock(&ns->ns_unused_lock);
- if (loop_stop)
- break;
-
- added++;
- unused--;
- }
- list_splice(&disp, ns->ns_unused_list.prev);
- spin_unlock(&ns->ns_unused_lock);
- RETURN(added);
-}
-
/* when called with LDLM_ASYNC the blocking callback will be handled
* in a thread and this function will return after the thread has been
* asked to call the callback. when called with LDLM_SYNC the blocking
RETURN(count);
}
- /* If an error occured in ASYNC mode, or
- * this is SYNC mode, cancel the list. */
+ /* If an error occurred in ASYNC mode, or this is SYNC mode,
+ * cancel the list. */
ldlm_cli_cancel_list(&cancels, count, NULL, 0);
RETURN(count);
}
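As a usage note for the SYNC/ASYNC distinction described in the comment above, a caller-side sketch; the ldlm_cancel_lru() signature here is assumed from this vintage of the code:

/* Hedged sketch: ask for up to @nr unused locks to be cancelled from
 * the namespace LRU. */
count = ldlm_cancel_lru(ns, nr, LDLM_SYNC, 0);   /* callbacks run before return */
count = ldlm_cancel_lru(ns, nr, LDLM_ASYNC, 0);  /* callbacks handed to a thread */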
/* we use l_pending_chain here, because it's unused on clients. */
LASSERTF(list_empty(&lock->l_pending_chain),"lock %p next %p prev %p\n",
lock, &lock->l_pending_chain.next,&lock->l_pending_chain.prev);
- /* bug 9573: don't replay locks left after eviction */
- if (!(lock->l_flags & LDLM_FL_FAILED))
+ /* bug 9573: don't replay locks left after eviction, or
+ * bug 17614: locks being actively cancelled. Get a reference
+ * on the lock so that it does not disappear under us (e.g. due to
+ * cancel). */
+ if (!(lock->l_flags & (LDLM_FL_FAILED|LDLM_FL_CANCELING))) {
list_add(&lock->l_pending_chain, list);
+ LDLM_LOCK_GET(lock);
+ }
+
return LDLM_ITER_CONTINUE;
}
lock->l_remote_handle = reply->lock_handle;
/* Key change rehash lock in per-export hash with new key */
- exp = req->rq_export;
+ exp = req->rq_export;
if (exp && exp->exp_lock_hash)
- lustre_hash_rehash_key(exp->exp_lock_hash, &old_hash_key,
- &lock->l_remote_handle,
- &lock->l_exp_hash);
+ cfs_hash_rehash_key(exp->exp_lock_hash, &old_hash_key,
+ &lock->l_remote_handle,
+ &lock->l_exp_hash);
LDLM_DEBUG(lock, "replayed lock:");
ptlrpc_import_recovery_state_machine(req->rq_import);
list_for_each_entry_safe(lock, next, &list, l_pending_chain) {
list_del_init(&lock->l_pending_chain);
- if (rc)
+ if (rc) {
+ LDLM_LOCK_PUT(lock);
continue; /* or try to do the rest? */
+ }
rc = replay_one_lock(imp, lock);
+ LDLM_LOCK_PUT(lock);
}
atomic_dec(&imp->imp_replay_inflight);