-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
* GPL HEADER START
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* GPL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
+ *
+ * Copyright (c) 2010, 2011, Whamcloud, Inc.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
CFS_MODULE_PARM(ldlm_enqueue_min, "i", int, 0644,
"lock enqueue timeout minimum");
+/* on the client side, controls whether cached locks are canceled before replay */
+unsigned int ldlm_cancel_unused_locks_before_replay = 1;
+
static void interrupted_completion_wait(void *data)
{
}
RETURN(0);
LDLM_ERROR(lock, "lock timed out (enqueued at "CFS_TIME_T", "
- CFS_DURATION_T"s ago); not entering recovery in "
+ CFS_DURATION_T"s ago); not entering recovery in "
"server code, just going back to sleep",
- lock->l_enqueued_time.tv_sec,
- cfs_time_current_sec() -
- lock->l_enqueued_time.tv_sec);
+ lock->l_last_activity,
+ cfs_time_sub(cfs_time_current_sec(),
+ lock->l_last_activity));
if (cfs_time_after(cfs_time_current(), next_dump)) {
last_dump = next_dump;
next_dump = cfs_time_shift(300);
ldlm_namespace_dump(D_DLMTRACE,
- lock->l_resource->lr_namespace);
+ ldlm_lock_to_ns(lock));
if (last_dump == 0)
libcfs_debug_dumplog();
}
ptlrpc_fail_import(imp, lwd->lwd_conn_cnt);
LDLM_ERROR(lock, "lock timed out (enqueued at "CFS_TIME_T", "
CFS_DURATION_T"s ago), entering recovery for %s@%s",
- lock->l_enqueued_time.tv_sec,
- cfs_time_current_sec() - lock->l_enqueued_time.tv_sec,
+ lock->l_last_activity,
+ cfs_time_sub(cfs_time_current_sec(), lock->l_last_activity),
obd2cli_tgt(obd), imp->imp_connection->c_remote_uuid.uuid);
RETURN(0);
}
+EXPORT_SYMBOL(ldlm_expired_completion_wait);
/* We use the same basis for both server side and client side functions
from a single node. */
int ldlm_get_enq_timeout(struct ldlm_lock *lock)
{
- int timeout = at_get(&lock->l_resource->lr_namespace->ns_at_estimate);
+ int timeout = at_get(ldlm_lock_to_ns_at(lock));
if (AT_OFF)
return obd_timeout / 2;
/* Since these are non-updating timeouts, we should be conservative.
It would be nice to have some kind of "early reply" mechanism for
lock callbacks too... */
- timeout = timeout + (timeout >> 1); /* 150% */
+ timeout = min_t(int, at_max, timeout + (timeout >> 1)); /* 150% */
return max(timeout, ldlm_enqueue_min);
}
+EXPORT_SYMBOL(ldlm_get_enq_timeout);
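+/*
+ * A worked sketch with hypothetical numbers: if the adaptive estimate for
+ * this namespace is 20s and at_max = 600, then
+ *
+ *     timeout = min(at_max, 20 + (20 >> 1)) = min(600, 30) = 30
+ *
+ * and the result is further floored by ldlm_enqueue_min, so the enqueue
+ * timeout is the estimate inflated by 50%, clamped to the range
+ * [ldlm_enqueue_min, at_max].
+ */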
-static int is_granted_or_cancelled(struct ldlm_lock *lock)
+/**
+ * Helper function for ldlm_completion_ast(), updating timings when lock is
+ * actually granted.
+ */
+static int ldlm_completion_tail(struct ldlm_lock *lock)
{
- int ret = 0;
+ long delay;
+ int result;
- lock_res_and_lock(lock);
- if (((lock->l_req_mode == lock->l_granted_mode) &&
- !(lock->l_flags & LDLM_FL_CP_REQD)) ||
- (lock->l_flags & LDLM_FL_FAILED))
- ret = 1;
- unlock_res_and_lock(lock);
+ if (lock->l_destroyed || lock->l_flags & LDLM_FL_FAILED) {
+ LDLM_DEBUG(lock, "client-side enqueue: destroyed");
+ result = -EIO;
+ } else {
+ delay = cfs_time_sub(cfs_time_current_sec(),
+ lock->l_last_activity);
+ LDLM_DEBUG(lock, "client-side enqueue: granted after "
+ CFS_DURATION_T"s", delay);
+
+ /* Update our time estimate */
+ at_measured(ldlm_lock_to_ns_at(lock),
+ delay);
+ result = 0;
+ }
+ return result;
+}
+
+/**
+ * Implementation of ->l_completion_ast() for a client that doesn't wait
+ * until the lock is granted. Suitable for locks enqueued through ptlrpcd
+ * or other threads that cannot block for long.
+ */
+int ldlm_completion_ast_async(struct ldlm_lock *lock, int flags, void *data)
+{
+ ENTRY;
+
+ if (flags == LDLM_FL_WAIT_NOREPROC) {
+ LDLM_DEBUG(lock, "client-side enqueue waiting on pending lock");
+ RETURN(0);
+ }
+
+ if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
+ LDLM_FL_BLOCK_CONV))) {
+ cfs_waitq_signal(&lock->l_waitq);
+ RETURN(ldlm_completion_tail(lock));
+ }
- return ret;
+ LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, "
+ "going forward");
+ ldlm_reprocess_all(lock->l_resource);
+ RETURN(0);
}
+EXPORT_SYMBOL(ldlm_completion_ast_async);
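+/*
+ * Usage sketch (hypothetical caller, not part of this patch): a thread
+ * that must not block, e.g. one enqueueing through ptlrpcd, installs this
+ * AST in its callback suite instead of ldlm_completion_ast:
+ *
+ *     struct ldlm_enqueue_info einfo = {
+ *             .ei_type  = LDLM_IBITS,
+ *             .ei_mode  = LCK_CR,
+ *             .ei_cb_bl = ldlm_blocking_ast,
+ *             .ei_cb_cp = ldlm_completion_ast_async,
+ *     };
+ */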
+/**
+ * Client side LDLM "completion" AST. This is called in several cases:
+ *
+ * - when a reply to an ENQUEUE rpc is received from the server
+ * (ldlm_cli_enqueue_fini()). Lock might be granted or not granted at
+ * this point (determined by flags);
+ *
+ * - when LDLM_CP_CALLBACK rpc comes to client to notify it that lock has
+ * been granted;
+ *
+ * - when ldlm_lock_match(LDLM_FL_LVB_READY) is about to wait until lock
+ * gets correct lvb;
+ *
+ * - to force all locks when resource is destroyed (cleanup_resource());
+ *
+ * - during lock conversion (not used currently).
+ *
+ * If the lock is not granted in the first case, this function waits until
+ * the second case (a CP callback) or the penultimate case (resource
+ * cleanup) happens in some other thread.
+ *
+ */
int ldlm_completion_ast(struct ldlm_lock *lock, int flags, void *data)
{
/* XXX ALLOCATE - 160 bytes */
LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, "
"sleeping");
- ldlm_lock_dump(D_OTHER, lock, 0);
- ldlm_reprocess_all(lock->l_resource);
noreproc:
}
if (imp != NULL) {
- spin_lock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
lwd.lwd_conn_cnt = imp->imp_conn_cnt;
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
}
- /* Go to sleep until the lock is granted or cancelled. */
- rc = l_wait_event(lock->l_waitq, is_granted_or_cancelled(lock), &lwi);
-
- if (lock->l_destroyed || lock->l_flags & LDLM_FL_FAILED) {
- LDLM_DEBUG(lock, "client-side enqueue waking up: destroyed");
- RETURN(-EIO);
+ if (ns_is_client(ldlm_lock_to_ns(lock)) &&
+ OBD_FAIL_CHECK_RESET(OBD_FAIL_LDLM_INTR_CP_AST,
+ OBD_FAIL_LDLM_CP_BL_RACE | OBD_FAIL_ONCE)) {
+ lock->l_flags |= LDLM_FL_FAIL_LOC;
+ rc = -EINTR;
+ } else {
+ /* Go to sleep until the lock is granted or cancelled. */
+ rc = l_wait_event(lock->l_waitq,
+ is_granted_or_cancelled(lock), &lwi);
}
if (rc) {
RETURN(rc);
}
- LDLM_DEBUG(lock, "client-side enqueue waking up: granted after %lds",
- cfs_time_current_sec() - lock->l_enqueued_time.tv_sec);
-
- /* Update our time estimate */
- at_add(&lock->l_resource->lr_namespace->ns_at_estimate,
- cfs_time_current_sec() - lock->l_enqueued_time.tv_sec);
-
- RETURN(0);
+ RETURN(ldlm_completion_tail(lock));
}
+EXPORT_SYMBOL(ldlm_completion_ast);
-/*
- * ->l_blocking_ast() callback for LDLM locks acquired by server-side OBDs.
+/**
+ * A helper to build a blocking ast function
+ *
+ * Perform the operation common to blocking ASTs:
+ * deferred lock cancellation.
+ *
+ * \param lock the lock blocking or canceling ast was called on
+ * \retval 0
+ * \see mdt_blocking_ast
+ * \see ldlm_blocking_ast
*/
-int ldlm_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
- void *data, int flag)
+int ldlm_blocking_ast_nocheck(struct ldlm_lock *lock)
{
int do_ast;
ENTRY;
- if (flag == LDLM_CB_CANCELING) {
- /* Don't need to do anything here. */
- RETURN(0);
- }
-
- lock_res_and_lock(lock);
- /* Get this: if ldlm_blocking_ast is racing with intent_policy, such
- * that ldlm_blocking_ast is called just before intent_policy method
- * takes the ns_lock, then by the time we get the lock, we might not
- * be the correct blocking function anymore. So check, and return
- * early, if so. */
- if (lock->l_blocking_ast != ldlm_blocking_ast) {
- unlock_res_and_lock(lock);
- RETURN(0);
- }
-
lock->l_flags |= LDLM_FL_CBPENDING;
do_ast = (!lock->l_readers && !lock->l_writers);
unlock_res_and_lock(lock);
}
RETURN(0);
}
+EXPORT_SYMBOL(ldlm_blocking_ast_nocheck);
+
+/**
+ * Server blocking AST
+ *
+ * ->l_blocking_ast() callback for LDLM locks acquired by server-side
+ * OBDs.
+ *
+ * \param lock the lock that blocks a request or is being cancelled
+ * \param desc unused
+ * \param data unused
+ * \param flag indicates whether this is a cancelling or blocking callback
+ * \retval 0
+ * \see ldlm_blocking_ast_nocheck
+ */
+int ldlm_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
+ void *data, int flag)
+{
+ ENTRY;
+
+ if (flag == LDLM_CB_CANCELING) {
+ /* Don't need to do anything here. */
+ RETURN(0);
+ }
+
+ lock_res_and_lock(lock);
+ /* Get this: if ldlm_blocking_ast is racing with intent_policy, such
+ * that ldlm_blocking_ast is called just before intent_policy method
+ * takes the lr_lock, then by the time we get the lock, we might not
+ * be the correct blocking function anymore. So check, and return
+ * early, if so. */
+ if (lock->l_blocking_ast != ldlm_blocking_ast) {
+ unlock_res_and_lock(lock);
+ RETURN(0);
+ }
+ RETURN(ldlm_blocking_ast_nocheck(lock));
+}
+EXPORT_SYMBOL(ldlm_blocking_ast);
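+/*
+ * Sketch of a subsystem-specific blocking AST built on the helper above,
+ * patterned after mdt_blocking_ast; my_subsys_hook() is hypothetical:
+ *
+ *     int my_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *d,
+ *                         void *data, int flag)
+ *     {
+ *             if (flag == LDLM_CB_CANCELING)
+ *                     return 0;
+ *             lock_res_and_lock(lock);
+ *             my_subsys_hook(lock);
+ *             return ldlm_blocking_ast_nocheck(lock);  releases lr_lock
+ *     }
+ */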
/*
* ->l_glimpse_ast() for DLM extent locks acquired on the server-side. See
*/
return -ELDLM_NO_LOCK_DATA;
}
+EXPORT_SYMBOL(ldlm_glimpse_ast);
int ldlm_cli_enqueue_local(struct ldlm_namespace *ns,
const struct ldlm_res_id *res_id,
ldlm_blocking_callback blocking,
ldlm_completion_callback completion,
ldlm_glimpse_callback glimpse,
- void *data, __u32 lvb_len, void *lvb_swabber,
+ void *data, __u32 lvb_len,
+ const __u64 *client_cookie,
struct lustre_handle *lockh)
{
struct ldlm_lock *lock;
int err;
+ const struct ldlm_callback_suite cbs = { .lcs_completion = completion,
+ .lcs_blocking = blocking,
+ .lcs_glimpse = glimpse,
+ };
ENTRY;
LASSERT(!(*flags & LDLM_FL_REPLAY));
LBUG();
}
- lock = ldlm_lock_create(ns, res_id, type, mode, blocking,
- completion, glimpse, data, lvb_len);
+ lock = ldlm_lock_create(ns, res_id, type, mode, &cbs, data, lvb_len);
if (unlikely(!lock))
GOTO(out_nolock, err = -ENOMEM);
- LDLM_DEBUG(lock, "client-side local enqueue handler, new lock created");
- ldlm_lock_addref_internal(lock, mode);
ldlm_lock2handle(lock, lockh);
- lock_res_and_lock(lock);
+
+ /* NB: we don't need lock_res_and_lock() here,
+ * because the lock is brand new and nobody else can see it yet */
+ ldlm_lock_addref_internal_nolock(lock, mode);
lock->l_flags |= LDLM_FL_LOCAL;
if (*flags & LDLM_FL_ATOMIC_CB)
lock->l_flags |= LDLM_FL_ATOMIC_CB;
- lock->l_lvb_swabber = lvb_swabber;
- unlock_res_and_lock(lock);
+
if (policy != NULL)
lock->l_policy_data = *policy;
+ if (client_cookie != NULL)
+ lock->l_client_cookie = *client_cookie;
if (type == LDLM_EXTENT)
lock->l_req_extent = policy->l_extent;
if (policy != NULL)
*policy = lock->l_policy_data;
- LDLM_DEBUG_NOLOCK("client-side local enqueue handler END (lock %p)",
- lock);
-
if (lock->l_completion_ast)
lock->l_completion_ast(lock, *flags, NULL);
- LDLM_DEBUG(lock, "client-side local enqueue END");
+ LDLM_DEBUG(lock, "client-side local enqueue handler, new lock created");
EXIT;
out:
- LDLM_LOCK_PUT(lock);
+ LDLM_LOCK_RELEASE(lock);
out_nolock:
return err;
}
+EXPORT_SYMBOL(ldlm_cli_enqueue_local);
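+/*
+ * Usage sketch (hypothetical server-side caller; argument order assumed
+ * from the elided part of the prototype): take a local CW lock with no
+ * policy, no LVB and no client cookie:
+ *
+ *     struct lustre_handle lockh;
+ *     int flags = LDLM_FL_ATOMIC_CB;
+ *
+ *     rc = ldlm_cli_enqueue_local(ns, &res_id, LDLM_PLAIN, NULL, LCK_CW,
+ *                                 &flags, ldlm_blocking_ast,
+ *                                 ldlm_completion_ast, NULL, NULL, 0,
+ *                                 NULL, &lockh);
+ */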
static void failed_lock_cleanup(struct ldlm_namespace *ns,
- struct ldlm_lock *lock,
- struct lustre_handle *lockh, int mode)
+ struct ldlm_lock *lock, int mode)
{
+ int need_cancel = 0;
+
/* Set a flag to prevent us from sending a CANCEL (bug 407) */
lock_res_and_lock(lock);
- lock->l_flags |= LDLM_FL_LOCAL_ONLY;
+ /* Check that lock is not granted or failed, we might race. */
+ if ((lock->l_req_mode != lock->l_granted_mode) &&
+ !(lock->l_flags & LDLM_FL_FAILED)) {
+ /* Make sure that this lock will not be found by a racing
+ * bl_ast and that an -EINVAL reply is sent to the server
+ * anyway. Bug 17645 */
+ lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_FAILED |
+ LDLM_FL_ATOMIC_CB | LDLM_FL_CBPENDING;
+ need_cancel = 1;
+ }
unlock_res_and_lock(lock);
- LDLM_DEBUG(lock, "setting FL_LOCAL_ONLY");
- ldlm_lock_decref_and_cancel(lockh, mode);
+ if (need_cancel)
+ LDLM_DEBUG(lock,
+ "setting FL_LOCAL_ONLY | LDLM_FL_FAILED | "
+ "LDLM_FL_ATOMIC_CB | LDLM_FL_CBPENDING");
+ else
+ LDLM_DEBUG(lock, "lock was granted or failed in race");
+
+ ldlm_lock_decref_internal(lock, mode);
/* XXX - HACK because we shouldn't call ldlm_lock_destroy()
* from llite/file.c/ll_file_flock(). */
+ /* This code makes up for the fact that we do not have a blocking
+ * handler on a client for flock locks. As such, this is the place
+ * where we must completely kill failed locks (those interrupted and
+ * those that were waiting to be granted when the server evicted us). */
if (lock->l_resource->lr_type == LDLM_FLOCK) {
- ldlm_lock_destroy(lock);
+ lock_res_and_lock(lock);
+ ldlm_resource_unlink_lock(lock);
+ ldlm_lock_destroy_nolock(lock);
+ unlock_res_and_lock(lock);
}
}
int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
ldlm_type_t type, __u8 with_policy, ldlm_mode_t mode,
int *flags, void *lvb, __u32 lvb_len,
- void *lvb_swabber, struct lustre_handle *lockh,int rc)
+ struct lustre_handle *lockh,int rc)
{
struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
int is_replay = *flags & LDLM_FL_REPLAY;
struct ldlm_lock *lock;
struct ldlm_reply *reply;
+ struct ost_lvb *tmplvb;
int cleanup_phase = 1;
ENTRY;
if (reply == NULL)
rc = -EPROTO;
if (lvb_len) {
- struct ost_lvb *tmplvb;
req_capsule_set_size(&req->rq_pill,
&RMF_DLM_LVB, RCL_SERVER,
lvb_len);
- tmplvb = req_capsule_server_swab_get(&req->rq_pill,
- &RMF_DLM_LVB,
- lvb_swabber);
+ tmplvb = req_capsule_server_get(&req->rq_pill,
+ &RMF_DLM_LVB);
if (tmplvb == NULL)
GOTO(cleanup, rc = -EPROTO);
if (lvb != NULL)
cleanup_phase = 0;
lock_res_and_lock(lock);
- lock->l_remote_handle = reply->lock_handle;
+ /* Key change rehash lock in per-export hash with new key */
+ if (exp->exp_lock_hash) {
+ /* In the function below, .hs_keycmp resolves to
+ * ldlm_export_lock_keycmp() */
+ /* coverity[overrun-buffer-val] */
+ cfs_hash_rehash_key(exp->exp_lock_hash,
+ &lock->l_remote_handle,
+ &reply->lock_handle,
+ &lock->l_exp_hash);
+ } else {
+ lock->l_remote_handle = reply->lock_handle;
+ }
+
*flags = reply->lock_flags;
lock->l_flags |= reply->lock_flags & LDLM_INHERIT_FLAGS;
/* move NO_TIMEOUT flag to the lock to force ldlm_lock_match()
if (with_policy)
if (!(type == LDLM_IBITS && !(exp->exp_connect_flags &
OBD_CONNECT_IBITS)))
- lock->l_policy_data =
- reply->lock_desc.l_policy_data;
+ /* We assume the lock type cannot change on the server */
+ ldlm_convert_policy_to_local(exp,
+ lock->l_resource->lr_type,
+ &reply->lock_desc.l_policy_data,
+ &lock->l_policy_data);
if (type != LDLM_PLAIN)
LDLM_DEBUG(lock,"client-side enqueue, new policy data");
}
/* If the lock has already been granted by a completion AST, don't
* clobber the LVB with an older one. */
- if (lvb_len && (lock->l_req_mode != lock->l_granted_mode)) {
- void *tmplvb;
+ if (lvb_len) {
+ /* We must lock or a racing completion might update lvb
+ without letting us know and we'll clobber the correct value.
+ Cannot unlock after the check either, as that still leaves
+ a tiny window for completion to get in */
+ lock_res_and_lock(lock);
+ if (lock->l_req_mode != lock->l_granted_mode) {
- req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER,
- lvb_len);
- tmplvb = req_capsule_server_swab_get(&req->rq_pill,
- &RMF_DLM_LVB,
- lvb_swabber);
- if (tmplvb == NULL)
- GOTO(cleanup, rc = -EPROTO);
- memcpy(lock->l_lvb_data, tmplvb, lvb_len);
+ req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB,
+ RCL_SERVER, lvb_len);
+ tmplvb = req_capsule_server_get(&req->rq_pill,
+ &RMF_DLM_LVB);
+ if (tmplvb == NULL) {
+ unlock_res_and_lock(lock);
+ GOTO(cleanup, rc = -EPROTO);
+ }
+ memcpy(lock->l_lvb_data, tmplvb, lvb_len);
+ }
+ unlock_res_and_lock(lock);
}
if (!is_replay) {
int err = lock->l_completion_ast(lock, *flags, NULL);
if (!rc)
rc = err;
- if (rc && type != LDLM_FLOCK) /* bug 9425, bug 10250 */
+ if (rc)
cleanup_phase = 1;
}
}
EXIT;
cleanup:
if (cleanup_phase == 1 && rc)
- failed_lock_cleanup(ns, lock, lockh, mode);
+ failed_lock_cleanup(ns, lock, mode);
/* Put lock 2 times, the second reference is held by ldlm_cli_enqueue */
LDLM_LOCK_PUT(lock);
- LDLM_LOCK_PUT(lock);
+ LDLM_LOCK_RELEASE(lock);
return rc;
}
+EXPORT_SYMBOL(ldlm_cli_enqueue_fini);
/* PAGE_SIZE-512 is to allow TCP/IP and LNET headers to fit into
* a single page on the send/receive side. XXX: 512 should be changed
int avail;
avail = min_t(int, LDLM_MAXREQSIZE, CFS_PAGE_SIZE - 512) - req_size;
- avail /= sizeof(struct lustre_handle);
+ if (likely(avail >= 0))
+ avail /= (int)sizeof(struct lustre_handle);
+ else
+ avail = 0;
avail += LDLM_LOCKREQ_HANDLES - off;
return avail;
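+/*
+ * Worked example with hypothetical numbers: for CFS_PAGE_SIZE = 4096 and
+ * req_size = 1024, and assuming LDLM_MAXREQSIZE >= 3584,
+ *
+ *     avail = (min(LDLM_MAXREQSIZE, 4096 - 512) - 1024) / 8 = 320
+ *
+ * extra handles fit in the batch (sizeof(struct lustre_handle) == 8),
+ * plus the LDLM_LOCKREQ_HANDLES - off slots built into the request.
+ */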
* @count locks in @cancels. */
int ldlm_prep_elc_req(struct obd_export *exp, struct ptlrpc_request *req,
int version, int opc, int canceloff,
- struct list_head *cancels, int count)
+ cfs_list_t *cancels, int count)
{
struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
struct req_capsule *pill = &req->rq_pill;
struct ldlm_request *dlm = NULL;
- int flags, avail, to_free, bufcount, pack = 0;
+ int flags, avail, to_free, pack = 0;
CFS_LIST_HEAD(head);
int rc;
ENTRY;
cancels = &head;
if (exp_connect_cancelset(exp)) {
/* Estimate the amount of available space in the request. */
- bufcount = req_capsule_filled_sizes(pill, RCL_CLIENT);
+ req_capsule_filled_sizes(pill, RCL_CLIENT);
avail = ldlm_capsule_handles_avail(pill, RCL_CLIENT, canceloff);
- flags = ns_connect_lru_resize(ns) ?
+ flags = ns_connect_lru_resize(ns) ?
LDLM_CANCEL_LRUR : LDLM_CANCEL_AGED;
to_free = !ns_connect_lru_resize(ns) &&
opc == LDLM_ENQUEUE ? 1 : 0;
- /* Cancel lru locks here _only_ if the server supports
+ /* Cancel lru locks here _only_ if the server supports
* EARLY_CANCEL. Otherwise we have to send extra CANCEL
* rpc, which will make us slower. */
if (avail > count)
}
RETURN(0);
}
+EXPORT_SYMBOL(ldlm_prep_elc_req);
int ldlm_prep_enqueue_req(struct obd_export *exp, struct ptlrpc_request *req,
- struct list_head *cancels, int count)
+ cfs_list_t *cancels, int count)
{
return ldlm_prep_elc_req(exp, req, LUSTRE_DLM_VERSION, LDLM_ENQUEUE,
LDLM_ENQUEUE_CANCEL_OFF, cancels, count);
}
+EXPORT_SYMBOL(ldlm_prep_enqueue_req);
/* If a request has some specific initialisation it is passed in @reqp,
* otherwise it is created in ldlm_cli_enqueue.
int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp,
struct ldlm_enqueue_info *einfo,
const struct ldlm_res_id *res_id,
- ldlm_policy_data_t *policy, int *flags,
- void *lvb, __u32 lvb_len, void *lvb_swabber,
- struct lustre_handle *lockh, int async)
+ ldlm_policy_data_t const *policy, int *flags,
+ void *lvb, __u32 lvb_len, struct lustre_handle *lockh,
+ int async)
{
struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
struct ldlm_lock *lock;
/* If we're replaying this lock, just check some invariants.
* If we're creating a new lock, get everything all setup nice. */
if (is_replay) {
- lock = ldlm_handle2lock(lockh);
+ lock = ldlm_handle2lock_long(lockh, 0);
LASSERT(lock != NULL);
LDLM_DEBUG(lock, "client-side enqueue START");
LASSERT(exp == lock->l_conn_export);
} else {
+ const struct ldlm_callback_suite cbs = {
+ .lcs_completion = einfo->ei_cb_cp,
+ .lcs_blocking = einfo->ei_cb_bl,
+ .lcs_glimpse = einfo->ei_cb_gl,
+ .lcs_weigh = einfo->ei_cb_wg
+ };
lock = ldlm_lock_create(ns, res_id, einfo->ei_type,
- einfo->ei_mode, einfo->ei_cb_bl,
- einfo->ei_cb_cp, einfo->ei_cb_gl,
- einfo->ei_cbdata, lvb_len);
+ einfo->ei_mode, &cbs, einfo->ei_cbdata,
+ lvb_len);
if (lock == NULL)
RETURN(-ENOMEM);
/* for the local lock, add the reference */
ldlm_lock_addref_internal(lock, einfo->ei_mode);
ldlm_lock2handle(lock, lockh);
- lock->l_lvb_swabber = lvb_swabber;
if (policy != NULL) {
/* INODEBITS_INTEROP: If the server does not support
* inodebits, we will request a plain lock in the
LDLM_DEBUG(lock, "client-side enqueue START");
}
+ lock->l_conn_export = exp;
+ lock->l_export = NULL;
+ lock->l_blocking_ast = einfo->ei_cb_bl;
+ lock->l_flags |= (*flags & LDLM_FL_NO_LRU);
+
/* lock not sent to server yet */
if (reqp == NULL || *reqp == NULL) {
LUSTRE_DLM_VERSION,
LDLM_ENQUEUE);
if (req == NULL) {
- failed_lock_cleanup(ns, lock, lockh, einfo->ei_mode);
- LDLM_LOCK_PUT(lock);
+ failed_lock_cleanup(ns, lock, einfo->ei_mode);
+ LDLM_LOCK_RELEASE(lock);
RETURN(-ENOMEM);
}
req_passed_in = 0;
DLM_LOCKREQ_OFF, len, (int)sizeof(*body));
}
- lock->l_conn_export = exp;
- lock->l_export = NULL;
- lock->l_blocking_ast = einfo->ei_cb_bl;
-
/* Dump lock data into the request buffer */
body = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
ldlm_lock2desc(lock, &body->lock_desc);
}
LDLM_DEBUG(lock, "sending request");
+
rc = ptlrpc_queue_wait(req);
+
err = ldlm_cli_enqueue_fini(exp, req, einfo->ei_type, policy ? 1 : 0,
einfo->ei_mode, flags, lvb, lvb_len,
- lvb_swabber, lockh, rc);
+ lockh, rc);
/* If ldlm_cli_enqueue_fini did not find the lock, we need to free
* one reference that we took */
if (err == -ENOLCK)
- LDLM_LOCK_PUT(lock);
+ LDLM_LOCK_RELEASE(lock);
else
rc = err;
RETURN(rc);
}
+EXPORT_SYMBOL(ldlm_cli_enqueue);
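+/*
+ * Usage sketch (hypothetical caller): a synchronous enqueue with no
+ * pre-built request and no LVB:
+ *
+ *     struct lustre_handle lockh;
+ *     int flags = 0;
+ *     struct ldlm_enqueue_info einfo = {
+ *             .ei_type  = LDLM_IBITS,
+ *             .ei_mode  = LCK_PR,
+ *             .ei_cb_bl = ldlm_blocking_ast,
+ *             .ei_cb_cp = ldlm_completion_ast,
+ *     };
+ *
+ *     rc = ldlm_cli_enqueue(exp, NULL, &einfo, &res_id, &policy, &flags,
+ *                           NULL, 0, &lockh, 0);
+ */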
static int ldlm_cli_convert_local(struct ldlm_lock *lock, int new_mode,
__u32 *flags)
struct ldlm_resource *res;
int rc;
ENTRY;
- if (ns_is_client(lock->l_resource->lr_namespace)) {
+ if (ns_is_client(ldlm_lock_to_ns(lock))) {
CERROR("Trying to cancel local lock\n");
LBUG();
}
ptlrpc_req_finished(req);
return rc;
}
+EXPORT_SYMBOL(ldlm_cli_convert);
/* Cancel locks locally.
* Returns:
{
int rc = LDLM_FL_LOCAL_ONLY;
ENTRY;
-
+
if (lock->l_conn_export) {
int local_only;
}
ldlm_lock_cancel(lock);
} else {
- if (ns_is_client(lock->l_resource->lr_namespace)) {
+ if (ns_is_client(ldlm_lock_to_ns(lock))) {
LDLM_ERROR(lock, "Trying to cancel local lock");
LBUG();
}
LDLM_DEBUG(lock, "server-side local cancel");
ldlm_lock_cancel(lock);
ldlm_reprocess_all(lock->l_resource);
- LDLM_DEBUG(lock, "server-side local cancel handler END");
}
RETURN(rc);
/* Pack @count locks in @head into ldlm_request buffer at the offset @off,
of the request @req. */
static void ldlm_cancel_pack(struct ptlrpc_request *req,
- struct list_head *head, int count)
+ cfs_list_t *head, int count)
{
struct ldlm_request *dlm;
struct ldlm_lock *lock;
LASSERT(dlm != NULL);
/* Check the room in the request buffer. */
- max = req_capsule_get_size(&req->rq_pill, &RMF_DLM_REQ, RCL_CLIENT) -
+ max = req_capsule_get_size(&req->rq_pill, &RMF_DLM_REQ, RCL_CLIENT) -
sizeof(struct ldlm_request);
max /= sizeof(struct lustre_handle);
max += LDLM_LOCKREQ_HANDLES;
/* XXX: it would be better to pack lock handles grouped by resource.
* so that the server cancel would call filter_lvbo_update() less
* frequently. */
- list_for_each_entry(lock, head, l_bl_ast) {
+ cfs_list_for_each_entry(lock, head, l_bl_ast) {
if (!count--)
break;
LASSERT(lock->l_conn_export);
/* Prepare and send a batched cancel rpc, it will include count lock handles
* of locks given in @head. */
-int ldlm_cli_cancel_req(struct obd_export *exp, struct list_head *cancels,
- int count, int flags)
+int ldlm_cli_cancel_req(struct obd_export *exp, cfs_list_t *cancels,
+ int count, ldlm_cancel_flags_t flags)
{
struct ptlrpc_request *req = NULL;
struct obd_import *imp;
LASSERT(exp != NULL);
LASSERT(count > 0);
- OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_PAUSE_CANCEL, obd_fail_val);
+ CFS_FAIL_TIMEOUT(OBD_FAIL_LDLM_PAUSE_CANCEL, cfs_fail_val);
- if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_RACE))
+ if (CFS_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_RACE))
RETURN(count);
free = ldlm_format_handles_avail(class_exp2cliimp(exp),
count = free;
while (1) {
- int bufcount;
-
imp = class_exp2cliimp(exp);
if (imp == NULL || imp->imp_invalid) {
CDEBUG(D_DLMTRACE,
if (req == NULL)
GOTO(out, rc = -ENOMEM);
- bufcount = req_capsule_filled_sizes(&req->rq_pill, RCL_CLIENT);
+ req_capsule_filled_sizes(&req->rq_pill, RCL_CLIENT);
req_capsule_set_size(&req->rq_pill, &RMF_DLM_REQ, RCL_CLIENT,
ldlm_request_bufsize(count, LDLM_CANCEL));
ldlm_cancel_pack(req, cancels, count);
ptlrpc_request_set_replen(req);
- if (flags & LDLM_FL_ASYNC) {
- ptlrpcd_add_req(req);
+ if (flags & LCF_ASYNC) {
+ ptlrpcd_add_req(req, PDL_POLICY_LOCAL, -1);
sent = count;
GOTO(out, 0);
} else {
out:
return sent ? sent : rc;
}
+EXPORT_SYMBOL(ldlm_cli_cancel_req);
static inline struct ldlm_pool *ldlm_imp2pl(struct obd_import *imp)
{
int ldlm_cli_update_pool(struct ptlrpc_request *req)
{
struct obd_device *obd;
- __u64 old_slv, new_slv;
+ __u64 new_slv;
__u32 new_limit;
ENTRY;
-
- if (unlikely(!req->rq_import || !req->rq_import->imp_obd ||
+ if (unlikely(!req->rq_import || !req->rq_import->imp_obd ||
!imp_connect_lru_resize(req->rq_import)))
{
- /*
- * Do nothing for corner cases.
+ /*
+ * Do nothing for corner cases.
*/
RETURN(0);
}
- /*
- * In some cases RPC may contain slv and limit zeroed out. This is
+ /*
+ * In some cases RPC may contain slv and limit zeroed out. This is
* the case when server does not support lru resize feature. This is
* also possible in some recovery cases when server side reqs have no
- * ref to obd export and thus access to server side namespace is no
- * possible.
+ * ref to obd export and thus access to server side namespace is not
+ * possible.
*/
- if (lustre_msg_get_slv(req->rq_repmsg) == 0 ||
+ if (lustre_msg_get_slv(req->rq_repmsg) == 0 ||
lustre_msg_get_limit(req->rq_repmsg) == 0) {
DEBUG_REQ(D_HA, req, "Zero SLV or Limit found "
- "(SLV: "LPU64", Limit: %u)",
- lustre_msg_get_slv(req->rq_repmsg),
+ "(SLV: "LPU64", Limit: %u)",
+ lustre_msg_get_slv(req->rq_repmsg),
lustre_msg_get_limit(req->rq_repmsg));
RETURN(0);
}
new_slv = lustre_msg_get_slv(req->rq_repmsg);
obd = req->rq_import->imp_obd;
- /*
- * Set new SLV and Limit to obd fields to make accessible for pool
+ /*
+ * Set new SLV and Limit to obd fields to make accessible for pool
* thread. We do not access obd_namespace and pool directly here
* as there is no reliable way to make sure that they are still
* alive in cleanup time. Evil races are possible which may cause
- * oops in that time.
+ * oops in that time.
*/
- write_lock(&obd->obd_pool_lock);
- old_slv = obd->obd_pool_slv;
+ cfs_write_lock(&obd->obd_pool_lock);
obd->obd_pool_slv = new_slv;
obd->obd_pool_limit = new_limit;
- write_unlock(&obd->obd_pool_lock);
+ cfs_write_unlock(&obd->obd_pool_lock);
- /*
- * Check if we need to wakeup pools thread for fast SLV change.
- * This is only done when threads period is noticably long like
- * 10s or more.
- */
-#if defined(__KERNEL__) && (LDLM_POOLS_THREAD_PERIOD >= 10)
- if (old_slv > 0) {
- __u64 fast_change = old_slv * LDLM_POOLS_FAST_SLV_CHANGE;
- do_div(fast_change, 100);
-
- /*
- * Wake up pools thread only if SLV has changed more than
- * 50% since last update. In this case we want to react asap.
- * Otherwise it is no sense to wake up pools as they are
- * re-calculated every LDLM_POOLS_THREAD_PERIOD anyways.
- */
- if (old_slv > new_slv && old_slv - new_slv > fast_change)
- ldlm_pools_wakeup();
- }
-#endif
RETURN(0);
}
EXPORT_SYMBOL(ldlm_cli_update_pool);
ENTRY;
/* concurrent cancels on the same handle can happen */
- lock = __ldlm_handle2lock(lockh, LDLM_FL_CANCELING);
+ lock = ldlm_handle2lock_long(lockh, LDLM_FL_CANCELING);
if (lock == NULL) {
LDLM_DEBUG_NOLOCK("lock is already being destroyed\n");
RETURN(0);
rc = ldlm_cli_cancel_local(lock);
if (rc < 0 || rc == LDLM_FL_LOCAL_ONLY) {
- LDLM_LOCK_PUT(lock);
+ LDLM_LOCK_RELEASE(lock);
RETURN(rc < 0 ? rc : 0);
}
/* Even if the lock is marked as LDLM_FL_BL_AST, this is a LDLM_CANCEL
* rpc which goes to canceld portal, so we can cancel other lru locks
* here and send them all as one LDLM_CANCEL rpc. */
- LASSERT(list_empty(&lock->l_bl_ast));
- list_add(&lock->l_bl_ast, &cancels);
+ LASSERT(cfs_list_empty(&lock->l_bl_ast));
+ cfs_list_add(&lock->l_bl_ast, &cancels);
exp = lock->l_conn_export;
if (exp_connect_cancelset(exp)) {
RCL_CLIENT, 0);
LASSERT(avail > 0);
- ns = lock->l_resource->lr_namespace;
+ ns = ldlm_lock_to_ns(lock);
flags = ns_connect_lru_resize(ns) ?
LDLM_CANCEL_LRUR : LDLM_CANCEL_AGED;
count += ldlm_cancel_lru_local(ns, &cancels, 0, avail - 1,
- LDLM_FL_BL_AST, flags);
+ LCF_BL_AST, flags);
}
ldlm_cli_cancel_list(&cancels, count, NULL, 0);
RETURN(0);
}
+EXPORT_SYMBOL(ldlm_cli_cancel);
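+/*
+ * Note on batching (sketch, hypothetical caller): when the export
+ * supports EARLY_CANCEL, the single CANCEL rpc sent here also carries up
+ * to avail - 1 additional unused LRU locks collected by
+ * ldlm_cancel_lru_local(), so a plain
+ *
+ *     rc = ldlm_cli_cancel(&lockh);
+ *
+ * may cancel several locks in one round trip.
+ */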
/* XXX until we will have compound requests and can cut cancels from generic rpc
* we need send cancels with LDLM_FL_BL_AST flag as separate rpc */
-static int ldlm_cancel_list(struct list_head *cancels, int count, int flags)
+int ldlm_cli_cancel_list_local(cfs_list_t *cancels, int count,
+ ldlm_cancel_flags_t flags)
{
CFS_LIST_HEAD(head);
struct ldlm_lock *lock, *next;
int left = 0, bl_ast = 0, rc;
left = count;
- list_for_each_entry_safe(lock, next, cancels, l_bl_ast) {
+ cfs_list_for_each_entry_safe(lock, next, cancels, l_bl_ast) {
if (left-- == 0)
break;
- if (flags & LDLM_FL_LOCAL_ONLY) {
+ if (flags & LCF_LOCAL) {
rc = LDLM_FL_LOCAL_ONLY;
ldlm_lock_cancel(lock);
} else {
rc = ldlm_cli_cancel_local(lock);
}
- if (!(flags & LDLM_FL_BL_AST) && (rc == LDLM_FL_BL_AST)) {
+ if (!(flags & LCF_BL_AST) && (rc == LDLM_FL_BL_AST)) {
LDLM_DEBUG(lock, "Cancel lock separately");
- list_del_init(&lock->l_bl_ast);
- list_add(&lock->l_bl_ast, &head);
+ cfs_list_del_init(&lock->l_bl_ast);
+ cfs_list_add(&lock->l_bl_ast, &head);
bl_ast ++;
continue;
}
if (rc == LDLM_FL_LOCAL_ONLY) {
/* CANCEL RPC should not be sent to server. */
- list_del_init(&lock->l_bl_ast);
- LDLM_LOCK_PUT(lock);
+ cfs_list_del_init(&lock->l_bl_ast);
+ LDLM_LOCK_RELEASE(lock);
count--;
}
RETURN(count);
}
+EXPORT_SYMBOL(ldlm_cli_cancel_list_local);
-/**
- * Callback function for shrink policy. Makes decision whether to keep
- * \a lock in LRU for current \a LRU size \a unused, added in current scan
- * \a added and number of locks to be preferably canceled \a count.
- *
- * \retval LDLM_POLICY_KEEP_LOCK keep lock in LRU in stop scanning
- *
- * \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
+/**
+ * Cancel as many locks as possible w/o sending any rpcs (e.g. to write back
+ * dirty data, to close a file, ...) or waiting for any rpcs in-flight (e.g.
+ * readahead requests, ...)
*/
-static ldlm_policy_res_t ldlm_cancel_shrink_policy(struct ldlm_namespace *ns,
- struct ldlm_lock *lock,
- int unused, int added,
- int count)
+static ldlm_policy_res_t ldlm_cancel_no_wait_policy(struct ldlm_namespace *ns,
+ struct ldlm_lock *lock,
+ int unused, int added,
+ int count)
{
- int lock_cost;
- __u64 page_nr;
-
- /*
- * Stop lru processing when we reached passed @count or checked all
- * locks in lru.
- */
- if (count && added >= count)
- return LDLM_POLICY_KEEP_LOCK;
-
- if (lock->l_resource->lr_type == LDLM_EXTENT) {
- struct ldlm_extent *l_extent;
+ ldlm_policy_res_t result = LDLM_POLICY_CANCEL_LOCK;
+ ldlm_cancel_for_recovery cb = ns->ns_cancel_for_recovery;
+ lock_res_and_lock(lock);
- /*
- * For all extent locks cost is 1 + number of pages in
- * their extent.
- */
- l_extent = &lock->l_policy_data.l_extent;
- page_nr = (l_extent->end - l_extent->start);
- do_div(page_nr, CFS_PAGE_SIZE);
-
-#ifdef __KERNEL__
- /*
- * XXX: In fact this is evil hack, we can't access inode
- * here. For doing it right we need somehow to have number
- * of covered by lock. This should be fixed later when 10718
- * is landed.
- */
- if (lock->l_ast_data != NULL) {
- struct inode *inode = lock->l_ast_data;
- if (page_nr > inode->i_mapping->nrpages)
- page_nr = inode->i_mapping->nrpages;
- }
-#endif
- lock_cost = 1 + page_nr;
- } else {
- /*
- * For all locks which are not extent ones cost is 1
- */
- lock_cost = 1;
+ /* don't check added & count since we want to process all locks
+ * from unused list */
+ switch (lock->l_resource->lr_type) {
+ case LDLM_EXTENT:
+ case LDLM_IBITS:
+ if (cb && cb(lock))
+ break;
+ default:
+ result = LDLM_POLICY_SKIP_LOCK;
+ lock->l_flags |= LDLM_FL_SKIPPED;
+ break;
}
- /*
- * Keep all expensive locks in lru for the memory pressure time
- * cancel policy. They anyways may be canceled by lru resize
- * pplicy if they have not small enough CLV.
- */
- return lock_cost > ns->ns_shrink_thumb ?
- LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
+ unlock_res_and_lock(lock);
+ RETURN(result);
}
/**
* \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
*/
static ldlm_policy_res_t ldlm_cancel_lrur_policy(struct ldlm_namespace *ns,
- struct ldlm_lock *lock,
- int unused, int added,
+ struct ldlm_lock *lock,
+ int unused, int added,
int count)
{
cfs_time_t cur = cfs_time_current();
__u64 slv, lvf, lv;
cfs_time_t la;
- /*
- * Stop lru processing when we reached passed @count or checked all
+ /*
+ * Stop lru processing when we reached passed @count or checked all
* locks in lru.
*/
if (count && added >= count)
slv = ldlm_pool_get_slv(pl);
lvf = ldlm_pool_get_lvf(pl);
- la = cfs_duration_sec(cfs_time_sub(cur,
+ la = cfs_duration_sec(cfs_time_sub(cur,
lock->l_last_used));
- /*
- * Stop when slv is not yet come from server or lv is smaller than
+ /*
+ * Stop when slv is not yet come from server or lv is smaller than
* it is.
*/
lv = lvf * la * unused;
-
- /*
- * Inform pool about current CLV to see it via proc.
+
+ /*
+ * Inform pool about current CLV to see it via proc.
*/
ldlm_pool_set_clv(pl, lv);
- return (slv == 1 || lv < slv) ?
+ return (slv == 0 || lv < slv) ?
LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
}
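+/*
+ * Worked example with hypothetical numbers: for a lock-volume factor
+ * lvf = 100, a lock idle for la = 10s and unused = 50 locks in the lru,
+ *
+ *     lv = lvf * la * unused = 100 * 10 * 50 = 50000;
+ *
+ * the lock is kept while slv is still 0 (not yet received from the
+ * server) or while lv < slv, and canceled once lv >= slv.
+ */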
* \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
*/
static ldlm_policy_res_t ldlm_cancel_passed_policy(struct ldlm_namespace *ns,
- struct ldlm_lock *lock,
+ struct ldlm_lock *lock,
int unused, int added,
int count)
{
- /*
- * Stop lru processing when we reached passed @count or checked all
- * locks in lru.
+ /*
+ * Stop lru processing when we reached passed @count or checked all
+ * locks in lru.
*/
- return (added >= count) ?
+ return (added >= count) ?
LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
}
* \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
*/
static ldlm_policy_res_t ldlm_cancel_aged_policy(struct ldlm_namespace *ns,
- struct ldlm_lock *lock,
+ struct ldlm_lock *lock,
int unused, int added,
int count)
{
- /*
- * Stop lru processing if young lock is found and we reached passed
- * @count.
+ /*
+ * Stop lru processing if young lock is found and we reached passed
+ * @count.
*/
- return ((added >= count) &&
+ return ((added >= count) &&
cfs_time_before(cfs_time_current(),
cfs_time_add(lock->l_last_used,
- ns->ns_max_age))) ?
+ ns->ns_max_age))) ?
LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
}
* \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
*/
static ldlm_policy_res_t ldlm_cancel_default_policy(struct ldlm_namespace *ns,
- struct ldlm_lock *lock,
+ struct ldlm_lock *lock,
int unused, int added,
int count)
{
- /*
- * Stop lru processing when we reached passed @count or checked all
- * locks in lru.
+ /*
+ * Stop lru processing when we reached passed @count or checked all
+ * locks in lru.
*/
- return (added >= count) ?
+ return (added >= count) ?
LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
}
-typedef ldlm_policy_res_t (*ldlm_cancel_lru_policy_t)(struct ldlm_namespace *,
- struct ldlm_lock *, int,
+typedef ldlm_policy_res_t (*ldlm_cancel_lru_policy_t)(struct ldlm_namespace *,
+ struct ldlm_lock *, int,
int, int);
static ldlm_cancel_lru_policy_t
ldlm_cancel_lru_policy(struct ldlm_namespace *ns, int flags)
{
+ if (flags & LDLM_CANCEL_NO_WAIT)
+ return ldlm_cancel_no_wait_policy;
+
if (ns_connect_lru_resize(ns)) {
if (flags & LDLM_CANCEL_SHRINK)
- return ldlm_cancel_shrink_policy;
+ /* We kill passed number of old locks. */
+ return ldlm_cancel_passed_policy;
else if (flags & LDLM_CANCEL_LRUR)
return ldlm_cancel_lrur_policy;
else if (flags & LDLM_CANCEL_PASSED)
if (flags & LDLM_CANCEL_AGED)
return ldlm_cancel_aged_policy;
}
-
+
return ldlm_cancel_default_policy;
}
-
+
/* - Free space in lru for @count new locks,
* redundant unused locks are canceled locally;
* - also cancel locally unused aged locks;
* memory pressre policy function;
*
* flags & LDLM_CANCEL_AGED - cancel alocks according to "aged policy".
+ *
+ * flags & LDLM_CANCEL_NO_WAIT - cancel as many unused locks as possible
+ * (typically before replaying locks) w/o
+ * sending any rpcs or waiting for any
+ * outstanding rpc to complete.
*/
-int ldlm_cancel_lru_local(struct ldlm_namespace *ns, struct list_head *cancels,
- int count, int max, int cancel_flags, int flags)
+static int ldlm_prepare_lru_list(struct ldlm_namespace *ns, cfs_list_t *cancels,
+ int count, int max, int flags)
{
ldlm_cancel_lru_policy_t pf;
struct ldlm_lock *lock, *next;
- int added = 0, unused;
+ int added = 0, unused, remained;
ENTRY;
- spin_lock(&ns->ns_unused_lock);
+ cfs_spin_lock(&ns->ns_lock);
unused = ns->ns_nr_unused;
+ remained = unused;
if (!ns_connect_lru_resize(ns))
count += unused - ns->ns_max_unused;
pf = ldlm_cancel_lru_policy(ns, flags);
LASSERT(pf != NULL);
-
- while (!list_empty(&ns->ns_unused_list)) {
+
+ while (!cfs_list_empty(&ns->ns_unused_list)) {
+ ldlm_policy_res_t result;
+
+ /* all unused locks */
+ if (remained-- <= 0)
+ break;
+
/* For any flags, stop scanning if @max is reached. */
if (max && added >= max)
break;
- list_for_each_entry_safe(lock, next, &ns->ns_unused_list, l_lru){
+ cfs_list_for_each_entry_safe(lock, next, &ns->ns_unused_list,
+ l_lru){
/* No locks which got blocking requests. */
LASSERT(!(lock->l_flags & LDLM_FL_BL_AST));
+ if (flags & LDLM_CANCEL_NO_WAIT &&
+ lock->l_flags & LDLM_FL_SKIPPED)
+ /* already processed */
+ continue;
+
/* Somebody is already doing CANCEL. No need in this
* lock in lru, do not traverse it again. */
if (!(lock->l_flags & LDLM_FL_CANCELING))
if (&lock->l_lru == &ns->ns_unused_list)
break;
+ LDLM_LOCK_GET(lock);
+ cfs_spin_unlock(&ns->ns_lock);
+ lu_ref_add(&lock->l_reference, __FUNCTION__, cfs_current());
+
/* Pass the lock through the policy filter and see if it
* should stay in lru.
*
* we find a lock that should stay in the cache.
* We should take into account lock age anyway
* as new lock even if it is small of weight is
- * valuable resource.
+ * valuable resource.
*
* That is, for shrinker policy we drop only
* old locks, but additionally chose them by
- * their weight. Big extent locks will stay in
+ * their weight. Big extent locks will stay in
* the cache. */
- if (pf(ns, lock, unused, added, count) == LDLM_POLICY_KEEP_LOCK)
+ result = pf(ns, lock, unused, added, count);
+ if (result == LDLM_POLICY_KEEP_LOCK) {
+ lu_ref_del(&lock->l_reference,
+ __FUNCTION__, cfs_current());
+ LDLM_LOCK_RELEASE(lock);
+ cfs_spin_lock(&ns->ns_lock);
break;
-
- LDLM_LOCK_GET(lock); /* dropped by bl thread */
- spin_unlock(&ns->ns_unused_lock);
+ }
+ if (result == LDLM_POLICY_SKIP_LOCK) {
+ lu_ref_del(&lock->l_reference,
+ __FUNCTION__, cfs_current());
+ LDLM_LOCK_RELEASE(lock);
+ cfs_spin_lock(&ns->ns_lock);
+ continue;
+ }
lock_res_and_lock(lock);
/* Check flags again under the lock. */
* cancel by itself or the lock is matched
* is already not unused. */
unlock_res_and_lock(lock);
- LDLM_LOCK_PUT(lock);
- spin_lock(&ns->ns_unused_lock);
+ lu_ref_del(&lock->l_reference,
+ __FUNCTION__, cfs_current());
+ LDLM_LOCK_RELEASE(lock);
+ cfs_spin_lock(&ns->ns_lock);
continue;
}
LASSERT(!lock->l_readers && !lock->l_writers);
/* If we have chosen to cancel this lock voluntarily, we
* better send cancel notification to server, so that it
- * frees appropriate state. This might lead to a race
- * where while we are doing cancel here, server is also
+ * frees appropriate state. This might lead to a race
+ * where while we are doing cancel here, server is also
* silently cancelling this lock. */
lock->l_flags &= ~LDLM_FL_CANCEL_ON_BLOCK;
/* We can't re-add to l_lru as it confuses the
* refcounting in ldlm_lock_remove_from_lru() if an AST
- * arrives after we drop ns_lock below. We use l_bl_ast
+ * arrives after we drop lr_lock below. We use l_bl_ast
* and can't use l_pending_chain as it is used both on
* server and client nevertheless bug 5666 says it is
* used only on server */
- LASSERT(list_empty(&lock->l_bl_ast));
- list_add(&lock->l_bl_ast, cancels);
+ LASSERT(cfs_list_empty(&lock->l_bl_ast));
+ cfs_list_add(&lock->l_bl_ast, cancels);
unlock_res_and_lock(lock);
- spin_lock(&ns->ns_unused_lock);
+ lu_ref_del(&lock->l_reference, __FUNCTION__, cfs_current());
+ cfs_spin_lock(&ns->ns_lock);
added++;
unused--;
}
- spin_unlock(&ns->ns_unused_lock);
- RETURN(ldlm_cancel_list(cancels, added, cancel_flags));
+ cfs_spin_unlock(&ns->ns_lock);
+ RETURN(added);
}
-/* Returns number of locks which could be canceled next time when
- * ldlm_cancel_lru() is called. Used from locks pool shrinker. */
-int ldlm_cancel_lru_estimate(struct ldlm_namespace *ns,
- int count, int max, int flags)
+int ldlm_cancel_lru_local(struct ldlm_namespace *ns, cfs_list_t *cancels,
+ int count, int max, ldlm_cancel_flags_t cancel_flags,
+ int flags)
{
- ldlm_cancel_lru_policy_t pf;
- struct ldlm_lock *lock;
- int added = 0, unused;
- ENTRY;
-
- pf = ldlm_cancel_lru_policy(ns, flags);
- LASSERT(pf != NULL);
- spin_lock(&ns->ns_unused_lock);
- unused = ns->ns_nr_unused;
-
- list_for_each_entry(lock, &ns->ns_unused_list, l_lru) {
- /* For any flags, stop scanning if @max is reached. */
- if (max && added >= max)
- break;
-
- /* Somebody is already doing CANCEL or there is a
- * blocking request will send cancel. Let's not count
- * this lock. */
- if ((lock->l_flags & LDLM_FL_CANCELING) ||
- (lock->l_flags & LDLM_FL_BL_AST))
- continue;
-
- /* Pass the lock through the policy filter and see if it
- * should stay in lru. */
- if (pf(ns, lock, unused, added, count) == LDLM_POLICY_KEEP_LOCK)
- break;
-
- added++;
- unused--;
- }
- spin_unlock(&ns->ns_unused_lock);
- RETURN(added);
+ int added;
+ added = ldlm_prepare_lru_list(ns, cancels, count, max, flags);
+ if (added <= 0)
+ return added;
+ return ldlm_cli_cancel_list_local(cancels, added, cancel_flags);
}
/* when called with LDLM_ASYNC the blocking callback will be handled
* in a thread and this function will return after the thread has been
* asked to call the callback. when called with LDLM_SYNC the blocking
* callback will be performed in this function. */
-int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr, ldlm_sync_t sync,
+int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr, ldlm_sync_t mode,
int flags)
{
CFS_LIST_HEAD(cancels);
ENTRY;
#ifndef __KERNEL__
- sync = LDLM_SYNC; /* force to be sync in user space */
+ mode = LDLM_SYNC; /* force to be sync in user space */
#endif
- count = ldlm_cancel_lru_local(ns, &cancels, nr, 0, 0, flags);
- if (sync == LDLM_ASYNC) {
- rc = ldlm_bl_to_thread_list(ns, NULL, &cancels, count);
- if (rc == 0)
- RETURN(count);
- }
+ /* Just prepare the list of locks, do not actually cancel them yet.
+ * Locks are cancelled later in a separate thread. */
+ count = ldlm_prepare_lru_list(ns, &cancels, nr, 0, flags);
+ rc = ldlm_bl_to_thread_list(ns, NULL, &cancels, count, mode);
+ if (rc == 0)
+ RETURN(count);
- /* If an error occured in ASYNC mode, or
- * this is SYNC mode, cancel the list. */
- ldlm_cli_cancel_list(&cancels, count, NULL, 0);
- RETURN(count);
+ RETURN(0);
}
/* Find and cancel locally unused locks found on resource, matched to the
* given policy, mode. GET the found locks and add them into the @cancels
* list. */
int ldlm_cancel_resource_local(struct ldlm_resource *res,
- struct list_head *cancels,
+ cfs_list_t *cancels,
ldlm_policy_data_t *policy,
ldlm_mode_t mode, int lock_flags,
- int cancel_flags, void *opaque)
+ ldlm_cancel_flags_t cancel_flags, void *opaque)
{
struct ldlm_lock *lock;
int count = 0;
ENTRY;
lock_res(res);
- list_for_each_entry(lock, &res->lr_granted, l_res_link) {
+ cfs_list_for_each_entry(lock, &res->lr_granted, l_res_link) {
if (opaque != NULL && lock->l_ast_data != opaque) {
LDLM_ERROR(lock, "data %p doesn't match opaque %p",
lock->l_ast_data, opaque);
continue;
}
- if (lock->l_readers || lock->l_writers) {
- if (cancel_flags & LDLM_FL_WARN) {
- LDLM_ERROR(lock, "lock in use");
- //LBUG();
- }
+ if (lock->l_readers || lock->l_writers)
continue;
- }
/* If somebody is already doing CANCEL, or blocking ast came,
* skip this lock. */
- if (lock->l_flags & LDLM_FL_BL_AST ||
+ if (lock->l_flags & LDLM_FL_BL_AST ||
lock->l_flags & LDLM_FL_CANCELING)
continue;
lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_CANCELING |
lock_flags;
- LASSERT(list_empty(&lock->l_bl_ast));
- list_add(&lock->l_bl_ast, cancels);
+ LASSERT(cfs_list_empty(&lock->l_bl_ast));
+ cfs_list_add(&lock->l_bl_ast, cancels);
LDLM_LOCK_GET(lock);
count++;
}
unlock_res(res);
- RETURN(ldlm_cancel_list(cancels, count, cancel_flags));
+ RETURN(ldlm_cli_cancel_list_local(cancels, count, cancel_flags));
}
+EXPORT_SYMBOL(ldlm_cancel_resource_local);
-/* If @req is NULL, send CANCEL request to server with handles of locks
- * in the @cancels. If EARLY_CANCEL is not supported, send CANCEL requests
+/* If @req is NULL, send CANCEL request to server with handles of locks
+ * in the @cancels. If EARLY_CANCEL is not supported, send CANCEL requests
* separately per lock.
- * If @req is not NULL, put handles of locks in @cancels into the request
+ * If @req is not NULL, put handles of locks in @cancels into the request
* buffer at the offset @off.
* Destroy @cancels at the end. */
-int ldlm_cli_cancel_list(struct list_head *cancels, int count,
- struct ptlrpc_request *req, int flags)
+int ldlm_cli_cancel_list(cfs_list_t *cancels, int count,
+ struct ptlrpc_request *req, ldlm_cancel_flags_t flags)
{
struct ldlm_lock *lock;
int res = 0;
ENTRY;
- if (list_empty(cancels) || count == 0)
+ if (cfs_list_empty(cancels) || count == 0)
RETURN(0);
-
- /* XXX: requests (both batched and not) could be sent in parallel.
+
+ /* XXX: requests (both batched and not) could be sent in parallel.
* Usually it is enough to have just 1 RPC, but it is possible that
* there are too many locks to be cancelled in LRU or on a resource.
* It would also speed up the case when the server does not support
* the feature. */
while (count > 0) {
- LASSERT(!list_empty(cancels));
- lock = list_entry(cancels->next, struct ldlm_lock, l_bl_ast);
+ LASSERT(!cfs_list_empty(cancels));
+ lock = cfs_list_entry(cancels->next, struct ldlm_lock,
+ l_bl_ast);
LASSERT(lock->l_conn_export);
if (exp_connect_cancelset(lock->l_conn_export)) {
LASSERT(count == 0);
RETURN(0);
}
+EXPORT_SYMBOL(ldlm_cli_cancel_list);
int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns,
const struct ldlm_res_id *res_id,
ldlm_policy_data_t *policy,
- ldlm_mode_t mode, int flags, void *opaque)
+ ldlm_mode_t mode,
+ ldlm_cancel_flags_t flags,
+ void *opaque)
{
struct ldlm_resource *res;
CFS_LIST_HEAD(cancels);
RETURN(0);
}
+ LDLM_RESOURCE_ADDREF(res);
count = ldlm_cancel_resource_local(res, &cancels, policy, mode,
- 0, flags, opaque);
+ 0, flags | LCF_BL_AST, opaque);
rc = ldlm_cli_cancel_list(&cancels, count, NULL, flags);
if (rc != ELDLM_OK)
CERROR("ldlm_cli_cancel_unused_resource: %d\n", rc);
+ LDLM_RESOURCE_DELREF(res);
ldlm_resource_putref(res);
RETURN(0);
}
+EXPORT_SYMBOL(ldlm_cli_cancel_unused_resource);
-static inline int have_no_nsresource(struct ldlm_namespace *ns)
-{
- int no_resource = 0;
-
- spin_lock(&ns->ns_hash_lock);
- if (ns->ns_resources == 0)
- no_resource = 1;
- spin_unlock(&ns->ns_hash_lock);
+struct ldlm_cli_cancel_arg {
+ int lc_flags;
+ void *lc_opaque;
+};
- RETURN(no_resource);
+static int ldlm_cli_hash_cancel_unused(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+ cfs_hlist_node_t *hnode, void *arg)
+{
+ struct ldlm_resource *res = cfs_hash_object(hs, hnode);
+ struct ldlm_cli_cancel_arg *lc = arg;
+ int rc;
+
+ rc = ldlm_cli_cancel_unused_resource(ldlm_res_to_ns(res), &res->lr_name,
+ NULL, LCK_MINMODE,
+ lc->lc_flags, lc->lc_opaque);
+ if (rc != 0) {
+ CERROR("ldlm_cli_cancel_unused ("LPU64"): %d\n",
+ res->lr_name.name[0], rc);
+ }
+ /* must return 0 for hash iteration */
+ return 0;
}
/* Cancel all locks on a namespace (or a specific resource, if given)
* that have 0 readers/writers.
*
- * If flags & LDLM_FL_LOCAL_ONLY, throw the locks away without trying
+ * If flags & LCF_LOCAL, throw the locks away without trying
* to notify the server. */
int ldlm_cli_cancel_unused(struct ldlm_namespace *ns,
const struct ldlm_res_id *res_id,
- int flags, void *opaque)
+ ldlm_cancel_flags_t flags, void *opaque)
{
- int i;
+ struct ldlm_cli_cancel_arg arg = {
+ .lc_flags = flags,
+ .lc_opaque = opaque,
+ };
+
ENTRY;
if (ns == NULL)
RETURN(ELDLM_OK);
- if (res_id)
+ if (res_id != NULL) {
RETURN(ldlm_cli_cancel_unused_resource(ns, res_id, NULL,
LCK_MINMODE, flags,
opaque));
-
- spin_lock(&ns->ns_hash_lock);
- for (i = 0; i < RES_HASH_SIZE; i++) {
- struct list_head *tmp;
- tmp = ns->ns_hash[i].next;
- while (tmp != &(ns->ns_hash[i])) {
- struct ldlm_resource *res;
- int rc;
-
- res = list_entry(tmp, struct ldlm_resource, lr_hash);
- ldlm_resource_getref(res);
- spin_unlock(&ns->ns_hash_lock);
-
- rc = ldlm_cli_cancel_unused_resource(ns, &res->lr_name,
- NULL, LCK_MINMODE,
- flags, opaque);
-
- if (rc)
- CERROR("ldlm_cli_cancel_unused ("LPU64"): %d\n",
- res->lr_name.name[0], rc);
-
- spin_lock(&ns->ns_hash_lock);
- tmp = tmp->next;
- ldlm_resource_putref_locked(res);
- }
- }
- spin_unlock(&ns->ns_hash_lock);
-
- RETURN(ELDLM_OK);
-}
-
-/* join/split resource locks to/from lru list */
-int ldlm_cli_join_lru(struct ldlm_namespace *ns,
- const struct ldlm_res_id *res_id, int join)
-{
- struct ldlm_resource *res;
- struct ldlm_lock *lock, *n;
- int count = 0;
- ENTRY;
-
- LASSERT(ns_is_client(ns));
-
- res = ldlm_resource_get(ns, NULL, res_id, LDLM_EXTENT, 0);
- if (res == NULL)
- RETURN(count);
- LASSERT(res->lr_type == LDLM_EXTENT);
-
- lock_res(res);
- if (!join)
- goto split;
-
- list_for_each_entry_safe (lock, n, &res->lr_granted, l_res_link) {
- if (list_empty(&lock->l_lru) &&
- !lock->l_readers && !lock->l_writers &&
- !(lock->l_flags & LDLM_FL_LOCAL) &&
- !(lock->l_flags & LDLM_FL_CBPENDING) &&
- !(lock->l_flags & LDLM_FL_BL_AST)) {
- ldlm_lock_add_to_lru(lock);
- lock->l_flags &= ~LDLM_FL_NO_LRU;
- LDLM_DEBUG(lock, "join lock to lru");
- count++;
- }
- }
- goto unlock;
-split:
- spin_lock(&ns->ns_unused_lock);
- list_for_each_entry_safe (lock, n, &ns->ns_unused_list, l_lru) {
- if (lock->l_resource == res) {
- ldlm_lock_remove_from_lru_nolock(lock);
- lock->l_flags |= LDLM_FL_NO_LRU;
- LDLM_DEBUG(lock, "split lock from lru");
- count++;
- }
+ } else {
+ cfs_hash_for_each_nolock(ns->ns_rs_hash,
+ ldlm_cli_hash_cancel_unused, &arg);
+ RETURN(ELDLM_OK);
}
- spin_unlock(&ns->ns_unused_lock);
-unlock:
- unlock_res(res);
- ldlm_resource_putref(res);
- RETURN(count);
}
+EXPORT_SYMBOL(ldlm_cli_cancel_unused);
/* Lock iterators. */
int ldlm_resource_foreach(struct ldlm_resource *res, ldlm_iterator_t iter,
void *closure)
{
- struct list_head *tmp, *next;
+ cfs_list_t *tmp, *next;
struct ldlm_lock *lock;
int rc = LDLM_ITER_CONTINUE;
RETURN(LDLM_ITER_CONTINUE);
lock_res(res);
- list_for_each_safe(tmp, next, &res->lr_granted) {
- lock = list_entry(tmp, struct ldlm_lock, l_res_link);
+ cfs_list_for_each_safe(tmp, next, &res->lr_granted) {
+ lock = cfs_list_entry(tmp, struct ldlm_lock, l_res_link);
if (iter(lock, closure) == LDLM_ITER_STOP)
GOTO(out, rc = LDLM_ITER_STOP);
}
- list_for_each_safe(tmp, next, &res->lr_converting) {
- lock = list_entry(tmp, struct ldlm_lock, l_res_link);
+ cfs_list_for_each_safe(tmp, next, &res->lr_converting) {
+ lock = cfs_list_entry(tmp, struct ldlm_lock, l_res_link);
if (iter(lock, closure) == LDLM_ITER_STOP)
GOTO(out, rc = LDLM_ITER_STOP);
}
- list_for_each_safe(tmp, next, &res->lr_waiting) {
- lock = list_entry(tmp, struct ldlm_lock, l_res_link);
+ cfs_list_for_each_safe(tmp, next, &res->lr_waiting) {
+ lock = cfs_list_entry(tmp, struct ldlm_lock, l_res_link);
if (iter(lock, closure) == LDLM_ITER_STOP)
GOTO(out, rc = LDLM_ITER_STOP);
unlock_res(res);
RETURN(rc);
}
+EXPORT_SYMBOL(ldlm_resource_foreach);
struct iter_helper_data {
ldlm_iterator_t iter;
return helper->iter(lock, helper->closure);
}
-static int ldlm_res_iter_helper(struct ldlm_resource *res, void *closure)
+static int ldlm_res_iter_helper(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+ cfs_hlist_node_t *hnode, void *arg)
+
{
- return ldlm_resource_foreach(res, ldlm_iter_helper, closure);
+ struct ldlm_resource *res = cfs_hash_object(hs, hnode);
+
+ return ldlm_resource_foreach(res, ldlm_iter_helper, arg) ==
+ LDLM_ITER_STOP;
}
-int ldlm_namespace_foreach(struct ldlm_namespace *ns, ldlm_iterator_t iter,
- void *closure)
+void ldlm_namespace_foreach(struct ldlm_namespace *ns,
+ ldlm_iterator_t iter, void *closure)
+
{
struct iter_helper_data helper = { iter: iter, closure: closure };
- return ldlm_namespace_foreach_res(ns, ldlm_res_iter_helper, &helper);
-}
-int ldlm_namespace_foreach_res(struct ldlm_namespace *ns,
- ldlm_res_iterator_t iter, void *closure)
-{
- int i, rc = LDLM_ITER_CONTINUE;
- struct ldlm_resource *res;
- struct list_head *tmp;
+ cfs_hash_for_each_nolock(ns->ns_rs_hash,
+ ldlm_res_iter_helper, &helper);
- ENTRY;
- spin_lock(&ns->ns_hash_lock);
- for (i = 0; i < RES_HASH_SIZE; i++) {
- tmp = ns->ns_hash[i].next;
- while (tmp != &(ns->ns_hash[i])) {
- res = list_entry(tmp, struct ldlm_resource, lr_hash);
- ldlm_resource_getref(res);
- spin_unlock(&ns->ns_hash_lock);
-
- rc = iter(res, closure);
-
- spin_lock(&ns->ns_hash_lock);
- tmp = tmp->next;
- ldlm_resource_putref_locked(res);
- if (rc == LDLM_ITER_STOP)
- GOTO(out, rc);
- }
- }
- out:
- spin_unlock(&ns->ns_hash_lock);
- RETURN(rc);
}
+EXPORT_SYMBOL(ldlm_namespace_foreach);
-/* non-blocking function to manipulate a lock whose cb_data is being put away.*/
-void ldlm_resource_iterate(struct ldlm_namespace *ns,
- const struct ldlm_res_id *res_id,
- ldlm_iterator_t iter, void *data)
+/* non-blocking function to manipulate a lock whose cb_data is being put away.
+ * return 0: no resource found
+ * > 0: must be LDLM_ITER_STOP/LDLM_ITER_CONTINUE
+ * < 0: error
+ */
+int ldlm_resource_iterate(struct ldlm_namespace *ns,
+ const struct ldlm_res_id *res_id,
+ ldlm_iterator_t iter, void *data)
{
struct ldlm_resource *res;
+ int rc;
ENTRY;
if (ns == NULL) {
}
res = ldlm_resource_get(ns, NULL, res_id, 0, 0);
- if (res == NULL) {
- EXIT;
- return;
- }
+ if (res == NULL)
+ RETURN(0);
- ldlm_resource_foreach(res, iter, data);
+ LDLM_RESOURCE_ADDREF(res);
+ rc = ldlm_resource_foreach(res, iter, data);
+ LDLM_RESOURCE_DELREF(res);
ldlm_resource_putref(res);
- EXIT;
+ RETURN(rc);
}
+EXPORT_SYMBOL(ldlm_resource_iterate);
/* Lock replay */
static int ldlm_chain_lock_for_replay(struct ldlm_lock *lock, void *closure)
{
- struct list_head *list = closure;
+ cfs_list_t *list = closure;
/* we use l_pending_chain here, because it's unused on clients. */
- LASSERTF(list_empty(&lock->l_pending_chain),"lock %p next %p prev %p\n",
+ LASSERTF(cfs_list_empty(&lock->l_pending_chain),
+ "lock %p next %p prev %p\n",
lock, &lock->l_pending_chain.next,&lock->l_pending_chain.prev);
- /* bug 9573: don't replay locks left after eviction */
- if (!(lock->l_flags & LDLM_FL_FAILED))
- list_add(&lock->l_pending_chain, list);
+ /* bug 9573: don't replay locks left after eviction, or
+ * bug 17614: locks being actively cancelled. Get a reference
+ * on a lock so that it does not disappear under us (e.g. due to cancel)
+ */
+ if (!(lock->l_flags & (LDLM_FL_FAILED|LDLM_FL_CANCELING))) {
+ cfs_list_add(&lock->l_pending_chain, list);
+ LDLM_LOCK_GET(lock);
+ }
+
return LDLM_ITER_CONTINUE;
}
-static int replay_lock_interpret(struct ptlrpc_request *req,
+static int replay_lock_interpret(const struct lu_env *env,
+ struct ptlrpc_request *req,
struct ldlm_async_args *aa, int rc)
{
- struct ldlm_lock *lock;
- struct ldlm_reply *reply;
+ struct ldlm_lock *lock;
+ struct ldlm_reply *reply;
+ struct obd_export *exp;
ENTRY;
- atomic_dec(&req->rq_import->imp_replay_inflight);
+ cfs_atomic_dec(&req->rq_import->imp_replay_inflight);
if (rc != ELDLM_OK)
GOTO(out, rc);
GOTO(out, rc = -ESTALE);
}
- lock->l_remote_handle = reply->lock_handle;
+ /* Key change rehash lock in per-export hash with new key */
+ exp = req->rq_export;
+ if (exp && exp->exp_lock_hash) {
+ /* In the function below, .hs_keycmp resolves to
+ * ldlm_export_lock_keycmp() */
+ /* coverity[overrun-buffer-val] */
+ cfs_hash_rehash_key(exp->exp_lock_hash,
+ &lock->l_remote_handle,
+ &reply->lock_handle,
+ &lock->l_exp_hash);
+ } else {
+ lock->l_remote_handle = reply->lock_handle;
+ }
+
LDLM_DEBUG(lock, "replayed lock:");
ptlrpc_import_recovery_state_machine(req->rq_import);
LDLM_LOCK_PUT(lock);
out:
if (rc != ELDLM_OK)
- ptlrpc_connect_import(req->rq_import, NULL);
-
+ ptlrpc_connect_import(req->rq_import);
RETURN(rc);
}
flags = LDLM_FL_REPLAY | LDLM_FL_BLOCK_GRANTED;
else if (lock->l_granted_mode)
flags = LDLM_FL_REPLAY | LDLM_FL_BLOCK_CONV;
- else if (!list_empty(&lock->l_res_link))
+ else if (!cfs_list_empty(&lock->l_res_link))
flags = LDLM_FL_REPLAY | LDLM_FL_BLOCK_WAIT;
else
flags = LDLM_FL_REPLAY;
LDLM_DEBUG(lock, "replaying lock:");
- atomic_inc(&req->rq_import->imp_replay_inflight);
+ cfs_atomic_inc(&req->rq_import->imp_replay_inflight);
CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
aa = ptlrpc_req_async_args(req);
aa->lock_handle = body->lock_handle[0];
- req->rq_interpret_reply = replay_lock_interpret;
- ptlrpcd_add_req(req);
+ req->rq_interpret_reply = (ptlrpc_interpterer_t)replay_lock_interpret;
+ ptlrpcd_add_req(req, PDL_POLICY_LOCAL, -1);
RETURN(0);
}
+/**
+ * Cancel as many unused locks as possible before replay. Since we are
+ * in recovery, we cannot wait for any outstanding RPCs to complete, nor
+ * send any new RPCs to the server.
+ *
+ * Called only in recovery before replaying locks. There is no need to
+ * replay locks that are unused. Since the clients may hold thousands of
+ * cached unused locks, dropping the unused locks can greatly reduce the
+ * load on the servers at recovery time.
+ */
+static void ldlm_cancel_unused_locks_for_replay(struct ldlm_namespace *ns)
+{
+ int canceled;
+ CFS_LIST_HEAD(cancels);
+
+ CDEBUG(D_DLMTRACE, "Dropping as many unused locks as possible before"
+ "replay for namespace %s (%d)\n",
+ ldlm_ns_name(ns), ns->ns_nr_unused);
+
+ /* We don't need to care whether or not LRU resize is enabled
+ * because the LDLM_CANCEL_NO_WAIT policy doesn't use the
+ * count parameter */
+ canceled = ldlm_cancel_lru_local(ns, &cancels, ns->ns_nr_unused, 0,
+ LCF_LOCAL, LDLM_CANCEL_NO_WAIT);
+
+ CDEBUG(D_DLMTRACE, "Canceled %d unused locks from namespace %s\n",
+ canceled, ldlm_ns_name(ns));
+}
+
int ldlm_replay_locks(struct obd_import *imp)
{
struct ldlm_namespace *ns = imp->imp_obd->obd_namespace;
ENTRY;
- LASSERT(atomic_read(&imp->imp_replay_inflight) == 0);
+ LASSERT(cfs_atomic_read(&imp->imp_replay_inflight) == 0);
+
+ /* don't replay locks if import failed recovery */
+ if (imp->imp_vbr_failed)
+ RETURN(0);
/* ensure this doesn't fall to 0 before all have been queued */
- atomic_inc(&imp->imp_replay_inflight);
+ cfs_atomic_inc(&imp->imp_replay_inflight);
+
+ if (ldlm_cancel_unused_locks_before_replay)
+ ldlm_cancel_unused_locks_for_replay(ns);
- (void)ldlm_namespace_foreach(ns, ldlm_chain_lock_for_replay, &list);
+ ldlm_namespace_foreach(ns, ldlm_chain_lock_for_replay, &list);
- list_for_each_entry_safe(lock, next, &list, l_pending_chain) {
- list_del_init(&lock->l_pending_chain);
- if (rc)
+ cfs_list_for_each_entry_safe(lock, next, &list, l_pending_chain) {
+ cfs_list_del_init(&lock->l_pending_chain);
+ if (rc) {
+ LDLM_LOCK_RELEASE(lock);
continue; /* or try to do the rest? */
+ }
rc = replay_one_lock(imp, lock);
+ LDLM_LOCK_RELEASE(lock);
}
- atomic_dec(&imp->imp_replay_inflight);
+ cfs_atomic_dec(&imp->imp_replay_inflight);
RETURN(rc);
}
+EXPORT_SYMBOL(ldlm_replay_locks);