/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
* vim:expandtab:shiftwidth=8:tabstop=8:
*
- * Copyright (C) 2002, 2003 Cluster File Systems, Inc.
+ * GPL HEADER START
*
- * This file is part of the Lustre file system, http://www.lustre.org
- * Lustre is a trademark of Cluster File Systems, Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
- * You may have signed or agreed to another license before downloading
- * this software. If so, you are bound by the terms and conditions
- * of that agreement, and the following does not apply to you. See the
- * LICENSE file included with this distribution for more information.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
*
- * If you did not agree to a different license, then this copy of Lustre
- * is open source software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
*
- * In either case, Lustre is distributed in the hope that it will be
- * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * license text for more details.
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
*/
#define DEBUG_SUBSYSTEM S_LDLM
#include "ldlm_internal.h"
+int ldlm_enqueue_min = OBD_TIMEOUT_DEFAULT;
+CFS_MODULE_PARM(ldlm_enqueue_min, "i", int, 0644,
+ "lock enqueue timeout minimum");
+
static void interrupted_completion_wait(void *data)
{
}
if (ptlrpc_check_suspend())
RETURN(0);
- LDLM_ERROR(lock, "lock timed out (enqueued at %lu, %lus ago); "
- "not entering recovery in server code, just going "
- "back to sleep", lock->l_enqueued_time.tv_sec,
- CURRENT_SECONDS - lock->l_enqueued_time.tv_sec);
+ LDLM_ERROR(lock, "lock timed out (enqueued at "CFS_TIME_T", "
+ CFS_DURATION_T"s ago); not entering recovery in "
+ "server code, just going back to sleep",
+ lock->l_last_activity,
+ cfs_time_sub(cfs_time_current_sec(),
+ lock->l_last_activity));
if (cfs_time_after(cfs_time_current(), next_dump)) {
last_dump = next_dump;
next_dump = cfs_time_shift(300);
obd = lock->l_conn_export->exp_obd;
imp = obd->u.cli.cl_import;
ptlrpc_fail_import(imp, lwd->lwd_conn_cnt);
- LDLM_ERROR(lock, "lock timed out (enqueued at %lu, %lus ago), entering "
- "recovery for %s@%s", lock->l_enqueued_time.tv_sec,
- CURRENT_SECONDS - lock->l_enqueued_time.tv_sec,
- obd2cli_tgt(obd), imp->imp_connection->c_remote_uuid.uuid);
+ LDLM_ERROR(lock, "lock timed out (enqueued at "CFS_TIME_T", "
+ CFS_DURATION_T"s ago), entering recovery for %s@%s",
+ lock->l_last_activity,
+ cfs_time_sub(cfs_time_current_sec(), lock->l_last_activity),
+ obd2cli_tgt(obd), imp->imp_connection->c_remote_uuid.uuid);
RETURN(0);
}
-static int is_granted_or_cancelled(struct ldlm_lock *lock)
+/* We use the same timeout basis for both server-side and client-side
+ functions on a single node. */
+int ldlm_get_enq_timeout(struct ldlm_lock *lock)
{
- int ret = 0;
+ int timeout = at_get(&lock->l_resource->lr_namespace->ns_at_estimate);
+ if (AT_OFF)
+ return obd_timeout / 2;
+ /* Since these are non-updating timeouts, we should be conservative.
+ It would be nice to have some kind of "early reply" mechanism for
+ lock callbacks too... */
+ timeout = timeout + (timeout >> 1); /* 150% */
+ return max(timeout, ldlm_enqueue_min);
+}
+EXPORT_SYMBOL(ldlm_get_enq_timeout);
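+
+/* Worked example (illustrative numbers only): with adaptive timeouts on
+ * and ns_at_estimate currently 20s, the result is
+ * max(20 + 20/2, ldlm_enqueue_min), i.e. 30s unless ldlm_enqueue_min is
+ * larger; ldlm_completion_ast() below waits twice this value before its
+ * expired-wait handler runs. */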
- lock_res_and_lock(lock);
- if (((lock->l_req_mode == lock->l_granted_mode) &&
- !(lock->l_flags & LDLM_FL_CP_REQD)) ||
- (lock->l_flags & LDLM_FL_FAILED))
- ret = 1;
- unlock_res_and_lock(lock);
+/**
+ * Helper function for ldlm_completion_ast(), updating timings when lock is
+ * actually granted.
+ */
+static int ldlm_completion_tail(struct ldlm_lock *lock)
+{
+ long delay;
+ int result;
+
+ if (lock->l_destroyed || lock->l_flags & LDLM_FL_FAILED) {
+ LDLM_DEBUG(lock, "client-side enqueue: destroyed");
+ result = -EIO;
+ } else {
+ delay = cfs_time_sub(cfs_time_current_sec(),
+ lock->l_last_activity);
+ LDLM_DEBUG(lock, "client-side enqueue: granted after "
+ CFS_DURATION_T"s", delay);
+
+ /* Update our time estimate */
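+ /* (the same ns_at_estimate that ldlm_get_enq_timeout() reads) */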
+ at_add(&lock->l_resource->lr_namespace->ns_at_estimate, delay);
+ result = 0;
+ }
+ return result;
+}
- return ret;
+/**
+ * Implementation of ->l_completion_ast() for a client that doesn't wait
+ * until the lock is granted. Suitable for locks enqueued through ptlrpcd
+ * or other threads that cannot block for long.
+ */
+int ldlm_completion_ast_async(struct ldlm_lock *lock, int flags, void *data)
+{
+ ENTRY;
+
+ if (flags == LDLM_FL_WAIT_NOREPROC) {
+ LDLM_DEBUG(lock, "client-side enqueue waiting on pending lock");
+ RETURN(0);
+ }
+
+ if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
+ LDLM_FL_BLOCK_CONV))) {
+ cfs_waitq_signal(&lock->l_waitq);
+ RETURN(ldlm_completion_tail(lock));
+ }
+
+ LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, "
+ "going forward");
+ ldlm_lock_dump(D_OTHER, lock, 0);
+ ldlm_reprocess_all(lock->l_resource);
+ RETURN(0);
}
+/**
+ * Client side LDLM "completion" AST. This is called in several cases:
+ *
+ * - when a reply to an ENQUEUE rpc is received from the server
+ * (ldlm_cli_enqueue_fini()). Lock might be granted or not granted at
+ * this point (determined by flags);
+ *
+ * - when LDLM_CP_CALLBACK rpc comes to client to notify it that lock has
+ * been granted;
+ *
+ * - when ldlm_lock_match(LDLM_FL_LVB_READY) is about to wait until lock
+ * gets correct lvb;
+ *
+ * - to force all locks when resource is destroyed (cleanup_resource());
+ *
+ * - during lock conversion (not used currently).
+ *
+ * If the lock is not granted in the first case, this function waits until
+ * the second or the penultimate case happens in some other thread.
+ *
+ */
int ldlm_completion_ast(struct ldlm_lock *lock, int flags, void *data)
{
/* XXX ALLOCATE - 160 bytes */
struct obd_device *obd;
struct obd_import *imp = NULL;
struct l_wait_info lwi;
+ __u32 timeout;
int rc = 0;
ENTRY;
LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, "
"sleeping");
ldlm_lock_dump(D_OTHER, lock, 0);
- ldlm_reprocess_all(lock->l_resource);
noreproc:
obd = class_exp2obd(lock->l_conn_export);
/* if this is a local lock, then there is no import */
- if (obd != NULL)
+ if (obd != NULL) {
imp = obd->u.cli.cl_import;
+ }
+
+ /* Wait a long time for enqueue - server may have to callback a
+ lock from another client. Server will evict the other client if it
+ doesn't respond reasonably, and then give us the lock. */
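+ /* (e.g. an enqueue estimate of 30s yields a 60s wait below) */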
+ timeout = ldlm_get_enq_timeout(lock) * 2;
lwd.lwd_lock = lock;
LDLM_DEBUG(lock, "waiting indefinitely because of NO_TIMEOUT");
lwi = LWI_INTR(interrupted_completion_wait, &lwd);
} else {
- lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(obd_timeout),
+ lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(timeout),
ldlm_expired_completion_wait,
interrupted_completion_wait, &lwd);
}
spin_unlock(&imp->imp_lock);
}
- /* Go to sleep until the lock is granted or cancelled. */
- rc = l_wait_event(lock->l_waitq, is_granted_or_cancelled(lock), &lwi);
-
- if (lock->l_destroyed || lock->l_flags & LDLM_FL_FAILED) {
- LDLM_DEBUG(lock, "client-side enqueue waking up: destroyed");
- RETURN(-EIO);
+ if (ns_is_client(lock->l_resource->lr_namespace) &&
+ OBD_FAIL_CHECK_RESET(OBD_FAIL_LDLM_INTR_CP_AST,
+ OBD_FAIL_LDLM_CP_BL_RACE | OBD_FAIL_ONCE)) {
+ lock->l_flags |= LDLM_FL_FAIL_LOC;
+ rc = -EINTR;
+ } else {
+ /* Go to sleep until the lock is granted or cancelled. */
+ rc = l_wait_event(lock->l_waitq,
+ is_granted_or_cancelled(lock), &lwi);
}
if (rc) {
RETURN(rc);
}
- LDLM_DEBUG(lock, "client-side enqueue waking up: granted");
- RETURN(0);
+ RETURN(ldlm_completion_tail(lock));
}
-/*
- * ->l_blocking_ast() callback for LDLM locks acquired by server-side OBDs.
+/**
+ * A helper to build a blocking ast function
+ *
+ * Perform a common operation for blocking ASTs:
+ * deferred lock cancellation.
+ *
+ * \param lock the lock blocking or canceling ast was called on
+ * \retval 0
+ * \see mdt_blocking_ast
+ * \see ldlm_blocking_ast
*/
-int ldlm_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
- void *data, int flag)
+int ldlm_blocking_ast_nocheck(struct ldlm_lock *lock)
{
int do_ast;
ENTRY;
- if (flag == LDLM_CB_CANCELING) {
- /* Don't need to do anything here. */
- RETURN(0);
- }
-
- lock_res_and_lock(lock);
- /* Get this: if ldlm_blocking_ast is racing with intent_policy, such
- * that ldlm_blocking_ast is called just before intent_policy method
- * takes the ns_lock, then by the time we get the lock, we might not
- * be the correct blocking function anymore. So check, and return
- * early, if so. */
- if (lock->l_blocking_ast != ldlm_blocking_ast) {
- unlock_res_and_lock(lock);
- RETURN(0);
- }
-
lock->l_flags |= LDLM_FL_CBPENDING;
do_ast = (!lock->l_readers && !lock->l_writers);
unlock_res_and_lock(lock);
RETURN(0);
}
+/**
+ * Server blocking AST
+ *
+ * ->l_blocking_ast() callback for LDLM locks acquired by server-side
+ * OBDs.
+ *
+ * \param lock the lock which blocks a request or is being cancelled
+ * \param desc unused
+ * \param data unused
+ * \param flag indicates whether this is a cancelling or a blocking callback
+ * \retval 0
+ * \see ldlm_blocking_ast_nocheck
+ */
+int ldlm_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
+ void *data, int flag)
+{
+ ENTRY;
+
+ if (flag == LDLM_CB_CANCELING) {
+ /* Don't need to do anything here. */
+ RETURN(0);
+ }
+
+ lock_res_and_lock(lock);
+ /* Get this: if ldlm_blocking_ast is racing with intent_policy, such
+ * that ldlm_blocking_ast is called just before intent_policy method
+ * takes the ns_lock, then by the time we get the lock, we might not
+ * be the correct blocking function anymore. So check, and return
+ * early, if so. */
+ if (lock->l_blocking_ast != ldlm_blocking_ast) {
+ unlock_res_and_lock(lock);
+ RETURN(0);
+ }
+ RETURN(ldlm_blocking_ast_nocheck(lock));
+}
+
/*
* ->l_glimpse_ast() for DLM extent locks acquired on the server-side. See
* comment in filter_intent_policy() on why you may need this.
ldlm_completion_callback completion,
ldlm_glimpse_callback glimpse,
void *data, __u32 lvb_len, void *lvb_swabber,
+ const __u64 *client_cookie,
struct lustre_handle *lockh)
{
struct ldlm_lock *lock;
int err;
+ const struct ldlm_callback_suite cbs = { .lcs_completion = completion,
+ .lcs_blocking = blocking,
+ .lcs_glimpse = glimpse,
+ };
ENTRY;
LASSERT(!(*flags & LDLM_FL_REPLAY));
LBUG();
}
- lock = ldlm_lock_create(ns, res_id, type, mode, blocking,
- completion, glimpse, data, lvb_len);
+ lock = ldlm_lock_create(ns, res_id, type, mode, &cbs, data, lvb_len);
if (unlikely(!lock))
GOTO(out_nolock, err = -ENOMEM);
LDLM_DEBUG(lock, "client-side local enqueue handler, new lock created");
unlock_res_and_lock(lock);
if (policy != NULL)
lock->l_policy_data = *policy;
+ if (client_cookie != NULL)
+ lock->l_client_cookie = *client_cookie;
if (type == LDLM_EXTENT)
lock->l_req_extent = policy->l_extent;
LDLM_DEBUG(lock, "client-side local enqueue END");
EXIT;
out:
- LDLM_LOCK_PUT(lock);
+ LDLM_LOCK_RELEASE(lock);
out_nolock:
return err;
}
struct ldlm_lock *lock,
struct lustre_handle *lockh, int mode)
{
+ int need_cancel = 0;
+
/* Set a flag to prevent us from sending a CANCEL (bug 407) */
lock_res_and_lock(lock);
- lock->l_flags |= LDLM_FL_LOCAL_ONLY;
+ /* Check that lock is not granted or failed, we might race. */
+ if ((lock->l_req_mode != lock->l_granted_mode) &&
+ !(lock->l_flags & LDLM_FL_FAILED)) {
+ /* Make sure that this lock will not be found by a racing
+ * bl_ast, and that an -EINVAL reply is sent to the server
+ * anyway. bug 17645 */
+ lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_FAILED |
+ LDLM_FL_ATOMIC_CB;
+ need_cancel = 1;
+ }
unlock_res_and_lock(lock);
- LDLM_DEBUG(lock, "setting FL_LOCAL_ONLY");
- ldlm_lock_decref_and_cancel(lockh, mode);
+ if (need_cancel) {
+ LDLM_DEBUG(lock,
+ "setting FL_LOCAL_ONLY | LDLM_FL_FAILED | "
+ "LDLM_FL_ATOMIC_CB");
+ ldlm_lock_decref_and_cancel(lockh, mode);
+ } else {
+ LDLM_DEBUG(lock, "lock was granted or failed in race");
+ ldlm_lock_decref(lockh, mode);
+ }
/* XXX - HACK because we shouldn't call ldlm_lock_destroy()
* from llite/file.c/ll_file_flock(). */
{
struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
int is_replay = *flags & LDLM_FL_REPLAY;
+ struct lustre_handle old_hash_key;
struct ldlm_lock *lock;
struct ldlm_reply *reply;
int cleanup_phase = 1;
rc == ELDLM_LOCK_ABORTED ? "ABORTED" : "FAILED");
if (rc == ELDLM_LOCK_ABORTED) {
/* Before we return, swab the reply */
- reply = lustre_swab_repbuf(req, DLM_LOCKREPLY_OFF,
- sizeof(*reply),
- lustre_swab_ldlm_reply);
- if (reply == NULL) {
- CERROR("Can't unpack ldlm_reply\n");
+ reply = req_capsule_server_get(&req->rq_pill,
+ &RMF_DLM_REP);
+ if (reply == NULL)
rc = -EPROTO;
- }
if (lvb_len) {
- void *tmplvb;
- tmplvb = lustre_swab_repbuf(req,
- DLM_REPLY_REC_OFF,
- lvb_len,
- lvb_swabber);
+ struct ost_lvb *tmplvb;
+
+ req_capsule_set_size(&req->rq_pill,
+ &RMF_DLM_LVB, RCL_SERVER,
+ lvb_len);
+ tmplvb = req_capsule_server_swab_get(&req->rq_pill,
+ &RMF_DLM_LVB,
+ lvb_swabber);
if (tmplvb == NULL)
GOTO(cleanup, rc = -EPROTO);
if (lvb != NULL)
GOTO(cleanup, rc);
}
- reply = lustre_swab_repbuf(req, DLM_LOCKREPLY_OFF, sizeof(*reply),
- lustre_swab_ldlm_reply);
- if (reply == NULL) {
- CERROR("Can't unpack ldlm_reply\n");
+ reply = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
+ if (reply == NULL)
GOTO(cleanup, rc = -EPROTO);
- }
/* lock enqueued on the server */
cleanup_phase = 0;
lock_res_and_lock(lock);
+ old_hash_key = lock->l_remote_handle;
lock->l_remote_handle = reply->lock_handle;
+
+ /* The key changed: rehash the lock in the per-export hash
+ * with the new key */
+ if (exp->exp_lock_hash)
+ lustre_hash_rehash_key(exp->exp_lock_hash, &old_hash_key,
+ &lock->l_remote_handle,
+ &lock->l_exp_hash);
+
*flags = reply->lock_flags;
lock->l_flags |= reply->lock_flags & LDLM_INHERIT_FLAGS;
/* move NO_TIMEOUT flag to the lock to force ldlm_lock_match()
(long)lock->l_resource->lr_name.name[1],
(long)lock->l_resource->lr_name.name[2]);
- ldlm_lock_change_resource(ns, lock,
- &reply->lock_desc.l_resource.lr_name);
- if (lock->l_resource == NULL) {
- LBUG();
+ rc = ldlm_lock_change_resource(ns, lock,
+ &reply->lock_desc.l_resource.lr_name);
+ if (rc || lock->l_resource == NULL)
GOTO(cleanup, rc = -ENOMEM);
- }
LDLM_DEBUG(lock, "client-side enqueue, new resource");
}
if (with_policy)
* clobber the LVB with an older one. */
if (lvb_len && (lock->l_req_mode != lock->l_granted_mode)) {
void *tmplvb;
- tmplvb = lustre_swab_repbuf(req, DLM_REPLY_REC_OFF, lvb_len,
- lvb_swabber);
+
+ req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER,
+ lvb_len);
+ tmplvb = req_capsule_server_swab_get(&req->rq_pill,
+ &RMF_DLM_LVB,
+ lvb_swabber);
if (tmplvb == NULL)
GOTO(cleanup, rc = -EPROTO);
memcpy(lock->l_lvb_data, tmplvb, lvb_len);
failed_lock_cleanup(ns, lock, lockh, mode);
/* Put lock 2 times, the second reference is held by ldlm_cli_enqueue */
LDLM_LOCK_PUT(lock);
- LDLM_LOCK_PUT(lock);
+ LDLM_LOCK_RELEASE(lock);
return rc;
}
/* PAGE_SIZE-512 is to allow TCP/IP and LNET headers to fit into
* a single page on the send/receive side. XXX: 512 should be changed
* to a more adequate value. */
-static inline int ldlm_req_handles_avail(struct obd_export *exp,
- int *size, int bufcount, int off)
+static inline int ldlm_req_handles_avail(int req_size, int off)
{
- int avail = min_t(int, LDLM_MAXREQSIZE, PAGE_SIZE - 512);
- int old_size = size[DLM_LOCKREQ_OFF];
+ int avail;
- size[DLM_LOCKREQ_OFF] = sizeof(struct ldlm_request);
- avail -= lustre_msg_size(class_exp2cliimp(exp)->imp_msg_magic,
- bufcount, size);
- avail /= sizeof(struct lustre_handle);
+ avail = min_t(int, LDLM_MAXREQSIZE, CFS_PAGE_SIZE - 512) - req_size;
+ if (likely(avail >= 0))
+ avail /= (int)sizeof(struct lustre_handle);
+ else
+ avail = 0;
avail += LDLM_LOCKREQ_HANDLES - off;
- size[DLM_LOCKREQ_OFF] = old_size;
return avail;
}
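+
+/* Illustrative sizing, assuming 4K pages and an 8-byte struct
+ * lustre_handle: with req_size == 1024 this gives
+ * avail = (4096 - 512 - 1024) / 8 = 320 extra handles, plus the
+ * LDLM_LOCKREQ_HANDLES built into the request body, minus the @off
+ * slots already reserved. */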
-static inline int ldlm_cancel_handles_avail(struct obd_export *exp)
+static inline int ldlm_capsule_handles_avail(struct req_capsule *pill,
+ enum req_location loc,
+ int off)
{
- int size[2] = { sizeof(struct ptlrpc_body),
- sizeof(struct ldlm_request) };
- return ldlm_req_handles_avail(exp, size, 2, 0);
+ int size = req_capsule_msg_size(pill, loc);
+ return ldlm_req_handles_avail(size, off);
+}
+
+static inline int ldlm_format_handles_avail(struct obd_import *imp,
+ const struct req_format *fmt,
+ enum req_location loc, int off)
+{
+ int size = req_capsule_fmt_size(imp->imp_msg_magic, fmt, loc);
+ return ldlm_req_handles_avail(size, off);
}
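+
+/* The two wrappers above differ only in where the request size comes
+ * from: ldlm_capsule_handles_avail() measures an already-filled capsule,
+ * while ldlm_format_handles_avail() estimates from the request format
+ * before any request has been allocated (see ldlm_cli_cancel_req()). */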
/* Cancel lru locks and pack them into the enqueue request. Also pack the
 * given @count locks in @cancels. */
-struct ptlrpc_request *ldlm_prep_enqueue_req(struct obd_export *exp,
- int bufcount, int *size,
- struct list_head *cancels,
- int count)
+int ldlm_prep_elc_req(struct obd_export *exp, struct ptlrpc_request *req,
+ int version, int opc, int canceloff,
+ struct list_head *cancels, int count)
{
- struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
- struct ldlm_request *dlm = NULL;
- struct ptlrpc_request *req;
+ struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
+ struct req_capsule *pill = &req->rq_pill;
+ struct ldlm_request *dlm = NULL;
+ int flags, avail, to_free, bufcount, pack = 0;
CFS_LIST_HEAD(head);
+ int rc;
ENTRY;
if (cancels == NULL)
cancels = &head;
if (exp_connect_cancelset(exp)) {
/* Estimate the amount of available space in the request. */
- int avail = ldlm_req_handles_avail(exp, size, bufcount,
- LDLM_ENQUEUE_CANCEL_OFF);
- int flags, cancel;
+ bufcount = req_capsule_filled_sizes(pill, RCL_CLIENT);
+ avail = ldlm_capsule_handles_avail(pill, RCL_CLIENT, canceloff);
- LASSERT(avail >= count);
-
- flags = ns_connect_lru_resize(ns) ?
+ flags = ns_connect_lru_resize(ns) ?
LDLM_CANCEL_LRUR : LDLM_CANCEL_AGED;
- cancel = ns_connect_lru_resize(ns) ? 0 : 1;
+ to_free = !ns_connect_lru_resize(ns) &&
+ opc == LDLM_ENQUEUE ? 1 : 0;
- /* Cancel lru locks here _only_ if the server supports
+ /* Cancel lru locks here _only_ if the server supports
* EARLY_CANCEL. Otherwise we have to send extra CANCEL
- * rpc right on enqueue, what will make it slower, vs.
- * asynchronous rpc in blocking thread. */
- count += ldlm_cancel_lru_local(ns, cancels, cancel,
- avail - count, 0, flags);
- size[DLM_LOCKREQ_OFF] =
- ldlm_request_bufsize(count, LDLM_ENQUEUE);
- }
- req = ptlrpc_prep_req(class_exp2cliimp(exp), LUSTRE_DLM_VERSION,
- LDLM_ENQUEUE, bufcount, size, NULL);
- if (exp_connect_cancelset(exp) && req) {
- dlm = lustre_msg_buf(req->rq_reqmsg,
- DLM_LOCKREQ_OFF, sizeof(*dlm));
- /* Skip first lock handler in ldlm_request_pack(), this method
- * will incrment @lock_count according to the lock handle amount
- * actually written to the buffer. */
- dlm->lock_count = LDLM_ENQUEUE_CANCEL_OFF;
- ldlm_cli_cancel_list(cancels, count, req, DLM_LOCKREQ_OFF, 0);
+ * rpc, which will make us slower. */
+ if (avail > count)
+ count += ldlm_cancel_lru_local(ns, cancels, to_free,
+ avail - count, 0, flags);
+ if (avail > count)
+ pack = count;
+ else
+ pack = avail;
+ req_capsule_set_size(pill, &RMF_DLM_REQ, RCL_CLIENT,
+ ldlm_request_bufsize(pack, opc));
+ }
+
+ rc = ptlrpc_request_pack(req, version, opc);
+ if (rc) {
+ ldlm_lock_list_put(cancels, l_bl_ast, count);
+ RETURN(rc);
+ }
+
+ if (exp_connect_cancelset(exp)) {
+ if (canceloff) {
+ dlm = req_capsule_client_get(pill, &RMF_DLM_REQ);
+ LASSERT(dlm);
+ /* Skip the first lock handle in ldlm_request_pack();
+ * this method will increment @lock_count according
+ * to the number of lock handles actually written
+ * to the buffer. */
+ dlm->lock_count = canceloff;
+ }
+ /* Pack into the request @pack lock handles. */
+ ldlm_cli_cancel_list(cancels, pack, req, 0);
+ /* Prepare and send separate cancel rpc for others. */
+ ldlm_cli_cancel_list(cancels, count - pack, NULL, 0);
} else {
ldlm_lock_list_put(cancels, l_bl_ast, count);
}
- RETURN(req);
+ RETURN(0);
+}
+
+int ldlm_prep_enqueue_req(struct obd_export *exp, struct ptlrpc_request *req,
+ struct list_head *cancels, int count)
+{
+ return ldlm_prep_elc_req(exp, req, LUSTRE_DLM_VERSION, LDLM_ENQUEUE,
+ LDLM_ENQUEUE_CANCEL_OFF, cancels, count);
}
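+
+/* Sketch of the intended calling convention (hypothetical caller, shown
+ * for illustration): the request is allocated but not yet packed, and
+ * this helper packs it while filling the early-cancel slots:
+ *
+ * req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LDLM_ENQUEUE);
+ * if (req != NULL)
+ * rc = ldlm_prep_enqueue_req(exp, req, NULL, 0);
+ */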
/* If a request has some specific initialisation it is passed in @reqp,
struct lustre_handle *lockh, int async)
{
struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
- struct ldlm_lock *lock;
- struct ldlm_request *body;
- struct ldlm_reply *reply;
- int size[3] = { [MSG_PTLRPC_BODY_OFF] = sizeof(struct ptlrpc_body),
- [DLM_LOCKREQ_OFF] = sizeof(*body),
- [DLM_REPLY_REC_OFF] = lvb_len };
- int is_replay = *flags & LDLM_FL_REPLAY;
- int req_passed_in = 1, rc, err;
+ struct ldlm_lock *lock;
+ struct ldlm_request *body;
+ int is_replay = *flags & LDLM_FL_REPLAY;
+ int req_passed_in = 1;
+ int rc, err;
struct ptlrpc_request *req;
ENTRY;
/* If we're replaying this lock, just check some invariants.
* If we're creating a new lock, get everything all setup nice. */
if (is_replay) {
- lock = ldlm_handle2lock(lockh);
+ lock = ldlm_handle2lock_long(lockh, 0);
LASSERT(lock != NULL);
LDLM_DEBUG(lock, "client-side enqueue START");
LASSERT(exp == lock->l_conn_export);
} else {
+ const struct ldlm_callback_suite cbs = {
+ .lcs_completion = einfo->ei_cb_cp,
+ .lcs_blocking = einfo->ei_cb_bl,
+ .lcs_glimpse = einfo->ei_cb_gl,
+ .lcs_weigh = einfo->ei_cb_wg
+ };
lock = ldlm_lock_create(ns, res_id, einfo->ei_type,
- einfo->ei_mode, einfo->ei_cb_bl,
- einfo->ei_cb_cp, einfo->ei_cb_gl,
- einfo->ei_cbdata, lvb_len);
+ einfo->ei_mode, &cbs, einfo->ei_cbdata,
+ lvb_len);
if (lock == NULL)
RETURN(-ENOMEM);
/* for the local lock, add the reference */
/* lock not sent to server yet */
if (reqp == NULL || *reqp == NULL) {
- req = ldlm_prep_enqueue_req(exp, 2, size, NULL, 0);
+ req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
+ &RQF_LDLM_ENQUEUE,
+ LUSTRE_DLM_VERSION,
+ LDLM_ENQUEUE);
if (req == NULL) {
failed_lock_cleanup(ns, lock, lockh, einfo->ei_mode);
- LDLM_LOCK_PUT(lock);
+ LDLM_LOCK_RELEASE(lock);
RETURN(-ENOMEM);
}
req_passed_in = 0;
if (reqp)
*reqp = req;
} else {
+ int len;
+
req = *reqp;
- LASSERTF(lustre_msg_buflen(req->rq_reqmsg, DLM_LOCKREQ_OFF) >=
- sizeof(*body), "buflen[%d] = %d, not "LPSZ"\n",
- DLM_LOCKREQ_OFF,
- lustre_msg_buflen(req->rq_reqmsg, DLM_LOCKREQ_OFF),
- sizeof(*body));
+ len = req_capsule_get_size(&req->rq_pill, &RMF_DLM_REQ,
+ RCL_CLIENT);
+ LASSERTF(len >= sizeof(*body), "buflen[%d] = %d, not %d\n",
+ DLM_LOCKREQ_OFF, len, (int)sizeof(*body));
}
lock->l_conn_export = exp;
lock->l_blocking_ast = einfo->ei_cb_bl;
/* Dump lock data into the request buffer */
- body = lustre_msg_buf(req->rq_reqmsg, DLM_LOCKREQ_OFF, sizeof(*body));
+ body = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
ldlm_lock2desc(lock, &body->lock_desc);
body->lock_flags = *flags;
body->lock_handle[0] = *lockh;
/* Continue as normal. */
if (!req_passed_in) {
- size[DLM_LOCKREPLY_OFF] = sizeof(*reply);
- ptlrpc_req_set_repsize(req, 2 + (lvb_len > 0), size);
+ if (lvb_len > 0) {
+ req_capsule_extend(&req->rq_pill,
+ &RQF_LDLM_ENQUEUE_LVB);
+ req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB,
+ RCL_SERVER, lvb_len);
+ }
+ ptlrpc_request_set_replen(req);
}
/*
}
LDLM_DEBUG(lock, "sending request");
+
rc = ptlrpc_queue_wait(req);
+
err = ldlm_cli_enqueue_fini(exp, req, einfo->ei_type, policy ? 1 : 0,
einfo->ei_mode, flags, lvb, lvb_len,
lvb_swabber, lockh, rc);
/* If ldlm_cli_enqueue_fini did not find the lock, we need to free
* one reference that we took */
if (err == -ENOLCK)
- LDLM_LOCK_PUT(lock);
+ LDLM_LOCK_RELEASE(lock);
else
rc = err;
}
static int ldlm_cli_convert_local(struct ldlm_lock *lock, int new_mode,
- int *flags)
+ __u32 *flags)
{
struct ldlm_resource *res;
int rc;
* conversion of locks which are on the waiting or converting queue */
/* Caller of this code is supposed to take care of lock readers/writers
accounting */
-int ldlm_cli_convert(struct lustre_handle *lockh, int new_mode, int *flags)
+int ldlm_cli_convert(struct lustre_handle *lockh, int new_mode, __u32 *flags)
{
- struct ldlm_request *body;
- struct ldlm_reply *reply;
- struct ldlm_lock *lock;
- struct ldlm_resource *res;
+ struct ldlm_request *body;
+ struct ldlm_reply *reply;
+ struct ldlm_lock *lock;
+ struct ldlm_resource *res;
struct ptlrpc_request *req;
- int size[2] = { [MSG_PTLRPC_BODY_OFF] = sizeof(struct ptlrpc_body),
- [DLM_LOCKREQ_OFF] = sizeof(*body) };
- int rc;
+ int rc;
ENTRY;
lock = ldlm_handle2lock(lockh);
LDLM_DEBUG(lock, "client-side convert");
- req = ptlrpc_prep_req(class_exp2cliimp(lock->l_conn_export),
- LUSTRE_DLM_VERSION, LDLM_CONVERT, 2, size, NULL);
- if (!req)
- GOTO(out, rc = -ENOMEM);
+ req = ptlrpc_request_alloc_pack(class_exp2cliimp(lock->l_conn_export),
+ &RQF_LDLM_CONVERT, LUSTRE_DLM_VERSION,
+ LDLM_CONVERT);
+ if (req == NULL) {
+ LDLM_LOCK_PUT(lock);
+ RETURN(-ENOMEM);
+ }
- body = lustre_msg_buf(req->rq_reqmsg, DLM_LOCKREQ_OFF, sizeof(*body));
+ body = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
body->lock_handle[0] = lock->l_remote_handle;
body->lock_desc.l_req_mode = new_mode;
body->lock_flags = *flags;
- size[DLM_LOCKREPLY_OFF] = sizeof(*reply);
- ptlrpc_req_set_repsize(req, 2, size);
+ ptlrpc_request_set_replen(req);
rc = ptlrpc_queue_wait(req);
if (rc != ELDLM_OK)
GOTO(out, rc);
- reply = lustre_swab_repbuf(req, DLM_LOCKREPLY_OFF, sizeof(*reply),
- lustre_swab_ldlm_reply);
- if (reply == NULL) {
- CERROR ("Can't unpack ldlm_reply\n");
- GOTO (out, rc = -EPROTO);
- }
+ reply = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
+ if (reply == NULL)
+ GOTO(out, rc = -EPROTO);
if (req->rq_status)
GOTO(out, rc = req->rq_status);
{
int rc = LDLM_FL_LOCAL_ONLY;
ENTRY;
-
+
if (lock->l_conn_export) {
int local_only;
/* Pack @count locks in @head into the ldlm_request buffer of request @req. */
-static void ldlm_cancel_pack(struct ptlrpc_request *req, int off,
+static void ldlm_cancel_pack(struct ptlrpc_request *req,
struct list_head *head, int count)
{
struct ldlm_request *dlm;
int max, packed = 0;
ENTRY;
- dlm = lustre_msg_buf(req->rq_reqmsg, off, sizeof(*dlm));
+ dlm = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
LASSERT(dlm != NULL);
/* Check the room in the request buffer. */
- max = lustre_msg_buflen(req->rq_reqmsg, off) -
+ max = req_capsule_get_size(&req->rq_pill, &RMF_DLM_REQ, RCL_CLIENT) -
sizeof(struct ldlm_request);
max /= sizeof(struct lustre_handle);
max += LDLM_LOCKREQ_HANDLES;
int count, int flags)
{
struct ptlrpc_request *req = NULL;
- struct ldlm_request *body;
- int size[2] = { [MSG_PTLRPC_BODY_OFF] = sizeof(struct ptlrpc_body),
- [DLM_LOCKREQ_OFF] = sizeof(*body) };
struct obd_import *imp;
int free, sent = 0;
int rc = 0;
LASSERT(exp != NULL);
LASSERT(count > 0);
+ OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_PAUSE_CANCEL, obd_fail_val);
+
if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_RACE))
RETURN(count);
- free = ldlm_req_handles_avail(exp, size, 2, 0);
+ free = ldlm_format_handles_avail(class_exp2cliimp(exp),
+ &RQF_LDLM_CANCEL, RCL_CLIENT, 0);
if (count > free)
count = free;
- size[DLM_LOCKREQ_OFF] = ldlm_request_bufsize(count, LDLM_CANCEL);
while (1) {
+ int bufcount;
+
imp = class_exp2cliimp(exp);
if (imp == NULL || imp->imp_invalid) {
CDEBUG(D_DLMTRACE,
RETURN(count);
}
- req = ptlrpc_prep_req(imp, LUSTRE_DLM_VERSION, LDLM_CANCEL, 2,
- size, NULL);
- if (!req)
+ req = ptlrpc_request_alloc(imp, &RQF_LDLM_CANCEL);
+ if (req == NULL)
GOTO(out, rc = -ENOMEM);
+ bufcount = req_capsule_filled_sizes(&req->rq_pill, RCL_CLIENT);
+ req_capsule_set_size(&req->rq_pill, &RMF_DLM_REQ, RCL_CLIENT,
+ ldlm_request_bufsize(count, LDLM_CANCEL));
+
+ rc = ptlrpc_request_pack(req, LUSTRE_DLM_VERSION, LDLM_CANCEL);
+ if (rc) {
+ ptlrpc_request_free(req);
+ GOTO(out, rc);
+ }
req->rq_no_resend = 1;
req->rq_no_delay = 1;
- /* XXX FIXME bug 249 */
req->rq_request_portal = LDLM_CANCEL_REQUEST_PORTAL;
req->rq_reply_portal = LDLM_CANCEL_REPLY_PORTAL;
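+ /* let adaptive timeouts size this rpc's timeout */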
+ ptlrpc_at_set_req_timeout(req);
- body = lustre_msg_buf(req->rq_reqmsg, DLM_LOCKREQ_OFF,
- sizeof(*body));
- ldlm_cancel_pack(req, DLM_LOCKREQ_OFF, cancels, count);
+ ldlm_cancel_pack(req, cancels, count);
- ptlrpc_req_set_repsize(req, 1, NULL);
+ ptlrpc_request_set_replen(req);
if (flags & LDLM_FL_ASYNC) {
- ptlrpcd_add_req(req);
+ ptlrpcd_add_req(req, PSCOPE_OTHER);
sent = count;
GOTO(out, 0);
} else {
return &imp->imp_obd->obd_namespace->ns_pool;
}
+/**
+ * Update client's obd pool related fields with new SLV and Limit from \a req.
+ */
int ldlm_cli_update_pool(struct ptlrpc_request *req)
{
- struct ldlm_pool *pl;
+ struct obd_device *obd;
+ __u64 old_slv, new_slv;
+ __u32 new_limit;
ENTRY;
-
- if (!imp_connect_lru_resize(req->rq_import))
- RETURN(0);
-
- if (lustre_msg_get_slv(req->rq_repmsg) == 0 ||
- lustre_msg_get_limit(req->rq_repmsg) == 0)
- RETURN(0);
-
- pl = ldlm_imp2pl(req->rq_import);
-
- spin_lock(&pl->pl_lock);
-
- /* Check if we need to wakeup pools thread for fast SLV change.
- * This is only done when threads period is noticably long like
- * 10s or more. */
-#if defined(__KERNEL__) && (LDLM_POOLS_THREAD_PERIOD >= 10)
+ if (unlikely(!req->rq_import || !req->rq_import->imp_obd ||
+ !imp_connect_lru_resize(req->rq_import)))
{
- __u64 old_slv, new_slv, fast_change;
-
- old_slv = ldlm_pool_get_slv(pl);
- new_slv = lustre_msg_get_slv(req->rq_repmsg);
- fast_change = old_slv * LDLM_POOLS_FAST_SLV_CHANGE;
- do_div(fast_change, 100);
-
- /* Wake up pools thread only if SLV has changed more than
- * 50% since last update. In this case we want to react asap.
- * Otherwise it is no sense to wake up pools as they are
- * re-calculated every LDLM_POOLS_THREAD_PERIOD anyways. */
- if (old_slv > new_slv && old_slv - new_slv > fast_change)
- ldlm_pools_wakeup();
+ /*
+ * Do nothing for corner cases.
+ */
+ RETURN(0);
}
-#endif
- /* In some cases RPC may contain slv and limit zeroed out. This is
+
+ /*
+ * In some cases RPC may contain slv and limit zeroed out. This is
* the case when server does not support lru resize feature. This is
* also possible in some recovery cases when server side reqs have no
- * ref to obd export and thus access to server side namespace is no
- * possible. */
- if (lustre_msg_get_slv(req->rq_repmsg) != 0 &&
- lustre_msg_get_limit(req->rq_repmsg) != 0) {
- ldlm_pool_set_slv(pl, lustre_msg_get_slv(req->rq_repmsg));
- ldlm_pool_set_limit(pl, lustre_msg_get_limit(req->rq_repmsg));
- } else {
- DEBUG_REQ(D_HA, req, "zero SLV or Limit found "
- "(SLV: "LPU64", Limit: %u)",
- lustre_msg_get_slv(req->rq_repmsg),
+ * ref to obd export and thus access to the server side namespace is
+ * not possible.
+ */
+ if (lustre_msg_get_slv(req->rq_repmsg) == 0 ||
+ lustre_msg_get_limit(req->rq_repmsg) == 0) {
+ DEBUG_REQ(D_HA, req, "Zero SLV or Limit found "
+ "(SLV: "LPU64", Limit: %u)",
+ lustre_msg_get_slv(req->rq_repmsg),
lustre_msg_get_limit(req->rq_repmsg));
+ RETURN(0);
}
- spin_unlock(&pl->pl_lock);
+
+ new_limit = lustre_msg_get_limit(req->rq_repmsg);
+ new_slv = lustre_msg_get_slv(req->rq_repmsg);
+ obd = req->rq_import->imp_obd;
+
+ /*
+ * Set new SLV and Limit in obd fields to make them accessible to the
+ * pool thread. We do not access obd_namespace and pool directly here
+ * as there is no reliable way to make sure that they are still
+ * alive at cleanup time. Evil races are possible which may cause
+ * an oops at that time.
+ */
+ write_lock(&obd->obd_pool_lock);
+ old_slv = obd->obd_pool_slv;
+ obd->obd_pool_slv = new_slv;
+ obd->obd_pool_limit = new_limit;
+ write_unlock(&obd->obd_pool_lock);
RETURN(0);
}
int ldlm_cli_cancel(struct lustre_handle *lockh)
{
+ struct obd_export *exp;
int avail, flags, count = 1, rc = 0;
struct ldlm_namespace *ns;
struct ldlm_lock *lock;
ENTRY;
/* concurrent cancels on the same handle can happen */
- lock = __ldlm_handle2lock(lockh, LDLM_FL_CANCELING);
+ lock = ldlm_handle2lock_long(lockh, LDLM_FL_CANCELING);
if (lock == NULL) {
LDLM_DEBUG_NOLOCK("lock is already being destroyed\n");
RETURN(0);
rc = ldlm_cli_cancel_local(lock);
if (rc < 0 || rc == LDLM_FL_LOCAL_ONLY) {
- LDLM_LOCK_PUT(lock);
+ LDLM_LOCK_RELEASE(lock);
RETURN(rc < 0 ? rc : 0);
}
/* Even if the lock is marked as LDLM_FL_BL_AST, this is a LDLM_CANCEL
* here and send them all as one LDLM_CANCEL rpc. */
LASSERT(list_empty(&lock->l_bl_ast));
list_add(&lock->l_bl_ast, &cancels);
- avail = ldlm_cancel_handles_avail(lock->l_conn_export);
- LASSERT(avail > 0);
-
- ns = lock->l_resource->lr_namespace;
- flags = ns_connect_lru_resize(ns) ? LDLM_CANCEL_LRUR : LDLM_CANCEL_AGED;
- count += ldlm_cancel_lru_local(ns, &cancels, 0, avail - count,
- LDLM_FL_BL_AST, flags);
- ldlm_cli_cancel_list(&cancels, count, NULL, 0, 0);
+
+ exp = lock->l_conn_export;
+ if (exp_connect_cancelset(exp)) {
+ avail = ldlm_format_handles_avail(class_exp2cliimp(exp),
+ &RQF_LDLM_CANCEL,
+ RCL_CLIENT, 0);
+ LASSERT(avail > 0);
+
+ ns = lock->l_resource->lr_namespace;
+ flags = ns_connect_lru_resize(ns) ?
+ LDLM_CANCEL_LRUR : LDLM_CANCEL_AGED;
+ count += ldlm_cancel_lru_local(ns, &cancels, 0, avail - 1,
+ LDLM_FL_BL_AST, flags);
+ }
+ ldlm_cli_cancel_list(&cancels, count, NULL, 0);
RETURN(0);
}
if (rc == LDLM_FL_LOCAL_ONLY) {
/* CANCEL RPC should not be sent to server. */
list_del_init(&lock->l_bl_ast);
- LDLM_LOCK_PUT(lock);
+ LDLM_LOCK_RELEASE(lock);
count--;
}
}
if (bl_ast > 0) {
count -= bl_ast;
- ldlm_cli_cancel_list(&head, bl_ast, NULL, 0, 0);
+ ldlm_cli_cancel_list(&head, bl_ast, NULL, 0);
}
RETURN(count);
}
-/* Return 1 if @lock should be canceled according to shrinker policy.
- * Return zero otherwise. */
-static int ldlm_cancel_shrink_policy(struct ldlm_namespace *ns,
- struct ldlm_lock *lock,
- int unused, int added,
- int asked)
-{
- int lock_cost;
- __u64 page_nr;
-
- if (lock->l_resource->lr_type == LDLM_EXTENT) {
- struct ldlm_extent *l_extent;
-
- /* For all extent locks cost is 1 + number of pages in
- * their extent. */
- l_extent = &lock->l_policy_data.l_extent;
- page_nr = (l_extent->end - l_extent->start);
- do_div(page_nr, CFS_PAGE_SIZE);
-
-#ifdef __KERNEL__
- /* XXX: In fact this is evil hack, we can't access inode
- * here. For doing it right we need somehow to have number
- * of covered by lock. This should be fixed later when 10718
- * is landed. */
- if (lock->l_ast_data != NULL) {
- struct inode *inode = lock->l_ast_data;
- if (page_nr > inode->i_mapping->nrpages)
- page_nr = inode->i_mapping->nrpages;
- }
-#endif
- lock_cost = 1 + page_nr;
- } else {
- /* For all locks which are not extent ones cost is 1 */
- lock_cost = 1;
- }
-
- /* Keep all expensive locks in lru for the memory pressure time
- * cancel policy. They anyways may be canceled by lru resize
- * pplicy if they have not small enough CLV. */
- return (lock_cost <= ns->ns_shrink_thumb);
-}
-
-/* Return 1 if @lock should be canceled according to lru resize policy.
- * Return zero otherwise. */
-static int ldlm_cancel_lrur_policy(struct ldlm_namespace *ns,
- struct ldlm_lock *lock,
- int unused, int added,
- int asked)
+/**
+ * Callback function for lru-resize policy. Makes decision whether to keep
+ * \a lock in LRU for the current LRU size \a unused, the number added in
+ * the current scan \a added, and the number of locks to be preferably
+ * canceled \a count.
+ *
+ * \retval LDLM_POLICY_KEEP_LOCK keep lock in LRU and stop scanning
+ *
+ * \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
+ */
+static ldlm_policy_res_t ldlm_cancel_lrur_policy(struct ldlm_namespace *ns,
+ struct ldlm_lock *lock,
+ int unused, int added,
+ int count)
{
cfs_time_t cur = cfs_time_current();
struct ldlm_pool *pl = &ns->ns_pool;
__u64 slv, lvf, lv;
cfs_time_t la;
- spin_lock(&pl->pl_lock);
- slv = ldlm_pool_get_slv(pl);
- lvf = atomic_read(&pl->pl_lock_volume_factor);
- spin_unlock(&pl->pl_lock);
+ /*
+ * Stop lru processing when we have reached the passed @count or
+ * checked all locks in lru.
+ */
+ if (count && added >= count)
+ return LDLM_POLICY_KEEP_LOCK;
- la = cfs_duration_sec(cfs_time_sub(cur,
+ slv = ldlm_pool_get_slv(pl);
+ lvf = ldlm_pool_get_lvf(pl);
+ la = cfs_duration_sec(cfs_time_sub(cur,
lock->l_last_used));
- /* Stop when slv is not yet come from server or
- * lv is smaller than it is. */
+ /*
+ * Stop when slv has not yet come from the server or lv is smaller
+ * than it.
+ */
lv = lvf * la * unused;
- return (slv > 1 && lv >= slv);
+
+ /*
+ * Inform pool about current CLV to see it via proc.
+ */
+ ldlm_pool_set_clv(pl, lv);
+ return (slv == 1 || lv < slv) ?
+ LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
+}
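+
+/* Worked example (illustrative numbers): with lvf == 100, a lock idle
+ * for la == 60s in an LRU holding unused == 1000 locks gets
+ * lv = 100 * 60 * 1000 = 6,000,000 and is canceled only once the
+ * server-supplied slv has dropped to that value or below. */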
+
+/**
+ * Callback function for the policy used via proc. Makes decision whether
+ * to keep \a lock in LRU for the current LRU size \a unused, the number
+ * added in the current scan \a added, and the number of locks to be
+ * preferably canceled \a count.
+ *
+ * \retval LDLM_POLICY_KEEP_LOCK keep lock in LRU and stop scanning
+ *
+ * \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
+ */
+static ldlm_policy_res_t ldlm_cancel_passed_policy(struct ldlm_namespace *ns,
+ struct ldlm_lock *lock,
+ int unused, int added,
+ int count)
+{
+ /*
+ * Stop lru processing when we have reached the passed @count or
+ * checked all locks in lru.
+ */
+ return (added >= count) ?
+ LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
}
-/* Return 1 if @lock should be canceled according to passed policy.
- * Return zero otherwise. */
-static int ldlm_cancel_passed_policy(struct ldlm_namespace *ns,
- struct ldlm_lock *lock,
- int unused, int added,
- int asked)
+/**
+ * Callback function for aged policy. Makes decision whether to keep
+ * \a lock in LRU for the current LRU size \a unused, the number added in
+ * the current scan \a added, and the number of locks to be preferably
+ * canceled \a count.
+ *
+ * \retval LDLM_POLICY_KEEP_LOCK keep lock in LRU and stop scanning
+ *
+ * \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
+ */
+static ldlm_policy_res_t ldlm_cancel_aged_policy(struct ldlm_namespace *ns,
+ struct ldlm_lock *lock,
+ int unused, int added,
+ int count)
{
- /* Do nothing here, we allow canceling all locks which
- * are passed here from upper layer logic. So that locks
- * number to be canceled will be limited by @count and
- * @max in ldlm_cancel_lru_local(). */
- return 1;
+ /*
+ * Stop lru processing if a young lock is found and we have reached
+ * the passed @count.
+ */
+ return ((added >= count) &&
+ cfs_time_before(cfs_time_current(),
+ cfs_time_add(lock->l_last_used,
+ ns->ns_max_age))) ?
+ LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
}
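+
+/* E.g. with ns_max_age of one hour: once @count locks have been
+ * gathered, the scan stops at the first lock used less than an hour
+ * ago, while older locks continue to be canceled. */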
-/* Return 1 if @lock should be canceled according to aged policy.
- * Return zero otherwise. */
-static int ldlm_cancel_aged_policy(struct ldlm_namespace *ns,
- struct ldlm_lock *lock,
- int unused, int added,
- int asked)
+/**
+ * Callback function for default policy. Makes decision whether to keep
+ * \a lock in LRU for the current LRU size \a unused, the number added in
+ * the current scan \a added, and the number of locks to be preferably
+ * canceled \a count.
+ *
+ * \retval LDLM_POLICY_KEEP_LOCK keep lock in LRU and stop scanning
+ *
+ * \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
+ */
+static ldlm_policy_res_t ldlm_cancel_default_policy(struct ldlm_namespace *ns,
+ struct ldlm_lock *lock,
+ int unused, int added,
+ int count)
{
- /* Cancel old locks if reached asked limit. */
- return !((added >= asked) &&
- cfs_time_before_64(cfs_time_current(),
- cfs_time_add(lock->l_last_used,
- ns->ns_max_age)));
+ /*
+ * Stop lru processing when we have reached the passed @count or
+ * checked all locks in lru.
+ */
+ return (added >= count) ?
+ LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
}
-typedef int (*ldlm_cancel_lru_policy_t)(struct ldlm_namespace *,
- struct ldlm_lock *, int,
- int, int);
+typedef ldlm_policy_res_t (*ldlm_cancel_lru_policy_t)(struct ldlm_namespace *,
+ struct ldlm_lock *, int,
+ int, int);
static ldlm_cancel_lru_policy_t
ldlm_cancel_lru_policy(struct ldlm_namespace *ns, int flags)
{
if (ns_connect_lru_resize(ns)) {
if (flags & LDLM_CANCEL_SHRINK)
- return ldlm_cancel_shrink_policy;
+ /* We kill the passed number of old locks. */
+ return ldlm_cancel_passed_policy;
else if (flags & LDLM_CANCEL_LRUR)
return ldlm_cancel_lrur_policy;
else if (flags & LDLM_CANCEL_PASSED)
if (flags & LDLM_CANCEL_AGED)
return ldlm_cancel_aged_policy;
}
- return NULL;
+
+ return ldlm_cancel_default_policy;
}
-
+
/* - Free space in lru for @count new locks,
* redundant unused locks are canceled locally;
* - also cancel locally unused aged locks;
* the beginning of lru list);
*
* flags & LDLM_CANCEL_SHRINK - cancel not more than @count locks according to
- * memory pressre policy function.
+ * memory pressure policy function;
+ *
+ * flags & LDLM_CANCEL_AGED - cancel locks according to "aged policy".
*/
int ldlm_cancel_lru_local(struct ldlm_namespace *ns, struct list_head *cancels,
int count, int max, int cancel_flags, int flags)
{
- ldlm_cancel_lru_policy_t cancel_lru_policy_func;
- int added = 0, unused, cancel;
+ ldlm_cancel_lru_policy_t pf;
struct ldlm_lock *lock, *next;
+ int added = 0, unused;
ENTRY;
spin_lock(&ns->ns_unused_lock);
if (!ns_connect_lru_resize(ns))
count += unused - ns->ns_max_unused;
- cancel_lru_policy_func = ldlm_cancel_lru_policy(ns, flags);
-
- list_for_each_entry_safe(lock, next, &ns->ns_unused_list, l_lru) {
- /* Make sure that we skip locks being already in cancel. */
- if ((lock->l_flags & LDLM_FL_CANCELING) ||
- (lock->l_flags & LDLM_FL_BL_AST))
- continue;
+ pf = ldlm_cancel_lru_policy(ns, flags);
+ LASSERT(pf != NULL);
- /* For any flags, stop scanning if @max or passed @count is
- * reached. */
- if ((max && added >= max) || (count && added >= count))
+ while (!list_empty(&ns->ns_unused_list)) {
+ /* For any flags, stop scanning if @max is reached. */
+ if (max && added >= max)
break;
- /* Pass the lock through the policy filter and see if it
- * should stay in lru. */
- if (cancel_lru_policy_func != NULL) {
- cancel = cancel_lru_policy_func(ns, lock, unused,
- added, count);
-
- /* Take next lock for shrink policy, we need to check
- * whole list. Stop scanning for other policies. */
- if ((flags & LDLM_CANCEL_SHRINK) && !cancel)
- continue;
- else if (!cancel)
+ list_for_each_entry_safe(lock, next, &ns->ns_unused_list, l_lru){
+ /* No locks which got blocking requests. */
+ LASSERT(!(lock->l_flags & LDLM_FL_BL_AST));
+
+ /* Somebody is already doing CANCEL; no need to keep this
+ * lock in lru, do not traverse it again. */
+ if (!(lock->l_flags & LDLM_FL_CANCELING))
break;
+
+ ldlm_lock_remove_from_lru_nolock(lock);
}
+ if (&lock->l_lru == &ns->ns_unused_list)
+ break;
- if (cancels != NULL) {
- LDLM_LOCK_GET(lock); /* dropped by bl thread */
- spin_unlock(&ns->ns_unused_lock);
-
- lock_res_and_lock(lock);
- /* Check flags again under the lock. */
- if ((lock->l_flags & LDLM_FL_CANCELING) ||
- (lock->l_flags & LDLM_FL_BL_AST) ||
- (ldlm_lock_remove_from_lru(lock) == 0)) {
- /* other thread is removing lock from lru or
- * somebody is already doing CANCEL or
- * there is a blocking request which will send
- * cancel by itseft. */
- unlock_res_and_lock(lock);
- LDLM_LOCK_PUT(lock);
- spin_lock(&ns->ns_unused_lock);
- continue;
- }
- LASSERT(!lock->l_readers && !lock->l_writers);
-
- /* If we have chosen to cancel this lock voluntarily, we
- * better send cancel notification to server, so that it
- * frees appropriate state. This might lead to a race
- * where while we are doing cancel here, server is also
- * silently cancelling this lock. */
- lock->l_flags &= ~LDLM_FL_CANCEL_ON_BLOCK;
-
- /* Setting the CBPENDING flag is a little misleading,
- * but prevents an important race; namely, once
- * CBPENDING is set, the lock can accumulate no more
- * readers/writers. Since readers and writers are
- * already zero here, ldlm_lock_decref() won't see
- * this flag and call l_blocking_ast */
- lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_CANCELING;
-
- /* We can't re-add to l_lru as it confuses the
- * refcounting in ldlm_lock_remove_from_lru() if an AST
- * arrives after we drop ns_lock below. We use l_bl_ast
- * and can't use l_pending_chain as it is used both on
- * server and client nevertheless bug 5666 says it is
- * used only on server */
- LASSERT(list_empty(&lock->l_bl_ast));
- list_add(&lock->l_bl_ast, cancels);
+ LDLM_LOCK_GET(lock);
+ spin_unlock(&ns->ns_unused_lock);
+ lu_ref_add(&lock->l_reference, __FUNCTION__, cfs_current());
+
+ /* Pass the lock through the policy filter and see if it
+ * should stay in lru.
+ *
+ * Even for the shrinker policy we stop scanning if
+ * we find a lock that should stay in the cache.
+ * We should take lock age into account anyway,
+ * as a new lock is a valuable resource even if
+ * it is small in weight.
+ *
+ * That is, for the shrinker policy we drop only
+ * old locks, but additionally choose them by
+ * their weight. Big extent locks will stay in
+ * the cache. */
+ if (pf(ns, lock, unused, added, count) ==
+ LDLM_POLICY_KEEP_LOCK) {
+ lu_ref_del(&lock->l_reference,
+ __FUNCTION__, cfs_current());
+ LDLM_LOCK_RELEASE(lock);
+ spin_lock(&ns->ns_unused_lock);
+ break;
+ }
+
+ lock_res_and_lock(lock);
+ /* Check flags again under the lock. */
+ if ((lock->l_flags & LDLM_FL_CANCELING) ||
+ (ldlm_lock_remove_from_lru(lock) == 0)) {
+ /* another thread is removing the lock from lru, or
+ * somebody is already doing CANCEL, or
+ * there is a blocking request which will send
+ * cancel by itself, or the lock has been matched
+ * and is no longer unused. */
unlock_res_and_lock(lock);
+ lu_ref_del(&lock->l_reference,
+ __FUNCTION__, cfs_current());
+ LDLM_LOCK_RELEASE(lock);
spin_lock(&ns->ns_unused_lock);
+ continue;
}
+ LASSERT(!lock->l_readers && !lock->l_writers);
+
+ /* If we have chosen to cancel this lock voluntarily, we
+ * better send cancel notification to server, so that it
+ * frees appropriate state. This might lead to a race
+ * where while we are doing cancel here, server is also
+ * silently cancelling this lock. */
+ lock->l_flags &= ~LDLM_FL_CANCEL_ON_BLOCK;
+
+ /* Setting the CBPENDING flag is a little misleading,
+ * but prevents an important race; namely, once
+ * CBPENDING is set, the lock can accumulate no more
+ * readers/writers. Since readers and writers are
+ * already zero here, ldlm_lock_decref() won't see
+ * this flag and call l_blocking_ast */
+ lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_CANCELING;
+
+ /* We can't re-add to l_lru as it confuses the
+ * refcounting in ldlm_lock_remove_from_lru() if an AST
+ * arrives after we drop ns_lock below. We use l_bl_ast
+ * and can't use l_pending_chain as it is used both on
+ * server and client; nevertheless bug 5666 says it is
+ * used only on server */
+ LASSERT(list_empty(&lock->l_bl_ast));
+ list_add(&lock->l_bl_ast, cancels);
+ unlock_res_and_lock(lock);
+ lu_ref_del(&lock->l_reference, __FUNCTION__, cfs_current());
+ spin_lock(&ns->ns_unused_lock);
added++;
unused--;
}
spin_unlock(&ns->ns_unused_lock);
-
- if (cancels == NULL)
- RETURN(added);
-
RETURN(ldlm_cancel_list(cancels, added, cancel_flags));
}
* in a thread and this function will return after the thread has been
* asked to call the callback. When called with LDLM_SYNC the blocking
* callback will be performed in this function. */
-int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr, ldlm_sync_t sync,
+int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr, ldlm_sync_t sync,
int flags)
{
CFS_LIST_HEAD(cancels);
RETURN(count);
}
- /* If an error occured in ASYNC mode, or
- * this is SYNC mode, cancel the list. */
- ldlm_cli_cancel_list(&cancels, count, NULL, 0, 0);
+ /* If an error occurred in ASYNC mode, or this is SYNC mode,
+ * cancel the list. */
+ ldlm_cli_cancel_list(&cancels, count, NULL, 0);
RETURN(count);
}
/* If somebody is already doing CANCEL, or blocking ast came,
* skip this lock. */
- if (lock->l_flags & LDLM_FL_BL_AST ||
+ if (lock->l_flags & LDLM_FL_BL_AST ||
lock->l_flags & LDLM_FL_CANCELING)
continue;
RETURN(ldlm_cancel_list(cancels, count, cancel_flags));
}
-/* If @req is NULL, send CANCEL request to server with handles of locks
- * in the @cancels. If EARLY_CANCEL is not supported, send CANCEL requests
+/* If @req is NULL, send CANCEL request to server with handles of locks
+ * in the @cancels. If EARLY_CANCEL is not supported, send CANCEL requests
* separately per lock.
- * If @req is not NULL, put handles of locks in @cancels into the request
+ * If @req is not NULL, put handles of locks in @cancels into the request
* buffer.
* Destroy @cancels at the end. */
int ldlm_cli_cancel_list(struct list_head *cancels, int count,
- struct ptlrpc_request *req, int off, int flags)
+ struct ptlrpc_request *req, int flags)
{
struct ldlm_lock *lock;
int res = 0;
if (list_empty(cancels) || count == 0)
RETURN(0);
-
- /* XXX: requests (both batched and not) could be sent in parallel.
+
+ /* XXX: requests (both batched and not) could be sent in parallel.
* Usually it is enough to have just 1 RPC, but it is possible that
* there are too many locks to be cancelled in LRU or on a resource.
* It would also speed up the case when the server does not support
if (exp_connect_cancelset(lock->l_conn_export)) {
res = count;
if (req)
- ldlm_cancel_pack(req, off, cancels, count);
+ ldlm_cancel_pack(req, cancels, count);
else
res = ldlm_cli_cancel_req(lock->l_conn_export,
cancels, count,
count -= res;
ldlm_lock_list_put(cancels, l_bl_ast, res);
}
- LASSERT(list_empty(cancels));
LASSERT(count == 0);
RETURN(0);
}
RETURN(0);
}
+ LDLM_RESOURCE_ADDREF(res);
count = ldlm_cancel_resource_local(res, &cancels, policy, mode,
0, flags, opaque);
- rc = ldlm_cli_cancel_list(&cancels, count, NULL, 0, flags);
+ rc = ldlm_cli_cancel_list(&cancels, count, NULL, flags);
if (rc != ELDLM_OK)
CERROR("ldlm_cli_cancel_unused_resource: %d\n", rc);
+ LDLM_RESOURCE_DELREF(res);
ldlm_resource_putref(res);
RETURN(0);
}
ldlm_resource_getref(res);
spin_unlock(&ns->ns_hash_lock);
+ LDLM_RESOURCE_ADDREF(res);
rc = ldlm_cli_cancel_unused_resource(ns, &res->lr_name,
NULL, LCK_MINMODE,
flags, opaque);
CERROR("ldlm_cli_cancel_unused ("LPU64"): %d\n",
res->lr_name.name[0], rc);
+ LDLM_RESOURCE_DELREF(res);
spin_lock(&ns->ns_hash_lock);
tmp = tmp->next;
ldlm_resource_putref_locked(res);
RETURN(ELDLM_OK);
}
-/* join/split resource locks to/from lru list */
-int ldlm_cli_join_lru(struct ldlm_namespace *ns,
- const struct ldlm_res_id *res_id, int join)
-{
- struct ldlm_resource *res;
- struct ldlm_lock *lock, *n;
- int count = 0;
- ENTRY;
-
- LASSERT(ns_is_client(ns));
-
- res = ldlm_resource_get(ns, NULL, res_id, LDLM_EXTENT, 0);
- if (res == NULL)
- RETURN(count);
- LASSERT(res->lr_type == LDLM_EXTENT);
-
- lock_res(res);
- if (!join)
- goto split;
-
- list_for_each_entry_safe (lock, n, &res->lr_granted, l_res_link) {
- if (list_empty(&lock->l_lru) &&
- !lock->l_readers && !lock->l_writers &&
- !(lock->l_flags & LDLM_FL_LOCAL) &&
- !(lock->l_flags & LDLM_FL_CBPENDING)) {
- ldlm_lock_add_to_lru(lock);
- lock->l_flags &= ~LDLM_FL_NO_LRU;
- LDLM_DEBUG(lock, "join lock to lru");
- count++;
- }
- }
- goto unlock;
-split:
- spin_lock(&ns->ns_unused_lock);
- list_for_each_entry_safe (lock, n, &ns->ns_unused_list, l_lru) {
- if (lock->l_resource == res) {
- ldlm_lock_remove_from_lru_nolock(lock);
- lock->l_flags |= LDLM_FL_NO_LRU;
- LDLM_DEBUG(lock, "split lock from lru");
- count++;
- }
- }
- spin_unlock(&ns->ns_unused_lock);
-unlock:
- unlock_res(res);
- ldlm_resource_putref(res);
- RETURN(count);
-}
-
/* Lock iterators. */
int ldlm_resource_foreach(struct ldlm_resource *res, ldlm_iterator_t iter,
res = list_entry(tmp, struct ldlm_resource, lr_hash);
ldlm_resource_getref(res);
spin_unlock(&ns->ns_hash_lock);
+ LDLM_RESOURCE_ADDREF(res);
rc = iter(res, closure);
+ LDLM_RESOURCE_DELREF(res);
spin_lock(&ns->ns_hash_lock);
tmp = tmp->next;
ldlm_resource_putref_locked(res);
return;
}
+ LDLM_RESOURCE_ADDREF(res);
ldlm_resource_foreach(res, iter, data);
+ LDLM_RESOURCE_DELREF(res);
ldlm_resource_putref(res);
EXIT;
}
return LDLM_ITER_CONTINUE;
}
-static int replay_lock_interpret(struct ptlrpc_request *req,
+static int replay_lock_interpret(const struct lu_env *env,
+ struct ptlrpc_request *req,
struct ldlm_async_args *aa, int rc)
{
- struct ldlm_lock *lock;
- struct ldlm_reply *reply;
+ struct lustre_handle old_hash_key;
+ struct ldlm_lock *lock;
+ struct ldlm_reply *reply;
+ struct obd_export *exp;
ENTRY;
atomic_dec(&req->rq_import->imp_replay_inflight);
GOTO(out, rc);
- reply = lustre_swab_repbuf(req, DLM_LOCKREPLY_OFF, sizeof(*reply),
- lustre_swab_ldlm_reply);
- if (reply == NULL) {
- CERROR("Can't unpack ldlm_reply\n");
- GOTO (out, rc = -EPROTO);
- }
+ reply = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
+ if (reply == NULL)
+ GOTO(out, rc = -EPROTO);
lock = ldlm_handle2lock(&aa->lock_handle);
if (!lock) {
GOTO(out, rc = -ESTALE);
}
+ old_hash_key = lock->l_remote_handle;
lock->l_remote_handle = reply->lock_handle;
+
+ /* The key changed: rehash the lock in the per-export hash
+ * with the new key */
+ exp = req->rq_export;
+ if (exp && exp->exp_lock_hash)
+ lustre_hash_rehash_key(exp->exp_lock_hash, &old_hash_key,
+ &lock->l_remote_handle,
+ &lock->l_exp_hash);
+
LDLM_DEBUG(lock, "replayed lock:");
ptlrpc_import_recovery_state_machine(req->rq_import);
LDLM_LOCK_PUT(lock);
if (rc != ELDLM_OK)
ptlrpc_connect_import(req->rq_import, NULL);
-
RETURN(rc);
}
static int replay_one_lock(struct obd_import *imp, struct ldlm_lock *lock)
{
struct ptlrpc_request *req;
- struct ldlm_request *body;
- struct ldlm_reply *reply;
struct ldlm_async_args *aa;
- int buffers = 2;
- int size[3] = { sizeof(struct ptlrpc_body) };
+ struct ldlm_request *body;
int flags;
ENTRY;
else
flags = LDLM_FL_REPLAY;
- size[DLM_LOCKREQ_OFF] = sizeof(*body);
- req = ptlrpc_prep_req(imp, LUSTRE_DLM_VERSION, LDLM_ENQUEUE, 2, size,
- NULL);
- if (!req)
+ req = ptlrpc_request_alloc_pack(imp, &RQF_LDLM_ENQUEUE,
+ LUSTRE_DLM_VERSION, LDLM_ENQUEUE);
+ if (req == NULL)
RETURN(-ENOMEM);
/* We're part of recovery, so don't wait for it. */
req->rq_send_state = LUSTRE_IMP_REPLAY_LOCKS;
- body = lustre_msg_buf(req->rq_reqmsg, DLM_LOCKREQ_OFF, sizeof(*body));
+ body = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
ldlm_lock2desc(lock, &body->lock_desc);
body->lock_flags = flags;
ldlm_lock2handle(lock, &body->lock_handle[0]);
- size[DLM_LOCKREPLY_OFF] = sizeof(*reply);
if (lock->l_lvb_len != 0) {
- buffers = 3;
- size[DLM_REPLY_REC_OFF] = lock->l_lvb_len;
+ req_capsule_extend(&req->rq_pill, &RQF_LDLM_ENQUEUE_LVB);
+ req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER,
+ lock->l_lvb_len);
}
- ptlrpc_req_set_repsize(req, buffers, size);
+ ptlrpc_request_set_replen(req);
/* notify the server we've replayed all requests.
* also, we mark the request to be put on a dedicated
* queue to be processed after all request replays.
atomic_inc(&req->rq_import->imp_replay_inflight);
CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
- aa = (struct ldlm_async_args *)&req->rq_async_args;
+ aa = ptlrpc_req_async_args(req);
aa->lock_handle = body->lock_handle[0];
- req->rq_interpret_reply = replay_lock_interpret;
- ptlrpcd_add_req(req);
+ req->rq_interpret_reply = (ptlrpc_interpterer_t)replay_lock_interpret;
+ ptlrpcd_add_req(req, PSCOPE_OTHER);
RETURN(0);
}
int ldlm_replay_locks(struct obd_import *imp)
{
struct ldlm_namespace *ns = imp->imp_obd->obd_namespace;
- struct list_head list;
+ CFS_LIST_HEAD(list);
struct ldlm_lock *lock, *next;
int rc = 0;
ENTRY;
- CFS_INIT_LIST_HEAD(&list);
LASSERT(atomic_read(&imp->imp_replay_inflight) == 0);