/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
* vim:expandtab:shiftwidth=8:tabstop=8:
*
- * Copyright (C) 2002, 2003 Cluster File Systems, Inc.
- * Author: Peter Braam <braam@clusterfs.com>
- * Author: Phil Schwan <phil@clusterfs.com>
+ * GPL HEADER START
*
- * This file is part of Lustre, http://www.lustre.org.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
- * Lustre is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
*
- * Lustre is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
*
- * You should have received a copy of the GNU General Public License
- * along with Lustre; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/ldlm/ldlm_lockd.c
+ *
+ * Author: Peter Braam <braam@clusterfs.com>
+ * Author: Phil Schwan <phil@clusterfs.com>
*/
#ifndef EXPORT_SYMTAB
#define DEBUG_SUBSYSTEM S_LDLM
#ifdef __KERNEL__
-# include <linux/module.h>
-# include <linux/slab.h>
-# include <linux/init.h>
-# include <linux/wait.h>
+# include <libcfs/libcfs.h>
#else
# include <liblustre.h>
#endif
-#include <linux/lustre_dlm.h>
-#include <linux/obd_class.h>
+#include <lustre_dlm.h>
+#include <obd_class.h>
+#include <libcfs/list.h>
#include "ldlm_internal.h"
-extern kmem_cache_t *ldlm_resource_slab;
-extern kmem_cache_t *ldlm_lock_slab;
-extern struct lustre_lock ldlm_handle_lock;
-extern struct list_head ldlm_namespace_list;
-extern int (*mds_reint_p)(int offset, struct ptlrpc_request *req);
-extern int (*mds_getattr_name_p)(int offset, struct ptlrpc_request *req);
+#ifdef __KERNEL__
+static int ldlm_num_threads;
+CFS_MODULE_PARM(ldlm_num_threads, "i", int, 0444,
+ "number of DLM service threads to start");
+#endif
+
+extern cfs_mem_cache_t *ldlm_resource_slab;
+extern cfs_mem_cache_t *ldlm_lock_slab;
-static DECLARE_MUTEX(ldlm_ref_sem);
-static int ldlm_refcount = 0;
+static struct semaphore ldlm_ref_sem;
+static int ldlm_refcount;
/* LDLM state */
-static struct ldlm_state *ldlm ;
+static struct ldlm_state *ldlm_state;
-inline unsigned long round_timeout(unsigned long timeout)
+inline cfs_time_t round_timeout(cfs_time_t timeout)
+{
+ return cfs_time_seconds((int)cfs_duration_sec(cfs_time_sub(timeout, 0)) + 1);
+}
+
+/* timeout for initial callback (AST) reply (bz10399) */
+static inline unsigned int ldlm_get_rq_timeout(void)
{
- return ((timeout / HZ) + 1) * HZ;
+ /* Non-AT value */
+ unsigned int timeout = min(ldlm_timeout, obd_timeout / 3);
+
+ return timeout < 1 ? 1 : timeout;
}
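/*
 * Worked example (default tunables assumed for illustration): with
 * obd_timeout = 100s and ldlm_timeout = 20s, the initial AST reply
 * timeout is min(20, 100 / 3) = 20 seconds; the final clamp only
 * matters when the computed minimum would fall below one second.
 */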
#ifdef __KERNEL__
-/* XXX should this be per-ldlm? */
+/* w_l_spinlock protects both waiting_locks_list and expired_lock_thread */
+static spinlock_t waiting_locks_spinlock; /* BH lock (timer) */
static struct list_head waiting_locks_list;
-static spinlock_t waiting_locks_spinlock;
-static struct timer_list waiting_locks_timer;
+static cfs_timer_t waiting_locks_timer;
static struct expired_lock_thread {
- wait_queue_head_t elt_waitq;
+ cfs_waitq_t elt_waitq;
int elt_state;
+ int elt_dump;
struct list_head elt_expired_locks;
- spinlock_t elt_lock;
} expired_lock_thread;
#endif
struct ldlm_bl_pool {
spinlock_t blp_lock;
+
+ /*
+ * blp_prio_list is used for callbacks that should be handled
+ * as a priority. It is used for LDLM_FL_DISCARD_DATA requests.
+ * see bug 13843
+ */
+ struct list_head blp_prio_list;
+
+ /*
+ * blp_list is used for all other callbacks which are likely
+ * to take longer to process.
+ */
struct list_head blp_list;
- wait_queue_head_t blp_waitq;
- atomic_t blp_num_threads;
+
+ cfs_waitq_t blp_waitq;
struct completion blp_comp;
+ atomic_t blp_num_threads;
+ atomic_t blp_busy_threads;
+ int blp_min_threads;
+ int blp_max_threads;
};
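/*
 * A minimal consumer sketch (assumed shape): a blocking-callback thread
 * should drain blp_prio_list before blp_list, so that
 * LDLM_FL_DISCARD_DATA cancels are never queued behind a backlog of
 * ordinary callbacks:
 *
 *        spin_lock(&blp->blp_lock);
 *        if (!list_empty(&blp->blp_prio_list))
 *                blwi = list_entry(blp->blp_prio_list.next,
 *                                  struct ldlm_bl_work_item, blwi_entry);
 *        else if (!list_empty(&blp->blp_list))
 *                blwi = list_entry(blp->blp_list.next,
 *                                  struct ldlm_bl_work_item, blwi_entry);
 *        spin_unlock(&blp->blp_lock);
 */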
struct ldlm_bl_work_item {
struct ldlm_namespace *blwi_ns;
struct ldlm_lock_desc blwi_ld;
struct ldlm_lock *blwi_lock;
+ struct list_head blwi_head;
+ int blwi_count;
};
#ifdef __KERNEL__
{
int need_to_run;
- spin_lock_bh(&expired_lock_thread.elt_lock);
+ ENTRY;
+ spin_lock_bh(&waiting_locks_spinlock);
need_to_run = !list_empty(&expired_lock_thread.elt_expired_locks);
- spin_unlock_bh(&expired_lock_thread.elt_lock);
+ spin_unlock_bh(&waiting_locks_spinlock);
RETURN(need_to_run);
}
{
struct list_head *expired = &expired_lock_thread.elt_expired_locks;
struct l_wait_info lwi = { 0 };
- unsigned long flags;
+ int do_dump;
ENTRY;
- lock_kernel();
- kportal_daemonize("ldlm_elt");
-
- SIGNAL_MASK_LOCK(current, flags);
- sigfillset(¤t->blocked);
- RECALC_SIGPENDING;
- SIGNAL_MASK_UNLOCK(current, flags);
-
- unlock_kernel();
+ cfs_daemonize("ldlm_elt");
expired_lock_thread.elt_state = ELT_READY;
- wake_up(&expired_lock_thread.elt_waitq);
+ cfs_waitq_signal(&expired_lock_thread.elt_waitq);
while (1) {
l_wait_event(expired_lock_thread.elt_waitq,
expired_lock_thread.elt_state == ELT_TERMINATE,
&lwi);
- spin_lock_bh(&expired_lock_thread.elt_lock);
+ spin_lock_bh(&waiting_locks_spinlock);
+ if (expired_lock_thread.elt_dump) {
+ spin_unlock_bh(&waiting_locks_spinlock);
+
+ /* from waiting_locks_callback, but not in timer */
+ libcfs_debug_dumplog();
+ libcfs_run_lbug_upcall(__FILE__,
+ "waiting_locks_callback",
+ expired_lock_thread.elt_dump);
+
+ spin_lock_bh(&waiting_locks_spinlock);
+ expired_lock_thread.elt_dump = 0;
+ }
+
+ do_dump = 0;
+
while (!list_empty(expired)) {
- struct ldlm_lock *lock = list_entry(expired->next,
- struct ldlm_lock,
- l_pending_chain);
- spin_unlock_bh(&expired_lock_thread.elt_lock);
- ptlrpc_fail_export(lock->l_export);
+ struct obd_export *export;
+ struct ldlm_lock *lock;
+ lock = list_entry(expired->next, struct ldlm_lock,
+ l_pending_chain);
+ if ((void *)lock < LP_POISON + CFS_PAGE_SIZE &&
+ (void *)lock >= LP_POISON) {
+ spin_unlock_bh(&waiting_locks_spinlock);
+ CERROR("free lock on elt list %p\n", lock);
+ LBUG();
+ }
+ list_del_init(&lock->l_pending_chain);
+ if ((void *)lock->l_export < LP_POISON + CFS_PAGE_SIZE &&
+ (void *)lock->l_export >= LP_POISON) {
+ CERROR("lock with free export on elt list %p\n",
+ lock->l_export);
+ lock->l_export = NULL;
+ LDLM_ERROR(lock, "free export");
+ continue;
+ }
+ export = class_export_get(lock->l_export);
+ spin_unlock_bh(&waiting_locks_spinlock);
+
+ do_dump++;
+ class_fail_export(export);
+ class_export_put(export);
+ spin_lock_bh(&waiting_locks_spinlock);
+ }
+ spin_unlock_bh(&waiting_locks_spinlock);
- spin_lock_bh(&expired_lock_thread.elt_lock);
+ if (do_dump && obd_dump_on_eviction) {
+ CERROR("dump the log upon eviction\n");
+ libcfs_debug_dumplog();
}
- spin_unlock_bh(&expired_lock_thread.elt_lock);
if (expired_lock_thread.elt_state == ELT_TERMINATE)
break;
}
expired_lock_thread.elt_state = ELT_STOPPED;
- wake_up(&expired_lock_thread.elt_waitq);
+ cfs_waitq_signal(&expired_lock_thread.elt_waitq);
RETURN(0);
}
+static int ldlm_add_waiting_lock(struct ldlm_lock *lock);
+
+/* This is called from within a timer interrupt and cannot schedule */
static void waiting_locks_callback(unsigned long unused)
{
- struct ldlm_lock *lock;
- char str[PTL_NALFMT_SIZE];
+ struct ldlm_lock *lock, *last = NULL;
+repeat:
spin_lock_bh(&waiting_locks_spinlock);
while (!list_empty(&waiting_locks_list)) {
lock = list_entry(waiting_locks_list.next, struct ldlm_lock,
l_pending_chain);
- if (lock->l_callback_timeout > jiffies)
+ if (cfs_time_after(lock->l_callback_timeout, cfs_time_current()) ||
+ (lock->l_req_mode == LCK_GROUP))
break;
- LDLM_ERROR(lock, "lock callback timer expired: evicting client "
- "%s@%s nid "LPX64" (%s) ",
- lock->l_export->exp_client_uuid.uuid,
- lock->l_export->exp_connection->c_remote_uuid.uuid,
- lock->l_export->exp_connection->c_peer.peer_nid,
- portals_nid2str(lock->l_export->exp_connection->c_peer.peer_ni->pni_number,
- lock->l_export->exp_connection->c_peer.peer_nid,
- str));
+ if (ptlrpc_check_suspend()) {
+ /* There is a case where we talk to one MDS while holding
+ * a lock from another MDS; we can easily get here if that
+ * second MDS is being recovered, so we suspend the
+ * timeouts. bug 6019 */
+
+ LDLM_ERROR(lock, "recharge timeout: %s@%s nid %s ",
+ lock->l_export->exp_client_uuid.uuid,
+ lock->l_export->exp_connection->c_remote_uuid.uuid,
+ libcfs_nid2str(lock->l_export->exp_connection->c_peer.nid));
+
+ list_del_init(&lock->l_pending_chain);
+ spin_unlock_bh(&waiting_locks_spinlock);
+ ldlm_add_waiting_lock(lock);
+ goto repeat;
+ }
+
+ /* if the timeout overlaps the activation time of suspended
+ * timeouts, extend it to give the client a chance to reconnect */
+ if (cfs_time_before(cfs_time_sub(lock->l_callback_timeout,
+ cfs_time_seconds(obd_timeout)/2),
+ ptlrpc_suspend_wakeup_time())) {
+ LDLM_ERROR(lock, "extend timeout due to recovery: %s@%s nid %s ",
+ lock->l_export->exp_client_uuid.uuid,
+ lock->l_export->exp_connection->c_remote_uuid.uuid,
+ libcfs_nid2str(lock->l_export->exp_connection->c_peer.nid));
+
+ list_del_init(&lock->l_pending_chain);
+ spin_unlock_bh(&waiting_locks_spinlock);
+ ldlm_add_waiting_lock(lock);
+ goto repeat;
+ }
+
+ LDLM_ERROR(lock, "lock callback timer expired after %lds: "
+ "evicting client at %s ",
+ cfs_time_current_sec() - lock->l_enqueued_time.tv_sec,
+ libcfs_nid2str(
+ lock->l_export->exp_connection->c_peer.nid));
+
- spin_lock_bh(&expired_lock_thread.elt_lock);
+ last = lock;
list_del(&lock->l_pending_chain);
list_add(&lock->l_pending_chain,
&expired_lock_thread.elt_expired_locks);
- spin_unlock_bh(&expired_lock_thread.elt_lock);
- wake_up(&expired_lock_thread.elt_waitq);
}
+ if (!list_empty(&expired_lock_thread.elt_expired_locks)) {
+ if (obd_dump_on_timeout)
+ expired_lock_thread.elt_dump = __LINE__;
+
+ cfs_waitq_signal(&expired_lock_thread.elt_waitq);
+ }
+
+ /*
+ * Make sure the timer will fire again if we have any locks
+ * left.
+ */
+ if (!list_empty(&waiting_locks_list)) {
+ cfs_time_t timeout_rounded;
+ lock = list_entry(waiting_locks_list.next, struct ldlm_lock,
+ l_pending_chain);
+ timeout_rounded = (cfs_time_t)round_timeout(lock->l_callback_timeout);
+ cfs_timer_arm(&waiting_locks_timer, timeout_rounded);
+ }
spin_unlock_bh(&waiting_locks_spinlock);
}
* lock. We add it to the pending-callback chain, and schedule the lock-timeout
* timer to fire appropriately. (We round up to the next second, to avoid
* floods of timer firings during periods of high lock contention and traffic).
+ *
+ * Called with the namespace lock held.
*/
-static int ldlm_add_waiting_lock(struct ldlm_lock *lock)
+static int __ldlm_add_waiting_lock(struct ldlm_lock *lock)
{
- unsigned long timeout_rounded;
+ int timeout;
+ cfs_time_t timeout_rounded;
- spin_lock_bh(&waiting_locks_spinlock);
- if (!list_empty(&lock->l_pending_chain)) {
- LDLM_DEBUG(lock, "not re-adding to wait list");
- spin_unlock_bh(&waiting_locks_spinlock);
+ if (!list_empty(&lock->l_pending_chain))
return 0;
- }
- LDLM_DEBUG(lock, "adding to wait list");
- lock->l_callback_timeout = jiffies + (obd_timeout * HZ / 2);
+ timeout = ldlm_get_enq_timeout(lock);
+
+ lock->l_callback_timeout = cfs_time_shift(timeout);
timeout_rounded = round_timeout(lock->l_callback_timeout);
- if (timeout_rounded < waiting_locks_timer.expires ||
- !timer_pending(&waiting_locks_timer)) {
- mod_timer(&waiting_locks_timer, timeout_rounded);
+ if (cfs_time_before(timeout_rounded,
+ cfs_timer_deadline(&waiting_locks_timer)) ||
+ !cfs_timer_is_armed(&waiting_locks_timer)) {
+ cfs_timer_arm(&waiting_locks_timer, timeout_rounded);
}
+ /* if the new lock has a shorter timeout than something earlier on
+ the list, we'll wait the longer amount of time; no big deal. */
list_add_tail(&lock->l_pending_chain, &waiting_locks_list); /* FIFO */
- spin_unlock_bh(&waiting_locks_spinlock);
return 1;
}
+static int ldlm_add_waiting_lock(struct ldlm_lock *lock)
+{
+ int ret;
+
+ LASSERT(!(lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK));
+
+ spin_lock_bh(&waiting_locks_spinlock);
+ if (lock->l_destroyed) {
+ static cfs_time_t next;
+ spin_unlock_bh(&waiting_locks_spinlock);
+ LDLM_ERROR(lock, "not waiting on destroyed lock (bug 5653)");
+ if (cfs_time_after(cfs_time_current(), next)) {
+ next = cfs_time_shift(14400);
+ libcfs_debug_dumpstack(NULL);
+ }
+ return 0;
+ }
+
+ ret = __ldlm_add_waiting_lock(lock);
+ spin_unlock_bh(&waiting_locks_spinlock);
+
+ LDLM_DEBUG(lock, "%sadding to wait list",
+ ret == 0 ? "not re-" : "");
+ return ret;
+}
+
/*
* Remove a lock from the pending list, likely because it had its cancellation
* callback arrive without incident. This adjusts the lock-timeout timer if
* needed. Returns 0 if the lock wasn't pending after all, 1 if it was.
+ *
+ * Called with namespace lock held.
*/
-int ldlm_del_waiting_lock(struct ldlm_lock *lock)
+int __ldlm_del_waiting_lock(struct ldlm_lock *lock)
{
struct list_head *list_next;
- if (lock->l_export == NULL) {
- /* We don't have a "waiting locks list" on clients. */
- LDLM_DEBUG(lock, "client lock: no-op");
- return 0;
- }
-
- spin_lock_bh(&waiting_locks_spinlock);
-
- if (list_empty(&lock->l_pending_chain)) {
- spin_unlock_bh(&waiting_locks_spinlock);
- LDLM_DEBUG(lock, "wasn't waiting");
+ if (list_empty(&lock->l_pending_chain))
return 0;
- }
list_next = lock->l_pending_chain.next;
if (lock->l_pending_chain.prev == &waiting_locks_list) {
/* Removing the head of the list, adjust timer. */
if (list_next == &waiting_locks_list) {
/* No more, just cancel. */
- del_timer(&waiting_locks_timer);
+ cfs_timer_disarm(&waiting_locks_timer);
} else {
struct ldlm_lock *next;
next = list_entry(list_next, struct ldlm_lock,
l_pending_chain);
- mod_timer(&waiting_locks_timer,
- round_timeout(next->l_callback_timeout));
+ cfs_timer_arm(&waiting_locks_timer,
+ round_timeout(next->l_callback_timeout));
}
}
list_del_init(&lock->l_pending_chain);
+
+ return 1;
+}
+
+int ldlm_del_waiting_lock(struct ldlm_lock *lock)
+{
+ int ret;
+
+ if (lock->l_export == NULL) {
+ /* We don't have a "waiting locks list" on clients. */
+ LDLM_DEBUG(lock, "client lock: no-op");
+ return 0;
+ }
+
+ spin_lock_bh(&waiting_locks_spinlock);
+ ret = __ldlm_del_waiting_lock(lock);
spin_unlock_bh(&waiting_locks_spinlock);
- LDLM_DEBUG(lock, "removed");
+
+ LDLM_DEBUG(lock, "%s", ret == 0 ? "wasn't waiting" : "removed");
+ return ret;
+}
+
+/*
+ * Prolong the lock
+ *
+ * Called with namespace lock held.
+ */
+int ldlm_refresh_waiting_lock(struct ldlm_lock *lock)
+{
+ if (lock->l_export == NULL) {
+ /* We don't have a "waiting locks list" on clients. */
+ LDLM_DEBUG(lock, "client lock: no-op");
+ return 0;
+ }
+
+ spin_lock_bh(&waiting_locks_spinlock);
+
+ if (list_empty(&lock->l_pending_chain)) {
+ spin_unlock_bh(&waiting_locks_spinlock);
+ LDLM_DEBUG(lock, "wasn't waiting");
+ return 0;
+ }
+
+ __ldlm_del_waiting_lock(lock);
+ __ldlm_add_waiting_lock(lock);
+ spin_unlock_bh(&waiting_locks_spinlock);
+
+ LDLM_DEBUG(lock, "refreshed");
return 1;
}
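/*
 * Typical (assumed) caller pattern: a server-side service that is still
 * making progress for the client restarts the eviction timer instead of
 * letting the callback timeout fire:
 *
 *        if (ldlm_refresh_waiting_lock(lock))
 *                LDLM_DEBUG(lock, "eviction timer restarted");
 */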
static int ldlm_add_waiting_lock(struct ldlm_lock *lock)
{
+ LASSERT(!(lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK));
RETURN(1);
}
RETURN(0);
}
+int ldlm_refresh_waiting_lock(struct ldlm_lock *lock)
+{
+ RETURN(0);
+}
#endif /* __KERNEL__ */
-static void ldlm_failed_ast(struct ldlm_lock *lock, int rc, char *ast_type)
+static void ldlm_failed_ast(struct ldlm_lock *lock, int rc,
+ const char *ast_type)
+{
+ struct ptlrpc_connection *conn = lock->l_export->exp_connection;
+ char *str = libcfs_nid2str(conn->c_peer.nid);
+
+ LCONSOLE_ERROR_MSG(0x138, "A client on nid %s was evicted from "
+ "service %s.\n", str,
+ lock->l_export->exp_obd->obd_name);
+
+ LCONSOLE_ERROR_MSG(0x012, "Lock %s callback to %s timed out for "
+ "resource %d\n", ast_type,
+ obd_export_nid2str(lock->l_export), rc);
+
+ if (obd_dump_on_timeout)
+ libcfs_debug_dumplog();
+#ifdef __KERNEL__
+ spin_lock_bh(&waiting_locks_spinlock);
+ list_add(&lock->l_pending_chain, &expired_lock_thread.elt_expired_locks);
+ cfs_waitq_signal(&expired_lock_thread.elt_waitq);
+ spin_unlock_bh(&waiting_locks_spinlock);
+#else
+ class_fail_export(lock->l_export);
+#endif
+}
+
+static int ldlm_handle_ast_error(struct ldlm_lock *lock,
+ struct ptlrpc_request *req, int rc,
+ const char *ast_type)
+{
+ lnet_process_id_t peer = req->rq_import->imp_connection->c_peer;
+
+ if (rc == -ETIMEDOUT || rc == -EINTR || rc == -ENOTCONN) {
+ LASSERT(lock->l_export);
+ if (lock->l_export->exp_libclient) {
+ LDLM_DEBUG(lock, "%s AST to liblustre client (nid %s)"
+ " timeout, just cancelling lock", ast_type,
+ libcfs_nid2str(peer.nid));
+ ldlm_lock_cancel(lock);
+ rc = -ERESTART;
+ } else if (lock->l_flags & LDLM_FL_CANCEL) {
+ LDLM_DEBUG(lock, "%s AST timeout from nid %s, but "
+ "cancel was received (AST reply lost?)",
+ ast_type, libcfs_nid2str(peer.nid));
+ ldlm_lock_cancel(lock);
+ rc = -ERESTART;
+ } else {
+ ldlm_del_waiting_lock(lock);
+ ldlm_failed_ast(lock, rc, ast_type);
+ }
+ } else if (rc) {
+ if (rc == -EINVAL)
+ LDLM_DEBUG(lock, "client (nid %s) returned %d"
+ " from %s AST - normal race",
+ libcfs_nid2str(peer.nid),
+ req->rq_repmsg ?
+ lustre_msg_get_status(req->rq_repmsg) : -1,
+ ast_type);
+ else
+ LDLM_ERROR(lock, "client (nid %s) returned %d "
+ "from %s AST", libcfs_nid2str(peer.nid),
+ (req->rq_repmsg != NULL) ?
+ lustre_msg_get_status(req->rq_repmsg) : 0,
+ ast_type);
+ ldlm_lock_cancel(lock);
+ /* Server-side AST functions are called from ldlm_reprocess_all,
+ * which needs to be told to please restart its reprocessing. */
+ rc = -ERESTART;
+ }
+
+ return rc;
+}
+
+static int ldlm_cb_interpret(const struct lu_env *env,
+ struct ptlrpc_request *req, void *data, int rc)
+{
+ struct ldlm_cb_set_arg *arg;
+ struct ldlm_lock *lock;
+ ENTRY;
+
+ LASSERT(data != NULL);
+
+ arg = req->rq_async_args.pointer_arg[0];
+ lock = req->rq_async_args.pointer_arg[1];
+ LASSERT(lock != NULL);
+ if (rc != 0) {
+ /* If the client cancelled the lock but the cancel has not
+ * been received yet, we need to update the lvbo so the
+ * proper attributes are cached. */
+ if (rc == -EINVAL && arg->type == LDLM_BL_CALLBACK)
+ ldlm_res_lvbo_update(lock->l_resource, NULL,
+ 0, 1);
+ rc = ldlm_handle_ast_error(lock, req, rc,
+ arg->type == LDLM_BL_CALLBACK
+ ? "blocking" : "completion");
+ }
+
+ LDLM_LOCK_RELEASE(lock);
+
+ if (rc == -ERESTART)
+ atomic_set(&arg->restart, 1);
+
+ RETURN(0);
+}
+
+static inline int ldlm_bl_and_cp_ast_fini(struct ptlrpc_request *req,
+ struct ldlm_cb_set_arg *arg,
+ struct ldlm_lock *lock,
+ int instant_cancel)
{
- const struct ptlrpc_connection *conn = lock->l_export->exp_connection;
- char str[PTL_NALFMT_SIZE];
-
- CERROR("%s AST failed (%d) for res "LPU64"/"LPU64
- ", mode %s: evicting client %s@%s NID "LPX64" (%s)\n",
- ast_type, rc,
- lock->l_resource->lr_name.name[0],
- lock->l_resource->lr_name.name[1],
- ldlm_lockname[lock->l_granted_mode],
- lock->l_export->exp_client_uuid.uuid,
- conn->c_remote_uuid.uuid, conn->c_peer.peer_nid,
- portals_nid2str(conn->c_peer.peer_ni->pni_number,
- conn->c_peer.peer_nid, str));
- ptlrpc_fail_export(lock->l_export);
+ int rc = 0;
+ ENTRY;
+
+ if (unlikely(instant_cancel)) {
+ rc = ptl_send_rpc(req, 1);
+ ptlrpc_req_finished(req);
+ if (rc == 0)
+ /* If we cancelled the lock, we need to restart
+ * ldlm_reprocess_queue */
+ atomic_set(&arg->restart, 1);
+ } else {
+ LDLM_LOCK_GET(lock);
+ ptlrpc_set_add_req(arg->set, req);
+ }
+
+ RETURN(rc);
}
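/*
 * Reading of the two paths above (sketch; the noreply semantics of
 * ptl_send_rpc()'s second argument are assumed): for instant_cancel
 * (CANCEL_ON_BLOCK) locks the AST is fired one-way and the request is
 * released immediately, while all other ASTs join arg->set so that
 * ldlm_cb_interpret() can process the client's reply asynchronously.
 */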
+/*
+ * ->l_blocking_ast() method for server-side locks. This is invoked when a
+ * newly enqueued server lock conflicts with the given one.
+ *
+ * Sends a blocking AST RPC to the client owning that lock; arms the timeout
+ * timer to wait for the client's response.
+ */
int ldlm_server_blocking_ast(struct ldlm_lock *lock,
struct ldlm_lock_desc *desc,
void *data, int flag)
{
- struct ldlm_request *body;
- struct ptlrpc_request *req;
- int rc = 0, size = sizeof(*body);
+ struct ldlm_cb_set_arg *arg = data;
+ struct ldlm_request *body;
+ struct ptlrpc_request *req;
+ int instant_cancel = 0;
+ int rc = 0;
ENTRY;
- if (flag == LDLM_CB_CANCELING) {
+ if (flag == LDLM_CB_CANCELING)
/* Don't need to do anything here. */
RETURN(0);
- }
LASSERT(lock);
+ LASSERT(data != NULL);
+ if (lock->l_export->exp_obd->obd_recovering != 0) {
+ LDLM_ERROR(lock, "BUG 6063: lock collide during recovery");
+ ldlm_lock_dump(D_ERROR, lock, 0);
+ }
- l_lock(&lock->l_resource->lr_namespace->ns_lock);
+ req = ptlrpc_request_alloc_pack(lock->l_export->exp_imp_reverse,
+ &RQF_LDLM_BL_CALLBACK,
+ LUSTRE_DLM_VERSION, LDLM_BL_CALLBACK);
+ if (req == NULL)
+ RETURN(-ENOMEM);
+
+ req->rq_async_args.pointer_arg[0] = arg;
+ req->rq_async_args.pointer_arg[1] = lock;
+ req->rq_interpret_reply = ldlm_cb_interpret;
+ req->rq_no_resend = 1;
+
+ lock_res(lock->l_resource);
if (lock->l_granted_mode != lock->l_req_mode) {
/* this blocking AST will be communicated as part of the
* completion AST instead */
- l_unlock(&lock->l_resource->lr_namespace->ns_lock);
- LDLM_DEBUG(lock, "lock not granted, not sending blocking AST"); RETURN(0);
+ unlock_res(lock->l_resource);
+ ptlrpc_req_finished(req);
+ LDLM_DEBUG(lock, "lock not granted, not sending blocking AST");
+ RETURN(0);
}
if (lock->l_destroyed) {
/* What's the point? */
- l_unlock(&lock->l_resource->lr_namespace->ns_lock);
+ unlock_res(lock->l_resource);
+ ptlrpc_req_finished(req);
RETURN(0);
}
-#if 0
- if (LTIME_S(CURRENT_TIME) - lock->l_export->exp_last_request_time > 30){
- ldlm_failed_ast(lock, -ETIMEDOUT, "Not-attempted blocking");
- RETURN(-ETIMEDOUT);
- }
-#endif
-
- req = ptlrpc_prep_req(lock->l_export->exp_imp_reverse,
- LDLM_BL_CALLBACK, 1, &size, NULL);
- if (!req)
- RETURN(-ENOMEM);
+ if (lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK)
+ instant_cancel = 1;
- body = lustre_msg_buf(req->rq_reqmsg, 0, sizeof (*body));
- memcpy(&body->lock_handle1, &lock->l_remote_handle,
- sizeof(body->lock_handle1));
- memcpy(&body->lock_desc, desc, sizeof(*desc));
+ body = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
+ body->lock_handle[0] = lock->l_remote_handle;
+ body->lock_desc = *desc;
body->lock_flags |= (lock->l_flags & LDLM_AST_FLAGS);
LDLM_DEBUG(lock, "server preparing blocking AST");
- req->rq_replen = lustre_msg_size(0, NULL);
- if (lock->l_granted_mode == lock->l_req_mode)
+ ptlrpc_request_set_replen(req);
+ if (instant_cancel) {
+ unlock_res(lock->l_resource);
+ ldlm_lock_cancel(lock);
+ } else {
+ LASSERT(lock->l_granted_mode == lock->l_req_mode);
ldlm_add_waiting_lock(lock);
- l_unlock(&lock->l_resource->lr_namespace->ns_lock);
+ unlock_res(lock->l_resource);
+ }
req->rq_send_state = LUSTRE_IMP_FULL;
- req->rq_timeout = 2; /* 2 second timeout for initial AST reply */
- rc = ptlrpc_queue_wait(req);
- if (rc == -ETIMEDOUT || rc == -EINTR) {
-#ifdef __KERNEL__
- ldlm_del_waiting_lock(lock);
- ldlm_failed_ast(lock, rc, "blocking");
-#else
- /* XXX
- * Here we treat all clients as liblustre. When BLOCKING AST
- * timeout we don't evicting the client and only cancel
- * the lock.
- * restore to orignial implementation later!!!
- * XXX
- */
- CERROR("BLOCKING AST to client (nid "LPU64") timeout, "
- "simply cancel lock 0x%p\n",
- req->rq_connection->c_peer.peer_nid, lock);
- ldlm_lock_cancel(lock);
- rc = -ERESTART;
-#endif
- } else if (rc) {
- if (rc == -EINVAL)
- CDEBUG(D_DLMTRACE, "client (nid "LPU64") returned %d "
- "from blocking AST for lock %p--normal race\n",
- req->rq_connection->c_peer.peer_nid,
- req->rq_repmsg->status, lock);
- else if (rc == -ENOTCONN)
- CDEBUG(D_DLMTRACE, "client (nid "LPU64") returned %d "
- "from blocking AST for lock %p--this client was "
- "probably rebooted while it held a lock, nothing"
- " serious\n",req->rq_connection->c_peer.peer_nid,
- req->rq_repmsg->status, lock);
- else
- CDEBUG(D_ERROR, "client (nid "LPU64") returned %d "
- "from blocking AST for lock %p\n",
- req->rq_connection->c_peer.peer_nid,
- (req->rq_repmsg != NULL)?
- req->rq_repmsg->status : 0,
- lock);
- LDLM_DEBUG(lock, "client sent rc %d rq_status %d from blocking "
- "AST", rc, req->rq_status);
- ldlm_lock_cancel(lock);
- /* Server-side AST functions are called from ldlm_reprocess_all,
- * which needs to be told to please restart its reprocessing. */
- rc = -ERESTART;
- }
+ /* ptlrpc_prep_req already set timeout */
+ if (AT_OFF)
+ req->rq_timeout = ldlm_get_rq_timeout();
- ptlrpc_req_finished(req);
+ if (lock->l_export && lock->l_export->exp_nid_stats &&
+ lock->l_export->exp_nid_stats->nid_ldlm_stats)
+ lprocfs_counter_incr(lock->l_export->exp_nid_stats->nid_ldlm_stats,
+ LDLM_BL_CALLBACK - LDLM_FIRST_OPC);
- RETURN(rc);
-}
+ rc = ldlm_bl_and_cp_ast_fini(req, arg, lock, instant_cancel);
-/* XXX copied from ptlrpc/service.c */
-static long timeval_sub(struct timeval *large, struct timeval *small)
-{
- return (large->tv_sec - small->tv_sec) * 1000000 +
- (large->tv_usec - small->tv_usec);
+ RETURN(rc);
}
int ldlm_server_completion_ast(struct ldlm_lock *lock, int flags, void *data)
{
- struct ldlm_request *body;
- struct ptlrpc_request *req;
- struct timeval granted_time;
- long total_enqueue_wait;
- int rc = 0, size = sizeof(*body);
+ struct ldlm_cb_set_arg *arg = data;
+ struct ldlm_request *body;
+ struct ptlrpc_request *req;
+ struct timeval granted_time;
+ long total_enqueue_wait;
+ int instant_cancel = 0;
+ int rc = 0;
ENTRY;
- if (lock == NULL) {
- LBUG();
- RETURN(-EINVAL);
- }
+ LASSERT(lock != NULL);
+ LASSERT(data != NULL);
do_gettimeofday(&granted_time);
- total_enqueue_wait = timeval_sub(&granted_time, &lock->l_enqueued_time);
+ total_enqueue_wait = cfs_timeval_sub(&granted_time,
+ &lock->l_enqueued_time, NULL);
- if (total_enqueue_wait / 1000000 > obd_timeout)
- LDLM_ERROR(lock, "enqueue wait took %ldus", total_enqueue_wait);
+ if (total_enqueue_wait / ONE_MILLION > obd_timeout)
+ /* non-fatal with AT - change to LDLM_DEBUG? */
+ LDLM_ERROR(lock, "enqueue wait took %luus from "CFS_TIME_T,
+ total_enqueue_wait, lock->l_enqueued_time.tv_sec);
- req = ptlrpc_prep_req(lock->l_export->exp_imp_reverse,
- LDLM_CP_CALLBACK, 1, &size, NULL);
- if (!req)
+ req = ptlrpc_request_alloc(lock->l_export->exp_imp_reverse,
+ &RQF_LDLM_CP_CALLBACK);
+ if (req == NULL)
RETURN(-ENOMEM);
- body = lustre_msg_buf(req->rq_reqmsg, 0, sizeof (*body));
- memcpy(&body->lock_handle1, &lock->l_remote_handle,
- sizeof(body->lock_handle1));
+ lock_res_and_lock(lock);
+ if (lock->l_resource->lr_lvb_len)
+ req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_CLIENT,
+ lock->l_resource->lr_lvb_len);
+ unlock_res_and_lock(lock);
+
+ rc = ptlrpc_request_pack(req, LUSTRE_DLM_VERSION, LDLM_CP_CALLBACK);
+ if (rc) {
+ ptlrpc_request_free(req);
+ RETURN(rc);
+ }
+
+ req->rq_async_args.pointer_arg[0] = arg;
+ req->rq_async_args.pointer_arg[1] = lock;
+ req->rq_interpret_reply = ldlm_cb_interpret;
+ req->rq_no_resend = 1;
+ body = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
+
+ body->lock_handle[0] = lock->l_remote_handle;
body->lock_flags = flags;
ldlm_lock2desc(lock, &body->lock_desc);
+ if (lock->l_resource->lr_lvb_len) {
+ void *lvb = req_capsule_client_get(&req->rq_pill, &RMF_DLM_LVB);
+
+ lock_res_and_lock(lock);
+ memcpy(lvb, lock->l_resource->lr_lvb_data,
+ lock->l_resource->lr_lvb_len);
+ unlock_res_and_lock(lock);
+ }
LDLM_DEBUG(lock, "server preparing completion AST (after %ldus wait)",
total_enqueue_wait);
- req->rq_replen = lustre_msg_size(0, NULL);
+
+ /* Server-side enqueue wait time estimate, used in
+ __ldlm_add_waiting_lock to set future enqueue timers */
+ at_add(&lock->l_resource->lr_namespace->ns_at_estimate,
+ total_enqueue_wait / ONE_MILLION);
+
+ ptlrpc_request_set_replen(req);
req->rq_send_state = LUSTRE_IMP_FULL;
- req->rq_timeout = 2; /* 2 second timeout for initial AST reply */
+ /* ptlrpc_prep_req already set timeout */
+ if (AT_OFF)
+ req->rq_timeout = ldlm_get_rq_timeout();
/* We only send real blocking ASTs after the lock is granted */
- l_lock(&lock->l_resource->lr_namespace->ns_lock);
+ lock_res_and_lock(lock);
if (lock->l_flags & LDLM_FL_AST_SENT) {
body->lock_flags |= LDLM_FL_AST_SENT;
- ldlm_add_waiting_lock(lock); /* start the lock-timeout clock */
+
+ /* We might get here before ldlm_handle_enqueue has set the
+ * LDLM_FL_CANCEL_ON_BLOCK flag. Then we will put this lock
+ * into the waiting list, but that is safe: similar code in
+ * ldlm_handle_enqueue will still call ldlm_lock_cancel(),
+ * which not only cancels the lock but also removes it from
+ * the waiting list */
+ if (lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK) {
+ unlock_res_and_lock(lock);
+ ldlm_lock_cancel(lock);
+ instant_cancel = 1;
+ lock_res_and_lock(lock);
+ } else {
+ /* start the lock-timeout clock */
+ ldlm_add_waiting_lock(lock);
+ }
}
- l_unlock(&lock->l_resource->lr_namespace->ns_lock);
+ unlock_res_and_lock(lock);
+
+ if (lock->l_export && lock->l_export->exp_nid_stats &&
+ lock->l_export->exp_nid_stats->nid_ldlm_stats)
+ lprocfs_counter_incr(lock->l_export->exp_nid_stats->nid_ldlm_stats,
+ LDLM_CP_CALLBACK - LDLM_FIRST_OPC);
+
+ rc = ldlm_bl_and_cp_ast_fini(req, arg, lock, instant_cancel);
+
+ RETURN(rc);
+}
+
+int ldlm_server_glimpse_ast(struct ldlm_lock *lock, void *data)
+{
+ struct ldlm_resource *res = lock->l_resource;
+ struct ldlm_request *body;
+ struct ptlrpc_request *req;
+ int rc;
+ ENTRY;
+
+ LASSERT(lock != NULL);
+
+ req = ptlrpc_request_alloc_pack(lock->l_export->exp_imp_reverse,
+ &RQF_LDLM_GL_CALLBACK,
+ LUSTRE_DLM_VERSION, LDLM_GL_CALLBACK);
+
+ if (req == NULL)
+ RETURN(-ENOMEM);
+
+ body = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
+ body->lock_handle[0] = lock->l_remote_handle;
+ ldlm_lock2desc(lock, &body->lock_desc);
+
+ lock_res_and_lock(lock);
+ req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER,
+ lock->l_resource->lr_lvb_len);
+ unlock_res_and_lock(lock);
+ res = lock->l_resource;
+ ptlrpc_request_set_replen(req);
+
+
+ req->rq_send_state = LUSTRE_IMP_FULL;
+ /* ptlrpc_prep_req already set timeout */
+ if (AT_OFF)
+ req->rq_timeout = ldlm_get_rq_timeout();
+
+ if (lock->l_export && lock->l_export->exp_nid_stats &&
+ lock->l_export->exp_nid_stats->nid_ldlm_stats)
+ lprocfs_counter_incr(lock->l_export->exp_nid_stats->nid_ldlm_stats,
+ LDLM_GL_CALLBACK - LDLM_FIRST_OPC);
rc = ptlrpc_queue_wait(req);
- if (rc == -ETIMEDOUT || rc == -EINTR) {
- ldlm_del_waiting_lock(lock);
- ldlm_failed_ast(lock, rc, "completion");
- } else if (rc) {
- LDLM_ERROR(lock, "client sent rc %d rq_status %d from "
- "completion AST\n", rc, req->rq_status);
- ldlm_lock_cancel(lock);
- /* Server-side AST functions are called from ldlm_reprocess_all,
- * which needs to be told to please restart its reprocessing. */
- rc = -ERESTART;
- }
+ if (rc == -ELDLM_NO_LOCK_DATA)
+ LDLM_DEBUG(lock, "lost race - client has a lock but no inode");
+ else if (rc != 0)
+ rc = ldlm_handle_ast_error(lock, req, rc, "glimpse");
+ else
+ rc = ldlm_res_lvbo_update(res, req->rq_repmsg,
+ REPLY_REC_OFF, 1);
ptlrpc_req_finished(req);
+ if (rc == -ERESTART)
+ ldlm_reprocess_all(res);
RETURN(rc);
}
-int ldlm_handle_enqueue(struct ptlrpc_request *req,
- ldlm_completion_callback completion_callback,
- ldlm_blocking_callback blocking_callback)
+#ifdef __KERNEL__
+extern unsigned long long lu_time_stamp_get(void);
+#else
+#define lu_time_stamp_get() time(NULL)
+#endif
+
+static void ldlm_svc_get_eopc(const struct ldlm_request *dlm_req,
+ struct lprocfs_stats *srv_stats)
+{
+ int lock_type = 0, op = 0;
+
+ lock_type = dlm_req->lock_desc.l_resource.lr_type;
+
+ switch (lock_type) {
+ case LDLM_PLAIN:
+ op = PTLRPC_LAST_CNTR + LDLM_PLAIN_ENQUEUE;
+ break;
+ case LDLM_EXTENT:
+ if (dlm_req->lock_flags & LDLM_FL_HAS_INTENT)
+ op = PTLRPC_LAST_CNTR + LDLM_GLIMPSE_ENQUEUE;
+ else
+ op = PTLRPC_LAST_CNTR + LDLM_EXTENT_ENQUEUE;
+ break;
+ case LDLM_FLOCK:
+ op = PTLRPC_LAST_CNTR + LDLM_FLOCK_ENQUEUE;
+ break;
+ case LDLM_IBITS:
+ op = PTLRPC_LAST_CNTR + LDLM_IBITS_ENQUEUE;
+ break;
+ default:
+ op = 0;
+ break;
+ }
+
+ if (op)
+ lprocfs_counter_incr(srv_stats, op);
+
+ return;
+}
+
+/*
+ * Main server-side entry point into LDLM. This is called by ptlrpc service
+ * threads to carry out client lock enqueueing requests.
+ */
+int ldlm_handle_enqueue0(struct ldlm_namespace *ns,
+ struct ptlrpc_request *req,
+ const struct ldlm_request *dlm_req,
+ const struct ldlm_callback_suite *cbs)
{
- struct obd_device *obddev = req->rq_export->exp_obd;
struct ldlm_reply *dlm_rep;
- struct ldlm_request *dlm_req;
- int rc, size = sizeof(*dlm_rep), cookielen = 0;
__u32 flags;
- ldlm_error_t err;
+ ldlm_error_t err = ELDLM_OK;
struct ldlm_lock *lock = NULL;
void *cookie = NULL;
+ int rc = 0;
ENTRY;
LDLM_DEBUG_NOLOCK("server-side enqueue handler START");
- dlm_req = lustre_swab_reqbuf (req, 0, sizeof (*dlm_req),
- lustre_swab_ldlm_request);
- if (dlm_req == NULL) {
- CERROR ("Can't unpack dlm_req\n");
- RETURN (-EFAULT);
+ ldlm_request_cancel(req, dlm_req, LDLM_ENQUEUE_CANCEL_OFF);
+ flags = dlm_req->lock_flags;
+
+ LASSERT(req->rq_export);
+
+ if (req->rq_rqbd->rqbd_service->srv_stats)
+ ldlm_svc_get_eopc(dlm_req,
+ req->rq_rqbd->rqbd_service->srv_stats);
+
+ if (req->rq_export && req->rq_export->exp_nid_stats &&
+ req->rq_export->exp_nid_stats->nid_ldlm_stats)
+ lprocfs_counter_incr(req->rq_export->exp_nid_stats->nid_ldlm_stats,
+ LDLM_ENQUEUE - LDLM_FIRST_OPC);
+
+ if (unlikely(dlm_req->lock_desc.l_resource.lr_type < LDLM_MIN_TYPE ||
+ dlm_req->lock_desc.l_resource.lr_type >= LDLM_MAX_TYPE)) {
+ DEBUG_REQ(D_ERROR, req, "invalid lock request type %d",
+ dlm_req->lock_desc.l_resource.lr_type);
+ GOTO(out, rc = -EFAULT);
}
- flags = dlm_req->lock_flags;
- if (dlm_req->lock_desc.l_resource.lr_type == LDLM_PLAIN &&
- (flags & LDLM_FL_HAS_INTENT)) {
- /* In this case, the reply buffer is allocated deep in
- * local_lock_enqueue by the policy function. */
- cookie = req;
- cookielen = sizeof(*req);
- } else {
- rc = lustre_pack_reply(req, 1, &size, NULL);
- if (rc) {
- CERROR("out of memory\n");
- RETURN(-ENOMEM);
+ if (unlikely(dlm_req->lock_desc.l_req_mode <= LCK_MINMODE ||
+ dlm_req->lock_desc.l_req_mode >= LCK_MAXMODE ||
+ dlm_req->lock_desc.l_req_mode &
+ (dlm_req->lock_desc.l_req_mode-1))) {
+ DEBUG_REQ(D_ERROR, req, "invalid lock request mode %d",
+ dlm_req->lock_desc.l_req_mode);
+ GOTO(out, rc = -EFAULT);
+ }
+
+ if (req->rq_export->exp_connect_flags & OBD_CONNECT_IBITS) {
+ if (unlikely(dlm_req->lock_desc.l_resource.lr_type ==
+ LDLM_PLAIN)) {
+ DEBUG_REQ(D_ERROR, req,
+ "PLAIN lock request from IBITS client?");
+ GOTO(out, rc = -EPROTO);
}
- if (dlm_req->lock_desc.l_resource.lr_type != LDLM_PLAIN) {
- cookie = &dlm_req->lock_desc.l_policy_data;
- cookielen = sizeof(ldlm_policy_data_t);
+ } else if (unlikely(dlm_req->lock_desc.l_resource.lr_type ==
+ LDLM_IBITS)) {
+ DEBUG_REQ(D_ERROR, req,
+ "IBITS lock request from unaware client?");
+ GOTO(out, rc = -EPROTO);
+ }
+
+#if 0
+ /* FIXME this makes it impossible to use LDLM_PLAIN locks -- check
+ against server's _CONNECT_SUPPORTED flags? (I don't want to use
+ ibits for mgc/mgs) */
+
+ /* INODEBITS_INTEROP: Perform conversion from plain lock to
+ * inodebits lock if client does not support them. */
+ if (!(req->rq_export->exp_connect_flags & OBD_CONNECT_IBITS) &&
+ (dlm_req->lock_desc.l_resource.lr_type == LDLM_PLAIN)) {
+ dlm_req->lock_desc.l_resource.lr_type = LDLM_IBITS;
+ dlm_req->lock_desc.l_policy_data.l_inodebits.bits =
+ MDS_INODELOCK_LOOKUP | MDS_INODELOCK_UPDATE;
+ if (dlm_req->lock_desc.l_req_mode == LCK_PR)
+ dlm_req->lock_desc.l_req_mode = LCK_CR;
+ }
+#endif
+
+ if (unlikely(flags & LDLM_FL_REPLAY)) {
+ /* Find an existing lock in the per-export lock hash */
+ lock = lustre_hash_lookup(req->rq_export->exp_lock_hash,
+ (void *)&dlm_req->lock_handle[0]);
+ if (lock != NULL) {
+ DEBUG_REQ(D_DLMTRACE, req, "found existing lock cookie "
+ LPX64, lock->l_handle.h_cookie);
+ GOTO(existing_lock, rc = 0);
}
}
/* The lock's callback data might be set in the policy function */
- lock = ldlm_lock_create(obddev->obd_namespace,
- &dlm_req->lock_handle2,
- dlm_req->lock_desc.l_resource.lr_name,
+ lock = ldlm_lock_create(ns, &dlm_req->lock_desc.l_resource.lr_name,
dlm_req->lock_desc.l_resource.lr_type,
dlm_req->lock_desc.l_req_mode,
- blocking_callback, completion_callback, NULL);
+ cbs, NULL, 0);
+
if (!lock)
- GOTO(out, err = -ENOMEM);
+ GOTO(out, rc = -ENOMEM);
do_gettimeofday(&lock->l_enqueued_time);
- memcpy(&lock->l_remote_handle, &dlm_req->lock_handle1,
- sizeof(lock->l_remote_handle));
+ lock->l_remote_handle = dlm_req->lock_handle[0];
LDLM_DEBUG(lock, "server-side enqueue handler, new lock created");
- LASSERT(req->rq_export);
+ OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_ENQUEUE_BLOCKED, obd_timeout * 2);
+ /* Don't enqueue a lock onto the export if it has already
+ * been evicted. Cancel it now instead. (bug 3822) */
+ if (req->rq_export->exp_failed) {
+ LDLM_ERROR(lock, "lock on destroyed export %p", req->rq_export);
+ GOTO(out, rc = -ENOTCONN);
+ }
lock->l_export = class_export_get(req->rq_export);
- l_lock(&lock->l_resource->lr_namespace->ns_lock);
- list_add(&lock->l_export_chain,
- &lock->l_export->exp_ldlm_data.led_held_locks);
- l_unlock(&lock->l_resource->lr_namespace->ns_lock);
- err = ldlm_lock_enqueue(obddev->obd_namespace, &lock, cookie, cookielen,
- &flags);
+ if (lock->l_export->exp_lock_hash)
+ lustre_hash_add(lock->l_export->exp_lock_hash,
+ &lock->l_remote_handle,
+ &lock->l_exp_hash);
+
+existing_lock:
+
+ if (flags & LDLM_FL_HAS_INTENT) {
+ /* In this case, the reply buffer is allocated deep in
+ * local_lock_enqueue by the policy function. */
+ cookie = req;
+ } else {
+ lock_res_and_lock(lock);
+ if (lock->l_resource->lr_lvb_len) {
+ req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB,
+ RCL_SERVER,
+ lock->l_resource->lr_lvb_len);
+ }
+ unlock_res_and_lock(lock);
+
+ if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_ENQUEUE_EXTENT_ERR))
+ GOTO(out, rc = -ENOMEM);
+
+ rc = req_capsule_server_pack(&req->rq_pill);
+ if (rc)
+ GOTO(out, rc);
+ }
+
+ if (dlm_req->lock_desc.l_resource.lr_type != LDLM_PLAIN)
+ lock->l_policy_data = dlm_req->lock_desc.l_policy_data;
+ if (dlm_req->lock_desc.l_resource.lr_type == LDLM_EXTENT)
+ lock->l_req_extent = lock->l_policy_data.l_extent;
+
+ err = ldlm_lock_enqueue(ns, &lock, cookie, (int *)&flags);
if (err)
GOTO(out, err);
- dlm_rep = lustre_msg_buf(req->rq_repmsg, 0, sizeof (*dlm_rep));
+ dlm_rep = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
dlm_rep->lock_flags = flags;
+ ldlm_lock2desc(lock, &dlm_rep->lock_desc);
ldlm_lock2handle(lock, &dlm_rep->lock_handle);
- if (dlm_req->lock_desc.l_resource.lr_type != LDLM_PLAIN) {
- memcpy(&dlm_rep->lock_policy_data, &lock->l_policy_data,
- cookielen);
- }
- if (dlm_rep->lock_flags & LDLM_FL_LOCK_CHANGED) {
- memcpy(&dlm_rep->lock_resource_name, &lock->l_resource->lr_name,
- sizeof(dlm_rep->lock_resource_name));
- dlm_rep->lock_mode = lock->l_req_mode;
- }
/* We never send a blocking AST until the lock is granted, but
* we can tell it right now */
- l_lock(&lock->l_resource->lr_namespace->ns_lock);
- if (lock->l_flags & LDLM_FL_AST_SENT)
+ lock_res_and_lock(lock);
+
+ /* Now take into account flags to be inherited from original lock
+ request both in reply to client and in our own lock flags. */
+ dlm_rep->lock_flags |= dlm_req->lock_flags & LDLM_INHERIT_FLAGS;
+ lock->l_flags |= dlm_req->lock_flags & LDLM_INHERIT_FLAGS;
+
+ /* Don't move a pending lock onto the export if it has already
+ * been evicted. Cancel it now instead. (bug 5683) */
+ if (unlikely(req->rq_export->exp_failed ||
+ OBD_FAIL_CHECK(OBD_FAIL_LDLM_ENQUEUE_OLD_EXPORT))) {
+ LDLM_ERROR(lock, "lock on destroyed export %p", req->rq_export);
+ rc = -ENOTCONN;
+ } else if (lock->l_flags & LDLM_FL_AST_SENT) {
dlm_rep->lock_flags |= LDLM_FL_AST_SENT;
- l_unlock(&lock->l_resource->lr_namespace->ns_lock);
+ if (lock->l_granted_mode == lock->l_req_mode) {
+ /*
+ * Only cancel the lock if it was granted, because it would
+ * be destroyed immediately and would never be granted
+ * in the future, causing timeouts on the client. A lock
+ * that was not granted will be cancelled immediately after
+ * the completion AST is sent.
+ */
+ if (dlm_rep->lock_flags & LDLM_FL_CANCEL_ON_BLOCK) {
+ unlock_res_and_lock(lock);
+ ldlm_lock_cancel(lock);
+ lock_res_and_lock(lock);
+ } else
+ ldlm_add_waiting_lock(lock);
+ }
+ }
+ /* Make sure we never ever grant usual metadata locks to liblustre
+ clients */
+ if ((dlm_req->lock_desc.l_resource.lr_type == LDLM_PLAIN ||
+ dlm_req->lock_desc.l_resource.lr_type == LDLM_IBITS) &&
+ req->rq_export->exp_libclient) {
+ if (unlikely(!(lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK) ||
+ !(dlm_rep->lock_flags & LDLM_FL_CANCEL_ON_BLOCK))){
+ CERROR("Granting sync lock to libclient. "
+ "req fl %d, rep fl %d, lock fl %d\n",
+ dlm_req->lock_flags, dlm_rep->lock_flags,
+ lock->l_flags);
+ LDLM_ERROR(lock, "sync lock");
+ if (dlm_req->lock_flags & LDLM_FL_HAS_INTENT) {
+ struct ldlm_intent *it;
+
+ it = req_capsule_client_get(&req->rq_pill,
+ &RMF_LDLM_INTENT);
+ if (it != NULL) {
+ CERROR("This is intent %s ("LPU64")\n",
+ ldlm_it2str(it->opc), it->opc);
+ }
+ }
+ }
+ }
+
+ unlock_res_and_lock(lock);
EXIT;
out:
- req->rq_status = err;
+ req->rq_status = rc ?: err; /* return either error - bug 11190 */
+ if (!req->rq_packed_final) {
+ err = lustre_pack_reply(req, 1, NULL, NULL);
+ if (rc == 0)
+ rc = err;
+ }
/* The LOCK_CHANGED code in ldlm_lock_enqueue depends on this
* ldlm_reprocess_all. If this moves, revisit that code. -phil */
if (lock) {
LDLM_DEBUG(lock, "server-side enqueue handler, sending reply"
- "(err=%d)", err);
+ "(err=%d, rc=%d)", err, rc);
+
+ lock_res_and_lock(lock);
+ if (rc == 0) {
+ if (lock->l_resource->lr_lvb_len > 0) {
+ void *lvb;
+
+ lvb = req_capsule_server_get(&req->rq_pill,
+ &RMF_DLM_LVB);
+ LASSERTF(lvb != NULL, "req %p, lock %p\n",
+ req, lock);
+
+ memcpy(lvb, lock->l_resource->lr_lvb_data,
+ lock->l_resource->lr_lvb_len);
+ }
+ } else {
+ ldlm_resource_unlink_lock(lock);
+ ldlm_lock_destroy_nolock(lock);
+ }
+ unlock_res_and_lock(lock);
+
if (!err && dlm_req->lock_desc.l_resource.lr_type != LDLM_FLOCK)
ldlm_reprocess_all(lock->l_resource);
- LDLM_LOCK_PUT(lock);
+
+ LDLM_LOCK_RELEASE(lock);
}
- LDLM_DEBUG_NOLOCK("server-side enqueue handler END (lock %p)", lock);
- return 0;
+ LDLM_DEBUG_NOLOCK("server-side enqueue handler END (lock %p, rc %d)",
+ lock, rc);
+
+ return rc;
}
-int ldlm_handle_convert(struct ptlrpc_request *req)
+int ldlm_handle_enqueue(struct ptlrpc_request *req,
+ ldlm_completion_callback completion_callback,
+ ldlm_blocking_callback blocking_callback,
+ ldlm_glimpse_callback glimpse_callback)
{
struct ldlm_request *dlm_req;
+ struct ldlm_callback_suite cbs = {
+ .lcs_completion = completion_callback,
+ .lcs_blocking = blocking_callback,
+ .lcs_glimpse = glimpse_callback
+ };
+ int rc;
+
+ dlm_req = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
+ if (dlm_req != NULL) {
+ rc = ldlm_handle_enqueue0(req->rq_export->exp_obd->obd_namespace,
+ req, dlm_req, &cbs);
+ } else {
+ rc = -EFAULT;
+ }
+ return rc;
+}
+
+int ldlm_handle_convert0(struct ptlrpc_request *req,
+ const struct ldlm_request *dlm_req)
+{
struct ldlm_reply *dlm_rep;
struct ldlm_lock *lock;
- int rc, size = sizeof(*dlm_rep);
+ int rc;
ENTRY;
- dlm_req = lustre_swab_reqbuf (req, 0, sizeof (*dlm_req),
- lustre_swab_ldlm_request);
- if (dlm_req == NULL) {
- CERROR ("Can't unpack dlm_req\n");
- RETURN (-EFAULT);
- }
+ if (req->rq_export && req->rq_export->exp_nid_stats &&
+ req->rq_export->exp_nid_stats->nid_ldlm_stats)
+ lprocfs_counter_incr(req->rq_export->exp_nid_stats->nid_ldlm_stats,
+ LDLM_CONVERT - LDLM_FIRST_OPC);
- rc = lustre_pack_reply(req, 1, &size, NULL);
- if (rc) {
- CERROR("out of memory\n");
- RETURN(-ENOMEM);
- }
- dlm_rep = lustre_msg_buf(req->rq_repmsg, 0, sizeof (*dlm_rep));
+ rc = req_capsule_server_pack(&req->rq_pill);
+ if (rc)
+ RETURN(rc);
+
+ dlm_rep = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
dlm_rep->lock_flags = dlm_req->lock_flags;
- lock = ldlm_handle2lock(&dlm_req->lock_handle1);
+ lock = ldlm_handle2lock(&dlm_req->lock_handle[0]);
if (!lock) {
req->rq_status = EINVAL;
} else {
+ void *res = NULL;
+
LDLM_DEBUG(lock, "server-side convert handler START");
- ldlm_lock_convert(lock, dlm_req->lock_desc.l_req_mode,
- &dlm_rep->lock_flags);
- if (ldlm_del_waiting_lock(lock))
- CDEBUG(D_DLMTRACE, "converted waiting lock %p\n", lock);
- req->rq_status = 0;
+
+ do_gettimeofday(&lock->l_enqueued_time);
+ res = ldlm_lock_convert(lock, dlm_req->lock_desc.l_req_mode,
+ &dlm_rep->lock_flags);
+ if (res) {
+ if (ldlm_del_waiting_lock(lock))
+ LDLM_DEBUG(lock, "converted waiting lock");
+ req->rq_status = 0;
+ } else {
+ req->rq_status = EDEADLOCK;
+ }
}
if (lock) {
- ldlm_reprocess_all(lock->l_resource);
+ if (!req->rq_status)
+ ldlm_reprocess_all(lock->l_resource);
LDLM_DEBUG(lock, "server-side convert handler END");
LDLM_LOCK_PUT(lock);
} else
RETURN(0);
}
-int ldlm_handle_cancel(struct ptlrpc_request *req)
+int ldlm_handle_convert(struct ptlrpc_request *req)
{
+ int rc;
struct ldlm_request *dlm_req;
+
+ dlm_req = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
+ if (dlm_req != NULL) {
+ rc = ldlm_handle_convert0(req, dlm_req);
+ } else {
+ CERROR ("Can't unpack dlm_req\n");
+ rc = -EFAULT;
+ }
+ return rc;
+}
+
+/* Cancel all the locks whose handles are packed into the ldlm_request */
+int ldlm_request_cancel(struct ptlrpc_request *req,
+ const struct ldlm_request *dlm_req, int first)
+{
+ struct ldlm_resource *res, *pres = NULL;
struct ldlm_lock *lock;
- char str[PTL_NALFMT_SIZE];
+ int i, count, done = 0;
+ ENTRY;
+
+ count = dlm_req->lock_count ? dlm_req->lock_count : 1;
+ if (first >= count)
+ RETURN(0);
+
+ /* There is no lock on the server at replay time;
+ * skip lock cancelling to make replay tests pass. */
+ if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY)
+ RETURN(0);
+
+ LDLM_DEBUG_NOLOCK("server-side cancel handler START: %d locks, "
+ "starting at %d", count, first);
+
+ for (i = first; i < count; i++) {
+ lock = ldlm_handle2lock(&dlm_req->lock_handle[i]);
+ if (!lock) {
+ LDLM_DEBUG_NOLOCK("server-side cancel handler stale "
+ "lock (cookie "LPU64")",
+ dlm_req->lock_handle[i].cookie);
+ continue;
+ }
+
+ res = lock->l_resource;
+ done++;
+
+ if (res != pres) {
+ if (pres != NULL) {
+ ldlm_reprocess_all(pres);
+ LDLM_RESOURCE_DELREF(pres);
+ ldlm_resource_putref(pres);
+ }
+ if (res != NULL) {
+ ldlm_resource_getref(res);
+ LDLM_RESOURCE_ADDREF(res);
+ ldlm_res_lvbo_update(res, NULL, 0, 1);
+ }
+ pres = res;
+ }
+ ldlm_lock_cancel(lock);
+ LDLM_LOCK_PUT(lock);
+ }
+ if (pres != NULL) {
+ ldlm_reprocess_all(pres);
+ LDLM_RESOURCE_DELREF(pres);
+ ldlm_resource_putref(pres);
+ }
+ LDLM_DEBUG_NOLOCK("server-side cancel handler END");
+ RETURN(done);
+}
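/*
 * Usage note: ldlm_handle_enqueue0() calls this with
 * LDLM_ENQUEUE_CANCEL_OFF so that cancel handles piggy-backed on an
 * enqueue are processed before the new lock is granted, while
 * ldlm_handle_cancel() passes first = 0 to cancel every handle in the
 * request.
 */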
+
+int ldlm_handle_cancel(struct ptlrpc_request *req)
+{
+ struct ldlm_request *dlm_req;
int rc;
ENTRY;
- dlm_req = lustre_swab_reqbuf (req, 0, sizeof (*dlm_req),
- lustre_swab_ldlm_request);
+ dlm_req = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
if (dlm_req == NULL) {
- CERROR("bad request buffer for cancel\n");
+ CDEBUG(D_INFO, "bad request buffer for cancel\n");
RETURN(-EFAULT);
}
- rc = lustre_pack_reply(req, 0, NULL, NULL);
- if (rc) {
- CERROR("out of memory\n");
- RETURN(-ENOMEM);
- }
+ if (req->rq_export && req->rq_export->exp_nid_stats &&
+ req->rq_export->exp_nid_stats->nid_ldlm_stats)
+ lprocfs_counter_incr(req->rq_export->exp_nid_stats->nid_ldlm_stats,
+ LDLM_CANCEL - LDLM_FIRST_OPC);
- lock = ldlm_handle2lock(&dlm_req->lock_handle1);
- if (!lock) {
- CERROR("received cancel for unknown lock cookie "LPX64
- " from nid "LPX64" (%s)\n", dlm_req->lock_handle1.cookie,
- req->rq_connection->c_peer.peer_nid,
- portals_nid2str(req->rq_connection->c_peer.peer_ni->pni_number,
- req->rq_connection->c_peer.peer_nid, str));
- LDLM_DEBUG_NOLOCK("server-side cancel handler stale lock "
- "(cookie "LPU64")",
- dlm_req->lock_handle1.cookie);
+ rc = req_capsule_server_pack(&req->rq_pill);
+ if (rc)
+ RETURN(rc);
+
+ if (!ldlm_request_cancel(req, dlm_req, 0))
req->rq_status = ESTALE;
- } else {
- LDLM_DEBUG(lock, "server-side cancel handler START");
- ldlm_lock_cancel(lock);
- if (ldlm_del_waiting_lock(lock))
- CDEBUG(D_DLMTRACE, "cancelled waiting lock %p\n", lock);
- req->rq_status = 0;
- }
if (ptlrpc_reply(req) != 0)
LBUG();
- if (lock) {
- ldlm_reprocess_all(lock->l_resource);
- LDLM_DEBUG(lock, "server-side cancel handler END");
- LDLM_LOCK_PUT(lock);
- }
-
RETURN(0);
}
-static void ldlm_handle_bl_callback(struct ldlm_namespace *ns,
- struct ldlm_lock_desc *ld,
- struct ldlm_lock *lock)
+void ldlm_handle_bl_callback(struct ldlm_namespace *ns,
+ struct ldlm_lock_desc *ld, struct ldlm_lock *lock)
{
int do_ast;
ENTRY;
- l_lock(&ns->ns_lock);
LDLM_DEBUG(lock, "client blocking AST callback handler START");
+ lock_res_and_lock(lock);
lock->l_flags |= LDLM_FL_CBPENDING;
+
+ if (lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK)
+ lock->l_flags |= LDLM_FL_CANCEL;
+
do_ast = (!lock->l_readers && !lock->l_writers);
+ unlock_res_and_lock(lock);
if (do_ast) {
LDLM_DEBUG(lock, "already unused, calling "
"callback (%p)", lock->l_blocking_ast);
- if (lock->l_blocking_ast != NULL) {
- l_unlock(&ns->ns_lock);
- l_check_no_ns_lock(ns);
+ if (lock->l_blocking_ast != NULL)
lock->l_blocking_ast(lock, ld, lock->l_ast_data,
LDLM_CB_BLOCKING);
- l_lock(&ns->ns_lock);
- }
} else {
LDLM_DEBUG(lock, "Lock still has references, will be"
" cancelled later");
}
LDLM_DEBUG(lock, "client blocking callback handler END");
- l_unlock(&ns->ns_lock);
- LDLM_LOCK_PUT(lock);
+ LDLM_LOCK_RELEASE(lock);
EXIT;
}
struct ldlm_request *dlm_req,
struct ldlm_lock *lock)
{
- LIST_HEAD(ast_list);
+ CFS_LIST_HEAD(ast_list);
ENTRY;
- l_lock(&ns->ns_lock);
- LDLM_DEBUG(lock, "client completion callback handler START");
+ LDLM_DEBUG(lock, "client completion callback handler START");
+
+ if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_BL_CB_RACE)) {
+ int to = cfs_time_seconds(1);
+ while (to > 0) {
+ cfs_schedule_timeout(CFS_TASK_INTERRUPTIBLE, to);
+ if (lock->l_granted_mode == lock->l_req_mode ||
+ lock->l_destroyed)
+ break;
+ }
+ }
+
+ lock_res_and_lock(lock);
+ if (lock->l_destroyed ||
+ lock->l_granted_mode == lock->l_req_mode) {
+ /* bug 11300: the lock has already been granted */
+ unlock_res_and_lock(lock);
+ LDLM_DEBUG(lock, "Double grant race happened");
+ LDLM_LOCK_RELEASE(lock);
+ EXIT;
+ return;
+ }
/* If we receive the completion AST before the actual enqueue returned,
* then we might need to switch lock modes, resources, or extents. */
lock->l_req_mode = dlm_req->lock_desc.l_granted_mode;
LDLM_DEBUG(lock, "completion AST, new lock mode");
}
- if (lock->l_resource->lr_type != LDLM_PLAIN)
- memcpy(&lock->l_policy_data, &dlm_req->lock_desc.l_policy_data,
- sizeof(lock->l_policy_data));
+
+ if (lock->l_resource->lr_type != LDLM_PLAIN) {
+ lock->l_policy_data = dlm_req->lock_desc.l_policy_data;
+ LDLM_DEBUG(lock, "completion AST, new policy data");
+ }
ldlm_resource_unlink_lock(lock);
if (memcmp(&dlm_req->lock_desc.l_resource.lr_name,
&lock->l_resource->lr_name,
sizeof(lock->l_resource->lr_name)) != 0) {
- ldlm_lock_change_resource(ns, lock,
- dlm_req->lock_desc.l_resource.lr_name);
+ unlock_res_and_lock(lock);
+ if (ldlm_lock_change_resource(ns, lock,
+ &dlm_req->lock_desc.l_resource.lr_name) != 0) {
+ LDLM_ERROR(lock, "Failed to allocate resource");
+ LDLM_LOCK_RELEASE(lock);
+ EXIT;
+ return;
+ }
LDLM_DEBUG(lock, "completion AST, new resource");
+ CERROR("change resource!\n");
+ lock_res_and_lock(lock);
}
if (dlm_req->lock_flags & LDLM_FL_AST_SENT) {
- lock->l_flags |= LDLM_FL_CBPENDING;
+ /* BL_AST locks are not needed in the LRU.
+ * Let ldlm_cancel_lru() be fast. */
+ ldlm_lock_remove_from_lru(lock);
+ lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_BL_AST;
LDLM_DEBUG(lock, "completion AST includes blocking AST");
}
- lock->l_resource->lr_tmp = &ast_list;
- ldlm_grant_lock(lock, req, sizeof(*req), 1);
- lock->l_resource->lr_tmp = NULL;
+ if (lock->l_lvb_len) {
+ if (req_capsule_get_size(&req->rq_pill, &RMF_DLM_LVB,
+ RCL_CLIENT) < lock->l_lvb_len) {
+ LDLM_ERROR(lock, "completion AST did not contain "
+ "expected LVB!");
+ } else {
+ void *lvb = req_capsule_client_swab_get(&req->rq_pill,
+ &RMF_DLM_LVB,
+ (void *)lock->l_lvb_swabber);
+ memcpy(lock->l_lvb_data, lvb, lock->l_lvb_len);
+ }
+ }
+
+ ldlm_grant_lock(lock, &ast_list);
+ unlock_res_and_lock(lock);
+
LDLM_DEBUG(lock, "callback handler finished, about to run_ast_work");
- l_unlock(&ns->ns_lock);
- LDLM_LOCK_PUT(lock);
- ldlm_run_ast_work(ns, &ast_list);
+ ldlm_run_ast_work(&ast_list, LDLM_WORK_CP_AST);
LDLM_DEBUG_NOLOCK("client completion callback handler END (lock %p)",
lock);
+ LDLM_LOCK_RELEASE(lock);
+ EXIT;
+}
+
+static void ldlm_handle_gl_callback(struct ptlrpc_request *req,
+ struct ldlm_namespace *ns,
+ struct ldlm_request *dlm_req,
+ struct ldlm_lock *lock)
+{
+ int rc = -ENOSYS;
+ ENTRY;
+
+ LDLM_DEBUG(lock, "client glimpse AST callback handler");
+
+ if (lock->l_glimpse_ast != NULL)
+ rc = lock->l_glimpse_ast(lock, req);
+
+ if (req->rq_repmsg != NULL) {
+ ptlrpc_reply(req);
+ } else {
+ req->rq_status = rc;
+ ptlrpc_error(req);
+ }
+
+ lock_res_and_lock(lock);
+ if (lock->l_granted_mode == LCK_PW &&
+ !lock->l_readers && !lock->l_writers &&
+ cfs_time_after(cfs_time_current(),
+ cfs_time_add(lock->l_last_used,
+ cfs_time_seconds(10)))) {
+ unlock_res_and_lock(lock);
+ if (ldlm_bl_to_thread_lock(ns, NULL, lock))
+ ldlm_handle_bl_callback(ns, NULL, lock);
+
+ EXIT;
+ return;
+ }
+ unlock_res_and_lock(lock);
+ LDLM_LOCK_RELEASE(lock);
EXIT;
}
static int ldlm_callback_reply(struct ptlrpc_request *req, int rc)
{
+ if (req->rq_no_reply)
+ return 0;
+
req->rq_status = rc;
- rc = lustre_pack_reply(req, 0, NULL, NULL);
- if (rc)
- return rc;
+ if (!req->rq_packed_final) {
+ rc = lustre_pack_reply(req, 1, NULL, NULL);
+ if (rc)
+ return rc;
+ }
return ptlrpc_reply(req);
}
#ifdef __KERNEL__
-static int ldlm_bl_to_thread(struct ldlm_state *ldlm, struct ldlm_namespace *ns,
- struct ldlm_lock_desc *ld, struct ldlm_lock *lock)
+static int ldlm_bl_to_thread(struct ldlm_namespace *ns,
+ struct ldlm_lock_desc *ld, struct ldlm_lock *lock,
+ struct list_head *cancels, int count)
{
- struct ldlm_bl_pool *blp = ldlm->ldlm_bl_pool;
+ struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool;
struct ldlm_bl_work_item *blwi;
ENTRY;
+ if (cancels && count == 0)
+ RETURN(0);
+
OBD_ALLOC(blwi, sizeof(*blwi));
if (blwi == NULL)
RETURN(-ENOMEM);
blwi->blwi_ns = ns;
- blwi->blwi_ld = *ld;
- blwi->blwi_lock = lock;
-
+ if (ld != NULL)
+ blwi->blwi_ld = *ld;
+ if (count) {
+ list_add(&blwi->blwi_head, cancels);
+ list_del_init(cancels);
+ blwi->blwi_count = count;
+ } else {
+ blwi->blwi_lock = lock;
+ }
spin_lock(&blp->blp_lock);
- list_add_tail(&blwi->blwi_entry, &blp->blp_list);
- wake_up(&blp->blp_waitq);
+ if (lock && lock->l_flags & LDLM_FL_DISCARD_DATA) {
+ /* add LDLM_FL_DISCARD_DATA requests to the priority list */
+ list_add_tail(&blwi->blwi_entry, &blp->blp_prio_list);
+ } else {
+ /* other blocking callbacks are added to the regular list */
+ list_add_tail(&blwi->blwi_entry, &blp->blp_list);
+ }
+ cfs_waitq_signal(&blp->blp_waitq);
spin_unlock(&blp->blp_lock);
RETURN(0);
}
#endif
+int ldlm_bl_to_thread_lock(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
+ struct ldlm_lock *lock)
+{
+#ifdef __KERNEL__
+ RETURN(ldlm_bl_to_thread(ns, ld, lock, NULL, 0));
+#else
+ RETURN(-ENOSYS);
+#endif
+}
+
+int ldlm_bl_to_thread_list(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
+ struct list_head *cancels, int count)
+{
+#ifdef __KERNEL__
+ RETURN(ldlm_bl_to_thread(ns, ld, NULL, cancels, count));
+#else
+ RETURN(-ENOSYS);
+#endif
+}
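/*
 * Example (assumed caller pattern): hand a batch of collected locks to
 * the blocking threads rather than cancelling them in the current
 * context, falling back to inline handling if queueing fails:
 *
 *        CFS_LIST_HEAD(cancels);
 *        (collect `count' locks onto &cancels)
 *        if (ldlm_bl_to_thread_list(ns, NULL, &cancels, count) != 0)
 *                (cancel the collected locks in the current thread)
 */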
+
+/* TODO: handle requests in a similar way as MDT: see mdt_handle_common() */
static int ldlm_callback_handler(struct ptlrpc_request *req)
{
struct ldlm_namespace *ns;
struct ldlm_request *dlm_req;
struct ldlm_lock *lock;
- char str[PTL_NALFMT_SIZE];
int rc;
ENTRY;
/* Requests arrive in sender's byte order. The ptlrpc service
* handler has already verified and swabbed the incoming request
* message body, but I am responsible for the message buffers. */
+ /* do nothing for sec context finalize */
+ if (lustre_msg_get_opc(req->rq_reqmsg) == SEC_CTX_FINI)
+ RETURN(0);
+
+ req_capsule_init(&req->rq_pill, req, RCL_SERVER);
+
if (req->rq_export == NULL) {
struct ldlm_request *dlm_req;
- CDEBUG(D_RPCTRACE, "operation %d from nid "LPX64" (%s) with bad "
- "export cookie "LPX64" (ptl req %d/rep %d); this is "
+ CDEBUG(D_RPCTRACE, "operation %d from %s with bad "
+ "export cookie "LPX64"; this is "
"normal if this node rebooted with a lock held\n",
- req->rq_reqmsg->opc, req->rq_connection->c_peer.peer_nid,
- portals_nid2str(req->rq_connection->c_peer.peer_ni->pni_number,
- req->rq_connection->c_peer.peer_nid, str),
- req->rq_reqmsg->handle.cookie,
- req->rq_request_portal, req->rq_reply_portal);
-
- dlm_req = lustre_swab_reqbuf(req, 0, sizeof (*dlm_req),
- lustre_swab_ldlm_request);
+ lustre_msg_get_opc(req->rq_reqmsg),
+ libcfs_id2str(req->rq_peer),
+ lustre_msg_get_handle(req->rq_reqmsg)->cookie);
+
+ req_capsule_set(&req->rq_pill, &RQF_LDLM_CALLBACK);
+ dlm_req = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
if (dlm_req != NULL)
CDEBUG(D_RPCTRACE, "--> lock cookie: "LPX64"\n",
- dlm_req->lock_handle1.cookie);
+ dlm_req->lock_handle[0].cookie);
ldlm_callback_reply(req, -ENOTCONN);
RETURN(0);
}
- if (req->rq_reqmsg->opc == LDLM_BL_CALLBACK) {
- OBD_FAIL_RETURN(OBD_FAIL_LDLM_BL_CALLBACK, 0);
- } else if (req->rq_reqmsg->opc == LDLM_CP_CALLBACK) {
- OBD_FAIL_RETURN(OBD_FAIL_LDLM_CP_CALLBACK, 0);
- } else if (req->rq_reqmsg->opc == OBD_LOG_CANCEL) {
- OBD_FAIL_RETURN(OBD_FAIL_OBD_LOG_CANCEL_NET, 0);
- } else if (req->rq_reqmsg->opc == LLOG_ORIGIN_HANDLE_CREATE) {
- OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
- } else if (req->rq_reqmsg->opc == LLOG_ORIGIN_HANDLE_NEXT_BLOCK) {
- OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
- } else if (req->rq_reqmsg->opc == LLOG_ORIGIN_HANDLE_READ_HEADER) {
- OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
- } else if (req->rq_reqmsg->opc == LLOG_ORIGIN_HANDLE_CLOSE) {
- OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
- } else {
- ldlm_callback_reply(req, -EPROTO);
- RETURN(0);
- }
-
LASSERT(req->rq_export != NULL);
LASSERT(req->rq_export->exp_obd != NULL);
-#ifdef ENABLE_ORPHANS
- /* FIXME - how to send reply */
- if (req->rq_reqmsg->opc == OBD_LOG_CANCEL) {
- int rc = llog_origin_handle_cancel(req);
+ switch (lustre_msg_get_opc(req->rq_reqmsg)) {
+ case LDLM_BL_CALLBACK:
+ if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_BL_CALLBACK))
+ RETURN(0);
+ break;
+ case LDLM_CP_CALLBACK:
+ if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CP_CALLBACK))
+ RETURN(0);
+ break;
+ case LDLM_GL_CALLBACK:
+ if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_GL_CALLBACK))
+ RETURN(0);
+ break;
+ case OBD_LOG_CANCEL: /* remove this eventually - for 1.4.0 compat */
+ req_capsule_set(&req->rq_pill, &RQF_LOG_CANCEL);
+ if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOG_CANCEL_NET))
+ RETURN(0);
+ rc = llog_origin_handle_cancel(req);
+ if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOG_CANCEL_REP))
+ RETURN(0);
ldlm_callback_reply(req, rc);
RETURN(0);
- }
-
- if (req->rq_reqmsg->opc == LLOG_ORIGIN_HANDLE_CREATE) {
- int rc = llog_origin_handle_create(req);
- req->rq_status = rc;
- ptlrpc_reply(req);
+ case OBD_QC_CALLBACK:
+ req_capsule_set(&req->rq_pill, &RQF_QC_CALLBACK);
+ if (OBD_FAIL_CHECK(OBD_FAIL_OBD_QC_CALLBACK_NET))
+ RETURN(0);
+ rc = target_handle_qc_callback(req);
+ ldlm_callback_reply(req, rc);
RETURN(0);
- }
-
- if (req->rq_reqmsg->opc == LLOG_ORIGIN_HANDLE_NEXT_BLOCK) {
- int rc = llog_origin_handle_next_block(req);
- req->rq_status = rc;
- ptlrpc_reply(req);
+ case QUOTA_DQACQ:
+ case QUOTA_DQREL:
+ /* reply in handler */
+ req_capsule_set(&req->rq_pill, &RQF_MDS_QUOTA_DQACQ);
+ rc = target_handle_dqacq_callback(req);
RETURN(0);
- }
-
- if (req->rq_reqmsg->opc == LLOG_ORIGIN_HANDLE_READ_HEADER) {
- int rc = llog_origin_handle_read_header(req);
- req->rq_status = rc;
- ptlrpc_reply(req);
+ case LLOG_ORIGIN_HANDLE_CREATE:
+ req_capsule_set(&req->rq_pill, &RQF_LLOG_ORIGIN_HANDLE_CREATE);
+ if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOGD_NET))
+ RETURN(0);
+ rc = llog_origin_handle_create(req);
+ ldlm_callback_reply(req, rc);
RETURN(0);
- }
-
- if (req->rq_reqmsg->opc == LLOG_ORIGIN_HANDLE_CLOSE) {
- int rc = llog_origin_handle_close(req);
+ case LLOG_ORIGIN_HANDLE_NEXT_BLOCK:
+ req_capsule_set(&req->rq_pill,
+ &RQF_LLOG_ORIGIN_HANDLE_NEXT_BLOCK);
+ if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOGD_NET))
+ RETURN(0);
+ rc = llog_origin_handle_next_block(req);
+ ldlm_callback_reply(req, rc);
+ RETURN(0);
+ case LLOG_ORIGIN_HANDLE_READ_HEADER:
+ req_capsule_set(&req->rq_pill,
+ &RQF_LLOG_ORIGIN_HANDLE_READ_HEADER);
+ if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOGD_NET))
+ RETURN(0);
+ rc = llog_origin_handle_read_header(req);
+ ldlm_callback_reply(req, rc);
+ RETURN(0);
+ case LLOG_ORIGIN_HANDLE_CLOSE:
+ if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOGD_NET))
+ RETURN(0);
+ rc = llog_origin_handle_close(req);
ldlm_callback_reply(req, rc);
RETURN(0);
+ default:
+ CERROR("unknown opcode %u\n",
+ lustre_msg_get_opc(req->rq_reqmsg));
+ ldlm_callback_reply(req, -EPROTO);
+ RETURN(0);
}
-#endif
+
ns = req->rq_export->exp_obd->obd_namespace;
LASSERT(ns != NULL);
- dlm_req = lustre_swab_reqbuf (req, 0, sizeof (*dlm_req),
- lustre_swab_ldlm_request);
+ req_capsule_set(&req->rq_pill, &RQF_LDLM_CALLBACK);
+
+ dlm_req = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
if (dlm_req == NULL) {
- CERROR ("can't unpack dlm_req\n");
- ldlm_callback_reply (req, -EPROTO);
- RETURN (0);
+ ldlm_callback_reply(req, -EPROTO);
+ RETURN(0);
+ }
+
+ /* Force a known safe race, send a cancel to the server for a lock
+ * which the server has already started a blocking callback on. */
+ if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_BL_CB_RACE) &&
+ lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK) {
+ rc = ldlm_cli_cancel(&dlm_req->lock_handle[0]);
+ if (rc < 0)
+ CERROR("ldlm_cli_cancel: %d\n", rc);
}
- lock = ldlm_handle2lock_ns(ns, &dlm_req->lock_handle1);
+ lock = ldlm_handle2lock_long(&dlm_req->lock_handle[0], 0);
if (!lock) {
- CDEBUG(D_INODE, "callback on lock "LPX64" - lock disappeared\n",
- dlm_req->lock_handle1.cookie);
+ CDEBUG(D_DLMTRACE, "callback on lock "LPX64" - lock "
+ "disappeared\n", dlm_req->lock_handle[0].cookie);
ldlm_callback_reply(req, -EINVAL);
RETURN(0);
}
/* Copy hints/flags (e.g. LDLM_FL_DISCARD_DATA) from AST. */
+ lock_res_and_lock(lock);
lock->l_flags |= (dlm_req->lock_flags & LDLM_AST_FLAGS);
+ if (lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK) {
+ /* If somebody cancels locks and cache is already dropped,
+ * we can tell the server we have no lock. Otherwise, we
+ * should send the cancel after dropping the cache. */
+ if ((lock->l_flags & LDLM_FL_CANCELING) &&
+ (lock->l_flags & LDLM_FL_BL_DONE)) {
+ LDLM_DEBUG(lock, "callback on lock "
+ LPX64" - lock disappeared\n",
+ dlm_req->lock_handle[0].cookie);
+ unlock_res_and_lock(lock);
+ LDLM_LOCK_RELEASE(lock);
+ ldlm_callback_reply(req, -EINVAL);
+ RETURN(0);
+ }
+ /* BL_AST locks are not needed in the LRU.
+ * Removing them keeps ldlm_cancel_lru() fast. */
+ ldlm_lock_remove_from_lru(lock);
+ lock->l_flags |= LDLM_FL_BL_AST;
+ }
+ unlock_res_and_lock(lock);
/* We want the ost thread to get this reply so that it can respond
* to ost requests (write cache writeback) that might be triggered
* in the sendmsg.
*
* But we'd also like to be able to indicate in the reply that we're
* cancelling right now, because it's unused, or have an intent result
* in the reply, so we might have to push the responsibility for sending
* the reply down into the AST handlers, alas. */
- if (req->rq_reqmsg->opc != LDLM_BL_CALLBACK)
- ldlm_callback_reply(req, 0);
- switch (req->rq_reqmsg->opc) {
+ switch (lustre_msg_get_opc(req->rq_reqmsg)) {
case LDLM_BL_CALLBACK:
CDEBUG(D_INODE, "blocking ast\n");
-#ifdef __KERNEL__
- rc = ldlm_bl_to_thread(ldlm, ns, &dlm_req->lock_desc, lock);
- ldlm_callback_reply(req, rc);
-#else
- rc = 0;
- ldlm_callback_reply(req, rc);
- ldlm_handle_bl_callback(ns, &dlm_req->lock_desc, lock);
-#endif
+ req_capsule_extend(&req->rq_pill, &RQF_LDLM_BL_CALLBACK);
+ if (!(lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK))
+ ldlm_callback_reply(req, 0);
+ if (ldlm_bl_to_thread_lock(ns, &dlm_req->lock_desc, lock))
+ ldlm_handle_bl_callback(ns, &dlm_req->lock_desc, lock);
break;
case LDLM_CP_CALLBACK:
CDEBUG(D_INODE, "completion ast\n");
+ req_capsule_extend(&req->rq_pill, &RQF_LDLM_CP_CALLBACK);
+ ldlm_callback_reply(req, 0);
ldlm_handle_cp_callback(req, ns, dlm_req, lock);
break;
+ case LDLM_GL_CALLBACK:
+ CDEBUG(D_INODE, "glimpse ast\n");
+ req_capsule_extend(&req->rq_pill, &RQF_LDLM_GL_CALLBACK);
+ ldlm_handle_gl_callback(req, ns, dlm_req, lock);
+ break;
default:
LBUG(); /* checked above */
}
/* Requests arrive in sender's byte order. The ptlrpc service
* handler has already verified and swabbed the incoming request
* message body, but I am responsible for the message buffers. */
+ req_capsule_init(&req->rq_pill, req, RCL_SERVER);
+
if (req->rq_export == NULL) {
struct ldlm_request *dlm_req;
- CERROR("operation %d with bad export (ptl req %d/rep %d)\n",
- req->rq_reqmsg->opc, req->rq_request_portal,
- req->rq_reply_portal);
- CERROR("--> export cookie: "LPX64"\n",
- req->rq_reqmsg->handle.cookie);
- dlm_req = lustre_swab_reqbuf(req, 0, sizeof (*dlm_req),
- lustre_swab_ldlm_request);
+
+ CERROR("operation %d from %s with bad export cookie "LPU64"\n",
+ lustre_msg_get_opc(req->rq_reqmsg),
+ libcfs_id2str(req->rq_peer),
+ lustre_msg_get_handle(req->rq_reqmsg)->cookie);
+
+ req_capsule_set(&req->rq_pill, &RQF_LDLM_CALLBACK);
+ dlm_req = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
if (dlm_req != NULL)
- ldlm_lock_dump_handle(D_ERROR, &dlm_req->lock_handle1);
- RETURN(-ENOTCONN);
+ ldlm_lock_dump_handle(D_ERROR,
+ &dlm_req->lock_handle[0]);
+ ldlm_callback_reply(req, -ENOTCONN);
+ RETURN(0);
}
- switch (req->rq_reqmsg->opc) {
+ switch (lustre_msg_get_opc(req->rq_reqmsg)) {
/* XXX FIXME move this back to mds/handler.c, bug 249 */
case LDLM_CANCEL:
+ req_capsule_set(&req->rq_pill, &RQF_LDLM_CANCEL);
CDEBUG(D_INODE, "cancel\n");
- OBD_FAIL_RETURN(OBD_FAIL_LDLM_CANCEL, 0);
+ if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL))
+ RETURN(0);
rc = ldlm_handle_cancel(req);
if (rc)
break;
RETURN(0);
-
+ case OBD_LOG_CANCEL:
+ req_capsule_set(&req->rq_pill, &RQF_LOG_CANCEL);
+ if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOG_CANCEL_NET))
+ RETURN(0);
+ rc = llog_origin_handle_cancel(req);
+ if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOG_CANCEL_REP))
+ RETURN(0);
+ ldlm_callback_reply(req, rc);
+ RETURN(0);
default:
- CERROR("invalid opcode %d\n", req->rq_reqmsg->opc);
- RETURN(-EINVAL);
+ CERROR("invalid opcode %d\n",
+ lustre_msg_get_opc(req->rq_reqmsg));
+ req_capsule_set(&req->rq_pill, &RQF_LDLM_CALLBACK);
+ ldlm_callback_reply(req, -EINVAL);
}
RETURN(0);
}
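+/* Hash iterator: queue a revoke AST for every granted PLAIN/IBITS lock
+ * of an export that does not already have an AST in flight, taking a
+ * reference on each lock queued to rpc_list. */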
+void ldlm_revoke_lock_cb(void *obj, void *data)
+{
+ struct list_head *rpc_list = data;
+ struct ldlm_lock *lock = obj;
+
+ lock_res_and_lock(lock);
+
+ if (lock->l_req_mode != lock->l_granted_mode) {
+ unlock_res_and_lock(lock);
+ return;
+ }
+
+ LASSERT(lock->l_resource);
+ if (lock->l_resource->lr_type != LDLM_IBITS &&
+ lock->l_resource->lr_type != LDLM_PLAIN) {
+ unlock_res_and_lock(lock);
+ return;
+ }
+
+ if (lock->l_flags & LDLM_FL_AST_SENT) {
+ unlock_res_and_lock(lock);
+ return;
+ }
+
+ LASSERT(lock->l_blocking_ast);
+ LASSERT(!lock->l_blocking_lock);
+
+ lock->l_flags |= LDLM_FL_AST_SENT;
+ if (lock->l_export && lock->l_export->exp_lock_hash)
+ lustre_hash_del(lock->l_export->exp_lock_hash,
+ &lock->l_remote_handle, &lock->l_exp_hash);
+ list_add_tail(&lock->l_rk_ast, rpc_list);
+ LDLM_LOCK_GET(lock);
+
+ unlock_res_and_lock(lock);
+}
+
+void ldlm_revoke_export_locks(struct obd_export *exp)
+{
+ struct list_head rpc_list;
+ ENTRY;
+
+ CFS_INIT_LIST_HEAD(&rpc_list);
+ lustre_hash_for_each_empty(exp->exp_lock_hash,
+ ldlm_revoke_lock_cb, &rpc_list);
+ ldlm_run_ast_work(&rpc_list, LDLM_WORK_REVOKE_AST);
+
+ EXIT;
+}
+
#ifdef __KERNEL__
static struct ldlm_bl_work_item *ldlm_bl_get_work(struct ldlm_bl_pool *blp)
{
struct ldlm_bl_work_item *blwi = NULL;
+ static unsigned int num_bl = 0;
spin_lock(&blp->blp_lock);
- if (!list_empty(&blp->blp_list)) {
- blwi = list_entry(blp->blp_list.next, struct ldlm_bl_work_item,
- blwi_entry);
+ /* Take a request from the regular blp_list at least once for every
+ * blp_num_threads requests taken overall, so that priority work
+ * (LDLM_FL_DISCARD_DATA) cannot starve ordinary blocking callbacks. */
+ if (!list_empty(&blp->blp_list) &&
+ (list_empty(&blp->blp_prio_list) || num_bl == 0))
+ blwi = list_entry(blp->blp_list.next,
+ struct ldlm_bl_work_item, blwi_entry);
+ else if (!list_empty(&blp->blp_prio_list))
+ blwi = list_entry(blp->blp_prio_list.next,
+ struct ldlm_bl_work_item, blwi_entry);
+
+ if (blwi) {
+ if (++num_bl >= atomic_read(&blp->blp_num_threads))
+ num_bl = 0;
list_del(&blwi->blwi_entry);
}
spin_unlock(&blp->blp_lock);
return blwi;
}
+/* This only contains temporary data until the thread starts */
struct ldlm_bl_thread_data {
- int bltd_num;
+ char bltd_name[CFS_CURPROC_COMM_MAX];
struct ldlm_bl_pool *bltd_blp;
+ struct completion bltd_comp;
+ int bltd_num;
};
+static int ldlm_bl_thread_main(void *arg);
+
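+/* Start one blocking-callback thread. bltd lives on this stack frame,
+ * so wait for the new thread to signal bltd_comp before returning. */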
+static int ldlm_bl_thread_start(struct ldlm_bl_pool *blp)
+{
+ struct ldlm_bl_thread_data bltd = { .bltd_blp = blp };
+ int rc;
+
+ init_completion(&bltd.bltd_comp);
+ rc = cfs_kernel_thread(ldlm_bl_thread_main, &bltd, 0);
+ if (rc < 0) {
+ CERROR("cannot start LDLM thread ldlm_bl_%02d: rc %d\n",
+ atomic_read(&blp->blp_num_threads), rc);
+ return rc;
+ }
+ wait_for_completion(&bltd.bltd_comp);
+
+ return 0;
+}
+
static int ldlm_bl_thread_main(void *arg)
{
- struct ldlm_bl_thread_data *bltd = arg;
- struct ldlm_bl_pool *blp = bltd->bltd_blp;
- unsigned long flags;
+ struct ldlm_bl_pool *blp;
ENTRY;
- /* XXX boiler-plate */
{
- char name[sizeof(current->comm)];
- snprintf(name, sizeof(name) - 1, "ldlm_bl_%02d",
- bltd->bltd_num);
- kportal_daemonize(name);
- }
- SIGNAL_MASK_LOCK(current, flags);
- sigfillset(¤t->blocked);
- RECALC_SIGPENDING;
- SIGNAL_MASK_UNLOCK(current, flags);
+ struct ldlm_bl_thread_data *bltd = arg;
- atomic_inc(&blp->blp_num_threads);
- complete(&blp->blp_comp);
+ blp = bltd->bltd_blp;
+
+ bltd->bltd_num = atomic_inc_return(&blp->blp_num_threads) - 1;
+ atomic_inc(&blp->blp_busy_threads);
+
+ snprintf(bltd->bltd_name, sizeof(bltd->bltd_name) - 1,
+ "ldlm_bl_%02d", bltd->bltd_num);
+ cfs_daemonize(bltd->bltd_name);
+
+ complete(&bltd->bltd_comp);
+ /* cannot use bltd after this, it is only on caller's stack */
+ }
- while(1) {
+ while (1) {
struct l_wait_info lwi = { 0 };
struct ldlm_bl_work_item *blwi = NULL;
- l_wait_event_exclusive(blp->blp_waitq,
- (blwi = ldlm_bl_get_work(blp)) != NULL,
- &lwi);
+ blwi = ldlm_bl_get_work(blp);
- if (blwi->blwi_ns == NULL)
- break;
+ if (blwi == NULL) {
+ int busy;
+
+ atomic_dec(&blp->blp_busy_threads);
+ l_wait_event_exclusive(blp->blp_waitq,
+ (blwi = ldlm_bl_get_work(blp)) != NULL,
+ &lwi);
+ busy = atomic_inc_return(&blp->blp_busy_threads);
- ldlm_handle_bl_callback(blwi->blwi_ns, &blwi->blwi_ld,
- blwi->blwi_lock);
+ if (blwi->blwi_ns == NULL)
+ /* added by ldlm_cleanup() */
+ break;
+
+ /* Not fatal if racy and we end up with a few too many threads */
+ if (unlikely(busy < blp->blp_max_threads &&
+ busy >= atomic_read(&blp->blp_num_threads)))
+ /* discard the return value, we tried */
+ ldlm_bl_thread_start(blp);
+ } else {
+ if (blwi->blwi_ns == NULL)
+ /* added by ldlm_cleanup() */
+ break;
+ }
+
+ if (blwi->blwi_count) {
+ /* Special case: LRU locks cancelled asynchronously arrive
+ * here as a list. Each lock is already marked
+ * LDLM_FL_CANCELING and cancelled locally; only the
+ * server-side cancels remain to be sent. */
+ ldlm_cli_cancel_list(&blwi->blwi_head,
+ blwi->blwi_count, NULL, 0);
+ } else {
+ ldlm_handle_bl_callback(blwi->blwi_ns, &blwi->blwi_ld,
+ blwi->blwi_lock);
+ }
OBD_FREE(blwi, sizeof(*blwi));
}
+ atomic_dec(&blp->blp_busy_threads);
atomic_dec(&blp->blp_num_threads);
complete(&blp->blp_comp);
RETURN(0);
}
#endif
static int ldlm_setup(void);
-static int ldlm_cleanup(int force);
+static int ldlm_cleanup(void);
int ldlm_get_ref(void)
{
int rc = 0;
- down(&ldlm_ref_sem);
+ ENTRY;
+ mutex_down(&ldlm_ref_sem);
if (++ldlm_refcount == 1) {
rc = ldlm_setup();
if (rc)
ldlm_refcount--;
}
- up(&ldlm_ref_sem);
+ mutex_up(&ldlm_ref_sem);
RETURN(rc);
}
-void ldlm_put_ref(int force)
+void ldlm_put_ref(void)
{
- down(&ldlm_ref_sem);
+ ENTRY;
+ mutex_down(&ldlm_ref_sem);
if (ldlm_refcount == 1) {
- int rc = ldlm_cleanup(force);
+ int rc = ldlm_cleanup();
if (rc)
CERROR("ldlm_cleanup failed: %d\n", rc);
else
ldlm_refcount--;
} else {
ldlm_refcount--;
}
- up(&ldlm_ref_sem);
+ mutex_up(&ldlm_ref_sem);
+
+ EXIT;
+}
+
+/*
+ * Export handle<->lock hash operations.
+ */
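+/* The hash maps a client's remote lock handle to its struct ldlm_lock;
+ * lh_get/lh_put take and drop a lock reference, so a lookup pins the
+ * lock it returns. */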
+static unsigned
+ldlm_export_lock_hash(lustre_hash_t *lh, void *key, unsigned mask)
+{
+ return lh_u64_hash(((struct lustre_handle *)key)->cookie, mask);
+}
+
+static void *
+ldlm_export_lock_key(struct hlist_node *hnode)
+{
+ struct ldlm_lock *lock;
+ ENTRY;
+
+ lock = hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
+ RETURN(&lock->l_remote_handle);
+}
+
+static int
+ldlm_export_lock_compare(void *key, struct hlist_node *hnode)
+{
+ ENTRY;
+ RETURN(lustre_handle_equal(ldlm_export_lock_key(hnode), key));
+}
+
+static void *
+ldlm_export_lock_get(struct hlist_node *hnode)
+{
+ struct ldlm_lock *lock;
+ ENTRY;
+
+ lock = hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
+ LDLM_LOCK_GET(lock);
+
+ RETURN(lock);
+}
+
+static void *
+ldlm_export_lock_put(struct hlist_node *hnode)
+{
+ struct ldlm_lock *lock;
+ ENTRY;
+
+ lock = hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
+ LDLM_LOCK_RELEASE(lock);
+
+ RETURN(lock);
+}
+
+static lustre_hash_ops_t ldlm_export_lock_ops = {
+ .lh_hash = ldlm_export_lock_hash,
+ .lh_key = ldlm_export_lock_key,
+ .lh_compare = ldlm_export_lock_compare,
+ .lh_get = ldlm_export_lock_get,
+ .lh_put = ldlm_export_lock_put
+};
+
+int ldlm_init_export(struct obd_export *exp)
+{
+ ENTRY;
+
+ exp->exp_lock_hash =
+ lustre_hash_init(obd_uuid2str(&exp->exp_client_uuid),
+ 128, 65536, &ldlm_export_lock_ops, LH_REHASH);
+
+ if (!exp->exp_lock_hash)
+ RETURN(-ENOMEM);
+ RETURN(0);
+}
+EXPORT_SYMBOL(ldlm_init_export);
+
+void ldlm_destroy_export(struct obd_export *exp)
+{
+ ENTRY;
+ lustre_hash_exit(exp->exp_lock_hash);
+ exp->exp_lock_hash = NULL;
EXIT;
}
+EXPORT_SYMBOL(ldlm_destroy_export);
static int ldlm_setup(void)
{
struct ldlm_bl_pool *blp;
int rc = 0;
+ int ldlm_min_threads = LDLM_THREADS_AUTO_MIN;
+ int ldlm_max_threads = LDLM_THREADS_AUTO_MAX;
#ifdef __KERNEL__
int i;
#endif
ENTRY;
- if (ldlm != NULL)
+ if (ldlm_state != NULL)
RETURN(-EALREADY);
- OBD_ALLOC(ldlm, sizeof(*ldlm));
- if (ldlm == NULL)
+ OBD_ALLOC(ldlm_state, sizeof(*ldlm_state));
+ if (ldlm_state == NULL)
RETURN(-ENOMEM);
-#ifdef __KERNEL__
+#ifdef LPROCFS
rc = ldlm_proc_setup();
if (rc != 0)
GOTO(out_free, rc);
#endif
- ldlm->ldlm_cb_service =
- ptlrpc_init_svc(LDLM_NEVENTS, LDLM_NBUFS, LDLM_BUFSIZE,
- LDLM_MAXREQSIZE, LDLM_CB_REQUEST_PORTAL,
- LDLM_CB_REPLY_PORTAL,
- ldlm_callback_handler, "ldlm_cbd",
- ldlm_svc_proc_dir);
+#ifdef __KERNEL__
+ if (ldlm_num_threads) {
+ /* If ldlm_num_threads is set, it is the min and the max. */
+ if (ldlm_num_threads > LDLM_THREADS_AUTO_MAX)
+ ldlm_num_threads = LDLM_THREADS_AUTO_MAX;
+ if (ldlm_num_threads < LDLM_THREADS_AUTO_MIN)
+ ldlm_num_threads = LDLM_THREADS_AUTO_MIN;
+ ldlm_min_threads = ldlm_max_threads = ldlm_num_threads;
+ }
+#endif
- if (!ldlm->ldlm_cb_service) {
+ ldlm_state->ldlm_cb_service =
+ ptlrpc_init_svc(LDLM_NBUFS, LDLM_BUFSIZE, LDLM_MAXREQSIZE,
+ LDLM_MAXREPSIZE, LDLM_CB_REQUEST_PORTAL,
+ LDLM_CB_REPLY_PORTAL, 1800,
+ ldlm_callback_handler, "ldlm_cbd",
+ ldlm_svc_proc_dir, NULL,
+ ldlm_min_threads, ldlm_max_threads,
+ "ldlm_cb",
+ LCT_MD_THREAD|LCT_DT_THREAD);
+
+ if (!ldlm_state->ldlm_cb_service) {
CERROR("failed to start service\n");
GOTO(out_proc, rc = -ENOMEM);
}
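+ /* The cancel service runs on its own portal so that lock cancels can
+ * still be processed while the callback threads are busy. */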
- ldlm->ldlm_cancel_service =
- ptlrpc_init_svc(LDLM_NEVENTS, LDLM_NBUFS, LDLM_BUFSIZE,
- LDLM_MAXREQSIZE, LDLM_CANCEL_REQUEST_PORTAL,
- LDLM_CANCEL_REPLY_PORTAL,
- ldlm_cancel_handler, "ldlm_canceld",
- ldlm_svc_proc_dir);
-
- if (!ldlm->ldlm_cancel_service) {
+ ldlm_state->ldlm_cancel_service =
+ ptlrpc_init_svc(LDLM_NBUFS, LDLM_BUFSIZE, LDLM_MAXREQSIZE,
+ LDLM_MAXREPSIZE, LDLM_CANCEL_REQUEST_PORTAL,
+ LDLM_CANCEL_REPLY_PORTAL, 6000,
+ ldlm_cancel_handler, "ldlm_canceld",
+ ldlm_svc_proc_dir, NULL,
+ ldlm_min_threads, ldlm_max_threads,
+ "ldlm_cn",
+ LCT_MD_THREAD|LCT_DT_THREAD|LCT_CL_THREAD);
+
+ if (!ldlm_state->ldlm_cancel_service) {
CERROR("failed to start service\n");
GOTO(out_proc, rc = -ENOMEM);
}
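+ /* Set up the blocking-callback pool: two work queues (regular and
+ * priority) drained by between blp_min_threads and blp_max_threads
+ * daemons. */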
OBD_ALLOC(blp, sizeof(*blp));
if (blp == NULL)
GOTO(out_proc, rc = -ENOMEM);
- ldlm->ldlm_bl_pool = blp;
+ ldlm_state->ldlm_bl_pool = blp;
- atomic_set(&blp->blp_num_threads, 0);
- init_waitqueue_head(&blp->blp_waitq);
spin_lock_init(&blp->blp_lock);
-
- INIT_LIST_HEAD(&blp->blp_list);
+ CFS_INIT_LIST_HEAD(&blp->blp_list);
+ CFS_INIT_LIST_HEAD(&blp->blp_prio_list);
+ cfs_waitq_init(&blp->blp_waitq);
+ atomic_set(&blp->blp_num_threads, 0);
+ atomic_set(&blp->blp_busy_threads, 0);
+ blp->blp_min_threads = ldlm_min_threads;
+ blp->blp_max_threads = ldlm_max_threads;
#ifdef __KERNEL__
- for (i = 0; i < LDLM_NUM_THREADS; i++) {
- struct ldlm_bl_thread_data bltd = {
- .bltd_num = i,
- .bltd_blp = blp,
- };
- init_completion(&blp->blp_comp);
- if (kernel_thread(ldlm_bl_thread_main, &bltd, 0) < 0) {
- CERROR("cannot start LDLM thread #%d: rc %d\n", i, rc);
- LBUG();
+ for (i = 0; i < blp->blp_min_threads; i++) {
+ rc = ldlm_bl_thread_start(blp);
+ if (rc < 0)
GOTO(out_thread, rc);
- }
- wait_for_completion(&blp->blp_comp);
}
- rc = ptlrpc_start_n_threads(NULL, ldlm->ldlm_cancel_service,
- LDLM_NUM_THREADS, "ldlm_cn");
- if (rc) {
- LBUG();
+ rc = ptlrpc_start_threads(NULL, ldlm_state->ldlm_cancel_service);
+ if (rc)
GOTO(out_thread, rc);
- }
- rc = ptlrpc_start_n_threads(NULL, ldlm->ldlm_cb_service,
- LDLM_NUM_THREADS, "ldlm_cb");
- if (rc) {
- LBUG();
+ rc = ptlrpc_start_threads(NULL, ldlm_state->ldlm_cb_service);
+ if (rc)
GOTO(out_thread, rc);
- }
- INIT_LIST_HEAD(&expired_lock_thread.elt_expired_locks);
- spin_lock_init(&expired_lock_thread.elt_lock);
+ CFS_INIT_LIST_HEAD(&expired_lock_thread.elt_expired_locks);
expired_lock_thread.elt_state = ELT_STOPPED;
- init_waitqueue_head(&expired_lock_thread.elt_waitq);
+ cfs_waitq_init(&expired_lock_thread.elt_waitq);
- rc = kernel_thread(expired_lock_main, NULL, CLONE_VM | CLONE_FS);
+ CFS_INIT_LIST_HEAD(&waiting_locks_list);
+ spin_lock_init(&waiting_locks_spinlock);
+ cfs_timer_init(&waiting_locks_timer, waiting_locks_callback, 0);
+
+ rc = cfs_kernel_thread(expired_lock_main, NULL, CLONE_VM | CLONE_FILES);
if (rc < 0) {
CERROR("Cannot start ldlm expired-lock thread: %d\n", rc);
GOTO(out_thread, rc);
}

wait_event(expired_lock_thread.elt_waitq,
expired_lock_thread.elt_state == ELT_READY);
-
- INIT_LIST_HEAD(&waiting_locks_list);
- spin_lock_init(&waiting_locks_spinlock);
- waiting_locks_timer.function = waiting_locks_callback;
- waiting_locks_timer.data = 0;
- init_timer(&waiting_locks_timer);
#endif
+#ifdef __KERNEL__
+ rc = ldlm_pools_init();
+ if (rc)
+ GOTO(out_thread, rc);
+#endif
RETURN(0);
#ifdef __KERNEL__
out_thread:
- ptlrpc_unregister_service(ldlm->ldlm_cancel_service);
- ptlrpc_unregister_service(ldlm->ldlm_cb_service);
+ ptlrpc_unregister_service(ldlm_state->ldlm_cancel_service);
+ ptlrpc_unregister_service(ldlm_state->ldlm_cb_service);
#endif
out_proc:
-#ifdef __KERNEL__
+#ifdef LPROCFS
ldlm_proc_cleanup();
out_free:
#endif
- OBD_FREE(ldlm, sizeof(*ldlm));
- ldlm = NULL;
+ OBD_FREE(ldlm_state, sizeof(*ldlm_state));
+ ldlm_state = NULL;
return rc;
}
-static int ldlm_cleanup(int force)
+static int ldlm_cleanup(void)
{
#ifdef __KERNEL__
- struct ldlm_bl_pool *blp = ldlm->ldlm_bl_pool;
+ struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool;
#endif
ENTRY;
- if (!list_empty(&ldlm_namespace_list)) {
+ if (!list_empty(ldlm_namespace_list(LDLM_NAMESPACE_SERVER)) ||
+ !list_empty(ldlm_namespace_list(LDLM_NAMESPACE_CLIENT))) {
CERROR("ldlm still has namespaces; clean these up first.\n");
- ldlm_dump_all_namespaces();
+ ldlm_dump_all_namespaces(LDLM_NAMESPACE_SERVER, D_DLMTRACE);
+ ldlm_dump_all_namespaces(LDLM_NAMESPACE_CLIENT, D_DLMTRACE);
RETURN(-EBUSY);
}
#ifdef __KERNEL__
+ ldlm_pools_fini();
+#endif
+
+#ifdef __KERNEL__
while (atomic_read(&blp->blp_num_threads) > 0) {
struct ldlm_bl_work_item blwi = { .blwi_ns = NULL };
spin_lock(&blp->blp_lock);
list_add_tail(&blwi.blwi_entry, &blp->blp_list);
- wake_up(&blp->blp_waitq);
+ cfs_waitq_signal(&blp->blp_waitq);
spin_unlock(&blp->blp_lock);
wait_for_completion(&blp->blp_comp);
}
OBD_FREE(blp, sizeof(*blp));
- ptlrpc_stop_all_threads(ldlm->ldlm_cb_service);
- ptlrpc_unregister_service(ldlm->ldlm_cb_service);
- ptlrpc_stop_all_threads(ldlm->ldlm_cancel_service);
- ptlrpc_unregister_service(ldlm->ldlm_cancel_service);
+ ptlrpc_unregister_service(ldlm_state->ldlm_cb_service);
+ ptlrpc_unregister_service(ldlm_state->ldlm_cancel_service);
ldlm_proc_cleanup();
expired_lock_thread.elt_state = ELT_TERMINATE;
- wake_up(&expired_lock_thread.elt_waitq);
+ cfs_waitq_signal(&expired_lock_thread.elt_waitq);
wait_event(expired_lock_thread.elt_waitq,
expired_lock_thread.elt_state == ELT_STOPPED);
-
+#else
+ ptlrpc_unregister_service(ldlm_state->ldlm_cb_service);
+ ptlrpc_unregister_service(ldlm_state->ldlm_cancel_service);
#endif
- OBD_FREE(ldlm, sizeof(*ldlm));
- ldlm = NULL;
+ OBD_FREE(ldlm_state, sizeof(*ldlm_state));
+ ldlm_state = NULL;
RETURN(0);
}
int __init ldlm_init(void)
{
- ldlm_resource_slab = kmem_cache_create("ldlm_resources",
+ init_mutex(&ldlm_ref_sem);
+ init_mutex(ldlm_namespace_lock(LDLM_NAMESPACE_SERVER));
+ init_mutex(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT));
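+ /* Three slab caches back the DLM: resources, locks, and the interval
+ * tree nodes used by extent locks. */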
+ ldlm_resource_slab = cfs_mem_cache_create("ldlm_resources",
sizeof(struct ldlm_resource), 0,
- SLAB_HWCACHE_ALIGN, NULL, NULL);
+ SLAB_HWCACHE_ALIGN);
if (ldlm_resource_slab == NULL)
return -ENOMEM;
- ldlm_lock_slab = kmem_cache_create("ldlm_locks",
+ ldlm_lock_slab = cfs_mem_cache_create("ldlm_locks",
sizeof(struct ldlm_lock), 0,
- SLAB_HWCACHE_ALIGN, NULL, NULL);
+ SLAB_HWCACHE_ALIGN);
if (ldlm_lock_slab == NULL) {
- kmem_cache_destroy(ldlm_resource_slab);
+ cfs_mem_cache_destroy(ldlm_resource_slab);
return -ENOMEM;
}
- l_lock_init(&ldlm_handle_lock);
+ ldlm_interval_slab = cfs_mem_cache_create("interval_node",
+ sizeof(struct ldlm_interval),
+ 0, SLAB_HWCACHE_ALIGN);
+ if (ldlm_interval_slab == NULL) {
+ cfs_mem_cache_destroy(ldlm_resource_slab);
+ cfs_mem_cache_destroy(ldlm_lock_slab);
+ return -ENOMEM;
+ }
return 0;
}
void __exit ldlm_exit(void)
{
- if ( ldlm_refcount )
+ int rc;
+ if (ldlm_refcount)
CERROR("ldlm_refcount is %d in ldlm_exit!\n", ldlm_refcount);
- if (kmem_cache_destroy(ldlm_resource_slab) != 0)
- CERROR("couldn't free ldlm resource slab\n");
- if (kmem_cache_destroy(ldlm_lock_slab) != 0)
- CERROR("couldn't free ldlm lock slab\n");
+ rc = cfs_mem_cache_destroy(ldlm_resource_slab);
+ LASSERTF(rc == 0, "couldn't free ldlm resource slab\n");
+ rc = cfs_mem_cache_destroy(ldlm_lock_slab);
+ LASSERTF(rc == 0, "couldn't free ldlm lock slab\n");
+ rc = cfs_mem_cache_destroy(ldlm_interval_slab);
+ LASSERTF(rc == 0, "couldn't free interval node slab\n");
}
-/* ldlm_flock.c */
-EXPORT_SYMBOL(ldlm_flock_completion_ast);
+/* ldlm_extent.c */
+EXPORT_SYMBOL(ldlm_extent_shift_kms);
/* ldlm_lock.c */
+EXPORT_SYMBOL(ldlm_get_processing_policy);
EXPORT_SYMBOL(ldlm_lock2desc);
EXPORT_SYMBOL(ldlm_register_intent);
-EXPORT_SYMBOL(ldlm_unregister_intent);
EXPORT_SYMBOL(ldlm_lockname);
EXPORT_SYMBOL(ldlm_typename);
EXPORT_SYMBOL(ldlm_lock2handle);
EXPORT_SYMBOL(__ldlm_handle2lock);
+EXPORT_SYMBOL(ldlm_lock_get);
EXPORT_SYMBOL(ldlm_lock_put);
+EXPORT_SYMBOL(ldlm_lock_fast_match);
+EXPORT_SYMBOL(ldlm_lock_fast_release);
EXPORT_SYMBOL(ldlm_lock_match);
EXPORT_SYMBOL(ldlm_lock_cancel);
EXPORT_SYMBOL(ldlm_lock_addref);
+EXPORT_SYMBOL(ldlm_lock_addref_try);
EXPORT_SYMBOL(ldlm_lock_decref);
EXPORT_SYMBOL(ldlm_lock_decref_and_cancel);
EXPORT_SYMBOL(ldlm_lock_change_resource);
EXPORT_SYMBOL(ldlm_lock_dump_handle);
EXPORT_SYMBOL(ldlm_cancel_locks_for_export);
EXPORT_SYMBOL(ldlm_reprocess_all_ns);
+EXPORT_SYMBOL(ldlm_lock_allow_match);
/* ldlm_request.c */
+EXPORT_SYMBOL(ldlm_completion_ast_async);
EXPORT_SYMBOL(ldlm_completion_ast);
+EXPORT_SYMBOL(ldlm_blocking_ast);
+EXPORT_SYMBOL(ldlm_glimpse_ast);
EXPORT_SYMBOL(ldlm_expired_completion_wait);
+EXPORT_SYMBOL(ldlm_prep_enqueue_req);
+EXPORT_SYMBOL(ldlm_prep_elc_req);
EXPORT_SYMBOL(ldlm_cli_convert);
EXPORT_SYMBOL(ldlm_cli_enqueue);
+EXPORT_SYMBOL(ldlm_cli_enqueue_fini);
+EXPORT_SYMBOL(ldlm_cli_enqueue_local);
EXPORT_SYMBOL(ldlm_cli_cancel);
EXPORT_SYMBOL(ldlm_cli_cancel_unused);
+EXPORT_SYMBOL(ldlm_cli_cancel_unused_resource);
+EXPORT_SYMBOL(ldlm_cli_cancel_req);
EXPORT_SYMBOL(ldlm_replay_locks);
EXPORT_SYMBOL(ldlm_resource_foreach);
EXPORT_SYMBOL(ldlm_namespace_foreach);
EXPORT_SYMBOL(ldlm_namespace_foreach_res);
-EXPORT_SYMBOL(ldlm_change_cbdata);
+EXPORT_SYMBOL(ldlm_resource_iterate);
+EXPORT_SYMBOL(ldlm_cancel_resource_local);
+EXPORT_SYMBOL(ldlm_cli_cancel_list);
/* ldlm_lockd.c */
EXPORT_SYMBOL(ldlm_server_blocking_ast);
EXPORT_SYMBOL(ldlm_server_completion_ast);
+EXPORT_SYMBOL(ldlm_server_glimpse_ast);
EXPORT_SYMBOL(ldlm_handle_enqueue);
+EXPORT_SYMBOL(ldlm_handle_enqueue0);
EXPORT_SYMBOL(ldlm_handle_cancel);
+EXPORT_SYMBOL(ldlm_request_cancel);
EXPORT_SYMBOL(ldlm_handle_convert);
+EXPORT_SYMBOL(ldlm_handle_convert0);
EXPORT_SYMBOL(ldlm_del_waiting_lock);
EXPORT_SYMBOL(ldlm_get_ref);
EXPORT_SYMBOL(ldlm_put_ref);
-
-#if 0
-/* ldlm_test.c */
-EXPORT_SYMBOL(ldlm_test);
-EXPORT_SYMBOL(ldlm_regression_start);
-EXPORT_SYMBOL(ldlm_regression_stop);
-#endif
+EXPORT_SYMBOL(ldlm_refresh_waiting_lock);
+EXPORT_SYMBOL(ldlm_revoke_export_locks);
/* ldlm_resource.c */
EXPORT_SYMBOL(ldlm_namespace_new);
EXPORT_SYMBOL(ldlm_namespace_cleanup);
EXPORT_SYMBOL(ldlm_namespace_free);
EXPORT_SYMBOL(ldlm_namespace_dump);
-
-/* l_lock.c */
-EXPORT_SYMBOL(l_lock);
-EXPORT_SYMBOL(l_unlock);
+EXPORT_SYMBOL(ldlm_dump_all_namespaces);
+EXPORT_SYMBOL(ldlm_resource_get);
+EXPORT_SYMBOL(ldlm_resource_putref);
+EXPORT_SYMBOL(ldlm_resource_unlink_lock);
/* ldlm_lib.c */
+EXPORT_SYMBOL(client_import_add_conn);
+EXPORT_SYMBOL(client_import_del_conn);
EXPORT_SYMBOL(client_obd_setup);
EXPORT_SYMBOL(client_obd_cleanup);
EXPORT_SYMBOL(client_connect_import);
EXPORT_SYMBOL(client_disconnect_export);
-EXPORT_SYMBOL(target_abort_recovery);
+EXPORT_SYMBOL(target_start_recovery_thread);
+EXPORT_SYMBOL(target_stop_recovery_thread);
EXPORT_SYMBOL(target_handle_connect);
+EXPORT_SYMBOL(target_cleanup_recovery);
EXPORT_SYMBOL(target_destroy_export);
EXPORT_SYMBOL(target_cancel_recovery_timer);
EXPORT_SYMBOL(target_send_reply);
EXPORT_SYMBOL(target_queue_recovery_request);
EXPORT_SYMBOL(target_handle_ping);
+EXPORT_SYMBOL(target_pack_pool_reply);
EXPORT_SYMBOL(target_handle_disconnect);
-EXPORT_SYMBOL(target_queue_final_reply);
-EXPORT_SYMBOL(ldlm_put_lock_into_req);
+
+/* l_lock.c */
+EXPORT_SYMBOL(lock_res_and_lock);
+EXPORT_SYMBOL(unlock_res_and_lock);