-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
- * Copyright (c) 2002, 2003 Cluster File Systems, Inc.
- * Author: Peter Braam <braam@clusterfs.com>
- * Author: Phil Schwan <phil@clusterfs.com>
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
*
- * This file is part of Lustre, http://www.lustre.org.
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
*
- * Lustre is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
*
- * Lustre is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
*
- * You should have received a copy of the GNU General Public License
- * along with Lustre; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2003 Hewlett-Packard Development Company LP.
+ * Developed under the sponsorship of the US Government under
+ * Subcontract No. B514193
+ *
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2010, 2012, Intel Corporation.
*/
-
/*
- * 2003 - 2005 Copyright, Hewlett-Packard Development Compnay, LP.
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+/**
+ * This file implements the POSIX lock type for Lustre.
+ * Its policy properties are the start and end of the extent and the PID.
+ *
+ * These locks are handled only through the MDS because POSIX semantics
+ * require, e.g., that a lock may be only partially released, splitting it
+ * into two parts, and that two adjacent locks from the same process may
+ * be merged into a single wider lock.
+ *
+ * Lock modes are mapped as follows:
+ *   PR and PW for READ and WRITE locks
+ *   NL to request the release of a portion of the lock
*
- * Developed under the sponsorship of the U.S. Government
- * under Subcontract No. B514193
+ * These flock locks never time out.
*/
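+/*
+ * For illustration (schematic, not part of this patch; the real
+ * conversion lives in the client fcntl path): a userspace byte-range
+ * lock maps onto the policy fields roughly like this:
+ *
+ *	switch (fl->l_type) {
+ *	case F_RDLCK: mode = LCK_PR; break;
+ *	case F_WRLCK: mode = LCK_PW; break;
+ *	case F_UNLCK: mode = LCK_NL; break;
+ *	}
+ *	policy.l_flock.start = fl->l_start;
+ *	policy.l_flock.end   = fl->l_len ? fl->l_start + fl->l_len - 1
+ *					 : OBD_OBJECT_EOF;
+ */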
#define DEBUG_SUBSYSTEM S_LDLM
#ifdef __KERNEL__
-#include <linux/lustre_dlm.h>
-#include <linux/obd_support.h>
-#include <linux/obd_class.h>
-#include <linux/lustre_lib.h>
+#include <lustre_dlm.h>
+#include <obd_support.h>
+#include <obd_class.h>
+#include <lustre_lib.h>
#include <libcfs/list.h>
#else
#include <liblustre.h>
+#include <obd_class.h>
#endif
#include "ldlm_internal.h"
-static struct list_head ldlm_flock_waitq = LIST_HEAD_INIT(ldlm_flock_waitq);
-static int ldlm_deadlock_timeout = 30 * HZ;
+int ldlm_flock_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
+ void *data, int flag);
/**
* list_for_remaining_safe - iterate over the remaining entries in a list
* and safeguard against removal of a list entry.
- * @pos: the &struct list_head to use as a loop counter. pos MUST
+ * \param pos the &struct list_head to use as a loop counter. pos MUST
* have been initialized prior to using it in this macro.
- * @n: another &struct list_head to use as temporary storage
- * @head: the head for your list.
+ * \param n another &struct list_head to use as temporary storage
+ * \param head the head for your list.
*/
#define list_for_remaining_safe(pos, n, head) \
for (n = pos->next; pos != (head); pos = n, n = pos->next)
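+
+/*
+ * Usage sketch (illustrative only): resume a walk from a known entry
+ * rather than from the list head, while allowing the current entry to
+ * be unlinked; "pos" must already point into the list:
+ *
+ *	cfs_list_t *pos = ownlocks;
+ *	cfs_list_t *n;
+ *
+ *	list_for_remaining_safe(pos, n, &res->lr_granted) {
+ *		lock = cfs_list_entry(pos, struct ldlm_lock, l_res_link);
+ *		if (!ldlm_same_flock_owner(lock, new))
+ *			break;
+ *	}
+ */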
static inline int
ldlm_same_flock_owner(struct ldlm_lock *lock, struct ldlm_lock *new)
{
- return((new->l_policy_data.l_flock.pid ==
- lock->l_policy_data.l_flock.pid) &&
- (new->l_policy_data.l_flock.nid ==
- lock->l_policy_data.l_flock.nid));
+ return((new->l_policy_data.l_flock.owner ==
+ lock->l_policy_data.l_flock.owner) &&
+ (new->l_export == lock->l_export));
}
static inline int
ldlm_flocks_overlap(struct ldlm_lock *lock, struct ldlm_lock *new)
{
	return((new->l_policy_data.l_flock.start <=
		lock->l_policy_data.l_flock.end) &&
	       (new->l_policy_data.l_flock.end >=
		lock->l_policy_data.l_flock.start));
}
+static inline void ldlm_flock_blocking_link(struct ldlm_lock *req,
+ struct ldlm_lock *lock)
+{
+ /* For server only */
+ if (req->l_export == NULL)
+ return;
+
+ LASSERT(cfs_hlist_unhashed(&req->l_exp_flock_hash));
+
+ req->l_policy_data.l_flock.blocking_owner =
+ lock->l_policy_data.l_flock.owner;
+ req->l_policy_data.l_flock.blocking_export =
+ lock->l_export;
+ req->l_policy_data.l_flock.blocking_refs = 0;
+
+ cfs_hash_add(req->l_export->exp_flock_hash,
+ &req->l_policy_data.l_flock.owner,
+ &req->l_exp_flock_hash);
+}
+
+static inline void ldlm_flock_blocking_unlink(struct ldlm_lock *req)
+{
+ /* For server only */
+ if (req->l_export == NULL)
+ return;
+
+ check_res_locked(req->l_resource);
+ if (req->l_export->exp_flock_hash != NULL &&
+ !cfs_hlist_unhashed(&req->l_exp_flock_hash))
+ cfs_hash_del(req->l_export->exp_flock_hash,
+ &req->l_policy_data.l_flock.owner,
+ &req->l_exp_flock_hash);
+}
+
static inline void
-ldlm_flock_destroy(struct ldlm_lock *lock, ldlm_mode_t mode, int flags)
+ldlm_flock_destroy(struct ldlm_lock *lock, ldlm_mode_t mode, __u64 flags)
{
ENTRY;
- LDLM_DEBUG(lock, "ldlm_flock_destroy(mode: %d, flags: 0x%x)",
- mode, flags);
+ LDLM_DEBUG(lock, "ldlm_flock_destroy(mode: %d, flags: 0x%llx)",
+ mode, flags);
- /* don't need to take the locks here because the lock
- * is on a local destroy list, not the resource list. */
- list_del_init(&lock->l_res_link);
+ /* Safe to not lock here, since it should be empty anyway */
+ LASSERT(cfs_hlist_unhashed(&lock->l_exp_flock_hash));
- if (flags == LDLM_FL_WAIT_NOREPROC) {
- /* client side - set flags to prevent sending a CANCEL */
+ cfs_list_del_init(&lock->l_res_link);
+ if (flags == LDLM_FL_WAIT_NOREPROC &&
+ !(lock->l_flags & LDLM_FL_FAILED)) {
+ /* client side - set a flag to prevent sending a CANCEL */
lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_CBPENDING;
- ldlm_lock_decref_internal(lock, mode);
+
+		/* When we get here we are under lock_res_and_lock(), so we
+		 * must call the nolock version of
+		 * ldlm_lock_decref_internal(). */
+ ldlm_lock_decref_internal_nolock(lock, mode);
}
- ldlm_lock_destroy(lock);
+ ldlm_lock_destroy_nolock(lock);
EXIT;
}
+/**
+ * POSIX locks deadlock detection code.
+ *
+ * Given a new lock \a req and an existing lock \a bl_lock it conflicts
+ * with, iterate through all blocked POSIX locks for this export and
+ * check whether a deadlock would arise: i.e. one owner holds a lock and
+ * wants a lock held by a second owner, while that second owner in turn
+ * waits on a lock the first one holds.  A worked example follows the
+ * function body below.
+ */
+static int
+ldlm_flock_deadlock(struct ldlm_lock *req, struct ldlm_lock *bl_lock)
+{
+ struct obd_export *req_exp = req->l_export;
+ struct obd_export *bl_exp = bl_lock->l_export;
+ __u64 req_owner = req->l_policy_data.l_flock.owner;
+ __u64 bl_owner = bl_lock->l_policy_data.l_flock.owner;
+
+ /* For server only */
+ if (req_exp == NULL)
+ return 0;
+
+ class_export_get(bl_exp);
+ while (1) {
+ struct obd_export *bl_exp_new;
+ struct ldlm_lock *lock = NULL;
+ struct ldlm_flock *flock;
+
+ if (bl_exp->exp_flock_hash != NULL)
+ lock = cfs_hash_lookup(bl_exp->exp_flock_hash,
+ &bl_owner);
+ if (lock == NULL)
+ break;
+
+ LASSERT(req != lock);
+ flock = &lock->l_policy_data.l_flock;
+ LASSERT(flock->owner == bl_owner);
+ bl_owner = flock->blocking_owner;
+ bl_exp_new = class_export_get(flock->blocking_export);
+ class_export_put(bl_exp);
+
+ cfs_hash_put(bl_exp->exp_flock_hash, &lock->l_exp_flock_hash);
+ bl_exp = bl_exp_new;
+
+ if (bl_owner == req_owner && bl_exp == req_exp) {
+ class_export_put(bl_exp);
+ return 1;
+ }
+ }
+ class_export_put(bl_exp);
+
+ return 0;
+}
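+
+/*
+ * Worked example of the chain walk above (illustrative): P1 on client A
+ * holds lock L1 and is blocked waiting on L2; P2 on client B holds L2
+ * and now requests L1.  Checking P2's request, the hash lookup for
+ * bl_owner == P1 on client A's export finds P1's blocked request, whose
+ * blocking_owner and blocking_export point back at P2.  The loop then
+ * sees bl_owner == req_owner && bl_exp == req_exp and returns 1: a
+ * deadlock.  Longer cycles are detected the same way, one hash lookup
+ * per hop.
+ */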
+
+/**
+ * Process a granting attempt for a flock lock.
+ * Must be called with the ns lock held.
+ *
+ * This function looks for any conflicts for \a lock in the granted or
+ * waiting queues. The lock is granted if no conflicts are found in
+ * either queue.
+ *
+ * It is also responsible for splitting a lock if a portion of the lock
+ * is released.
+ *
+ * If \a first_enq is 0 (i.e. called from ldlm_reprocess_queue):
+ *   - blocking ASTs have already been sent
+ *
+ * If \a first_enq is 1 (i.e. called from ldlm_lock_enqueue):
+ *   - blocking ASTs have not been sent yet, so the list of conflicting
+ *     locks is collected and the ASTs are sent.
+ */
int
-ldlm_process_flock_lock(struct ldlm_lock *req, int *flags, int first_enq,
- ldlm_error_t *err, struct list_head *work_list)
+ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags, int first_enq,
+ ldlm_error_t *err, cfs_list_t *work_list)
{
- struct list_head destroy_list = LIST_HEAD_INIT(destroy_list);
struct ldlm_resource *res = req->l_resource;
- struct ldlm_namespace *ns = res->lr_namespace;
- struct list_head *pos;
- struct list_head *tmp = NULL;
- struct ldlm_lock *lock;
+ struct ldlm_namespace *ns = ldlm_res_to_ns(res);
+ cfs_list_t *tmp;
+ cfs_list_t *ownlocks = NULL;
+ struct ldlm_lock *lock = NULL;
struct ldlm_lock *new = req;
- struct ldlm_lock *new2;
+ struct ldlm_lock *new2 = NULL;
ldlm_mode_t mode = req->l_req_mode;
+ int local = ns_is_client(ns);
int added = (mode == LCK_NL);
int overlaps = 0;
- int rc = LDLM_ITER_CONTINUE;
- int i = 0;
+	int split = 0;
+ const struct ldlm_callback_suite null_cbs = { NULL };
ENTRY;
- CDEBUG(D_DLMTRACE, "flags %#x mode %u pid "LPU64" nid "LPU64" "
- "start "LPU64" end "LPU64"\n", *flags, mode,
- req->l_policy_data.l_flock.pid,
- req->l_policy_data.l_flock.nid,
- req->l_policy_data.l_flock.start,
+ CDEBUG(D_DLMTRACE, "flags %#llx owner "LPU64" pid %u mode %u start "
+ LPU64" end "LPU64"\n", *flags,
+ new->l_policy_data.l_flock.owner,
+ new->l_policy_data.l_flock.pid, mode,
+ req->l_policy_data.l_flock.start,
req->l_policy_data.l_flock.end);
*err = ELDLM_OK;
- /* No blocking ASTs are sent for Posix file & record locks */
- req->l_blocking_ast = NULL;
+ if (local) {
+		/* No blocking ASTs are sent to the clients for
+		 * POSIX file & record locks */
+ req->l_blocking_ast = NULL;
+ } else {
+ /* Called on the server for lock cancels. */
+ req->l_blocking_ast = ldlm_flock_blocking_ast;
+ }
+reprocess:
if ((*flags == LDLM_FL_WAIT_NOREPROC) || (mode == LCK_NL)) {
/* This loop determines where this processes locks start
* in the resource lr_granted list. */
- list_for_each(pos, &res->lr_granted) {
- lock = list_entry(pos, struct ldlm_lock, l_res_link);
+ cfs_list_for_each(tmp, &res->lr_granted) {
+ lock = cfs_list_entry(tmp, struct ldlm_lock,
+ l_res_link);
if (ldlm_same_flock_owner(lock, req)) {
- tmp = pos;
+ ownlocks = tmp;
break;
}
}
/* This loop determines if there are existing locks
* that conflict with the new lock request. */
- list_for_each(pos, &res->lr_granted) {
- lock = list_entry(pos, struct ldlm_lock, l_res_link);
+ cfs_list_for_each(tmp, &res->lr_granted) {
+ lock = cfs_list_entry(tmp, struct ldlm_lock,
+ l_res_link);
if (ldlm_same_flock_owner(lock, req)) {
- if (!tmp)
- tmp = pos;
+ if (!ownlocks)
+ ownlocks = tmp;
continue;
}
if (!ldlm_flocks_overlap(lock, req))
continue;
- /* deadlock detection will be done will be postponed
- * until ldlm_flock_completion_ast(). */
-
- *flags |= LDLM_FL_LOCK_CHANGED;
-
- req->l_policy_data.l_flock.blocking_pid =
- lock->l_policy_data.l_flock.pid;
- req->l_policy_data.l_flock.blocking_nid =
- lock->l_policy_data.l_flock.nid;
-
if (!first_enq)
RETURN(LDLM_ITER_CONTINUE);
if (*flags & LDLM_FL_BLOCK_NOWAIT) {
- list_move(&req->l_res_link, &destroy_list);
+ ldlm_flock_destroy(req, mode, *flags);
*err = -EAGAIN;
- GOTO(out, rc = LDLM_ITER_STOP);
+ RETURN(LDLM_ITER_STOP);
}
if (*flags & LDLM_FL_TEST_LOCK) {
+ ldlm_flock_destroy(req, mode, *flags);
req->l_req_mode = lock->l_granted_mode;
req->l_policy_data.l_flock.pid =
lock->l_policy_data.l_flock.pid;
- req->l_policy_data.l_flock.nid =
- lock->l_policy_data.l_flock.nid;
req->l_policy_data.l_flock.start =
lock->l_policy_data.l_flock.start;
req->l_policy_data.l_flock.end =
lock->l_policy_data.l_flock.end;
- list_move(&req->l_res_link, &destroy_list);
- GOTO(out, rc = LDLM_ITER_STOP);
+ *flags |= LDLM_FL_LOCK_CHANGED;
+ RETURN(LDLM_ITER_STOP);
}
+ /* add lock to blocking list before deadlock
+ * check to prevent race */
+ ldlm_flock_blocking_link(req, lock);
+
+ if (ldlm_flock_deadlock(req, lock)) {
+ ldlm_flock_blocking_unlink(req);
+ ldlm_flock_destroy(req, mode, *flags);
+ *err = -EDEADLK;
+ RETURN(LDLM_ITER_STOP);
+ }
+
ldlm_resource_add_lock(res, &res->lr_waiting, req);
*flags |= LDLM_FL_BLOCK_GRANTED;
RETURN(LDLM_ITER_STOP);
}
if (*flags & LDLM_FL_TEST_LOCK) {
+ ldlm_flock_destroy(req, mode, *flags);
req->l_req_mode = LCK_NL;
*flags |= LDLM_FL_LOCK_CHANGED;
- list_move(&req->l_res_link, &destroy_list);
- GOTO(out, rc = LDLM_ITER_STOP);
+ RETURN(LDLM_ITER_STOP);
}
+	/* In case we had slept on this lock request, take it off the
+	 * deadlock detection hash list. */
+ ldlm_flock_blocking_unlink(req);
+
/* Scan the locks owned by this process that overlap this request.
* We may have to merge or split existing locks. */
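+	/*
+	 * Illustrative example: if this owner holds PW [0, 100] and req
+	 * is an unlock (LCK_NL) of [40, 60], the code below creates a
+	 * second lock (new2) for the low part [0, 39] and trims the
+	 * existing lock to [61, 100].  Conversely, PW [0, 39] plus a new
+	 * PW request for [40, 100] from the same owner are merged into a
+	 * single PW [0, 100] lock.
+	 */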
- pos = (tmp != NULL) ? tmp : &res->lr_granted;
- list_for_remaining_safe(pos, tmp, &res->lr_granted) {
- lock = list_entry(pos, struct ldlm_lock, l_res_link);
+ if (!ownlocks)
+ ownlocks = &res->lr_granted;
+
+ list_for_remaining_safe(ownlocks, tmp, &res->lr_granted) {
+ lock = cfs_list_entry(ownlocks, struct ldlm_lock, l_res_link);
if (!ldlm_same_flock_owner(lock, new))
break;
		/* If the modes are the same then we need to process locks
		 * that overlap OR adjoin the new lock. The extra logic
		 * condition is necessary to deal with arithmetic
		 * overflow and underflow. */
if ((new->l_policy_data.l_flock.start >
(lock->l_policy_data.l_flock.end + 1))
- && (lock->l_policy_data.l_flock.end != ~0))
+ && (lock->l_policy_data.l_flock.end !=
+ OBD_OBJECT_EOF))
continue;
if ((new->l_policy_data.l_flock.end <
}
if (added) {
- list_move(&lock->l_res_link, &destroy_list);
+ ldlm_flock_destroy(lock, mode, *flags);
} else {
new = lock;
added = 1;
new->l_policy_data.l_flock.end + 1;
break;
}
- list_move(&lock->l_res_link, &destroy_list);
+ ldlm_flock_destroy(lock, lock->l_req_mode, *flags);
continue;
}
if (new->l_policy_data.l_flock.end >=
* it must see the original lock data in the reply. */
/* XXX - if ldlm_lock_new() can sleep we should
- * release the ns_lock, allocate the new lock,
+ * release the lr_lock, allocate the new lock,
* and restart processing this lock. */
- new2 = ldlm_lock_create(ns, NULL, res->lr_name, LDLM_FLOCK,
- lock->l_granted_mode, NULL, NULL, NULL,
- NULL, 0);
if (!new2) {
- list_move(&req->l_res_link, &destroy_list);
- *err = -ENOLCK;
- GOTO(out, rc = LDLM_ITER_STOP);
+ unlock_res_and_lock(req);
+ new2 = ldlm_lock_create(ns, &res->lr_name, LDLM_FLOCK,
+ lock->l_granted_mode, &null_cbs,
+ NULL, 0, LVB_T_NONE);
+ lock_res_and_lock(req);
+ if (!new2) {
+ ldlm_flock_destroy(req, lock->l_granted_mode,
+ *flags);
+ *err = -ENOLCK;
+ RETURN(LDLM_ITER_STOP);
+ }
+ goto reprocess;
}
+		split = 1;
+
new2->l_granted_mode = lock->l_granted_mode;
new2->l_policy_data.l_flock.pid =
new->l_policy_data.l_flock.pid;
- new2->l_policy_data.l_flock.nid =
- new->l_policy_data.l_flock.nid;
+ new2->l_policy_data.l_flock.owner =
+ new->l_policy_data.l_flock.owner;
new2->l_policy_data.l_flock.start =
lock->l_policy_data.l_flock.start;
	new2->l_policy_data.l_flock.end =
		new->l_policy_data.l_flock.start - 1;
	lock->l_policy_data.l_flock.start =
		new->l_policy_data.l_flock.end + 1;
new2->l_conn_export = lock->l_conn_export;
if (lock->l_export != NULL) {
- new2->l_export = class_export_get(lock->l_export);
- list_add(&new2->l_export_chain,
- &new2->l_export->exp_ldlm_data.led_held_locks);
+ new2->l_export = class_export_lock_get(lock->l_export, new2);
+ if (new2->l_export->exp_lock_hash &&
+ cfs_hlist_unhashed(&new2->l_exp_hash))
+ cfs_hash_add(new2->l_export->exp_lock_hash,
+ &new2->l_remote_handle,
+ &new2->l_exp_hash);
}
if (*flags == LDLM_FL_WAIT_NOREPROC)
ldlm_lock_addref_internal_nolock(new2,
lock->l_granted_mode);
/* insert new2 at lock */
- ldlm_resource_add_lock(res, pos, new2);
- LDLM_LOCK_PUT(new2);
+ ldlm_resource_add_lock(res, ownlocks, new2);
+ LDLM_LOCK_RELEASE(new2);
break;
}
+	/* If new2 was created but never used, destroy it. */
+	if (split == 0 && new2 != NULL)
+ ldlm_lock_destroy_nolock(new2);
+
/* At this point we're granting the lock request. */
req->l_granted_mode = req->l_req_mode;
- if (added) {
- list_move(&req->l_res_link, &destroy_list);
- } else {
- /* Add req to the granted queue before calling
- * ldlm_reprocess_all() below. */
- list_del_init(&req->l_res_link);
- /* insert new lock before pos in the list. */
- ldlm_resource_add_lock(res, pos, req);
+ /* Add req to the granted queue before calling ldlm_reprocess_all(). */
+ if (!added) {
+ cfs_list_del_init(&req->l_res_link);
+ /* insert new lock before ownlocks in list. */
+ ldlm_resource_add_lock(res, ownlocks, req);
}
if (*flags != LDLM_FL_WAIT_NOREPROC) {
+#ifdef HAVE_SERVER_SUPPORT
if (first_enq) {
/* If this is an unlock, reprocess the waitq and
- * send completions ASTs for locks that can now be
+			 * send completion ASTs for locks that can now be
* granted. The only problem with doing this
* reprocessing here is that the completion ASTs for
* newly granted locks will be sent before the unlock
* but only once because first_enq will be false from
* ldlm_reprocess_queue. */
if ((mode == LCK_NL) && overlaps) {
- struct list_head rpc_list =
- LIST_HEAD_INIT(rpc_list);
+ CFS_LIST_HEAD(rpc_list);
int rc;
- restart:
+restart:
ldlm_reprocess_queue(res, &res->lr_waiting,
&rpc_list);
- unlock_res(res);
- rc = ldlm_run_cp_ast_work(&rpc_list);
- lock_res(res);
+
+ unlock_res_and_lock(req);
+ rc = ldlm_run_ast_work(ns, &rpc_list,
+ LDLM_WORK_CP_AST);
+ lock_res_and_lock(req);
if (rc == -ERESTART)
GOTO(restart, -ERESTART);
}
LASSERT(req->l_completion_ast);
ldlm_add_ast_work_item(req, NULL, work_list);
}
+#else /* !HAVE_SERVER_SUPPORT */
+	/* The only possible case of a client-side call into the flock
+	 * policy function is ldlm_flock_completion_ast(), which always
+	 * passes the LDLM_FL_WAIT_NOREPROC flag. */
+ CERROR("Illegal parameter for client-side-only module.\n");
+ LBUG();
+#endif /* HAVE_SERVER_SUPPORT */
}
- out:
- if (!list_empty(&destroy_list)) {
- /* FIXME: major hack. when called from ldlm_lock_enqueue()
- * the res and the lock are locked. When called from
- * ldlm_reprocess_queue() the res is locked but the lock
- * is not. */
- if (added && first_enq && res->lr_namespace->ns_client)
- unlock_bitlock(req);
-
- unlock_res(res);
-
- CDEBUG(D_DLMTRACE, "Destroy locks:\n");
-
- list_for_each_safe(pos, tmp, &destroy_list) {
- lock = list_entry(pos, struct ldlm_lock, l_res_link);
- ldlm_lock_dump(D_DLMTRACE, lock, ++i);
- ldlm_flock_destroy(lock, lock->l_req_mode, *flags);
- }
+	/* In case we're reprocessing the requested lock we can't destroy
+	 * it until after calling ldlm_add_ast_work_item() above, so that
+	 * it can bump the reference count on \a req.  Otherwise \a req
+	 * could be freed before the completion AST can be sent. */
+ if (added)
+ ldlm_flock_destroy(req, mode, *flags);
- if (added && first_enq && res->lr_namespace->ns_client)
- lock_bitlock(req);
-
- lock_res(res);
- }
-
- RETURN(rc);
+ ldlm_resource_dump(D_INFO, res);
+ RETURN(LDLM_ITER_CONTINUE);
}
-struct ldlm_sleep_flock {
- __u64 lsf_pid;
- __u64 lsf_nid;
- __u64 lsf_blocking_pid;
- __u64 lsf_blocking_nid;
- struct list_head lsf_list;
+struct ldlm_flock_wait_data {
+ struct ldlm_lock *fwd_lock;
+ int fwd_generation;
};
-int
-ldlm_handle_flock_deadlock_check(struct ptlrpc_request *req)
+static void
+ldlm_flock_interrupted_wait(void *data)
{
- struct ldlm_request *dlm_req;
- struct ldlm_sleep_flock *lsf;
- struct list_head *pos;
- __u64 pid, nid, blocking_pid, blocking_nid;
- unsigned int flags;
- int rc = 0;
+ struct ldlm_lock *lock;
ENTRY;
- req->rq_status = 0;
-
- dlm_req = lustre_swab_reqbuf(req, 0, sizeof (*dlm_req),
- lustre_swab_ldlm_request);
- if (dlm_req == NULL) {
- CERROR("bad request buffer for flock deadlock check\n");
- RETURN(-EFAULT);
- }
-
- flags = dlm_req->lock_flags;
- pid = dlm_req->lock_desc.l_policy_data.l_flock.pid;
- nid = dlm_req->lock_desc.l_policy_data.l_flock.nid;
- blocking_pid = dlm_req->lock_desc.l_policy_data.l_flock.blocking_pid;
- blocking_nid = dlm_req->lock_desc.l_policy_data.l_flock.blocking_nid;
-
- CDEBUG(D_DLMTRACE, "flags: 0x%x req: pid: "LPU64" nid "LPU64" "
- "blk: pid: "LPU64" nid: "LPU64"\n",
- dlm_req->lock_flags, pid, nid, blocking_pid, blocking_nid);
-
- if (flags & LDLM_FL_GET_BLOCKING) {
- struct ldlm_lock *lock;
- struct ldlm_reply *dlm_rep;
- int size = sizeof(*dlm_rep);
-
- lock = ldlm_handle2lock(&dlm_req->lock_handle1);
- if (!lock) {
- CERROR("received deadlock check for unknown lock "
- "cookie "LPX64" from client %s id %s\n",
- dlm_req->lock_handle1.cookie,
- req->rq_export->exp_client_uuid.uuid,
- req->rq_peerstr);
- req->rq_status = -ESTALE;
- RETURN(0);
- }
-
- lock_res_and_lock(lock);
- blocking_pid = lock->l_policy_data.l_flock.blocking_pid;
- blocking_nid = lock->l_policy_data.l_flock.blocking_nid;
- unlock_res_and_lock(lock);
-
- rc = lustre_pack_reply(req, 1, &size, NULL);
- if (rc) {
- CERROR("lustre_pack_reply failed: rc = %d\n", rc);
- req->rq_status = rc;
- RETURN(0);
- }
-
- dlm_rep = lustre_msg_buf(req->rq_repmsg, 0, sizeof(*dlm_rep));
- dlm_rep->lock_desc.l_policy_data.l_flock.blocking_pid =
- blocking_pid;
- dlm_rep->lock_desc.l_policy_data.l_flock.blocking_nid =
- blocking_nid;
- } else {
- rc = lustre_pack_reply(req, 0, NULL, NULL);
- }
-
- if (flags & LDLM_FL_DEADLOCK_CHK) {
- __u64 orig_blocking_pid = blocking_pid;
- __u64 orig_blocking_nid = blocking_nid;
- restart:
- list_for_each(pos, &ldlm_flock_waitq) {
- lsf = list_entry(pos,struct ldlm_sleep_flock,lsf_list);
-
- /* We want to return a deadlock condition for the
- * last lock on the waitq that created the deadlock
- * situation. Posix verification suites expect this
- * behavior. We'll stop if we haven't found a deadlock
- * up to the point where the current process is queued
- * to let the last lock on the queue that's in the
- * deadlock loop detect the deadlock. In this case
- * just update the blocking info.*/
- if ((lsf->lsf_pid == pid) && (lsf->lsf_nid == nid)) {
- lsf->lsf_blocking_pid = blocking_pid;
- lsf->lsf_blocking_nid = blocking_nid;
- break;
- }
-
- if ((lsf->lsf_pid != blocking_pid) ||
- (lsf->lsf_nid != blocking_nid))
- continue;
-
- blocking_pid = lsf->lsf_blocking_pid;
- blocking_nid = lsf->lsf_blocking_nid;
-
- if (blocking_pid == pid && blocking_nid == nid){
- req->rq_status = -EDEADLOCK;
- flags |= LDLM_FL_DEADLOCK_DEL;
- break;
- }
+ lock = ((struct ldlm_flock_wait_data *)data)->fwd_lock;
- goto restart;
- }
+ /* take lock off the deadlock detection hash list. */
+ lock_res_and_lock(lock);
+ ldlm_flock_blocking_unlink(lock);
- /* If we got all the way thru the list then we're not on it. */
- if (pos == &ldlm_flock_waitq) {
- OBD_ALLOC(lsf, sizeof(*lsf));
- if (!lsf)
- RETURN(-ENOSPC);
-
- lsf->lsf_pid = pid;
- lsf->lsf_nid = nid;
- lsf->lsf_blocking_pid = orig_blocking_pid;
- lsf->lsf_blocking_nid = orig_blocking_nid;
- list_add_tail(&lsf->lsf_list, &ldlm_flock_waitq);
- }
- }
-
- if (flags & LDLM_FL_DEADLOCK_DEL) {
- list_for_each_entry(lsf, &ldlm_flock_waitq, lsf_list) {
- if ((lsf->lsf_pid == pid) && (lsf->lsf_nid == nid)) {
- list_del_init(&lsf->lsf_list);
- OBD_FREE(lsf, sizeof(*lsf));
- break;
- }
- }
- }
+ /* client side - set flag to prevent lock from being put on LRU list */
+ lock->l_flags |= LDLM_FL_CBPENDING;
+ unlock_res_and_lock(lock);
- RETURN(rc);
+ EXIT;
}
+/**
+ * Flock completion callback function.
+ *
+ * \param lock [in,out]  the lock to be handled
+ * \param flags [in]     LDLM flags for the lock (e.g. its blocked state)
+ * \param data [in]      ldlm_work_cp_ast_lock() will pass ldlm_cb_set_arg
+ *
+ * \retval 0   success
+ * \retval <0  failure
+ */
int
-ldlm_send_flock_deadlock_check(struct obd_device *obd, struct ldlm_lock *lock,
- unsigned int flags)
+ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
{
- struct obd_import *imp;
- struct ldlm_request *body;
- struct ldlm_reply *reply;
- struct ptlrpc_request *req;
- int rc, size = sizeof(*body);
+ struct file_lock *getlk = lock->l_ast_data;
+ struct obd_device *obd;
+ struct obd_import *imp = NULL;
+ struct ldlm_flock_wait_data fwd;
+ struct l_wait_info lwi;
+ ldlm_error_t err;
+ int rc = 0;
ENTRY;
- CDEBUG(D_DLMTRACE, "obd: %p flags: 0x%x\n", obd, flags);
-
- imp = obd->u.cli.cl_import;
- req = ptlrpc_prep_req(imp, LUSTRE_DLM_VERSION, LDLM_FLK_DEADLOCK_CHK, 1,
- &size, NULL);
- if (!req)
- RETURN(-ENOMEM);
+ CDEBUG(D_DLMTRACE, "flags: 0x%llx data: %p getlk: %p\n",
+ flags, data, getlk);
+
+	/* Import invalidation.  We need to actually release the lock
+	 * references being held, so that it can go away.  No point in
+	 * holding the lock even if the app still believes it has it, since
+	 * the server has already dropped it anyway.  This applies to
+	 * granted locks only. */
+ if ((lock->l_flags & (LDLM_FL_FAILED|LDLM_FL_LOCAL_ONLY)) ==
+ (LDLM_FL_FAILED|LDLM_FL_LOCAL_ONLY)) {
+ if (lock->l_req_mode == lock->l_granted_mode &&
+ lock->l_granted_mode != LCK_NL &&
+ NULL == data)
+ ldlm_lock_decref_internal(lock, lock->l_req_mode);
+
+ /* Need to wake up the waiter if we were evicted */
+ cfs_waitq_signal(&lock->l_waitq);
+ RETURN(0);
+ }
- body = lustre_msg_buf(req->rq_reqmsg, 0, sizeof (*body));
- body->lock_flags = flags;
- ldlm_lock2desc(lock, &body->lock_desc);
- memcpy(&body->lock_handle1, &lock->l_remote_handle,
- sizeof(body->lock_handle1));
+ LASSERT(flags != LDLM_FL_WAIT_NOREPROC);
- if (flags & LDLM_FL_GET_BLOCKING) {
- size = sizeof(*reply);
- req->rq_replen = lustre_msg_size(1, &size);
- } else {
- req->rq_replen = lustre_msg_size(0, NULL);
+ if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
+ LDLM_FL_BLOCK_CONV))) {
+ if (NULL == data)
+			/* MDS granted the lock in the reply */
+ goto granted;
+ /* CP AST RPC: lock get granted, wake it up */
+ cfs_waitq_signal(&lock->l_waitq);
+ RETURN(0);
}
- rc = ptlrpc_queue_wait(req);
- if (rc != ELDLM_OK)
- GOTO(out, rc);
+ LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, "
+ "sleeping");
+ fwd.fwd_lock = lock;
+ obd = class_exp2obd(lock->l_conn_export);
- if (flags & LDLM_FL_GET_BLOCKING) {
- reply = lustre_swab_repbuf(req, 0, sizeof (*reply),
- lustre_swab_ldlm_reply);
- if (reply == NULL) {
- CERROR ("Can't unpack ldlm_reply\n");
- GOTO (out, rc = -EPROTO);
- }
+ /* if this is a local lock, there is no import */
+ if (NULL != obd)
+ imp = obd->u.cli.cl_import;
- lock->l_policy_data.l_flock.blocking_pid =
- reply->lock_desc.l_policy_data.l_flock.blocking_pid;
- lock->l_policy_data.l_flock.blocking_nid =
- reply->lock_desc.l_policy_data.l_flock.blocking_nid;
-
- CDEBUG(D_DLMTRACE, "LDLM_FL_GET_BLOCKING: pid: "LPU64" "
- "nid: "LPU64" blk: pid: "LPU64" nid: "LPU64"\n",
- lock->l_policy_data.l_flock.pid,
- lock->l_policy_data.l_flock.nid,
- lock->l_policy_data.l_flock.blocking_pid,
- lock->l_policy_data.l_flock.blocking_nid);
+ if (NULL != imp) {
+ spin_lock(&imp->imp_lock);
+ fwd.fwd_generation = imp->imp_generation;
+ spin_unlock(&imp->imp_lock);
}
- rc = req->rq_status;
- out:
- ptlrpc_req_finished(req);
- RETURN(rc);
-}
+ lwi = LWI_TIMEOUT_INTR(0, NULL, ldlm_flock_interrupted_wait, &fwd);
-int
-ldlm_flock_deadlock_check(struct obd_device *master_obd, struct obd_device *obd,
- struct ldlm_lock *lock)
-{
- unsigned int flags = 0;
- int rc;
- ENTRY;
+ /* Go to sleep until the lock is granted. */
+ rc = l_wait_event(lock->l_waitq, is_granted_or_cancelled(lock), &lwi);
- if (obd == NULL) {
- /* Delete this process from the sleeplock list. */
- flags = LDLM_FL_DEADLOCK_DEL;
- rc = ldlm_send_flock_deadlock_check(master_obd, lock, flags);
+ if (rc) {
+ LDLM_DEBUG(lock, "client-side enqueue waking up: failed (%d)",
+ rc);
RETURN(rc);
}
- flags = LDLM_FL_GET_BLOCKING;
- if (obd == master_obd)
- flags |= LDLM_FL_DEADLOCK_CHK;
+granted:
+ OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CP_CB_WAIT, 10);
+
+ if (lock->l_flags & LDLM_FL_DESTROYED) {
+ LDLM_DEBUG(lock, "client-side enqueue waking up: destroyed");
+ RETURN(0);
+ }
- rc = ldlm_send_flock_deadlock_check(obd, lock, flags);
- CDEBUG(D_DLMTRACE, "1st check: rc: %d flags: 0x%x\n", rc, flags);
- if (rc || (flags & LDLM_FL_DEADLOCK_CHK))
+ if (lock->l_flags & LDLM_FL_FAILED) {
+ LDLM_DEBUG(lock, "client-side enqueue waking up: failed");
+ RETURN(-EIO);
+ }
+
+ if (rc) {
+ LDLM_DEBUG(lock, "client-side enqueue waking up: failed (%d)",
+ rc);
RETURN(rc);
+ }
- CDEBUG(D_DLMTRACE, "about to send 2nd check: master: %p.\n",
- master_obd);
+ LDLM_DEBUG(lock, "client-side enqueue granted");
- flags = LDLM_FL_DEADLOCK_CHK;
+ lock_res_and_lock(lock);
- rc = ldlm_send_flock_deadlock_check(master_obd, lock, flags);
+ /* take lock off the deadlock detection hash list. */
+ ldlm_flock_blocking_unlink(lock);
- CDEBUG(D_DLMTRACE, "2nd check: rc: %d flags: 0x%x\n", rc, flags);
+ /* ldlm_lock_enqueue() has already placed lock on the granted list. */
+ cfs_list_del_init(&lock->l_res_link);
- RETURN(rc);
+ if (flags & LDLM_FL_TEST_LOCK) {
+ /* fcntl(F_GETLK) request */
+		/* The old mode was saved in getlk->fl_type so that if the
+		 * mode in the lock changes we can decref the appropriate
+		 * refcount. */
+ ldlm_flock_destroy(lock, flock_type(getlk),
+ LDLM_FL_WAIT_NOREPROC);
+ switch (lock->l_granted_mode) {
+ case LCK_PR:
+ flock_set_type(getlk, F_RDLCK);
+ break;
+ case LCK_PW:
+ flock_set_type(getlk, F_WRLCK);
+ break;
+ default:
+ flock_set_type(getlk, F_UNLCK);
+ }
+ flock_set_pid(getlk, (pid_t)lock->l_policy_data.l_flock.pid);
+ flock_set_start(getlk,
+ (loff_t)lock->l_policy_data.l_flock.start);
+ flock_set_end(getlk,
+ (loff_t)lock->l_policy_data.l_flock.end);
+ } else {
+ __u64 noreproc = LDLM_FL_WAIT_NOREPROC;
+
+ /* We need to reprocess the lock to do merges or splits
+ * with existing locks owned by this process. */
+ ldlm_process_flock_lock(lock, &noreproc, 1, &err, NULL);
+ }
+ unlock_res_and_lock(lock);
+ RETURN(0);
}
+EXPORT_SYMBOL(ldlm_flock_completion_ast);
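+
+/*
+ * Sketch of the expected client-side call path (illustrative; ei_* are
+ * fields of struct ldlm_enqueue_info, the rest is schematic):
+ *
+ *	einfo.ei_type  = LDLM_FLOCK;
+ *	einfo.ei_mode  = mode;		LCK_PR, LCK_PW or LCK_NL
+ *	einfo.ei_cb_cp = ldlm_flock_completion_ast;
+ *	... enqueue via the MDC with the flock policy data ...
+ *
+ * For a blocked request the completion AST above sleeps in
+ * l_wait_event() until the lock is granted, cancelled, or the import is
+ * invalidated.
+ */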
-struct ldlm_flock_wait_data {
- struct ldlm_lock *fwd_lock;
- int fwd_generation;
-};
-
-static void
-ldlm_flock_interrupted_wait(void *data)
+int ldlm_flock_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
+ void *data, int flag)
{
- struct ldlm_lock *lock;
- struct lustre_handle lockh;
ENTRY;
- lock = ((struct ldlm_flock_wait_data *)data)->fwd_lock;
+ LASSERT(lock);
+ LASSERT(flag == LDLM_CB_CANCELING);
- /* client side - set flag to prevent lock from being put on lru list */
- lock_res_and_lock(lock);
- lock->l_flags |= LDLM_FL_CBPENDING;
- unlock_res_and_lock(lock);
+ /* take lock off the deadlock detection hash list. */
+ lock_res_and_lock(lock);
+ ldlm_flock_blocking_unlink(lock);
+ unlock_res_and_lock(lock);
+ RETURN(0);
+}
- ldlm_lock_decref_internal(lock, lock->l_req_mode);
- ldlm_lock2handle(lock, &lockh);
- ldlm_cli_cancel(&lockh);
- EXIT;
+void ldlm_flock_policy_wire18_to_local(const ldlm_wire_policy_data_t *wpolicy,
+ ldlm_policy_data_t *lpolicy)
+{
+ memset(lpolicy, 0, sizeof(*lpolicy));
+ lpolicy->l_flock.start = wpolicy->l_flock.lfw_start;
+ lpolicy->l_flock.end = wpolicy->l_flock.lfw_end;
+ lpolicy->l_flock.pid = wpolicy->l_flock.lfw_pid;
+	/* Compat code: old clients had no idea about the owner field and
+	 * relied solely on pid for ownership. The owner field was
+	 * introduced in LU-104, Lustre 2.1, April 2011. */
+ lpolicy->l_flock.owner = wpolicy->l_flock.lfw_pid;
}
-int
-ldlm_flock_completion_ast(struct ldlm_lock *lock, int flags, void *data)
+
+void ldlm_flock_policy_wire21_to_local(const ldlm_wire_policy_data_t *wpolicy,
+ ldlm_policy_data_t *lpolicy)
{
- struct ldlm_flock_wait_data fwd;
- unsigned long irqflags;
- struct obd_device *obd;
- struct obd_device *master_obd = (struct obd_device *)lock->l_ast_data;
- struct obd_import *imp = NULL;
- ldlm_error_t err;
- int deadlock_checked = 0;
- int rc = 0;
- struct l_wait_info lwi;
- ENTRY;
+ memset(lpolicy, 0, sizeof(*lpolicy));
+ lpolicy->l_flock.start = wpolicy->l_flock.lfw_start;
+ lpolicy->l_flock.end = wpolicy->l_flock.lfw_end;
+ lpolicy->l_flock.pid = wpolicy->l_flock.lfw_pid;
+ lpolicy->l_flock.owner = wpolicy->l_flock.lfw_owner;
+}
- LASSERT(flags != LDLM_FL_WAIT_NOREPROC);
+void ldlm_flock_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
+ ldlm_wire_policy_data_t *wpolicy)
+{
+ memset(wpolicy, 0, sizeof(*wpolicy));
+ wpolicy->l_flock.lfw_start = lpolicy->l_flock.start;
+ wpolicy->l_flock.lfw_end = lpolicy->l_flock.end;
+ wpolicy->l_flock.lfw_pid = lpolicy->l_flock.pid;
+ wpolicy->l_flock.lfw_owner = lpolicy->l_flock.owner;
+}
- if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
- LDLM_FL_BLOCK_CONV)))
- goto granted;
+/*
+ * Export handle<->flock hash operations.
+ */
+static unsigned
+ldlm_export_flock_hash(cfs_hash_t *hs, const void *key, unsigned mask)
+{
+ return cfs_hash_u64_hash(*(__u64 *)key, mask);
+}
- LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, "
- "sleeping");
+static void *
+ldlm_export_flock_key(cfs_hlist_node_t *hnode)
+{
+ struct ldlm_lock *lock;
- ldlm_lock_dump(D_DLMTRACE, lock, 0);
- fwd.fwd_lock = lock;
- obd = class_exp2obd(lock->l_conn_export);
+ lock = cfs_hlist_entry(hnode, struct ldlm_lock, l_exp_flock_hash);
+ return &lock->l_policy_data.l_flock.owner;
+}
- CDEBUG(D_DLMTRACE, "flags: 0x%x master: %p obd: %p\n",
- flags, master_obd, obd);
+static int
+ldlm_export_flock_keycmp(const void *key, cfs_hlist_node_t *hnode)
+{
+ return !memcmp(ldlm_export_flock_key(hnode), key, sizeof(__u64));
+}
- /* if this is a local lock, then there is no import */
- if (obd != NULL)
- imp = obd->u.cli.cl_import;
+static void *
+ldlm_export_flock_object(cfs_hlist_node_t *hnode)
+{
+ return cfs_hlist_entry(hnode, struct ldlm_lock, l_exp_flock_hash);
+}
- if (imp != NULL) {
- spin_lock_irqsave(&imp->imp_lock, irqflags);
- fwd.fwd_generation = imp->imp_generation;
- spin_unlock_irqrestore(&imp->imp_lock, irqflags);
- }
+static void
+ldlm_export_flock_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+{
+ struct ldlm_lock *lock;
+ struct ldlm_flock *flock;
- lwi = LWI_TIMEOUT_INTR(ldlm_deadlock_timeout, NULL,
- ldlm_flock_interrupted_wait, &fwd);
-
- restart:
- rc = l_wait_event(lock->l_waitq,
- ((lock->l_req_mode == lock->l_granted_mode) ||
- lock->l_destroyed), &lwi);
-
- if (rc == -ETIMEDOUT) {
- deadlock_checked = 1;
- rc = ldlm_flock_deadlock_check(master_obd, obd, lock);
- if (rc == -EDEADLK)
- ldlm_flock_interrupted_wait(&fwd);
- else {
- CDEBUG(D_DLMTRACE, "lock: %p going back to sleep,\n",
- lock);
- goto restart;
- }
- } else {
- if (deadlock_checked)
- ldlm_flock_deadlock_check(master_obd, NULL, lock);
- }
+ lock = cfs_hlist_entry(hnode, struct ldlm_lock, l_exp_flock_hash);
+ LDLM_LOCK_GET(lock);
- LDLM_DEBUG(lock, "client-side enqueue waking up: rc = %d", rc);
- RETURN(rc);
-
- granted:
- LDLM_DEBUG(lock, "client-side enqueue granted");
- lock_res_and_lock(lock);
+ flock = &lock->l_policy_data.l_flock;
+ LASSERT(flock->blocking_export != NULL);
+ class_export_get(flock->blocking_export);
+ flock->blocking_refs++;
+}
- /* ldlm_lock_enqueue() has already placed lock on the granted list. */
- list_del_init(&lock->l_res_link);
+static void
+ldlm_export_flock_put(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+{
+ struct ldlm_lock *lock;
+ struct ldlm_flock *flock;
+
+ lock = cfs_hlist_entry(hnode, struct ldlm_lock, l_exp_flock_hash);
+ LDLM_LOCK_RELEASE(lock);
+
+ flock = &lock->l_policy_data.l_flock;
+ LASSERT(flock->blocking_export != NULL);
+ class_export_put(flock->blocking_export);
+ if (--flock->blocking_refs == 0) {
+ flock->blocking_owner = 0;
+ flock->blocking_export = NULL;
+ }
+}
- if (flags & LDLM_FL_TEST_LOCK) {
- /* client side - set flag to prevent sending a CANCEL */
- lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_CBPENDING;
- } else {
- int noreproc = LDLM_FL_WAIT_NOREPROC;
+static cfs_hash_ops_t ldlm_export_flock_ops = {
+ .hs_hash = ldlm_export_flock_hash,
+ .hs_key = ldlm_export_flock_key,
+ .hs_keycmp = ldlm_export_flock_keycmp,
+ .hs_object = ldlm_export_flock_object,
+ .hs_get = ldlm_export_flock_get,
+ .hs_put = ldlm_export_flock_put,
+ .hs_put_locked = ldlm_export_flock_put,
+};
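+
+/*
+ * Note on the get/put ops above: each hash reference pins not only the
+ * lock but also flock->blocking_export, so the export a blocked lock
+ * points at stays valid for the whole deadlock scan; the final put
+ * drops that pin and clears the blocking fields.
+ */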
- /* We need to reprocess the lock to do merges or splits
- * with existing locks owned by this process. */
- ldlm_process_flock_lock(lock, &noreproc, 1, &err, NULL);
- if (flags == 0)
- wake_up(&lock->l_waitq);
- }
+int ldlm_init_flock_export(struct obd_export *exp)
+{
+	if (strcmp(exp->exp_obd->obd_type->typ_name, LUSTRE_MDT_NAME) != 0)
+ RETURN(0);
+
+ exp->exp_flock_hash =
+ cfs_hash_create(obd_uuid2str(&exp->exp_client_uuid),
+ HASH_EXP_LOCK_CUR_BITS,
+ HASH_EXP_LOCK_MAX_BITS,
+ HASH_EXP_LOCK_BKT_BITS, 0,
+ CFS_HASH_MIN_THETA, CFS_HASH_MAX_THETA,
+ &ldlm_export_flock_ops,
+ CFS_HASH_DEFAULT | CFS_HASH_NBLK_CHANGE);
+ if (!exp->exp_flock_hash)
+ RETURN(-ENOMEM);
+
+ RETURN(0);
+}
+EXPORT_SYMBOL(ldlm_init_flock_export);
- unlock_res_and_lock(lock);
- RETURN(0);
+void ldlm_destroy_flock_export(struct obd_export *exp)
+{
+ ENTRY;
+ if (exp->exp_flock_hash) {
+ cfs_hash_putref(exp->exp_flock_hash);
+ exp->exp_flock_hash = NULL;
+ }
+ EXIT;
}
+EXPORT_SYMBOL(ldlm_destroy_flock_export);