*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2010, 2012, Intel Corporation.
+ * Copyright (c) 2010, 2017, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
#define DEBUG_SUBSYSTEM S_LDLM
-#ifdef __KERNEL__
+#include <linux/list.h>
#include <lustre_dlm.h>
#include <obd_support.h>
#include <obd_class.h>
#include <lustre_lib.h>
-#include <libcfs/list.h>
-#else
-#include <liblustre.h>
-#include <obd_class.h>
-#endif
#include "ldlm_internal.h"
if (req->l_export == NULL)
return;
- LASSERT(cfs_hlist_unhashed(&req->l_exp_flock_hash));
+ LASSERT(hlist_unhashed(&req->l_exp_flock_hash));
req->l_policy_data.l_flock.blocking_owner =
lock->l_policy_data.l_flock.owner;
req->l_policy_data.l_flock.blocking_export =
lock->l_export;
- req->l_policy_data.l_flock.blocking_refs = 0;
+ atomic_set(&req->l_policy_data.l_flock.blocking_refs, 0);
cfs_hash_add(req->l_export->exp_flock_hash,
&req->l_policy_data.l_flock.owner,
check_res_locked(req->l_resource);
if (req->l_export->exp_flock_hash != NULL &&
- !cfs_hlist_unhashed(&req->l_exp_flock_hash))
+ !hlist_unhashed(&req->l_exp_flock_hash))
cfs_hash_del(req->l_export->exp_flock_hash,
&req->l_policy_data.l_flock.owner,
&req->l_exp_flock_hash);
}
static inline void
-ldlm_flock_destroy(struct ldlm_lock *lock, ldlm_mode_t mode, __u64 flags)
+ldlm_flock_destroy(struct ldlm_lock *lock, enum ldlm_mode mode, __u64 flags)
{
ENTRY;
- LDLM_DEBUG(lock, "ldlm_flock_destroy(mode: %d, flags: "LPX64")",
+ LDLM_DEBUG(lock, "ldlm_flock_destroy(mode: %d, flags: %#llx)",
mode, flags);
/* Safe to not lock here, since it should be empty anyway */
- LASSERT(cfs_hlist_unhashed(&lock->l_exp_flock_hash));
+ LASSERT(hlist_unhashed(&lock->l_exp_flock_hash));
- cfs_list_del_init(&lock->l_res_link);
+ list_del_init(&lock->l_res_link);
if (flags == LDLM_FL_WAIT_NOREPROC) {
/* client side - set a flag to prevent sending a CANCEL */
lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_CBPENDING;
struct obd_export *exp;
};
-static int ldlm_flock_lookup_cb(cfs_hash_t *hs, cfs_hash_bd_t *bd,
- cfs_hlist_node_t *hnode, void *data)
+static int ldlm_flock_lookup_cb(struct cfs_hash *hs, struct cfs_hash_bd *bd,
+ struct hlist_node *hnode, void *data)
{
struct ldlm_flock_lookup_cb_data *cb_data = data;
struct obd_export *exp = cfs_hash_object(hs, hnode);
}
static void ldlm_flock_cancel_on_deadlock(struct ldlm_lock *lock,
- cfs_list_t *work_list)
+ struct list_head *work_list)
{
CDEBUG(D_INFO, "reprocess deadlock req=%p\n", lock);
* This function looks for any conflicts for \a lock in the granted or
* waiting queues. The lock is granted if no conflicts are found in
* either queue.
- *
- * It is also responsible for splitting a lock if a portion of the lock
- * is released.
- *
- * If \a first_enq is 0 (ie, called from ldlm_reprocess_queue):
- * - blocking ASTs have already been sent
- *
- * If \a first_enq is 1 (ie, called from ldlm_lock_enqueue):
- * - blocking ASTs have not been sent yet, so list of conflicting locks
- * would be collected and ASTs sent.
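+ *
+ * It is also responsible for splitting a lock if a portion of the lock
+ * is released.
+ *
+ * If \a intention is not LDLM_PROCESS_ENQUEUE (i.e. called from
+ * ldlm_reprocess_queue), blocking ASTs have already been sent.
+ *
+ * If \a intention is LDLM_PROCESS_ENQUEUE (i.e. called from
+ * ldlm_lock_enqueue), blocking ASTs have not been sent yet, so the
+ * list of conflicting locks is collected and ASTs are sent.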
*/
int
-ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags, int first_enq,
- ldlm_error_t *err, cfs_list_t *work_list)
+ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags,
+ enum ldlm_process_intention intention,
+ enum ldlm_error *err, struct list_head *work_list)
{
- struct ldlm_resource *res = req->l_resource;
- struct ldlm_namespace *ns = ldlm_res_to_ns(res);
- cfs_list_t *tmp;
- cfs_list_t *ownlocks = NULL;
- struct ldlm_lock *lock = NULL;
- struct ldlm_lock *new = req;
- struct ldlm_lock *new2 = NULL;
- ldlm_mode_t mode = req->l_req_mode;
- int local = ns_is_client(ns);
- int added = (mode == LCK_NL);
- int overlaps = 0;
- int splitted = 0;
- const struct ldlm_callback_suite null_cbs = { NULL };
- ENTRY;
+ struct ldlm_resource *res = req->l_resource;
+ struct ldlm_namespace *ns = ldlm_res_to_ns(res);
+ struct list_head *tmp;
+ struct list_head *ownlocks = NULL;
+ struct ldlm_lock *lock = NULL;
+ struct ldlm_lock *new = req;
+ struct ldlm_lock *new2 = NULL;
+ enum ldlm_mode mode = req->l_req_mode;
+ int local = ns_is_client(ns);
+ int added = (mode == LCK_NL);
+ int overlaps = 0;
+ int splitted = 0;
+ const struct ldlm_callback_suite null_cbs = { NULL };
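+ /* Completion-AST work for granted locks goes to grant_work, which
+  * only points at the caller's list when reprocessing; it stays NULL
+  * on first enqueue. */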
+ struct list_head *grant_work = intention == LDLM_PROCESS_ENQUEUE ?
+ NULL : work_list;
+ ENTRY;
- CDEBUG(D_DLMTRACE, "flags "LPX64" owner "LPU64" pid %u mode %u start "
- LPU64" end "LPU64"\n", *flags,
+ CDEBUG(D_DLMTRACE, "flags %#llx owner %llu pid %u mode %u start "
+ "%llu end %llu\n", *flags,
new->l_policy_data.l_flock.owner,
new->l_policy_data.l_flock.pid, mode,
req->l_policy_data.l_flock.start,
if ((*flags == LDLM_FL_WAIT_NOREPROC) || (mode == LCK_NL)) {
/* This loop determines where this process's locks start
* in the resource lr_granted list. */
- cfs_list_for_each(tmp, &res->lr_granted) {
- lock = cfs_list_entry(tmp, struct ldlm_lock,
+ list_for_each(tmp, &res->lr_granted) {
+ lock = list_entry(tmp, struct ldlm_lock,
l_res_link);
if (ldlm_same_flock_owner(lock, req)) {
ownlocks = tmp;
/* This loop determines if there are existing locks
* that conflict with the new lock request. */
- cfs_list_for_each(tmp, &res->lr_granted) {
- lock = cfs_list_entry(tmp, struct ldlm_lock,
+ list_for_each(tmp, &res->lr_granted) {
+ lock = list_entry(tmp, struct ldlm_lock,
l_res_link);
if (ldlm_same_flock_owner(lock, req)) {
if (!ldlm_flocks_overlap(lock, req))
continue;
- if (!first_enq) {
+ if (intention != LDLM_PROCESS_ENQUEUE) {
reprocess_failed = 1;
if (ldlm_flock_deadlock(req, lock)) {
ldlm_flock_cancel_on_deadlock(req,
- work_list);
+ grant_work);
RETURN(LDLM_ITER_CONTINUE);
}
continue;
ownlocks = &res->lr_granted;
list_for_remaining_safe(ownlocks, tmp, &res->lr_granted) {
- lock = cfs_list_entry(ownlocks, struct ldlm_lock, l_res_link);
+ lock = list_entry(ownlocks, struct ldlm_lock, l_res_link);
if (!ldlm_same_flock_owner(lock, new))
break;
if (lock->l_export != NULL) {
new2->l_export = class_export_lock_get(lock->l_export, new2);
if (new2->l_export->exp_lock_hash &&
- cfs_hlist_unhashed(&new2->l_exp_hash))
+ hlist_unhashed(&new2->l_exp_hash))
cfs_hash_add(new2->l_export->exp_lock_hash,
&new2->l_remote_handle,
&new2->l_exp_hash);
/* Add req to the granted queue before calling ldlm_reprocess_all(). */
if (!added) {
- cfs_list_del_init(&req->l_res_link);
+ list_del_init(&req->l_res_link);
/* insert new lock before ownlocks in list. */
ldlm_resource_add_lock(res, ownlocks, req);
}
if (*flags != LDLM_FL_WAIT_NOREPROC) {
#ifdef HAVE_SERVER_SUPPORT
- if (first_enq) {
+ if (intention == LDLM_PROCESS_ENQUEUE) {
/* If this is an unlock, reprocess the waitq and
* send completion ASTs for locks that can now be
* granted. The only problem with doing this
* reprocessing here is that the completion ASTs for
* newly granted locks will be sent before the unlock
* completion is sent. It shouldn't be an issue. Also
* note that ldlm_process_flock_lock() will recurse,
- * but only once because first_enq will be false from
- * ldlm_reprocess_queue. */
- if ((mode == LCK_NL) && overlaps) {
- CFS_LIST_HEAD(rpc_list);
+ * but only once because 'intention' won't be
+ * LDLM_PROCESS_ENQUEUE from ldlm_reprocess_queue. */
+ if ((mode == LCK_NL) && overlaps) {
+ struct list_head rpc_list;
int rc;
+
+ INIT_LIST_HEAD(&rpc_list);
restart:
- ldlm_reprocess_queue(res, &res->lr_waiting,
- &rpc_list);
+ ldlm_reprocess_queue(res, &res->lr_waiting,
+ &rpc_list,
+ LDLM_PROCESS_RESCAN, NULL);
unlock_res_and_lock(req);
rc = ldlm_run_ast_work(ns, &rpc_list,
}
} else {
LASSERT(req->l_completion_ast);
- ldlm_add_ast_work_item(req, NULL, work_list);
+ ldlm_add_ast_work_item(req, NULL, grant_work);
}
#else /* !HAVE_SERVER_SUPPORT */
/* The only possible case where the client side calls the flock
int
ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
{
- struct file_lock *getlk = lock->l_ast_data;
- struct obd_device *obd;
- struct obd_import *imp = NULL;
- struct ldlm_flock_wait_data fwd;
- struct l_wait_info lwi;
- ldlm_error_t err;
- int rc = 0;
- ENTRY;
+ struct file_lock *getlk = lock->l_ast_data;
+ struct obd_device *obd;
+ struct obd_import *imp = NULL;
+ struct ldlm_flock_wait_data fwd;
+ struct l_wait_info lwi;
+ enum ldlm_error err;
+ int rc = 0;
+ ENTRY;
OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CP_CB_WAIT2, 4);
if (OBD_FAIL_PRECHECK(OBD_FAIL_LDLM_CP_CB_WAIT3)) {
unlock_res_and_lock(lock);
OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CP_CB_WAIT3, 4);
}
- CDEBUG(D_DLMTRACE, "flags: "LPX64" data: %p getlk: %p\n",
+ CDEBUG(D_DLMTRACE, "flags: %#llx data: %p getlk: %p\n",
flags, data, getlk);
LASSERT(flags != LDLM_FL_WAIT_NOREPROC);
if (flags & LDLM_FL_FAILED)
goto granted;
- if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
- LDLM_FL_BLOCK_CONV))) {
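+ /* LDLM_FL_BLOCKED_MASK is LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
+  * LDLM_FL_BLOCK_CONV. */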
+ if (!(flags & LDLM_FL_BLOCKED_MASK)) {
if (NULL == data)
/* mds granted the lock in the reply */
goto granted;
RETURN(-EIO);
}
- /* ldlm_lock_enqueue() has already placed lock on the granted list. */
+ /* ldlm_lock_enqueue() has already placed lock on the granted list. */
ldlm_resource_unlink_lock(lock);
/* Import invalidation. We need to actually release the lock
LASSERT(ldlm_is_test_lock(lock));
if (ldlm_is_test_lock(lock) || ldlm_is_flock_deadlock(lock))
- mode = flock_type(getlk);
+ mode = getlk->fl_type;
else
mode = lock->l_granted_mode;
LDLM_DEBUG(lock, "client-side enqueue granted");
if (flags & LDLM_FL_TEST_LOCK) {
- /* fcntl(F_GETLK) request */
- /* The old mode was saved in getlk->fl_type so that if the mode
- * in the lock changes we can decref the appropriate refcount.*/
+ /*
+ * fcntl(F_GETLK) request
+ * The old mode was saved in getlk->fl_type so that if the mode
+ * in the lock changes we can decref the appropriate refcount.
+ */
LASSERT(ldlm_is_test_lock(lock));
- ldlm_flock_destroy(lock, flock_type(getlk),
- LDLM_FL_WAIT_NOREPROC);
+ ldlm_flock_destroy(lock, getlk->fl_type, LDLM_FL_WAIT_NOREPROC);
switch (lock->l_granted_mode) {
case LCK_PR:
- flock_set_type(getlk, F_RDLCK);
+ getlk->fl_type = F_RDLCK;
break;
case LCK_PW:
- flock_set_type(getlk, F_WRLCK);
+ getlk->fl_type = F_WRLCK;
break;
default:
- flock_set_type(getlk, F_UNLCK);
+ getlk->fl_type = F_UNLCK;
}
- flock_set_pid(getlk, (pid_t)lock->l_policy_data.l_flock.pid);
- flock_set_start(getlk,
- (loff_t)lock->l_policy_data.l_flock.start);
- flock_set_end(getlk,
- (loff_t)lock->l_policy_data.l_flock.end);
+ getlk->fl_pid = (pid_t)lock->l_policy_data.l_flock.pid;
+ getlk->fl_start = (loff_t)lock->l_policy_data.l_flock.start;
+ getlk->fl_end = (loff_t)lock->l_policy_data.l_flock.end;
} else {
__u64 noreproc = LDLM_FL_WAIT_NOREPROC;
RETURN(0);
}
-void ldlm_flock_policy_wire18_to_local(const ldlm_wire_policy_data_t *wpolicy,
- ldlm_policy_data_t *lpolicy)
-{
- memset(lpolicy, 0, sizeof(*lpolicy));
- lpolicy->l_flock.start = wpolicy->l_flock.lfw_start;
- lpolicy->l_flock.end = wpolicy->l_flock.lfw_end;
- lpolicy->l_flock.pid = wpolicy->l_flock.lfw_pid;
- /* Compat code, old clients had no idea about owner field and
- * relied solely on pid for ownership. Introduced in LU-104, 2.1,
- * April 2011 */
- lpolicy->l_flock.owner = wpolicy->l_flock.lfw_pid;
-}
-
-
-void ldlm_flock_policy_wire21_to_local(const ldlm_wire_policy_data_t *wpolicy,
- ldlm_policy_data_t *lpolicy)
+void ldlm_flock_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
+ union ldlm_policy_data *lpolicy)
{
- memset(lpolicy, 0, sizeof(*lpolicy));
- lpolicy->l_flock.start = wpolicy->l_flock.lfw_start;
- lpolicy->l_flock.end = wpolicy->l_flock.lfw_end;
- lpolicy->l_flock.pid = wpolicy->l_flock.lfw_pid;
- lpolicy->l_flock.owner = wpolicy->l_flock.lfw_owner;
+ lpolicy->l_flock.start = wpolicy->l_flock.lfw_start;
+ lpolicy->l_flock.end = wpolicy->l_flock.lfw_end;
+ lpolicy->l_flock.pid = wpolicy->l_flock.lfw_pid;
+ lpolicy->l_flock.owner = wpolicy->l_flock.lfw_owner;
}
-void ldlm_flock_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
- ldlm_wire_policy_data_t *wpolicy)
+void ldlm_flock_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
+ union ldlm_wire_policy_data *wpolicy)
{
- memset(wpolicy, 0, sizeof(*wpolicy));
- wpolicy->l_flock.lfw_start = lpolicy->l_flock.start;
- wpolicy->l_flock.lfw_end = lpolicy->l_flock.end;
- wpolicy->l_flock.lfw_pid = lpolicy->l_flock.pid;
- wpolicy->l_flock.lfw_owner = lpolicy->l_flock.owner;
+ memset(wpolicy, 0, sizeof(*wpolicy));
+ wpolicy->l_flock.lfw_start = lpolicy->l_flock.start;
+ wpolicy->l_flock.lfw_end = lpolicy->l_flock.end;
+ wpolicy->l_flock.lfw_pid = lpolicy->l_flock.pid;
+ wpolicy->l_flock.lfw_owner = lpolicy->l_flock.owner;
}
/*
* Export handle<->flock hash operations.
*/
static unsigned
-ldlm_export_flock_hash(cfs_hash_t *hs, const void *key, unsigned mask)
+ldlm_export_flock_hash(struct cfs_hash *hs, const void *key, unsigned mask)
{
return cfs_hash_u64_hash(*(__u64 *)key, mask);
}
static void *
-ldlm_export_flock_key(cfs_hlist_node_t *hnode)
+ldlm_export_flock_key(struct hlist_node *hnode)
{
struct ldlm_lock *lock;
- lock = cfs_hlist_entry(hnode, struct ldlm_lock, l_exp_flock_hash);
+ lock = hlist_entry(hnode, struct ldlm_lock, l_exp_flock_hash);
return &lock->l_policy_data.l_flock.owner;
}
static int
-ldlm_export_flock_keycmp(const void *key, cfs_hlist_node_t *hnode)
+ldlm_export_flock_keycmp(const void *key, struct hlist_node *hnode)
{
return !memcmp(ldlm_export_flock_key(hnode), key, sizeof(__u64));
}
static void *
-ldlm_export_flock_object(cfs_hlist_node_t *hnode)
+ldlm_export_flock_object(struct hlist_node *hnode)
{
- return cfs_hlist_entry(hnode, struct ldlm_lock, l_exp_flock_hash);
+ return hlist_entry(hnode, struct ldlm_lock, l_exp_flock_hash);
}
static void
-ldlm_export_flock_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+ldlm_export_flock_get(struct cfs_hash *hs, struct hlist_node *hnode)
{
struct ldlm_lock *lock;
struct ldlm_flock *flock;
- lock = cfs_hlist_entry(hnode, struct ldlm_lock, l_exp_flock_hash);
+ lock = hlist_entry(hnode, struct ldlm_lock, l_exp_flock_hash);
LDLM_LOCK_GET(lock);
flock = &lock->l_policy_data.l_flock;
LASSERT(flock->blocking_export != NULL);
class_export_get(flock->blocking_export);
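+ /* blocking_refs is an atomic counter, so concurrent get/put callbacks
+  * need no additional serialization for it here. */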
- flock->blocking_refs++;
+ atomic_inc(&flock->blocking_refs);
}
static void
-ldlm_export_flock_put(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+ldlm_export_flock_put(struct cfs_hash *hs, struct hlist_node *hnode)
{
struct ldlm_lock *lock;
struct ldlm_flock *flock;
- lock = cfs_hlist_entry(hnode, struct ldlm_lock, l_exp_flock_hash);
- LDLM_LOCK_RELEASE(lock);
+ lock = hlist_entry(hnode, struct ldlm_lock, l_exp_flock_hash);
flock = &lock->l_policy_data.l_flock;
LASSERT(flock->blocking_export != NULL);
class_export_put(flock->blocking_export);
- if (--flock->blocking_refs == 0) {
+ if (atomic_dec_and_test(&flock->blocking_refs)) {
flock->blocking_owner = 0;
flock->blocking_export = NULL;
}
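+ /* Drop the lock reference only after the flock fields above have
+  * been cleared, in case this is the last reference and the lock
+  * would otherwise be freed while still in use. */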
+ LDLM_LOCK_RELEASE(lock);
}
-static cfs_hash_ops_t ldlm_export_flock_ops = {
+static struct cfs_hash_ops ldlm_export_flock_ops = {
.hs_hash = ldlm_export_flock_hash,
.hs_key = ldlm_export_flock_key,
.hs_keycmp = ldlm_export_flock_keycmp,
RETURN(0);
}
-EXPORT_SYMBOL(ldlm_init_flock_export);
void ldlm_destroy_flock_export(struct obd_export *exp)
{
}
EXIT;
}
-EXPORT_SYMBOL(ldlm_destroy_flock_export);