Whamcloud gitweb: git://git.whamcloud.com/fs/lustre-release.git (blobdiff)
b=21476 fix race between mdt_finish and __mds_lov_synchronize
diff --git a/lustre/ldlm/ldlm_flock.c b/lustre/ldlm/ldlm_flock.c
index 46ff280..914ed4e 100644
--- a/lustre/ldlm/ldlm_flock.c
+++ b/lustre/ldlm/ldlm_flock.c
@@ -63,7 +63,7 @@ static CFS_LIST_HEAD(ldlm_flock_waitq);
 /**
  * Lock protecting access to ldlm_flock_waitq.
  */
-spinlock_t ldlm_flock_waitq_lock = SPIN_LOCK_UNLOCKED;
+cfs_spinlock_t ldlm_flock_waitq_lock = CFS_SPIN_LOCK_UNLOCKED;
 
 int ldlm_flock_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
                             void *data, int flag);
@@ -71,10 +71,10 @@ int ldlm_flock_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
 /**
  * list_for_remaining_safe - iterate over the remaining entries in a list
  * and safeguard against removal of a list entry.
- * @pos:        the &struct list_head to use as a loop counter. pos MUST
+ * \param pos   the &struct list_head to use as a loop counter. pos MUST
  *              have been initialized prior to using it in this macro.
- * @n:          another &struct list_head to use as temporary storage
- * @head:       the head for your list.
+ * \param n     another &struct list_head to use as temporary storage
+ * \param head  the head for your list.
  */
 #define list_for_remaining_safe(pos, n, head) \
         for (n = pos->next; pos != (head); pos = n, n = pos->next)
@@ -105,9 +105,9 @@ ldlm_flock_destroy(struct ldlm_lock *lock, ldlm_mode_t mode, int flags)
                    mode, flags);
 
         /* Safe to not lock here, since it should be empty anyway */
-        LASSERT(list_empty(&lock->l_flock_waitq));
+        LASSERT(cfs_list_empty(&lock->l_flock_waitq));
 
-        list_del_init(&lock->l_res_link);
+        cfs_list_del_init(&lock->l_res_link);
         if (flags == LDLM_FL_WAIT_NOREPROC &&
             !(lock->l_flags & LDLM_FL_FAILED)) {
                 /* client side - set a flag to prevent sending a CANCEL */
@@ -131,9 +131,9 @@ ldlm_flock_deadlock(struct ldlm_lock *req, struct ldlm_lock *blocking_lock)
         pid_t blocking_pid = blocking_lock->l_policy_data.l_flock.pid;
         struct ldlm_lock *lock;
 
-        spin_lock(&ldlm_flock_waitq_lock);
+        cfs_spin_lock(&ldlm_flock_waitq_lock);
 restart:
-        list_for_each_entry(lock, &ldlm_flock_waitq, l_flock_waitq) {
+        cfs_list_for_each_entry(lock, &ldlm_flock_waitq, l_flock_waitq) {
                 if ((lock->l_policy_data.l_flock.pid != blocking_pid) ||
                     (lock->l_export != blocking_export))
                         continue;
@@ -142,25 +142,25 @@ restart:
                 blocking_export = (struct obd_export *)(long)
                         lock->l_policy_data.l_flock.blocking_export;
                 if (blocking_pid == req_pid && blocking_export == req_export) {
-                        spin_unlock(&ldlm_flock_waitq_lock);
+                        cfs_spin_unlock(&ldlm_flock_waitq_lock);
                         return 1;
                 }
 
                 goto restart;
         }
-        spin_unlock(&ldlm_flock_waitq_lock);
+        cfs_spin_unlock(&ldlm_flock_waitq_lock);
 
         return 0;
 }
 
 int
 ldlm_process_flock_lock(struct ldlm_lock *req, int *flags, int first_enq,
-                        ldlm_error_t *err, struct list_head *work_list)
+                        ldlm_error_t *err, cfs_list_t *work_list)
 {
         struct ldlm_resource *res = req->l_resource;
         struct ldlm_namespace *ns = res->lr_namespace;
-        struct list_head *tmp;
-        struct list_head *ownlocks = NULL;
+        cfs_list_t *tmp;
+        cfs_list_t *ownlocks = NULL;
         struct ldlm_lock *lock = NULL;
         struct ldlm_lock *new = req;
         struct ldlm_lock *new2 = NULL;
@@ -192,8 +192,9 @@ reprocess:
         if ((*flags == LDLM_FL_WAIT_NOREPROC) || (mode == LCK_NL)) {
                 /* This loop determines where this processes locks start
                  * in the resource lr_granted list. */
-                list_for_each(tmp, &res->lr_granted) {
-                        lock = list_entry(tmp, struct ldlm_lock, l_res_link);
+                cfs_list_for_each(tmp, &res->lr_granted) {
+                        lock = cfs_list_entry(tmp, struct ldlm_lock,
+                                              l_res_link);
                         if (ldlm_same_flock_owner(lock, req)) {
                                 ownlocks = tmp;
                                 break;
@@ -204,8 +205,9 @@ reprocess:
 
                 /* This loop determines if there are existing locks
                  * that conflict with the new lock request. */
-                list_for_each(tmp, &res->lr_granted) {
-                        lock = list_entry(tmp, struct ldlm_lock, l_res_link);
+                cfs_list_for_each(tmp, &res->lr_granted) {
+                        lock = cfs_list_entry(tmp, struct ldlm_lock,
+                                              l_res_link);
                         if (ldlm_same_flock_owner(lock, req)) {
                                 if (!ownlocks)
@@ -253,10 +255,11 @@ reprocess:
                                 req->l_policy_data.l_flock.blocking_export =
                                         (long)(void *)lock->l_export;
 
-                                LASSERT(list_empty(&req->l_flock_waitq));
-                                spin_lock(&ldlm_flock_waitq_lock);
-                                list_add_tail(&req->l_flock_waitq, &ldlm_flock_waitq);
-                                spin_unlock(&ldlm_flock_waitq_lock);
+                                LASSERT(cfs_list_empty(&req->l_flock_waitq));
+                                cfs_spin_lock(&ldlm_flock_waitq_lock);
+                                cfs_list_add_tail(&req->l_flock_waitq,
+                                                  &ldlm_flock_waitq);
+                                cfs_spin_unlock(&ldlm_flock_waitq_lock);
 
                                 ldlm_resource_add_lock(res, &res->lr_waiting, req);
                                 *flags |= LDLM_FL_BLOCK_GRANTED;
@@ -273,9 +276,9 @@ reprocess:
 
         /* In case we had slept on this lock request take it off of the
          * deadlock detection waitq. */
-        spin_lock(&ldlm_flock_waitq_lock);
-        list_del_init(&req->l_flock_waitq);
-        spin_unlock(&ldlm_flock_waitq_lock);
+        cfs_spin_lock(&ldlm_flock_waitq_lock);
+        cfs_list_del_init(&req->l_flock_waitq);
+        cfs_spin_unlock(&ldlm_flock_waitq_lock);
 
         /* Scan the locks owned by this process that overlap this request.
          * We may have to merge or split existing locks. */
@@ -284,7 +287,7 @@ reprocess:
                 ownlocks = &res->lr_granted;
 
         list_for_remaining_safe(ownlocks, tmp, &res->lr_granted) {
-                lock = list_entry(ownlocks, struct ldlm_lock, l_res_link);
+                lock = cfs_list_entry(ownlocks, struct ldlm_lock, l_res_link);
 
                 if (!ldlm_same_flock_owner(lock, new))
                         break;
@@ -400,12 +403,12 @@ reprocess:
                                 new->l_policy_data.l_flock.end + 1;
                         new2->l_conn_export = lock->l_conn_export;
                         if (lock->l_export != NULL) {
-                                new2->l_export = class_export_lock_get(lock->l_export);
+                                new2->l_export = class_export_lock_get(lock->l_export, new2);
                                 if (new2->l_export->exp_lock_hash &&
-                                    hlist_unhashed(&new2->l_exp_hash))
-                                        lustre_hash_add(new2->l_export->exp_lock_hash,
-                                                        &new2->l_remote_handle,
-                                                        &new2->l_exp_hash);
+                                    cfs_hlist_unhashed(&new2->l_exp_hash))
+                                        cfs_hash_add(new2->l_export->exp_lock_hash,
+                                                     &new2->l_remote_handle,
+                                                     &new2->l_exp_hash);
                         }
                         if (*flags == LDLM_FL_WAIT_NOREPROC)
                                 ldlm_lock_addref_internal_nolock(new2,
@@ -426,7 +429,7 @@ reprocess:
         /* Add req to the granted queue before calling ldlm_reprocess_all(). */
         if (!added) {
-                list_del_init(&req->l_res_link);
+                cfs_list_del_init(&req->l_res_link);
                 /* insert new lock before ownlocks in list. */
                 ldlm_resource_add_lock(res, ownlocks, req);
         }
@@ -482,26 +485,18 @@ static void
 ldlm_flock_interrupted_wait(void *data)
 {
         struct ldlm_lock *lock;
-        struct lustre_handle lockh;
-        int rc;
         ENTRY;
 
         lock = ((struct ldlm_flock_wait_data *)data)->fwd_lock;
 
         /* take lock off the deadlock detection waitq. */
-        spin_lock(&ldlm_flock_waitq_lock);
-        list_del_init(&lock->l_flock_waitq);
-        spin_unlock(&ldlm_flock_waitq_lock);
+        cfs_spin_lock(&ldlm_flock_waitq_lock);
+        cfs_list_del_init(&lock->l_flock_waitq);
+        cfs_spin_unlock(&ldlm_flock_waitq_lock);
 
         /* client side - set flag to prevent lock from being put on lru list */
         lock->l_flags |= LDLM_FL_CBPENDING;
-
-        ldlm_lock_decref_internal(lock, lock->l_req_mode);
-        ldlm_lock2handle(lock, &lockh);
-        rc = ldlm_cli_cancel(&lockh);
-        if (rc != ELDLM_OK)
-                CERROR("ldlm_cli_cancel: %d\n", rc);
-
         EXIT;
 }
@@ -534,17 +529,17 @@ ldlm_flock_completion_ast(struct ldlm_lock *lock, int flags, void *data)
          * references being held, so that it can go away. No point in
          * holding the lock even if app still believes it has it, since
          * server already dropped it anyway. Only for granted locks too. */
-        lock_res_and_lock(lock);
         if ((lock->l_flags & (LDLM_FL_FAILED|LDLM_FL_LOCAL_ONLY)) ==
             (LDLM_FL_FAILED|LDLM_FL_LOCAL_ONLY)) {
-                unlock_res_and_lock(lock);
                 if (lock->l_req_mode == lock->l_granted_mode &&
                     lock->l_granted_mode != LCK_NL &&
                     NULL == data)
                         ldlm_lock_decref_internal(lock, lock->l_req_mode);
+
+                /* Need to wake up the waiter if we were evicted */
+                cfs_waitq_signal(&lock->l_waitq);
                 RETURN(0);
         }
-        unlock_res_and_lock(lock);
 
         LASSERT(flags != LDLM_FL_WAIT_NOREPROC);
@@ -568,9 +563,9 @@ ldlm_flock_completion_ast(struct ldlm_lock *lock, int flags, void *data)
                 imp = obd->u.cli.cl_import;
 
         if (NULL != imp) {
-                spin_lock(&imp->imp_lock);
+                cfs_spin_lock(&imp->imp_lock);
                 fwd.fwd_generation = imp->imp_generation;
-                spin_unlock(&imp->imp_lock);
+                cfs_spin_unlock(&imp->imp_lock);
         }
 
         lwi = LWI_TIMEOUT_INTR(0, NULL, ldlm_flock_interrupted_wait, &fwd);
@@ -587,29 +582,33 @@ ldlm_flock_completion_ast(struct ldlm_lock *lock, int flags, void *data)
 granted:
         OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CP_CB_WAIT, 10);
 
-        lock_res_and_lock(lock);
-        if (lock->l_destroyed || lock->l_flags & LDLM_FL_FAILED) {
+        if (lock->l_destroyed) {
                 LDLM_DEBUG(lock, "client-side enqueue waking up: destroyed");
-                unlock_res(lock->l_resource);
+                RETURN(0);
+        }
+
+        if (lock->l_flags & LDLM_FL_FAILED) {
+                LDLM_DEBUG(lock, "client-side enqueue waking up: failed");
                 RETURN(-EIO);
         }
+
         if (rc) {
                 LDLM_DEBUG(lock, "client-side enqueue waking up: failed (%d)",
                            rc);
-                unlock_res_and_lock(lock);
                 RETURN(rc);
         }
 
         LDLM_DEBUG(lock, "client-side enqueue granted");
 
         /* take lock off the deadlock detection waitq. */
-        spin_lock(&ldlm_flock_waitq_lock);
-        list_del_init(&lock->l_flock_waitq);
-        spin_unlock(&ldlm_flock_waitq_lock);
+        cfs_spin_lock(&ldlm_flock_waitq_lock);
+        cfs_list_del_init(&lock->l_flock_waitq);
+        cfs_spin_unlock(&ldlm_flock_waitq_lock);
 
         /* ldlm_lock_enqueue() has already placed lock on the granted list. */
-        list_del_init(&lock->l_res_link);
+        cfs_list_del_init(&lock->l_res_link);
 
+        lock_res_and_lock(lock);
         if (flags & LDLM_FL_TEST_LOCK) {
                 /* fcntl(F_GETLK) request */
                 /* The old mode was saved in getlk->fl_type so that if the mode
@@ -656,8 +655,8 @@ int ldlm_flock_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
         ns = lock->l_resource->lr_namespace;
 
         /* take lock off the deadlock detection waitq. */
-        spin_lock(&ldlm_flock_waitq_lock);
-        list_del_init(&lock->l_flock_waitq);
-        spin_unlock(&ldlm_flock_waitq_lock);
+        cfs_spin_lock(&ldlm_flock_waitq_lock);
+        cfs_list_del_init(&lock->l_flock_waitq);
+        cfs_spin_unlock(&ldlm_flock_waitq_lock);
 
         RETURN(0);
 }
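
Note on the change pattern: most of this diff is a mechanical rename from bare Linux kernel primitives (spinlock_t, struct list_head, the list_*/hlist_* helpers, and the lustre_hash_* API) to the cfs_-prefixed portability names provided by the libcfs layer, so the same locking and list code can build on Lustre's non-Linux ports. The sketch below shows one plausible way such wrappers map onto the native Linux primitives; the real definitions live in this tree's libcfs headers and may differ, so treat every mapping here as an illustrative assumption, not the actual Lustre source.

/* Hypothetical sketch: how the cfs_ portability wrappers used in this
 * diff could be defined on Linux. An assumed 1:1 mapping for
 * illustration only, NOT the actual libcfs headers. */
#include <linux/spinlock.h>
#include <linux/list.h>

typedef spinlock_t cfs_spinlock_t;       /* assumed alias of the native type */
typedef struct list_head cfs_list_t;     /* assumed alias of the native type */

#define CFS_SPIN_LOCK_UNLOCKED           SPIN_LOCK_UNLOCKED
#define cfs_spin_lock(lk)                spin_lock(lk)
#define cfs_spin_unlock(lk)              spin_unlock(lk)

#define cfs_list_empty(h)                list_empty(h)
#define cfs_list_del_init(e)             list_del_init(e)
#define cfs_list_add_tail(e, h)          list_add_tail(e, h)
#define cfs_list_entry(p, type, member)  list_entry(p, type, member)
#define cfs_list_for_each(p, h)          list_for_each(p, h)
#define cfs_list_for_each_entry(e, h, m) list_for_each_entry(e, h, m)
#define cfs_hlist_unhashed(n)            hlist_unhashed(n)

On other platforms the same cfs_ names would be backed by that platform's primitives, which is why the diff also replaces the Linux-only SPIN_LOCK_UNLOCKED static initializer with CFS_SPIN_LOCK_UNLOCKED. Beyond the rename, the diff makes two behavioral changes visible above: ldlm_flock_interrupted_wait() no longer cancels the lock itself (the decref/ldlm_cli_cancel sequence is removed), and ldlm_flock_completion_ast() now signals l_waitq via cfs_waitq_signal() for locally failed locks so a waiter blocked on an evicted lock is woken, with the destroyed and failed cases split into distinct return paths.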