#define l_flock_waitq l_lru
-static struct list_head ldlm_flock_waitq = CFS_LIST_HEAD_INIT(ldlm_flock_waitq);
+static CFS_LIST_HEAD(ldlm_flock_waitq);
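+/* Protects ldlm_flock_waitq; deadlock detection walks the list
+ * under this lock rather than under any resource lock. */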
+spinlock_t ldlm_flock_waitq_lock = SPIN_LOCK_UNLOCKED;
int ldlm_flock_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
void *data, int flag);
LDLM_DEBUG(lock, "ldlm_flock_destroy(mode: %d, flags: 0x%x)",
mode, flags);
+ /* No need for the waitq lock here; the lock should already be off the list */
LASSERT(list_empty(&lock->l_flock_waitq));
list_del_init(&lock->l_res_link);
pid_t blocking_pid = blocking_lock->l_policy_data.l_flock.pid;
struct ldlm_lock *lock;
+ spin_lock(&ldlm_flock_waitq_lock);
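+ /* Follow the chain of blocked locks: each pass looks up the
+ * lock that the current blocker is itself waiting on. */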
restart:
list_for_each_entry(lock, &ldlm_flock_waitq, l_flock_waitq) {
if ((lock->l_policy_data.l_flock.pid != blocking_pid) ||
blocking_pid = lock->l_policy_data.l_flock.blocking_pid;
blocking_export = (struct obd_export *)(long)
lock->l_policy_data.l_flock.blocking_export;
- if (blocking_pid == req_pid && blocking_export == req_export)
+ if (blocking_pid == req_pid && blocking_export == req_export) {
+ spin_unlock(&ldlm_flock_waitq_lock);
return 1;
+ }
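+ /* Not a cycle yet; follow the chain one hop further. */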
goto restart;
}
+ spin_unlock(&ldlm_flock_waitq_lock);
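+ /* Chain ended without looping back: no deadlock. */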
return 0;
}
struct ldlm_lock *new = req;
struct ldlm_lock *new2 = NULL;
ldlm_mode_t mode = req->l_req_mode;
- int local = ns->ns_client;
+ int local = ns_is_client(ns);
int added = (mode == LCK_NL);
int overlaps = 0;
ENTRY;
(long)(void *)lock->l_export;
LASSERT(list_empty(&req->l_flock_waitq));
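+ /* Make this request visible to the deadlock detector before
+ * we block waiting on it. */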
+ spin_lock(&ldlm_flock_waitq_lock);
list_add_tail(&req->l_flock_waitq, &ldlm_flock_waitq);
+ spin_unlock(&ldlm_flock_waitq_lock);
ldlm_resource_add_lock(res, &res->lr_waiting, req);
*flags |= LDLM_FL_BLOCK_GRANTED;
/* In case we had slept on this lock request, take it off the
* deadlock detection waitq. */
+ spin_lock(&ldlm_flock_waitq_lock);
list_del_init(&req->l_flock_waitq);
+ spin_unlock(&ldlm_flock_waitq_lock);
/* Scan the locks owned by this process that overlap this request.
* We may have to merge or split existing locks. */
/* XXX - if ldlm_lock_create() can sleep we should
* release the ns_lock, allocate the new lock,
* and restart processing this lock. */
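+ /* new2 will hold the second piece of the existing lock once
+ * it is split around the new request's range. */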
- new2 = ldlm_lock_create(ns, NULL, res->lr_name, LDLM_FLOCK,
+ new2 = ldlm_lock_create(ns, &res->lr_name, LDLM_FLOCK,
lock->l_granted_mode, NULL, NULL, NULL,
NULL, 0);
if (!new2) {
if (*flags != LDLM_FL_WAIT_NOREPROC) {
if (first_enq) {
/* If this is an unlock, reprocess the waitq and
- * send completions ASTs for locks that can now be
+ * send completion ASTs for locks that can now be
* granted. The only problem with doing this
* reprocessing here is that the completion ASTs for
* newly granted locks will be sent before the unlock
* but only once because first_enq will be false from
* ldlm_reprocess_queue. */
if ((mode == LCK_NL) && overlaps) {
- struct list_head rpc_list
- = CFS_LIST_HEAD_INIT(rpc_list);
+ CFS_LIST_HEAD(rpc_list);
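+ /* Locks granted by the reprocessing below queue their AST
+ * work on rpc_list, sent after the resource lock is dropped. */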
int rc;
restart:
ldlm_reprocess_queue(res, &res->lr_waiting,
&rpc_list);
unlock_res(res);
- rc = ldlm_run_bl_ast_work(&rpc_list);
+ rc = ldlm_run_ast_work(&rpc_list, LDLM_WORK_BL_AST);
lock_res(res);
if (rc == -ERESTART)
GOTO(restart, -ERESTART);
{
struct ldlm_lock *lock;
struct lustre_handle lockh;
+ int rc;
ENTRY;
lock = ((struct ldlm_flock_wait_data *)data)->fwd_lock;
/* take lock off the deadlock detection waitq. */
+ spin_lock(&ldlm_flock_waitq_lock);
list_del_init(&lock->l_flock_waitq);
+ spin_unlock(&ldlm_flock_waitq_lock);
/* client side - set flag to prevent lock from being put on lru list */
lock->l_flags |= LDLM_FL_CBPENDING;
ldlm_lock_decref_internal(lock, lock->l_req_mode);
ldlm_lock2handle(lock, &lockh);
- /* coverity[check_return] */
- ldlm_cli_cancel(&lockh);
+ rc = ldlm_cli_cancel(&lockh);
+ if (rc != ELDLM_OK)
+ CERROR("ldlm_cli_cancel: %d\n", rc);
+
EXIT;
}
CDEBUG(D_DLMTRACE, "flags: 0x%x data: %p getlk: %p\n",
flags, data, getlk);
+ /* Import invalidation. We need to actually release the lock
+ * references being held, so that it can go away. No point in
+ * holding the lock even if the app still believes it has it, since
+ * the server has already dropped it anyway. This only applies to
+ * granted locks. */
+ lock_res_and_lock(lock);
+ if ((lock->l_flags & (LDLM_FL_FAILED|LDLM_FL_LOCAL_ONLY)) ==
+ (LDLM_FL_FAILED|LDLM_FL_LOCAL_ONLY)) {
+ unlock_res_and_lock(lock);
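+ /* Only a granted, non-NL lock holds a mode reference to
+ * drop; NL here would mean an unlock request. */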
+ if (lock->l_req_mode == lock->l_granted_mode &&
+ lock->l_granted_mode != LCK_NL)
+ ldlm_lock_decref_internal(lock, lock->l_req_mode);
+ RETURN(0);
+ }
+ unlock_res_and_lock(lock);
+
LASSERT(flags != LDLM_FL_WAIT_NOREPROC);
if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
LDLM_DEBUG(lock, "client-side enqueue waking up: rc = %d", rc);
RETURN(rc);
-
+
granted:
LDLM_DEBUG(lock, "client-side enqueue granted");
lock_res(lock->l_resource);
/* take lock off the deadlock detection waitq. */
+ spin_lock(&ldlm_flock_waitq_lock);
list_del_init(&lock->l_flock_waitq);
+ spin_unlock(&ldlm_flock_waitq_lock);
/* ldlm_lock_enqueue() has already placed lock on the granted list. */
list_del_init(&lock->l_res_link);
cfs_flock_set_type(getlk, F_UNLCK);
}
cfs_flock_set_pid(getlk, (pid_t)lock->l_policy_data.l_flock.pid);
- cfs_flock_set_start(getlk, (off_t)lock->l_policy_data.l_flock.start);
- cfs_flock_set_end(getlk, (off_t)lock->l_policy_data.l_flock.end);
+ cfs_flock_set_start(getlk, (loff_t)lock->l_policy_data.l_flock.start);
+ cfs_flock_set_end(getlk, (loff_t)lock->l_policy_data.l_flock.end);
} else {
int noreproc = LDLM_FL_WAIT_NOREPROC;
ns = lock->l_resource->lr_namespace;
/* take lock off the deadlock detection waitq. */
- lock_res_and_lock(lock);
+ spin_lock(&ldlm_flock_waitq_lock);
list_del_init(&lock->l_flock_waitq);
- unlock_res_and_lock(lock);
+ spin_unlock(&ldlm_flock_waitq_lock);
RETURN(0);
}