* GPL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*/
/*
static int cl_lock_invariant_trusted(const struct lu_env *env,
const struct cl_lock *lock)
{
- return
- cl_is_lock(lock) &&
- ergo(lock->cll_state == CLS_FREEING, lock->cll_holds == 0) &&
+ return ergo(lock->cll_state == CLS_FREEING, lock->cll_holds == 0) &&
cfs_atomic_read(&lock->cll_ref) >= lock->cll_holds &&
lock->cll_holds >= lock->cll_users &&
lock->cll_holds >= 0 &&
const char *func, const int line)
{
struct cl_object_header *h = cl_object_header(lock->cll_descr.cld_obj);
- CDEBUG(level, "%s: %p@(%i %p %i %d %d %d %d %lx)"
- "(%p/%d/%i) at %s():%d\n",
+ CDEBUG(level, "%s: %p@(%d %p %d %d %d %d %d %lx)"
+ "(%p/%d/%d) at %s():%d\n",
prefix, lock, cfs_atomic_read(&lock->cll_ref),
lock->cll_guarder, lock->cll_depth,
lock->cll_state, lock->cll_error, lock->cll_holds,
{
struct cl_object *obj = lock->cll_descr.cld_obj;
- LASSERT(cl_is_lock(lock));
LINVRNT(!cl_lock_is_mutexed(lock));
ENTRY;
{
struct cl_site *site = cl_object_site(lock->cll_descr.cld_obj);
- LASSERT(cl_is_lock(lock));
CDEBUG(D_TRACE, "acquiring trusted reference: %d %p %lu\n",
cfs_atomic_read(&lock->cll_ref), lock, RETIP);
if (cfs_atomic_inc_return(&lock->cll_ref) == 1)
cfs_list_for_each_entry(lock, &head->coh_locks, cll_linkage) {
int matched;
- LASSERT(cl_is_lock(lock));
matched = cl_lock_ext_match(&lock->cll_descr, need) &&
lock->cll_state < CLS_FREEING &&
lock->cll_error == 0 &&
!(lock->cll_flags & CLF_CANCELLED) &&
cl_lock_fits_into(env, lock, need, io);
- CDEBUG(D_DLMTRACE, "has: "DDESCR"(%i) need: "DDESCR": %d\n",
+ CDEBUG(D_DLMTRACE, "has: "DDESCR"(%d) need: "DDESCR": %d\n",
PDESCR(&lock->cll_descr), lock->cll_state, PDESCR(need),
matched);
if (matched) {
int cl_lock_state_wait(const struct lu_env *env, struct cl_lock *lock)
{
cfs_waitlink_t waiter;
+ cfs_sigset_t blocked;
int result;
ENTRY;
cl_lock_trace(D_DLMTRACE, env, "state wait lock", lock);
result = lock->cll_error;
if (result == 0) {
+ /* Block 'non-fatal' signals (SIGCHLD, for instance) temporarily
+ * so that they cannot interrupt the wait.
+ * LU-305 */
+ blocked = cfs_block_sigsinv(LUSTRE_FATAL_SIGS);
+
cfs_waitlink_init(&waiter);
cfs_waitq_add(&lock->cll_wq, &waiter);
cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
cfs_set_current_state(CFS_TASK_RUNNING);
cfs_waitq_del(&lock->cll_wq, &waiter);
result = cfs_signal_pending() ? -EINTR : 0;
+
+ /* Restore old blocked signals */
+ cfs_restore_sigs(blocked);
}
RETURN(result);
}
}
EXPORT_SYMBOL(cl_enqueue_try);
+/**
+ * Cancel the conflicting lock found during a previous enqueue.
+ *
+ * \retval 0 the conflicting lock has been canceled.
+ * \retval -ve error code.
+ */
+int cl_lock_enqueue_wait(const struct lu_env *env,
+ struct cl_lock *lock,
+ int keep_mutex)
+{
+ struct cl_lock *conflict;
+ int rc = 0;
+ ENTRY;
+
+ LASSERT(cl_lock_is_mutexed(lock));
+ LASSERT(lock->cll_state == CLS_QUEUING);
+ LASSERT(lock->cll_conflict != NULL);
+
+ conflict = lock->cll_conflict;
+ lock->cll_conflict = NULL;
+
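+ /* Release this lock's own mutex before taking the conflict's, so
+ * that at most one cl_lock mutex is held at a time (checked by the
+ * assertion below). */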
+ cl_lock_mutex_put(env, lock);
+ LASSERT(cl_lock_nr_mutexed(env) == 0);
+
+ cl_lock_mutex_get(env, conflict);
+ cl_lock_cancel(env, conflict);
+ cl_lock_delete(env, conflict);
+
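+ /* Wait until the conflicting lock reaches CLS_FREEING; bail out
+ * early if the wait is interrupted or the conflict hits an error. */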
+ while (conflict->cll_state != CLS_FREEING) {
+ rc = cl_lock_state_wait(env, conflict);
+ if (rc != 0)
+ break;
+ }
+ cl_lock_mutex_put(env, conflict);
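+ /* Drop the "cancel-wait" reference and the lock reference that
+ * kept the conflicting lock pinned while we waited on it. */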
+ lu_ref_del(&conflict->cll_reference, "cancel-wait", lock);
+ cl_lock_put(env, conflict);
+
+ if (keep_mutex)
+ cl_lock_mutex_get(env, lock);
+
+ LASSERT(rc <= 0);
+ RETURN(rc);
+}
+EXPORT_SYMBOL(cl_lock_enqueue_wait);
+
static int cl_enqueue_locked(const struct lu_env *env, struct cl_lock *lock,
struct cl_io *io, __u32 enqflags)
{
do {
result = cl_enqueue_try(env, lock, io, enqflags);
if (result == CLO_WAIT) {
- result = cl_lock_state_wait(env, lock);
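+ /* If the failed enqueue recorded a conflicting lock, cancel it
+ * and wait for it to go away; otherwise just wait for a state
+ * change on this lock. */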
+ if (lock->cll_conflict != NULL)
+ result = cl_lock_enqueue_wait(env, lock, 1);
+ else
+ result = cl_lock_state_wait(env, lock);
if (result == 0)
continue;
}
LASSERTF(lock->cll_state == CLS_ENQUEUED || lock->cll_state == CLS_HELD,
"Wrong state %d \n", lock->cll_state);
LASSERT(lock->cll_holds > 0);
- cl_lock_trace(D_DLMTRACE, env, "wait lock", lock);
do {
result = cl_wait_try(env, lock);
cl_lock_error(env, lock, result);
cl_lock_lockdep_release(env, lock);
}
+ cl_lock_trace(D_DLMTRACE, env, "wait lock", lock);
cl_lock_mutex_put(env, lock);
LASSERT(ergo(result == 0, lock->cll_state == CLS_HELD));
RETURN(result);
LINVRNT(cl_lock_invariant(env, lock));
ENTRY;
- /* Now, we have a list of cl_pages under the \a lock, we need
- * to check if some of pages are covered by other ldlm lock.
- * If this is the case, they aren't needed to be written out this time.
- *
- * For example, we have A:[0,200] & B:[100,300] PW locks on client, now
- * the latter is to be canceled, this means other client is
- * reading/writing [200,300] since A won't canceled. Actually
- * we just need to write the pages covered by [200,300]. This is safe,
- * since [100,200] is also protected lock A.
- */
+ /* No fixup is needed for a WRITE lock because it is exclusive. */
+ if (lock->cll_descr.cld_mode >= CLM_WRITE)
+ RETURN_EXIT;
+ /* Pages that are still covered by other PR locks must not be
+ * discarded; otherwise a [0, EOF) PR lock would discard all
+ * pages.
+ */
cl_page_list_init(plist);
cl_page_list_for_each_safe(page, temp, queue) {
pgoff_t idx = page->cp_index;
page->cp_index < temp->cp_index));
found = cl_lock_at_page(env, lock->cll_descr.cld_obj,
- page, lock, 0, 0);
+ page, lock, 1, 0);
if (found == NULL)
continue;
struct cl_io *io = &info->clt_io;
struct cl_2queue *queue = &info->clt_queue;
struct cl_lock_descr *descr = &lock->cll_descr;
+ struct lu_device_type *dtype;
long page_count;
- int nonblock = 1, resched;
+ pgoff_t next_index;
+ int res;
int result;
LINVRNT(cl_lock_invariant(env, lock));
if (result != 0)
GOTO(out, result);
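+ /* next_index records where the next cl_page_gang_lookup() pass
+ * should resume within the lock's extent. */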
+ dtype = descr->cld_obj->co_lu.lo_dev->ld_type;
+ next_index = descr->cld_start;
do {
+ const struct cl_page_slice *slice;
+
cl_2queue_init(queue);
- cl_page_gang_lookup(env, descr->cld_obj, io, descr->cld_start,
- descr->cld_end, &queue->c2_qin, nonblock,
- &resched);
+ res = cl_page_gang_lookup(env, descr->cld_obj, io,
+ next_index, descr->cld_end,
+ &queue->c2_qin);
page_count = queue->c2_qin.pl_nr;
- if (page_count > 0) {
- result = cl_page_list_unmap(env, io, &queue->c2_qin);
- if (!discard) {
- long timeout = 600; /* 10 minutes. */
- /* for debug purpose, if this request can't be
- * finished in 10 minutes, we hope it can
- * notify us.
- */
- result = cl_io_submit_sync(env, io, CRT_WRITE,
- queue, CRP_CANCEL,
- timeout);
- if (result)
- CWARN("Writing %lu pages error: %d\n",
- page_count, result);
- }
- cl_lock_page_list_fixup(env, io, lock, &queue->c2_qout);
- cl_2queue_discard(env, io, queue);
- cl_2queue_disown(env, io, queue);
+ if (page_count == 0)
+ break;
+
+ /* cl_page_gang_lookup() uses the subobject and sublock to look for
+ * covered pages, but @queue->c2_qin contains the list of top
+ * pages. We have to map the last top page back to its subpage to
+ * get the correct next index. -jay */
+ slice = cl_page_at(cl_page_list_last(&queue->c2_qin), dtype);
+ next_index = slice->cpl_page->cp_index + 1;
+
+ result = cl_page_list_unmap(env, io, &queue->c2_qin);
+ if (!discard) {
+ long timeout = 600; /* 10 minutes. */
+ /* For debugging purposes: if this request cannot be
+ * finished in 10 minutes, we want to be notified.
+ */
+ result = cl_io_submit_sync(env, io, CRT_WRITE, queue,
+ CRP_CANCEL, timeout);
+ if (result)
+ CWARN("Writing %lu pages error: %d\n",
+ page_count, result);
}
+ cl_lock_page_list_fixup(env, io, lock, &queue->c2_qout);
+ cl_2queue_discard(env, io, queue);
+ cl_2queue_disown(env, io, queue);
cl_2queue_fini(env, queue);
- if (resched)
+ if (next_index > descr->cld_end)
+ break;
+
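+ /* Yield the CPU when the gang lookup asks to be rescheduled and
+ * keep iterating until it reports CLP_GANG_OKAY. */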
+ if (res == CLP_GANG_RESCHED)
cfs_cond_resched();
- } while (resched || nonblock--);
+ } while (res != CLP_GANG_OKAY);
out:
cl_io_fini(env, io);
RETURN(result);
}
EXPORT_SYMBOL(cl_locks_prune);
-/**
- * Returns true if \a addr is an address of an allocated cl_lock. Used in
- * assertions. This check is optimistically imprecise, i.e., it occasionally
- * returns true for the incorrect addresses, but if it returns false, then the
- * address is guaranteed to be incorrect. (Should be named cl_lockp().)
- *
- * \see cl_is_page()
- */
-int cl_is_lock(const void *addr)
-{
- return cfs_mem_is_in_cache(addr, cl_lock_kmem);
-}
-EXPORT_SYMBOL(cl_is_lock);
-
static struct cl_lock *cl_lock_hold_mutex(const struct lu_env *env,
const struct cl_io *io,
const struct cl_lock_descr *need,
const char *scope, const void *source)
{
struct cl_lock *lock;
- const struct lu_fid *fid;
int rc;
- int iter;
__u32 enqflags = need->cld_enq_flags;
ENTRY;
- fid = lu_object_fid(&io->ci_obj->co_lu);
- iter = 0;
do {
lock = cl_lock_hold_mutex(env, io, need, scope, source);
if (!IS_ERR(lock)) {
lock = ERR_PTR(rc);
} else
rc = PTR_ERR(lock);
- iter++;
} while (rc == 0);
RETURN(lock);
}