-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
* GPL HEADER START
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011 Whamcloud, Inc.
- *
+ * Copyright (c) 2011, 2012, Whamcloud, Inc.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
*/
#define DEBUG_SUBSYSTEM S_CLASS
-#ifndef EXPORT_SYMTAB
-# define EXPORT_SYMTAB
-#endif
#include <obd_class.h>
#include <obd_support.h>
struct cl_object_header *head;
struct cl_object *obj;
struct cl_lock *lock;
- int ok;
obj = need->cld_obj;
head = cl_object_header(obj);
cl_lock_mutex_get(env, lock);
if (lock->cll_state == CLS_INTRANSIT)
cl_lock_state_wait(env, lock); /* Don't care return value. */
- if (lock->cll_state == CLS_CACHED) {
- int result;
- result = cl_use_try(env, lock, 1);
- if (result < 0)
- cl_lock_error(env, lock, result);
- }
- ok = lock->cll_state == CLS_HELD;
- if (ok) {
- cl_lock_hold_add(env, lock, scope, source);
- cl_lock_user_add(env, lock);
- cl_lock_put(env, lock);
- }
- cl_lock_mutex_put(env, lock);
- if (!ok) {
+ cl_lock_hold_add(env, lock, scope, source);
+ cl_lock_user_add(env, lock);
+ if (lock->cll_state == CLS_CACHED)
+ cl_use_try(env, lock, 1);
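+ /* If the cached lock could be brought back to CLS_HELD it is
+ * returned to the caller; otherwise back out the hold and user
+ * added above and return no lock. */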
+ if (lock->cll_state == CLS_HELD) {
+ cl_lock_mutex_put(env, lock);
+ cl_lock_lockdep_acquire(env, lock, 0);
+ cl_lock_put(env, lock);
+ } else {
+ cl_unuse_try(env, lock);
+ cl_lock_unhold(env, lock, scope, source);
+ cl_lock_mutex_put(env, lock);
cl_lock_put(env, lock);
lock = NULL;
}
}
}
-static void cl_lock_hold_release(const struct lu_env *env, struct cl_lock *lock,
- const char *scope, const void *source)
+void cl_lock_hold_release(const struct lu_env *env, struct cl_lock *lock,
+ const char *scope, const void *source)
{
LINVRNT(cl_lock_is_mutexed(lock));
LINVRNT(cl_lock_invariant(env, lock));
lu_ref_del(&lock->cll_holders, scope, source);
cl_lock_hold_mod(env, lock, -1);
if (lock->cll_holds == 0) {
- if (lock->cll_descr.cld_mode == CLM_PHANTOM ||
- lock->cll_descr.cld_mode == CLM_GROUP)
+ CL_LOCK_ASSERT(lock->cll_state != CLS_HELD, env, lock);
+ if (lock->cll_descr.cld_mode == CLM_PHANTOM ||
+ lock->cll_descr.cld_mode == CLM_GROUP ||
+ lock->cll_state != CLS_CACHED)
/*
* If lock is still phantom or grouplock when user is
* done with it---destroy the lock.
}
EXIT;
}
-
+EXPORT_SYMBOL(cl_lock_hold_release);
/**
* Waits until lock state is changed.
cl_lock_mutex_put(env, lock);
LASSERT(cl_lock_nr_mutexed(env) == 0);
- cfs_waitq_wait(&waiter, CFS_TASK_INTERRUPTIBLE);
+
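+ /* An injected OBD_FAIL_LOCK_STATE_WAIT_INTR failure simulates an
+ * interrupted sleep; otherwise sleep interruptibly and report
+ * -EINTR only when a signal is actually pending. */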
+ result = -EINTR;
+ if (likely(!OBD_FAIL_CHECK(OBD_FAIL_LOCK_STATE_WAIT_INTR))) {
+ cfs_waitq_wait(&waiter, CFS_TASK_INTERRUPTIBLE);
+ if (!cfs_signal_pending())
+ result = 0;
+ }
cl_lock_mutex_get(env, lock);
cfs_set_current_state(CFS_TASK_RUNNING);
cfs_waitq_del(&lock->cll_wq, &waiter);
- result = cfs_signal_pending() ? -EINTR : 0;
/* Restore old blocked signals */
cfs_restore_sigs(blocked);
ENTRY;
cl_lock_trace(D_DLMTRACE, env, "enqueue lock", lock);
do {
- result = 0;
-
LINVRNT(cl_lock_is_mutexed(lock));
- if (lock->cll_error != 0)
+ result = lock->cll_error;
+ if (result != 0)
break;
+
switch (lock->cll_state) {
case CLS_NEW:
cl_lock_state_set(env, lock, CLS_QUEUING);
case CLS_QUEUING:
/* kick layers. */
result = cl_enqueue_kick(env, lock, io, flags);
- if (result == 0)
+ /* In the AGL case, cl_lock::cll_state may
+ * already have become CLS_HELD. */
+ if (result == 0 && lock->cll_state == CLS_QUEUING)
cl_lock_state_set(env, lock, CLS_ENQUEUED);
break;
case CLS_INTRANSIT:
LBUG();
}
} while (result == CLO_REPEAT);
- if (result < 0)
- cl_lock_error(env, lock, result);
- RETURN(result ?: lock->cll_error);
+ RETURN(result);
}
EXPORT_SYMBOL(cl_enqueue_try);
LASSERT(cl_lock_nr_mutexed(env) == 0);
cl_lock_mutex_get(env, conflict);
+ cl_lock_trace(D_DLMTRACE, env, "enqueue wait", conflict);
cl_lock_cancel(env, conflict);
cl_lock_delete(env, conflict);
}
break;
} while (1);
- if (result != 0) {
- cl_lock_user_del(env, lock);
- cl_lock_error(env, lock, result);
- }
- LASSERT(ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
+ if (result != 0)
+ cl_unuse_try(env, lock);
+ LASSERT(ergo(result == 0 && !(enqflags & CEF_AGL),
+ lock->cll_state == CLS_ENQUEUED ||
lock->cll_state == CLS_HELD));
RETURN(result);
}
/**
* Tries to unlock a lock.
*
- * This function is called repeatedly by cl_unuse() until either lock is
- * unlocked, or error occurs.
- * cl_unuse_try is a one-shot operation, so it must NOT return CLO_WAIT.
- *
- * \pre lock->cll_state == CLS_HELD
+ * This function is called to release the underlying resource:
+ * 1. for a top lock, the resource is the sublocks it holds;
+ * 2. for a sublock, the resource is the reference to the dlmlock.
*
- * \post ergo(result == 0, lock->cll_state == CLS_CACHED)
+ * cl_unuse_try is a one-shot operation, so it must NOT return CLO_WAIT.
*
* \see cl_unuse() cl_lock_operations::clo_unuse()
* \see cl_lock_state::CLS_CACHED
ENTRY;
cl_lock_trace(D_DLMTRACE, env, "unuse lock", lock);
- LASSERT(lock->cll_state == CLS_HELD || lock->cll_state == CLS_ENQUEUED);
if (lock->cll_users > 1) {
cl_lock_user_del(env, lock);
RETURN(0);
}
+ /* A lock can hold underlying resources only while it is in the
+ * CLS_HELD or CLS_ENQUEUED state. */
+ if (!(lock->cll_state == CLS_HELD || lock->cll_state == CLS_ENQUEUED)) {
+ cl_lock_user_del(env, lock);
+ RETURN(0);
+ }
+
/*
* New lock users (->cll_users) are not protecting unlocking
* from proceeding. From this point, lock eventually reaches
result = 0;
} else {
CERROR("result = %d, this is unlikely!\n", result);
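+ /* Unexpected failure while unusing the lock: force it back to
+ * CLS_NEW when leaving the transit state. */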
+ state = CLS_NEW;
cl_lock_extransit(env, lock, state);
}
-
- result = result ?: lock->cll_error;
- if (result < 0)
- cl_lock_error(env, lock, result);
- RETURN(result);
+ RETURN(result ?: lock->cll_error);
}
EXPORT_SYMBOL(cl_unuse_try);
do {
LINVRNT(cl_lock_is_mutexed(lock));
LINVRNT(cl_lock_invariant(env, lock));
- LASSERT(lock->cll_state == CLS_ENQUEUED ||
- lock->cll_state == CLS_HELD ||
- lock->cll_state == CLS_INTRANSIT);
+ LASSERTF(lock->cll_state == CLS_QUEUING ||
+ lock->cll_state == CLS_ENQUEUED ||
+ lock->cll_state == CLS_HELD ||
+ lock->cll_state == CLS_INTRANSIT,
+ "lock state: %d\n", lock->cll_state);
LASSERT(lock->cll_users > 0);
LASSERT(lock->cll_holds > 0);
- result = 0;
- if (lock->cll_error != 0)
+ result = lock->cll_error;
+ if (result != 0)
break;
if (cl_lock_is_intransit(lock)) {
cl_lock_state_set(env, lock, CLS_HELD);
}
} while (result == CLO_REPEAT);
- RETURN(result ?: lock->cll_error);
+ RETURN(result);
}
EXPORT_SYMBOL(cl_wait_try);
break;
} while (1);
if (result < 0) {
- cl_lock_user_del(env, lock);
- cl_lock_error(env, lock, result);
+ cl_unuse_try(env, lock);
cl_lock_lockdep_release(env, lock);
}
cl_lock_trace(D_DLMTRACE, env, "wait lock", lock);
LINVRNT(cl_lock_invariant(env, lock));
ENTRY;
- cl_lock_trace(D_DLMTRACE, env, "set lock error", lock);
if (lock->cll_error == 0 && error != 0) {
+ cl_lock_trace(D_DLMTRACE, env, "set lock error", lock);
lock->cll_error = error;
cl_lock_signal(env, lock);
cl_lock_cancel(env, lock);
EXPORT_SYMBOL(cl_lock_cancel);
/**
- * Finds an existing lock covering given page and optionally different from a
+ * Finds an existing lock covering the given index and optionally different from a
* given \a except lock.
*/
-struct cl_lock *cl_lock_at_page(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, struct cl_lock *except,
- int pending, int canceld)
+struct cl_lock *cl_lock_at_pgoff(const struct lu_env *env,
+ struct cl_object *obj, pgoff_t index,
+ struct cl_lock *except,
+ int pending, int canceld)
{
struct cl_object_header *head;
struct cl_lock *scan;
need->cld_mode = CLM_READ; /* CLM_READ matches both READ & WRITE, but
* not PHANTOM */
- need->cld_start = need->cld_end = page->cp_index;
+ need->cld_start = need->cld_end = index;
need->cld_enq_flags = 0;
cfs_spin_lock(&head->coh_lock_guard);
cfs_spin_unlock(&head->coh_lock_guard);
RETURN(lock);
}
-EXPORT_SYMBOL(cl_lock_at_page);
+EXPORT_SYMBOL(cl_lock_at_pgoff);
/**
- * Returns a list of pages protected (only) by a given lock.
- *
- * Scans an extent of page radix tree, corresponding to the \a lock and queues
- * all pages that are not protected by locks other than \a lock into \a queue.
+ * Calculate the page offset at the layer of @lock.
+ * At the time of this writing, @page is the top page and @lock is a sublock.
*/
-void cl_lock_page_list_fixup(const struct lu_env *env,
- struct cl_io *io, struct cl_lock *lock,
- struct cl_page_list *queue)
+static pgoff_t pgoff_at_lock(struct cl_page *page, struct cl_lock *lock)
{
- struct cl_page *page;
- struct cl_page *temp;
- struct cl_page_list *plist = &cl_env_info(env)->clt_list;
-
- LINVRNT(cl_lock_invariant(env, lock));
- ENTRY;
+ struct lu_device_type *dtype;
+ const struct cl_page_slice *slice;
- /* No need to fix for WRITE lock because it is exclusive. */
- if (lock->cll_descr.cld_mode >= CLM_WRITE)
- RETURN_EXIT;
+ dtype = lock->cll_descr.cld_obj->co_lu.lo_dev->ld_type;
+ slice = cl_page_at(page, dtype);
+ LASSERT(slice != NULL);
+ return slice->cpl_page->cp_index;
+}
- /* For those pages who are still covered by other PR locks, we should
- * not discard them otherwise a [0, EOF) PR lock will discard all
- * pages.
- */
- cl_page_list_init(plist);
- cl_page_list_for_each_safe(page, temp, queue) {
- pgoff_t idx = page->cp_index;
- struct cl_lock *found;
- struct cl_lock_descr *descr;
-
- /* The algorithm counts on the index-ascending page index. */
- LASSERT(ergo(&temp->cp_batch != &queue->pl_pages,
- page->cp_index < temp->cp_index));
-
- found = cl_lock_at_page(env, lock->cll_descr.cld_obj,
- page, lock, 1, 0);
- if (found == NULL)
- continue;
-
- descr = &found->cll_descr;
- cfs_list_for_each_entry_safe_from(page, temp, &queue->pl_pages,
- cp_batch) {
- idx = page->cp_index;
- if (descr->cld_start > idx || descr->cld_end < idx)
- break;
- cl_page_list_move(plist, queue, page);
+/**
+ * Check whether @page is covered by another lock; if not, discard it.
+ */
+static int check_and_discard_cb(const struct lu_env *env, struct cl_io *io,
+ struct cl_page *page, void *cbdata)
+{
+ struct cl_thread_info *info = cl_env_info(env);
+ struct cl_lock *lock = cbdata;
+ pgoff_t index = pgoff_at_lock(page, lock);
+
+ if (index >= info->clt_fn_index) {
+ struct cl_lock *tmp;
+
+ /* refresh non-overlapped index */
+ tmp = cl_lock_at_page(env, lock->cll_descr.cld_obj, page, lock,
+ 1, 0);
+ if (tmp != NULL) {
+ /* Cache the first non-overlapped index so as to skip
+ * all pages within [index, clt_fn_index). This is
+ * safe because if the tmp lock is cancelled, it will
+ * discard these pages. */
+ info->clt_fn_index = tmp->cll_descr.cld_end + 1;
+ if (tmp->cll_descr.cld_end == CL_PAGE_EOF)
+ info->clt_fn_index = CL_PAGE_EOF;
+ cl_lock_put(env, tmp);
+ } else if (cl_page_own(env, io, page) == 0) {
+ /* discard the page */
+ cl_page_unmap(env, io, page);
+ cl_page_discard(env, io, page);
+ cl_page_disown(env, io, page);
+ } else {
+ LASSERT(page->cp_state == CPS_FREEING);
}
- cl_lock_put(env, found);
}
- /* The pages in plist are covered by other locks, don't handle them
- * this time.
- */
- if (io != NULL)
- cl_page_list_disown(env, io, plist);
- cl_page_list_fini(env, plist);
- EXIT;
+ info->clt_next_index = index + 1;
+ return CLP_GANG_OKAY;
+}
+
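+/**
+ * Discard callback used when the lock mode is CLM_WRITE or stronger. Such a
+ * lock is exclusive, so every page it covers is discarded without checking
+ * for other locks.
+ */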
+static int discard_cb(const struct lu_env *env, struct cl_io *io,
+ struct cl_page *page, void *cbdata)
+{
+ struct cl_thread_info *info = cl_env_info(env);
+ struct cl_lock *lock = cbdata;
+
+ LASSERT(lock->cll_descr.cld_mode >= CLM_WRITE);
+ KLASSERT(ergo(page->cp_type == CPT_CACHEABLE,
+ !PageWriteback(cl_page_vmpage(env, page))));
+ KLASSERT(ergo(page->cp_type == CPT_CACHEABLE,
+ !PageDirty(cl_page_vmpage(env, page))));
+
+ info->clt_next_index = pgoff_at_lock(page, lock) + 1;
+ if (cl_page_own(env, io, page) == 0) {
+ /* discard the page */
+ cl_page_unmap(env, io, page);
+ cl_page_discard(env, io, page);
+ cl_page_disown(env, io, page);
+ } else {
+ LASSERT(page->cp_state == CPS_FREEING);
+ }
+
+ return CLP_GANG_OKAY;
}
-EXPORT_SYMBOL(cl_lock_page_list_fixup);
/**
- * Invalidate pages protected by the given lock, sending them out to the
- * server first, if necessary.
- *
- * This function does the following:
- *
- * - collects a list of pages to be invalidated,
- *
- * - unmaps them from the user virtual memory,
- *
- * - sends dirty pages to the server,
- *
- * - waits for transfer completion,
- *
- * - discards pages, and throws them out of memory.
- *
- * If \a discard is set, pages are discarded without sending them to the
- * server.
+ * Discard pages protected by the given lock. This function traverses the
+ * radix tree to find all covered pages and discards them. A page that is
+ * still covered by another lock remains in the cache.
*
* If error happens on any step, the process continues anyway (the reasoning
* behind this being that lock cancellation cannot be delayed indefinitely).
*/
-int cl_lock_page_out(const struct lu_env *env, struct cl_lock *lock,
- int discard)
+int cl_lock_discard_pages(const struct lu_env *env, struct cl_lock *lock)
{
struct cl_thread_info *info = cl_env_info(env);
struct cl_io *io = &info->clt_io;
- struct cl_2queue *queue = &info->clt_queue;
struct cl_lock_descr *descr = &lock->cll_descr;
- struct lu_device_type *dtype;
- long page_count;
- pgoff_t next_index;
+ cl_page_gang_cb_t cb;
int res;
int result;
ENTRY;
io->ci_obj = cl_object_top(descr->cld_obj);
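+ /* This MISC io is only used to walk the page cache; it does not
+ * depend on the file layout. */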
+ io->ci_ignore_layout = 1;
result = cl_io_init(env, io, CIT_MISC, io->ci_obj);
if (result != 0)
GOTO(out, result);
- dtype = descr->cld_obj->co_lu.lo_dev->ld_type;
- next_index = descr->cld_start;
+ cb = descr->cld_mode == CLM_READ ? check_and_discard_cb : discard_cb;
+ info->clt_fn_index = info->clt_next_index = descr->cld_start;
do {
- const struct cl_page_slice *slice;
-
- cl_2queue_init(queue);
res = cl_page_gang_lookup(env, descr->cld_obj, io,
- next_index, descr->cld_end,
- &queue->c2_qin);
- page_count = queue->c2_qin.pl_nr;
- if (page_count == 0)
- break;
-
- /* cl_page_gang_lookup() uses subobj and sublock to look for
- * covered pages, but @queue->c2_qin contains the list of top
- * pages. We have to turn the page back to subpage so as to
- * get `correct' next index. -jay */
- slice = cl_page_at(cl_page_list_last(&queue->c2_qin), dtype);
- next_index = slice->cpl_page->cp_index + 1;
-
- result = cl_page_list_unmap(env, io, &queue->c2_qin);
- if (!discard) {
- long timeout = 600; /* 10 minutes. */
- /* for debug purpose, if this request can't be
- * finished in 10 minutes, we hope it can notify us.
- */
- result = cl_io_submit_sync(env, io, CRT_WRITE, queue,
- CRP_CANCEL, timeout);
- if (result)
- CWARN("Writing %lu pages error: %d\n",
- page_count, result);
- }
- cl_lock_page_list_fixup(env, io, lock, &queue->c2_qout);
- cl_2queue_discard(env, io, queue);
- cl_2queue_disown(env, io, queue);
- cl_2queue_fini(env, queue);
-
- if (next_index > descr->cld_end)
+ info->clt_next_index, descr->cld_end,
+ cb, (void *)lock);
+ if (info->clt_next_index > descr->cld_end)
break;
if (res == CLP_GANG_RESCHED)
cl_io_fini(env, io);
RETURN(result);
}
-EXPORT_SYMBOL(cl_lock_page_out);
+EXPORT_SYMBOL(cl_lock_discard_pages);
/**
* Eliminate all locks for a given object.
cl_lock_get_trust(lock);
cfs_spin_unlock(&head->coh_lock_guard);
lu_ref_add(&lock->cll_reference, "prune", cfs_current());
+
+again:
cl_lock_mutex_get(env, lock);
if (lock->cll_state < CLS_FREEING) {
- LASSERT(lock->cll_holds == 0);
- LASSERT(lock->cll_users == 0);
+ LASSERT(lock->cll_users <= 1);
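+ /* Some other thread still holds a user reference on this lock;
+ * drop the mutex and wait for the user count to reach zero,
+ * then retry. */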
+ if (unlikely(lock->cll_users == 1)) {
+ struct l_wait_info lwi = { 0 };
+
+ cl_lock_mutex_put(env, lock);
+ l_wait_event(lock->cll_wq,
+ lock->cll_users == 0,
+ &lwi);
+ goto again;
+ }
+
if (cancel)
cl_lock_cancel(env, lock);
cl_lock_delete(env, lock);
ENTRY;
do {
lock = cl_lock_hold_mutex(env, io, need, scope, source);
- if (!IS_ERR(lock)) {
- rc = cl_enqueue_locked(env, lock, io, enqflags);
- if (rc == 0) {
- if (cl_lock_fits_into(env, lock, need, io)) {
+ if (IS_ERR(lock))
+ break;
+
+ rc = cl_enqueue_locked(env, lock, io, enqflags);
+ if (rc == 0) {
+ if (cl_lock_fits_into(env, lock, need, io)) {
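+ /* A normal caller gets the lock back held. An AGL (CEF_AGL)
+ * caller only needs the enqueue to have succeeded, so flag
+ * that with rc = 1 and release the lock below. */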
+ if (!(enqflags & CEF_AGL)) {
cl_lock_mutex_put(env, lock);
- cl_lock_lockdep_acquire(env,
- lock, enqflags);
+ cl_lock_lockdep_acquire(env, lock,
+ enqflags);
break;
}
- cl_unuse_locked(env, lock);
+ rc = 1;
}
- cl_lock_trace(D_DLMTRACE, env, "enqueue failed", lock);
- cl_lock_hold_release(env, lock, scope, source);
- cl_lock_mutex_put(env, lock);
- lu_ref_del(&lock->cll_reference, scope, source);
- cl_lock_put(env, lock);
+ cl_unuse_locked(env, lock);
+ }
+ cl_lock_trace(D_DLMTRACE, env,
+ rc <= 0 ? "enqueue failed" : "agl succeed", lock);
+ cl_lock_hold_release(env, lock, scope, source);
+ cl_lock_mutex_put(env, lock);
+ lu_ref_del(&lock->cll_reference, scope, source);
+ cl_lock_put(env, lock);
+ if (rc > 0) {
+ LASSERT(enqflags & CEF_AGL);
+ lock = NULL;
+ } else if (rc != 0) {
lock = ERR_PTR(rc);
- } else
- rc = PTR_ERR(lock);
+ }
} while (rc == 0);
RETURN(lock);
}
}
EXPORT_SYMBOL(cl_lock_user_add);
-int cl_lock_user_del(const struct lu_env *env, struct cl_lock *lock)
+void cl_lock_user_del(const struct lu_env *env, struct cl_lock *lock)
{
LINVRNT(cl_lock_is_mutexed(lock));
LINVRNT(cl_lock_invariant(env, lock));
ENTRY;
cl_lock_used_mod(env, lock, -1);
- RETURN(lock->cll_users == 0);
+ if (lock->cll_users == 0)
+ cfs_waitq_broadcast(&lock->cll_wq);
+ EXIT;
}
EXPORT_SYMBOL(cl_lock_user_del);