* Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2012, Whamcloud, Inc.
+ * Copyright (c) 2011, 2012, Intel Corporation.
*/
/*
 * This file is part of Lustre, http://www.lustre.org/
 */
#include <obd_support.h>
#include <lustre_fid.h>
#include <libcfs/list.h>
-/* lu_time_global_{init,fini}() */
-#include <lu_time.h>
-
#include <cl_object.h>
#include "cl_internal.h"
ENTRY;
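/*
 * Slices are popped off the tail (prev) of ci_layers so that they are
 * finalized in the reverse of the order in which cl_io_slice_add()
 * appended them during initialization.
 */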
while (!cfs_list_empty(&io->ci_layers)) {
- slice = container_of(io->ci_layers.next, struct cl_io_slice,
+ slice = container_of(io->ci_layers.prev, struct cl_io_slice,
cis_linkage);
cfs_list_del_init(&slice->cis_linkage);
if (slice->cis_iop->op[io->ci_type].cio_fini != NULL)
switch(io->ci_type) {
case CIT_READ:
case CIT_WRITE:
+ break;
case CIT_FAULT:
case CIT_FSYNC:
LASSERT(!io->ci_need_restart);
break;
+ case CIT_SETATTR:
case CIT_MISC:
/* Check ignore layout change conf */
- LASSERT(ergo(io->ci_ignore_layout, !io->ci_need_restart));
- case CIT_SETATTR:
+ LASSERT(ergo(io->ci_ignore_layout || !io->ci_verify_layout,
+ !io->ci_need_restart));
break;
default:
LBUG();
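/*
 * For reference: ergo() is libcfs's logical-implication helper, so the
 * assertion above reads "if this IO ignores the layout, or does not need
 * it verified, then it must not be requesting a layout restart".  A
 * minimal sketch, assuming the usual libcfs definition:
 *
 *	#define ergo(a, b) (!(a) || (b))
 */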
ENTRY;
- if (io->ci_lockreq == CILR_PEEK) {
- lock = cl_lock_peek(env, io, &link->cill_descr, "io", io);
- if (lock == NULL)
- lock = ERR_PTR(-ENODATA);
- } else
- lock = cl_lock_request(env, io, &link->cill_descr, "io", io);
+ lock = cl_lock_request(env, io, &link->cill_descr, "io", io);
if (!IS_ERR(lock)) {
link->cill_lock = lock;
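/*
 * cl_lock_request() reports failure through the kernel ERR_PTR()
 * convention rather than by returning NULL, hence the IS_ERR() screen
 * above.  A hedged sketch of that convention, per <linux/err.h>:
 *
 *	lock = cl_lock_request(env, io, &link->cill_descr, "io", io);
 *	if (IS_ERR(lock))
 *		result = PTR_ERR(lock);	/* recover the encoded errno */
 */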
LASSERT(page->cp_owner != NULL);
LINVRNT(plist->pl_owner == cfs_current());
- cfs_lockdep_off();
- cfs_mutex_lock(&page->cp_mutex);
- cfs_lockdep_on();
+ lockdep_off();
+ mutex_lock(&page->cp_mutex);
+ lockdep_on();
LASSERT(cfs_list_empty(&page->cp_batch));
cfs_list_add_tail(&page->cp_batch, &plist->pl_pages);
++plist->pl_nr;
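/*
 * The lockdep_off()/lockdep_on() bracket above is presumably needed
 * because every cl_page carries a cp_mutex in the same lock class, and
 * a long page list pins far more of them at once than lockdep is able
 * to track.
 */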
ENTRY;
cfs_list_del_init(&page->cp_batch);
- cfs_lockdep_off();
- cfs_mutex_unlock(&page->cp_mutex);
- cfs_lockdep_on();
+ lockdep_off();
+ mutex_unlock(&page->cp_mutex);
+ lockdep_on();
--plist->pl_nr;
lu_ref_del_at(&page->cp_reference, page->cp_queue_ref, "queue", plist);
cl_page_put(env, page);
LASSERT(plist->pl_nr > 0);
cfs_list_del_init(&page->cp_batch);
- cfs_lockdep_off();
- cfs_mutex_unlock(&page->cp_mutex);
- cfs_lockdep_on();
+ lockdep_off();
+ mutex_unlock(&page->cp_mutex);
+ lockdep_on();
--plist->pl_nr;
/*
* cl_page_disown0 is used here rather than the usual cl_page_disown().
*/
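/*
 * (Presumably cl_page_disown0() is the internal variant that skips the
 * ownership checks of cl_page_disown(), which need not hold for every
 * page still sitting on the list here.)
 */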
void cl_sync_io_init(struct cl_sync_io *anchor, int nrpages)
{
ENTRY;
cfs_waitq_init(&anchor->csi_waitq);
cfs_atomic_set(&anchor->csi_sync_nr, nrpages);
+ cfs_atomic_set(&anchor->csi_barrier, nrpages > 0);
anchor->csi_sync_rc = 0;
EXIT;
}
EXPORT_SYMBOL(cl_sync_io_init);
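/*
 * A hedged usage sketch (submit_pages() is a hypothetical helper; the
 * cl_sync_io_wait() signature is assumed from this file's era): the
 * caller initializes a stack anchor for the pages in flight, each page
 * calls cl_sync_io_note() on completion, and the caller blocks until
 * csi_sync_nr drains to zero:
 *
 *	struct cl_sync_io anchor;
 *	int rc;
 *
 *	cl_sync_io_init(&anchor, queue->c2_qin.pl_nr);
 *	rc = submit_pages(env, io, &queue->c2_qin, &anchor);
 *	if (rc == 0)
 *		rc = cl_sync_io_wait(env, io, &queue->c2_qout,
 *				     &anchor, timeout);
 */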
}
LASSERT(cfs_atomic_read(&anchor->csi_sync_nr) == 0);
cl_page_list_assume(env, io, queue);
+
+ /* wait until cl_sync_io_note() has done wakeup */
+ while (unlikely(cfs_atomic_read(&anchor->csi_barrier) != 0)) {
+#ifdef __KERNEL__
+ cpu_relax();
+#endif
+ }
+
POISON(anchor, 0x5a, sizeof *anchor);
RETURN(rc);
}
EXPORT_SYMBOL(cl_sync_io_wait);
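/*
 * The csi_barrier spin above closes a use-after-free window: the
 * broadcast in cl_sync_io_note() can wake this waiter while the waker
 * is still touching the anchor, which typically lives on the waiter's
 * stack.  Only after the waker clears csi_barrier is it safe to
 * POISON() the anchor and return.
 */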
* IO.
*/
LASSERT(cfs_atomic_read(&anchor->csi_sync_nr) > 0);
- if (cfs_atomic_dec_and_test(&anchor->csi_sync_nr))
- cfs_waitq_broadcast(&anchor->csi_waitq);
- EXIT;
+ if (cfs_atomic_dec_and_test(&anchor->csi_sync_nr)) {
+ cfs_waitq_broadcast(&anchor->csi_waitq);
+ /* it's safe to nuke or reuse anchor now */
+ cfs_atomic_set(&anchor->csi_barrier, 0);
+ }
+ EXIT;
}
EXPORT_SYMBOL(cl_sync_io_note);