-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
* GPL HEADER START
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2012, Whamcloud, Inc.
+ * Copyright (c) 2011, 2012, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
*/
#define DEBUG_SUBSYSTEM S_CLASS
-#ifndef EXPORT_SYMTAB
-# define EXPORT_SYMTAB
-#endif
#include <obd_class.h>
#include <obd_support.h>
#include <lustre_fid.h>
#include <libcfs/list.h>
-/* lu_time_global_{init,fini}() */
-#include <lu_time.h>
-
#include <cl_object.h>
#include "cl_internal.h"
*/
void cl_io_fini(const struct lu_env *env, struct cl_io *io)
{
- struct cl_io_slice *slice;
- struct cl_thread_info *info;
+ struct cl_io_slice *slice;
+ struct cl_thread_info *info;
LINVRNT(cl_io_type_is_valid(io->ci_type));
LINVRNT(cl_io_invariant(io));
ENTRY;
while (!cfs_list_empty(&io->ci_layers)) {
- slice = container_of(io->ci_layers.next, struct cl_io_slice,
+ slice = container_of(io->ci_layers.prev, struct cl_io_slice,
cis_linkage);
cfs_list_del_init(&slice->cis_linkage);
if (slice->cis_iop->op[io->ci_type].cio_fini != NULL)
info = cl_env_info(env);
if (info->clt_current_io == io)
info->clt_current_io = NULL;
- EXIT;
+
+ /* sanity check for layout change */
+ switch(io->ci_type) {
+ case CIT_READ:
+ case CIT_WRITE:
+ break;
+ case CIT_FAULT:
+ case CIT_FSYNC:
+ LASSERT(!io->ci_need_restart);
+ break;
+ case CIT_SETATTR:
+ case CIT_MISC:
+ /* Check ignore layout change conf */
+ LASSERT(ergo(io->ci_ignore_layout || !io->ci_verify_layout,
+ !io->ci_need_restart));
+ break;
+ default:
+ LBUG();
+ }
+ EXIT;
}
EXPORT_SYMBOL(cl_io_fini);
ENTRY;
- if (io->ci_lockreq == CILR_PEEK) {
- lock = cl_lock_peek(env, io, &link->cill_descr, "io", io);
- if (lock == NULL)
- lock = ERR_PTR(-ENODATA);
- } else
- lock = cl_lock_request(env, io, &link->cill_descr, "io", io);
+ lock = cl_lock_request(env, io, &link->cill_descr, "io", io);
if (!IS_ERR(lock)) {
link->cill_lock = lock;
}
}
if (result == 0)
- result = cl_io_submit_rw(env, io, CRT_READ, queue, CRP_NORMAL);
+ result = cl_io_submit_rw(env, io, CRT_READ, queue);
/*
* Unlock unsent pages in case of error.
*/
* \see cl_io_operations::cio_submit()
*/
int cl_io_submit_rw(const struct lu_env *env, struct cl_io *io,
- enum cl_req_type crt, struct cl_2queue *queue,
- enum cl_req_priority priority)
+ enum cl_req_type crt, struct cl_2queue *queue)
{
const struct cl_io_slice *scan;
int result = 0;
if (scan->cis_iop->req_op[crt].cio_submit == NULL)
continue;
result = scan->cis_iop->req_op[crt].cio_submit(env, scan, crt,
- queue, priority);
+ queue);
if (result != 0)
break;
}
*/
int cl_io_submit_sync(const struct lu_env *env, struct cl_io *io,
enum cl_req_type iot, struct cl_2queue *queue,
- enum cl_req_priority prio, long timeout)
+ long timeout)
{
struct cl_sync_io *anchor = &cl_env_info(env)->clt_anchor;
struct cl_page *pg;
int rc;
- LASSERT(prio == CRP_NORMAL || prio == CRP_CANCEL);
-
cl_page_list_for_each(pg, &queue->c2_qin) {
LASSERT(pg->cp_sync_io == NULL);
pg->cp_sync_io = anchor;
}
cl_sync_io_init(anchor, queue->c2_qin.pl_nr);
- rc = cl_io_submit_rw(env, io, iot, queue, prio);
+ rc = cl_io_submit_rw(env, io, iot, queue);
if (rc == 0) {
/*
* If some pages weren't sent for any reason (e.g.,
}
cl_io_iter_fini(env, io);
} while (result == 0 && io->ci_continue);
- RETURN(result < 0 ? result : 0);
+ if (result == 0)
+ result = io->ci_result;
+ RETURN(result < 0 ? result : 0);
}
EXPORT_SYMBOL(cl_io_loop);
LASSERT(page->cp_owner != NULL);
LINVRNT(plist->pl_owner == cfs_current());
- cfs_lockdep_off();
- cfs_mutex_lock(&page->cp_mutex);
- cfs_lockdep_on();
+ lockdep_off();
+ mutex_lock(&page->cp_mutex);
+ lockdep_on();
LASSERT(cfs_list_empty(&page->cp_batch));
cfs_list_add_tail(&page->cp_batch, &plist->pl_pages);
++plist->pl_nr;
ENTRY;
cfs_list_del_init(&page->cp_batch);
- cfs_lockdep_off();
- cfs_mutex_unlock(&page->cp_mutex);
- cfs_lockdep_on();
+ lockdep_off();
+ mutex_unlock(&page->cp_mutex);
+ lockdep_on();
--plist->pl_nr;
lu_ref_del_at(&page->cp_reference, page->cp_queue_ref, "queue", plist);
cl_page_put(env, page);
LASSERT(plist->pl_nr > 0);
cfs_list_del_init(&page->cp_batch);
- cfs_lockdep_off();
- cfs_mutex_unlock(&page->cp_mutex);
- cfs_lockdep_on();
+ lockdep_off();
+ mutex_unlock(&page->cp_mutex);
+ lockdep_on();
--plist->pl_nr;
/*
* cl_page_disown0 rather than usual cl_page_disown() is used,
*/
void cl_sync_io_init(struct cl_sync_io *anchor, int nrpages)
{
- ENTRY;
- cfs_waitq_init(&anchor->csi_waitq);
- cfs_atomic_set(&anchor->csi_sync_nr, nrpages);
- anchor->csi_sync_rc = 0;
- EXIT;
+ ENTRY;
+ cfs_waitq_init(&anchor->csi_waitq);
+ cfs_atomic_set(&anchor->csi_sync_nr, nrpages);
+ /* csi_barrier is raised while any transfer is outstanding (nrpages > 0)
+  * and is dropped by the final cl_sync_io_note(); cl_sync_io_wait() spins
+  * on it before poisoning the anchor, so the waiter cannot free or reuse
+  * the anchor while a waker is still touching it. */
+ cfs_atomic_set(&anchor->csi_barrier, nrpages > 0);
+ anchor->csi_sync_rc = 0;
+ EXIT;
}
EXPORT_SYMBOL(cl_sync_io_init);
}
LASSERT(cfs_atomic_read(&anchor->csi_sync_nr) == 0);
cl_page_list_assume(env, io, queue);
- POISON(anchor, 0x5a, sizeof *anchor);
- RETURN(rc);
+
+ /* wait until cl_sync_io_note() has done wakeup */
+ while (unlikely(cfs_atomic_read(&anchor->csi_barrier) != 0)) {
+#ifdef __KERNEL__
+ cpu_relax();
+#endif
+ }
+
+ POISON(anchor, 0x5a, sizeof *anchor);
+ RETURN(rc);
}
EXPORT_SYMBOL(cl_sync_io_wait);
* IO.
*/
LASSERT(cfs_atomic_read(&anchor->csi_sync_nr) > 0);
- if (cfs_atomic_dec_and_test(&anchor->csi_sync_nr))
- cfs_waitq_broadcast(&anchor->csi_waitq);
- EXIT;
+ if (cfs_atomic_dec_and_test(&anchor->csi_sync_nr)) {
+ cfs_waitq_broadcast(&anchor->csi_waitq);
+ /* it's safe to nuke or reuse anchor now */
+ cfs_atomic_set(&anchor->csi_barrier, 0);
+ }
+ EXIT;
}
EXPORT_SYMBOL(cl_sync_io_note);