X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Fobdclass%2Fcl_io.c;h=bcb0ad34be6b49d3b2eb7fb0d428604e655b982b;hb=67af9ea47e43c53bf53cb1e9fed8a3222b5c6a8e;hp=cf1860564b0eb5896e76a6f51d62b8f92e8464a2;hpb=c5361360e51de22a59d4427327bddf9fd398f352;p=fs%2Flustre-release.git

diff --git a/lustre/obdclass/cl_io.c b/lustre/obdclass/cl_io.c
index cf18605..bcb0ad3 100644
--- a/lustre/obdclass/cl_io.c
+++ b/lustre/obdclass/cl_io.c
@@ -1,6 +1,4 @@
-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
  * GPL HEADER START
  *
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -28,6 +26,8 @@
 /*
  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -39,17 +39,11 @@
  */
 
 #define DEBUG_SUBSYSTEM S_CLASS
-#ifndef EXPORT_SYMTAB
-# define EXPORT_SYMTAB
-#endif
 
 #include 
 #include 
 #include 
 #include 
-/* lu_time_global_{init,fini}() */
-#include 
-
 #include 
 #include "cl_internal.h"
 
@@ -107,15 +101,15 @@ static int cl_io_invariant(const struct cl_io *io)
  */
 void cl_io_fini(const struct lu_env *env, struct cl_io *io)
 {
-        struct cl_io_slice    *slice;
-        struct cl_thread_info *info;
+        struct cl_io_slice    *slice;
+        struct cl_thread_info *info;
 
         LINVRNT(cl_io_type_is_valid(io->ci_type));
         LINVRNT(cl_io_invariant(io));
         ENTRY;
 
         while (!cfs_list_empty(&io->ci_layers)) {
-                slice = container_of(io->ci_layers.next, struct cl_io_slice,
+                slice = container_of(io->ci_layers.prev, struct cl_io_slice,
                                      cis_linkage);
                 cfs_list_del_init(&slice->cis_linkage);
                 if (slice->cis_iop->op[io->ci_type].cio_fini != NULL)
@@ -131,7 +125,26 @@ void cl_io_fini(const struct lu_env *env, struct cl_io *io)
         info = cl_env_info(env);
         if (info->clt_current_io == io)
                 info->clt_current_io = NULL;
-        EXIT;
+
+        /* sanity check for layout change */
+        switch(io->ci_type) {
+        case CIT_READ:
+        case CIT_WRITE:
+                break;
+        case CIT_FAULT:
+        case CIT_FSYNC:
+                LASSERT(!io->ci_need_restart);
+                break;
+        case CIT_SETATTR:
+        case CIT_MISC:
+                /* Check ignore layout change conf */
+                LASSERT(ergo(io->ci_ignore_layout || !io->ci_verify_layout,
+                             !io->ci_need_restart));
+                break;
+        default:
+                LBUG();
+        }
+        EXIT;
 }
 EXPORT_SYMBOL(cl_io_fini);
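The cl_io_fini() hunk above switches the teardown loop from io->ci_layers.next to io->ci_layers.prev, so the IO slices are finalized in the reverse of the order in which they were registered (last added, first finalized). The standalone sketch below is a plain user-space model of that LIFO teardown, not Lustre code: the list helpers, io_slice type and layer labels are invented for illustration and only stand in for cfs_list_t and cl_io_slice.

#include <stdio.h>
#include <stddef.h>

/* Minimal intrusive doubly linked list, standing in for cfs_list_t. */
struct list_node {
        struct list_node *prev, *next;
};

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

static void list_init(struct list_node *h) { h->prev = h->next = h; }

static void list_add_tail(struct list_node *n, struct list_node *h)
{
        n->prev = h->prev;
        n->next = h;
        h->prev->next = n;
        h->prev = n;
}

static void list_del_init(struct list_node *n)
{
        n->prev->next = n->next;
        n->next->prev = n->prev;
        list_init(n);
}

/* Stand-in for cl_io_slice: one entry per layer stacked on the IO. */
struct io_slice {
        const char       *name;
        struct list_node  linkage;
};

int main(void)
{
        struct list_node layers;
        struct io_slice top = { .name = "top layer" };
        struct io_slice mid = { .name = "middle layer" };
        struct io_slice bot = { .name = "bottom layer" };

        list_init(&layers);
        /* Slices are appended as the IO is set up down the object stack. */
        list_add_tail(&top.linkage, &layers);
        list_add_tail(&mid.linkage, &layers);
        list_add_tail(&bot.linkage, &layers);

        /* Teardown takes the *last* entry each time, mirroring the switch
         * from ci_layers.next to ci_layers.prev: the most recently added
         * slice is finalized first, the first-added slice last. */
        while (layers.next != &layers) {
                struct io_slice *slice =
                        container_of(layers.prev, struct io_slice, linkage);
                list_del_init(&slice->linkage);
                printf("fini %s\n", slice->name);
        }
        return 0;
}

Running the sketch prints "bottom layer", "middle layer", "top layer", i.e. the reverse of the registration order shown by the three list_add_tail() calls.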
@@ -787,7 +801,7 @@ int cl_io_read_page(const struct lu_env *env, struct cl_io *io,
                 }
         }
         if (result == 0)
-                result = cl_io_submit_rw(env, io, CRT_READ, queue, CRP_NORMAL);
+                result = cl_io_submit_rw(env, io, CRT_READ, queue);
         /*
          * Unlock unsent pages in case of error.
          */
@@ -883,8 +897,7 @@ EXPORT_SYMBOL(cl_io_commit_write);
  * \see cl_io_operations::cio_submit()
  */
 int cl_io_submit_rw(const struct lu_env *env, struct cl_io *io,
-                    enum cl_req_type crt, struct cl_2queue *queue,
-                    enum cl_req_priority priority)
+                    enum cl_req_type crt, struct cl_2queue *queue)
 {
         const struct cl_io_slice *scan;
         int result = 0;
@@ -896,7 +909,7 @@ int cl_io_submit_rw(const struct lu_env *env, struct cl_io *io,
                 if (scan->cis_iop->req_op[crt].cio_submit == NULL)
                         continue;
                 result = scan->cis_iop->req_op[crt].cio_submit(env, scan, crt,
-                                                               queue, priority);
+                                                               queue);
                 if (result != 0)
                         break;
         }
@@ -914,21 +927,19 @@ EXPORT_SYMBOL(cl_io_submit_rw);
  */
 int cl_io_submit_sync(const struct lu_env *env, struct cl_io *io,
                       enum cl_req_type iot, struct cl_2queue *queue,
-                      enum cl_req_priority prio, long timeout)
+                      long timeout)
 {
         struct cl_sync_io *anchor = &cl_env_info(env)->clt_anchor;
         struct cl_page *pg;
         int rc;
 
-        LASSERT(prio == CRP_NORMAL || prio == CRP_CANCEL);
-
         cl_page_list_for_each(pg, &queue->c2_qin) {
                 LASSERT(pg->cp_sync_io == NULL);
                 pg->cp_sync_io = anchor;
         }
 
         cl_sync_io_init(anchor, queue->c2_qin.pl_nr);
-        rc = cl_io_submit_rw(env, io, iot, queue, prio);
+        rc = cl_io_submit_rw(env, io, iot, queue);
         if (rc == 0) {
                 /*
                  * If some pages weren't sent for any reason (e.g.,
@@ -1030,7 +1041,9 @@ int cl_io_loop(const struct lu_env *env, struct cl_io *io)
                 }
                 cl_io_iter_fini(env, io);
         } while (result == 0 && io->ci_continue);
-        RETURN(result < 0 ? result : 0);
+        if (result == 0)
+                result = io->ci_result;
+        RETURN(result < 0 ? result : 0);
 }
 EXPORT_SYMBOL(cl_io_loop);
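The cl_io_loop() hunk above makes the loop fall back to io->ci_result when every iteration itself returned 0, so an error that a lower layer recorded on the io is not silently reported as success. The toy user-space model below illustrates only that control flow; toy_io, toy_iteration and the -ENOSPC scenario are invented for illustration and are not the Lustre API.

#include <stdio.h>
#include <errno.h>

/* Simplified stand-in for struct cl_io: only the fields the loop checks.
 * Field names mimic the Lustre ones, but the behaviour is invented here. */
struct toy_io {
        int ci_continue;   /* more iterations wanted? */
        int ci_result;     /* error recorded by lower layers, 0 if none */
};

/* One iteration: the second pass hits a fake -ENOSPC that is recorded on
 * the io rather than returned, the way a deferred error might be. */
static int toy_iteration(struct toy_io *io, int pass)
{
        if (pass == 1) {
                io->ci_result = -ENOSPC;  /* deferred error */
                io->ci_continue = 0;      /* stop iterating */
        } else {
                io->ci_continue = 1;
        }
        return 0;                         /* the iteration itself "worked" */
}

static int toy_io_loop(struct toy_io *io)
{
        int result;
        int pass = 0;

        do {
                result = toy_iteration(io, pass++);
        } while (result == 0 && io->ci_continue);

        /* Without this fallback, the error stored on the io is dropped
         * and the loop reports success. */
        if (result == 0)
                result = io->ci_result;
        return result < 0 ? result : 0;
}

int main(void)
{
        struct toy_io io = { .ci_continue = 1, .ci_result = 0 };

        printf("toy_io_loop() = %d (expect %d)\n", toy_io_loop(&io), -ENOSPC);
        return 0;
}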
@@ -1086,9 +1099,9 @@ void cl_page_list_add(struct cl_page_list *plist, struct cl_page *page)
         LASSERT(page->cp_owner != NULL);
         LINVRNT(plist->pl_owner == cfs_current());
 
-        cfs_lockdep_off();
-        cfs_mutex_lock(&page->cp_mutex);
-        cfs_lockdep_on();
+        lockdep_off();
+        mutex_lock(&page->cp_mutex);
+        lockdep_on();
         LASSERT(cfs_list_empty(&page->cp_batch));
         cfs_list_add_tail(&page->cp_batch, &plist->pl_pages);
         ++plist->pl_nr;
@@ -1109,9 +1122,9 @@ void cl_page_list_del(const struct lu_env *env,
 
         ENTRY;
         cfs_list_del_init(&page->cp_batch);
-        cfs_lockdep_off();
-        cfs_mutex_unlock(&page->cp_mutex);
-        cfs_lockdep_on();
+        lockdep_off();
+        mutex_unlock(&page->cp_mutex);
+        lockdep_on();
         --plist->pl_nr;
         lu_ref_del_at(&page->cp_reference, page->cp_queue_ref, "queue", plist);
         cl_page_put(env, page);
@@ -1176,9 +1189,9 @@ void cl_page_list_disown(const struct lu_env *env,
                 LASSERT(plist->pl_nr > 0);
 
                 cfs_list_del_init(&page->cp_batch);
-                cfs_lockdep_off();
-                cfs_mutex_unlock(&page->cp_mutex);
-                cfs_lockdep_on();
+                lockdep_off();
+                mutex_unlock(&page->cp_mutex);
+                lockdep_on();
                 --plist->pl_nr;
                 /*
                  * cl_page_disown0 rather than usual cl_page_disown() is used,
@@ -1666,11 +1679,12 @@ EXPORT_SYMBOL(cl_req_attr_set);
  */
 void cl_sync_io_init(struct cl_sync_io *anchor, int nrpages)
 {
-        ENTRY;
-        cfs_waitq_init(&anchor->csi_waitq);
-        cfs_atomic_set(&anchor->csi_sync_nr, nrpages);
-        anchor->csi_sync_rc = 0;
-        EXIT;
+        ENTRY;
+        cfs_waitq_init(&anchor->csi_waitq);
+        cfs_atomic_set(&anchor->csi_sync_nr, nrpages);
+        cfs_atomic_set(&anchor->csi_barrier, nrpages > 0);
+        anchor->csi_sync_rc = 0;
+        EXIT;
 }
 EXPORT_SYMBOL(cl_sync_io_init);
@@ -1708,8 +1722,16 @@ int cl_sync_io_wait(const struct lu_env *env, struct cl_io *io,
         }
         LASSERT(cfs_atomic_read(&anchor->csi_sync_nr) == 0);
         cl_page_list_assume(env, io, queue);
-        POISON(anchor, 0x5a, sizeof *anchor);
-        RETURN(rc);
+
+        /* wait until cl_sync_io_note() has done wakeup */
+        while (unlikely(cfs_atomic_read(&anchor->csi_barrier) != 0)) {
+#ifdef __KERNEL__
+                cpu_relax();
+#endif
+        }
+
+        POISON(anchor, 0x5a, sizeof *anchor);
+        RETURN(rc);
 }
 EXPORT_SYMBOL(cl_sync_io_wait);
@@ -1727,8 +1749,11 @@ void cl_sync_io_note(struct cl_sync_io *anchor, int ioret)
          * IO.
          */
         LASSERT(cfs_atomic_read(&anchor->csi_sync_nr) > 0);
-        if (cfs_atomic_dec_and_test(&anchor->csi_sync_nr))
-                cfs_waitq_broadcast(&anchor->csi_waitq);
-        EXIT;
+        if (cfs_atomic_dec_and_test(&anchor->csi_sync_nr)) {
+                cfs_waitq_broadcast(&anchor->csi_waitq);
+                /* it's safe to nuke or reuse anchor now */
+                cfs_atomic_set(&anchor->csi_barrier, 0);
+        }
+        EXIT;
 }
 EXPORT_SYMBOL(cl_sync_io_note);
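The last three hunks add a csi_barrier flag to the sync-IO anchor: cl_sync_io_note() clears it only after it has finished broadcasting the wakeup, and cl_sync_io_wait() spins on it (with cpu_relax()) before poisoning the anchor, so the anchor cannot be reused or overwritten while the completing thread may still be touching it. Below is a rough user-space model of that handshake using C11 atomics and pthreads; toy_anchor and its helpers are invented for illustration and merely stand in for the cfs_waitq-based kernel code (a yield loop replaces the real waitqueue sleep).

#include <stdatomic.h>
#include <stdio.h>
#include <pthread.h>
#include <sched.h>

/* Toy model of cl_sync_io: a pending-page counter plus a barrier flag
 * that is cleared only after the waker is done with the structure. */
struct toy_anchor {
        atomic_int sync_nr;   /* pages still in flight (csi_sync_nr) */
        atomic_int barrier;   /* waker still using the anchor? (csi_barrier) */
};

static void toy_anchor_init(struct toy_anchor *a, int nr)
{
        atomic_store(&a->sync_nr, nr);
        atomic_store(&a->barrier, nr > 0);
}

/* Completion side, one call per finished page (like cl_sync_io_note()). */
static void toy_anchor_note(struct toy_anchor *a)
{
        if (atomic_fetch_sub(&a->sync_nr, 1) == 1) {
                /* Last page: a real implementation would broadcast on the
                 * waitqueue here; only then is the anchor declared safe to
                 * free or reuse. */
                atomic_store(&a->barrier, 0);
        }
}

/* Waiter side (like cl_sync_io_wait()): wait for the count to drain,
 * then for the barrier, before the anchor may be poisoned/reused. */
static void toy_anchor_wait(struct toy_anchor *a)
{
        while (atomic_load(&a->sync_nr) != 0)
                sched_yield();          /* stand-in for sleeping on a waitq */
        while (atomic_load(&a->barrier) != 0)
                sched_yield();          /* stand-in for the cpu_relax() spin */
}

static void *completer(void *arg)
{
        struct toy_anchor *a = arg;

        for (int i = 0; i < 4; i++)
                toy_anchor_note(a);
        return NULL;
}

int main(void)
{
        struct toy_anchor anchor;
        pthread_t t;

        toy_anchor_init(&anchor, 4);
        pthread_create(&t, NULL, completer, &anchor);
        toy_anchor_wait(&anchor);
        /* Only at this point is it safe to reuse the anchor's memory. */
        printf("all pages completed, anchor may be reused\n");
        pthread_join(&t, NULL);
        return 0;
}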