X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Fosc%2Fosc_io.c;h=5a0d0b95148aafdfc102ac10934894fb86d39e35;hp=a9bad93fa0e344b4ad4761d59e93981715395614;hb=546993d587c5fc380e9745eae98f863e02e68575;hpb=4fcbd1af9ec3b1e5f6424d925f43f0cb2910c3ec diff --git a/lustre/osc/osc_io.c b/lustre/osc/osc_io.c index a9bad93..5a0d0b9 100644 --- a/lustre/osc/osc_io.c +++ b/lustre/osc/osc_io.c @@ -15,11 +15,7 @@ * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. + * http://www.gnu.org/licenses/gpl-2.0.html * * GPL HEADER END */ @@ -27,7 +23,7 @@ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. * Use is subject to license terms. * - * Copyright (c) 2011, 2013, Intel Corporation. + * Copyright (c) 2011, 2017, Intel Corporation. */ /* * This file is part of Lustre, http://www.lustre.org/ @@ -41,51 +37,67 @@ #define DEBUG_SUBSYSTEM S_OSC -#include "osc_cl_internal.h" +#include +#include +#include + +#include "osc_internal.h" -/** \addtogroup osc - * @{ +/** \addtogroup osc + * @{ */ /***************************************************************************** * - * Type conversions. + * io operations. * */ -static struct osc_req *cl2osc_req(const struct cl_req_slice *slice) +static void osc_io_fini(const struct lu_env *env, const struct cl_io_slice *io) { - LINVRNT(slice->crs_dev->cd_lu_dev.ld_type == &osc_device_type); - return container_of0(slice, struct osc_req, or_cl); } -static struct osc_io *cl2osc_io(const struct lu_env *env, - const struct cl_io_slice *slice) +void osc_read_ahead_release(const struct lu_env *env, void *cbdata) { - struct osc_io *oio = container_of0(slice, struct osc_io, oi_cl); - LINVRNT(oio == osc_env_io(env)); - return oio; + struct ldlm_lock *dlmlock = cbdata; + struct lustre_handle lockh; + + ldlm_lock2handle(dlmlock, &lockh); + ldlm_lock_decref(&lockh, LCK_PR); + LDLM_LOCK_PUT(dlmlock); } +EXPORT_SYMBOL(osc_read_ahead_release); -static struct osc_page *osc_cl_page_osc(struct cl_page *page) +static int osc_io_read_ahead(const struct lu_env *env, + const struct cl_io_slice *ios, + pgoff_t start, struct cl_read_ahead *ra) { - const struct cl_page_slice *slice; - - slice = cl_page_at(page, &osc_device_type); - LASSERT(slice != NULL); - - return cl2osc_page(slice); -} + struct osc_object *osc = cl2osc(ios->cis_obj); + struct ldlm_lock *dlmlock; + int result = -ENODATA; + ENTRY; + dlmlock = osc_dlmlock_at_pgoff(env, osc, start, 0); + if (dlmlock != NULL) { + LASSERT(dlmlock->l_ast_data == osc); + if (dlmlock->l_req_mode != LCK_PR) { + struct lustre_handle lockh; + ldlm_lock2handle(dlmlock, &lockh); + ldlm_lock_addref(&lockh, LCK_PR); + ldlm_lock_decref(&lockh, dlmlock->l_req_mode); + } -/***************************************************************************** - * - * io operations. 
- * - */ + ra->cra_rpc_size = osc_cli(osc)->cl_max_pages_per_rpc; + ra->cra_end = cl_index(osc2cl(osc), + dlmlock->l_policy_data.l_extent.end); + ra->cra_release = osc_read_ahead_release; + ra->cra_cbdata = dlmlock; + if (ra->cra_end != CL_PAGE_EOF) + ra->cra_contention = true; + result = 0; + } -static void osc_io_fini(const struct lu_env *env, const struct cl_io_slice *io) -{ + RETURN(result); } /** @@ -95,36 +107,41 @@ static void osc_io_fini(const struct lu_env *env, const struct cl_io_slice *io) * or, if page is already submitted, changes osc flags through * osc_set_async_flags(). */ -static int osc_io_submit(const struct lu_env *env, - const struct cl_io_slice *ios, - enum cl_req_type crt, struct cl_2queue *queue) +int osc_io_submit(const struct lu_env *env, const struct cl_io_slice *ios, + enum cl_req_type crt, struct cl_2queue *queue) { - struct cl_page *page; - struct cl_page *tmp; - struct client_obd *cli = NULL; - struct osc_object *osc = NULL; /* to keep gcc happy */ - struct osc_page *opg; - struct cl_io *io; - CFS_LIST_HEAD (list); + struct cl_page *page; + struct cl_page *tmp; + struct client_obd *cli = NULL; + struct osc_object *osc = NULL; /* to keep gcc happy */ + struct osc_page *opg; + struct cl_io *io; + LIST_HEAD(list); struct cl_page_list *qin = &queue->c2_qin; struct cl_page_list *qout = &queue->c2_qout; - int queued = 0; + unsigned int queued = 0; int result = 0; - int cmd; int brw_flags; - int max_pages; + unsigned int max_pages; + unsigned int ppc_bits; /* pages per chunk bits */ + unsigned int ppc; + bool sync_queue = false; LASSERT(qin->pl_nr > 0); - CDEBUG(D_CACHE, "%d %d\n", qin->pl_nr, crt); + CDEBUG(D_CACHE|D_READA, "%d %d\n", qin->pl_nr, crt); osc = cl2osc(ios->cis_obj); cli = osc_cli(osc); max_pages = cli->cl_max_pages_per_rpc; + ppc_bits = cli->cl_chunkbits - PAGE_SHIFT; + ppc = 1 << ppc_bits; - cmd = crt == CRT_WRITE ? OBD_BRW_WRITE : OBD_BRW_READ; brw_flags = osc_io_srvlock(cl2osc_io(env, ios)) ? OBD_BRW_SRVLOCK : 0; + brw_flags |= crt == CRT_WRITE ? OBD_BRW_WRITE : OBD_BRW_READ; + if (crt == CRT_READ && ios->cis_io->ci_ndelay) + brw_flags |= OBD_BRW_NDELAY; /* * NOTE: here @page is a top-level page. 
This is done to avoid @@ -137,12 +154,12 @@ static int osc_io_submit(const struct lu_env *env, io = page->cp_owner; LASSERT(io != NULL); - opg = osc_cl_page_osc(page); - oap = &opg->ops_oap; + opg = osc_cl_page_osc(page, osc); + oap = &opg->ops_oap; LASSERT(osc == oap->oap_obj); - if (!cfs_list_empty(&oap->oap_pending_item) || - !cfs_list_empty(&oap->oap_rpc_item)) { + if (!list_empty(&oap->oap_pending_item) || + !list_empty(&oap->oap_rpc_item)) { CDEBUG(D_CACHE, "Busy oap %p page %p for submit.\n", oap, opg); result = -EBUSY; @@ -163,78 +180,109 @@ static int osc_io_submit(const struct lu_env *env, continue; } - cl_page_list_move(qout, qin, page); spin_lock(&oap->oap_lock); oap->oap_async_flags = ASYNC_URGENT|ASYNC_READY; oap->oap_async_flags |= ASYNC_COUNT_STABLE; spin_unlock(&oap->oap_lock); osc_page_submit(env, opg, crt, brw_flags); - cfs_list_add_tail(&oap->oap_pending_item, &list); - if (++queued == max_pages) { - queued = 0; - result = osc_queue_sync_pages(env, osc, &list, cmd, + list_add_tail(&oap->oap_pending_item, &list); + + if (page->cp_sync_io != NULL) + cl_page_list_move(qout, qin, page); + else /* async IO */ + cl_page_list_del(env, qin, page); + + queued++; + if (queued == max_pages) { + sync_queue = true; + } else if (crt == CRT_WRITE) { + unsigned int chunks; + unsigned int next_chunks; + + chunks = (queued + ppc - 1) >> ppc_bits; + /* chunk number if add another page */ + next_chunks = (queued + ppc) >> ppc_bits; + + /* next page will excceed write chunk limit */ + if (chunks == osc_max_write_chunks(cli) && + next_chunks > chunks) + sync_queue = true; + } + + if (sync_queue) { + result = osc_queue_sync_pages(env, io, osc, &list, brw_flags); if (result < 0) break; + queued = 0; + sync_queue = false; } } if (queued > 0) - result = osc_queue_sync_pages(env, osc, &list, cmd, brw_flags); + result = osc_queue_sync_pages(env, io, osc, &list, brw_flags); + + /* Update c/mtime for sync write. LU-7310 */ + if (crt == CRT_WRITE && qout->pl_nr > 0 && result == 0) { + struct cl_object *obj = ios->cis_obj; + struct cl_attr *attr = &osc_env_info(env)->oti_attr; + + cl_object_attr_lock(obj); + attr->cat_mtime = attr->cat_ctime = ktime_get_real_seconds(); + cl_object_attr_update(env, obj, attr, CAT_MTIME | CAT_CTIME); + cl_object_attr_unlock(obj); + } CDEBUG(D_INFO, "%d/%d %d\n", qin->pl_nr, qout->pl_nr, result); return qout->pl_nr > 0 ? 0 : result; } +EXPORT_SYMBOL(osc_io_submit); /** - * This is called when a page is accessed within file in a way that creates - * new page, if one were missing (i.e., if there were a hole at that place in - * the file, or accessed page is beyond the current file size). + * This is called to update the attributes when modifying a specific page, + * both when making new pages and when doing updates to existing cached pages. * * Expand stripe KMS if necessary. 
*/ -static void osc_page_touch_at(const struct lu_env *env, - struct cl_object *obj, pgoff_t idx, unsigned to) +void osc_page_touch_at(const struct lu_env *env, struct cl_object *obj, + pgoff_t idx, size_t to) { - struct lov_oinfo *loi = cl2osc(obj)->oo_oinfo; - struct cl_attr *attr = &osc_env_info(env)->oti_attr; - int valid; - __u64 kms; + struct lov_oinfo *loi = cl2osc(obj)->oo_oinfo; + struct cl_attr *attr = &osc_env_info(env)->oti_attr; + int valid; + __u64 kms; - /* offset within stripe */ - kms = cl_offset(obj, idx) + to; + ENTRY; - cl_object_attr_lock(obj); - /* - * XXX old code used - * - * ll_inode_size_lock(inode, 0); lov_stripe_lock(lsm); - * - * here - */ - CDEBUG(D_INODE, "stripe KMS %sincreasing "LPU64"->"LPU64" "LPU64"\n", - kms > loi->loi_kms ? "" : "not ", loi->loi_kms, kms, - loi->loi_lvb.lvb_size); + /* offset within stripe */ + kms = cl_offset(obj, idx) + to; - attr->cat_mtime = attr->cat_ctime = LTIME_S(CFS_CURRENT_TIME); + cl_object_attr_lock(obj); + CDEBUG(D_INODE, "stripe KMS %sincreasing %llu->%llu %llu\n", + kms > loi->loi_kms ? "" : "not ", loi->loi_kms, kms, + loi->loi_lvb.lvb_size); + + attr->cat_mtime = attr->cat_ctime = ktime_get_real_seconds(); valid = CAT_MTIME | CAT_CTIME; - if (kms > loi->loi_kms) { - attr->cat_kms = kms; - valid |= CAT_KMS; - } - if (kms > loi->loi_lvb.lvb_size) { - attr->cat_size = kms; - valid |= CAT_SIZE; - } - cl_object_attr_set(env, obj, attr, valid); - cl_object_attr_unlock(obj); + if (kms > loi->loi_kms) { + attr->cat_kms = kms; + valid |= CAT_KMS; + } + if (kms > loi->loi_lvb.lvb_size) { + attr->cat_size = kms; + valid |= CAT_SIZE; + } + cl_object_attr_update(env, obj, attr, valid); + cl_object_attr_unlock(obj); + + EXIT; } -static int osc_io_commit_async(const struct lu_env *env, - const struct cl_io_slice *ios, - struct cl_page_list *qin, int from, int to, - cl_commit_cbt cb) +int osc_io_commit_async(const struct lu_env *env, + const struct cl_io_slice *ios, + struct cl_page_list *qin, int from, int to, + cl_commit_cbt cb) { struct cl_io *io = ios->cis_io; struct osc_io *oio = cl2osc_io(env, ios); @@ -242,6 +290,7 @@ static int osc_io_commit_async(const struct lu_env *env, struct cl_page *page; struct cl_page *last_page; struct osc_page *opg; + struct pagevec *pvec = &osc_env_info(env)->oti_pagevec; int result = 0; ENTRY; @@ -261,18 +310,19 @@ static int osc_io_commit_async(const struct lu_env *env, } } - /* - * NOTE: here @page is a top-level page. This is done to avoid - * creation of sub-page-list. - */ + ll_pagevec_init(pvec, 0); + while (qin->pl_nr > 0) { struct osc_async_page *oap; page = cl_page_list_first(qin); - opg = osc_cl_page_osc(page); + opg = osc_cl_page_osc(page, osc); oap = &opg->ops_oap; - if (!cfs_list_empty(&oap->oap_rpc_item)) { + LASSERTF(osc == oap->oap_obj, + "obj mismatch: %p / %p\n", osc, oap->oap_obj); + + if (!list_empty(&oap->oap_rpc_item)) { CDEBUG(D_CACHE, "Busy oap %p page %p for submit.\n", oap, opg); result = -EBUSY; @@ -280,23 +330,31 @@ static int osc_io_commit_async(const struct lu_env *env, } /* The page may be already in dirty cache. */ - if (cfs_list_empty(&oap->oap_pending_item)) { - result = osc_page_cache_add(env, &opg->ops_cl, io); + if (list_empty(&oap->oap_pending_item)) { + result = osc_page_cache_add(env, opg, io, cb); if (result != 0) break; } - osc_page_touch_at(env, osc2cl(osc), - opg->ops_cl.cpl_page->cp_index, + osc_page_touch_at(env, osc2cl(osc), osc_index(opg), page == last_page ? 
to : PAGE_SIZE); cl_page_list_del(env, qin, page); - (*cb)(env, io, page); - /* Can't access page any more. Page can be in transfer and - * complete at any time. */ + /* if there are no more slots, do the callback & reinit */ + if (pagevec_add(pvec, page->cp_vmpage) == 0) { + (*cb)(env, io, pvec); + pagevec_reinit(pvec); + } } + /* Clean up any partially full pagevecs */ + if (pagevec_count(pvec) != 0) + (*cb)(env, io, pvec); + + /* Can't access these pages any more. Page can be in transfer and + * complete at any time. */ + /* for sync write, kernel will wait for this page to be flushed before * osc_io_end() is called, so release it earlier. * for mkwrite(), it's known there is no further pages. */ @@ -308,79 +366,121 @@ static int osc_io_commit_async(const struct lu_env *env, CDEBUG(D_INFO, "%d %d\n", qin->pl_nr, result); RETURN(result); } +EXPORT_SYMBOL(osc_io_commit_async); + +static bool osc_import_not_healthy(struct obd_import *imp) +{ + return imp->imp_invalid || imp->imp_deactive || + !(imp->imp_state == LUSTRE_IMP_FULL || + imp->imp_state == LUSTRE_IMP_IDLE); +} + +int osc_io_iter_init(const struct lu_env *env, const struct cl_io_slice *ios) +{ + struct osc_object *osc = cl2osc(ios->cis_obj); + struct obd_import *imp = osc_cli(osc)->cl_import; + struct osc_io *oio = osc_env_io(env); + int rc = -EIO; + ENTRY; + + spin_lock(&imp->imp_lock); + /** + * check whether this OSC device is available for non-delay read, + * fast switching mirror if we haven't tried all mirrors. + */ + if (ios->cis_io->ci_type == CIT_READ && ios->cis_io->ci_ndelay && + !ios->cis_io->ci_tried_all_mirrors && osc_import_not_healthy(imp)) { + rc = -EWOULDBLOCK; + } else if (likely(!imp->imp_invalid)) { + atomic_inc(&osc->oo_nr_ios); + oio->oi_is_active = 1; + rc = 0; + } + spin_unlock(&imp->imp_lock); + + if (cfs_capable(CFS_CAP_SYS_RESOURCE)) + oio->oi_cap_sys_resource = 1; + + RETURN(rc); +} +EXPORT_SYMBOL(osc_io_iter_init); -static int osc_io_rw_iter_init(const struct lu_env *env, - const struct cl_io_slice *ios) +int osc_io_rw_iter_init(const struct lu_env *env, + const struct cl_io_slice *ios) { struct cl_io *io = ios->cis_io; struct osc_io *oio = osc_env_io(env); struct osc_object *osc = cl2osc(ios->cis_obj); - struct client_obd *cli = osc_cli(osc); - unsigned long c; - unsigned int npages; - unsigned int max_pages; + unsigned long npages; ENTRY; if (cl_io_is_append(io)) - RETURN(0); + RETURN(osc_io_iter_init(env, ios)); - npages = io->u.ci_rw.crw_count >> PAGE_CACHE_SHIFT; - if (io->u.ci_rw.crw_pos & ~CFS_PAGE_MASK) + npages = io->u.ci_rw.crw_count >> PAGE_SHIFT; + if (io->u.ci_rw.crw_pos & ~PAGE_MASK) ++npages; - max_pages = cli->cl_max_pages_per_rpc * cli->cl_max_rpcs_in_flight; - if (npages > max_pages) - npages = max_pages; + oio->oi_lru_reserved = osc_lru_reserve(osc_cli(osc), npages); - c = cfs_atomic_read(cli->cl_lru_left); - if (c < npages && osc_lru_reclaim(cli) > 0) - c = cfs_atomic_read(cli->cl_lru_left); - while (c >= npages) { - if (c == cfs_atomic_cmpxchg(cli->cl_lru_left, c, c - npages)) { - oio->oi_lru_reserved = npages; - break; - } - c = cfs_atomic_read(cli->cl_lru_left); - } + RETURN(osc_io_iter_init(env, ios)); +} +EXPORT_SYMBOL(osc_io_rw_iter_init); - RETURN(0); +void osc_io_iter_fini(const struct lu_env *env, + const struct cl_io_slice *ios) +{ + struct osc_io *oio = osc_env_io(env); + + if (oio->oi_is_active) { + struct osc_object *osc = cl2osc(ios->cis_obj); + + oio->oi_is_active = 0; + LASSERT(atomic_read(&osc->oo_nr_ios) > 0); + if (atomic_dec_and_test(&osc->oo_nr_ios)) + 
wake_up_all(&osc->oo_io_waitq); + } } +EXPORT_SYMBOL(osc_io_iter_fini); -static void osc_io_rw_iter_fini(const struct lu_env *env, - const struct cl_io_slice *ios) +void osc_io_rw_iter_fini(const struct lu_env *env, + const struct cl_io_slice *ios) { struct osc_io *oio = osc_env_io(env); struct osc_object *osc = cl2osc(ios->cis_obj); - struct client_obd *cli = osc_cli(osc); if (oio->oi_lru_reserved > 0) { - cfs_atomic_add(oio->oi_lru_reserved, cli->cl_lru_left); + osc_lru_unreserve(osc_cli(osc), oio->oi_lru_reserved); oio->oi_lru_reserved = 0; } + oio->oi_write_osclock = NULL; + + osc_io_iter_fini(env, ios); } +EXPORT_SYMBOL(osc_io_rw_iter_fini); -static int osc_io_fault_start(const struct lu_env *env, - const struct cl_io_slice *ios) +int osc_io_fault_start(const struct lu_env *env, const struct cl_io_slice *ios) { - struct cl_io *io; - struct cl_fault_io *fio; - - ENTRY; + struct cl_io *io; + struct cl_fault_io *fio; + ENTRY; - io = ios->cis_io; - fio = &io->u.ci_fault; - CDEBUG(D_INFO, "%lu %d %d\n", - fio->ft_index, fio->ft_writable, fio->ft_nob); - /* - * If mapping is writeable, adjust kms to cover this page, - * but do not extend kms beyond actual file size. - * See bug 10919. - */ - if (fio->ft_writable) - osc_page_touch_at(env, ios->cis_obj, - fio->ft_index, fio->ft_nob); - RETURN(0); + io = ios->cis_io; + fio = &io->u.ci_fault; + CDEBUG(D_INFO, "%lu %d %zu\n", + fio->ft_index, fio->ft_writable, fio->ft_nob); + /* + * If mapping is writeable, adjust kms to cover this page, + * but do not extend kms beyond actual file size. + * See bug 10919. + */ + if (fio->ft_writable) + osc_page_touch_at(env, ios->cis_obj, + fio->ft_index, fio->ft_nob); + RETURN(0); } +EXPORT_SYMBOL(osc_io_fault_start); + static int osc_async_upcall(void *a, int rc) { @@ -391,7 +491,6 @@ static int osc_async_upcall(void *a, int rc) return 0; } -#if defined(__KERNEL__) /** * Checks that there are no pages being written in the extent being truncated. 
*/ @@ -404,19 +503,13 @@ static int trunc_check_cb(const struct lu_env *env, struct cl_io *io, oap = &ops->ops_oap; if (oap->oap_cmd & OBD_BRW_WRITE && - !cfs_list_empty(&oap->oap_pending_item)) - CL_PAGE_DEBUG(D_ERROR, env, page, "exists " LPU64 "/%s.\n", + !list_empty(&oap->oap_pending_item)) + CL_PAGE_DEBUG(D_ERROR, env, page, "exists %llu/%s.\n", start, current->comm); -#ifdef __linux__ - { - struct page *vmpage = cl_page_vmpage(env, page); - if (PageLocked(vmpage)) - CDEBUG(D_CACHE, "page %p index %lu locked for %d.\n", - ops, page->cp_index, - (oap->oap_cmd & OBD_BRW_RWMASK)); - } -#endif + if (PageLocked(page->cp_vmpage)) + CDEBUG(D_CACHE, "page %p index %lu locked for %d.\n", + ops, osc_index(ops), oap->oap_cmd & OBD_BRW_RWMASK); return CLP_GANG_OKAY; } @@ -439,13 +532,6 @@ static void osc_trunc_check(const struct lu_env *env, struct cl_io *io, start + partial, CL_PAGE_EOF, trunc_check_cb, (void *)&size); } -#else /* __KERNEL__ */ -static void osc_trunc_check(const struct lu_env *env, struct cl_io *io, - struct osc_io *oio, __u64 size) -{ - return; -} -#endif static int osc_io_setattr_start(const struct lu_env *env, const struct cl_io_slice *slice) @@ -458,13 +544,15 @@ static int osc_io_setattr_start(const struct lu_env *env, struct obdo *oa = &oio->oi_oa; struct osc_async_cbargs *cbargs = &oio->oi_cbarg; __u64 size = io->u.ci_setattr.sa_attr.lvb_size; - unsigned int ia_valid = io->u.ci_setattr.sa_valid; - int result = 0; - struct obd_info oinfo = { { { 0 } } }; + unsigned int ia_avalid = io->u.ci_setattr.sa_avalid; + enum op_xvalid ia_xvalid = io->u.ci_setattr.sa_xvalid; + int result = 0; + ENTRY; /* truncate cache dirty pages first */ if (cl_io_is_trunc(io)) - result = osc_cache_truncate_start(env, oio, cl2osc(obj), size); + result = osc_cache_truncate_start(env, cl2osc(obj), size, + &oio->oi_trunc); if (result == 0 && oio->oi_lockless == 0) { cl_object_attr_lock(obj); @@ -473,148 +561,284 @@ static int osc_io_setattr_start(const struct lu_env *env, struct ost_lvb *lvb = &io->u.ci_setattr.sa_attr; unsigned int cl_valid = 0; - if (ia_valid & ATTR_SIZE) { - attr->cat_size = attr->cat_kms = size; + if (ia_avalid & ATTR_SIZE) { + attr->cat_size = size; + attr->cat_kms = size; cl_valid = (CAT_SIZE | CAT_KMS); } - if (ia_valid & ATTR_MTIME_SET) { + if (ia_avalid & ATTR_MTIME_SET) { attr->cat_mtime = lvb->lvb_mtime; cl_valid |= CAT_MTIME; } - if (ia_valid & ATTR_ATIME_SET) { + if (ia_avalid & ATTR_ATIME_SET) { attr->cat_atime = lvb->lvb_atime; cl_valid |= CAT_ATIME; } - if (ia_valid & ATTR_CTIME_SET) { + if (ia_xvalid & OP_XVALID_CTIME_SET) { attr->cat_ctime = lvb->lvb_ctime; - cl_valid |= CAT_CTIME; - } - result = cl_object_attr_set(env, obj, attr, cl_valid); - } - cl_object_attr_unlock(obj); - } - memset(oa, 0, sizeof(*oa)); - if (result == 0) { + cl_valid |= CAT_CTIME; + } + result = cl_object_attr_update(env, obj, attr, + cl_valid); + } + cl_object_attr_unlock(obj); + } + memset(oa, 0, sizeof(*oa)); + if (result == 0) { oa->o_oi = loi->loi_oi; - oa->o_mtime = attr->cat_mtime; - oa->o_atime = attr->cat_atime; - oa->o_ctime = attr->cat_ctime; - oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP | OBD_MD_FLATIME | - OBD_MD_FLCTIME | OBD_MD_FLMTIME; - if (ia_valid & ATTR_SIZE) { - oa->o_size = size; - oa->o_blocks = OBD_OBJECT_EOF; - oa->o_valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS; - - if (oio->oi_lockless) { - oa->o_flags = OBD_FL_SRVLOCK; - oa->o_valid |= OBD_MD_FLFLAGS; - } - } else { - LASSERT(oio->oi_lockless == 0); - } + obdo_set_parent_fid(oa, io->u.ci_setattr.sa_parent_fid); + 
oa->o_stripe_idx = io->u.ci_setattr.sa_stripe_index; + oa->o_layout = io->u.ci_setattr.sa_layout; + oa->o_valid |= OBD_MD_FLID | OBD_MD_FLGROUP | + OBD_MD_FLOSTLAYOUT; + if (ia_avalid & ATTR_CTIME) { + oa->o_valid |= OBD_MD_FLCTIME; + oa->o_ctime = attr->cat_ctime; + } + if (ia_avalid & ATTR_ATIME) { + oa->o_valid |= OBD_MD_FLATIME; + oa->o_atime = attr->cat_atime; + } + if (ia_avalid & ATTR_MTIME) { + oa->o_valid |= OBD_MD_FLMTIME; + oa->o_mtime = attr->cat_mtime; + } + if (ia_avalid & ATTR_SIZE) { + oa->o_size = size; + oa->o_blocks = OBD_OBJECT_EOF; + oa->o_valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS; + + if (oio->oi_lockless) { + oa->o_flags = OBD_FL_SRVLOCK; + oa->o_valid |= OBD_MD_FLFLAGS; + } + + if (io->ci_layout_version > 0) { + /* verify layout version */ + oa->o_valid |= OBD_MD_LAYOUT_VERSION; + oa->o_layout_version = io->ci_layout_version; + } + } else { + LASSERT(oio->oi_lockless == 0); + } + + if (ia_xvalid & OP_XVALID_FLAGS) { + oa->o_flags = io->u.ci_setattr.sa_attr_flags; + oa->o_valid |= OBD_MD_FLFLAGS; + } - oinfo.oi_oa = oa; - oinfo.oi_capa = io->u.ci_setattr.sa_capa; init_completion(&cbargs->opc_sync); - if (ia_valid & ATTR_SIZE) - result = osc_punch_base(osc_export(cl2osc(obj)), - &oinfo, osc_async_upcall, - cbargs, PTLRPCD_SET); - else - result = osc_setattr_async_base(osc_export(cl2osc(obj)), - &oinfo, NULL, - osc_async_upcall, - cbargs, PTLRPCD_SET); + if (ia_avalid & ATTR_SIZE) + result = osc_punch_send(osc_export(cl2osc(obj)), + oa, osc_async_upcall, cbargs); + else + result = osc_setattr_async(osc_export(cl2osc(obj)), + oa, osc_async_upcall, + cbargs, PTLRPCD_SET); + cbargs->opc_rpc_sent = result == 0; - } - return result; + } + + RETURN(result); } -static void osc_io_setattr_end(const struct lu_env *env, - const struct cl_io_slice *slice) +void osc_io_setattr_end(const struct lu_env *env, + const struct cl_io_slice *slice) { struct cl_io *io = slice->cis_io; struct osc_io *oio = cl2osc_io(env, slice); struct cl_object *obj = slice->cis_obj; struct osc_async_cbargs *cbargs = &oio->oi_cbarg; - int result = 0; + struct cl_attr *attr = &osc_env_info(env)->oti_attr; + struct obdo *oa = &oio->oi_oa; + unsigned int cl_valid = 0; + int result = 0; if (cbargs->opc_rpc_sent) { wait_for_completion(&cbargs->opc_sync); result = io->ci_result = cbargs->opc_rc; } - if (result == 0) { - if (oio->oi_lockless) { - /* lockless truncate */ - struct osc_device *osd = lu2osc_dev(obj->co_lu.lo_dev); - - LASSERT(cl_io_is_trunc(io)); - /* XXX: Need a lock. */ - osd->od_stats.os_lockless_truncates++; - } - } + + if (result == 0) { + if (oio->oi_lockless) { + /* lockless truncate */ + struct osc_device *osd = lu2osc_dev(obj->co_lu.lo_dev); + + LASSERT(cl_io_is_trunc(io)); + /* XXX: Need a lock. 
*/ + osd->od_stats.os_lockless_truncates++; + } + } if (cl_io_is_trunc(io)) { __u64 size = io->u.ci_setattr.sa_attr.lvb_size; - osc_trunc_check(env, io, oio, size); - if (oio->oi_trunc != NULL) { - osc_cache_truncate_end(env, oio, cl2osc(obj)); - oio->oi_trunc = NULL; + cl_object_attr_lock(obj); + if (oa->o_valid & OBD_MD_FLBLOCKS) { + attr->cat_blocks = oa->o_blocks; + cl_valid |= CAT_BLOCKS; } + + cl_object_attr_update(env, obj, attr, cl_valid); + cl_object_attr_unlock(obj); + osc_trunc_check(env, io, oio, size); + osc_cache_truncate_end(env, oio->oi_trunc); + oio->oi_trunc = NULL; } } +EXPORT_SYMBOL(osc_io_setattr_end); + +struct osc_data_version_args { + struct osc_io *dva_oio; +}; + +static int +osc_data_version_interpret(const struct lu_env *env, struct ptlrpc_request *req, + void *args, int rc) +{ + struct osc_data_version_args *dva = args; + struct osc_io *oio = dva->dva_oio; + const struct ost_body *body; + + ENTRY; + if (rc < 0) + GOTO(out, rc); + + body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY); + if (body == NULL) + GOTO(out, rc = -EPROTO); + + lustre_get_wire_obdo(&req->rq_import->imp_connect_data, &oio->oi_oa, + &body->oa); + EXIT; +out: + oio->oi_cbarg.opc_rc = rc; + complete(&oio->oi_cbarg.opc_sync); + + return 0; +} -static int osc_io_read_start(const struct lu_env *env, - const struct cl_io_slice *slice) +static int osc_io_data_version_start(const struct lu_env *env, + const struct cl_io_slice *slice) +{ + struct cl_data_version_io *dv = &slice->cis_io->u.ci_data_version; + struct osc_io *oio = cl2osc_io(env, slice); + struct obdo *oa = &oio->oi_oa; + struct osc_async_cbargs *cbargs = &oio->oi_cbarg; + struct osc_object *obj = cl2osc(slice->cis_obj); + struct lov_oinfo *loi = obj->oo_oinfo; + struct obd_export *exp = osc_export(obj); + struct ptlrpc_request *req; + struct ost_body *body; + struct osc_data_version_args *dva; + int rc; + + ENTRY; + memset(oa, 0, sizeof(*oa)); + oa->o_oi = loi->loi_oi; + oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP; + + if (dv->dv_flags & (LL_DV_RD_FLUSH | LL_DV_WR_FLUSH)) { + oa->o_valid |= OBD_MD_FLFLAGS; + oa->o_flags |= OBD_FL_SRVLOCK; + if (dv->dv_flags & LL_DV_WR_FLUSH) + oa->o_flags |= OBD_FL_FLUSH; + } + + init_completion(&cbargs->opc_sync); + + req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR); + if (req == NULL) + RETURN(-ENOMEM); + + rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR); + if (rc < 0) { + ptlrpc_request_free(req); + RETURN(rc); + } + + body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY); + lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa); + + ptlrpc_request_set_replen(req); + req->rq_interpret_reply = osc_data_version_interpret; + dva = ptlrpc_req_async_args(dva, req); + dva->dva_oio = oio; + + ptlrpcd_add_req(req); + + RETURN(0); +} + +static void osc_io_data_version_end(const struct lu_env *env, + const struct cl_io_slice *slice) +{ + struct cl_data_version_io *dv = &slice->cis_io->u.ci_data_version; + struct osc_io *oio = cl2osc_io(env, slice); + struct osc_async_cbargs *cbargs = &oio->oi_cbarg; + + ENTRY; + wait_for_completion(&cbargs->opc_sync); + + if (cbargs->opc_rc != 0) { + slice->cis_io->ci_result = cbargs->opc_rc; + } else { + slice->cis_io->ci_result = 0; + if (!(oio->oi_oa.o_valid & + (OBD_MD_LAYOUT_VERSION | OBD_MD_FLDATAVERSION))) + slice->cis_io->ci_result = -ENOTSUPP; + + if (oio->oi_oa.o_valid & OBD_MD_LAYOUT_VERSION) + dv->dv_layout_version = oio->oi_oa.o_layout_version; + if (oio->oi_oa.o_valid & OBD_MD_FLDATAVERSION) + 
dv->dv_data_version = oio->oi_oa.o_data_version; + } + + EXIT; +} + +int osc_io_read_start(const struct lu_env *env, + const struct cl_io_slice *slice) { - struct osc_io *oio = cl2osc_io(env, slice); struct cl_object *obj = slice->cis_obj; struct cl_attr *attr = &osc_env_info(env)->oti_attr; int rc = 0; ENTRY; - if (oio->oi_lockless == 0 && !slice->cis_io->ci_noatime) { + if (!slice->cis_io->ci_noatime) { cl_object_attr_lock(obj); - attr->cat_atime = LTIME_S(CFS_CURRENT_TIME); - rc = cl_object_attr_set(env, obj, attr, CAT_ATIME); + attr->cat_atime = ktime_get_real_seconds(); + rc = cl_object_attr_update(env, obj, attr, CAT_ATIME); cl_object_attr_unlock(obj); } RETURN(rc); } +EXPORT_SYMBOL(osc_io_read_start); -static int osc_io_write_start(const struct lu_env *env, - const struct cl_io_slice *slice) +int osc_io_write_start(const struct lu_env *env, + const struct cl_io_slice *slice) { - struct osc_io *oio = cl2osc_io(env, slice); - struct cl_object *obj = slice->cis_obj; - struct cl_attr *attr = &osc_env_info(env)->oti_attr; - int result = 0; - ENTRY; - - if (oio->oi_lockless == 0) { - OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_DELAY_SETTIME, 1); - cl_object_attr_lock(obj); - result = cl_object_attr_get(env, obj, attr); - if (result == 0) { - attr->cat_mtime = attr->cat_ctime = - LTIME_S(CFS_CURRENT_TIME); - result = cl_object_attr_set(env, obj, attr, - CAT_MTIME | CAT_CTIME); - } - cl_object_attr_unlock(obj); - } - RETURN(result); + struct cl_object *obj = slice->cis_obj; + struct cl_attr *attr = &osc_env_info(env)->oti_attr; + int rc = 0; + ENTRY; + + OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_DELAY_SETTIME, 1); + cl_object_attr_lock(obj); + attr->cat_mtime = attr->cat_ctime = ktime_get_real_seconds(); + rc = cl_object_attr_update(env, obj, attr, CAT_MTIME | CAT_CTIME); + cl_object_attr_unlock(obj); + + RETURN(rc); } +EXPORT_SYMBOL(osc_io_write_start); -static int osc_fsync_ost(const struct lu_env *env, struct osc_object *obj, - struct cl_fsync_io *fio) +int osc_fsync_ost(const struct lu_env *env, struct osc_object *obj, + struct cl_fsync_io *fio) { struct osc_io *oio = osc_env_io(env); struct obdo *oa = &oio->oi_oa; - struct obd_info *oinfo = &oio->oi_info; struct lov_oinfo *loi = obj->oo_oinfo; struct osc_async_cbargs *cbargs = &oio->oi_cbarg; int rc = 0; @@ -631,18 +855,15 @@ static int osc_fsync_ost(const struct lu_env *env, struct osc_object *obj, obdo_set_parent_fid(oa, fio->fi_fid); - memset(oinfo, 0, sizeof(*oinfo)); - oinfo->oi_oa = oa; - oinfo->oi_capa = fio->fi_capa; init_completion(&cbargs->opc_sync); - rc = osc_sync_base(osc_export(obj), oinfo, osc_async_upcall, cbargs, - PTLRPCD_SET); + rc = osc_sync_base(obj, oa, osc_async_upcall, cbargs, PTLRPCD_SET); RETURN(rc); } +EXPORT_SYMBOL(osc_fsync_ost); -static int osc_io_fsync_start(const struct lu_env *env, - const struct cl_io_slice *slice) +int osc_io_fsync_start(const struct lu_env *env, + const struct cl_io_slice *slice) { struct cl_io *io = slice->cis_io; struct cl_fsync_io *fio = &io->u.ci_fsync; @@ -681,8 +902,8 @@ static int osc_io_fsync_start(const struct lu_env *env, RETURN(result); } -static void osc_io_fsync_end(const struct lu_env *env, - const struct cl_io_slice *slice) +void osc_io_fsync_end(const struct lu_env *env, + const struct cl_io_slice *slice) { struct cl_fsync_io *fio = &slice->cis_io->u.ci_fsync; struct cl_object *obj = slice->cis_obj; @@ -702,9 +923,79 @@ static void osc_io_fsync_end(const struct lu_env *env, } slice->cis_io->ci_result = result; } +EXPORT_SYMBOL(osc_io_fsync_end); -static void osc_io_end(const struct lu_env 
*env, - const struct cl_io_slice *slice) +static int osc_io_ladvise_start(const struct lu_env *env, + const struct cl_io_slice *slice) +{ + int result = 0; + struct cl_io *io = slice->cis_io; + struct osc_io *oio = cl2osc_io(env, slice); + struct cl_object *obj = slice->cis_obj; + struct lov_oinfo *loi = cl2osc(obj)->oo_oinfo; + struct cl_ladvise_io *lio = &io->u.ci_ladvise; + struct obdo *oa = &oio->oi_oa; + struct osc_async_cbargs *cbargs = &oio->oi_cbarg; + struct lu_ladvise *ladvise; + struct ladvise_hdr *ladvise_hdr; + int buf_size; + int num_advise = 1; + ENTRY; + + /* TODO: add multiple ladvise support in CLIO */ + buf_size = offsetof(typeof(*ladvise_hdr), lah_advise[num_advise]); + if (osc_env_info(env)->oti_ladvise_buf.lb_len < buf_size) + lu_buf_realloc(&osc_env_info(env)->oti_ladvise_buf, buf_size); + + ladvise_hdr = osc_env_info(env)->oti_ladvise_buf.lb_buf; + if (ladvise_hdr == NULL) + RETURN(-ENOMEM); + + memset(ladvise_hdr, 0, buf_size); + ladvise_hdr->lah_magic = LADVISE_MAGIC; + ladvise_hdr->lah_count = num_advise; + ladvise_hdr->lah_flags = lio->li_flags; + + memset(oa, 0, sizeof(*oa)); + oa->o_oi = loi->loi_oi; + oa->o_valid = OBD_MD_FLID; + obdo_set_parent_fid(oa, lio->li_fid); + + ladvise = ladvise_hdr->lah_advise; + ladvise->lla_start = lio->li_start; + ladvise->lla_end = lio->li_end; + ladvise->lla_advice = lio->li_advice; + + if (lio->li_flags & LF_ASYNC) { + result = osc_ladvise_base(osc_export(cl2osc(obj)), oa, + ladvise_hdr, NULL, NULL, NULL); + } else { + init_completion(&cbargs->opc_sync); + result = osc_ladvise_base(osc_export(cl2osc(obj)), oa, + ladvise_hdr, osc_async_upcall, + cbargs, PTLRPCD_SET); + cbargs->opc_rpc_sent = result == 0; + } + RETURN(result); +} + +static void osc_io_ladvise_end(const struct lu_env *env, + const struct cl_io_slice *slice) +{ + struct cl_io *io = slice->cis_io; + struct osc_io *oio = cl2osc_io(env, slice); + struct osc_async_cbargs *cbargs = &oio->oi_cbarg; + int result = 0; + struct cl_ladvise_io *lio = &io->u.ci_ladvise; + + if ((!(lio->li_flags & LF_ASYNC)) && cbargs->opc_rpc_sent) { + wait_for_completion(&cbargs->opc_sync); + result = cbargs->opc_rc; + } + slice->cis_io->ci_result = result; +} + +void osc_io_end(const struct lu_env *env, const struct cl_io_slice *slice) { struct osc_io *oio = cl2osc_io(env, slice); @@ -713,10 +1004,13 @@ static void osc_io_end(const struct lu_env *env, oio->oi_active = NULL; } } +EXPORT_SYMBOL(osc_io_end); static const struct cl_io_operations osc_io_ops = { .op = { [CIT_READ] = { + .cio_iter_init = osc_io_rw_iter_init, + .cio_iter_fini = osc_io_rw_iter_fini, .cio_start = osc_io_read_start, .cio_fini = osc_io_fini }, @@ -728,10 +1022,18 @@ static const struct cl_io_operations osc_io_ops = { .cio_fini = osc_io_fini }, [CIT_SETATTR] = { + .cio_iter_init = osc_io_iter_init, + .cio_iter_fini = osc_io_iter_fini, .cio_start = osc_io_setattr_start, .cio_end = osc_io_setattr_end }, + [CIT_DATA_VERSION] = { + .cio_start = osc_io_data_version_start, + .cio_end = osc_io_data_version_end, + }, [CIT_FAULT] = { + .cio_iter_init = osc_io_iter_init, + .cio_iter_fini = osc_io_iter_fini, .cio_start = osc_io_fault_start, .cio_end = osc_io_end, .cio_fini = osc_io_fini @@ -741,10 +1043,16 @@ static const struct cl_io_operations osc_io_ops = { .cio_end = osc_io_fsync_end, .cio_fini = osc_io_fini }, + [CIT_LADVISE] = { + .cio_start = osc_io_ladvise_start, + .cio_end = osc_io_ladvise_end, + .cio_fini = osc_io_fini + }, [CIT_MISC] = { .cio_fini = osc_io_fini } }, + .cio_read_ahead = osc_io_read_ahead, .cio_submit = 
osc_io_submit, .cio_commit_async = osc_io_commit_async }; @@ -755,106 +1063,6 @@ static const struct cl_io_operations osc_io_ops = { * */ -static int osc_req_prep(const struct lu_env *env, - const struct cl_req_slice *slice) -{ - return 0; -} - -static void osc_req_completion(const struct lu_env *env, - const struct cl_req_slice *slice, int ioret) -{ - struct osc_req *or; - - or = cl2osc_req(slice); - OBD_SLAB_FREE_PTR(or, osc_req_kmem); -} - -/** - * Implementation of struct cl_req_operations::cro_attr_set() for osc - * layer. osc is responsible for struct obdo::o_id and struct obdo::o_seq - * fields. - */ -static void osc_req_attr_set(const struct lu_env *env, - const struct cl_req_slice *slice, - const struct cl_object *obj, - struct cl_req_attr *attr, obd_valid flags) -{ - struct lov_oinfo *oinfo; - struct cl_req *clerq; - struct cl_page *apage; /* _some_ page in @clerq */ - struct cl_lock *lock; /* _some_ lock protecting @apage */ - struct osc_lock *olck; - struct osc_page *opg; - struct obdo *oa; - struct ost_lvb *lvb; - - oinfo = cl2osc(obj)->oo_oinfo; - lvb = &oinfo->loi_lvb; - oa = attr->cra_oa; - - if ((flags & OBD_MD_FLMTIME) != 0) { - oa->o_mtime = lvb->lvb_mtime; - oa->o_valid |= OBD_MD_FLMTIME; - } - if ((flags & OBD_MD_FLATIME) != 0) { - oa->o_atime = lvb->lvb_atime; - oa->o_valid |= OBD_MD_FLATIME; - } - if ((flags & OBD_MD_FLCTIME) != 0) { - oa->o_ctime = lvb->lvb_ctime; - oa->o_valid |= OBD_MD_FLCTIME; - } - if (flags & OBD_MD_FLGROUP) { - ostid_set_seq(&oa->o_oi, ostid_seq(&oinfo->loi_oi)); - oa->o_valid |= OBD_MD_FLGROUP; - } - if (flags & OBD_MD_FLID) { - ostid_set_id(&oa->o_oi, ostid_id(&oinfo->loi_oi)); - oa->o_valid |= OBD_MD_FLID; - } - if (flags & OBD_MD_FLHANDLE) { - clerq = slice->crs_req; - LASSERT(!cfs_list_empty(&clerq->crq_pages)); - apage = container_of(clerq->crq_pages.next, - struct cl_page, cp_flight); - opg = osc_cl_page_osc(apage); - apage = opg->ops_cl.cpl_page; /* now apage is a sub-page */ - lock = cl_lock_at_page(env, apage->cp_obj, apage, NULL, 1, 1); - if (lock == NULL) { - struct cl_object_header *head; - struct cl_lock *scan; - - head = cl_object_header(apage->cp_obj); - cfs_list_for_each_entry(scan, &head->coh_locks, - cll_linkage) - CL_LOCK_DEBUG(D_ERROR, env, scan, - "no cover page!\n"); - CL_PAGE_DEBUG(D_ERROR, env, apage, - "dump uncover page!\n"); - libcfs_debug_dumpstack(NULL); - LBUG(); - } - - olck = osc_lock_at(lock); - LASSERT(olck != NULL); - LASSERT(ergo(opg->ops_srvlock, olck->ols_lock == NULL)); - /* check for lockless io. */ - if (olck->ols_lock != NULL) { - oa->o_handle = olck->ols_lock->l_remote_handle; - oa->o_valid |= OBD_MD_FLHANDLE; - } - cl_lock_put(env, lock); - } -} - -static const struct cl_req_operations osc_req_ops = { - .cro_prep = osc_req_prep, - .cro_attr_set = osc_req_attr_set, - .cro_completion = osc_req_completion -}; - - int osc_io_init(const struct lu_env *env, struct cl_object *obj, struct cl_io *io) { @@ -865,19 +1073,4 @@ int osc_io_init(const struct lu_env *env, return 0; } -int osc_req_init(const struct lu_env *env, struct cl_device *dev, - struct cl_req *req) -{ - struct osc_req *or; - int result; - - OBD_SLAB_ALLOC_PTR_GFP(or, osc_req_kmem, __GFP_IO); - if (or != NULL) { - cl_req_slice_add(req, &or->or_cl, dev, &osc_req_ops); - result = 0; - } else - result = -ENOMEM; - return result; -} - /** @} osc */
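
Illustrative aside, separate from the patch itself: the write-path flush test added to osc_io_submit() above flushes the pending list either when a full RPC worth of pages has been queued or when the queued pages already cover osc_max_write_chunks() chunks and adding one more page would start a new chunk. The standalone sketch below replays that arithmetic with made-up values; the page size, chunk size, pages-per-RPC and "max write chunks" numbers are assumptions for the example (the last one stands in for osc_max_write_chunks(cli)), not values read from a real client_obd.

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	unsigned int page_shift = 12;		/* 4 KiB pages (assumed) */
	unsigned int chunk_bits = 16;		/* 64 KiB chunks (assumed) */
	unsigned int ppc_bits = chunk_bits - page_shift;
	unsigned int ppc = 1u << ppc_bits;	/* pages per chunk = 16 */
	unsigned int max_pages = 256;		/* cl_max_pages_per_rpc (assumed) */
	unsigned int max_write_chunks = 8;	/* stand-in for osc_max_write_chunks() */
	unsigned int total = 300;		/* pages in this submission (example) */
	unsigned int queued = 0;
	unsigned int i;

	for (i = 0; i < total; i++) {
		bool sync_queue = false;

		queued++;
		if (queued == max_pages) {
			sync_queue = true;
		} else {
			/* chunks covered by the pages queued so far */
			unsigned int chunks = (queued + ppc - 1) >> ppc_bits;
			/* chunks that would be covered if one more page were added */
			unsigned int next_chunks = (queued + ppc) >> ppc_bits;

			/* next page would exceed the write chunk limit */
			if (chunks == max_write_chunks && next_chunks > chunks)
				sync_queue = true;
		}

		if (sync_queue) {
			printf("flush %u pages (%u chunks)\n", queued,
			       (queued + ppc - 1) >> ppc_bits);
			queued = 0;
		}
	}
	if (queued > 0)
		printf("final flush of %u pages\n", queued);

	return 0;
}

With these example numbers the run prints "flush 128 pages (8 chunks)" twice and then "final flush of 44 pages": a large synchronous write is cut into RPCs bounded by the write-chunk limit rather than only by cl_max_pages_per_rpc.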