X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;ds=sidebyside;f=lustre%2Fllite%2Fvvp_io.c;h=50ce9095a68db24f96f8113311d520a7235760e3;hb=0a714ba01bbcdb43fa2d07e88652be2b8fb1c52f;hp=a7653db3c346aded826b8499b94fcf837236f4e9;hpb=742b9ac0e4714a70529429a5f6661878e58d6594;p=fs%2Flustre-release.git

diff --git a/lustre/llite/vvp_io.c b/lustre/llite/vvp_io.c
index a7653db..50ce909 100644
--- a/lustre/llite/vvp_io.c
+++ b/lustre/llite/vvp_io.c
@@ -183,36 +183,6 @@ static int vvp_mmap_locks(const struct lu_env *env,
         RETURN(0);
 }
 
-static void vvp_io_update_iov(const struct lu_env *env,
-                              struct ccc_io *vio, struct cl_io *io)
-{
-        int i;
-        size_t size = io->u.ci_rw.crw_count;
-
-        vio->cui_iov_olen = 0;
-        if (cl_io_is_sendfile(io) || size == vio->cui_tot_count)
-                return;
-
-        if (vio->cui_tot_nrsegs == 0)
-                vio->cui_tot_nrsegs = vio->cui_nrsegs;
-
-        for (i = 0; i < vio->cui_tot_nrsegs; i++) {
-                struct iovec *iv = &vio->cui_iov[i];
-
-                if (iv->iov_len < size)
-                        size -= iv->iov_len;
-                else {
-                        if (iv->iov_len > size) {
-                                vio->cui_iov_olen = iv->iov_len;
-                                iv->iov_len = size;
-                        }
-                        break;
-                }
-        }
-
-        vio->cui_nrsegs = i + 1;
-}
-
 static int vvp_io_rw_lock(const struct lu_env *env, struct cl_io *io,
                           enum cl_lock_mode mode, loff_t start, loff_t end)
 {
@@ -224,7 +194,7 @@ static int vvp_io_rw_lock(const struct lu_env *env, struct cl_io *io,
         LASSERT(vvp_env_io(env)->cui_oneshot == 0);
         ENTRY;
 
-        vvp_io_update_iov(env, cio, io);
+        ccc_io_update_iov(env, cio, io);
 
         if (io->u.ci_rw.crw_nonblock)
                 ast_flags |= CEF_NONBLOCK;
@@ -310,7 +280,7 @@ static int vvp_io_trunc_iter_init(const struct lu_env *env,
 }
 
 /**
- * Implementation of cl_io_operations::vio_lock() method for CIT_TRUNC io.
+ * Implementation of cl_io_operations::cio_lock() method for CIT_TRUNC io.
  *
  * Handles "lockless io" mode when extent locking is done by server.
  */
@@ -454,14 +424,14 @@ static int vvp_io_read_start(const struct lu_env *env,
         int result;
         loff_t pos = io->u.ci_rd.rd.crw_pos;
-        size_t cnt = io->u.ci_rd.rd.crw_count;
-        size_t tot = cio->cui_tot_count;
+        long cnt = io->u.ci_rd.rd.crw_count;
+        long tot = cio->cui_tot_count;
         int exceed = 0;
 
         CLOBINVRNT(env, obj, ccc_object_invariant(obj));
         LASSERT(vio->cui_oneshot == 0);
 
-        CDEBUG(D_VFSTRACE, "read: -> [%lli, %lli)\n", pos, pos + (long long)cnt);
+        CDEBUG(D_VFSTRACE, "read: -> [%lli, %lli)\n", pos, pos + cnt);
 
         result = ccc_prep_size(env, obj, io, pos, tot, 1, &exceed);
         if (result != 0)
@@ -470,7 +440,7 @@ static int vvp_io_read_start(const struct lu_env *env,
                 goto out;
 
         LU_OBJECT_HEADER(D_INODE, env, &obj->co_lu,
-                         "Read ino %lu, "LPSZ" bytes, offset %lld, size %llu\n",
+                         "Read ino %lu, %lu bytes, offset %lld, size %llu\n",
                          inode->i_ino, cnt, pos, i_size_read(inode));
 
         /* turn off the kernel's read-ahead */
@@ -522,12 +492,16 @@ static int vvp_io_write_start(const struct lu_env *env,
 
         ENTRY;
 
-        if (cl_io_is_append(io))
+        if (cl_io_is_append(io)) {
                 /*
                  * PARALLEL IO This has to be changed for parallel IO doing
                  * out-of-order writes.
                  */
                 pos = io->u.ci_wr.wr.crw_pos = i_size_read(inode);
+#ifndef HAVE_FILE_WRITEV
+                cio->cui_iocb->ki_pos = pos;
+#endif
+        }
 
         CDEBUG(D_VFSTRACE, "write: [%lli, %lli)\n", pos, pos + (long long)cnt);
 
@@ -627,37 +601,6 @@ static int vvp_io_fault_start(const struct lu_env *env,
         return result;
 }
 
-static void vvp_io_advance(const struct lu_env *env,
-                           const struct cl_io_slice *ios, size_t nob)
-{
-        struct ccc_io *vio = cl2ccc_io(env, ios);
-        struct cl_io *io = ios->cis_io;
-        struct cl_object *obj = ios->cis_io->ci_obj;
-
-        CLOBINVRNT(env, obj, ccc_object_invariant(obj));
-
-        if (!cl_io_is_sendfile(io) && io->ci_continue) {
-                /* update the iov */
-                LASSERT(vio->cui_tot_nrsegs >= vio->cui_nrsegs);
-                LASSERT(vio->cui_tot_count >= nob);
-
-                vio->cui_iov += vio->cui_nrsegs;
-                vio->cui_tot_nrsegs -= vio->cui_nrsegs;
-                vio->cui_tot_count -= nob;
-
-                if (vio->cui_iov_olen) {
-                        struct iovec *iv;
-
-                        vio->cui_iov--;
-                        vio->cui_tot_nrsegs++;
-                        iv = &vio->cui_iov[0];
-                        iv->iov_base += iv->iov_len;
-                        LASSERT(vio->cui_iov_olen > iv->iov_len);
-                        iv->iov_len = vio->cui_iov_olen - iv->iov_len;
-                }
-        }
-}
-
 static int vvp_io_read_page(const struct lu_env *env,
                             const struct cl_io_slice *ios,
                             const struct cl_page_slice *slice)
@@ -679,7 +622,7 @@ static int vvp_io_read_page(const struct lu_env *env,
 
         ENTRY;
 
-        if (sbi->ll_ra_info.ra_max_pages)
+        if (sbi->ll_ra_info.ra_max_pages_per_file)
                 ras_update(sbi, inode, ras, page->cp_index,
                            cp->cpg_defer_uptodate);
 
@@ -699,14 +642,14 @@ static int vvp_io_read_page(const struct lu_env *env,
 
         if (cp->cpg_defer_uptodate) {
                 cp->cpg_ra_used = 1;
-                cl_page_export(env, page);
+                cl_page_export(env, page, 1);
         }
         /*
          * Add page into the queue even when it is marked uptodate above.
          * this will unlock it automatically as part of cl_page_list_disown().
          */
         cl_2queue_add(queue, page);
-        if (sbi->ll_ra_info.ra_max_pages)
+        if (sbi->ll_ra_info.ra_max_pages_per_file)
                 ll_readahead(env, io, ras,
                              vmpage->mapping, &queue->c2_qin, fd->fd_flags);
 
@@ -718,10 +661,6 @@ static int vvp_page_sync_io(const struct lu_env *env, struct cl_io *io,
                             int to, enum cl_req_type crt)
 {
         struct cl_2queue *queue;
-        struct ccc_object *cobo = cl2ccc(page->cp_obj);
-        struct cl_sync_io *anchor = &ccc_env_info(env)->cti_sync_io;
-
-        int writing = io->ci_type == CIT_WRITE;
         int result;
 
         LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
@@ -729,19 +668,9 @@ static int vvp_page_sync_io(const struct lu_env *env, struct cl_io *io,
 
         queue = &io->ci_queue;
         cl_2queue_init_page(queue, page);
-
-        if (writing)
-                /* Do not pass llap here as it is sync write. */
-                vvp_write_pending(cobo, cp);
-
-        cl_sync_io_init(anchor, 1);
-        cp->cpg_sync_io = anchor;
         cl_page_clip(env, page, 0, to);
-        result = cl_io_submit_rw(env, io, crt, queue, CRP_NORMAL);
-        if (result == 0)
-                result = cl_sync_io_wait(env, io, &queue->c2_qout, anchor);
-        else
-                cp->cpg_sync_io = NULL;
+
+        result = cl_io_submit_sync(env, io, crt, queue, CRP_NORMAL, 0);
         LASSERT(cl_page_is_owned(page, io));
         cl_page_clip(env, page, 0, CFS_PAGE_SIZE);
 
@@ -764,7 +693,7 @@ static int vvp_io_prepare_partial(const struct lu_env *env, struct cl_io *io,
                                   struct ccc_page *cp,
                                   unsigned from, unsigned to)
 {
-        struct cl_attr *attr = &ccc_env_info(env)->cti_attr;
+        struct cl_attr *attr = ccc_env_thread_attr(env);
         loff_t offset = cl_offset(obj, pg->cp_index);
         int result;
 
@@ -796,7 +725,7 @@ static int vvp_io_prepare_partial(const struct lu_env *env, struct cl_io *io,
                  * details. -jay
                  */
                 if (result == 0)
-                        cl_page_export(env, pg);
+                        cl_page_export(env, pg, 1);
         }
         return result;
 }
@@ -886,6 +815,9 @@ static int vvp_io_commit_write(const struct lu_env *env,
                 tallyop = LPROC_LL_DIRTY_MISSES;
                 vvp_write_pending(cl2ccc(obj), cp);
                 set_page_dirty(vmpage);
+                /* ll_set_page_dirty() does the same for now, but
+                 * it will not soon. */
+                vvp_write_pending(cl2ccc(obj), cp);
                 result = cl_page_cache_add(env, io, pg, CRT_WRITE);
                 if (result == -EDQUOT)
                         /*
@@ -904,6 +836,9 @@ static int vvp_io_commit_write(const struct lu_env *env,
                          */
                         result = vvp_page_sync_io(env, io, pg, cp,
                                                   to, CRT_WRITE);
+                        if (result)
+                                CERROR("Write page %lu of inode %p failed %d\n",
+                                       pg->cp_index, inode, result);
                 } else {
                         tallyop = LPROC_LL_DIRTY_HITS;
                         result = 0;
@@ -915,7 +850,7 @@ static int vvp_io_commit_write(const struct lu_env *env,
         if (result == 0) {
                 if (size > i_size_read(inode))
                         i_size_write(inode, size);
-                cl_page_export(env, pg);
+                cl_page_export(env, pg, 1);
         } else if (size > i_size_read(inode))
                 cl_page_discard(env, io, pg);
         RETURN(result);
@@ -927,13 +862,13 @@ static const struct cl_io_operations vvp_io_ops = {
                         .cio_fini      = vvp_io_fini,
                         .cio_lock      = vvp_io_read_lock,
                         .cio_start     = vvp_io_read_start,
-                        .cio_advance   = vvp_io_advance
+                        .cio_advance   = ccc_io_advance
                 },
                 [CIT_WRITE] = {
                         .cio_fini      = vvp_io_fini,
                         .cio_lock      = vvp_io_write_lock,
                         .cio_start     = vvp_io_write_start,
-                        .cio_advance   = vvp_io_advance
+                        .cio_advance   = ccc_io_advance
                 },
                 [CIT_TRUNC] = {
                         .cio_fini      = vvp_io_trunc_fini,
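
The vvp_page_sync_io() hunk above carries the main refactor in this diff: the
hand-rolled synchronous submit (cl_sync_io_init(), cl_io_submit_rw(),
cl_sync_io_wait(), plus the cp->cpg_sync_io bookkeeping) collapses into a
single cl_io_submit_sync() call. For orientation only, here is a minimal
sketch of what such a helper plausibly folds together, built solely from the
calls visible in the removed lines. It is not the committed implementation:
the real helper lives in the cl_io layer, takes the trailing timeout argument
(passed as 0 in the hunk), and wires the anchor up to each queued page, all
of which is elided here.

/*
 * Sketch only -- assumed shape of the helper, not the committed code.
 * NB: attaching the anchor to the queued page (cp->cpg_sync_io in the
 * removed lines) is omitted; without it the wait would never fire.
 */
static int cl_io_submit_sync_sketch(const struct lu_env *env,
                                    struct cl_io *io, enum cl_req_type crt,
                                    struct cl_2queue *queue,
                                    enum cl_req_priority prio)
{
        /* One on-stack anchor replaces the per-env cti_sync_io that the
         * removed lines borrowed from ccc_env_info(). */
        struct cl_sync_io anchor;
        int result;

        /* vvp_page_sync_io() queues exactly one page, hence count 1. */
        cl_sync_io_init(&anchor, 1);

        result = cl_io_submit_rw(env, io, crt, queue, prio);
        if (result == 0)
                /* Block until the queued transfer signals the anchor. */
                result = cl_sync_io_wait(env, io, &queue->c2_qout, &anchor);
        return result;
}

Folding the sequence into one entry point also removes the asymmetric cleanup
(the else branch clearing cp->cpg_sync_io) from every caller.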