struct vm_area_struct *vma;
struct cl_lock_descr *descr = &cti->cti_descr;
ldlm_policy_data_t policy;
- struct inode *inode;
unsigned long addr;
unsigned long seg;
ssize_t count;
count += addr & (~CFS_PAGE_MASK);
addr &= CFS_PAGE_MASK;
while((vma = our_vma(addr, count)) != NULL) {
- struct file *file = vma->vm_file;
- struct ll_file_data *fd;
+ struct inode *inode = vma->vm_file->f_dentry->d_inode;
+ int flags = CEF_MUST;
- LASSERT(file);
- fd = LUSTRE_FPRIVATE(file);
-
- inode = file->f_dentry->d_inode;
- if (!(fd->fd_flags & LL_FILE_IGNORE_LOCK ||
- ll_i2sbi(inode)->ll_flags & LL_SBI_NOLCK))
- goto cont;
+ if (ll_file_nolock(vma->vm_file)) {
+ /*
+ * In the no-lock case, a lockless lock will be
+ * generated.
+ */
+ flags = CEF_NEVER;
+ }
/*
* XXX: Required lock mode can be weakened: CIT_WRITE
policy.l_extent.start);
descr->cld_end = cl_index(descr->cld_obj,
policy.l_extent.end);
- result = cl_io_lock_alloc_add(env, io, descr, CEF_MUST);
+ result = cl_io_lock_alloc_add(env, io, descr, flags);
if (result < 0)
RETURN(result);
- cont:
if (vma->vm_end - addr >= count)
break;
+
count -= vma->vm_end - addr;
addr = vma->vm_end;
}
RETURN(0);
}
-static void vvp_io_update_iov(const struct lu_env *env,
- struct ccc_io *vio, struct cl_io *io)
-{
- int i;
- size_t size = io->u.ci_rw.crw_count;
-
- vio->cui_iov_olen = 0;
- if (cl_io_is_sendfile(io) || size == vio->cui_tot_count)
- return;
-
- if (vio->cui_tot_nrsegs == 0)
- vio->cui_tot_nrsegs = vio->cui_nrsegs;
-
- for (i = 0; i < vio->cui_tot_nrsegs; i++) {
- struct iovec *iv = &vio->cui_iov[i];
-
- if (iv->iov_len < size)
- size -= iv->iov_len;
- else {
- if (iv->iov_len > size) {
- vio->cui_iov_olen = iv->iov_len;
- iv->iov_len = size;
- }
- break;
- }
- }
-
- vio->cui_nrsegs = i + 1;
-}
-
static int vvp_io_rw_lock(const struct lu_env *env, struct cl_io *io,
enum cl_lock_mode mode, loff_t start, loff_t end)
{
LASSERT(vvp_env_io(env)->cui_oneshot == 0);
ENTRY;
- vvp_io_update_iov(env, cio, io);
+ ccc_io_update_iov(env, cio, io);
if (io->u.ci_rw.crw_nonblock)
ast_flags |= CEF_NONBLOCK;
}
/**
- * Implementation of cl_io_operations::vio_lock() method for CIT_TRUNC io.
+ * Implementation of cl_io_operations::cio_lock() method for CIT_TRUNC io.
*
* Handles "lockless io" mode when extent locking is done by server.
*/
* Wait for the transfer completion for a partially
* truncated page to avoid dead-locking an OST with
* the concurrent page-wise overlapping WRITE and
- * PUNCH requests.
+ * PUNCH requests. BUG:17397.
*
* Partial page is disowned in vvp_io_trunc_end().
*/
int result;
loff_t pos = io->u.ci_rd.rd.crw_pos;
- size_t cnt = io->u.ci_rd.rd.crw_count;
- size_t tot = cio->cui_tot_count;
+ long cnt = io->u.ci_rd.rd.crw_count;
+ long tot = cio->cui_tot_count;
+ int exceed = 0;
CLOBINVRNT(env, obj, ccc_object_invariant(obj));
LASSERT(vio->cui_oneshot == 0);
CDEBUG(D_VFSTRACE, "read: -> [%lli, %lli)\n", pos, pos + cnt);
- result = ccc_prep_size(env, obj, io, pos + tot - 1, 1);
+ result = ccc_prep_size(env, obj, io, pos, tot, 1, &exceed);
if (result != 0)
return result;
+ else if (exceed != 0)
+ goto out;
LU_OBJECT_HEADER(D_INODE, env, &obj->co_lu,
- "Read ino %lu, "LPSZ" bytes, offset %lld, size %llu\n",
+ "Read ino %lu, %lu bytes, offset %lld, size %llu\n",
inode->i_ino, cnt, pos, i_size_read(inode));
/* turn off the kernel's read-ahead */
result = lustre_generic_file_read(file, cio, &pos);
}
+out:
if (result >= 0) {
if (result < cnt)
io->ci_continue = 0;
ENTRY;
- if (cl_io_is_append(io))
+ if (cl_io_is_append(io)) {
/*
* PARALLEL IO This has to be changed for parallel IO doing
* out-of-order writes.
*/
pos = io->u.ci_wr.wr.crw_pos = i_size_read(inode);
+#ifndef HAVE_FILE_WRITEV
+ cio->cui_iocb->ki_pos = pos;
+#endif
+ }
- CDEBUG(D_VFSTRACE, "write: [%lli, %lli)\n", pos, pos + cnt);
+ CDEBUG(D_VFSTRACE, "write: [%lli, %lli)\n", pos, pos + (long long)cnt);
if (cl2vvp_io(env, ios)->cui_oneshot > 0)
result = 0;
/* offset of the last byte on the page */
offset = cl_offset(obj, fio->ft_index + 1) - 1;
LASSERT(cl_index(obj, offset) == fio->ft_index);
- result = ccc_prep_size(env, obj, io, offset, 0);
+ result = ccc_prep_size(env, obj, io, 0, offset + 1, 0, NULL);
if (result != 0)
return result;
return result;
}
-static void vvp_io_advance(const struct lu_env *env,
- const struct cl_io_slice *ios, size_t nob)
-{
- struct ccc_io *vio = cl2ccc_io(env, ios);
- struct cl_io *io = ios->cis_io;
- struct cl_object *obj = ios->cis_io->ci_obj;
-
- CLOBINVRNT(env, obj, ccc_object_invariant(obj));
-
- if (!cl_io_is_sendfile(io) && io->ci_continue) {
- /* update the iov */
- LASSERT(vio->cui_tot_nrsegs >= vio->cui_nrsegs);
- LASSERT(vio->cui_tot_count >= nob);
-
- vio->cui_iov += vio->cui_nrsegs;
- vio->cui_tot_nrsegs -= vio->cui_nrsegs;
- vio->cui_tot_count -= nob;
-
- if (vio->cui_iov_olen) {
- struct iovec *iv;
-
- vio->cui_iov--;
- vio->cui_tot_nrsegs++;
- iv = &vio->cui_iov[0];
- iv->iov_base += iv->iov_len;
- LASSERT(vio->cui_iov_olen > iv->iov_len);
- iv->iov_len = vio->cui_iov_olen - iv->iov_len;
- }
- }
-}
-
static int vvp_io_read_page(const struct lu_env *env,
const struct cl_io_slice *ios,
const struct cl_page_slice *slice)
struct ll_readahead_state *ras = &fd->fd_ras;
cfs_page_t *vmpage = cp->cpg_page;
struct cl_2queue *queue = &io->ci_queue;
+ int rc;
CLOBINVRNT(env, obj, ccc_object_invariant(obj));
LASSERT(cl2vvp_io(env, ios)->cui_oneshot == 0);
ENTRY;
- if (sbi->ll_ra_info.ra_max_pages)
+ if (sbi->ll_ra_info.ra_max_pages_per_file)
ras_update(sbi, inode, ras, page->cp_index,
cp->cpg_defer_uptodate);
/* Sanity check whether the page is protected by a lock. */
- if (likely(!(fd->fd_flags & LL_FILE_IGNORE_LOCK))) {
- int rc;
-
- rc = cl_page_is_under_lock(env, io, page);
- if (rc != -EBUSY) {
- CL_PAGE_HEADER(D_WARNING, env, page, "%s: %i\n",
- rc == -ENODATA ? "without a lock" :
- "match failed", rc);
- if (rc != -ENODATA)
- RETURN(rc);
- }
+ rc = cl_page_is_under_lock(env, io, page);
+ if (rc != -EBUSY) {
+ CL_PAGE_HEADER(D_WARNING, env, page, "%s: %i\n",
+ rc == -ENODATA ? "without a lock" :
+ "match failed", rc);
+ if (rc != -ENODATA)
+ RETURN(rc);
}
if (cp->cpg_defer_uptodate) {
cp->cpg_ra_used = 1;
- cl_page_export(env, page);
+ cl_page_export(env, page, 1);
}
/*
* Add page into the queue even when it is marked uptodate above.
* this will unlock it automatically as part of cl_page_list_disown().
*/
cl_2queue_add(queue, page);
- if (sbi->ll_ra_info.ra_max_pages)
+ if (sbi->ll_ra_info.ra_max_pages_per_file)
ll_readahead(env, io, ras,
vmpage->mapping, &queue->c2_qin, fd->fd_flags);
int to, enum cl_req_type crt)
{
struct cl_2queue *queue;
- struct ccc_object *cobo = cl2ccc(page->cp_obj);
- struct cl_sync_io *anchor = &ccc_env_info(env)->cti_sync_io;
-
- int writing = io->ci_type == CIT_WRITE;
int result;
LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
queue = &io->ci_queue;
cl_2queue_init_page(queue, page);
-
- if (writing)
- /* Do not pass llap here as it is sync write. */
- vvp_write_pending(cobo, cp);
-
- cl_sync_io_init(anchor, 1);
- cp->cpg_sync_io = anchor;
cl_page_clip(env, page, 0, to);
- result = cl_io_submit_rw(env, io, crt, queue);
- if (result == 0)
- result = cl_sync_io_wait(env, io, &queue->c2_qout, anchor);
- else
- cp->cpg_sync_io = NULL;
+
+ result = cl_io_submit_sync(env, io, crt, queue, CRP_NORMAL, 0);
LASSERT(cl_page_is_owned(page, io));
cl_page_clip(env, page, 0, CFS_PAGE_SIZE);
struct ccc_page *cp,
unsigned from, unsigned to)
{
- struct cl_attr *attr = &ccc_env_info(env)->cti_attr;
+ struct cl_attr *attr = ccc_env_thread_attr(env);
loff_t offset = cl_offset(obj, pg->cp_index);
int result;
* details. -jay
*/
if (result == 0)
- cl_page_export(env, pg);
+ cl_page_export(env, pg, 1);
}
return result;
}
tallyop = LPROC_LL_DIRTY_MISSES;
vvp_write_pending(cl2ccc(obj), cp);
set_page_dirty(vmpage);
+ /* ll_set_page_dirty() does the same for now, but
+ * it will stop doing so soon. */
+ vvp_write_pending(cl2ccc(obj), cp);
result = cl_page_cache_add(env, io, pg, CRT_WRITE);
if (result == -EDQUOT)
/*
*/
result = vvp_page_sync_io(env, io, pg, cp,
to, CRT_WRITE);
+ if (result)
+ CERROR("Write page %lu of inode %p failed %d\n",
+ pg->cp_index, inode, result);
} else {
tallyop = LPROC_LL_DIRTY_HITS;
result = 0;
if (result == 0) {
if (size > i_size_read(inode))
i_size_write(inode, size);
- cl_page_export(env, pg);
+ cl_page_export(env, pg, 1);
} else if (size > i_size_read(inode))
cl_page_discard(env, io, pg);
RETURN(result);
.cio_fini = vvp_io_fini,
.cio_lock = vvp_io_read_lock,
.cio_start = vvp_io_read_start,
- .cio_advance = vvp_io_advance
+ .cio_advance = ccc_io_advance
},
[CIT_WRITE] = {
.cio_fini = vvp_io_fini,
.cio_lock = vvp_io_write_lock,
.cio_start = vvp_io_write_start,
- .cio_advance = vvp_io_advance
+ .cio_advance = ccc_io_advance
},
[CIT_TRUNC] = {
.cio_fini = vvp_io_trunc_fini,