struct vm_area_struct *vma;
struct cl_lock_descr *descr = &cti->cti_descr;
ldlm_policy_data_t policy;
- struct inode *inode;
unsigned long addr;
unsigned long seg;
ssize_t count;
count += addr & (~CFS_PAGE_MASK);
addr &= CFS_PAGE_MASK;
while((vma = our_vma(addr, count)) != NULL) {
- struct file *file = vma->vm_file;
- struct ll_file_data *fd;
+ struct inode *inode = vma->vm_file->f_dentry->d_inode;
+ int flags = CEF_MUST;
- LASSERT(file);
- fd = LUSTRE_FPRIVATE(file);
-
- inode = file->f_dentry->d_inode;
- if (!(fd->fd_flags & LL_FILE_IGNORE_LOCK ||
- ll_i2sbi(inode)->ll_flags & LL_SBI_NOLCK))
- goto cont;
+ if (ll_file_nolock(vma->vm_file)) {
+ /*
+ * In the no-lock case, a lockless lock will be
+ * generated.
+ */
+ flags = CEF_NEVER;
+ }
/*
* XXX: Required lock mode can be weakened: CIT_WRITE
policy.l_extent.start);
descr->cld_end = cl_index(descr->cld_obj,
policy.l_extent.end);
- result = cl_io_lock_alloc_add(env, io, descr, CEF_MUST);
+ result = cl_io_lock_alloc_add(env, io, descr, flags);
if (result < 0)
RETURN(result);
- cont:
if (vma->vm_end - addr >= count)
break;
+
count -= vma->vm_end - addr;
addr = vma->vm_end;
}
}
/**
- * Implementation of cl_io_operations::vio_lock() method for CIT_TRUNC io.
+ * Implementation of cl_io_operations::cio_lock() method for CIT_TRUNC io.
*
* Handles "lockless io" mode when extent locking is done by server.
*/
int result;
loff_t pos = io->u.ci_rd.rd.crw_pos;
- size_t cnt = io->u.ci_rd.rd.crw_count;
- size_t tot = cio->cui_tot_count;
+ long cnt = io->u.ci_rd.rd.crw_count;
+ long tot = cio->cui_tot_count;
int exceed = 0;
CLOBINVRNT(env, obj, ccc_object_invariant(obj));
LASSERT(vio->cui_oneshot == 0);
- CDEBUG(D_VFSTRACE, "read: -> [%lli, %lli)\n", pos, pos + (long long)cnt);
+ CDEBUG(D_VFSTRACE, "read: -> [%lli, %lli)\n", pos, pos + cnt);
result = ccc_prep_size(env, obj, io, pos, tot, 1, &exceed);
if (result != 0)
goto out;
LU_OBJECT_HEADER(D_INODE, env, &obj->co_lu,
- "Read ino %lu, "LPSZ" bytes, offset %lld, size %llu\n",
+ "Read ino %lu, %lu bytes, offset %lld, size %llu\n",
inode->i_ino, cnt, pos, i_size_read(inode));
/* turn off the kernel's read-ahead */
struct ll_readahead_state *ras = &fd->fd_ras;
cfs_page_t *vmpage = cp->cpg_page;
struct cl_2queue *queue = &io->ci_queue;
+ int rc;
CLOBINVRNT(env, obj, ccc_object_invariant(obj));
LASSERT(cl2vvp_io(env, ios)->cui_oneshot == 0);
cp->cpg_defer_uptodate);
/* Sanity check whether the page is protected by a lock. */
- if (likely(!(fd->fd_flags & LL_FILE_IGNORE_LOCK))) {
- int rc;
-
- rc = cl_page_is_under_lock(env, io, page);
- if (rc != -EBUSY) {
- CL_PAGE_HEADER(D_WARNING, env, page, "%s: %i\n",
- rc == -ENODATA ? "without a lock" :
- "match failed", rc);
- if (rc != -ENODATA)
- RETURN(rc);
- }
+ rc = cl_page_is_under_lock(env, io, page);
+ if (rc != -EBUSY) {
+ CL_PAGE_HEADER(D_WARNING, env, page, "%s: %i\n",
+ rc == -ENODATA ? "without a lock" :
+ "match failed", rc);
+ if (rc != -ENODATA)
+ RETURN(rc);
}
if (cp->cpg_defer_uptodate) {
cp->cpg_ra_used = 1;
- cl_page_export(env, page);
+ cl_page_export(env, page, 1);
}
/*
* Add page into the queue even when it is marked uptodate above.
int to, enum cl_req_type crt)
{
struct cl_2queue *queue;
- struct ccc_object *cobo = cl2ccc(page->cp_obj);
- struct cl_sync_io *anchor = &ccc_env_info(env)->cti_sync_io;
-
- int writing = io->ci_type == CIT_WRITE;
int result;
LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
queue = &io->ci_queue;
cl_2queue_init_page(queue, page);
-
- if (writing)
- /* Do not pass llap here as it is sync write. */
- vvp_write_pending(cobo, cp);
-
- cl_sync_io_init(anchor, 1);
- cp->cpg_sync_io = anchor;
cl_page_clip(env, page, 0, to);
- result = cl_io_submit_rw(env, io, crt, queue, CRP_NORMAL);
- if (result == 0)
- result = cl_sync_io_wait(env, io, &queue->c2_qout, anchor);
- else
- cp->cpg_sync_io = NULL;
+
+ result = cl_io_submit_sync(env, io, crt, queue, CRP_NORMAL, 0);
LASSERT(cl_page_is_owned(page, io));
cl_page_clip(env, page, 0, CFS_PAGE_SIZE);
struct ccc_page *cp,
unsigned from, unsigned to)
{
- struct cl_attr *attr = &ccc_env_info(env)->cti_attr;
+ struct cl_attr *attr = ccc_env_thread_attr(env);
loff_t offset = cl_offset(obj, pg->cp_index);
int result;
* details. -jay
*/
if (result == 0)
- cl_page_export(env, pg);
+ cl_page_export(env, pg, 1);
}
return result;
}
tallyop = LPROC_LL_DIRTY_MISSES;
vvp_write_pending(cl2ccc(obj), cp);
set_page_dirty(vmpage);
+ /* ll_set_page_dirty() does the same for now, but
+ * it will stop doing so soon. */
+ vvp_write_pending(cl2ccc(obj), cp);
result = cl_page_cache_add(env, io, pg, CRT_WRITE);
if (result == -EDQUOT)
/*
if (result == 0) {
if (size > i_size_read(inode))
i_size_write(inode, size);
- cl_page_export(env, pg);
+ cl_page_export(env, pg, 1);
} else if (size > i_size_read(inode))
cl_page_discard(env, io, pg);
RETURN(result);