io = ccc_env_io(env)->cui_cl.cis_io;
LASSERT(io != NULL);
- /* 0. Need locking between buffered and direct access. and race with
- * size changing by concurrent truncates and writes.
- * 1. Need inode mutex to operate transient pages.
- */
- if (rw == READ)
- mutex_lock(&inode->i_mutex);
-
LASSERT(obj->cob_transient_pages == 0);
for (seg = 0; seg < nr_segs; seg++) {
long iov_left = iov[seg].iov_len;
}
out:
LASSERT(obj->cob_transient_pages == 0);
- if (rw == READ)
- mutex_unlock(&inode->i_mutex);
if (tot_bytes > 0) {
struct ccc_io *cio = ccc_env_io(env);
LASSERT(cio->cui_iocb->ki_pos == pos);
}
- CDEBUG(D_VFSTRACE, "write: [%lli, %lli)\n", pos, pos + (long long)cnt);
+ CDEBUG(D_VFSTRACE, "write: [%lli, %lli)\n", pos, pos + (long long)cnt);
- if (cio->cui_iov == NULL) /* from a temp io in ll_cl_init(). */
- result = 0;
- else
- result = generic_file_aio_write(cio->cui_iocb,
- cio->cui_iov, cio->cui_nrsegs,
- cio->cui_iocb->ki_pos);
+ if (cio->cui_iov == NULL) {
+ /* from a temp io in ll_cl_init(). */
+ result = 0;
+ } else {
+ /*
+ * When using the locked AIO function (generic_file_aio_write())
+ * testing has shown the inode mutex to be a limiting factor
+ * with multi-threaded single shared file performance. To get
+ * around this, we now use the lockless version. To maintain
+ * consistency, proper locking to protect against writes,
+			 * truncates, etc. is handled in the higher layers of lustre.
+ */
+ result = __generic_file_aio_write(cio->cui_iocb,
+ cio->cui_iov, cio->cui_nrsegs,
+ &cio->cui_iocb->ki_pos);
+ if (result > 0 || result == -EIOCBQUEUED) {
+ ssize_t err;
+
+ err = generic_write_sync(cio->cui_iocb->ki_filp,
+ pos, result);
+ if (err < 0 && result > 0)
+ result = err;
+ }
+
+ }
if (result > 0) {
result = vvp_io_write_commit(env, io);
if (cio->u.write.cui_written > 0) {
static void vvp_transient_page_verify(const struct cl_page *page)
{
- struct inode *inode = ccc_object_inode(page->cp_obj);
-
- LASSERT(!mutex_trylock(&inode->i_mutex));
}
static int vvp_transient_page_own(const struct lu_env *env,
struct ccc_object *clobj = cl2ccc(clp->cp_obj);
vvp_page_fini_common(cp);
- LASSERT(!mutex_trylock(&clobj->cob_inode->i_mutex));
clobj->cob_transient_pages--;
}
} else {
struct ccc_object *clobj = cl2ccc(obj);
- LASSERT(!mutex_trylock(&clobj->cob_inode->i_mutex));
cl_page_slice_add(page, &cpg->cpg_cl, obj, index,
&vvp_transient_page_ops);
clobj->cob_transient_pages++;
static inline int cl_page_invariant(const struct cl_page *pg)
{
- /*
- * Page invariant is protected by a VM lock.
- */
- LINVRNT(cl_page_is_vmlocked(NULL, pg));
-
return cl_page_in_use_noref(pg);
}
(const struct lu_env *,
const struct cl_page_slice *, int), ioret);
if (anchor) {
- LASSERT(cl_page_is_vmlocked(env, pg));
LASSERT(pg->cp_sync_io == anchor);
pg->cp_sync_io = NULL;
}