+ struct ll_cl_context *lcc = NULL;
+ const struct lu_env *env = NULL;
+ struct cl_io *io = NULL;
+ struct cl_page *page = NULL;
+
+ struct cl_object *clob = ll_i2info(mapping->host)->lli_clob;
+ pgoff_t index = pos >> PAGE_SHIFT;
+ struct page *vmpage = NULL;
+ unsigned from = pos & (PAGE_SIZE - 1);
+ unsigned to = from + len;
+ int result = 0;
+ ENTRY;
+
+ CDEBUG(D_VFSTRACE, "Writing %lu of %u to %u bytes\n", index, from, len);
+
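+ /* Look up the cl_io context that the higher-level I/O path stored
+  * for this file; if there is none, fall back to the lockless
+  * tiny-write path below. */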
+ lcc = ll_cl_find(file);
+ if (lcc == NULL) {
+ vmpage = grab_cache_page_nowait(mapping, index);
+ result = ll_tiny_write_begin(vmpage, mapping);
+ GOTO(out, result);
+ }
+
+ env = lcc->lcc_env;
+ io = lcc->lcc_io;
+
+ if (file->f_flags & O_DIRECT) {
+ /* Direct I/O failed because it could not clean up cached pages.
+  * This is a problem for a designated mirror write, because a
+  * cached page may belong to another mirror, which would cause
+  * trouble when submitting the I/O. */
+ if (io->ci_designated_mirror > 0)
+ GOTO(out, result = -EBUSY);
+
+ /*
+  * A direct write can fall back to buffered I/O, but DIO is done
+  * with lockless I/O while buffered I/O requires LDLM locking, so
+  * in that case we must restart without lockless mode.
+  */
+ if (!io->ci_dio_lock) {
+ io->ci_dio_lock = 1;
+ io->ci_need_restart = 1;
+ GOTO(out, result = -ENOLCK);
+ }
+ }
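+ /* Retry point: we come back here if the page was truncated under
+  * us, or if preparing a partial page must be retried (-EAGAIN). */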
+again:
+ /* To avoid deadlock, try to lock page first. */
+ vmpage = grab_cache_page_nowait(mapping, index);
+
+ if (unlikely(vmpage == NULL ||
+ PageDirty(vmpage) || PageWriteback(vmpage))) {
+ struct vvp_io *vio = vvp_env_io(env);
+ struct cl_page_list *plist = &vio->u.readwrite.vui_queue;
+
+ /* If the page is already in the dirty cache, we have to commit
+  * the queued pages right now; otherwise we may deadlock, because
+  * we would hold the page lock of a dirty page while requesting
+  * more grants. It is okay for the dirty page to be the first
+  * one in the commit page list, though. */
+ if (vmpage != NULL && plist->pl_nr > 0) {
+ unlock_page(vmpage);
+ put_page(vmpage);
+ vmpage = NULL;
+ }
+
+ /* commit pages and then wait for page lock */
+ result = vvp_io_write_commit(env, io);
+ if (result < 0)
+ GOTO(out, result);
+
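+ /* The dirty pages have been committed, so it is now safe to
+  * block waiting for the page lock. */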
+ if (vmpage == NULL) {
+ vmpage = grab_cache_page_write_begin(mapping, index,
+ flags);
+ if (vmpage == NULL)
+ GOTO(out, result = -ENOMEM);
+ }
+ }
+
+ /* page was truncated */
+ if (mapping != vmpage->mapping) {
+ CDEBUG(D_VFSTRACE, "page: %lu was truncated\n", index);
+ unlock_page(vmpage);
+ put_page(vmpage);
+ vmpage = NULL;
+ goto again;
+ }
+
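+ /* Find or create the cl_page that wraps this VM page in the
+  * client page cache. */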
+ page = cl_page_find(env, clob, vmpage->index, vmpage, CPT_CACHEABLE);
+ if (IS_ERR(page))
+ GOTO(out, result = PTR_ERR(page));
+
+ lcc->lcc_page = page;
+ lu_ref_add(&page->cp_reference, "cl_io", io);
+
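+ /* Take ownership of the page on behalf of this IO before
+  * preparing its contents. */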
+ cl_page_assume(env, io, page);
+ if (!PageUptodate(vmpage)) {
+ /*
+ * We're completely overwriting an existing page,
+ * so _don't_ set it up to date until commit_write
+ */
+ if (from == 0 && to == PAGE_SIZE) {
+ CL_PAGE_HEADER(D_PAGE, env, page, "full page write\n");
+ POISON_PAGE(vmpage, 0x11);
+ } else {
+ /* TODO: this can be optimized at the OSC layer to check
+  * whether this is a lockless I/O; in that case it is not
+  * necessary to read the data first. */
+ result = ll_prepare_partial_page(env, io, page, file);
+ if (result) {
+ /* vmpage should have been unlocked */
+ put_page(vmpage);
+ vmpage = NULL;
+
+ if (result == -EAGAIN)
+ goto again;
+ GOTO(out, result);
+ }
+ }
+ }
+ EXIT;
+out:
+ if (result < 0) {
+ if (vmpage != NULL) {
+ unlock_page(vmpage);
+ put_page(vmpage);
+ }
+ /* On tiny_write failure, page and io are always NULL. */
+ if (!IS_ERR_OR_NULL(page)) {
+ lu_ref_del(&page->cp_reference, "cl_io", io);
+ cl_page_put(env, page);
+ }
+ if (io)
+ io->ci_result = result;
+ } else {
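+ /* Hand the locked page and our context back to the caller;
+  * the matching ->write_end handler retrieves them via
+  * *pagep and *fsdata. */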
+ *pagep = vmpage;
+ *fsdata = lcc;
+ }
+ RETURN(result);
+}
+
+static int ll_tiny_write_end(struct file *file, struct address_space *mapping,
+ loff_t pos, unsigned int len, unsigned int copied,
+ struct page *vmpage)
+{
+ struct cl_page *clpage = (struct cl_page *)vmpage->private;
+ loff_t kms = pos + copied;
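+ /* "to" is the offset within the page up to which data was
+  * copied; an offset of zero means the write ended exactly on a
+  * page boundary, i.e. PAGE_SIZE. */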
+ loff_t to = kms & (PAGE_SIZE - 1) ? kms & (PAGE_SIZE - 1) : PAGE_SIZE;
+ __u16 refcheck;
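+ /* cl_env_get() returns a per-thread lu_env; refcheck pairs this
+  * call with the matching cl_env_put() below. */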
+ struct lu_env *env = cl_env_get(&refcheck);
+ int rc = 0;
+
+ ENTRY;
+
+ if (IS_ERR(env)) {
+ rc = PTR_ERR(env);
+ goto out;
+ }
+
+ /* This page is dirty in cache, so it should have a cl_page pointer
+ * set in vmpage->private.
+ */
+ LASSERT(clpage != NULL);
+
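+ /* Nothing was copied, so there is no size update to make. */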
+ if (copied == 0)
+ goto out_env;
+
+ /* Update the underlying size information in the OSC/LOV objects this
+ * page is part of.
+ */
+ cl_page_touch(env, clpage, to);
+
+out_env:
+ cl_env_put(env, &refcheck);
+
+out:
+ /* Must return page unlocked. */
+ unlock_page(vmpage);
+
+ RETURN(rc);