+ struct ll_cl_context *lcc = fsdata;
+ const struct lu_env *env;
+ struct cl_io *io;
+ struct ccc_io *cio;
+ struct cl_page *page;
+ unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+ bool unplug = false;
+ int result = 0;
+ ENTRY;
+
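+	/* Drop the extra reference on vmpage taken when the page was
+	 * grabbed in the matching write_begin call. */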
+ page_cache_release(vmpage);
+
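+	/* fsdata carries the ll_cl_context set up by ll_write_begin()
+	 * for this write. */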
+ LASSERT(lcc != NULL);
+ env = lcc->lcc_env;
+ page = lcc->lcc_page;
+ io = lcc->lcc_io;
+ cio = ccc_env_io(env);
+
+ LASSERT(cl_page_is_owned(page, io));
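+	/* copied > 0: data was copied into vmpage, so queue the page
+	 * for a delayed (batched) commit; copied == 0: nothing was
+	 * written, so release the page immediately. */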
+ if (copied > 0) {
+ struct cl_page_list *plist = &cio->u.write.cui_queue;
+
+ lcc->lcc_page = NULL; /* page will be queued */
+
+ /* Add it into write queue */
+ cl_page_list_add(plist, page);
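+		/* cui_from/cui_to track the valid byte range within the
+		 * first and last pages of the queue; any page after the
+		 * first must start at offset 0, hence the assertion below. */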
+ if (plist->pl_nr == 1) /* first page */
+ cio->u.write.cui_from = from;
+ else
+ LASSERT(from == 0);
+ cio->u.write.cui_to = from + copied;
+
+		/* Avoid a deadlock in balance_dirty_pages(): if this page
+		 * is already dirty, it may be written back by this same
+		 * thread, so commit the queued pages first. */
+ if (PageDirty(vmpage))
+ unplug = true;
+
+		/* The queue may already hold a full RPC worth of pages;
+		 * commit it soon. */
+ if (plist->pl_nr >= PTLRPC_MAX_BRW_PAGES)
+ unplug = true;
+
+ CL_PAGE_DEBUG(D_VFSTRACE, env, page,
+ "queued page: %d.\n", plist->pl_nr);
+ } else {
+ cl_page_disown(env, io, page);
+
+ lcc->lcc_page = NULL;
+ lu_ref_del(&page->cp_reference, "cl_io", io);
+ cl_page_put(env, page);
+
+		/* The page list is no longer contiguous; commit it now. */
+ unplug = true;
+ }
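+	/* Commit the queued pages right away if flagged above, or if the
+	 * file requires synchronous writes (O_SYNC / IS_SYNC). */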
+	if (unplug ||
+	    (file->f_flags & O_SYNC) || IS_SYNC(file->f_dentry->d_inode))
+ result = vvp_io_write_commit(env, io);