+/**
+ * Prepare a partially written-to page for a write.  The part of the page
+ * not covered by the write has to hold up-to-date data: either read from
+ * the server, or zeroed if the page lies at or beyond KMS.
+ */
+static int ll_prepare_partial_page(const struct lu_env *env, struct cl_io *io,
+ struct cl_page *pg)
+{
+ struct cl_attr *attr = ccc_env_thread_attr(env);
+ struct cl_object *obj = io->ci_obj;
+ struct ccc_page *cp = cl_object_page_slice(obj, pg);
+ loff_t offset = cl_offset(obj, ccc_index(cp));
+ int result;
+
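+ /* Read the object attributes under lock to get a consistent view of
+ * the known minimum size (KMS). */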
+ cl_object_attr_lock(obj);
+ result = cl_object_attr_get(env, obj, attr);
+ cl_object_attr_unlock(obj);
+ if (result == 0) {
+ /*
+ * If we are writing to a new page, no need to read old data.
+ * The extent locking will have updated the KMS, and for our
+ * purposes here we can treat it like i_size.
+ */
+ if (attr->cat_kms <= offset) {
+ char *kaddr = ll_kmap_atomic(cp->cpg_page, KM_USER0);
+
+ memset(kaddr, 0, cl_page_size(obj));
+ ll_kunmap_atomic(kaddr, KM_USER0);
+ } else if (cp->cpg_defer_uptodate) {
+ cp->cpg_ra_used = 1;
+ } else {
+ result = ll_page_sync_io(env, io, pg, CRT_READ);
+ }
+ }
+ return result;
+}
+
+static int ll_write_begin(struct file *file, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned flags,
+ struct page **pagep, void **fsdata)
+{
+ struct ll_cl_context *lcc;
+ const struct lu_env *env;
+ struct cl_io *io;
+ struct cl_page *page;
+
+ struct cl_object *clob = ll_i2info(mapping->host)->lli_clob;
+ pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+ struct page *vmpage = NULL;
+ unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+ unsigned to = from + len;
+ int result = 0;
+ ENTRY;
+
+ CDEBUG(D_VFSTRACE, "Writing page %lu, from %u, len %u\n", index, from, len);
+
+ lcc = ll_cl_find(file);
+ if (lcc == NULL)
+ GOTO(out, result = -EIO);
+
+ env = lcc->lcc_env;
+ io = lcc->lcc_io;
+
+ /* To avoid deadlock, try to lock the page first without blocking;
+ * if that fails, or the page is dirty or under writeback, take the
+ * slow path below. */
+ vmpage = grab_cache_page_nowait(mapping, index);
+
+ if (unlikely(vmpage == NULL ||
+ PageDirty(vmpage) || PageWriteback(vmpage))) {
+ struct ccc_io *cio = ccc_env_io(env);
+ struct cl_page_list *plist = &cio->u.write.cui_queue;
+
+ /* If the page is already in the dirty cache, we have to commit
+ * the queued pages right now; otherwise we may deadlock by
+ * holding the lock of a dirty page while asking for more
+ * grants. It's okay for the dirty page to be the first one in
+ * the commit page list, though. */
+ if (vmpage != NULL && plist->pl_nr > 0) {
+ unlock_page(vmpage);
+ page_cache_release(vmpage);
+ vmpage = NULL;
+ }
+
+ /* commit pages and then wait for page lock */
+ result = vvp_io_write_commit(env, io);
+ if (result < 0)
+ GOTO(out, result);
+
+ if (vmpage == NULL) {
+ vmpage = grab_cache_page_write_begin(mapping, index,
+ flags);
+ if (vmpage == NULL)
+ GOTO(out, result = -ENOMEM);
+ }
+ }
+
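+ /* Bind a cl_page to the VM page; cl_page_find() returns the existing
+ * cl_page if one is already attached. */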
+ page = cl_page_find(env, clob, vmpage->index, vmpage, CPT_CACHEABLE);
+ if (IS_ERR(page))
+ GOTO(out, result = PTR_ERR(page));
+
+ lcc->lcc_page = page;
+ lu_ref_add(&page->cp_reference, "cl_io", io);
+
+ cl_page_assume(env, io, page);
+ if (!PageUptodate(vmpage)) {
+ /*
+ * We are completely overwriting an existing page, so _don't_
+ * set it up to date until commit time (ll_write_end).
+ */
+ if (from == 0 && to == PAGE_CACHE_SIZE) {
+ CL_PAGE_HEADER(D_PAGE, env, page, "full page write\n");
+ POISON_PAGE(vmpage, 0x11);
+ } else {
+ /* TODO: this can be optimized at the OSC layer by
+ * checking whether this is lockless IO, in which case
+ * the old data need not be read. */
+ result = ll_prepare_partial_page(env, io, page);
+ if (result == 0)
+ SetPageUptodate(vmpage);
+ }
+ }
+ if (result < 0)
+ cl_page_unassume(env, io, page);
+ EXIT;
+out:
+ if (result < 0) {
+ if (vmpage != NULL) {
+ unlock_page(vmpage);
+ page_cache_release(vmpage);
+ }
+ } else {
+ *pagep = vmpage;
+ *fsdata = lcc;
+ }
+ RETURN(result);
+}
+
+static int ll_write_end(struct file *file, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned copied,
+ struct page *vmpage, void *fsdata)
+{
+ struct ll_cl_context *lcc = fsdata;
+ const struct lu_env *env;
+ struct cl_io *io;
+ struct ccc_io *cio;
+ struct cl_page *page;
+ unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+ bool unplug = false;
+ int result = 0;
+ ENTRY;
+
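+ /* Drop the page reference taken by grab_cache_page_nowait() or
+ * grab_cache_page_write_begin() in ll_write_begin(). */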
+ page_cache_release(vmpage);
+
+ LASSERT(lcc != NULL);
+ env = lcc->lcc_env;
+ page = lcc->lcc_page;
+ io = lcc->lcc_io;
+ cio = ccc_env_io(env);
+
+ LASSERT(cl_page_is_owned(page, io));
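+ /* If data was copied into the page, queue it for a later commit;
+ * otherwise release the page immediately. */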
+ if (copied > 0) {
+ struct cl_page_list *plist = &cio->u.write.cui_queue;
+
+ lcc->lcc_page = NULL; /* page will be queued */
+
+ /* Add it into write queue */
+ cl_page_list_add(plist, page);
+ if (plist->pl_nr == 1) /* first page */
+ cio->u.write.cui_from = from;
+ else
+ LASSERT(from == 0);
+ cio->u.write.cui_to = from + copied;
+
+ /* Commit the queue now to address the deadlock in
+ * balance_dirty_pages(), where this dirty page may otherwise be
+ * written back by the same thread. */
+ if (PageDirty(vmpage))
+ unplug = true;
+
+ /* We may have accumulated a full RPC worth of pages; commit them soon */
+ if (plist->pl_nr >= PTLRPC_MAX_BRW_PAGES)
+ unplug = true;
+
+ CL_PAGE_DEBUG(D_VFSTRACE, env, page,
+ "queued page: %d.\n", plist->pl_nr);
+ } else {
+ cl_page_disown(env, io, page);
+
+ lcc->lcc_page = NULL;
+ lu_ref_del(&page->cp_reference, "cl_io", io);
+ cl_page_put(env, page);
+
+ /* the page list is no longer contiguous; commit it now */
+ unplug = true;
+ }
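+ /* Commit the queue immediately for synchronous writes, or when
+ * flagged above. */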
+ if (unplug || (file->f_flags & O_SYNC) ||
+     IS_SYNC(file->f_dentry->d_inode))
+ result = vvp_io_write_commit(env, io);
+
+ RETURN(result >= 0 ? copied : result);
+}
+
+#ifdef CONFIG_MIGRATION
+static int ll_migratepage(struct address_space *mapping,
+ struct page *newpage, struct page *page
+#ifdef HAVE_MIGRATEPAGE_4ARGS
+ , enum migrate_mode mode
+#endif
+ )
+{
+ /* Always fail page migration until we have a proper implementation */
+ return -EIO;
+}
+#endif
+
+#ifndef MS_HAS_NEW_AOPS
+const struct address_space_operations ll_aops = {
+ .readpage = ll_readpage,