+ /* plist is not sorted any more */
+ cl_page_list_splice(&queue->c2_qin, plist);
+ cl_page_list_splice(&queue->c2_qout, plist);
+ cl_2queue_fini(env, queue);
+
+ if (rc == 0) {
+ /* bytes written: whole pages minus the unused head (from) of the
+ * first page and the unused tail (PAGE_SIZE - to) of the last page
+ */
+ bytes = plist->pl_nr << PAGE_SHIFT;
+ bytes -= from + PAGE_SIZE - to;
+
+ while (plist->pl_nr > 0) {
+ page = cl_page_list_first(plist);
+ cl_page_list_del(env, plist, page);
+
+ cl_page_clip(env, page, 0, PAGE_SIZE);
+
+ SetPageUptodate(cl_page_vmpage(page));
+ cl_page_disown(env, io, page);
+
+ /* held in ll_cl_init() */
+ lu_ref_del(&page->cp_reference, "cl_io", io);
+ cl_page_put(env, page);
+ }
+ }
+
+ RETURN(bytes > 0 ? bytes : rc);
+}
+
+/*
+ * Kernels 4.2-4.5 pass a memcg argument to account_page_dirtied().
+ * Kernel v5.2-5678-gac1c3e4 no longer exports account_page_dirtied.
+ */
+static inline void ll_account_page_dirtied(struct page *page,
+ struct address_space *mapping)
+{
+#ifdef HAVE_ACCOUNT_PAGE_DIRTIED_3ARGS
+ struct mem_cgroup *memcg = mem_cgroup_begin_page_stat(page);
+
+ account_page_dirtied(page, mapping, memcg);
+ mem_cgroup_end_page_stat(memcg);
+#elif defined(HAVE_ACCOUNT_PAGE_DIRTIED)
+ account_page_dirtied(page, mapping);
+#else
+ /* account_page_dirtied() returns void and is no longer exported,
+ * so resolve it at runtime via kallsyms; symbol_get() is unusable
+ * here: it only finds exported symbols and, being a macro, it would
+ * stringify its argument and look up the literal name "symbol"
+ */
+ typedef void (dirtied_t)(struct page *pg,
+ struct address_space *as);
+ const char *symbol = "account_page_dirtied";
+ static dirtied_t *dirtied = NULL;
+
+ if (!dirtied)
+ dirtied = (dirtied_t *)kallsyms_lookup_name(symbol);
+
+ if (dirtied)
+ dirtied(page, mapping);
+#endif
+}
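+
+/*
+ * A sketch of the sort of configure-time probe that could define
+ * HAVE_ACCOUNT_PAGE_DIRTIED_3ARGS above; the real test lives in
+ * Lustre's autoconf checks, so this is illustrative only:
+ *
+ *	static void conftest(struct page *page,
+ *			     struct address_space *mapping,
+ *			     struct mem_cgroup *memcg)
+ *	{
+ *		account_page_dirtied(page, mapping, memcg);
+ *	}
+ *
+ * If this compiles against the target kernel headers, the
+ * three-argument form is available and the macro can be defined.
+ */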
+
+/*
+ * From kernel v4.19-rc5-248-g9b89a0355144, tags are stored in the XArray;
+ * earlier kernels use the radix_tree for tags.
+ */
+static inline void ll_page_tag_dirty(struct page *page,
+ struct address_space *mapping)
+{
+#ifdef HAVE___XA_SET_MARK
+ __xa_set_mark(&mapping->i_pages, page_index(page), PAGECACHE_TAG_DIRTY);
+#else
+ radix_tree_tag_set(&mapping->page_tree, page_index(page),
+ PAGECACHE_TAG_DIRTY);
+#endif
+}
+
+/* Taken from kernel set_page_dirty, __set_page_dirty_nobuffers
+ * Last change to this area: b93b016313b3ba8003c3b8bb71f569af91f19fc7
+ *
+ * Current with Linus tip of tree (7/13/2019):
+ * v5.2-rc4-224-ge01e060fe0
+ *
+ * Backwards compat for 3.x through 5.x kernels relating to memcg
+ * handling & the rename of the radix tree to xarray.
+ */
+void vvp_set_pagevec_dirty(struct pagevec *pvec)
+{
+ struct page *page = pvec->pages[0];
+ struct address_space *mapping = page->mapping;
+ unsigned long flags;
+ int count = pagevec_count(pvec);
+ int dirtied = 0;
+ int i = 0;
+
+ ENTRY;
+
+ /* From set_page_dirty(): clear PG_reclaim, which may remain set
+ * from a racing end_page_writeback()
+ */
+ for (i = 0; i < count; i++)
+ ClearPageReclaim(pvec->pages[i]);
+
+ LASSERTF(page->mapping,
+ "mapping must be set. page %p, page->private (cl_page) %p\n",
+ page, (void *)page->private);
+
+ /* Rest of code derived from __set_page_dirty_nobuffers */
+ xa_lock_irqsave(&mapping->i_pages, flags);
+
+ /* Notes on differences with __set_page_dirty_nobuffers:
+ * 1. We don't need to call page_mapping because we know this is a page
+ * cache page.
+ * 2. We have the pages locked, so there is no need for the careful
+ * mapping/mapping2 dance.
+ * 3. A NULL mapping is impossible; the race with truncate noted in
+ * __set_page_dirty_nobuffers cannot happen because we hold the page lock.
+ * 4. All mappings are the same because i/o is only to one file.
+ * 5. We invert the lock order on lock_page_memcg(page) and the mapping
+ * xa_lock, but this is the only function that should use that pair of
+ * locks and it can't race because Lustre locks pages throughout i/o.
+ */
+ for (i = 0; i < count; i++) {
+ page = pvec->pages[i];
+ lock_page_memcg(page);
+ if (TestSetPageDirty(page)) {
+ unlock_page_memcg(page);
+ continue;
+ }
+ LASSERTF(page->mapping == mapping,
+ "all pages must have the same mapping. page %p, mapping %p, first mapping %p\n",
+ page, page->mapping, mapping);
+ WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
+ ll_account_page_dirtied(page, mapping);
+ ll_page_tag_dirty(page, mapping);
+ dirtied++;
+ unlock_page_memcg(page);
+ }
+ xa_unlock_irqrestore(&mapping->i_pages, flags);
+
+ CDEBUG(D_VFSTRACE, "mapping %p, count %d, dirtied %d\n", mapping,
+ count, dirtied);
+
+ if (mapping->host && dirtied) {
+ /* !PageAnon && !swapper_space */
+ __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
+ }
+
+ EXIT;
+}
+
+static void write_commit_callback(const struct lu_env *env, struct cl_io *io,
+ struct pagevec *pvec)
+{
+ int count = 0;
+ int i = 0;
+
+ ENTRY;
+
+ count = pagevec_count(pvec);
+ LASSERT(count > 0);
+
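+ /* the pages being committed already hold the data copied in by the
+ * write path, so mark them up to date before dirtying them below
+ */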
+ for (i = 0; i < count; i++) {
+ struct page *vmpage = pvec->pages[i];
+
+ SetPageUptodate(vmpage);
+ }
+
+ vvp_set_pagevec_dirty(pvec);
+
+ for (i = 0; i < count; i++) {
+ struct page *vmpage = pvec->pages[i];
+ struct cl_page *page = (struct cl_page *)vmpage->private;
+
+ cl_page_disown(env, io, page);
+ lu_ref_del(&page->cp_reference, "cl_io", cl_io_top(io));
+ cl_page_put(env, page);
+ }
+
+ EXIT;
+}
+
+/* make sure the page list is contiguous (e.g. indices 5, 6, 7, not 5, 7) */
+static bool page_list_sanity_check(struct cl_object *obj,
+ struct cl_page_list *plist)
+{
+ struct cl_page *page;
+ pgoff_t index = CL_PAGE_EOF;
+
+ cl_page_list_for_each(page, plist) {
+ struct vvp_page *vpg = cl_object_page_slice(obj, page);
+
+ if (index == CL_PAGE_EOF) {
+ index = vvp_index(vpg);
+ continue;
+ }
+
+ ++index;
+ if (index == vvp_index(vpg))
+ continue;
+
+ return false;
+ }
+ return true;
+}
+
+/* Return how many bytes have been queued or written */
+int vvp_io_write_commit(const struct lu_env *env, struct cl_io *io)
+{
+ struct cl_object *obj = io->ci_obj;
+ struct inode *inode = vvp_object_inode(obj);
+ struct vvp_io *vio = vvp_env_io(env);
+ struct cl_page_list *queue = &vio->u.write.vui_queue;
+ struct cl_page *page;
+ int rc = 0;
+ int bytes = 0;
+ unsigned int npages = vio->u.write.vui_queue.pl_nr;
+ ENTRY;
+
+ if (npages == 0)
+ RETURN(0);
+
+ CDEBUG(D_VFSTRACE, "commit async pages: %d, from %d, to %d\n",
+ npages, vio->u.write.vui_from, vio->u.write.vui_to);
+
+ LASSERT(page_list_sanity_check(obj, queue));
+
+ /* submit IO with async write */
+ rc = cl_io_commit_async(env, io, queue,
+ vio->u.write.vui_from, vio->u.write.vui_to,
+ write_commit_callback);
+ npages -= queue->pl_nr; /* npages is now the number of committed pages */
+ if (npages > 0) {
+ /* calculate how many bytes were written */
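+ /* e.g. assuming 4096-byte pages: a 3-page write with vui_from = 512
+ * and vui_to = 1024 that commits all 3 pages counts
+ * 3 * 4096 - 512 - (4096 - 1024) = 8704 bytes
+ */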
+ bytes = npages << PAGE_SHIFT;
+
+ /* first page */
+ bytes -= vio->u.write.vui_from;
+ if (queue->pl_nr == 0) /* last page */
+ bytes -= PAGE_SIZE - vio->u.write.vui_to;
+ LASSERTF(bytes > 0, "bytes = %d, pages = %d\n", bytes, npages);
+
+ vio->u.write.vui_written += bytes;
+
+ CDEBUG(D_VFSTRACE, "Committed %d pages %d bytes, tot: %ld\n",
+ npages, bytes, vio->u.write.vui_written);
+
+ /* the first page must have been written. */
+ vio->u.write.vui_from = 0;
+ }
+ LASSERT(page_list_sanity_check(obj, queue));
+ LASSERT(ergo(rc == 0, queue->pl_nr == 0));
+
+ /* out of quota, try sync write */
+ if (rc == -EDQUOT && !cl_io_is_mkwrite(io)) {
+ rc = vvp_io_commit_sync(env, io, queue,
+ vio->u.write.vui_from,
+ vio->u.write.vui_to);
+ if (rc > 0) {
+ vio->u.write.vui_written += rc;
+ rc = 0;
+ }
+ }
+
+ /* update inode size */
+ ll_merge_attr(env, inode);
+
+ /* Any pages left in the queue failed to commit; discard them
+ * unless they were already dirtied.
+ */
+ while (queue->pl_nr > 0) {
+ page = cl_page_list_first(queue);
+ cl_page_list_del(env, queue, page);
+
+ if (!PageDirty(cl_page_vmpage(page)))
+ cl_page_discard(env, io, page);
+
+ cl_page_disown(env, io, page);
+
+ /* held in ll_cl_init() */
+ lu_ref_del(&page->cp_reference, "cl_io", io);
+ cl_page_put(env, page);
+ }
+ cl_page_list_fini(env, queue);
+
+ RETURN(rc);