LU-13476 llite: Fix lock ordering in pagevec_dirty
fs/lustre-release.git: lustre/llite/vvp_io.c
index ac3965f..5fb6e13 100644
 
 #define DEBUG_SUBSYSTEM S_LLITE
 
-
 #include <obd.h>
 #include <linux/pagevec.h>
 #include <linux/memcontrol.h>
+
 #include "llite_internal.h"
 #include "vvp_internal.h"
 #include <libcfs/linux/linux-misc.h>
@@ -648,13 +648,23 @@ static int vvp_io_setattr_lock(const struct lu_env *env,
                                const struct cl_io_slice *ios)
 {
        struct cl_io  *io  = ios->cis_io;
-       __u64 new_size;
+       __u64 lock_start = 0;
+       __u64 lock_end = OBD_OBJECT_EOF;
        __u32 enqflags = 0;
 
        if (cl_io_is_trunc(io)) {
-               new_size = io->u.ci_setattr.sa_attr.lvb_size;
-               if (new_size == 0)
+               struct inode *inode = vvp_object_inode(io->ci_obj);
+
+               /* set enqueue flags to CEF_MUST in case of encrypted file,
+                * to prevent lockless truncate
+                */
+               if (S_ISREG(inode->i_mode) && IS_ENCRYPTED(inode))
+                       enqflags = CEF_MUST;
+               else if (io->u.ci_setattr.sa_attr.lvb_size == 0)
                        enqflags = CEF_DISCARD_DATA;
+       } else if (cl_io_is_fallocate(io)) {
+               lock_start = io->u.ci_setattr.sa_falloc_offset;
+               lock_end = lock_start + io->u.ci_setattr.sa_attr.lvb_size;
        } else {
                unsigned int valid = io->u.ci_setattr.sa_avalid;
 
@@ -668,12 +678,10 @@ static int vvp_io_setattr_lock(const struct lu_env *env,
                     io->u.ci_setattr.sa_attr.lvb_atime >=
                     io->u.ci_setattr.sa_attr.lvb_ctime))
                        return 0;
-
-               new_size = 0;
        }
 
        return vvp_io_one_lock(env, io, enqflags, CLM_WRITE,
-                              new_size, OBD_OBJECT_EOF);
+                              lock_start, lock_end);
 }
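
The hunk above replaces the single new_size argument with an explicit [lock_start, lock_end] range: truncate and timestamp updates still lock the whole object, while fallocate locks only the region it modifies (here lvb_size carries the fallocate length). Below is a minimal standalone sketch of that range selection; the types and names (setattr_op, lock_range, SKETCH_OBJECT_EOF) are invented for illustration and are not the Lustre ones.

/*
 * Sketch only: pick the lock range for a setattr-style operation.
 * Truncate and attribute updates cover the whole object; fallocate
 * covers just [offset, offset + len).
 */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_OBJECT_EOF UINT64_MAX    /* stand-in for OBD_OBJECT_EOF */

enum setattr_op { OP_TRUNCATE, OP_FALLOCATE, OP_TIMES };

struct lock_range { uint64_t start, end; };

static struct lock_range setattr_lock_range(enum setattr_op op,
                                            uint64_t offset, uint64_t len)
{
        struct lock_range r = { 0, SKETCH_OBJECT_EOF };

        if (op == OP_FALLOCATE) {
                /* only the fallocated region needs to be covered */
                r.start = offset;
                r.end = offset + len;
        }
        return r;
}

int main(void)
{
        struct lock_range r = setattr_lock_range(OP_FALLOCATE, 4096, 8192);

        printf("fallocate locks [%llu, %llu)\n",
               (unsigned long long)r.start, (unsigned long long)r.end);
        return 0;
}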
 
 static int vvp_do_vmtruncate(struct inode *inode, size_t size)
@@ -730,10 +738,13 @@ static int vvp_io_setattr_start(const struct lu_env *env,
 
        if (cl_io_is_trunc(io)) {
                trunc_sem_down_write(&lli->lli_trunc_sem);
+               mutex_lock(&lli->lli_setattr_mutex);
+               inode_dio_wait(inode);
+       } else if (cl_io_is_fallocate(io)) {
                inode_lock(inode);
                inode_dio_wait(inode);
        } else {
-               inode_lock(inode);
+               mutex_lock(&lli->lli_setattr_mutex);
        }
 
        if (io->u.ci_setattr.sa_avalid & TIMES_SET_FLAGS)
@@ -753,11 +764,12 @@ static void vvp_io_setattr_end(const struct lu_env *env,
                /* Truncate in memory pages - they must be clean pages
                 * because osc has already notified to destroy osc_extents. */
                vvp_do_vmtruncate(inode, io->u.ci_setattr.sa_attr.lvb_size);
-               inode_dio_write_done(inode);
-               inode_unlock(inode);
+               mutex_unlock(&lli->lli_setattr_mutex);
                trunc_sem_up_write(&lli->lli_trunc_sem);
-       } else {
+       } else if (cl_io_is_fallocate(io)) {
                inode_unlock(inode);
+       } else {
+               mutex_unlock(&lli->lli_setattr_mutex);
        }
 }
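
The two hunks above pair the lock taken in vvp_io_setattr_start() with the matching release in vvp_io_setattr_end(): truncate now holds lli_trunc_sem plus the new lli_setattr_mutex, fallocate keeps inode_lock(), and plain attribute updates take lli_setattr_mutex instead of inode_lock(). A userspace sketch of that acquire-in-start, release-in-reverse-order-in-end pattern follows; the names (op_kind, obj_locks) are invented and pthread primitives stand in for the kernel ones.

/*
 * Sketch only: paired start/end locking keyed on the operation type.
 * trunc_sem ~ lli_trunc_sem, setattr_mutex ~ lli_setattr_mutex,
 * inode_lock ~ inode_lock()/inode_unlock().
 */
#include <pthread.h>

enum op_kind { OP_TRUNC, OP_FALLOC, OP_ATTR };

struct obj_locks {
        pthread_rwlock_t trunc_sem;
        pthread_mutex_t  setattr_mutex;
        pthread_mutex_t  inode_lock;
};

static void setattr_start(struct obj_locks *l, enum op_kind op)
{
        if (op == OP_TRUNC) {
                pthread_rwlock_wrlock(&l->trunc_sem);
                pthread_mutex_lock(&l->setattr_mutex);
        } else if (op == OP_FALLOC) {
                pthread_mutex_lock(&l->inode_lock);
        } else {
                pthread_mutex_lock(&l->setattr_mutex);
        }
}

static void setattr_end(struct obj_locks *l, enum op_kind op)
{
        /* release in the reverse order of setattr_start() */
        if (op == OP_TRUNC) {
                pthread_mutex_unlock(&l->setattr_mutex);
                pthread_rwlock_unlock(&l->trunc_sem);
        } else if (op == OP_FALLOC) {
                pthread_mutex_unlock(&l->inode_lock);
        } else {
                pthread_mutex_unlock(&l->setattr_mutex);
        }
}

int main(void)
{
        struct obj_locks l = {
                PTHREAD_RWLOCK_INITIALIZER,
                PTHREAD_MUTEX_INITIALIZER,
                PTHREAD_MUTEX_INITIALIZER,
        };

        setattr_start(&l, OP_TRUNC);
        setattr_end(&l, OP_TRUNC);
        return 0;
}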
 
@@ -930,7 +942,7 @@ static int vvp_io_commit_sync(const struct lu_env *env, struct cl_io *io,
 
 /*
  * Kernels 4.2 - 4.5 pass memcg argument to account_page_dirtied()
- * Kernel v5.2-5678-gac1c3e4 no longer exports account_page_dirtied 
+ * Kernel v5.2-5678-gac1c3e4 no longer exports account_page_dirtied
  */
 static inline void ll_account_page_dirtied(struct page *page,
                                           struct address_space *mapping)
@@ -940,19 +952,10 @@ static inline void ll_account_page_dirtied(struct page *page,
 
        account_page_dirtied(page, mapping, memcg);
        mem_cgroup_end_page_stat(memcg);
-#elif defined HAVE_ACCOUNT_PAGE_DIRTIED
-       account_page_dirtied(page, mapping, memcg);
+#elif defined(HAVE_ACCOUNT_PAGE_DIRTIED_EXPORT)
+       account_page_dirtied(page, mapping);
 #else
-       typedef unsigned int (dirtied_t)(struct page *pg,
-                                        struct address_space *as);
-       const char *symbol = "account_page_dirtied";
-       static dirtied_t *dirtied = NULL;
-
-       if (!dirtied)
-               dirtied = (dirtied_t *)symbol_get(symbol);
-
-       if (dirtied)
-               dirtied(page, mapping);
+       vvp_account_page_dirtied(page, mapping);
 #endif
 }
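
The hunk above drops the runtime symbol_get() lookup in favour of a configure-time decision: call account_page_dirtied() directly when the build detects it is exported (HAVE_ACCOUNT_PAGE_DIRTIED_EXPORT), otherwise use the local vvp_account_page_dirtied() fallback. The following is a generic sketch of that compile-time fallback pattern with invented names (HAVE_FAST_SUM, lib_fast_sum, local_sum); it is not tied to any real library.

/* Sketch only: prefer an exported helper when the configure check
 * found it, otherwise compile in a private fallback. */
#include <stddef.h>
#include <stdio.h>

#ifdef HAVE_FAST_SUM
long lib_fast_sum(const long *v, size_t n);     /* provided elsewhere */
#else
static long local_sum(const long *v, size_t n)  /* private fallback */
{
        long s = 0;
        size_t i;

        for (i = 0; i < n; i++)
                s += v[i];
        return s;
}
#endif

static long do_sum(const long *v, size_t n)
{
#ifdef HAVE_FAST_SUM
        return lib_fast_sum(v, n);
#else
        return local_sum(v, n);
#endif
}

int main(void)
{
        long v[] = { 1, 2, 3 };

        printf("%ld\n", do_sum(v, 3));
        return 0;
}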
 
@@ -963,7 +966,7 @@ static inline void ll_account_page_dirtied(struct page *page,
 static inline void ll_page_tag_dirty(struct page *page,
                                     struct address_space *mapping)
 {
-#ifdef HAVE___XA_SET_MARK
+#ifndef HAVE_RADIX_TREE_TAG_SET
        __xa_set_mark(&mapping->i_pages, page_index(page), PAGECACHE_TAG_DIRTY);
 #else
        radix_tree_tag_set(&mapping->page_tree, page_index(page),
@@ -985,22 +988,34 @@ void vvp_set_pagevec_dirty(struct pagevec *pvec)
        struct page *page = pvec->pages[0];
        struct address_space *mapping = page->mapping;
        unsigned long flags;
+       unsigned long skip_pages = 0;
        int count = pagevec_count(pvec);
        int dirtied = 0;
-       int i = 0;
+       int i;
 
        ENTRY;
 
-       /* From set_page_dirty */
-       for (i = 0; i < count; i++)
-               ClearPageReclaim(pvec->pages[i]);
-
+       BUILD_BUG_ON(PAGEVEC_SIZE > BITS_PER_LONG);
        LASSERTF(page->mapping,
-                "mapping must be set. page %p, page->private (cl_page) %p",
+                "mapping must be set. page %p, page->private (cl_page) %p\n",
                 page, (void *) page->private);
 
-       /* Rest of code derived from __set_page_dirty_nobuffers */
-       xa_lock_irqsave(&mapping->i_pages, flags);
+       for (i = 0; i < count; i++) {
+               page = pvec->pages[i];
+
+               ClearPageReclaim(page);
+
+               lock_page_memcg(page);
+               if (TestSetPageDirty(page)) {
+                       /* page is already dirty .. no extra work needed
+                        * set a flag for the i'th page to be skipped
+                        */
+                       unlock_page_memcg(page);
+                       skip_pages |= (1 << i);
+               }
+       }
+
+       ll_xa_lock_irqsave(&mapping->i_pages, flags);
 
        /* Notes on differences with __set_page_dirty_nobuffers:
         * 1. We don't need to call page_mapping because we know this is a page
@@ -1010,17 +1025,13 @@ void vvp_set_pagevec_dirty(struct pagevec *pvec)
         * 3. No mapping is impossible. (Race w/truncate mentioned in
         * dirty_nobuffers should be impossible because we hold the page lock.)
         * 4. All mappings are the same because i/o is only to one file.
-        * 5. We invert the lock order on lock_page_memcg(page) and the mapping
-        * xa_lock, but this is the only function that should use that pair of
-        * locks and it can't race because Lustre locks pages throughout i/o.
         */
        for (i = 0; i < count; i++) {
                page = pvec->pages[i];
-               lock_page_memcg(page);
-               if (TestSetPageDirty(page)) {
-                       unlock_page_memcg(page);
+               /* if the i'th page was unlocked above, skip it here */
+               if ((skip_pages >> i) & 1)
                        continue;
-               }
+
                LASSERTF(page->mapping == mapping,
                         "all pages must have the same mapping.  page %p, mapping %p, first mapping %p\n",
                         page, page->mapping, mapping);
@@ -1030,7 +1041,7 @@ void vvp_set_pagevec_dirty(struct pagevec *pvec)
                dirtied++;
                unlock_page_memcg(page);
        }
-       xa_unlock_irqrestore(&mapping->i_pages, flags);
+       ll_xa_unlock_irqrestore(&mapping->i_pages, flags);
 
        CDEBUG(D_VFSTRACE, "mapping %p, count %d, dirtied %d\n", mapping,
               count, dirtied);
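
This is the lock-ordering fix named in the commit title: lock_page_memcg() is now taken for every page before the mapping's i_pages lock, and pages that were already dirty are recorded in the skip_pages bitmask so the xarray-locked loop can skip them (the BUILD_BUG_ON guarantees one bit per pagevec slot). With the memcg lock no longer taken inside the xarray lock, the old point 5 of the comment, which justified the inverted order, is deleted. A minimal userspace sketch of the two-phase/skip-mask pattern follows; every name in it (item, mark_batch_dirty, BATCH_SIZE) is invented.

/*
 * Sketch only: phase one does per-item work and records already-marked
 * items in a bitmask; phase two, which would run under the shared lock,
 * touches only the items that were newly marked.
 */
#include <assert.h>
#include <limits.h>
#include <stdio.h>

#define BATCH_SIZE 15                   /* like PAGEVEC_SIZE */

struct item { int dirty; };

static void mark_batch_dirty(struct item **batch, int count)
{
        unsigned long skip = 0;         /* bit i set => skip batch[i] */
        int i;

        /* the mask must hold one bit per batch entry */
        assert(BATCH_SIZE <= (int)(sizeof(skip) * CHAR_BIT));

        /* phase 1: before taking the shared lock */
        for (i = 0; i < count; i++) {
                if (batch[i]->dirty) {
                        /* already dirty: nothing to account below */
                        skip |= 1UL << i;
                        continue;
                }
                batch[i]->dirty = 1;
        }

        /* phase 2: under the shared lock (elided), new items only */
        for (i = 0; i < count; i++) {
                if ((skip >> i) & 1)
                        continue;
                printf("accounting item %d\n", i);
        }
}

int main(void)
{
        struct item items[3] = { {0}, {1}, {0} };
        struct item *batch[3] = { &items[0], &items[1], &items[2] };

        mark_batch_dirty(batch, 3);
        return 0;
}
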
@@ -1191,6 +1202,7 @@ static int vvp_io_write_start(const struct lu_env *env,
        size_t                   cnt = io->u.ci_wr.wr.crw_count;
        bool                     lock_inode = !IS_NOSEC(inode);
        size_t nob = io->ci_nob;
+       struct iov_iter iter;
        size_t written = 0;
 
        ENTRY;
@@ -1253,6 +1265,7 @@ static int vvp_io_write_start(const struct lu_env *env,
                 * truncates, etc. is handled in the higher layers of lustre.
                 */
                lock_inode = !IS_NOSEC(inode);
+               iter = *vio->vui_iter;
 
                if (unlikely(lock_inode))
                        inode_lock(inode);
@@ -1262,7 +1275,7 @@ static int vvp_io_write_start(const struct lu_env *env,
                        inode_unlock(inode);
 
                written = result;
-               if (result > 0 || result == -EIOCBQUEUED)
+               if (result > 0)
 #ifdef HAVE_GENERIC_WRITE_SYNC_2ARGS
                        result = generic_write_sync(vio->vui_iocb, result);
 #else
@@ -1279,12 +1292,20 @@ static int vvp_io_write_start(const struct lu_env *env,
 
        if (result > 0) {
                result = vvp_io_write_commit(env, io);
+               /* Simulate short commit */
+               if (CFS_FAULT_CHECK(OBD_FAIL_LLITE_SHORT_COMMIT)) {
+                       vio->u.write.vui_written >>= 1;
+                       if (vio->u.write.vui_written > 0)
+                               io->ci_need_restart = 1;
+               }
                if (vio->u.write.vui_written > 0) {
                        result = vio->u.write.vui_written;
                        CDEBUG(D_VFSTRACE, "%s: write nob %zd, result: %zd\n",
                                file_dentry(file)->d_name.name,
                                io->ci_nob, result);
                        io->ci_nob += result;
+               } else {
+                       io->ci_continue = 0;
                }
        }
        if (vio->vui_iocb->ki_pos != (pos + io->ci_nob - nob)) {
@@ -1294,15 +1315,24 @@ static int vvp_io_write_start(const struct lu_env *env,
                       file_dentry(file)->d_name.name,
                       vio->vui_iocb->ki_pos, pos + io->ci_nob - nob,
                       written, io->ci_nob - nob, result);
-               /* rewind ki_pos to where it has successfully committed */
+               /*
+                * Rewind ki_pos and vui_iter to where it has
+                * successfully committed.
+                */
                vio->vui_iocb->ki_pos = pos + io->ci_nob - nob;
+               iov_iter_advance(&iter, io->ci_nob - nob);
+               vio->vui_iter->iov = iter.iov;
+               vio->vui_iter->nr_segs = iter.nr_segs;
+               vio->vui_iter->iov_offset = iter.iov_offset;
+               vio->vui_iter->count = iter.count;
        }
-       if (result > 0) {
+       if (result > 0 || result == -EIOCBQUEUED) {
                ll_file_set_flag(ll_i2info(inode), LLIF_DATA_MODIFIED);
 
                if (result < cnt)
                        io->ci_continue = 0;
-               result = 0;
+               if (result > 0)
+                       result = 0;
        }
 
        RETURN(result);
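
These write_start hunks snapshot the caller's iov_iter before generic_file_write_iter() consumes it, and if vvp_io_write_commit() ends up committing fewer bytes than were consumed (including the forced short commit under OBD_FAIL_LLITE_SHORT_COMMIT), both ki_pos and vui_iter are rewound to the last committed byte so a restarted IO resends the right data; queued AIO (-EIOCBQUEUED) is now routed through the same success handling. A simplified, runnable sketch of the snapshot-and-rewind idea is below; struct simple_iter and its helper are invented stand-ins for iov_iter.

/*
 * Sketch only: copy the iterator before the write; if less was
 * committed than consumed, rebuild the live iterator from the snapshot
 * advanced by the committed byte count.
 */
#include <stddef.h>
#include <stdio.h>

struct simple_iter {
        const char *buf;        /* single segment, for simplicity */
        size_t offset;          /* bytes already consumed */
        size_t count;           /* bytes remaining */
};

static void iter_advance(struct simple_iter *it, size_t bytes)
{
        if (bytes > it->count)
                bytes = it->count;
        it->offset += bytes;
        it->count -= bytes;
}

int main(void)
{
        char data[100];
        struct simple_iter live = { data, 0, sizeof(data) };
        struct simple_iter snap = live;         /* like: iter = *vui_iter */
        long long pos = 0, committed = 60;      /* pretend 60 of 100 stuck */

        iter_advance(&live, sizeof(data));      /* the write consumed it all */

        /* rewind position and iterator to the last committed byte */
        pos += committed;
        iter_advance(&snap, (size_t)committed);
        live = snap;                            /* like rebuilding vui_iter */

        printf("pos=%lld, remaining=%zu\n", pos, live.count);
        return 0;
}
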
@@ -1456,7 +1486,7 @@ static int vvp_io_fault_start(const struct lu_env *env,
        if (fio->ft_mkwrite) {
                wait_on_page_writeback(vmpage);
                if (!PageDirty(vmpage)) {
-                       struct cl_page_list *plist = &io->ci_queue.c2_qin;
+                       struct cl_page_list *plist = &vio->u.fault.ft_queue;
                        struct vvp_page *vpg = cl_object_page_slice(obj, page);
                        int to = PAGE_SIZE;
 
@@ -1468,13 +1498,34 @@ static int vvp_io_fault_start(const struct lu_env *env,
 
                        /* size fixup */
                        if (last_index == vvp_index(vpg))
-                               to = size & ~PAGE_MASK;
+                               to = ((size - 1) & ~PAGE_MASK) + 1;
 
                        /* Do not set Dirty bit here so that in case IO is
                         * started before the page is really made dirty, we
                         * still have chance to detect it. */
                        result = cl_io_commit_async(env, io, plist, 0, to,
                                                    mkwrite_commit_callback);
+                       /* The over-quota flag is set: try a sync write to
+                        * check whether we are really out of quota */
+                       if (result == -EDQUOT) {
+                               cl_page_get(page);
+                               result = vvp_io_commit_sync(env, io,
+                                                           plist, 0, to);
+                               if (result >= 0) {
+                                       io->ci_noquota = 1;
+                                       cl_page_own(env, io, page);
+                                       cl_page_list_add(plist, page);
+                                       lu_ref_add(&page->cp_reference,
+                                                  "cl_io", io);
+                                       result = cl_io_commit_async(env, io,
+                                               plist, 0, to,
+                                               mkwrite_commit_callback);
+                                       io->ci_noquota = 0;
+                               } else {
+                                       cl_page_put(env, page);
+                               }
+                       }
+
                        LASSERT(cl_page_is_owned(page, io));
                        cl_page_list_fini(env, plist);
 
@@ -1489,8 +1540,9 @@ static int vvp_io_fault_start(const struct lu_env *env,
                                if (result == -EDQUOT)
                                        result = -ENOSPC;
                                GOTO(out, result);
-                       } else
+                       } else {
                                cl_page_disown(env, io, page);
+                       }
                }
        }
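
The larger addition in the mkwrite path above is the -EDQUOT handling: when the async commit reports over-quota, the page is committed synchronously to check whether the client is really out of quota, and on success the async commit is retried with ci_noquota set. A toy sketch of that confirm-then-retry flow, with invented helpers (commit_async, commit_sync) and no real I/O:

#include <errno.h>
#include <stdio.h>

/* Sketch only: an async commit that reports EDQUOT is confirmed with a
 * sync commit before the error is treated as a real quota failure. */
static int commit_async(int pretend_quota_hit)
{
        return pretend_quota_hit ? -EDQUOT : 0;
}

static int commit_sync(void)
{
        /* the sync path flushes and rechecks quota; assume it succeeds */
        return 0;
}

static int commit_page(int pretend_quota_hit)
{
        int rc = commit_async(pretend_quota_hit);

        if (rc == -EDQUOT) {
                /* the over-quota hint may be stale: confirm synchronously */
                rc = commit_sync();
                if (rc >= 0)
                        rc = commit_async(0);   /* retry, quota confirmed ok */
        }
        return rc;
}

int main(void)
{
        printf("rc=%d\n", commit_page(1));
        return 0;
}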