LU-10810 clio: SEEK_HOLE/SEEK_DATA on client side
[fs/lustre-release.git] / lustre/llite/vvp_io.c
index 94df8b5..ba6f63a 100644
@@ -57,18 +57,6 @@ static struct vvp_io *cl2vvp_io(const struct lu_env *env,
 }
 
 /**
- * True, if \a io is a normal io, False for splice_{read,write}
- */
-static int cl_is_normalio(const struct lu_env *env, const struct cl_io *io)
-{
-       struct vvp_io *vio = vvp_env_io(env);
-
-       LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
-
-       return vio->vui_io_subtype == IO_NORMAL;
-}
-
-/**
  * For swapping layout. The file's layout may have changed.
  * To avoid populating pages to a wrong stripe, we have to verify the
  * correctness of layout. It works because swapping layout processes
@@ -267,10 +255,20 @@ static int vvp_io_write_iter_init(const struct lu_env *env,
 {
        struct vvp_io *vio = cl2vvp_io(env, ios);
 
-       cl_page_list_init(&vio->u.write.vui_queue);
-       vio->u.write.vui_written = 0;
-       vio->u.write.vui_from = 0;
-       vio->u.write.vui_to = PAGE_SIZE;
+       cl_page_list_init(&vio->u.readwrite.vui_queue);
+       vio->u.readwrite.vui_written = 0;
+       vio->u.readwrite.vui_from = 0;
+       vio->u.readwrite.vui_to = PAGE_SIZE;
+
+       return 0;
+}
+
+static int vvp_io_read_iter_init(const struct lu_env *env,
+                                const struct cl_io_slice *ios)
+{
+       struct vvp_io *vio = cl2vvp_io(env, ios);
+
+       vio->u.readwrite.vui_read = 0;
 
        return 0;
 }
@@ -280,7 +278,7 @@ static void vvp_io_write_iter_fini(const struct lu_env *env,
 {
        struct vvp_io *vio = cl2vvp_io(env, ios);
 
-       LASSERT(vio->u.write.vui_queue.pl_nr == 0);
+       LASSERT(vio->u.readwrite.vui_queue.pl_nr == 0);
 }
 
 static int vvp_io_fault_iter_init(const struct lu_env *env,
@@ -448,9 +446,6 @@ static int vvp_mmap_locks(const struct lu_env *env,
 
        LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
 
-       if (!cl_is_normalio(env, io))
-               RETURN(0);
-
        /* nfs or loop back device write */
        if (vio->vui_iter == NULL)
                RETURN(0);
@@ -528,15 +523,17 @@ static void vvp_io_advance(const struct lu_env *env,
                           const struct cl_io_slice *ios,
                           size_t nob)
 {
-       struct vvp_io    *vio = cl2vvp_io(env, ios);
-       struct cl_io     *io  = ios->cis_io;
        struct cl_object *obj = ios->cis_io->ci_obj;
+       struct vvp_io *vio = cl2vvp_io(env, ios);
 
        CLOBINVRNT(env, obj, vvp_object_invariant(obj));
 
-       if (!cl_is_normalio(env, io))
-               return;
-
+       /*
+        * Since kernel 3.16 (26978b8b4) the VFS restores the iov_iter to
+        * its original position even when the I/O succeeds, so instead
+        * of relying on the VFS we advance the iov_iter ourselves.
+        */
+       iov_iter_advance(vio->vui_iter, nob);
        vio->vui_tot_count -= nob;
        iov_iter_reexpand(vio->vui_iter, vio->vui_tot_count);
 }
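
The rewritten vvp_io_advance() above consumes nob bytes unconditionally because, as the new comment explains, since kernel 3.16 the VFS restores the iov_iter to its original position even on success. A minimal sketch of that advance-then-reexpand pattern, using iov_iter_advance() and iov_iter_reexpand() from <linux/uio.h>; the helper name and the tot_count bookkeeping are invented for illustration and are not part of the patch:

#include <linux/uio.h>

/*
 * Illustration only: consume @nob bytes from an iterator that was earlier
 * truncated to the size of the current chunk, then re-expand it to the
 * bytes still outstanding for the whole request (mirrors vvp_io_advance()).
 */
static void demo_iter_consume(struct iov_iter *iter, size_t *tot_count,
			      size_t nob)
{
	/* move the iterator cursor past the bytes just transferred */
	iov_iter_advance(iter, nob);
	/* fewer bytes remain for the overall request */
	*tot_count -= nob;
	/* undo the earlier iov_iter_truncate() so the next round sees
	 * the full remaining count */
	iov_iter_reexpand(iter, *tot_count);
}
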
@@ -546,7 +543,7 @@ static void vvp_io_update_iov(const struct lu_env *env,
 {
        size_t size = io->u.ci_rw.crw_count;
 
-       if (!cl_is_normalio(env, io) || vio->vui_iter == NULL)
+       if (!vio->vui_iter)
                return;
 
        iov_iter_truncate(vio->vui_iter, size);
@@ -801,6 +798,8 @@ static int vvp_io_read_start(const struct lu_env *env,
        size_t tot = vio->vui_tot_count;
        int exceed = 0;
        int result;
+       struct iov_iter iter;
+
        ENTRY;
 
        CLOBINVRNT(env, obj, vvp_object_invariant(obj));
@@ -809,8 +808,7 @@ static int vvp_io_read_start(const struct lu_env *env,
                file_dentry(file)->d_name.name,
                pos, pos + cnt);
 
-       if (vio->vui_io_subtype == IO_NORMAL)
-               trunc_sem_down_read(&lli->lli_trunc_sem);
+       trunc_sem_down_read(&lli->lli_trunc_sem);
 
        if (io->ci_async_readahead) {
                file_accessed(file);
@@ -829,7 +827,7 @@ static int vvp_io_read_start(const struct lu_env *env,
                GOTO(out, result);
 
        LU_OBJECT_HEADER(D_INODE, env, &obj->co_lu,
-                        "Read ino %lu, %lu bytes, offset %lld, size %llu\n",
+                        "Read ino %lu, %zu bytes, offset %lld, size %llu\n",
                         inode->i_ino, cnt, pos, i_size_read(inode));
 
        /* turn off the kernel's read-ahead */
@@ -851,32 +849,18 @@ static int vvp_io_read_start(const struct lu_env *env,
 
        /* BUG: 5972 */
        file_accessed(file);
-       switch (vio->vui_io_subtype) {
-       case IO_NORMAL:
-               LASSERT(vio->vui_iocb->ki_pos == pos);
-               result = generic_file_read_iter(vio->vui_iocb, vio->vui_iter);
-               break;
-       case IO_SPLICE:
-               result = generic_file_splice_read(file, &pos,
-                                                 vio->u.splice.vui_pipe, cnt,
-                                                 vio->u.splice.vui_flags);
-               /* LU-1109: do splice read stripe by stripe otherwise if it
-                * may make nfsd stuck if this read occupied all internal pipe
-                * buffers. */
-               io->ci_continue = 0;
-               break;
-       default:
-               CERROR("Wrong IO type %u\n", vio->vui_io_subtype);
-               LBUG();
-       }
-       GOTO(out, result);
-
+       LASSERT(vio->vui_iocb->ki_pos == pos);
+       iter = *vio->vui_iter;
+       result = generic_file_read_iter(vio->vui_iocb, &iter);
 out:
        if (result >= 0) {
                if (result < cnt)
                        io->ci_continue = 0;
                io->ci_nob += result;
                result = 0;
+       } else if (result == -EIOCBQUEUED) {
+               io->ci_nob += vio->u.readwrite.vui_read;
+               vio->vui_iocb->ki_pos = pos + vio->u.readwrite.vui_read;
        }
 
        return result;
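
The new -EIOCBQUEUED branch covers AIO: generic_file_read_iter() returns -EIOCBQUEUED when the request has been queued rather than completed synchronously, and the bytes already transferred are accumulated in vui_read, so ci_nob and ki_pos are advanced by hand. A rough caller-side sketch of that accounting; the helper and its parameters are illustrative, not part of the patch:

#include <linux/fs.h>
#include <linux/uio.h>

/* Illustration only: nob_done stands in for vui_read, i.e. bytes the
 * lower layers transferred before the request went asynchronous. */
static ssize_t demo_read(struct kiocb *iocb, struct iov_iter *to,
			 loff_t pos, size_t nob_done, size_t *total_done)
{
	ssize_t rc = generic_file_read_iter(iocb, to);

	if (rc >= 0) {
		*total_done += rc;		/* completed synchronously */
	} else if (rc == -EIOCBQUEUED) {
		*total_done += nob_done;	/* partial progress so far */
		iocb->ki_pos = pos + nob_done;	/* keep the offset honest */
	}
	return rc;	/* -EIOCBQUEUED propagates to the AIO completion path */
}
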
@@ -988,21 +972,33 @@ void vvp_set_pagevec_dirty(struct pagevec *pvec)
        struct page *page = pvec->pages[0];
        struct address_space *mapping = page->mapping;
        unsigned long flags;
+       unsigned long skip_pages = 0;
        int count = pagevec_count(pvec);
        int dirtied = 0;
-       int i = 0;
+       int i;
 
        ENTRY;
 
-       /* From set_page_dirty */
-       for (i = 0; i < count; i++)
-               ClearPageReclaim(pvec->pages[i]);
-
+       BUILD_BUG_ON(PAGEVEC_SIZE > BITS_PER_LONG);
        LASSERTF(page->mapping,
                 "mapping must be set. page %p, page->private (cl_page) %p\n",
                 page, (void *) page->private);
 
-       /* Rest of code derived from __set_page_dirty_nobuffers */
+       for (i = 0; i < count; i++) {
+               page = pvec->pages[i];
+
+               ClearPageReclaim(page);
+
+               lock_page_memcg(page);
+               if (TestSetPageDirty(page)) {
+                       /* page is already dirty, so no extra work is
+                        * needed; set a flag to skip the i'th page below
+                        */
+                       unlock_page_memcg(page);
+                       skip_pages |= (1 << i);
+               }
+       }
+
        ll_xa_lock_irqsave(&mapping->i_pages, flags);
 
        /* Notes on differences with __set_page_dirty_nobuffers:
@@ -1013,17 +1009,13 @@ void vvp_set_pagevec_dirty(struct pagevec *pvec)
         * 3. No mapping is impossible. (Race w/truncate mentioned in
         * dirty_nobuffers should be impossible because we hold the page lock.)
         * 4. All mappings are the same because i/o is only to one file.
-        * 5. We invert the lock order on lock_page_memcg(page) and the mapping
-        * xa_lock, but this is the only function that should use that pair of
-        * locks and it can't race because Lustre locks pages throughout i/o.
         */
        for (i = 0; i < count; i++) {
                page = pvec->pages[i];
-               lock_page_memcg(page);
-               if (TestSetPageDirty(page)) {
-                       unlock_page_memcg(page);
+               /* if the i'th page was unlocked above, skip it here */
+               if ((skip_pages >> i) & 1)
                        continue;
-               }
+
                LASSERTF(page->mapping == mapping,
                         "all pages must have the same mapping.  page %p, mapping %p, first mapping %p\n",
                         page, page->mapping, mapping);
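
vvp_set_pagevec_dirty() now runs lock_page_memcg()/TestSetPageDirty() over the whole batch first, records already-dirty pages in the skip_pages bitmask, and only then takes the xarray lock once for the pages that still need dirtying. The record-then-skip bitmask pattern in isolation, as plain userspace C with invented names:

#include <stdio.h>

#define NITEMS 8

/* First pass: note items that need no work in a bitmask.
 * Second pass: do the heavier work only for unmarked items,
 * mirroring how the patch skips already-dirty pages. */
int main(void)
{
	int done[NITEMS] = { 0, 1, 0, 0, 1, 1, 0, 0 };	/* 1 = already done */
	unsigned long skip = 0;
	int i;

	for (i = 0; i < NITEMS; i++)
		if (done[i])
			skip |= 1UL << i;

	for (i = 0; i < NITEMS; i++) {
		if ((skip >> i) & 1)
			continue;	/* nothing to do for this item */
		printf("processing item %d\n", i);
	}
	return 0;
}
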
@@ -1105,24 +1097,25 @@ int vvp_io_write_commit(const struct lu_env *env, struct cl_io *io)
        struct cl_object *obj = io->ci_obj;
        struct inode *inode = vvp_object_inode(obj);
        struct vvp_io *vio = vvp_env_io(env);
-       struct cl_page_list *queue = &vio->u.write.vui_queue;
+       struct cl_page_list *queue = &vio->u.readwrite.vui_queue;
        struct cl_page *page;
        int rc = 0;
        int bytes = 0;
-       unsigned int npages = vio->u.write.vui_queue.pl_nr;
+       unsigned int npages = vio->u.readwrite.vui_queue.pl_nr;
        ENTRY;
 
        if (npages == 0)
                RETURN(0);
 
        CDEBUG(D_VFSTRACE, "commit async pages: %d, from %d, to %d\n",
-               npages, vio->u.write.vui_from, vio->u.write.vui_to);
+               npages, vio->u.readwrite.vui_from, vio->u.readwrite.vui_to);
 
        LASSERT(page_list_sanity_check(obj, queue));
 
        /* submit IO with async write */
        rc = cl_io_commit_async(env, io, queue,
-                               vio->u.write.vui_from, vio->u.write.vui_to,
+                               vio->u.readwrite.vui_from,
+                               vio->u.readwrite.vui_to,
                                write_commit_callback);
        npages -= queue->pl_nr; /* already committed pages */
        if (npages > 0) {
@@ -1130,18 +1123,18 @@ int vvp_io_write_commit(const struct lu_env *env, struct cl_io *io)
                bytes = npages << PAGE_SHIFT;
 
                /* first page */
-               bytes -= vio->u.write.vui_from;
+               bytes -= vio->u.readwrite.vui_from;
                if (queue->pl_nr == 0) /* last page */
-                       bytes -= PAGE_SIZE - vio->u.write.vui_to;
+                       bytes -= PAGE_SIZE - vio->u.readwrite.vui_to;
                LASSERTF(bytes > 0, "bytes = %d, pages = %d\n", bytes, npages);
 
-               vio->u.write.vui_written += bytes;
+               vio->u.readwrite.vui_written += bytes;
 
                CDEBUG(D_VFSTRACE, "Committed %d pages %d bytes, tot: %ld\n",
-                       npages, bytes, vio->u.write.vui_written);
+                       npages, bytes, vio->u.readwrite.vui_written);
 
                /* the first page must have been written. */
-               vio->u.write.vui_from = 0;
+               vio->u.readwrite.vui_from = 0;
        }
        LASSERT(page_list_sanity_check(obj, queue));
        LASSERT(ergo(rc == 0, queue->pl_nr == 0));
@@ -1149,10 +1142,10 @@ int vvp_io_write_commit(const struct lu_env *env, struct cl_io *io)
        /* out of quota, try sync write */
        if (rc == -EDQUOT && !cl_io_is_mkwrite(io)) {
                rc = vvp_io_commit_sync(env, io, queue,
-                                       vio->u.write.vui_from,
-                                       vio->u.write.vui_to);
+                                       vio->u.readwrite.vui_from,
+                                       vio->u.readwrite.vui_to);
                if (rc > 0) {
-                       vio->u.write.vui_written += rc;
+                       vio->u.readwrite.vui_written += rc;
                        rc = 0;
                }
        }
@@ -1199,8 +1192,7 @@ static int vvp_io_write_start(const struct lu_env *env,
 
        ENTRY;
 
-       if (vio->vui_io_subtype == IO_NORMAL)
-               trunc_sem_down_read(&lli->lli_trunc_sem);
+       trunc_sem_down_read(&lli->lli_trunc_sem);
 
        if (!can_populate_pages(env, io, inode))
                RETURN(0);
@@ -1261,8 +1253,7 @@ static int vvp_io_write_start(const struct lu_env *env,
 
                if (unlikely(lock_inode))
                        inode_lock(inode);
-               result = __generic_file_write_iter(vio->vui_iocb,
-                                                  vio->vui_iter);
+               result = __generic_file_write_iter(vio->vui_iocb, &iter);
                if (unlikely(lock_inode))
                        inode_unlock(inode);
 
@@ -1286,12 +1277,12 @@ static int vvp_io_write_start(const struct lu_env *env,
                result = vvp_io_write_commit(env, io);
                /* Simulate short commit */
                if (CFS_FAULT_CHECK(OBD_FAIL_LLITE_SHORT_COMMIT)) {
-                       vio->u.write.vui_written >>= 1;
-                       if (vio->u.write.vui_written > 0)
+                       vio->u.readwrite.vui_written >>= 1;
+                       if (vio->u.readwrite.vui_written > 0)
                                io->ci_need_restart = 1;
                }
-               if (vio->u.write.vui_written > 0) {
-                       result = vio->u.write.vui_written;
+               if (vio->u.readwrite.vui_written > 0) {
+                       result = vio->u.readwrite.vui_written;
                        CDEBUG(D_VFSTRACE, "%s: write nob %zd, result: %zd\n",
                                file_dentry(file)->d_name.name,
                                io->ci_nob, result);
@@ -1301,9 +1292,8 @@ static int vvp_io_write_start(const struct lu_env *env,
                }
        }
        if (vio->vui_iocb->ki_pos != (pos + io->ci_nob - nob)) {
-               CDEBUG(D_VFSTRACE, "%s: write position mismatch: "
-                      "ki_pos %lld vs. pos %lld, written %ld, commit %ld "
-                      "rc %ld\n",
+               CDEBUG(D_VFSTRACE,
+                      "%s: write position mismatch: ki_pos %lld vs. pos %lld, written %zd, commit %zd: rc = %zd\n",
                       file_dentry(file)->d_name.name,
                       vio->vui_iocb->ki_pos, pos + io->ci_nob - nob,
                       written, io->ci_nob - nob, result);
@@ -1312,19 +1302,20 @@ static int vvp_io_write_start(const struct lu_env *env,
                 * successfully committed.
                 */
                vio->vui_iocb->ki_pos = pos + io->ci_nob - nob;
-               iov_iter_advance(&iter, io->ci_nob - nob);
-               vio->vui_iter->iov = iter.iov;
-               vio->vui_iter->nr_segs = iter.nr_segs;
-               vio->vui_iter->iov_offset = iter.iov_offset;
-               vio->vui_iter->count = iter.count;
        }
        if (result > 0 || result == -EIOCBQUEUED) {
                ll_file_set_flag(ll_i2info(inode), LLIF_DATA_MODIFIED);
 
-               if (result < cnt)
+               if (result != -EIOCBQUEUED && result < cnt)
                        io->ci_continue = 0;
                if (result > 0)
                        result = 0;
+               /* move forward */
+               if (result == -EIOCBQUEUED) {
+                       io->ci_nob += vio->u.readwrite.vui_written;
+                       vio->vui_iocb->ki_pos = pos +
+                                       vio->u.readwrite.vui_written;
+               }
        }
 
        RETURN(result);
@@ -1333,12 +1324,10 @@ static int vvp_io_write_start(const struct lu_env *env,
 static void vvp_io_rw_end(const struct lu_env *env,
                          const struct cl_io_slice *ios)
 {
-       struct vvp_io           *vio = cl2vvp_io(env, ios);
        struct inode            *inode = vvp_object_inode(ios->cis_obj);
        struct ll_inode_info    *lli = ll_i2info(inode);
 
-       if (vio->vui_io_subtype == IO_NORMAL)
-               trunc_sem_up_read(&lli->lli_trunc_sem);
+       trunc_sem_up_read(&lli->lli_trunc_sem);
 }
 
 static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
@@ -1607,10 +1596,56 @@ static int vvp_io_read_ahead(const struct lu_env *env,
        RETURN(result);
 }
 
+static int vvp_io_lseek_lock(const struct lu_env *env,
+                            const struct cl_io_slice *ios)
+{
+       struct cl_io *io = ios->cis_io;
+       __u64 lock_start = io->u.ci_lseek.ls_start;
+       __u64 lock_end = OBD_OBJECT_EOF;
+       __u32 enqflags = CEF_MUST; /* always take client lock */
+
+       return vvp_io_one_lock(env, io, enqflags, CLM_READ,
+                              lock_start, lock_end);
+}
+
+static int vvp_io_lseek_start(const struct lu_env *env,
+                             const struct cl_io_slice *ios)
+{
+       struct cl_io *io = ios->cis_io;
+       struct inode *inode = vvp_object_inode(io->ci_obj);
+       __u64 start = io->u.ci_lseek.ls_start;
+
+       inode_lock(inode);
+       inode_dio_wait(inode);
+
+       /* We hold a DLM lock at this point, so just update the inode
+        * to learn the current file size.
+        */
+       ll_merge_attr(env, inode);
+       if (start >= i_size_read(inode)) {
+               io->u.ci_lseek.ls_result = -ENXIO;
+               return -ENXIO;
+       }
+       return 0;
+}
+
+static void vvp_io_lseek_end(const struct lu_env *env,
+                            const struct cl_io_slice *ios)
+{
+       struct cl_io *io = ios->cis_io;
+       struct inode *inode = vvp_object_inode(io->ci_obj);
+
+       if (io->u.ci_lseek.ls_result > i_size_read(inode))
+               io->u.ci_lseek.ls_result = -ENXIO;
+
+       inode_unlock(inode);
+}
+
 static const struct cl_io_operations vvp_io_ops = {
        .op = {
                [CIT_READ] = {
                        .cio_fini       = vvp_io_fini,
+                       .cio_iter_init = vvp_io_read_iter_init,
                        .cio_lock       = vvp_io_read_lock,
                        .cio_start      = vvp_io_read_start,
                        .cio_end        = vvp_io_rw_end,
@@ -1652,6 +1687,12 @@ static const struct cl_io_operations vvp_io_ops = {
                [CIT_LADVISE] = {
                        .cio_fini       = vvp_io_fini
                },
+               [CIT_LSEEK] = {
+                       .cio_fini      = vvp_io_fini,
+                       .cio_lock      = vvp_io_lseek_lock,
+                       .cio_start     = vvp_io_lseek_start,
+                       .cio_end       = vvp_io_lseek_end,
+               },
        },
        .cio_read_ahead = vvp_io_read_ahead
 };
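
With CIT_LSEEK wired into vvp_io_ops, SEEK_HOLE/SEEK_DATA requests are resolved on the client under a CLM_READ DLM lock, and both vvp_io_lseek_start() and vvp_io_lseek_end() return -ENXIO when the offset is at or past the file size. For reference, a small userspace probe of the lseek(2) semantics the handlers have to honour; illustrative only:

#define _GNU_SOURCE	/* SEEK_DATA / SEEK_HOLE */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	off_t data, hole;
	int fd;

	if (argc < 2)
		return 1;
	fd = open(argv[1], O_RDONLY);
	if (fd < 0)
		return 1;

	data = lseek(fd, 0, SEEK_DATA);	/* next offset holding data */
	hole = lseek(fd, 0, SEEK_HOLE);	/* next hole (EOF counts as one) */

	if (data == (off_t)-1 && errno == ENXIO)
		printf("no data at or after offset 0 (empty or all-hole file)\n");
	else
		printf("data at %lld, next hole at %lld\n",
		       (long long)data, (long long)hole);

	close(fd);
	return 0;
}
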