b=19964 SOM EA
diff --git a/lustre/llite/rw26.c b/lustre/llite/rw26.c
index 031b1ab..d3b26b4 100644
--- a/lustre/llite/rw26.c
+++ b/lustre/llite/rw26.c
@@ -173,10 +173,10 @@ static int ll_set_page_dirty(struct page *vmpage)
 #define MAX_DIRECTIO_SIZE 2*1024*1024*1024UL
 
 static inline int ll_get_user_pages(int rw, unsigned long user_addr,
-                                    size_t size, struct page ***pages)
+                                    size_t size, struct page ***pages,
+                                    int *max_pages)
 {
         int result = -ENOMEM;
-        int page_count;
 
         /* set an arbitrary limit to prevent arithmetic overflow */
         if (size > MAX_DIRECTIO_SIZE) {
@@ -184,18 +184,18 @@ static inline int ll_get_user_pages(int rw, unsigned long user_addr,
                 return -EFBIG;
         }
 
-        page_count = (user_addr + size + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT;
-        page_count -= user_addr >> CFS_PAGE_SHIFT;
+        *max_pages = (user_addr + size + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT;
+        *max_pages -= user_addr >> CFS_PAGE_SHIFT;
 
-        OBD_ALLOC_WAIT(*pages, page_count * sizeof(**pages));
+        OBD_ALLOC_WAIT(*pages, *max_pages * sizeof(**pages));
         if (*pages) {
                 down_read(&current->mm->mmap_sem);
                 result = get_user_pages(current, current->mm, user_addr,
-                                        page_count, (rw == READ), 0, *pages,
+                                        *max_pages, (rw == READ), 0, *pages,
                                         NULL);
                 up_read(&current->mm->mmap_sem);
-                if (result < 0)
-                        OBD_FREE(*pages, page_count * sizeof(**pages));
+                if (unlikely(result <= 0))
+                        OBD_FREE(*pages, *max_pages * sizeof(**pages));
         }
 
         return result;
@@ -208,6 +208,8 @@ static void ll_free_user_pages(struct page **pages, int npages, int do_dirty)
         int i;
 
         for (i = 0; i < npages; i++) {
+                if (pages[i] == NULL)
+                        break;
                 if (do_dirty)
                         set_page_dirty_lock(pages[i]);
                 page_cache_release(pages[i]);
@@ -216,30 +218,30 @@ static void ll_free_user_pages(struct page **pages, int npages, int do_dirty)
         OBD_FREE(pages, npages * sizeof(*pages));
 }
 
-static ssize_t ll_direct_IO_26_seg(const struct lu_env *env, struct cl_io *io,
-                                   int rw, struct inode *inode,
-                                   struct address_space *mapping,
-                                   size_t size, loff_t file_offset,
-                                   struct page **pages, int page_count)
+ssize_t ll_direct_rw_pages(const struct lu_env *env, struct cl_io *io,
+                           int rw, struct inode *inode,
+                           struct ll_dio_pages *pv)
 {
         struct cl_page    *clp;
-        struct ccc_page   *clup;
         struct cl_2queue  *queue;
         struct cl_object  *obj = io->ci_obj;
-        struct cl_sync_io *anchor = &ccc_env_info(env)->cti_sync_io;
         int i;
         ssize_t rc = 0;
-        ssize_t size_orig = size;
-        size_t page_size  = cl_page_size(obj);
+        loff_t file_offset  = pv->ldp_start_offset;
+        long size           = pv->ldp_size;
+        int page_count      = pv->ldp_nr;
+        struct page **pages = pv->ldp_pages;
+        long page_size      = cl_page_size(obj);
         ENTRY;
 
-        cl_sync_io_init(anchor, page_count);
-
         queue = &io->ci_queue;
         cl_2queue_init(queue);
         for (i = 0; i < page_count; i++) {
+                if (pv->ldp_offsets)
+                    file_offset = pv->ldp_offsets[i];
+                LASSERT(!(file_offset & (page_size - 1)));
                 clp = cl_page_find(env, obj, cl_index(obj, file_offset),
-                                   pages[i], CPT_TRANSIENT);
+                                   pv->ldp_pages[i], CPT_TRANSIENT);
                 if (IS_ERR(clp)) {
                         rc = PTR_ERR(clp);
                         break;
@@ -265,7 +267,7 @@ static ssize_t ll_direct_IO_26_seg(const struct lu_env *env, struct cl_io *io,
 
                         src = kmap_atomic(src_page, KM_USER0);
                         dst = kmap_atomic(dst_page, KM_USER1);
-                        memcpy(dst, (const void *)src, min(page_size, size));
+                        memcpy(dst, src, min(page_size, size));
                         kunmap_atomic(dst, KM_USER1);
                         kunmap_atomic(src, KM_USER0);
 
@@ -289,8 +291,6 @@ static ssize_t ll_direct_IO_26_seg(const struct lu_env *env, struct cl_io *io,
                         break;
                 }
 
-                clup = cl2ccc_page(cl_page_at(clp, &vvp_device_type));
-                clup->cpg_sync_io = anchor;
                 cl_2queue_add(queue, clp);
 
                 /* drop the reference count for cl_page_find, so that the page
@@ -306,21 +306,11 @@ static ssize_t ll_direct_IO_26_seg(const struct lu_env *env, struct cl_io *io,
         }
 
         if (rc == 0) {
-                rc = cl_io_submit_rw(env, io, rw == READ ? CRT_READ : CRT_WRITE,
-                                     queue);
-                if (rc == 0) {
-                        /*
-                         * If some pages weren't sent for any reason (e.g.,
-                         * direct-io read found up-to-date pages in the
-                         * cache), count them as completed to avoid infinite
-                         * wait.
-                         */
-                        cl_page_list_for_each(clp, &queue->c2_qin)
-                                cl_sync_io_note(anchor, +1);
-                        /* wait for the IO to be finished. */
-                        rc = cl_sync_io_wait(env, io, &queue->c2_qout,
-                                             anchor) ?: size_orig;
-                }
+                rc = cl_io_submit_sync(env, io,
+                                       rw == READ ? CRT_READ : CRT_WRITE,
+                                       queue, CRP_NORMAL, 0);
+                if (rc == 0)
+                        rc = pv->ldp_size;
         }
 
         cl_2queue_discard(env, io, queue);
@@ -328,6 +318,23 @@ static ssize_t ll_direct_IO_26_seg(const struct lu_env *env, struct cl_io *io,
         cl_2queue_fini(env, queue);
         RETURN(rc);
 }
+EXPORT_SYMBOL(ll_direct_rw_pages);
+
+static ssize_t ll_direct_IO_26_seg(const struct lu_env *env, struct cl_io *io,
+                                   int rw, struct inode *inode,
+                                   struct address_space *mapping,
+                                   size_t size, loff_t file_offset,
+                                   struct page **pages, int page_count)
+{
+    struct ll_dio_pages pvec = { .ldp_pages        = pages,
+                                 .ldp_nr           = page_count,
+                                 .ldp_size         = size,
+                                 .ldp_offsets      = NULL,
+                                 .ldp_start_offset = file_offset
+                               };
+
+    return ll_direct_rw_pages(env, io, rw, inode, &pvec);
+}
 
 /* This is the maximum size of a single O_DIRECT request, based on a 128kB
  * kmalloc limit.  We need to fit all of the brw_page structs, each one
@@ -344,11 +351,12 @@ static ssize_t ll_direct_IO_26(int rw, struct kiocb *iocb,
         struct file *file = iocb->ki_filp;
         struct inode *inode = file->f_mapping->host;
         struct ccc_object *obj = cl_inode2ccc(inode);
-        ssize_t count = iov_length(iov, nr_segs), tot_bytes = 0;
+        long count = iov_length(iov, nr_segs);
+        long tot_bytes = 0, result = 0;
         struct ll_inode_info *lli = ll_i2info(inode);
         struct lov_stripe_md *lsm = lli->lli_smd;
         unsigned long seg = 0;
-        size_t size = MAX_DIO_SIZE;
+        long size = MAX_DIO_SIZE;
         int refcheck;
         ENTRY;
 
@@ -359,8 +367,8 @@ static ssize_t ll_direct_IO_26(int rw, struct kiocb *iocb,
         if ((file_offset & ~CFS_PAGE_MASK) || (count & ~CFS_PAGE_MASK))
                 RETURN(-EINVAL);
 
-        CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), size="LPSZ" (max %lu), "
-               "offset=%lld=%llx, pages "LPSZ" (max %lu)\n",
+        CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), size=%lu (max %lu), "
+               "offset=%lld=%llx, pages %lu (max %lu)\n",
                inode->i_ino, inode->i_generation, inode, count, MAX_DIO_SIZE,
                file_offset, file_offset, count >> CFS_PAGE_SHIFT,
                MAX_DIO_SIZE >> CFS_PAGE_SHIFT);
@@ -385,7 +393,7 @@ static ssize_t ll_direct_IO_26(int rw, struct kiocb *iocb,
 
         LASSERT(obj->cob_transient_pages == 0);
         for (seg = 0; seg < nr_segs; seg++) {
-                size_t iov_left = iov[seg].iov_len;
+                long iov_left = iov[seg].iov_len;
                 unsigned long user_addr = (unsigned long)iov[seg].iov_base;
 
                 if (rw == READ) {
@@ -397,43 +405,44 @@ static ssize_t ll_direct_IO_26(int rw, struct kiocb *iocb,
 
                 while (iov_left > 0) {
                         struct page **pages;
-                        int page_count;
-                        ssize_t result;
-
-                        page_count = ll_get_user_pages(rw, user_addr,
-                                                       min(size, iov_left),
-                                                       &pages);
-                        LASSERT(page_count != 0);
-                        if (page_count > 0) {
+                        int page_count, max_pages = 0;
+                        long bytes;
+
+                        bytes = min(size,iov_left);
+                        page_count = ll_get_user_pages(rw, user_addr, bytes,
+                                                       &pages, &max_pages);
+                        if (likely(page_count > 0)) {
+                                if (unlikely(page_count <  max_pages))
+                                        bytes = page_count << CFS_PAGE_SHIFT;
                                 result = ll_direct_IO_26_seg(env, io, rw, inode,
                                                              file->f_mapping,
-                                                             min(size,iov_left),
+                                                             bytes,
                                                              file_offset, pages,
                                                              page_count);
-                                ll_free_user_pages(pages, page_count, rw==READ);
+                                ll_free_user_pages(pages, max_pages, rw==READ);
+                        } else if (page_count == 0) {
+                                GOTO(out, result = -EFAULT);
                         } else {
-                                result = 0;
+                                result = page_count;
                         }
-                        if (page_count < 0 || result <= 0) {
+                        if (unlikely(result <= 0)) {
                                 /* If we can't allocate a large enough buffer
                                  * for the request, shrink it to a smaller
                                  * PAGE_SIZE multiple and try again.
                                  * We should always be able to kmalloc for a
                                  * page worth of page pointers = 4MB on i386. */
-                                if ((page_count == -ENOMEM||result == -ENOMEM)&&
+                                if (result == -ENOMEM &&
                                     size > (CFS_PAGE_SIZE / sizeof(*pages)) *
                                            CFS_PAGE_SIZE) {
                                         size = ((((size / 2) - 1) |
                                                  ~CFS_PAGE_MASK) + 1) &
                                                 CFS_PAGE_MASK;
-                                        CDEBUG(D_VFSTRACE, "DIO size now %u\n",
-                                               (int)size);
+                                        CDEBUG(D_VFSTRACE,"DIO size now %lu\n",
+                                               size);
                                         continue;
                                 }
 
-                                if (tot_bytes <= 0)
-                                        tot_bytes = page_count < 0 ? page_count : result;
-                                GOTO(out, tot_bytes);
+                                GOTO(out, result);
                         }
 
                         tot_bytes += result;
@@ -456,7 +465,7 @@ out:
         }
 
         cl_env_put(env, &refcheck);
-        RETURN(tot_bytes);
+        RETURN(tot_bytes ? : result);
 }
 
 struct address_space_operations ll_aops = {
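
For reference, a minimal caller sketch (not part of the patch) of the interface this change exports: ll_direct_rw_pages() now takes a struct ll_dio_pages descriptor, and when ldp_offsets is non-NULL each page carries its own page-aligned file offset (otherwise pages are laid out contiguously from ldp_start_offset, as ll_direct_IO_26_seg does above). Field types are inferred from the usage in this hunk; the struct definition lives outside this file, and the helper name and WRITE path below are assumptions.

/* Hypothetical helper, assuming loff_t *ldp_offsets as used in the hunk
 * above: submit 'count' pinned pages at arbitrary page-aligned file
 * offsets through the newly exported ll_direct_rw_pages(). */
static ssize_t example_write_pages(const struct lu_env *env, struct cl_io *io,
                                   struct inode *inode, struct page **pages,
                                   loff_t *offsets, int count)
{
        struct ll_dio_pages pvec = {
                .ldp_pages        = pages,
                .ldp_nr           = count,
                .ldp_size         = (long)count << CFS_PAGE_SHIFT,
                .ldp_offsets      = offsets, /* per-page, page-aligned */
                .ldp_start_offset = 0,       /* unused when ldp_offsets is set */
        };

        /* rw == WRITE maps to CRT_WRITE inside ll_direct_rw_pages() */
        return ll_direct_rw_pages(env, io, WRITE, inode, &pvec);
}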