Whamcloud - gitweb
LU-613 clio: Client dead-lock during binary exec
[fs/lustre-release.git] / lustre / llite / rw.c
index 4c6da36..6bc1525 100644 (file)
@@ -38,7 +38,6 @@
  * Lustre Lite I/O page cache routines shared by different kernel revs
  */
 
-#include <linux/autoconf.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/string.h>
@@ -61,7 +60,6 @@
 
 #define DEBUG_SUBSYSTEM S_LLITE
 
-//#include <lustre_mdc.h>
 #include <lustre_lite.h>
 #include <obd_cksum.h>
 #include "llite_internal.h"
  * must be called under ->lli_size_sem */
 void ll_truncate(struct inode *inode)
 {
-        struct ll_inode_info *lli = ll_i2info(inode);
         ENTRY;
 
-        CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p) to %Lu\n",inode->i_ino,
+        CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p) to %llu\n", inode->i_ino,
                inode->i_generation, inode, i_size_read(inode));
 
         ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_TRUNC, 1);
-        if (lli->lli_size_sem_owner == cfs_current()) {
-                LASSERT_SEM_LOCKED(&lli->lli_size_sem);
-                ll_inode_size_unlock(inode, 0);
-        }
 
         EXIT;
         return;
@@ -150,7 +143,6 @@ static struct ll_cl_context *ll_cl_init(struct file *file,
         cio = ccc_env_io(env);
         io = cio->cui_cl.cis_io;
         if (io == NULL && create) {
-                struct vvp_io *vio;
                 loff_t pos;
 
                 /*
@@ -158,9 +150,7 @@ static struct ll_cl_context *ll_cl_init(struct file *file,
                  * methods directly, bypassing file system ->write() operation,
                  * so cl_io has to be created here.
                  */
-
-                io = &ccc_env_info(env)->cti_io;
-                vio = vvp_env_io(env);
+                io = ccc_env_thread_io(env);
                 ll_io_init(io, file, 1);
 
                 /* No lock at all for this kind of IO - we can't do it because
@@ -214,7 +204,7 @@ static struct ll_cl_context *ll_cl_init(struct file *file,
                 lcc = ERR_PTR(result);
         }
 
-        CDEBUG(D_VFSTRACE, "%lu@"DFID" -> %i %p %p\n",
+        CDEBUG(D_VFSTRACE, "%lu@"DFID" -> %d %p %p\n",
                vmpage->index, PFID(lu_object_fid(&clob->co_lu)), result,
                env, io);
         return lcc;
@@ -294,7 +284,7 @@ int ll_commit_write(struct file *file, struct page *vmpage, unsigned from,
         struct lu_env    *env;
         struct cl_io     *io;
         struct cl_page   *page;
-        int result;
+        int result = 0;
         ENTRY;
 
         lcc  = ll_cl_get();
@@ -303,11 +293,14 @@ int ll_commit_write(struct file *file, struct page *vmpage, unsigned from,
         io   = lcc->lcc_io;
 
         LASSERT(cl_page_is_owned(page, io));
-        result = cl_io_commit_write(env, io, page, from, to);
+        LASSERT(from <= to);
+        if (from != to) /* handle short write case. */
+                result = cl_io_commit_write(env, io, page, from, to);
         if (cl_page_is_owned(page, io))
                 cl_page_unassume(env, io, page);
+
         /*
-         * Release reference acquired by cl_io_prepare_write().
+         * Release reference acquired by ll_prepare_write().
          */
         lu_ref_del(&page->cp_reference, "prepare_write", cfs_current());
         cl_page_put(env, page);
@@ -335,7 +328,8 @@ static void ll_ra_stats_inc_sbi(struct ll_sb_info *sbi, enum ra_stat which);
  * ll_ra_count_get at the exactly same time. All of them will get a zero ra
  * window, although the global window is 100M. -jay
  */
-static unsigned long ll_ra_count_get(struct ll_sb_info *sbi, unsigned long len)
+static unsigned long ll_ra_count_get(struct ll_sb_info *sbi, struct ra_io_arg *ria,
+                                     unsigned long len)
 {
         struct ll_ra_info *ra = &sbi->ll_ra_info;
         unsigned long ret;
@@ -346,14 +340,23 @@ static unsigned long ll_ra_count_get(struct ll_sb_info *sbi, unsigned long len)
          * otherwise it will form small read RPC(< 1M), which hurt server
          * performance a lot.
          */
+        if (ra->ra_max_pages < atomic_read(&ra->ra_cur_pages))
+                GOTO(out, ret = 0);
+
         ret = min(ra->ra_max_pages - cfs_atomic_read(&ra->ra_cur_pages), len);
         if ((int)ret < 0 || ret < min((unsigned long)PTLRPC_MAX_BRW_PAGES, len))
                 GOTO(out, ret = 0);
 
+        if (ria->ria_pages == 0)
+                /* it needs 1M align again after trimmed by ra_max_pages */
+                if (ret >= ((ria->ria_start + ret) % PTLRPC_MAX_BRW_PAGES))
+                        ret -= (ria->ria_start + ret) % PTLRPC_MAX_BRW_PAGES;
+
         if (cfs_atomic_add_return(ret, &ra->ra_cur_pages) > ra->ra_max_pages) {
                 cfs_atomic_sub(ret, &ra->ra_cur_pages);
                 ret = 0;
         }
+
 out:
         RETURN(ret);
 }
@@ -784,8 +787,7 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io,
         if (len == 0)
                 RETURN(0);
 
-        reserved = ll_ra_count_get(ll_i2sbi(inode), len);
-
+        reserved = ll_ra_count_get(ll_i2sbi(inode), ria, len);
         if (reserved < len)
                 ll_ra_stats_inc(mapping, RA_STAT_MAX_IN_FLIGHT);
 
@@ -1120,7 +1122,7 @@ out_unlock:
         return;
 }
 
-int ll_writepage(struct page *vmpage, struct writeback_control *unused)
+int ll_writepage(struct page *vmpage, struct writeback_control *wbc)
 {
         struct inode           *inode = vmpage->mapping->host;
         struct lu_env          *env;
@@ -1142,11 +1144,11 @@ int ll_writepage(struct page *vmpage, struct writeback_control *unused)
         if (IS_ERR(env))
                 RETURN(PTR_ERR(env));
 
-        io    = &ccc_env_info(env)->cti_io;
         queue = &vvp_env_info(env)->vti_queue;
         clob  = ll_i2info(inode)->lli_clob;
         LASSERT(clob != NULL);
 
+        io = ccc_env_thread_io(env);
         io->ci_obj = clob;
         result = cl_io_init(env, io, CIT_MISC, clob);
         if (result == 0) {
@@ -1167,21 +1169,18 @@ int ll_writepage(struct page *vmpage, struct writeback_control *unused)
                         cl_2queue_init_page(queue, page);
                         result = cl_io_submit_rw(env, io, CRT_WRITE,
                                                  queue, CRP_NORMAL);
-                        cl_page_list_disown(env, io, &queue->c2_qin);
                         if (result != 0) {
                                 /*
-                                 * There is no need to clear PG_writeback, as
-                                 * cl_io_submit_rw() calls completion callback
-                                 * on failure.
-                                 */
-                                /*
                                  * Re-dirty page on error so it retries write,
                                  * but not in case when IO has actually
                                  * occurred and completed with an error.
                                  */
-                                if (!PageError(vmpage))
-                                        set_page_dirty(vmpage);
+                                if (!PageError(vmpage)) {
+                                        redirty_page_for_writepage(wbc, vmpage);
+                                        result = 0;
+                                }
                         }
+                        cl_page_list_disown(env, io, &queue->c2_qin);
                         LASSERT(!cl_page_is_owned(page, io));
                         lu_ref_del(&page->cp_reference,
                                    "writepage", cfs_current());
@@ -1218,6 +1217,7 @@ int ll_readpage(struct file *file, struct page *vmpage)
                 }
                 ll_cl_fini(lcc);
         } else {
+                unlock_page(vmpage);
                 result = PTR_ERR(lcc);
         }
         RETURN(result);