Branch HEAD
author     bobijam <bobijam>
Thu, 17 Sep 2009 01:46:39 +0000 (01:46 +0000)
committer  bobijam <bobijam>
Thu, 17 Sep 2009 01:46:39 +0000 (01:46 +0000)
b=20560
o=johann
i=alex.zhuravlev
i=zhenyu.xu (bobijam)

* Fix for crashes when disabling writethrough_cache_enable.
* Discard pages if something failed after filter_preprw_write().
* Don't poison pages for evicted clients.
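
The second fix is essentially the pattern below; this is a minimal sketch with a hypothetical helper name, not the actual obdfilter code (the real change is the lustre/obdfilter/filter_io_26.c hunk further down): once a bulk write has failed, the cached copy of the page can no longer be trusted, so PG_uptodate is cleared before the page is unlocked and the next access re-reads it from disk.

    /* minimal sketch, hypothetical helper: discard a cached page after a
     * failed write so that the next reader goes back to disk */
    static void discard_page_on_error(struct page *page, int rc)
    {
            if (rc != 0)
                    ClearPageUptodate(page); /* cached copy may not match disk */
            unlock_page(page);
            page_cache_release(page);        /* drop the reference held on the page */
    }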

lustre/obdfilter/filter_io.c
lustre/obdfilter/filter_io_26.c
lustre/ptlrpc/niobuf.c
lustre/ptlrpc/pers.c
lustre/ptlrpc/ptlrpc_internal.h

lustre/obdfilter/filter_io.c
index 30b8b71..5ed6c98 100644
@@ -113,10 +113,10 @@ void filter_grant_incoming(struct obd_export *exp, struct obdo *oa)
                 obd_size left_space = filter_grant_space_left(exp);
                 struct filter_obd *filter = &exp->exp_obd->u.filter;
 
-                /*Only if left_space < fo_tot_clients * 32M, 
+                /*Only if left_space < fo_tot_clients * 32M,
                  *then the grant space could be shrunk */
-                if (left_space < filter->fo_tot_granted_clients * 
-                                 FILTER_GRANT_SHRINK_LIMIT) { 
+                if (left_space < filter->fo_tot_granted_clients *
+                                 FILTER_GRANT_SHRINK_LIMIT) {
                         fed->fed_grant -= oa->o_grant;
                         filter->fo_tot_granted -= oa->o_grant;
                         CDEBUG(D_CACHE, "%s: cli %s/%p shrink "LPU64
@@ -272,7 +272,7 @@ long filter_grant(struct obd_export *exp, obd_size current_grant,
  * generate more memory pressure, but at the same time use __GFP_NOMEMALLOC
  * in order not to exhaust emergency reserves.
  *
- * See Bug 19529 and Bug 19917 for details. 
+ * See Bug 19529 and Bug 19917 for details.
  */
 static struct page *filter_get_page(struct obd_device *obd,
                                     struct inode *inode,
@@ -282,7 +282,7 @@ static struct page *filter_get_page(struct obd_device *obd,
         struct page *page;
 
         page = find_or_create_page(inode->i_mapping, offset >> CFS_PAGE_SHIFT,
-                                   (localreq ? (GFP_NOFS | __GFP_HIGHMEM) 
+                                   (localreq ? (GFP_NOFS | __GFP_HIGHMEM)
                                              : (GFP_HIGHUSER | __GFP_NOMEMALLOC)));
         if (unlikely(page == NULL))
                 lprocfs_counter_add(obd->obd_stats, LPROC_FILTER_NO_PAGE, 1);
@@ -694,7 +694,7 @@ static int filter_preprw_write(int cmd, struct obd_export *exp, struct obdo *oa,
                 RETURN(rc);
 
         if (exp->exp_connection &&
-            exp->exp_connection->c_peer.nid == exp->exp_connection->c_self) 
+            exp->exp_connection->c_peer.nid == exp->exp_connection->c_self)
                 localreq = 1;
 
         push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
@@ -795,8 +795,7 @@ static int filter_preprw_write(int cmd, struct obd_export *exp, struct obdo *oa,
                  * be able to proceed in filter_commitrw_write(). thus let's
                  * just wait for writeout completion, should be rare enough.
                  * -bzzz */
-                if (obd->u.filter.fo_writethrough_cache)
-                        wait_on_page_writeback(lnb->page);
+                wait_on_page_writeback(lnb->page);
                 BUG_ON(PageWriteback(lnb->page));
 
                 /* If the filter writes a partial page, then has the file
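
For context on the first fix: filter_get_page() uses find_or_create_page(), so the page it hands back may already be under writeback from an earlier cached write, and writethrough_cache_enable can be toggled at runtime. Gating the wait on fo_writethrough_cache could therefore skip a page that went under writeback while the cache was still enabled and trip the BUG_ON(). A minimal sketch of the resulting pattern (illustrative only; the mapping, offset and GFP arguments are placeholders taken from the hunk above):

    struct page *page;

    page = find_or_create_page(inode->i_mapping, offset >> CFS_PAGE_SHIFT,
                               GFP_NOFS | __GFP_HIGHMEM);
    if (page != NULL) {
            /* always drain in-flight writeback, even if the writethrough
             * cache has been disabled in the meantime */
            wait_on_page_writeback(page);
            BUG_ON(PageWriteback(page));
            /* ... page is now safe to fill with the incoming bulk data ... */
    }
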
lustre/obdfilter/filter_io_26.c
index 2621f3f..964f4f1 100644
@@ -775,6 +775,13 @@ cleanup:
                 if (lnb->page == NULL)
                         continue;
 
+                if (rc)
+                        /* If the write has failed, the page cache may
+                         * not be consistent with what is on disk, so
+                         * force the pages to be re-read the next time
+                         * they are accessed */
+                        ClearPageUptodate(lnb->page);
+
                 LASSERT(PageLocked(lnb->page));
                 unlock_page(lnb->page);
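
Why clearing PG_uptodate is sufficient follows from the generic 2.6 page-cache read path; the snippet below is stock kernel behaviour shown for illustration only, not part of this patch (the mapping and index are placeholders):

    /* read_cache_page() returns an uptodate page, re-issuing ->readpage()
     * if the cached copy had PG_uptodate cleared by the failed write */
    struct page *page = read_cache_page(inode->i_mapping, index,
                                        (filler_t *)inode->i_mapping->a_ops->readpage,
                                        NULL);
    if (!IS_ERR(page)) {
            /* contents now match the on-disk data again */
            page_cache_release(page);
    }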
 
lustre/ptlrpc/niobuf.c
index dffcf1a..f6d1d05 100644
@@ -174,9 +174,10 @@ void ptlrpc_abort_bulk(struct ptlrpc_bulk_desc *desc)
         if (!ptlrpc_server_bulk_active(desc))   /* completed or */
                 return;                         /* never started */
 
-        /* Do not send any meaningful data over the wire for evicted clients */
-        if (desc->bd_export && desc->bd_export->exp_failed)
-                ptl_rpc_wipe_bulk_pages(desc);
+        /* We used to poison the pages with 0xab here because we did not want
+         * to send any meaningful data over the wire for evicted clients
+         * (bug 9297).  However, this is no longer safe now that we use the
+         * page cache on the OSS (bug 20560). */
 
         /* The unlink ensures the callback happens ASAP and is the last
          * one.  If it fails, it must be because completion just happened,
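
The poisoning had to go because, with the read cache on the 1.8 OSS, the bulk descriptor is built directly on page-cache pages rather than on private I/O buffers, so a memset() over the kiov entries would corrupt file data later served to healthy clients from the cache. A rough sketch of how a cached page ends up in the descriptor (illustrative only and heavily simplified; compare filter_get_page() in the first hunk):

    /* the bulk page is the page-cache page itself, shared with any later
     * reader of the file; it is not a private buffer that may be wiped */
    struct page *page = find_or_create_page(inode->i_mapping,
                                            offset >> CFS_PAGE_SHIFT,
                                            GFP_HIGHUSER | __GFP_NOMEMALLOC);
    if (page != NULL)
            ptlrpc_prep_bulk_page(desc, page, pageoffset, len);
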
lustre/ptlrpc/pers.c
index 1b5f1ed..db58161 100644
@@ -76,18 +76,6 @@ void ptlrpc_add_bulk_page(struct ptlrpc_bulk_desc *desc, cfs_page_t *page,
         desc->bd_iov_count++;
 }
 
-void ptl_rpc_wipe_bulk_pages(struct ptlrpc_bulk_desc *desc)
-{
-        int i;
-        
-        for (i = 0; i < desc->bd_iov_count ; i++) {
-                lnet_kiov_t *kiov = &desc->bd_iov[i];
-                memset(cfs_kmap(kiov->kiov_page)+kiov->kiov_offset, 0xab,
-                       kiov->kiov_len);
-                cfs_kunmap(kiov->kiov_page);
-        }
-}
-
 #else /* !__KERNEL__ */
 
 void ptlrpc_fill_bulk_md(lnet_md_t *md, struct ptlrpc_bulk_desc *desc)
@@ -98,7 +86,7 @@ void ptlrpc_fill_bulk_md(lnet_md_t *md, struct ptlrpc_bulk_desc *desc)
                 md->length = desc->bd_iov[0].iov_len;
                 return;
         }
-        
+
         md->options |= LNET_MD_IOVEC;
         md->start = &desc->bd_iov[0];
         md->length = desc->bd_iov_count;
@@ -106,7 +94,7 @@ void ptlrpc_fill_bulk_md(lnet_md_t *md, struct ptlrpc_bulk_desc *desc)
 
 static int can_merge_iovs(lnet_md_iovec_t *existing, lnet_md_iovec_t *candidate)
 {
-        if (existing->iov_base + existing->iov_len == candidate->iov_base) 
+        if (existing->iov_base + existing->iov_len == candidate->iov_base)
                 return 1;
 #if 0
         /* Enable this section to provide earlier evidence of fragmented bulk */
@@ -117,7 +105,7 @@ static int can_merge_iovs(lnet_md_iovec_t *existing, lnet_md_iovec_t *candidate)
         return 0;
 }
 
-void ptlrpc_add_bulk_page(struct ptlrpc_bulk_desc *desc, cfs_page_t *page, 
+void ptlrpc_add_bulk_page(struct ptlrpc_bulk_desc *desc, cfs_page_t *page,
                           int pageoffset, int len)
 {
         lnet_md_iovec_t *iov = &desc->bd_iov[desc->bd_iov_count];
@@ -132,14 +120,4 @@ void ptlrpc_add_bulk_page(struct ptlrpc_bulk_desc *desc, cfs_page_t *page,
         }
 }
 
-void ptl_rpc_wipe_bulk_pages(struct ptlrpc_bulk_desc *desc)
-{
-        int i;
-
-        for(i = 0; i < desc->bd_iov_count; i++) {
-                lnet_md_iovec_t *iov = &desc->bd_iov[i];
-
-                memset(iov->iov_base, 0xab, iov->iov_len);
-        }
-}
 #endif /* !__KERNEL__ */
lustre/ptlrpc/ptlrpc_internal.h
index 40ccd94..b0690ca 100644
@@ -87,7 +87,6 @@ int ptlrpc_expire_one_request(struct ptlrpc_request *req, int async_unlink);
 void ptlrpc_fill_bulk_md(lnet_md_t *md, struct ptlrpc_bulk_desc *desc);
 void ptlrpc_add_bulk_page(struct ptlrpc_bulk_desc *desc, cfs_page_t *page,
                           int pageoffset, int len);
-void ptl_rpc_wipe_bulk_pages(struct ptlrpc_bulk_desc *desc);
 
 /* pack_generic.c */
 struct ptlrpc_reply_state *lustre_get_emerg_rs(struct ptlrpc_service *svc);