Whamcloud - gitweb
Branch HEAD
[fs/lustre-release.git] / lustre / llite / rw.c
index ac97e87..dc99195 100644 (file)
@@ -169,8 +169,10 @@ void ll_truncate(struct inode *inode)
                         struct ll_async_page *llap = llap_cast_private(page);
                         if (llap != NULL) {
+                                char *kaddr = kmap_atomic(page, KM_USER0);
+
                                 llap->llap_checksum =
-                                        crc32_le(0, kmap(page), CFS_PAGE_SIZE);
-                                kunmap(page);
+                                        crc32_le(0, kaddr, CFS_PAGE_SIZE);
+                                kunmap_atomic(kaddr, KM_USER0);
                         }
                         page_cache_release(page);
                 }
@@ -267,8 +269,11 @@ int ll_prepare_write(struct file *file, struct page *page, unsigned from,
         if (lvb.lvb_size <= offset) {
+                char *kaddr;
+
                 LL_CDEBUG_PAGE(D_PAGE, page, "kms "LPU64" <= offset "LPU64"\n",
                                lvb.lvb_size, offset);
-                memset(kmap(page), 0, CFS_PAGE_SIZE);
-                kunmap(page);
+                kaddr = kmap_atomic(page, KM_USER0);
+                memset(kaddr, 0, CFS_PAGE_SIZE);
+                kunmap_atomic(kaddr, KM_USER0);
                 GOTO(prepare_done, rc = 0);
         }
 
@@ -313,16 +315,12 @@ static int ll_ap_make_ready(void *data, int cmd)
          * we got the page cache list we'd create a lock inversion
          * with the removepage path which gets the page lock then the
          * cli lock */
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
-        clear_page_dirty(page);
-#else
         LASSERTF(!PageWriteback(page),"cmd %x page %p ino %lu index %lu\n", cmd, page,
                  page->mapping->host->i_ino, page->index);
         clear_page_dirty_for_io(page);
 
         /* This actually clears the dirty bit in the radix tree.*/
         set_page_writeback(page);
-#endif
 
         LL_CDEBUG_PAGE(D_PAGE, page, "made ready\n");
         page_cache_get(page);
@@ -645,8 +643,9 @@ struct ll_async_page *llap_from_page(struct page *page, unsigned origin)
 out:
         if (unlikely(sbi->ll_flags & LL_SBI_CHECKSUM)) {
                 __u32 csum = 0;
-                csum = crc32_le(csum, kmap(page), CFS_PAGE_SIZE);
-                kunmap(page);
+                char *kaddr = kmap_atomic(page, KM_USER0);
+                csum = crc32_le(csum, kaddr, CFS_PAGE_SIZE);
+                kunmap_atomic(kaddr, KM_USER0);
                 if (origin == LLAP_ORIGIN_READAHEAD ||
                     origin == LLAP_ORIGIN_READPAGE) {
                         llap->llap_checksum = 0;
@@ -717,8 +716,9 @@ static int queue_or_sync_write(struct obd_export *exp, struct inode *inode,
                      llap->llap_checksum != 0)) {
                 __u32 csum = 0;
                 struct page *page = llap->llap_page;
-                csum = crc32_le(csum, kmap(page), CFS_PAGE_SIZE);
-                kunmap(page);
+                char *kaddr = kmap_atomic(page, KM_USER0);
+                csum = crc32_le(csum, kaddr, CFS_PAGE_SIZE);
+                kunmap_atomic(kaddr, KM_USER0);
                 if (llap->llap_checksum == csum) {
                         CDEBUG(D_PAGE, "page %p cksum %x confirmed\n",
                                page, csum);
@@ -904,14 +904,10 @@ int ll_ap_completion(void *data, int cmd, struct obdo *oa, int rc)
                         llap->llap_defer_uptodate = 0;
                 }
                 SetPageError(page);
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
                 if (rc == -ENOSPC)
                         set_bit(AS_ENOSPC, &page->mapping->flags);
                 else
                         set_bit(AS_EIO, &page->mapping->flags);
-#else
-                page->mapping->gfp_mask |= AS_EIO_MASK;
-#endif
         }
 
         unlock_page(page);