- for (i = 0, length = size; length > 0;
- length -=pga[i].count, file_offset +=pga[i].count,i++) {/*i last!*/
- pga[i].pg = pages[i];
- pga[i].off = file_offset;
- /* To the end of the page, or the length, whatever is less */
- pga[i].count = min_t(int, CFS_PAGE_SIZE -(file_offset & ~CFS_PAGE_MASK),
- length);
- pga[i].flag = 0;
- if (rw == READ)
- POISON_PAGE(pages[i], 0x0d);
- }
+ /* Check the page type: if the page is a host (cacheable) page,
+  * write through it directly. */
+ /*
+  * It is a very rare case that host pages can be found here in
+  * the direct I/O path, since the Linux kernel truncates all
+  * covered pages before getting here. So, to keep the OST happy
+  * (it wants to write a contiguous region), all pages are issued
+  * here. -jay */
+ if (clp->cp_type == CPT_CACHEABLE) {
+ cfs_page_t *vmpage = cl_page_vmpage(env, clp);
+ cfs_page_t *src_page;
+ cfs_page_t *dst_page;
+ void *src;
+ void *dst;
+
+ src_page = (rw == WRITE) ? pages[i] : vmpage;
+ dst_page = (rw == WRITE) ? vmpage : pages[i];
+
+ src = kmap_atomic(src_page, KM_USER0);
+ dst = kmap_atomic(dst_page, KM_USER1);
+ memcpy(dst, (const void *)src, min(page_size, size));
+ kunmap_atomic(dst, KM_USER1);
+ kunmap_atomic(src, KM_USER0);
+
+ /* make sure page will be added to the transfer by
+ * cl_io_submit()->...->vvp_page_prep_write(). */
+ if (rw == WRITE)
+ set_page_dirty(vmpage);
+ /*
+ * If direct-io read finds up-to-date page in the
+ * cache, just copy it to the user space. Page will be
+ * filtered out by vvp_page_prep_read(). This
+ * preserves an invariant, that page is read at most
+ * once, see cl_page_flags::CPF_READ_COMPLETED.
+ */
+ }