- struct cl_page *clp = cp->cpg_cl.cpl_page;
- cfs_page_t *vmpage = cp->cpg_page;
- struct inode *inode = ccc_object_inode(clp->cp_obj);
- struct cl_sync_io *anchor = cp->cpg_sync_io;
-
- LINVRNT(cl_page_is_vmlocked(env, clp));
-
- if (anchor != NULL) {
- cp->cpg_sync_io = NULL;
- cl_sync_io_note(anchor, ioret);
- } else if (clp->cp_type == CPT_CACHEABLE) {
- /*
- * Don't assert the page writeback bit here because the lustre
- * file may be as a backend of swap space. in this case, the
- * page writeback is set by VM, and obvious we shouldn't clear
- * it at all. Fortunately this type of pages are all TRANSIENT
- * pages.
- */
- LASSERT(!PageWriteback(vmpage));
-
- /*
- * Only mark the page error only when it's a cacheable page
- * and NOT a sync io.
- *
- * For sync IO and direct IO(CPT_TRANSIENT), the error is able
- * to be seen by application, so we don't need to mark a page
- * as error at all.
- */
- vvp_vmpage_error(inode, vmpage, ioret);
- unlock_page(vmpage);
- }
+	struct ccc_object *obj = cl_inode2ccc(inode);
+
+	if (ioret == 0) {
+		/* IO succeeded: clear any stale error bit on the page and
+		 * re-arm the one-shot discard warning for this object. */
+		ClearPageError(vmpage);
+		obj->cob_discard_page_warned = 0;
+	} else {
+		/* IO failed: flag the page and latch the error on the
+		 * address_space so it can be reported later (e.g. by
+		 * fsync); -ENOSPC is kept distinct from generic IO errors. */
+		SetPageError(vmpage);
+		if (ioret == -ENOSPC)
+			set_bit(AS_ENOSPC, &inode->i_mapping->flags);
+		else
+			set_bit(AS_EIO, &inode->i_mapping->flags);
+
+		/* Warn at most once per object that a dirty page is being
+		 * discarded (-ESHUTDOWN or -EINTR).  NOTE(review): the
+		 * one-shot latch is only reset by a later successful IO
+		 * in the branch above — confirm that is intended. */
+		if ((ioret == -ESHUTDOWN || ioret == -EINTR) &&
+		    obj->cob_discard_page_warned == 0) {
+			obj->cob_discard_page_warned = 1;
+			ll_dirty_page_discard_warn(vmpage, ioret);
+		}
+	}