- .cpo_cache_add = osc_page_cache_add
- }
- },
- .cpo_clip = osc_page_clip,
- .cpo_cancel = osc_page_cancel
-};
-
-static int osc_make_ready(const struct lu_env *env, void *data, int cmd)
-{
- struct osc_page *opg = data;
- struct cl_page *page = cl_page_top(opg->ops_cl.cpl_page);
- int result;
-
- LASSERT(cmd == OBD_BRW_WRITE); /* no cached reads */
- LINVRNT(osc_page_protected(env, opg, CLM_WRITE, 1));
-
- ENTRY;
- result = cl_page_make_ready(env, page, CRT_WRITE);
- if (result == 0)
- opg->ops_submit_time = cfs_time_current();
- RETURN(result);
-}
-
-static int osc_refresh_count(const struct lu_env *env, void *data, int cmd)
-{
- struct cl_page *page;
- struct osc_page *osc = data;
- struct cl_object *obj;
- struct cl_attr *attr = &osc_env_info(env)->oti_attr;
-
- int result;
- loff_t kms;
-
- LINVRNT(osc_page_protected(env, osc, CLM_READ, 1));
-
- /* readpage queues with _COUNT_STABLE, shouldn't get here. */
- LASSERT(!(cmd & OBD_BRW_READ));
- LASSERT(osc != NULL);
- page = osc->ops_cl.cpl_page;
- obj = osc->ops_cl.cpl_obj;
-
- cl_object_attr_lock(obj);
- result = cl_object_attr_get(env, obj, attr);
- cl_object_attr_unlock(obj);
- if (result < 0)
- return result;
- kms = attr->cat_kms;
- if (cl_offset(obj, page->cp_index) >= kms)
- /* catch race with truncate */
- return 0;
- else if (cl_offset(obj, page->cp_index + 1) > kms)
- /* catch sub-page write at end of file */
- return kms % CFS_PAGE_SIZE;
- else
- return CFS_PAGE_SIZE;
-}
-
/*
 * Async-page callback: per-page transfer completion.
 *
 * Called when the RPC covering this page finishes.  @cmd is the BRW
 * command (OBD_BRW_READ or OBD_BRW_WRITE, possibly with the NOQUOTA
 * flag), @rc is the transfer result.  Tears down per-transfer state in
 * a deliberately ordered sequence; see the comments below — the
 * ordering matters, do not rearrange.  Always returns 0.
 */
static int osc_completion(const struct lu_env *env,
                          void *data, int cmd, struct obdo *oa, int rc)
{
	struct osc_page *opg = data;
	struct osc_async_page *oap = &opg->ops_oap;
	struct cl_page *page = cl_page_top(opg->ops_cl.cpl_page);
	struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);
	enum cl_req_type crt;

	LINVRNT(osc_page_protected(env, opg, CLM_READ, 1));
	LINVRNT(cl_page_is_vmlocked(env, page));

	ENTRY;

	/* Strip the quota flag so the state checks below see only the
	 * bare READ/WRITE command. */
	cmd &= ~OBD_BRW_NOQUOTA;
	LASSERT(equi(page->cp_state == CPS_PAGEIN, cmd == OBD_BRW_READ));
	LASSERT(equi(page->cp_state == CPS_PAGEOUT, cmd == OBD_BRW_WRITE));
	LASSERT(opg->ops_transfer_pinned);

	/*
	 * page->cp_req can be NULL if io submission failed before
	 * cl_req was allocated.
	 */
	if (page->cp_req != NULL)
		cl_req_page_done(env, page);
	LASSERT(page->cp_req == NULL);

	/* As the transfer for this page is being done, clear the flags */
	spin_lock(&oap->oap_lock);
	oap->oap_async_flags = 0;
	spin_unlock(&oap->oap_lock);

	crt = cmd == OBD_BRW_READ ? CRT_READ : CRT_WRITE;
	/* Clear opg->ops_transfer_pinned before VM lock is released. */
	opg->ops_transfer_pinned = 0;

	/* Remove the page from the object's in-flight list under the
	 * "seatbelt" lock; it must have been added by the submitter. */
	spin_lock(&obj->oo_seatbelt);
	LASSERT(opg->ops_submitter != NULL);
	LASSERT(!list_empty(&opg->ops_inflight));
	list_del_init(&opg->ops_inflight);
	spin_unlock(&obj->oo_seatbelt);

	opg->ops_submit_time = 0;

	/* Notify the cl layer; this may unlock the VM page. */
	cl_page_completion(env, page, crt, rc);

	/* statistic: account lockless (server-locked) I/O byte counts */
	if (rc == 0 && oap->oap_brw_flags & OBD_BRW_SRVLOCK) {
		struct lu_device *ld = opg->ops_cl.cpl_obj->co_lu.lo_dev;
		struct osc_stats *stats = &lu2osc_dev(ld)->od_stats;
		int bytes = opg->ops_to - opg->ops_from;

		if (crt == CRT_READ)
			stats->os_lockless_reads += bytes;
		else
			stats->os_lockless_writes += bytes;
	}

	/*
	 * This has to be the last operation with the page, as locks are
	 * released in cl_page_completion() and nothing except for the
	 * reference counter protects page from concurrent reclaim.
	 */
	lu_ref_del(&page->cp_reference, "transfer", page);
	/*
	 * As page->cp_obj is pinned by a reference from page->cp_req, it is
	 * safe to call cl_page_put() without risking object destruction in a
	 * non-blocking context.
	 */
	cl_page_put(env, page);
	RETURN(0);
}
-
-const static struct obd_async_page_ops osc_async_page_ops = {
- .ap_make_ready = osc_make_ready,
- .ap_refresh_count = osc_refresh_count,
- .ap_completion = osc_completion