void osc_lru_add_batch(struct client_obd *cli, struct list_head *list);
void osc_page_submit(const struct lu_env *env, struct osc_page *opg,
		     enum cl_req_type crt, int brw_flags);
+void osc_dio_page_submit(const struct lu_env *env, struct osc_page *opg,
+			 enum cl_req_type crt, int *brw_flags);
int lru_queue_work(const struct lu_env *env, void *data);
long osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
		    long target, bool force);
};
/**
+ * tracking structure for the DIO pages covered by an extent
+ */
+struct osc_dio_pages {
+	/* the cl_dio_pages struct these pages come from */
+	struct cl_dio_pages	*odp_cdp;
+	int			 odp_brw_flags;
+	/* this osc_dio_pages covers a subset of the pages in the cdp;
+	 * odp_from and odp_to give that range
+	 */
+	int			 odp_from;
+	int			 odp_to;
+};
+
+/**
* osc_extent data to manage dirty pages.
* osc_extent has the following attributes:
* 1. all pages in the same extent must be in one RPC in write back;
	unsigned int		oe_nr_pages;
	/** list of pending oap pages. Pages in this list are NOT sorted. */
	struct list_head	oe_pages;
+	/* tracking structure for dio pages */
+	struct osc_dio_pages	oe_odp;
	/** start and end index of this extent, include start and end
	 * themselves. Page offset here is the page index of osc_pages.
	 * oe_start is used as keyword for red-black tree. */
	ext->oe_srvlock = !!(brw_flags & OBD_BRW_SRVLOCK);
	ext->oe_ndelay = !!(brw_flags & OBD_BRW_NDELAY);
	ext->oe_dio = true;
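+	/* record the source cl_dio_pages and the brw flags for this extent */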
+	ext->oe_odp.odp_cdp = cdp;
+	ext->oe_odp.odp_brw_flags = brw_flags;
	oscl = oio->oi_write_osclock ? : oio->oi_read_osclock;
	if (oscl && oscl->ols_dlmlock != NULL) {
		ext->oe_dlmlock = LDLM_LOCK_GET(oscl->ols_dlmlock);
		if (from == -1)
			from = i;
-		osc_page_submit(env, opg, crt, brw_flags);
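+		/* DIO pages skip the LRU accounting in osc_page_submit() */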
+		osc_dio_page_submit(env, opg, crt, &brw_flags);
		list_add_tail(&oap->oap_pending_item, &list);
		queued++;
EXPORT_SYMBOL(osc_page_init);
/**
+ * Helper function called by osc_io_submit() for every page in a direct I/O
+ * (DIO) transfer.
+ */
+void osc_dio_page_submit(const struct lu_env *env, struct osc_page *opg,
+			 enum cl_req_type crt, int *brw_flags)
+{
+	struct osc_async_page *oap = &opg->ops_oap;
+	struct osc_io *oio = osc_env_io(env);
+
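+	/* DIO pages must already be ready and have a stable transfer count */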
+	LASSERT(oap->oap_async_flags & ASYNC_READY);
+	LASSERT(oap->oap_async_flags & ASYNC_COUNT_STABLE);
+
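+	/* DIO transfers are always synchronous BRW requests */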
+	oap->oap_cmd = crt == CRT_WRITE ? OBD_BRW_WRITE : OBD_BRW_READ;
+	oap->oap_brw_flags = OBD_BRW_SYNC | *brw_flags;
+
+	if (oio->oi_cap_sys_resource)
+		oap->oap_brw_flags |= OBD_BRW_SYS_RESOURCE;
+
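+	/* hand the effective flags back to the caller */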
+	*brw_flags = oap->oap_brw_flags;
+}
+
+/**
* Helper function called by osc_io_submit() for every page in an immediate
* transfer (i.e., transferred synchronously).
*/
		     enum cl_req_type crt, int brw_flags)
{
	struct osc_object *obj = osc_page_object(opg);
-	struct cl_page *page = opg->ops_cl.cpl_page;
	struct osc_async_page *oap = &opg->ops_oap;
	struct osc_io *oio = osc_env_io(env);
	if (oio->oi_cap_sys_resource)
		oap->oap_brw_flags |= OBD_BRW_SYS_RESOURCE;
-	if (page->cp_type != CPT_TRANSIENT) {
-		osc_page_transfer_get(opg, "transfer\0imm");
-		osc_lru_use(osc_cli(obj), opg);
-	}
+	osc_page_transfer_get(opg, "transfer\0imm");
+	osc_lru_use(osc_cli(obj), opg);
}
/* --------------- LRU page management ------------------ */