static int osc_extent_wait(const struct lu_env *env, struct osc_extent *ext,
enum osc_extent_state state);
static void osc_completion(const struct lu_env *env, struct osc_object *osc,
- struct osc_async_page *oap, enum cl_req_type crt,
+ struct osc_transfer_page *otp, enum cl_req_type crt,
int rc);
static void osc_dio_completion(const struct lu_env *env, struct osc_object *osc,
struct cl_dio_pages *cdp, enum cl_req_type crt,
int page_count, int brw_flags, int rc);
-static int osc_make_ready(const struct lu_env *env, struct osc_async_page *oap,
- int cmd);
+static int osc_make_ready(const struct lu_env *env,
+ struct osc_transfer_page *otp, int cmd);
static int osc_refresh_count(const struct lu_env *env, struct osc_object *osc,
- struct osc_async_page *oap, int cmd);
+ struct osc_transfer_page *otp, int cmd);
static int osc_io_unplug_async(const struct lu_env *env,
struct client_obd *cli, struct osc_object *osc);
static void osc_free_grant(struct client_obd *cli, unsigned int nr_pages,
const char *func, const int line)
{
struct osc_object *obj = ext->oe_obj;
- struct osc_async_page *oap;
+ struct osc_transfer_page *otp;
size_t page_count;
int rc = 0;
GOTO(out, rc = 0);
page_count = 0;
- list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
- pgoff_t index = osc_index(oap2osc(oap));
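+ /* every page in the extent must have an index inside [oe_start, oe_end] */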
+ list_for_each_entry(otp, &ext->oe_pages, otp_pending_item) {
+ pgoff_t index = osc_index(otp2osc(otp));
++page_count;
if (index > ext->oe_end || index < ext->oe_start)
GOTO(out, rc = 110);
{
struct client_obd *cli = osc_cli(ext->oe_obj);
struct osc_object *osc = ext->oe_obj;
- struct osc_async_page *oap;
- struct osc_async_page *tmp;
+ struct osc_transfer_page *otp;
+ struct osc_transfer_page *tmp;
int nr_pages = ext->oe_nr_pages;
int lost_grant = 0;
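+ /* fall back to a 4096-byte block size if the OST reported none */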
int blocksize = cli->cl_import->imp_obd->obd_osfs.os_bsize ? : 4096;
if (!ext->oe_dio)
osc_lru_add_batch(cli, &ext->oe_pages);
- list_for_each_entry_safe(oap, tmp, &ext->oe_pages,
- oap_pending_item) {
- list_del_init(&oap->oap_rpc_item);
- list_del_init(&oap->oap_pending_item);
- if (last_off <= oap->oap_obj_off) {
- last_off = oap->oap_obj_off;
- last_count = oap->oap_count;
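+ /* unlink each page from the pending and RPC lists and remember the offset and count of the last page in the extent */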
+ list_for_each_entry_safe(otp, tmp, &ext->oe_pages,
+ otp_pending_item) {
+ list_del_init(&otp->otp_rpc_item);
+ list_del_init(&otp->otp_pending_item);
+ if (last_off <= otp->otp_obj_off) {
+ last_off = otp->otp_obj_off;
+ last_count = otp->otp_count;
}
--ext->oe_nr_pages;
if (ext->oe_odp.odp_cdp)
continue;
else
- osc_completion(env, osc, oap, crt, rc);
+ osc_completion(env, osc, otp, crt, rc);
}
/* after this, the cl_dio_pages may be freed, so take care not to
struct cl_io *io;
struct osc_object *obj = ext->oe_obj;
struct client_obd *cli = osc_cli(obj);
- struct osc_async_page *oap;
- struct osc_async_page *tmp;
+ struct osc_transfer_page *otp;
+ struct osc_transfer_page *tmp;
struct pagevec *pvec;
int pages_in_chunk = 0;
int ppc_bits = cli->cl_chunkbits -
GOTO(out, rc);
/* discard all pages with index greater than trunc_index */
- list_for_each_entry_safe(oap, tmp, &ext->oe_pages,
- oap_pending_item) {
- pgoff_t index = osc_index(oap2osc(oap));
- struct cl_page *page = oap2cl_page(oap);
+ list_for_each_entry_safe(otp, tmp, &ext->oe_pages,
+ otp_pending_item) {
+ pgoff_t index = osc_index(otp2osc(otp));
+ struct cl_page *page = otp2cl_page(otp);
- LASSERT(list_empty(&oap->oap_rpc_item));
+ LASSERT(list_empty(&otp->otp_rpc_item));
/* only discard pages whose index is greater than
* trunc_index, and ... */
continue;
}
- list_del_init(&oap->oap_pending_item);
+ list_del_init(&otp->otp_pending_item);
cl_page_get(page);
lu_ref_add(&page->cp_reference, "truncate", current);
static int osc_extent_make_ready(const struct lu_env *env,
struct osc_extent *ext)
{
- struct osc_async_page *oap;
- struct osc_async_page *last = NULL;
+ struct osc_transfer_page *otp;
+ struct osc_transfer_page *last = NULL;
struct osc_object *obj = ext->oe_obj;
unsigned int page_count = 0;
int rc;
OSC_EXTENT_DUMP(D_CACHE, ext, "make ready\n");
- list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
+ list_for_each_entry(otp, &ext->oe_pages, otp_pending_item) {
++page_count;
- if (last == NULL || last->oap_obj_off < oap->oap_obj_off)
- last = oap;
+ if (last == NULL || last->otp_obj_off < otp->otp_obj_off)
+ last = otp;
/* checking ASYNC_READY is race safe */
- if ((oap->oap_async_flags & ASYNC_READY) != 0)
+ if ((otp->otp_flags & ASYNC_READY) != 0)
continue;
- rc = osc_make_ready(env, oap, OBD_BRW_WRITE);
+ rc = osc_make_ready(env, otp, OBD_BRW_WRITE);
switch (rc) {
case 0:
- oap->oap_async_flags |= ASYNC_READY;
+ otp->otp_flags |= ASYNC_READY;
break;
case -EALREADY:
- LASSERT((oap->oap_async_flags & ASYNC_READY) != 0);
+ LASSERT((otp->otp_flags & ASYNC_READY) != 0);
break;
default:
LASSERTF(0, "unknown return code: %d\n", rc);
LASSERT(last != NULL);
/* the last page is the only one whose count may need to be
* refreshed against the file size. */
- if (!(last->oap_async_flags & ASYNC_COUNT_STABLE)) {
- int last_oap_count = osc_refresh_count(env, obj, last,
+ if (!(last->otp_flags & ASYNC_COUNT_STABLE)) {
+ int last_otp_count = osc_refresh_count(env, obj, last,
OBD_BRW_WRITE);
- LASSERTF(last_oap_count > 0,
- "last_oap_count %d\n", last_oap_count);
- LASSERT(last->oap_page_off + last_oap_count <= PAGE_SIZE);
- last->oap_count = last_oap_count;
- last->oap_async_flags |= ASYNC_COUNT_STABLE;
+ LASSERTF(last_otp_count > 0,
+ "last_otp_count %d\n", last_otp_count);
+ LASSERT(last->otp_page_off + last_otp_count <= PAGE_SIZE);
+ last->otp_count = last_otp_count;
+ last->otp_flags |= ASYNC_COUNT_STABLE;
}
/* for the remaining pages we don't need to call osc_refresh_count()
* because they are known not to be the last page */
- list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
- if (!(oap->oap_async_flags & ASYNC_COUNT_STABLE)) {
- oap->oap_count = PAGE_SIZE - oap->oap_page_off;
- oap->oap_async_flags |= ASYNC_COUNT_STABLE;
+ list_for_each_entry(otp, &ext->oe_pages, otp_pending_item) {
+ if (!(otp->otp_flags & ASYNC_COUNT_STABLE)) {
+ otp->otp_count = PAGE_SIZE - otp->otp_page_off;
+ otp->otp_flags |= ASYNC_COUNT_STABLE;
}
}
list_empty_marker(&(OSC)->oo_reading_exts), \
##args)
-static int osc_make_ready(const struct lu_env *env, struct osc_async_page *oap,
- int cmd)
+static int osc_make_ready(const struct lu_env *env,
+ struct osc_transfer_page *otp, int cmd)
{
- struct cl_page *page = oap2cl_page(oap);
+ struct cl_page *page = otp2cl_page(otp);
int result;
LASSERT(cmd == OBD_BRW_WRITE); /* no cached reads */
}
static int osc_refresh_count(const struct lu_env *env, struct osc_object *osc,
- struct osc_async_page *oap, int cmd)
+ struct osc_transfer_page *otp, int cmd)
{
- struct osc_page *opg = oap2osc_page(oap);
- pgoff_t index = osc_index(oap2osc(oap));
+ struct osc_page *opg = otp2osc_page(otp);
+ pgoff_t index = osc_index(otp2osc(otp));
struct cl_object *obj = osc2cl(osc);
struct cl_attr *attr = &osc_env_info(env)->oti_attr;
int result;
* async_flag maintenance
*/
static void osc_completion(const struct lu_env *env, struct osc_object *osc,
- struct osc_async_page *oap, enum cl_req_type crt,
+ struct osc_transfer_page *otp, enum cl_req_type crt,
int rc)
{
- struct osc_page *opg = oap2osc_page(oap);
- struct cl_page *page = oap2cl_page(oap);
+ struct osc_page *opg = otp2osc_page(otp);
+ struct cl_page *page = otp2cl_page(otp);
ENTRY;
/* As the transfer for this page is done, clear the flags */
- oap->oap_async_flags = 0;
+ otp->otp_flags = 0;
LASSERTF(equi(page->cp_state == CPS_PAGEIN,
crt == CRT_READ),
}
/**
- * The companion to osc_enter_cache(), called when @oap is no longer part of
+ * The companion to osc_enter_cache(), called when @otp is no longer part of
* the dirty accounting due to error.
*/
-static void osc_exit_cache(struct client_obd *cli, struct osc_async_page *oap)
+static void osc_exit_cache(struct client_obd *cli,
+ struct osc_transfer_page *otp)
{
spin_lock(&cli->cl_loi_list_lock);
- osc_release_write_grant(cli, &oap->oap_brw_page);
+ osc_release_write_grant(cli, &otp->otp_brw_page);
spin_unlock(&cli->cl_loi_list_lock);
}
* is available.
*/
static int osc_enter_cache_try(struct client_obd *cli,
- struct osc_async_page *oap,
+ struct osc_transfer_page *otp,
int bytes)
{
int rc;
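+ /* a page may enter the cache only while both the per-client and the global dirty-page limits have room */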
if (cli->cl_dirty_pages < cli->cl_dirty_max_pages) {
if (atomic_long_add_return(1, &obd_dirty_pages) <=
obd_max_dirty_pages) {
- osc_consume_write_grant(cli, &oap->oap_brw_page);
+ osc_consume_write_grant(cli, &otp->otp_brw_page);
rc = 1;
goto out;
} else
*/
static inline void cli_unlock_and_unplug(const struct lu_env *env,
struct client_obd *cli,
- struct osc_async_page *oap)
+ struct osc_transfer_page *otp)
{
spin_unlock(&cli->cl_loi_list_lock);
osc_io_unplug_async(env, cli, NULL);
CDEBUG(D_CACHE,
"%s: sleeping for cache space for %p\n",
- cli_name(cli), oap);
+ cli_name(cli), otp);
}
static inline void cli_lock_after_unplug(struct client_obd *cli)
* The process will be put to sleep if it has already run out of grant.
*/
static int osc_enter_cache(const struct lu_env *env, struct client_obd *cli,
- struct osc_object *osc, struct osc_async_page *oap,
- int bytes)
+ struct osc_object *osc,
+ struct osc_transfer_page *otp, int bytes)
{
struct lov_oinfo *loi = osc->oo_oinfo;
int rc = -EDQUOT;
*/
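+ /* sleep until grant becomes available or until no dirty pages and no writes remain in flight, unplugging queued I/O before each wait */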
remain = wait_event_idle_exclusive_timeout_cmd(
cli->cl_cache_waiters,
- (entered = osc_enter_cache_try(cli, oap, bytes)) ||
+ (entered = osc_enter_cache_try(cli, otp, bytes)) ||
(cli->cl_dirty_pages == 0 && cli->cl_w_in_flight == 0),
timeout,
- cli_unlock_and_unplug(env, cli, oap),
+ cli_unlock_and_unplug(env, cli, otp),
cli_lock_after_unplug(cli));
if (entered) {
list_del_init(item);
}
-/* maintain the osc's cli list membership invariants so that osc_send_oap_rpc
+/* maintain the osc's cli list membership invariants so that osc_send_otp_rpc
* can find pages to build into rpcs quickly */
static int __osc_list_maint(struct client_obd *cli, struct osc_object *osc)
{
/* attempt some read/write balancing by alternating between
* reads and writes in an object. The makes_rpc checks here
* would be redundant if we were getting read/write work items
- * instead of objects. we don't want send_oap_rpc to drain a
+ * instead of objects. We don't want send_otp_rpc to drain a
* partial read pending queue when we're given this object to
* do io on writes while there are cache waiters */
osc_object_lock(osc);
int osc_prep_async_page(struct osc_object *osc, struct osc_page *ops,
struct cl_page *page, loff_t offset)
{
- struct osc_async_page *oap = &ops->ops_oap;
+ struct osc_transfer_page *otp = &ops->ops_otp;
ENTRY;
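+ /* with no page attached, just report the space needed for a transfer page */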
if (!page)
- return round_up(sizeof(*oap), 8);
+ return round_up(sizeof(*otp), 8);
- oap->oap_obj = osc;
- oap->oap_page = page->cp_vmpage;
- oap->oap_obj_off = offset;
- oap->oap_page_off = 0;
- oap->oap_count = PAGE_SIZE;
+ otp->otp_obj = osc;
+ otp->otp_page = page->cp_vmpage;
+ otp->otp_obj_off = offset;
+ otp->otp_page_off = 0;
+ otp->otp_count = PAGE_SIZE;
LASSERT(!(offset & ~PAGE_MASK));
/* Count of transient (direct i/o) pages is always stable by the time
* cl_page_clip later to set this.
*/
if (page->cp_type == CPT_TRANSIENT)
- oap->oap_async_flags |= ASYNC_COUNT_STABLE|ASYNC_URGENT|
+ otp->otp_flags |= ASYNC_COUNT_STABLE|ASYNC_URGENT|
ASYNC_READY;
- INIT_LIST_HEAD(&oap->oap_pending_item);
- INIT_LIST_HEAD(&oap->oap_rpc_item);
+ INIT_LIST_HEAD(&otp->otp_pending_item);
+ INIT_LIST_HEAD(&otp->otp_rpc_item);
- CDEBUG(D_INFO, "oap %p vmpage %p obj off %llu\n",
- oap, oap->oap_page, oap->oap_obj_off);
+ CDEBUG(D_INFO, "otp %p vmpage %p obj off %llu\n",
+ otp, otp->otp_page, otp->otp_obj_off);
RETURN(0);
}
EXPORT_SYMBOL(osc_prep_async_page);
{
struct osc_io *oio = osc_env_io(env);
struct osc_extent *ext = NULL;
- struct osc_async_page *oap = &ops->ops_oap;
+ struct osc_transfer_page *otp = &ops->ops_otp;
struct client_obd *cli = osc_cli(osc);
struct pagevec *pvec = &osc_env_info(env)->oti_pagevec;
pgoff_t index;
if (cli->cl_import == NULL || cli->cl_import->imp_invalid)
RETURN(-EIO);
- if (!list_empty(&oap->oap_pending_item) ||
- !list_empty(&oap->oap_rpc_item))
+ if (!list_empty(&otp->otp_pending_item) ||
+ !list_empty(&otp->otp_rpc_item))
RETURN(-EBUSY);
/* Set the OBD_BRW_SRVLOCK before the page is queued. */
- brw_flags |= ops->ops_oap.oap_srvlock ? OBD_BRW_SRVLOCK : 0;
+ brw_flags |= ops->ops_otp.otp_srvlock ? OBD_BRW_SRVLOCK : 0;
if (io->ci_noquota) {
brw_flags |= OBD_BRW_NOQUOTA;
cmd |= OBD_BRW_NOQUOTA;
RETURN(rc);
}
- oap->oap_cmd = cmd;
+ otp->otp_cmd = cmd;
/* No need to hold a lock here,
* since this page is not in any list yet. */
- oap->oap_async_flags = 0;
- oap->oap_brw_flags = brw_flags;
+ otp->otp_flags = 0;
+ otp->otp_brw_flags = brw_flags;
- OSC_IO_DEBUG(osc, "oap %p page %p added for cmd %d\n",
- oap, oap->oap_page, oap->oap_cmd & OBD_BRW_RWMASK);
+ OSC_IO_DEBUG(osc, "otp %p page %p added for cmd %d\n",
+ otp, otp->otp_page, otp->otp_cmd & OBD_BRW_RWMASK);
- index = osc_index(oap2osc(oap));
+ index = osc_index(otp2osc(otp));
/* Add this page into extent by the following steps:
* 1. if there exists an active extent for this IO, mostly this page
/* it doesn't need any grant to dirty this page */
spin_lock(&cli->cl_loi_list_lock);
- rc = osc_enter_cache_try(cli, oap, grants);
+ rc = osc_enter_cache_try(cli, otp, grants);
if (rc == 0) { /* try failed */
grants = 0;
need_release = 1;
}
if (grants == 0) {
- rc = osc_enter_cache(env, cli, osc, oap, tmp);
+ rc = osc_enter_cache(env, cli, osc, otp, tmp);
if (rc == 0)
grants = tmp;
}
ext = osc_extent_find(env, osc, index, &tmp);
if (IS_ERR(ext)) {
LASSERT(tmp == grants);
- osc_exit_cache(cli, oap);
+ osc_exit_cache(cli, otp);
rc = PTR_ERR(ext);
ext = NULL;
} else {
if (ext != NULL) {
EASSERTF(ext->oe_end >= index && ext->oe_start <= index,
ext, "index = %lu.\n", index);
- LASSERT((oap->oap_brw_flags & OBD_BRW_FROM_GRANT) != 0);
+ LASSERT((otp->otp_brw_flags & OBD_BRW_FROM_GRANT) != 0);
osc_object_lock(osc);
if (ext->oe_nr_pages == 0)
- ext->oe_srvlock = ops->ops_oap.oap_srvlock;
+ ext->oe_srvlock = ops->ops_otp.otp_srvlock;
else
- LASSERT(ext->oe_srvlock == ops->ops_oap.oap_srvlock);
+ LASSERT(ext->oe_srvlock == ops->ops_otp.otp_srvlock);
++ext->oe_nr_pages;
- list_add_tail(&oap->oap_pending_item, &ext->oe_pages);
+ list_add_tail(&otp->otp_pending_item, &ext->oe_pages);
osc_object_unlock(osc);
if (!ext->oe_layout_version)
int osc_teardown_async_page(const struct lu_env *env,
struct osc_object *obj, struct osc_page *ops)
{
- struct osc_async_page *oap = &ops->ops_oap;
+ struct osc_transfer_page *otp = &ops->ops_otp;
int rc = 0;
ENTRY;
- CDEBUG(D_INFO, "teardown oap %p page %p at index %lu.\n",
- oap, ops, osc_index(oap2osc(oap)));
+ CDEBUG(D_INFO, "teardown otp %p page %p at index %lu.\n",
+ otp, ops, osc_index(otp2osc(otp)));
- if (!list_empty(&oap->oap_rpc_item)) {
- CDEBUG(D_CACHE, "oap %p is not in cache.\n", oap);
+ if (!list_empty(&otp->otp_rpc_item)) {
+ CDEBUG(D_CACHE, "otp %p is not in cache.\n", otp);
rc = -EBUSY;
- } else if (!list_empty(&oap->oap_pending_item)) {
+ } else if (!list_empty(&otp->otp_pending_item)) {
struct osc_extent *ext = NULL;
osc_object_lock(obj);
- ext = osc_extent_lookup(obj, osc_index(oap2osc(oap)));
+ ext = osc_extent_lookup(obj, osc_index(otp2osc(otp)));
osc_object_unlock(obj);
/* only truncated pages are allowed to be taken out.
* See osc_extent_truncate() and osc_cache_truncate_start()
* for details. */
if (ext != NULL && ext->oe_state != OES_TRUNC) {
OSC_EXTENT_DUMP(D_ERROR, ext, "trunc at %lu.\n",
- osc_index(oap2osc(oap)));
+ osc_index(otp2osc(otp)));
rc = -EBUSY;
}
if (ext != NULL)
struct osc_object *obj = osc_page_object(ops);
struct cl_page *cp = ops->ops_cl.cpl_page;
pgoff_t index = osc_index(ops);
- struct osc_async_page *oap = &ops->ops_oap;
+ struct osc_transfer_page *otp = &ops->ops_otp;
bool unplug = false;
int rc = 0;
ENTRY;
if (rc)
GOTO(out, rc);
- oap->oap_async_flags |= ASYNC_READY|ASYNC_URGENT;
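+ /* mark the page ready and urgent so the next RPC picks it up */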
+ otp->otp_flags |= ASYNC_READY|ASYNC_URGENT;
if (current->flags & PF_MEMALLOC)
ext->oe_memalloc = 1;
ext->oe_urgent = 1;
if (ext->oe_state == OES_CACHE) {
OSC_EXTENT_DUMP(D_CACHE, ext,
- "flush page %p make it urgent.\n", oap);
+ "flush page %p make it urgent.\n", otp);
if (list_empty(&ext->oe_link))
list_add_tail(&ext->oe_link, &obj->oo_urgent_exts);
unplug = true;
{
struct client_obd *cli = osc_cli(obj);
struct osc_io *oio = osc_env_io(env);
- struct osc_async_page *oap;
+ struct osc_transfer_page *otp;
struct osc_extent *ext;
struct osc_lock *oscl;
struct cl_page *page;
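+ /* charge write grant to every page in [from_page, to_page] */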
for (i = from_page; i <= to_page; i++) {
page = cdp->cdp_cl_pages[i];
opg = osc_cl_page_osc(page, obj);
- oap = &opg->ops_oap;
+ otp = &opg->ops_otp;
osc_consume_write_grant(cli,
- &oap->oap_brw_page);
+ &otp->otp_brw_page);
}
atomic_long_add(page_count, &obd_dirty_pages);
osc_unreserve_grant_nolock(cli, grants, 0);
struct osc_io *oio = osc_env_io(env);
struct client_obd *cli = osc_cli(obj);
struct osc_extent *ext;
- struct osc_async_page *oap;
+ struct osc_transfer_page *otp;
int page_count = 0;
int mppr = cli->cl_max_pages_per_rpc;
bool can_merge = true;
else
crt = CRT_WRITE;
- list_for_each_entry(oap, list, oap_pending_item) {
- struct osc_page *opg = oap2osc_page(oap);
+ list_for_each_entry(otp, list, otp_pending_item) {
+ struct osc_page *opg = otp2osc_page(otp);
pgoff_t index = osc_index(opg);
if (index > end)
++page_count;
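+ /* double max pages per RPC whenever the page count outgrows it */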
mppr <<= (page_count > mppr);
- if (unlikely(oap->oap_count < PAGE_SIZE))
+ if (unlikely(otp->otp_count < PAGE_SIZE))
can_merge = false;
}
ext = osc_extent_alloc(obj);
if (ext == NULL) {
- struct osc_async_page *tmp;
+ struct osc_transfer_page *tmp;
- list_for_each_entry_safe(oap, tmp, list, oap_pending_item) {
- list_del_init(&oap->oap_pending_item);
- osc_completion(env, obj, oap, crt, -ENOMEM);
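+ /* no memory for an extent: fail each queued page with -ENOMEM */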
+ list_for_each_entry_safe(otp, tmp, list, otp_pending_item) {
+ list_del_init(&otp->otp_pending_item);
+ osc_completion(env, obj, otp, crt, -ENOMEM);
}
RETURN(-ENOMEM);
}
CDEBUG(D_CACHE, "requesting %d bytes grant\n", grants);
spin_lock(&cli->cl_loi_list_lock);
if (osc_reserve_grant(cli, grants) == 0) {
- list_for_each_entry(oap, list, oap_pending_item) {
+ list_for_each_entry(otp, list, otp_pending_item) {
osc_consume_write_grant(cli,
- &oap->oap_brw_page);
+ &otp->otp_brw_page);
}
atomic_long_add(page_count, &obd_dirty_pages);
osc_unreserve_grant_nolock(cli, grants, 0);
ENTRY;
if (pga[0]->bp_page) {
- clpage = oap2cl_page(brw_page2oap(pga[0]));
+ clpage = otp2cl_page(brw_page2otp(pga[0]));
inode = clpage->cp_inode;
if (clpage->cp_type == CPT_TRANSIENT)
directio = true;
map_orig = brwpg->bp_page->mapping;
brwpg->bp_page->mapping = inode->i_mapping;
index_orig = brwpg->bp_page->index;
- clpage = oap2cl_page(brw_page2oap(brwpg));
+ clpage = otp2cl_page(brw_page2otp(brwpg));
brwpg->bp_page->index = clpage->cp_page_index;
}
data_page =
brwpg->bp_page = data_page;
/* there should be no gap in the middle of page array */
if (i == page_count - 1) {
- struct osc_async_page *oap =
- brw_page2oap(brwpg);
+ struct osc_transfer_page *otp =
+ brw_page2otp(brwpg);
- oa->o_size = oap->oap_count +
- oap->oap_obj_off + oap->oap_page_off;
+ oa->o_size = otp->otp_count +
+ otp->otp_obj_off + otp->otp_page_off;
}
/* len is forced to nunits, and relative offset to 0
* so store the old, clear text info
}
}
- if (brw_page2oap(pga[0])->oap_brw_flags & OBD_BRW_RDMA_ONLY) {
+ if (brw_page2otp(pga[0])->otp_brw_flags & OBD_BRW_RDMA_ONLY) {
enable_checksum = false;
short_io_size = 0;
gpu = 1;
aa->aa_ppga = pga;
aa->aa_cli = cli;
aa->aa_obj = clobj;
- INIT_LIST_HEAD(&aa->aa_oaps);
+ INIT_LIST_HEAD(&aa->aa_otps);
*reqp = req;
niobuf = req_capsule_client_get(pill, &RMF_NIOBUF_REMOTE);
}
/* get the inode from the first cl_page */
- clpage = oap2cl_page(brw_page2oap(aa->aa_ppga[0]));
+ clpage = otp2cl_page(brw_page2otp(aa->aa_ppga[0]));
inode = clpage->cp_inode;
if (clpage->cp_type == CPT_TRANSIENT && inode) {
blockbits = inode->i_blkbits;
unsigned int i;
clpage =
- oap2cl_page(brw_page2oap(brwpg));
+ otp2cl_page(brw_page2otp(brwpg));
lblk_num =
((u64)(clpage->cp_page_index) <<
(PAGE_SHIFT - blockbits)) +
"request %p != aa_request %p\n",
request, aa->aa_request);
/*
- * New request takes over pga and oaps from old request.
+ * New request takes over pga and otps from old request.
* Note that copying a list_head doesn't work, need to move it...
*/
aa->aa_resends++;
new_aa = ptlrpc_req_async_args(new_aa, new_req);
- INIT_LIST_HEAD(&new_aa->aa_oaps);
- list_splice_init(&aa->aa_oaps, &new_aa->aa_oaps);
+ INIT_LIST_HEAD(&new_aa->aa_otps);
+ list_splice_init(&aa->aa_otps, &new_aa->aa_otps);
INIT_LIST_HEAD(&new_aa->aa_exts);
list_splice_init(&aa->aa_exts, &new_aa->aa_exts);
new_aa->aa_resends = aa->aa_resends;
struct osc_brw_async_args *aa = args;
struct client_obd *cli = aa->aa_cli;
unsigned long transferred = 0;
- struct osc_async_page *last;
+ struct osc_transfer_page *last;
struct osc_extent *ext;
struct osc_extent *tmp;
struct lov_oinfo *loi;
rc = -EIO;
}
- last = brw_page2oap(aa->aa_ppga[aa->aa_page_count - 1]);
+ last = brw_page2otp(aa->aa_ppga[aa->aa_page_count - 1]);
obj = osc2cl(ext->oe_obj);
loi = cl2osc(obj)->oo_oinfo;
}
if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE) {
- loff_t last_off = last->oap_count + last->oap_obj_off +
- last->oap_page_off;
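+ /* file offset just past the last byte this RPC wrote */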
+ loff_t last_off = last->otp_count + last->otp_obj_off +
+ last->otp_page_off;
/* Change file size if this is an out of quota or
* direct IO write and it extends the file size */
}
/* Extend KMS if it's not a lockless write */
if (loi->loi_kms < last_off &&
- last->oap_srvlock == 0) {
+ last->otp_srvlock == 0) {
attr->cat_kms = last_off;
valid |= CAT_KMS;
}
rc && req->rq_no_delay ? -EAGAIN : rc);
}
LASSERT(list_empty(&aa->aa_exts));
- LASSERT(list_empty(&aa->aa_oaps));
+ LASSERT(list_empty(&aa->aa_otps));
transferred = (req->rq_bulk == NULL ? /* short io */
aa->aa_requested_nob :
struct brw_page **pga = NULL;
struct osc_brw_async_args *aa = NULL;
struct obdo *oa = NULL;
- struct osc_async_page *oap;
+ struct osc_transfer_page *otp;
struct osc_object *obj = NULL;
struct cl_req_attr *crattr = NULL;
loff_t starting_offset = OBD_OBJECT_EOF;
i = 0;
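+ /* flatten the pages of every extent into the brw_page array and track the overall I/O range */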
list_for_each_entry(ext, ext_list, oe_link) {
- list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
+ list_for_each_entry(otp, &ext->oe_pages, otp_pending_item) {
if (mem_tight)
- oap->oap_brw_flags |= OBD_BRW_MEMALLOC;
+ otp->otp_brw_flags |= OBD_BRW_MEMALLOC;
if (soft_sync)
- oap->oap_brw_flags |= OBD_BRW_SOFT_SYNC;
- pga[i] = &oap->oap_brw_page;
- pga[i]->bp_off = oap->oap_obj_off + oap->oap_page_off;
+ otp->otp_brw_flags |= OBD_BRW_SOFT_SYNC;
+ pga[i] = &otp->otp_brw_page;
+ pga[i]->bp_off = otp->otp_obj_off + otp->otp_page_off;
i++;
- list_add_tail(&oap->oap_rpc_item, &rpc_list);
+ list_add_tail(&otp->otp_rpc_item, &rpc_list);
if (starting_offset == OBD_OBJECT_EOF ||
- starting_offset > oap->oap_obj_off) {
- starting_offset = oap->oap_obj_off;
+ starting_offset > otp->otp_obj_off) {
+ starting_offset = otp->otp_obj_off;
} else {
- CDEBUG(D_CACHE, "page i:%d, oap->oap_obj_off %llu, oap->oap_page_off %u\n",
- i, oap->oap_obj_off, oap->oap_page_off);
- LASSERT(oap->oap_page_off == 0);
+ CDEBUG(D_CACHE, "page i:%d, otp->otp_obj_off %llu, otp->otp_page_off %u\n",
+ i, otp->otp_obj_off, otp->otp_page_off);
+ LASSERT(otp->otp_page_off == 0);
}
- if (ending_offset < oap->oap_obj_off + oap->oap_count) {
- ending_offset = oap->oap_obj_off +
- oap->oap_count;
+ if (ending_offset < otp->otp_obj_off + otp->otp_count) {
+ ending_offset = otp->otp_obj_off +
+ otp->otp_count;
} else {
- LASSERT(oap->oap_page_off + oap->oap_count ==
+ LASSERT(otp->otp_page_off + otp->otp_count ==
PAGE_SIZE);
}
}
}
/* first page in the list */
- oap = list_first_entry(&rpc_list, typeof(*oap), oap_rpc_item);
+ otp = list_first_entry(&rpc_list, typeof(*otp), otp_rpc_item);
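+ /* build the request attributes from the first page of the RPC */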
crattr = &osc_env_info(env)->oti_req_attr;
memset(crattr, 0, sizeof(*crattr));
crattr->cra_type = (cmd & OBD_BRW_WRITE) ? CRT_WRITE : CRT_READ;
crattr->cra_flags = ~0ULL;
- crattr->cra_page = oap2cl_page(oap);
+ crattr->cra_page = otp2cl_page(otp);
crattr->cra_oa = oa;
cl_req_attr_set(env, osc2cl(obj), crattr);
lustre_msg_set_jobid(req->rq_reqmsg, crattr->cra_jobid);
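+ /* the async args take over the page list and the extent list */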
aa = ptlrpc_req_async_args(aa, req);
- INIT_LIST_HEAD(&aa->aa_oaps);
- list_splice_init(&rpc_list, &aa->aa_oaps);
+ INIT_LIST_HEAD(&aa->aa_otps);
+ list_splice_init(&rpc_list, &aa->aa_otps);
INIT_LIST_HEAD(&aa->aa_exts);
list_splice_init(ext_list, &aa->aa_exts);
aa->aa_request = ptlrpc_request_addref(req);