if (ext->oe_sync && ext->oe_grants > 0)
GOTO(out, rc = 90);
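/* The bare constant above is a sanity-check failure identifier, not an
 * errno: osc_extent_sanity_check0() dumps the extent together with this
 * rc ("sanity check %p failed with rc = %d"), so the value pinpoints
 * which check fired. */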
- if (ext->oe_dlmlock != NULL) {
+ if (ext->oe_dlmlock != NULL && !ldlm_is_failed(ext->oe_dlmlock)) {
struct ldlm_extent *extent;
extent = &ext->oe_dlmlock->l_policy_data.l_extent;
/* For short writes we shouldn't count parts of pages that
* span a whole chunk on the OST side, or our accounting goes
* wrong. Should match the code in filter_grant_check. */
- int offset = last_off & ~CFS_PAGE_MASK;
+ int offset = last_off & ~PAGE_MASK;
int count = last_count + (offset & (blocksize - 1));
int end = (offset + last_count) & (blocksize - 1);
if (end)
	count += blocksize - end;
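/* A worked example of the grant arithmetic above, with illustrative
 * numbers: assume blocksize = 1024, PAGE_SIZE = 4096, and a short write
 * of last_count = 100 bytes at last_off = 8704 (512 bytes into its
 * page). Then offset = 8704 & ~PAGE_MASK = 512,
 * count = 100 + (512 & 1023) = 612, end = (512 + 100) & 1023 = 612, and
 * rounding out to the block boundary gives count = 1024. The OST then
 * charges grant for one 1024-byte block while the client consumed a
 * full page, so PAGE_SIZE - count = 3072 bytes of grant are lost. */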
if (rc == -ETIMEDOUT) {
OSC_EXTENT_DUMP(D_ERROR, ext,
"%s: wait ext to %u timedout, recovery in progress?\n",
- osc_export(obj)->exp_obd->obd_name, state);
+ cli_name(osc_cli(obj)), state);
lwi = LWI_INTR(NULL, NULL);
rc = l_wait_event(ext->oe_waitq, extent_wait_cb(ext, state),
		  &lwi);
if (rc < 0)
GOTO(out, rc);
- /* discard all pages with index greater then trunc_index */
+ /* discard all pages with index greater than trunc_index */
list_for_each_entry_safe(oap, tmp, &ext->oe_pages,
oap_pending_item) {
pgoff_t index = osc_index(oap2osc(oap));
{
struct osc_page *opg = oap2osc_page(oap);
struct cl_page *page = oap2cl_page(oap);
- struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);
enum cl_req_type crt;
int srvlock;
/* Clear opg->ops_transfer_pinned before VM lock is released. */
opg->ops_transfer_pinned = 0;
- spin_lock(&obj->oo_seatbelt);
- LASSERT(opg->ops_submitter != NULL);
- LASSERT(!list_empty(&opg->ops_inflight));
- list_del_init(&opg->ops_inflight);
- opg->ops_submitter = NULL;
- spin_unlock(&obj->oo_seatbelt);
-
opg->ops_submit_time = 0;
srvlock = oap->oap_brw_flags & OBD_BRW_SRVLOCK;
#define OSC_DUMP_GRANT(lvl, cli, fmt, args...) do { \
struct client_obd *__tmp = (cli); \
- CDEBUG(lvl, "%s: grant { dirty: %ld/%ld dirty_pages: %ld/%lu " \
+ CDEBUG(lvl, "%s: grant { dirty: %lu/%lu dirty_pages: %ld/%lu " \
"dropped: %ld avail: %ld, reserved: %ld, flight: %d }" \
"lru {in list: %ld, left: %ld, waiters: %d }"fmt"\n", \
- __tmp->cl_import->imp_obd->obd_name, \
+ cli_name(__tmp), \
__tmp->cl_dirty_pages, __tmp->cl_dirty_max_pages, \
atomic_long_read(&obd_dirty_pages), obd_max_dirty_pages, \
__tmp->cl_lost_grant, __tmp->cl_avail_grant, \
osc_io_unplug_async(env, cli, NULL);
CDEBUG(D_CACHE, "%s: sleeping for cache space @ %p for %p\n",
- cli->cl_import->imp_obd->obd_name, &ocw, oap);
+ cli_name(cli), &ocw, oap);
rc = l_wait_event(ocw.ocw_waitq, ocw_granted(cli, &ocw), &lwi);
default:
CDEBUG(D_CACHE, "%s: event for cache space @ %p never arrived "
"due to %d, fall back to sync i/o\n",
- cli->cl_import->imp_obd->obd_name, &ocw, rc);
+ cli_name(cli), &ocw, rc);
break;
}
EXIT;
static int
osc_send_write_rpc(const struct lu_env *env, struct client_obd *cli,
- struct osc_object *osc, pdl_policy_t pol)
+ struct osc_object *osc)
__must_hold(osc)
{
struct list_head rpclist = LIST_HEAD_INIT(rpclist);
if (!list_empty(&rpclist)) {
LASSERT(page_count > 0);
- rc = osc_build_rpc(env, cli, &rpclist, OBD_BRW_WRITE, pol);
+ rc = osc_build_rpc(env, cli, &rpclist, OBD_BRW_WRITE);
LASSERT(list_empty(&rpclist));
}
*/
static int
osc_send_read_rpc(const struct lu_env *env, struct client_obd *cli,
- struct osc_object *osc, pdl_policy_t pol)
+ struct osc_object *osc)
__must_hold(osc)
{
struct osc_extent *ext;
osc_object_unlock(osc);
LASSERT(page_count > 0);
- rc = osc_build_rpc(env, cli, &rpclist, OBD_BRW_READ, pol);
+ rc = osc_build_rpc(env, cli, &rpclist, OBD_BRW_READ);
LASSERT(list_empty(&rpclist));
osc_object_lock(osc);
}
/* called with the loi list lock held */
-static void osc_check_rpcs(const struct lu_env *env, struct client_obd *cli,
- pdl_policy_t pol)
+static void osc_check_rpcs(const struct lu_env *env, struct client_obd *cli)
__must_hold(&cli->cl_loi_list_lock)
{
struct osc_object *osc;
* do io on writes while there are cache waiters */
osc_object_lock(osc);
if (osc_makes_rpc(cli, osc, OBD_BRW_WRITE)) {
- rc = osc_send_write_rpc(env, cli, osc, pol);
+ rc = osc_send_write_rpc(env, cli, osc);
if (rc < 0) {
CERROR("Write request failed with %d\n", rc);
}
}
if (osc_makes_rpc(cli, osc, OBD_BRW_READ)) {
- rc = osc_send_read_rpc(env, cli, osc, pol);
+ rc = osc_send_read_rpc(env, cli, osc);
if (rc < 0)
CERROR("Read request failed with %d\n", rc);
}
}
static int osc_io_unplug0(const struct lu_env *env, struct client_obd *cli,
- struct osc_object *osc, pdl_policy_t pol, int async)
+ struct osc_object *osc, int async)
{
int rc = 0;
return 0;
if (!async) {
- /* disable osc_lru_shrink() temporarily to avoid
- * potential stack overrun problem. LU-2859 */
- atomic_inc(&cli->cl_lru_shrinkers);
spin_lock(&cli->cl_loi_list_lock);
- osc_check_rpcs(env, cli, pol);
+ osc_check_rpcs(env, cli);
spin_unlock(&cli->cl_loi_list_lock);
- atomic_dec(&cli->cl_lru_shrinkers);
} else {
CDEBUG(D_CACHE, "Queue writeback work for client %p.\n", cli);
LASSERT(cli->cl_writeback_work != NULL);
static int osc_io_unplug_async(const struct lu_env *env,
struct client_obd *cli, struct osc_object *osc)
{
- /* XXX: policy is no use actually. */
- return osc_io_unplug0(env, cli, osc, PDL_POLICY_ROUND, 1);
+ return osc_io_unplug0(env, cli, osc, 1);
}
void osc_io_unplug(const struct lu_env *env, struct client_obd *cli,
- struct osc_object *osc, pdl_policy_t pol)
+ struct osc_object *osc)
{
- (void)osc_io_unplug0(env, cli, osc, pol, 0);
+ (void)osc_io_unplug0(env, cli, osc, 0);
}
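/* A minimal usage sketch built from the wrappers above: with the policy
 * argument gone, a caller only chooses between synchronous and
 * asynchronous dispatch, e.g.
 *
 *	osc_io_unplug(env, cli, osc);		send RPCs in this context
 *	osc_io_unplug_async(env, cli, osc);	queue cl_writeback_work
 *
 * Both paths funnel into osc_io_unplug0(), which either runs
 * osc_check_rpcs() under cl_loi_list_lock or kicks the writeback work
 * item, as shown above. */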
int osc_prep_async_page(struct osc_object *osc, struct osc_page *ops,
oap->oap_page = page;
oap->oap_obj_off = offset;
- LASSERT(!(offset & ~CFS_PAGE_MASK));
+ LASSERT(!(offset & ~PAGE_MASK));
if (!client_is_remote(exp) && cfs_capable(CFS_CAP_SYS_RESOURCE))
oap->oap_brw_flags = OBD_BRW_NOQUOTA;
struct osc_object *obj, struct osc_page *ops)
{
struct osc_async_page *oap = &ops->ops_oap;
- struct osc_extent *ext = NULL;
int rc = 0;
ENTRY;
CDEBUG(D_INFO, "teardown oap %p page %p at index %lu.\n",
oap, ops, osc_index(oap2osc(oap)));
- osc_object_lock(obj);
if (!list_empty(&oap->oap_rpc_item)) {
CDEBUG(D_CACHE, "oap %p is not in cache.\n", oap);
rc = -EBUSY;
} else if (!list_empty(&oap->oap_pending_item)) {
+ struct osc_extent *ext = NULL;
+
+ osc_object_lock(obj);
ext = osc_extent_lookup(obj, osc_index(oap2osc(oap)));
+ osc_object_unlock(obj);
/* only truncated pages are allowed to be taken out.
* See osc_extent_truncate() and osc_cache_truncate_start()
* for details. */
osc_index(oap2osc(oap)));
rc = -EBUSY;
}
+ if (ext != NULL)
+ osc_extent_put(env, ext);
}
- osc_object_unlock(obj);
- if (ext != NULL)
- osc_extent_put(env, ext);
RETURN(rc);
}
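/* Note on the narrowed locking above: osc_object_lock() now covers only
 * the extent lookup, and the final osc_extent_put() runs after the
 * unlock, presumably because dropping the last extent reference can
 * reach LDLM_LOCK_PUT() and must not happen under the object lock. */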
/**
* Called by osc_io_setattr_start() to freeze and destroy covering extents.
*/
-int osc_cache_truncate_start(const struct lu_env *env, struct osc_io *oio,
- struct osc_object *obj, __u64 size)
+int osc_cache_truncate_start(const struct lu_env *env, struct osc_object *obj,
+ __u64 size, struct osc_extent **extp)
{
struct client_obd *cli = osc_cli(obj);
struct osc_extent *ext;
/* we need to hold this extent in OES_TRUNC state so
* that no writeback will happen. This is to avoid
- * BUG 17397. */
- LASSERT(oio->oi_trunc == NULL);
- oio->oi_trunc = osc_extent_get(ext);
+ * BUG 17397.
+ * Only a partial truncate can reach here; if @size is
+ * not zero, the caller should provide a valid @extp. */
+ LASSERT(*extp == NULL);
+ *extp = osc_extent_get(ext);
OSC_EXTENT_DUMP(D_CACHE, ext,
"trunc at "LPU64"\n", size);
}
/**
* Called after osc_io_setattr_end to add oio->oi_trunc back to cache.
*/
-void osc_cache_truncate_end(const struct lu_env *env, struct osc_io *oio,
- struct osc_object *obj)
+void osc_cache_truncate_end(const struct lu_env *env, struct osc_extent *ext)
{
- struct osc_extent *ext = oio->oi_trunc;
-
- oio->oi_trunc = NULL;
if (ext != NULL) {
+ struct osc_object *obj = ext->oe_obj;
bool unplug = false;
EASSERT(ext->oe_nr_pages > 0, ext);
}
if (unplug)
- osc_io_unplug(env, osc_cli(obj), obj, PDL_POLICY_ROUND);
+ osc_io_unplug(env, osc_cli(obj), obj);
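/* A minimal caller sketch for the reworked pair, assuming the caller
 * parks the held extent in oio->oi_trunc as the comments above suggest:
 *
 *	struct osc_extent *ext = NULL;
 *
 *	rc = osc_cache_truncate_start(env, obj, size, &ext);
 *	... setattr RPC ...
 *	osc_cache_truncate_end(env, ext);	drops the OES_TRUNC hold
 */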
if (hp || discard) {
int rc;
/* page is top page. */
info->oti_next_index = osc_index(ops) + 1;
if (cl_page_own(env, io, page) == 0) {
- KLASSERT(ergo(page->cp_type == CPT_CACHEABLE,
- !PageDirty(cl_page_vmpage(page))));
+ if (!ergo(page->cp_type == CPT_CACHEABLE,
+ !PageDirty(cl_page_vmpage(page))))
+ CL_PAGE_DEBUG(D_ERROR, env, page,
+ "discard dirty page?\n");
/* discard the page */
cl_page_discard(env, io, page);
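/* The change above softens a fatal KLASSERT into a CL_PAGE_DEBUG report:
 * a CPT_CACHEABLE page found dirty at discard time is now logged and
 * discarded instead of tripping an assertion. */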