if (cur->oe_max_end != victim->oe_max_end)
return -ERANGE;
- /*
- * In the rare case max_pages_per_rpc (mppr) is changed, don't
- * merge extents until after old ones have been sent, or the
- * "extents are aligned to RPCs" checks are unhappy.
- */
- if (cur->oe_mppr != victim->oe_mppr)
- return -ERANGE;
-
LASSERT(cur->oe_dlmlock == victim->oe_dlmlock);
ppc_bits = osc_cli(obj)->cl_chunkbits - PAGE_SHIFT;
chunk_start = cur->oe_start >> ppc_bits;
cur->oe_urgent |= victim->oe_urgent;
cur->oe_memalloc |= victim->oe_memalloc;
list_splice_init(&victim->oe_pages, &cur->oe_pages);
+ list_del_init(&victim->oe_link);
victim->oe_nr_pages = 0;
osc_extent_get(victim);
cur->oe_start = descr->cld_start;
if (cur->oe_end > max_end)
cur->oe_end = max_end;
- cur->oe_grants = chunksize + cli->cl_grant_extent_tax;
+ cur->oe_grants = 0;
cur->oe_mppr = max_pages;
if (olck->ols_dlmlock != NULL) {
LASSERT(olck->ols_hold);
* flushed, try next one. */
continue;
- if (osc_extent_merge(env, ext, cur) == 0) {
+ /* Check whether the extents belong to the same RPC slot before
+ * trying to merge; to reach this point the extents are already
+ * known to be non-overlapping and contiguous at chunk level. */
+ if (ext->oe_max_end != max_end)
+ /* if they don't belong to the same RPC slot or
+ * max_pages_per_rpc has ever changed, do not merge. */
+ continue;
+
+ /* check whether maximum extent size will be hit */
+ if ((ext_chk_end - ext_chk_start + 1 + 1) << ppc_bits >
+ cli->cl_max_extent_pages)
+ continue;
+
+ /* It is required that an extent be contiguous at chunk
+ * level so that we know the whole extent is covered by grant
+ * (the pages in the extent are NOT required to be contiguous).
+ * Otherwise it would be too difficult to know which
+ * chunks have grants allocated. */
+
+ /* try to do front merge - extend ext's start */
+ if (chunk + 1 == ext_chk_start) {
+ /* ext must be chunk size aligned */
+ EASSERT((ext->oe_start & ~chunk_mask) == 0, ext);
+
+ /* pull ext's start back to cover cur */
+ ext->oe_start = cur->oe_start;
+ ext->oe_grants += chunksize;
LASSERT(*grants >= chunksize);
*grants -= chunksize;
+
found = osc_extent_hold(ext);
+ } else if (chunk == ext_chk_end + 1) {
+ /* rear merge */
+ ext->oe_end = cur->oe_end;
+ ext->oe_grants += chunksize;
+ LASSERT(*grants >= chunksize);
+ *grants -= chunksize;
- /*
- * Try to merge with the next one too because we
- * might have just filled in a gap.
- */
+ /* try to merge with the next one because we may have
+ * just filled in a gap */
if (osc_extent_merge(env, ext, next_extent(ext)) == 0)
/* we can save extent tax from next extent */
*grants += cli->cl_grant_extent_tax;
- break;
+ found = osc_extent_hold(ext);
}
+ if (found != NULL)
+ break;
}
osc_extent_tree_dump(D_CACHE, obj);
} else if (conflict == NULL) {
/* create a new extent */
EASSERT(osc_extent_is_overlapped(obj, cur) == 0, cur);
+ cur->oe_grants = chunksize + cli->cl_grant_extent_tax;
LASSERT(*grants >= cur->oe_grants);
*grants -= cur->oe_grants;
* the size of file. */
if (!(last->oap_async_flags & ASYNC_COUNT_STABLE)) {
int last_oap_count = osc_refresh_count(env, last, OBD_BRW_WRITE);
- LASSERT(last_oap_count > 0);
+ LASSERTF(last_oap_count > 0,
+ "last_oap_count %d\n", last_oap_count);
LASSERT(last->oap_page_off + last_oap_count <= PAGE_SIZE);
last->oap_count = last_oap_count;
spin_lock(&last->oap_lock);
{
struct osc_thread_info *info = osc_env_info(env);
struct osc_object *osc = cbdata;
+ struct cl_page *page = ops->ops_cl.cpl_page;
pgoff_t index;
+ bool discard = false;
index = osc_index(ops);
- if (index >= info->oti_fn_index) {
- struct ldlm_lock *tmp;
- struct cl_page *page = ops->ops_cl.cpl_page;
+ /* negative lock caching */
+ if (index < info->oti_ng_index) {
+ discard = true;
+ } else if (index >= info->oti_fn_index) {
+ struct ldlm_lock *tmp;
/* refresh non-overlapped index */
tmp = osc_dlmlock_at_pgoff(env, osc, index,
- OSC_DAP_FL_TEST_LOCK);
+ OSC_DAP_FL_TEST_LOCK |
+ OSC_DAP_FL_AST | OSC_DAP_FL_RIGHT);
if (tmp != NULL) {
__u64 end = tmp->l_policy_data.l_extent.end;
- /* Cache the first-non-overlapped index so as to skip
- * all pages within [index, oti_fn_index). This is safe
- * because if tmp lock is canceled, it will discard
- * these pages. */
- info->oti_fn_index = cl_index(osc2cl(osc), end + 1);
- if (end == OBD_OBJECT_EOF)
- info->oti_fn_index = CL_PAGE_EOF;
+ __u64 start = tmp->l_policy_data.l_extent.start;
+
+ /* no lock covering this page */
+ if (index < cl_index(osc2cl(osc), start)) {
+ /* no lock at @index, first lock at @start */
+ info->oti_ng_index = cl_index(osc2cl(osc),
+ start);
+ discard = true;
+ } else {
+ /* Cache the first-non-overlapped index so as to
+ * skip all pages within [index, oti_fn_index).
+ * This is safe because if tmp lock is canceled,
+ * it will discard these pages.
+ */
+ info->oti_fn_index = cl_index(osc2cl(osc),
+ end + 1);
+ if (end == OBD_OBJECT_EOF)
+ info->oti_fn_index = CL_PAGE_EOF;
+ }
LDLM_LOCK_PUT(tmp);
- } else if (cl_page_own(env, io, page) == 0) {
- /* discard the page */
+ } else {
+ info->oti_ng_index = CL_PAGE_EOF;
+ discard = true;
+ }
+ }
+
+ if (discard) {
+ if (cl_page_own(env, io, page) == 0) {
cl_page_discard(env, io, page);
cl_page_disown(env, io, page);
} else {
cb = discard ? osc_discard_cb : check_and_discard_cb;
info->oti_fn_index = info->oti_next_index = start;
+ info->oti_ng_index = 0;
osc_page_gang_lookup(env, io, osc,
info->oti_next_index, end, cb, osc);