*buf++ = ext->oe_rw ? 'r' : 'w';
if (ext->oe_intree)
*buf++ = 'i';
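+ /* 'S' marks a sync IO extent (oe_sync); such extents are built by
+ * osc_queue_sync_pages() and carry no grant */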
+ if (ext->oe_sync)
+ *buf++ = 'S';
if (ext->oe_srvlock)
*buf++ = 's';
if (ext->oe_hp)
return flags;
}
-static inline char list_empty_marker(cfs_list_t *list)
+static inline char list_empty_marker(struct list_head *list)
{
- return cfs_list_empty(list) ? '-' : '+';
+ return list_empty(list) ? '-' : '+';
}
#define EXTSTR "[%lu -> %lu/%lu]"
static const char *oes_strings[] = {
"inv", "active", "cache", "locking", "lockdone", "rpc", "trunc", NULL };
-#define OSC_EXTENT_DUMP(lvl, extent, fmt, ...) do { \
- struct osc_extent *__ext = (extent); \
- char __buf[16]; \
- \
- CDEBUG(lvl, \
- "extent %p@{" EXTSTR ", " \
- "[%d|%d|%c|%s|%s|%p], [%d|%d|%c|%c|%p|%u|%p]} " fmt, \
- /* ----- extent part 0 ----- */ \
- __ext, EXTPARA(__ext), \
- /* ----- part 1 ----- */ \
- atomic_read(&__ext->oe_refc), \
- atomic_read(&__ext->oe_users), \
- list_empty_marker(&__ext->oe_link), \
- oes_strings[__ext->oe_state], ext_flags(__ext, __buf), \
- __ext->oe_obj, \
- /* ----- part 2 ----- */ \
- __ext->oe_grants, __ext->oe_nr_pages, \
- list_empty_marker(&__ext->oe_pages), \
- waitqueue_active(&__ext->oe_waitq) ? '+' : '-', \
- __ext->oe_osclock, __ext->oe_mppr, __ext->oe_owner, \
- /* ----- part 4 ----- */ \
- ## __VA_ARGS__); \
+#define OSC_EXTENT_DUMP(lvl, extent, fmt, ...) do { \
+ struct osc_extent *__ext = (extent); \
+ char __buf[16]; \
+ \
+ CDEBUG(lvl, \
+ "extent %p@{" EXTSTR ", " \
+ "[%d|%d|%c|%s|%s|%p], [%d|%d|%c|%c|%p|%u|%p]} " fmt, \
+ /* ----- extent part 0 ----- */ \
+ __ext, EXTPARA(__ext), \
+ /* ----- part 1 ----- */ \
+ atomic_read(&__ext->oe_refc), \
+ atomic_read(&__ext->oe_users), \
+ list_empty_marker(&__ext->oe_link), \
+ oes_strings[__ext->oe_state], ext_flags(__ext, __buf), \
+ __ext->oe_obj, \
+ /* ----- part 2 ----- */ \
+ __ext->oe_grants, __ext->oe_nr_pages, \
+ list_empty_marker(&__ext->oe_pages), \
+ waitqueue_active(&__ext->oe_waitq) ? '+' : '-', \
+ __ext->oe_dlmlock, __ext->oe_mppr, __ext->oe_owner, \
+ /* ----- part 4 ----- */ \
+ ## __VA_ARGS__); \
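+ /* also dump the backing DLM lock, at error level when lvl is D_ERROR; \
+ * LDLM_DEBUG() tolerates a NULL lock */ \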
+ if (lvl == D_ERROR && __ext->oe_dlmlock != NULL) \
+ LDLM_ERROR(__ext->oe_dlmlock, "extent: %p\n", __ext); \
+ else \
+ LDLM_DEBUG(__ext->oe_dlmlock, "extent: %p\n", __ext); \
} while (0)
#undef EASSERTF
switch (ext->oe_state) {
case OES_INV:
- if (ext->oe_nr_pages > 0 || !cfs_list_empty(&ext->oe_pages))
+ if (ext->oe_nr_pages > 0 || !list_empty(&ext->oe_pages))
GOTO(out, rc = 35);
GOTO(out, rc = 0);
break;
if (ext->oe_max_end < ext->oe_end || ext->oe_end < ext->oe_start)
GOTO(out, rc = 80);
- if (ext->oe_osclock == NULL && ext->oe_grants > 0)
+ if (ext->oe_sync && ext->oe_grants > 0)
GOTO(out, rc = 90);
- if (ext->oe_osclock) {
- struct cl_lock_descr *descr;
- descr = &ext->oe_osclock->cll_descr;
- if (!(descr->cld_start <= ext->oe_start &&
- descr->cld_end >= ext->oe_max_end))
+ if (ext->oe_dlmlock != NULL) {
+ struct ldlm_extent *extent;
+
+ extent = &ext->oe_dlmlock->l_policy_data.l_extent;
+ if (!(extent->start <= cl_offset(osc2cl(obj), ext->oe_start) &&
+ extent->end >= cl_offset(osc2cl(obj), ext->oe_max_end)))
GOTO(out, rc = 100);
+
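+ /* an extent caching dirty pages must be covered by a granted
+ * PW or GROUP mode lock */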
+ if (!(ext->oe_dlmlock->l_granted_mode & (LCK_PW | LCK_GROUP)))
+ GOTO(out, rc = 102);
}
if (ext->oe_nr_pages > ext->oe_mppr)
GOTO(out, rc = 0);
page_count = 0;
- cfs_list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
+ list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
pgoff_t index = osc_index(oap2osc(oap));
++page_count;
if (index > ext->oe_end || index < ext->oe_start)
ext->oe_obj = obj;
atomic_set(&ext->oe_refc, 1);
atomic_set(&ext->oe_users, 0);
- CFS_INIT_LIST_HEAD(&ext->oe_link);
+ INIT_LIST_HEAD(&ext->oe_link);
ext->oe_state = OES_INV;
- CFS_INIT_LIST_HEAD(&ext->oe_pages);
+ INIT_LIST_HEAD(&ext->oe_pages);
init_waitqueue_head(&ext->oe_waitq);
- ext->oe_osclock = NULL;
+ ext->oe_dlmlock = NULL;
return ext;
}
{
LASSERT(atomic_read(&ext->oe_refc) > 0);
if (atomic_dec_and_test(&ext->oe_refc)) {
- LASSERT(cfs_list_empty(&ext->oe_link));
+ LASSERT(list_empty(&ext->oe_link));
LASSERT(atomic_read(&ext->oe_users) == 0);
LASSERT(ext->oe_state == OES_INV);
LASSERT(!ext->oe_intree);
- if (ext->oe_osclock) {
- cl_lock_put(env, ext->oe_osclock);
- ext->oe_osclock = NULL;
+ if (ext->oe_dlmlock != NULL) {
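+ /* drop the lock reference taken in osc_extent_find() */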
+ lu_ref_del(&ext->oe_dlmlock->l_reference,
+ "osc_extent", ext);
+ LDLM_LOCK_PUT(ext->oe_dlmlock);
+ ext->oe_dlmlock = NULL;
}
osc_extent_free(ext);
}
else if (ext->oe_start > tmp->oe_end)
n = &(*n)->rb_right;
else
- EASSERTF(0, tmp, EXTSTR, EXTPARA(ext));
+ EASSERTF(0, tmp, EXTSTR"\n", EXTPARA(ext));
}
rb_link_node(&ext->oe_node, parent, n);
rb_insert_color(&ext->oe_node, &obj->oo_root);
osc_update_pending(obj, OBD_BRW_WRITE, -ext->oe_nr_pages);
}
atomic_inc(&ext->oe_users);
- cfs_list_del_init(&ext->oe_link);
+ list_del_init(&ext->oe_link);
return osc_extent_get(ext);
}
static void __osc_extent_remove(struct osc_extent *ext)
{
LASSERT(osc_object_is_locked(ext->oe_obj));
- LASSERT(cfs_list_empty(&ext->oe_pages));
+ LASSERT(list_empty(&ext->oe_pages));
osc_extent_erase(ext);
- cfs_list_del_init(&ext->oe_link);
+ list_del_init(&ext->oe_link);
osc_extent_state_set(ext, OES_INV);
OSC_EXTENT_DUMP(D_CACHE, ext, "destroyed.\n");
}
if (cur->oe_max_end != victim->oe_max_end)
return -ERANGE;
- LASSERT(cur->oe_osclock == victim->oe_osclock);
+ LASSERT(cur->oe_dlmlock == victim->oe_dlmlock);
ppc_bits = osc_cli(obj)->cl_chunkbits - PAGE_CACHE_SHIFT;
chunk_start = cur->oe_start >> ppc_bits;
chunk_end = cur->oe_end >> ppc_bits;
/* only the following bits are needed to merge */
cur->oe_urgent |= victim->oe_urgent;
cur->oe_memalloc |= victim->oe_memalloc;
- cfs_list_splice_init(&victim->oe_pages, &cur->oe_pages);
- cfs_list_del_init(&victim->oe_link);
+ list_splice_init(&victim->oe_pages, &cur->oe_pages);
+ list_del_init(&victim->oe_link);
victim->oe_nr_pages = 0;
osc_extent_get(victim);
osc_extent_merge(env, ext, next_extent(ext));
if (ext->oe_urgent)
- cfs_list_move_tail(&ext->oe_link,
- &obj->oo_urgent_exts);
+ list_move_tail(&ext->oe_link,
+ &obj->oo_urgent_exts);
}
osc_object_unlock(obj);
struct osc_extent *osc_extent_find(const struct lu_env *env,
struct osc_object *obj, pgoff_t index,
int *grants)
-
{
struct client_obd *cli = osc_cli(obj);
- struct cl_lock *lock;
+ struct osc_lock *olck;
+ struct cl_lock_descr *descr;
struct osc_extent *cur;
struct osc_extent *ext;
struct osc_extent *conflict = NULL;
if (cur == NULL)
RETURN(ERR_PTR(-ENOMEM));
- lock = cl_lock_at_pgoff(env, osc2cl(obj), index, NULL, 1, 0);
- LASSERT(lock != NULL);
- LASSERT(lock->cll_descr.cld_mode >= CLM_WRITE);
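+ /* the write lock covering this IO is stashed in the osc IO
+ * environment, so no per-page cl_lock lookup is needed anymore */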
+ olck = osc_env_io(env)->oi_write_osclock;
+ LASSERTF(olck != NULL, "page %lu is not covered by lock\n", index);
+ LASSERT(olck->ols_state == OLS_GRANTED);
+
+ descr = &olck->ols_cl.cls_lock->cll_descr;
+ LASSERT(descr->cld_mode >= CLM_WRITE);
LASSERT(cli->cl_chunkbits >= PAGE_CACHE_SHIFT);
ppc_bits = cli->cl_chunkbits - PAGE_CACHE_SHIFT;
max_pages = cli->cl_max_pages_per_rpc;
LASSERT((max_pages & ~chunk_mask) == 0);
max_end = index - (index % max_pages) + max_pages - 1;
- max_end = min_t(pgoff_t, max_end, lock->cll_descr.cld_end);
+ max_end = min_t(pgoff_t, max_end, descr->cld_end);
/* initialize new extent by parameters so far */
cur->oe_max_end = max_end;
cur->oe_start = index & chunk_mask;
cur->oe_end = ((index + ~chunk_mask + 1) & chunk_mask) - 1;
- if (cur->oe_start < lock->cll_descr.cld_start)
- cur->oe_start = lock->cll_descr.cld_start;
+ if (cur->oe_start < descr->cld_start)
+ cur->oe_start = descr->cld_start;
if (cur->oe_end > max_end)
cur->oe_end = max_end;
- cur->oe_osclock = lock;
cur->oe_grants = 0;
cur->oe_mppr = max_pages;
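+ /* take an extra reference so the DLM lock stays alive at least
+ * as long as this extent */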
+ if (olck->ols_dlmlock != NULL) {
+ LASSERT(olck->ols_hold);
+ cur->oe_dlmlock = LDLM_LOCK_GET(olck->ols_dlmlock);
+ lu_ref_add(&olck->ols_dlmlock->l_reference, "osc_extent", cur);
+ }
/* grants has been allocated by caller */
LASSERTF(*grants >= chunksize + cli->cl_extent_tax,
"%u/%u/%u.\n", *grants, chunksize, cli->cl_extent_tax);
- LASSERTF((max_end - cur->oe_start) < max_pages, EXTSTR, EXTPARA(cur));
+ LASSERTF((max_end - cur->oe_start) < max_pages, EXTSTR"\n",
+ EXTPARA(cur));
restart:
osc_object_lock(obj);
break;
/* if covering by different locks, no chance to match */
- if (lock != ext->oe_osclock) {
+ if (olck->ols_dlmlock != ext->oe_dlmlock) {
EASSERTF(!overlapped(ext, cur), ext,
- EXTSTR, EXTPARA(cur));
+ EXTSTR"\n", EXTPARA(cur));
ext = next_extent(ext);
continue;
* full contain. */
EASSERTF((ext->oe_start <= cur->oe_start &&
ext->oe_end >= cur->oe_end),
- ext, EXTSTR, EXTPARA(cur));
+ ext, EXTSTR"\n", EXTPARA(cur));
if (ext->oe_state > OES_CACHE || ext->oe_fsync_wait) {
/* for simplicity, we wait for this extent to
if (found != NULL) {
LASSERT(conflict == NULL);
if (!IS_ERR(found)) {
- LASSERT(found->oe_osclock == cur->oe_osclock);
+ LASSERT(found->oe_dlmlock == cur->oe_dlmlock);
OSC_EXTENT_DUMP(D_CACHE, found,
"found caching ext for %lu.\n", index);
}
found = osc_extent_hold(cur);
osc_extent_insert(obj, cur);
OSC_EXTENT_DUMP(D_CACHE, cur, "add into tree %lu/%lu.\n",
- index, lock->cll_descr.cld_end);
+ index, descr->cld_end);
}
osc_object_unlock(obj);
EASSERT(ergo(rc == 0, ext->oe_state == OES_RPC), ext);
osc_lru_add_batch(cli, &ext->oe_pages);
- cfs_list_for_each_entry_safe(oap, tmp, &ext->oe_pages,
+ list_for_each_entry_safe(oap, tmp, &ext->oe_pages,
oap_pending_item) {
- cfs_list_del_init(&oap->oap_rpc_item);
- cfs_list_del_init(&oap->oap_pending_item);
+ list_del_init(&oap->oap_rpc_item);
+ list_del_init(&oap->oap_pending_item);
if (last_off <= oap->oap_obj_off) {
last_off = oap->oap_obj_off;
last_count = oap->oap_count;
GOTO(out, rc);
/* discard all pages with index greater than trunc_index */
- cfs_list_for_each_entry_safe(oap, tmp, &ext->oe_pages,
+ list_for_each_entry_safe(oap, tmp, &ext->oe_pages,
oap_pending_item) {
pgoff_t index = osc_index(oap2osc(oap));
struct cl_page *page = oap2cl_page(oap);
- LASSERT(cfs_list_empty(&oap->oap_rpc_item));
+ LASSERT(list_empty(&oap->oap_rpc_item));
/* only discard the pages with their index greater than
* trunc_index, and ... */
continue;
}
- cfs_list_del_init(&oap->oap_pending_item);
+ list_del_init(&oap->oap_pending_item);
cl_page_get(page);
lu_ref_add(&page->cp_reference, "truncate", current);
OSC_EXTENT_DUMP(D_CACHE, ext, "make ready\n");
- cfs_list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
+ list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
++page_count;
if (last == NULL || last->oap_obj_off < oap->oap_obj_off)
last = oap;
/* for the rest of the pages, we don't need to call osc_refresh_count()
* because it's known they are not the last page */
- cfs_list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
+ list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
if (!(oap->oap_async_flags & ASYNC_COUNT_STABLE)) {
oap->oap_count = PAGE_CACHE_SIZE - oap->oap_page_off;
spin_lock(&oap->oap_lock);
OSC_EXTENT_DUMP(level, ext, "in tree %d.\n", cnt++);
cnt = 1;
- cfs_list_for_each_entry(ext, &obj->oo_hp_exts, oe_link)
+ list_for_each_entry(ext, &obj->oo_hp_exts, oe_link)
OSC_EXTENT_DUMP(level, ext, "hp %d.\n", cnt++);
cnt = 1;
- cfs_list_for_each_entry(ext, &obj->oo_urgent_exts, oe_link)
+ list_for_each_entry(ext, &obj->oo_urgent_exts, oe_link)
OSC_EXTENT_DUMP(level, ext, "urgent %d.\n", cnt++);
cnt = 1;
- cfs_list_for_each_entry(ext, &obj->oo_reading_exts, oe_link)
+ list_for_each_entry(ext, &obj->oo_reading_exts, oe_link)
OSC_EXTENT_DUMP(level, ext, "reading %d.\n", cnt++);
/* osc_object_unlock(obj); */
}
static inline int osc_is_ready(struct osc_object *osc)
{
- return !cfs_list_empty(&osc->oo_ready_item) ||
- !cfs_list_empty(&osc->oo_hp_ready_item);
+ return !list_empty(&osc->oo_ready_item) ||
+ !list_empty(&osc->oo_hp_ready_item);
}
#define OSC_IO_DEBUG(OSC, STR, args...) \
spin_lock(&obj->oo_seatbelt);
LASSERT(opg->ops_submitter != NULL);
- LASSERT(!cfs_list_empty(&opg->ops_inflight));
- cfs_list_del_init(&opg->ops_inflight);
+ LASSERT(!list_empty(&opg->ops_inflight));
+ list_del_init(&opg->ops_inflight);
opg->ops_submitter = NULL;
spin_unlock(&obj->oo_seatbelt);
#define OSC_DUMP_GRANT(lvl, cli, fmt, args...) do { \
struct client_obd *__tmp = (cli); \
- CDEBUG(lvl, "%s: grant { dirty: %ld/%ld dirty_pages: %d/%d " \
- "unstable_pages: %d/%d dropped: %ld avail: %ld, " \
- "reserved: %ld, flight: %d } lru {in list: %d, " \
- "left: %d, waiters: %d }" fmt, \
+ CDEBUG(lvl, "%s: grant { dirty: %ld/%ld dirty_pages: %ld/%lu " \
+ "dropped: %ld avail: %ld, reserved: %ld, flight: %d }" \
+ "lru {in list: %ld, left: %ld, waiters: %d }" fmt, \
__tmp->cl_import->imp_obd->obd_name, \
- __tmp->cl_dirty, __tmp->cl_dirty_max, \
- atomic_read(&obd_dirty_pages), obd_max_dirty_pages, \
- atomic_read(&obd_unstable_pages), obd_max_dirty_pages, \
+ __tmp->cl_dirty_pages, __tmp->cl_dirty_max_pages, \
+ atomic_long_read(&obd_dirty_pages), obd_max_dirty_pages, \
__tmp->cl_lost_grant, __tmp->cl_avail_grant, \
__tmp->cl_reserved_grant, __tmp->cl_w_in_flight, \
- atomic_read(&__tmp->cl_lru_in_list), \
- atomic_read(&__tmp->cl_lru_busy), \
+ atomic_long_read(&__tmp->cl_lru_in_list), \
+ atomic_long_read(&__tmp->cl_lru_busy), \
atomic_read(&__tmp->cl_lru_shrinkers), ##args); \
} while (0)
{
assert_spin_locked(&cli->cl_loi_list_lock.lock);
LASSERT(!(pga->flag & OBD_BRW_FROM_GRANT));
- atomic_inc(&obd_dirty_pages);
- cli->cl_dirty += PAGE_CACHE_SIZE;
+ atomic_long_inc(&obd_dirty_pages);
+ cli->cl_dirty_pages++;
pga->flag |= OBD_BRW_FROM_GRANT;
CDEBUG(D_CACHE, "using %lu grant credits for brw %p page %p\n",
PAGE_CACHE_SIZE, pga, pga->pg);
}
pga->flag &= ~OBD_BRW_FROM_GRANT;
- atomic_dec(&obd_dirty_pages);
- cli->cl_dirty -= PAGE_CACHE_SIZE;
+ atomic_long_dec(&obd_dirty_pages);
+ cli->cl_dirty_pages--;
if (pga->flag & OBD_BRW_NOCACHE) {
pga->flag &= ~OBD_BRW_NOCACHE;
- atomic_dec(&obd_dirty_transit_pages);
- cli->cl_dirty_transit -= PAGE_CACHE_SIZE;
+ atomic_long_dec(&obd_dirty_transit_pages);
+ cli->cl_dirty_transit--;
}
EXIT;
}
int grant = (1 << cli->cl_chunkbits) + cli->cl_extent_tax;
client_obd_list_lock(&cli->cl_loi_list_lock);
- atomic_sub(nr_pages, &obd_dirty_pages);
- cli->cl_dirty -= nr_pages << PAGE_CACHE_SHIFT;
+ atomic_long_sub(nr_pages, &obd_dirty_pages);
+ cli->cl_dirty_pages -= nr_pages;
cli->cl_lost_grant += lost_grant;
if (cli->cl_avail_grant < grant && cli->cl_lost_grant >= grant) {
/* borrow some grant from truncate to avoid the case that
client_obd_list_unlock(&cli->cl_loi_list_lock);
CDEBUG(D_CACHE, "lost %u grant: %lu avail: %lu dirty: %lu\n",
lost_grant, cli->cl_lost_grant,
- cli->cl_avail_grant, cli->cl_dirty);
+ cli->cl_avail_grant, cli->cl_dirty_pages << PAGE_CACHE_SHIFT);
}
/**
if (rc < 0)
return 0;
- if (cli->cl_dirty + PAGE_CACHE_SIZE <= cli->cl_dirty_max &&
- atomic_read(&obd_unstable_pages) + 1 +
- atomic_read(&obd_dirty_pages) <= obd_max_dirty_pages) {
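+ /* dirty limits are now accounted in pages rather than bytes */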
+ if (cli->cl_dirty_pages < cli->cl_dirty_max_pages &&
+ 1 + atomic_long_read(&obd_dirty_pages) <= obd_max_dirty_pages) {
osc_consume_write_grant(cli, &oap->oap_brw_page);
if (transient) {
- cli->cl_dirty_transit += PAGE_CACHE_SIZE;
- atomic_inc(&obd_dirty_transit_pages);
+ cli->cl_dirty_transit++;
+ atomic_long_inc(&obd_dirty_transit_pages);
oap->oap_brw_flags |= OBD_BRW_NOCACHE;
}
rc = 1;
{
int rc;
client_obd_list_lock(&cli->cl_loi_list_lock);
- rc = cfs_list_empty(&ocw->ocw_entry);
+ rc = list_empty(&ocw->ocw_entry);
client_obd_list_unlock(&cli->cl_loi_list_lock);
return rc;
}
/* force the caller to try sync io. this can jump the list
* of queued writes and create a discontiguous rpc stream */
if (OBD_FAIL_CHECK(OBD_FAIL_OSC_NO_GRANT) ||
- cli->cl_dirty_max < PAGE_CACHE_SIZE ||
+ cli->cl_dirty_max_pages == 0 ||
cli->cl_ar.ar_force_sync || loi->loi_ar.ar_force_sync)
GOTO(out, rc = -EDQUOT);
init_waitqueue_head(&ocw.ocw_waitq);
ocw.ocw_oap = oap;
ocw.ocw_grant = bytes;
- while (cli->cl_dirty > 0 || cli->cl_w_in_flight > 0) {
- cfs_list_add_tail(&ocw.ocw_entry, &cli->cl_cache_waiters);
+ while (cli->cl_dirty_pages > 0 || cli->cl_w_in_flight > 0) {
+ list_add_tail(&ocw.ocw_entry, &cli->cl_cache_waiters);
ocw.ocw_rc = 0;
client_obd_list_unlock(&cli->cl_loi_list_lock);
&ocw, rc);
break;
}
- cfs_list_del_init(&ocw.ocw_entry);
+ list_del_init(&ocw.ocw_entry);
GOTO(out, rc);
}
- LASSERT(cfs_list_empty(&ocw.ocw_entry));
+ LASSERT(list_empty(&ocw.ocw_entry));
rc = ocw.ocw_rc;
if (rc != -EDQUOT)
/* caller must hold loi_list_lock */
void osc_wake_cache_waiters(struct client_obd *cli)
{
- cfs_list_t *l, *tmp;
+ struct list_head *l, *tmp;
struct osc_cache_waiter *ocw;
ENTRY;
- cfs_list_for_each_safe(l, tmp, &cli->cl_cache_waiters) {
- ocw = cfs_list_entry(l, struct osc_cache_waiter, ocw_entry);
- cfs_list_del_init(&ocw->ocw_entry);
+ list_for_each_safe(l, tmp, &cli->cl_cache_waiters) {
+ ocw = list_entry(l, struct osc_cache_waiter, ocw_entry);
+ list_del_init(&ocw->ocw_entry);
ocw->ocw_rc = -EDQUOT;
/* we can't dirty more */
- if ((cli->cl_dirty + PAGE_CACHE_SIZE > cli->cl_dirty_max) ||
- (atomic_read(&obd_unstable_pages) + 1 +
- atomic_read(&obd_dirty_pages) > obd_max_dirty_pages)) {
+ if ((cli->cl_dirty_pages >= cli->cl_dirty_max_pages) ||
+ (1 + atomic_long_read(&obd_dirty_pages) >
+ obd_max_dirty_pages)) {
CDEBUG(D_CACHE, "no dirty room: dirty: %ld "
- "osc max %ld, sys max %d\n", cli->cl_dirty,
- cli->cl_dirty_max, obd_max_dirty_pages);
+ "osc max %ld, sys max %ld\n",
+ cli->cl_dirty_pages, cli->cl_dirty_max_pages,
+ obd_max_dirty_pages);
goto wakeup;
}
static int osc_max_rpc_in_flight(struct client_obd *cli, struct osc_object *osc)
{
- int hprpc = !!cfs_list_empty(&osc->oo_hp_exts);
+ int hprpc = !!list_empty(&osc->oo_hp_exts);
return rpcs_in_flight(cli) >= cli->cl_max_rpcs_in_flight + hprpc;
}
CDEBUG(D_CACHE, "invalid import forcing RPC\n");
RETURN(1);
}
- if (!cfs_list_empty(&osc->oo_hp_exts)) {
+ if (!list_empty(&osc->oo_hp_exts)) {
CDEBUG(D_CACHE, "high prio request forcing RPC\n");
RETURN(1);
}
- if (!cfs_list_empty(&osc->oo_urgent_exts)) {
+ if (!list_empty(&osc->oo_urgent_exts)) {
CDEBUG(D_CACHE, "urgent request forcing RPC\n");
RETURN(1);
}
/* trigger a write rpc stream as long as there are dirtiers
* waiting for space. as they're waiting, they're not going to
* create more pages to coalesce with what's waiting. */
- if (!cfs_list_empty(&cli->cl_cache_waiters)) {
+ if (!list_empty(&cli->cl_cache_waiters)) {
CDEBUG(D_CACHE, "cache waiters forcing RPC\n");
RETURN(1);
}
RETURN(1);
}
/* all read are urgent. */
- if (!cfs_list_empty(&osc->oo_reading_exts))
+ if (!list_empty(&osc->oo_reading_exts))
RETURN(1);
}
static int osc_makes_hprpc(struct osc_object *obj)
{
- return !cfs_list_empty(&obj->oo_hp_exts);
+ return !list_empty(&obj->oo_hp_exts);
}
-static void on_list(cfs_list_t *item, cfs_list_t *list, int should_be_on)
+static void on_list(struct list_head *item, struct list_head *list,
+ int should_be_on)
{
- if (cfs_list_empty(item) && should_be_on)
- cfs_list_add_tail(item, list);
- else if (!cfs_list_empty(item) && !should_be_on)
- cfs_list_del_init(item);
+ if (list_empty(item) && should_be_on)
+ list_add_tail(item, list);
+ else if (!list_empty(item) && !should_be_on)
+ list_del_init(item);
}
/* maintain the osc's cli list membership invariants so that osc_send_oap_rpc
ar->ar_force_sync = 0;
}
-/* Performs "unstable" page accounting. This function balances the
- * increment operations performed in osc_inc_unstable_pages. It is
- * registered as the RPC request callback, and is executed when the
- * bulk RPC is committed on the server. Thus at this point, the pages
- * involved in the bulk transfer are no longer considered unstable. */
-void osc_dec_unstable_pages(struct ptlrpc_request *req)
-{
- struct ptlrpc_bulk_desc *desc = req->rq_bulk;
- struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
- int page_count = desc->bd_iov_count;
- int i;
-
- /* No unstable page tracking */
- if (cli->cl_cache == NULL)
- return;
-
- LASSERT(page_count >= 0);
-
- for (i = 0; i < page_count; i++)
- dec_zone_page_state(desc->bd_iov[i].kiov_page, NR_UNSTABLE_NFS);
-
- atomic_sub(page_count, &cli->cl_cache->ccc_unstable_nr);
- LASSERT(atomic_read(&cli->cl_cache->ccc_unstable_nr) >= 0);
-
- atomic_sub(page_count, &cli->cl_unstable_count);
- LASSERT(atomic_read(&cli->cl_unstable_count) >= 0);
-
- atomic_sub(page_count, &obd_unstable_pages);
- LASSERT(atomic_read(&obd_unstable_pages) >= 0);
-
- wake_up_all(&cli->cl_cache->ccc_unstable_waitq);
-}
-
-/* "unstable" page accounting. See: osc_dec_unstable_pages. */
-void osc_inc_unstable_pages(struct ptlrpc_request *req)
-{
- struct ptlrpc_bulk_desc *desc = req->rq_bulk;
- struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
- int page_count = desc->bd_iov_count;
- int i;
-
- /* No unstable page tracking */
- if (cli->cl_cache == NULL)
- return;
-
- LASSERT(page_count >= 0);
-
- for (i = 0; i < page_count; i++)
- inc_zone_page_state(desc->bd_iov[i].kiov_page, NR_UNSTABLE_NFS);
-
- LASSERT(atomic_read(&cli->cl_cache->ccc_unstable_nr) >= 0);
- atomic_add(page_count, &cli->cl_cache->ccc_unstable_nr);
-
- LASSERT(atomic_read(&cli->cl_unstable_count) >= 0);
- atomic_add(page_count, &cli->cl_unstable_count);
-
- LASSERT(atomic_read(&obd_unstable_pages) >= 0);
- atomic_add(page_count, &obd_unstable_pages);
-
- /* If the request has already been committed (i.e. brw_commit
- * called via rq_commit_cb), we need to undo the unstable page
- * increments we just performed because rq_commit_cb wont be
- * called again. */
- spin_lock(&req->rq_lock);
- if (unlikely(req->rq_committed)) {
- spin_unlock(&req->rq_lock);
-
- osc_dec_unstable_pages(req);
- } else {
- req->rq_unstable = 1;
- spin_unlock(&req->rq_lock);
- }
-}
-
/* this must be called holding the loi list lock to give coverage to exit_cache,
* async_flag maintenance, and oap_request */
static void osc_ap_completion(const struct lu_env *env, struct client_obd *cli,
ENTRY;
if (oap->oap_request != NULL) {
- if (rc == 0)
- osc_inc_unstable_pages(oap->oap_request);
-
xid = ptlrpc_req_xid(oap->oap_request);
ptlrpc_req_finished(oap->oap_request);
oap->oap_request = NULL;
* - extent must be compatible with previous ones
*/
static int try_to_add_extent_for_io(struct client_obd *cli,
- struct osc_extent *ext, cfs_list_t *rpclist,
+ struct osc_extent *ext,
+ struct list_head *rpclist,
int *pc, unsigned int *max_pages)
{
struct osc_extent *tmp;
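+ /* first page of @ext, used below to compare cl_page types */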
+ struct osc_async_page *oap = list_first_entry(&ext->oe_pages,
+ struct osc_async_page,
+ oap_pending_item);
ENTRY;
EASSERT((ext->oe_state == OES_CACHE || ext->oe_state == OES_LOCK_DONE),
if (*pc + ext->oe_nr_pages > *max_pages)
RETURN(0);
- cfs_list_for_each_entry(tmp, rpclist, oe_link) {
+ list_for_each_entry(tmp, rpclist, oe_link) {
+ struct osc_async_page *oap2;
+ oap2 = list_first_entry(&tmp->oe_pages, struct osc_async_page,
+ oap_pending_item);
EASSERT(tmp->oe_owner == current, tmp);
#if 0
if (overlapped(tmp, ext)) {
EASSERT(0, ext);
}
#endif
+ if (oap2cl_page(oap)->cp_type != oap2cl_page(oap2)->cp_type) {
+ CDEBUG(D_CACHE, "Do not permit different type of IO"
+ " for a same RPC\n");
+ RETURN(0);
+ }
if (tmp->oe_srvlock != ext->oe_srvlock ||
!tmp->oe_grants != !ext->oe_grants)
}
*pc += ext->oe_nr_pages;
- cfs_list_move_tail(&ext->oe_link, rpclist);
+ list_move_tail(&ext->oe_link, rpclist);
ext->oe_owner = current;
RETURN(1);
}
* 5. Traverse the extent tree from the 1st extent;
* 6. Above steps exit if there is no space in this RPC.
*/
-static int get_write_extents(struct osc_object *obj, cfs_list_t *rpclist)
+static int get_write_extents(struct osc_object *obj, struct list_head *rpclist)
{
struct client_obd *cli = osc_cli(obj);
struct osc_extent *ext;
unsigned int max_pages = cli->cl_max_pages_per_rpc;
LASSERT(osc_object_is_locked(obj));
- while (!cfs_list_empty(&obj->oo_hp_exts)) {
- ext = cfs_list_entry(obj->oo_hp_exts.next, struct osc_extent,
- oe_link);
+ while (!list_empty(&obj->oo_hp_exts)) {
+ ext = list_entry(obj->oo_hp_exts.next, struct osc_extent,
+ oe_link);
LASSERT(ext->oe_state == OES_CACHE);
if (!try_to_add_extent_for_io(cli, ext, rpclist, &page_count,
&max_pages))
if (page_count == max_pages)
return page_count;
- while (!cfs_list_empty(&obj->oo_urgent_exts)) {
- ext = cfs_list_entry(obj->oo_urgent_exts.next,
- struct osc_extent, oe_link);
+ while (!list_empty(&obj->oo_urgent_exts)) {
+ ext = list_entry(obj->oo_urgent_exts.next,
+ struct osc_extent, oe_link);
if (!try_to_add_extent_for_io(cli, ext, rpclist, &page_count,
&max_pages))
return page_count;
while ((ext = next_extent(ext)) != NULL) {
if ((ext->oe_state != OES_CACHE) ||
- (!cfs_list_empty(&ext->oe_link) &&
+ (!list_empty(&ext->oe_link) &&
ext->oe_owner != NULL))
continue;
while (ext != NULL) {
if ((ext->oe_state != OES_CACHE) ||
/* this extent may be already in current rpclist */
- (!cfs_list_empty(&ext->oe_link) && ext->oe_owner != NULL)) {
+ (!list_empty(&ext->oe_link) && ext->oe_owner != NULL)) {
ext = next_extent(ext);
continue;
}
static int
osc_send_write_rpc(const struct lu_env *env, struct client_obd *cli,
struct osc_object *osc, pdl_policy_t pol)
+__must_hold(osc)
{
- CFS_LIST_HEAD(rpclist);
+ struct list_head rpclist = LIST_HEAD_INIT(rpclist);
struct osc_extent *ext;
struct osc_extent *tmp;
struct osc_extent *first = NULL;
LASSERT(osc_object_is_locked(osc));
page_count = get_write_extents(osc, &rpclist);
- LASSERT(equi(page_count == 0, cfs_list_empty(&rpclist)));
+ LASSERT(equi(page_count == 0, list_empty(&rpclist)));
- if (cfs_list_empty(&rpclist))
+ if (list_empty(&rpclist))
RETURN(0);
osc_update_pending(osc, OBD_BRW_WRITE, -page_count);
- cfs_list_for_each_entry(ext, &rpclist, oe_link) {
+ list_for_each_entry(ext, &rpclist, oe_link) {
LASSERT(ext->oe_state == OES_CACHE ||
ext->oe_state == OES_LOCK_DONE);
if (ext->oe_state == OES_CACHE)
* lock order is page lock -> object lock. */
osc_object_unlock(osc);
- cfs_list_for_each_entry_safe(ext, tmp, &rpclist, oe_link) {
+ list_for_each_entry_safe(ext, tmp, &rpclist, oe_link) {
if (ext->oe_state == OES_LOCKING) {
rc = osc_extent_make_ready(env, ext);
if (unlikely(rc < 0)) {
- cfs_list_del_init(&ext->oe_link);
+ list_del_init(&ext->oe_link);
osc_extent_finish(env, ext, 0, rc);
continue;
}
}
}
- if (!cfs_list_empty(&rpclist)) {
+ if (!list_empty(&rpclist)) {
LASSERT(page_count > 0);
rc = osc_build_rpc(env, cli, &rpclist, OBD_BRW_WRITE, pol);
- LASSERT(cfs_list_empty(&rpclist));
+ LASSERT(list_empty(&rpclist));
}
osc_object_lock(osc);
static int
osc_send_read_rpc(const struct lu_env *env, struct client_obd *cli,
struct osc_object *osc, pdl_policy_t pol)
+__must_hold(osc)
{
struct osc_extent *ext;
struct osc_extent *next;
- CFS_LIST_HEAD(rpclist);
+ struct list_head rpclist = LIST_HEAD_INIT(rpclist);
int page_count = 0;
unsigned int max_pages = cli->cl_max_pages_per_rpc;
int rc = 0;
ENTRY;
LASSERT(osc_object_is_locked(osc));
- cfs_list_for_each_entry_safe(ext, next,
+ list_for_each_entry_safe(ext, next,
&osc->oo_reading_exts, oe_link) {
EASSERT(ext->oe_state == OES_LOCK_DONE, ext);
if (!try_to_add_extent_for_io(cli, ext, &rpclist, &page_count,
osc_update_pending(osc, OBD_BRW_READ, -page_count);
- if (!cfs_list_empty(&rpclist)) {
+ if (!list_empty(&rpclist)) {
osc_object_unlock(osc);
LASSERT(page_count > 0);
rc = osc_build_rpc(env, cli, &rpclist, OBD_BRW_READ, pol);
- LASSERT(cfs_list_empty(&rpclist));
+ LASSERT(list_empty(&rpclist));
osc_object_lock(osc);
}
}
#define list_to_obj(list, item) ({ \
- cfs_list_t *__tmp = (list)->next; \
- cfs_list_del_init(__tmp); \
- cfs_list_entry(__tmp, struct osc_object, oo_##item); \
+ struct list_head *__tmp = (list)->next; \
+ list_del_init(__tmp); \
+ list_entry(__tmp, struct osc_object, oo_##item); \
})
/* This is called by osc_check_rpcs() to find which objects have pages that
/* First return objects that have blocked locks so that they
* will be flushed quickly and other clients can get the lock,
* then objects which have pages ready to be stuffed into RPCs */
- if (!cfs_list_empty(&cli->cl_loi_hp_ready_list))
+ if (!list_empty(&cli->cl_loi_hp_ready_list))
RETURN(list_to_obj(&cli->cl_loi_hp_ready_list, hp_ready_item));
- if (!cfs_list_empty(&cli->cl_loi_ready_list))
+ if (!list_empty(&cli->cl_loi_ready_list))
RETURN(list_to_obj(&cli->cl_loi_ready_list, ready_item));
/* then if we have cache waiters, return all objects with queued
* writes. This is especially important when many small files
* have filled up the cache and not been fired into rpcs because
* they don't pass the nr_pending/object threshold */
- if (!cfs_list_empty(&cli->cl_cache_waiters) &&
- !cfs_list_empty(&cli->cl_loi_write_list))
+ if (!list_empty(&cli->cl_cache_waiters) &&
+ !list_empty(&cli->cl_loi_write_list))
RETURN(list_to_obj(&cli->cl_loi_write_list, write_item));
/* then return all queued objects when we have an invalid import
* so that they get flushed */
if (cli->cl_import == NULL || cli->cl_import->imp_invalid) {
- if (!cfs_list_empty(&cli->cl_loi_write_list))
+ if (!list_empty(&cli->cl_loi_write_list))
RETURN(list_to_obj(&cli->cl_loi_write_list,
write_item));
- if (!cfs_list_empty(&cli->cl_loi_read_list))
+ if (!list_empty(&cli->cl_loi_read_list))
RETURN(list_to_obj(&cli->cl_loi_read_list,
read_item));
}
/* called with the loi list lock held */
static void osc_check_rpcs(const struct lu_env *env, struct client_obd *cli,
pdl_policy_t pol)
+__must_hold(&cli->cl_loi_list_lock)
{
struct osc_object *osc;
int rc = 0;
if (!client_is_remote(exp) && cfs_capable(CFS_CAP_SYS_RESOURCE))
oap->oap_brw_flags = OBD_BRW_NOQUOTA;
- CFS_INIT_LIST_HEAD(&oap->oap_pending_item);
- CFS_INIT_LIST_HEAD(&oap->oap_rpc_item);
+ INIT_LIST_HEAD(&oap->oap_pending_item);
+ INIT_LIST_HEAD(&oap->oap_rpc_item);
spin_lock_init(&oap->oap_lock);
CDEBUG(D_INFO, "oap %p page %p obj off "LPU64"\n",
if (cli->cl_import == NULL || cli->cl_import->imp_invalid)
RETURN(-EIO);
- if (!cfs_list_empty(&oap->oap_pending_item) ||
- !cfs_list_empty(&oap->oap_rpc_item))
+ if (!list_empty(&oap->oap_pending_item) ||
+ !list_empty(&oap->oap_rpc_item))
RETURN(-EBUSY);
/* Set the OBD_BRW_SRVLOCK before the page is queued. */
RETURN(rc);
}
- if (osc_over_unstable_soft_limit(cli))
- brw_flags |= OBD_BRW_SOFT_SYNC;
-
oap->oap_cmd = cmd;
oap->oap_page_off = ops->ops_from;
oap->oap_count = ops->ops_to - ops->ops_from;
else
LASSERT(ext->oe_srvlock == ops->ops_srvlock);
++ext->oe_nr_pages;
- cfs_list_add_tail(&oap->oap_pending_item, &ext->oe_pages);
+ list_add_tail(&oap->oap_pending_item, &ext->oe_pages);
osc_object_unlock(osc);
}
RETURN(rc);
oap, ops, osc_index(oap2osc(oap)));
osc_object_lock(obj);
- if (!cfs_list_empty(&oap->oap_rpc_item)) {
+ if (!list_empty(&oap->oap_rpc_item)) {
CDEBUG(D_CACHE, "oap %p is not in cache.\n", oap);
rc = -EBUSY;
- } else if (!cfs_list_empty(&oap->oap_pending_item)) {
+ } else if (!list_empty(&oap->oap_pending_item)) {
ext = osc_extent_lookup(obj, osc_index(oap2osc(oap)));
/* only truncated pages are allowed to be taken out.
* See osc_extent_truncate() and osc_cache_truncate_start()
if (ext->oe_state == OES_CACHE) {
OSC_EXTENT_DUMP(D_CACHE, ext,
"flush page %p make it urgent.\n", oap);
- if (cfs_list_empty(&ext->oe_link))
- cfs_list_add_tail(&ext->oe_link, &obj->oo_urgent_exts);
+ if (list_empty(&ext->oe_link))
+ list_add_tail(&ext->oe_link, &obj->oo_urgent_exts);
unplug = true;
}
rc = 0;
struct client_obd *cli = osc_cli(obj);
struct osc_extent *ext;
struct osc_extent *found = NULL;
- cfs_list_t *plist;
+ struct list_head *plist;
pgoff_t index = osc_index(ops);
int rc = -EBUSY;
int cmd;
plist = &obj->oo_reading_exts;
cmd = OBD_BRW_READ;
}
- cfs_list_for_each_entry(ext, plist, oe_link) {
+ list_for_each_entry(ext, plist, oe_link) {
if (ext->oe_start <= index && ext->oe_end >= index) {
LASSERT(ext->oe_state == OES_LOCK_DONE);
/* For OES_LOCK_DONE state extent, it has already held
}
}
if (found != NULL) {
- cfs_list_del_init(&found->oe_link);
+ list_del_init(&found->oe_link);
osc_update_pending(obj, cmd, -found->oe_nr_pages);
osc_object_unlock(obj);
}
int osc_queue_sync_pages(const struct lu_env *env, struct osc_object *obj,
- cfs_list_t *list, int cmd, int brw_flags)
+ struct list_head *list, int cmd, int brw_flags)
{
struct client_obd *cli = osc_cli(obj);
struct osc_extent *ext;
pgoff_t end = 0;
ENTRY;
- cfs_list_for_each_entry(oap, list, oap_pending_item) {
+ list_for_each_entry(oap, list, oap_pending_item) {
pgoff_t index = osc_index(oap2osc(oap));
if (index > end)
end = index;
ext = osc_extent_alloc(obj);
if (ext == NULL) {
- cfs_list_for_each_entry(oap, list, oap_pending_item) {
- cfs_list_del_init(&oap->oap_pending_item);
+ list_for_each_entry(oap, list, oap_pending_item) {
+ list_del_init(&oap->oap_pending_item);
osc_ap_completion(env, cli, oap, 0, -ENOMEM);
}
RETURN(-ENOMEM);
}
ext->oe_rw = !!(cmd & OBD_BRW_READ);
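+ /* extents built here serve sync IO: no grant attached, and they
+ * are queued as urgent so the RPC goes out as soon as possible */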
+ ext->oe_sync = 1;
ext->oe_urgent = 1;
ext->oe_start = start;
ext->oe_end = ext->oe_max_end = end;
ext->oe_srvlock = !!(brw_flags & OBD_BRW_SRVLOCK);
ext->oe_nr_pages = page_count;
ext->oe_mppr = mppr;
- cfs_list_splice_init(list, &ext->oe_pages);
+ list_splice_init(list, &ext->oe_pages);
osc_object_lock(obj);
/* Reuse the initial refcount for RPC, don't drop it */
osc_extent_state_set(ext, OES_LOCK_DONE);
if (cmd & OBD_BRW_WRITE) {
- cfs_list_add_tail(&ext->oe_link, &obj->oo_urgent_exts);
+ list_add_tail(&ext->oe_link, &obj->oo_urgent_exts);
osc_update_pending(obj, OBD_BRW_WRITE, page_count);
} else {
- cfs_list_add_tail(&ext->oe_link, &obj->oo_reading_exts);
+ list_add_tail(&ext->oe_link, &obj->oo_reading_exts);
osc_update_pending(obj, OBD_BRW_READ, page_count);
}
osc_object_unlock(obj);
- osc_io_unplug(env, cli, obj, PDL_POLICY_ROUND);
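+ /* unplug asynchronously: queue the writeback work rather than
+ * issuing the RPCs from the caller's context */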
+ osc_io_unplug_async(env, cli, obj);
RETURN(0);
}
struct osc_extent *ext;
struct osc_extent *waiting = NULL;
pgoff_t index;
- CFS_LIST_HEAD(list);
+ struct list_head list = LIST_HEAD_INIT(list);
int result = 0;
bool partial;
ENTRY;
osc_update_pending(obj, OBD_BRW_WRITE,
-ext->oe_nr_pages);
}
- EASSERT(cfs_list_empty(&ext->oe_link), ext);
- cfs_list_add_tail(&ext->oe_link, &list);
+ EASSERT(list_empty(&ext->oe_link), ext);
+ list_add_tail(&ext->oe_link, &list);
ext = next_extent(ext);
}
osc_list_maint(cli, obj);
- while (!cfs_list_empty(&list)) {
+ while (!list_empty(&list)) {
int rc;
- ext = cfs_list_entry(list.next, struct osc_extent, oe_link);
- cfs_list_del_init(&ext->oe_link);
+ ext = list_entry(list.next, struct osc_extent, oe_link);
+ list_del_init(&ext->oe_link);
/* extent may be in OES_ACTIVE state because inode mutex
* is released before osc_io_end() in file write case */
osc_extent_state_set(ext, OES_CACHE);
if (ext->oe_fsync_wait && !ext->oe_urgent) {
ext->oe_urgent = 1;
- cfs_list_move_tail(&ext->oe_link, &obj->oo_urgent_exts);
+ list_move_tail(&ext->oe_link, &obj->oo_urgent_exts);
unplug = true;
}
osc_update_pending(obj, OBD_BRW_WRITE, ext->oe_nr_pages);
pgoff_t start, pgoff_t end, int hp, int discard)
{
struct osc_extent *ext;
- CFS_LIST_HEAD(discard_list);
+ struct list_head discard_list = LIST_HEAD_INIT(discard_list);
bool unplug = false;
int result = 0;
ENTRY;
case OES_CACHE:
result += ext->oe_nr_pages;
if (!discard) {
- cfs_list_t *list = NULL;
+ struct list_head *list = NULL;
if (hp) {
EASSERT(!ext->oe_hp, ext);
ext->oe_hp = 1;
list = &obj->oo_urgent_exts;
}
if (list != NULL)
- cfs_list_move_tail(&ext->oe_link, list);
+ list_move_tail(&ext->oe_link, list);
unplug = true;
} else {
/* the only discarder is lock cancelling, so
ext->oe_max_end <= end, ext);
osc_extent_state_set(ext, OES_LOCKING);
ext->oe_owner = current;
- cfs_list_move_tail(&ext->oe_link,
+ list_move_tail(&ext->oe_link,
&discard_list);
osc_update_pending(obj, OBD_BRW_WRITE,
-ext->oe_nr_pages);
}
osc_object_unlock(obj);
- LASSERT(ergo(!discard, cfs_list_empty(&discard_list)));
- if (!cfs_list_empty(&discard_list)) {
+ LASSERT(ergo(!discard, list_empty(&discard_list)));
+ if (!list_empty(&discard_list)) {
struct osc_extent *tmp;
int rc;
osc_list_maint(osc_cli(obj), obj);
- cfs_list_for_each_entry_safe(ext, tmp, &discard_list, oe_link) {
- cfs_list_del_init(&ext->oe_link);
+ list_for_each_entry_safe(ext, tmp, &discard_list, oe_link) {
+ list_del_init(&ext->oe_link);
EASSERT(ext->oe_state == OES_LOCKING, ext);
/* Discard caching pages. We don't actually write this
struct osc_page *ops, void *cbdata)
{
struct osc_thread_info *info = osc_env_info(env);
- struct cl_lock *lock = cbdata;
+ struct osc_object *osc = cbdata;
pgoff_t index;
index = osc_index(ops);
if (index >= info->oti_fn_index) {
- struct cl_lock *tmp;
+ struct ldlm_lock *tmp;
struct cl_page *page = ops->ops_cl.cpl_page;
/* refresh non-overlapped index */
- tmp = cl_lock_at_pgoff(env, lock->cll_descr.cld_obj, index,
- lock, 1, 0);
+ tmp = osc_dlmlock_at_pgoff(env, osc, index, 0, 0);
if (tmp != NULL) {
+ __u64 end = tmp->l_policy_data.l_extent.end;
/* Cache the first-non-overlapped index so as to skip
- * all pages within [index, oti_fn_index). This
- * is safe because if tmp lock is canceled, it will
- * discard these pages. */
- info->oti_fn_index = tmp->cll_descr.cld_end + 1;
- if (tmp->cll_descr.cld_end == CL_PAGE_EOF)
+ * all pages within [index, oti_fn_index). This is safe
+ * because if tmp lock is canceled, it will discard
+ * these pages. */
+ info->oti_fn_index = cl_index(osc2cl(osc), end + 1);
+ if (end == OBD_OBJECT_EOF)
info->oti_fn_index = CL_PAGE_EOF;
- cl_lock_put(env, tmp);
+ LDLM_LOCK_PUT(tmp);
} else if (cl_page_own(env, io, page) == 0) {
/* discard the page */
cl_page_discard(env, io, page);
struct osc_page *ops, void *cbdata)
{
struct osc_thread_info *info = osc_env_info(env);
- struct cl_lock *lock = cbdata;
struct cl_page *page = ops->ops_cl.cpl_page;
- LASSERT(lock->cll_descr.cld_mode >= CLM_WRITE);
-
/* page is top page. */
info->oti_next_index = osc_index(ops) + 1;
if (cl_page_own(env, io, page) == 0) {
* If error happens on any step, the process continues anyway (the reasoning
* behind this being that lock cancellation cannot be delayed indefinitely).
*/
-int osc_lock_discard_pages(const struct lu_env *env, struct osc_lock *ols)
+int osc_lock_discard_pages(const struct lu_env *env, struct osc_object *osc,
+ pgoff_t start, pgoff_t end, enum cl_lock_mode mode)
{
struct osc_thread_info *info = osc_env_info(env);
struct cl_io *io = &info->oti_io;
- struct cl_object *osc = ols->ols_cl.cls_obj;
- struct cl_lock *lock = ols->ols_cl.cls_lock;
- struct cl_lock_descr *descr = &lock->cll_descr;
osc_page_gang_cbt cb;
int res;
int result;
ENTRY;
- io->ci_obj = cl_object_top(osc);
+ io->ci_obj = cl_object_top(osc2cl(osc));
io->ci_ignore_layout = 1;
result = cl_io_init(env, io, CIT_MISC, io->ci_obj);
if (result != 0)
GOTO(out, result);
- cb = descr->cld_mode == CLM_READ ? check_and_discard_cb : discard_cb;
- info->oti_fn_index = info->oti_next_index = descr->cld_start;
+ cb = mode == CLM_READ ? check_and_discard_cb : discard_cb;
+ info->oti_fn_index = info->oti_next_index = start;
do {
- res = osc_page_gang_lookup(env, io, cl2osc(osc),
- info->oti_next_index, descr->cld_end,
- cb, (void *)lock);
- if (info->oti_next_index > descr->cld_end)
+ res = osc_page_gang_lookup(env, io, osc,
+ info->oti_next_index, end, cb, osc);
+ if (info->oti_next_index > end)
break;
if (res == CLP_GANG_RESCHED)