struct obd_histogram cl_read_offset_hist;
struct obd_histogram cl_write_offset_hist;
- /* lru for osc caching pages */
- struct cl_client_cache *cl_cache;
- struct list_head cl_lru_osc; /* member of cl_cache->ccc_lru */
- atomic_long_t *cl_lru_left;
- atomic_long_t cl_lru_busy;
- atomic_long_t cl_lru_in_list;
- atomic_long_t cl_unstable_count;
- struct list_head cl_lru_list; /* lru page list */
- spinlock_t cl_lru_list_lock; /* page list protector */
- atomic_t cl_lru_shrinkers;
+ /** LRU for osc caching pages */
+ struct cl_client_cache *cl_cache;
+ /** member of cl_cache->ccc_lru */
+ struct list_head cl_lru_osc;
+ /** # of available LRU slots left in the per-OSC cache.
+ * Available LRU slots are shared by all OSCs of the same file system,
+ * therefore this is a pointer to cl_client_cache::ccc_lru_left. */
+ atomic_long_t *cl_lru_left;
+	/** # of busy LRU pages. A page is considered busy if it's in the
+	 * writeback queue or in transfer. Busy pages can't be discarded,
+	 * so they are not in the LRU cache. */
+ atomic_long_t cl_lru_busy;
+ /** # of LRU pages in the cache for this client_obd */
+ atomic_long_t cl_lru_in_list;
+	/** # of threads currently shrinking the LRU cache. To avoid
+	 * contention, multiple threads are not allowed to shrink the
+	 * LRU cache at the same time. */
+ atomic_t cl_lru_shrinkers;
+ /** The time when this LRU cache was last used. */
+ time_t cl_lru_last_used;
+	/** stats: how many reclaims have happened for this client_obd.
+	 * reclaim vs. shrink: shrink is async, voluntary rebalancing;
+	 * reclaim is sync, initiated by an IO thread when LRU slots are
+	 * in short supply. */
+ __u64 cl_lru_reclaim;
+ /** List of LRU pages for this client_obd */
+ struct list_head cl_lru_list;
+ /** Lock for LRU page list */
+ spinlock_t cl_lru_list_lock;
+	/** # of unstable pages in this client_obd.
+	 * An unstable page is one whose WRITE RPC has finished but whose
+	 * transaction has NOT yet committed. */
+ atomic_long_t cl_unstable_count;
/* number of in flight destroy rpcs is limited to max_rpcs_in_flight */
atomic_t cl_destroy_in_flight;
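Taken together, these counters form the per-client LRU accounting. A minimal sketch (the helper is hypothetical, not part of the patch) of how budget usage is derived from them, matching the used_mb computation in the next hunk:

	static inline long osc_lru_pages_used(struct client_obd *cli)
	{
		/* LRU-resident and busy (in-flight) pages are disjoint, so
		 * the pages charged against this client's LRU budget add up */
		return atomic_long_read(&cli->cl_lru_in_list) +
		       atomic_long_read(&cli->cl_lru_busy);
	}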
rc = seq_printf(m,
"used_mb: %ld\n"
- "busy_cnt: %ld\n",
+ "busy_cnt: %ld\n"
+ "reclaim: "LPU64"\n",
(atomic_long_read(&cli->cl_lru_in_list) +
- atomic_long_read(&cli->cl_lru_busy)) >> shift,
- atomic_long_read(&cli->cl_lru_busy));
+ atomic_long_read(&cli->cl_lru_busy)) >> shift,
+ atomic_long_read(&cli->cl_lru_busy),
+ cli->cl_lru_reclaim);
return rc;
}
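The `>> shift` above converts a page count to mebibytes; `shift` is assumed here to be `20 - PAGE_CACHE_SHIFT`, as set up earlier in this lproc file. A standalone sketch:

	/* Sketch: pages -> MiB. With 4 KiB pages (PAGE_CACHE_SHIFT == 12)
	 * the shift is 8, so 512 pages >> 8 == 2 MiB. */
	static inline long pages_to_mib(long npages)
	{
		return npages >> (20 - PAGE_CACHE_SHIFT);
	}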
if (rc == -ETIMEDOUT) {
OSC_EXTENT_DUMP(D_ERROR, ext,
"%s: wait ext to %u timedout, recovery in progress?\n",
- osc_export(obj)->exp_obd->obd_name, state);
+ cli_name(osc_cli(obj)), state);
lwi = LWI_INTR(NULL, NULL);
rc = l_wait_event(ext->oe_waitq, extent_wait_cb(ext, state),
{
struct osc_page *opg = oap2osc_page(oap);
struct cl_page *page = oap2cl_page(oap);
- struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);
enum cl_req_type crt;
int srvlock;
/* Clear opg->ops_transfer_pinned before VM lock is released. */
opg->ops_transfer_pinned = 0;
- spin_lock(&obj->oo_seatbelt);
- LASSERT(opg->ops_submitter != NULL);
- LASSERT(!list_empty(&opg->ops_inflight));
- list_del_init(&opg->ops_inflight);
- opg->ops_submitter = NULL;
- spin_unlock(&obj->oo_seatbelt);
-
opg->ops_submit_time = 0;
srvlock = oap->oap_brw_flags & OBD_BRW_SRVLOCK;
#define OSC_DUMP_GRANT(lvl, cli, fmt, args...) do { \
struct client_obd *__tmp = (cli); \
- CDEBUG(lvl, "%s: grant { dirty: %ld/%ld dirty_pages: %ld/%lu " \
+ CDEBUG(lvl, "%s: grant { dirty: %lu/%lu dirty_pages: %ld/%lu " \
"dropped: %ld avail: %ld, reserved: %ld, flight: %d }" \
"lru {in list: %ld, left: %ld, waiters: %d }"fmt"\n", \
- __tmp->cl_import->imp_obd->obd_name, \
+ cli_name(__tmp), \
__tmp->cl_dirty_pages, __tmp->cl_dirty_max_pages, \
atomic_long_read(&obd_dirty_pages), obd_max_dirty_pages, \
__tmp->cl_lost_grant, __tmp->cl_avail_grant, \
osc_io_unplug_async(env, cli, NULL);
CDEBUG(D_CACHE, "%s: sleeping for cache space @ %p for %p\n",
- cli->cl_import->imp_obd->obd_name, &ocw, oap);
+ cli_name(cli), &ocw, oap);
rc = l_wait_event(ocw.ocw_waitq, ocw_granted(cli, &ocw), &lwi);
default:
CDEBUG(D_CACHE, "%s: event for cache space @ %p never arrived "
"due to %d, fall back to sync i/o\n",
- cli->cl_import->imp_obd->obd_name, &ocw, rc);
+ cli_name(cli), &ocw, rc);
break;
}
EXIT;
return 0;
if (!async) {
- /* disable osc_lru_shrink() temporarily to avoid
- * potential stack overrun problem. LU-2859 */
- atomic_inc(&cli->cl_lru_shrinkers);
spin_lock(&cli->cl_loi_list_lock);
osc_check_rpcs(env, cli);
spin_unlock(&cli->cl_loi_list_lock);
- atomic_dec(&cli->cl_lru_shrinkers);
} else {
CDEBUG(D_CACHE, "Queue writeback work for client %p.\n", cli);
LASSERT(cli->cl_writeback_work != NULL);
struct osc_object *obj, struct osc_page *ops)
{
struct osc_async_page *oap = &ops->ops_oap;
- struct osc_extent *ext = NULL;
int rc = 0;
ENTRY;
CDEBUG(D_INFO, "teardown oap %p page %p at index %lu.\n",
oap, ops, osc_index(oap2osc(oap)));
- osc_object_lock(obj);
if (!list_empty(&oap->oap_rpc_item)) {
CDEBUG(D_CACHE, "oap %p is not in cache.\n", oap);
rc = -EBUSY;
} else if (!list_empty(&oap->oap_pending_item)) {
+ struct osc_extent *ext = NULL;
+
+ osc_object_lock(obj);
ext = osc_extent_lookup(obj, osc_index(oap2osc(oap)));
+ osc_object_unlock(obj);
/* only truncated pages are allowed to be taken out.
* See osc_extent_truncate() and osc_cache_truncate_start()
* for details. */
osc_index(oap2osc(oap)));
rc = -EBUSY;
}
+ if (ext != NULL)
+ osc_extent_put(env, ext);
}
- osc_object_unlock(obj);
- if (ext != NULL)
- osc_extent_put(env, ext);
RETURN(rc);
}
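The teardown rework above narrows the object lock to the extent lookup itself and drops the extent reference inside the branch that took it. The underlying pattern, as a hedged sketch reusing the patch's own names:

	osc_object_lock(obj);
	/* lookup returns a referenced extent, so it cannot vanish once
	 * the object lock is dropped */
	ext = osc_extent_lookup(obj, osc_index(oap2osc(oap)));
	osc_object_unlock(obj);
	if (ext != NULL) {
		/* ... inspect extent state without holding the lock ... */
		osc_extent_put(env, ext);	/* drop lookup reference */
	}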
/** Serialization object for osc_object::oo_debug_io. */
struct mutex oo_debug_mutex;
#endif
- /**
- * List of pages in transfer.
- */
- struct list_head oo_inflight[CRT_NR];
- /**
- * Lock, protecting osc_page::ops_inflight, because a seat-belt is
- * locked during take-off and landing.
- */
- spinlock_t oo_seatbelt;
-
/**
* used by the osc to keep track of what objects to build into rpcs.
* Protected by client_obd->cli_loi_list_lock.
*/
struct list_head ops_lru;
/**
- * Linkage into a per-osc_object list of pages in flight. For
- * debugging.
- */
- struct list_head ops_inflight;
- /**
- * Thread that submitted this page for transfer. For debugging.
- */
- struct task_struct *ops_submitter;
- /**
* Submit time - the time when the page is starting RPC. For debugging.
*/
cfs_time_t ops_submit_time;
struct list_head *ext_list, int cmd);
long osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
long target, bool force);
-long osc_lru_reclaim(struct client_obd *cli);
+long osc_lru_reclaim(struct client_obd *cli, unsigned long npages);
extern spinlock_t osc_ast_guard;
extern struct lu_kmem_descr osc_caches[];
return cli->cl_r_in_flight + cli->cl_w_in_flight;
}
+static inline char *cli_name(struct client_obd *cli)
+{
+ return cli->cl_import->imp_obd->obd_name;
+}
+
#ifndef min_t
#define min_t(type,x,y) \
({ type __x = (x); type __y = (y); __x < __y ? __x: __y; })
struct osc_object *osc = cl2osc(ios->cis_obj);
struct client_obd *cli = osc_cli(osc);
unsigned long c;
- unsigned long npages;
unsigned long max_pages;
+ unsigned long npages;
ENTRY;
if (cl_io_is_append(io))
npages = max_pages;
c = atomic_long_read(cli->cl_lru_left);
- if (c < npages && osc_lru_reclaim(cli) > 0)
+ if (c < npages && osc_lru_reclaim(cli, npages) > 0)
c = atomic_long_read(cli->cl_lru_left);
while (c >= npages) {
if (c == atomic_long_cmpxchg(cli->cl_lru_left, c, c - npages)) {
}
c = atomic_long_read(cli->cl_lru_left);
}
+ if (atomic_long_read(cli->cl_lru_left) < max_pages) {
+ /* If there aren't enough pages in the per-OSC LRU then
+ * wake up the LRU thread to try and clear out space, so
+ * we don't block if pages are being dirtied quickly. */
+		CDEBUG(D_CACHE, "%s: queue LRU, left: %ld/%lu.\n",
+ cli_name(cli), atomic_long_read(cli->cl_lru_left),
+ max_pages);
+ (void)ptlrpcd_queue_work(cli->cl_lru_work);
+ }
RETURN(0);
}
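The loop above reserves npages LRU slots without taking a lock: atomic_long_cmpxchg() returns the value it found, so getting `c` back proves our decrement was applied atomically. The same technique in a self-contained sketch (helper name hypothetical):

	static bool lru_try_reserve(atomic_long_t *left, long npages)
	{
		long c = atomic_long_read(left);

		while (c >= npages) {
			if (c == atomic_long_cmpxchg(left, c, c - npages))
				return true;		/* we own npages slots */
			c = atomic_long_read(left);	/* raced; reload, retry */
		}
		return false;				/* not enough slots left */
	}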
{
struct osc_object *osc = lu2osc(obj);
const struct cl_object_conf *cconf = lu2cl_conf(conf);
- int i;
osc->oo_oinfo = cconf->u.coc_oinfo;
#ifdef CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK
mutex_init(&osc->oo_debug_mutex);
#endif
- spin_lock_init(&osc->oo_seatbelt);
- for (i = 0; i < CRT_NR; ++i)
- INIT_LIST_HEAD(&osc->oo_inflight[i]);
-
INIT_LIST_HEAD(&osc->oo_ready_item);
INIT_LIST_HEAD(&osc->oo_hp_ready_item);
INIT_LIST_HEAD(&osc->oo_write_item);
static void osc_object_free(const struct lu_env *env, struct lu_object *obj)
{
struct osc_object *osc = lu2osc(obj);
- int i;
-
- for (i = 0; i < CRT_NR; ++i)
- LASSERT(list_empty(&osc->oo_inflight[i]));
LASSERT(list_empty(&osc->oo_ready_item));
LASSERT(list_empty(&osc->oo_hp_ready_item));
-	/* ops_lru and ops_inflight share the same field, so take it from LRU
-	 * first and then use it as inflight. */
osc_lru_use(osc_cli(obj), opg);
-
- spin_lock(&obj->oo_seatbelt);
- list_add(&opg->ops_inflight, &obj->oo_inflight[crt]);
- opg->ops_submitter = current;
- spin_unlock(&obj->oo_seatbelt);
}
int osc_page_cache_add(const struct lu_env *env,
return (*printer)(env, cookie, LUSTRE_OSC_NAME"-page@%p %lu: "
"1< %#x %d %u %s %s > "
"2< "LPD64" %u %u %#x %#x | %p %p %p > "
- "3< %s %p %d %lu %d > "
+ "3< %d %lu %d > "
"4< %d %d %d %lu %s | %s %s %s %s > "
"5< %s %s %s %s | %d %s | %d %s %s>\n",
opg, osc_index(opg),
oap->oap_obj_off, oap->oap_page_off, oap->oap_count,
oap->oap_async_flags, oap->oap_brw_flags,
oap->oap_request, oap->oap_cli, obj,
- /* 3 */
- osc_list(&opg->ops_inflight),
- opg->ops_submitter, opg->ops_transfer_pinned,
- osc_submit_duration(opg), opg->ops_srvlock,
+ /* 3 */
+ opg->ops_transfer_pinned,
+ osc_submit_duration(opg), opg->ops_srvlock,
/* 4 */
cli->cl_r_in_flight, cli->cl_w_in_flight,
cli->cl_max_rpcs_in_flight,
LASSERT(0);
}
- spin_lock(&obj->oo_seatbelt);
- if (opg->ops_submitter != NULL) {
- LASSERT(!list_empty(&opg->ops_inflight));
- list_del_init(&opg->ops_inflight);
- opg->ops_submitter = NULL;
- }
- spin_unlock(&obj->oo_seatbelt);
-
osc_lru_del(osc_cli(obj), opg);
if (slice->cpl_page->cp_type == CPT_CACHEABLE) {
#ifdef CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK
opg->ops_temp = !osc_page_protected(env, opg, CLM_READ, 1);
#endif
- /* ops_inflight and ops_lru are the same field, but it doesn't
- * hurt to initialize it twice :-) */
- INIT_LIST_HEAD(&opg->ops_inflight);
INIT_LIST_HEAD(&opg->ops_lru);
/* reserve an LRU space for this page */
*/
static DECLARE_WAIT_QUEUE_HEAD(osc_lru_waitq);
-/* LRU pages are freed in batch mode. OSC should at least free this
- * number of pages to avoid running out of LRU budget, and.. */
-static const int lru_shrink_min = 2 << (20 - PAGE_CACHE_SHIFT); /* 2M */
-/* free this number at most otherwise it will take too long time to finsih. */
-static const int lru_shrink_max = 8 << (20 - PAGE_CACHE_SHIFT); /* 8M */
-/* Check if we can free LRU slots from this OSC. If there exists LRU waiters,
+/**
+ * LRU pages are freed in batch mode. OSC should at least free this
+ * number of pages to avoid running out of LRU slots.
+ */
+static inline int lru_shrink_min(struct client_obd *cli)
+{
+ return cli->cl_max_pages_per_rpc * 2;
+}
+
+/**
+ * Free at most this number, otherwise shrinking will take too long to finish.
+ */
+static inline int lru_shrink_max(struct client_obd *cli)
+{
+ return cli->cl_max_pages_per_rpc * cli->cl_max_rpcs_in_flight;
+}
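Deriving the batch sizes from the RPC geometry preserves the old magnitudes under common tunings. Assuming 1 MiB RPCs (256 pages of 4 KiB) and 8 RPCs in flight:

	/* Illustrative values only -- they scale with the actual tunables:
	 *   lru_shrink_min(cli) == 256 * 2 == 512 pages  (2 MiB)
	 *   lru_shrink_max(cli) == 256 * 8 == 2048 pages (8 MiB)
	 * i.e. the same magnitudes as the removed hard-coded constants. */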
+
+/**
+ * Check if we can free LRU slots from this OSC. If there are LRU waiters,
* we should free slots aggressively. In this way, slots are freed in a steady
* step to maintain fairness among OSCs.
*
- * Return how many LRU pages should be freed. */
+ * Return how many LRU pages should be freed.
+ */
static int osc_cache_too_much(struct client_obd *cli)
{
struct cl_client_cache *cache = cli->cl_cache;
-	/* if it's going to run out LRU slots, we should free some, but not
-	 * too much to maintain faireness among OSCs. */
+	/* if it's going to run out of LRU slots, we should free some, but
+	 * not too much, to maintain fairness among OSCs. */
- if (atomic_long_read(cli->cl_lru_left) < cache->ccc_lru_max >> 4) {
+ if (atomic_long_read(cli->cl_lru_left) < cache->ccc_lru_max >> 2) {
if (pages >= budget)
- return lru_shrink_max;
+ return lru_shrink_max(cli);
else if (pages >= budget / 2)
- return lru_shrink_min;
-#if 0
- } else if (pages >= budget * 2)
- return lru_shrink_min;
-#endif
+ return lru_shrink_min(cli);
+ } else {
+ int duration = cfs_time_current_sec() - cli->cl_lru_last_used;
+
+ /* knock out pages by duration of no IO activity */
+ duration >>= 6; /* approximately 1 minute */
+ if (duration > 0 && pages >= budget / duration)
+ return lru_shrink_min(cli);
}
return 0;
}
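The `duration >>= 6` above counts idle time in ~64 s units, so after roughly n minutes without I/O the OSC is asked to shrink once it caches more than budget/n pages. A hedged model of the rule (helper name hypothetical):

	static long idle_shrink_threshold(long budget, long idle_seconds)
	{
		long n = idle_seconds >> 6;	/* ~64 s units, roughly minutes */

		/* no idle-based shrinking within the first unit of activity;
		 * afterwards the threshold decays as budget / n */
		return n > 0 ? budget / n : LONG_MAX;
	}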
int lru_queue_work(const struct lu_env *env, void *data)
{
struct client_obd *cli = data;
+ int count;
-	CDEBUG(D_CACHE, "Run LRU work for client obd %p.\n", cli);
-	if (osc_cache_too_much(cli))
-		osc_lru_shrink(env, cli, lru_shrink_max, true);
+	CDEBUG(D_CACHE, "%s: run LRU work for client obd\n", cli_name(cli));
+	count = osc_cache_too_much(cli);
+	if (count > 0) {
+		int rc = osc_lru_shrink(env, cli, count, false);
+
+		CDEBUG(D_CACHE, "%s: shrank %d/%d pages from client obd\n",
+		       cli_name(cli), rc, count);
+ if (rc >= count) {
+ CDEBUG(D_CACHE, "%s: queue again\n", cli_name(cli));
+ ptlrpcd_queue_work(cli->cl_lru_work);
+ }
+ }
RETURN(0);
}
list_splice_tail(&lru, &cli->cl_lru_list);
atomic_long_sub(npages, &cli->cl_lru_busy);
atomic_long_add(npages, &cli->cl_lru_in_list);
+ cli->cl_lru_last_used = cfs_time_current_sec();
spin_unlock(&cli->cl_lru_list_lock);
- /* XXX: May set force to be true for better performance */
- if (osc_cache_too_much(cli))
+ if (waitqueue_active(&osc_lru_waitq))
(void)ptlrpcd_queue_work(cli->cl_lru_work);
}
}
/* this is a great place to release more LRU pages if
* this osc occupies too many LRU pages and kernel is
* stealing one of them. */
- if (!memory_pressure_get())
+ if (osc_cache_too_much(cli)) {
+		CDEBUG(D_CACHE, "%s: queue LRU work\n", cli_name(cli));
(void)ptlrpcd_queue_work(cli->cl_lru_work);
+ }
wake_up(&osc_lru_waitq);
} else {
LASSERT(list_empty(&opg->ops_lru));
struct cl_page *page = pvec[i];
LASSERT(cl_page_is_owned(page, io));
+ cl_page_delete(env, page);
cl_page_discard(env, io, page);
cl_page_disown(env, io, page);
cl_page_put(env, page);
if (atomic_long_read(&cli->cl_lru_in_list) == 0 || target <= 0)
RETURN(0);
+ CDEBUG(D_CACHE, "%s: shrinkers: %d, force: %d\n",
+ cli_name(cli), atomic_read(&cli->cl_lru_shrinkers), force);
if (!force) {
if (atomic_read(&cli->cl_lru_shrinkers) > 0)
RETURN(-EBUSY);
io = &osc_env_info(env)->oti_io;
spin_lock(&cli->cl_lru_list_lock);
+ if (force)
+ cli->cl_lru_reclaim++;
maxscan = min(target << 1, atomic_long_read(&cli->cl_lru_in_list));
while (!list_empty(&cli->cl_lru_list)) {
struct cl_page *page;
bool will_free = false;
+ if (!force && atomic_read(&cli->cl_lru_shrinkers) > 1)
+ break;
+
if (--maxscan < 0)
break;
RETURN(count > 0 ? count : rc);
}
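osc_lru_shrink() now has two concurrency guards: a voluntary caller backs off with -EBUSY if any shrinker is already active, and the scan loop bails out as soon as a second shrinker shows up, while forced reclaim always proceeds. A minimal model of the yield rule (helper name hypothetical):

	static bool lru_keep_scanning(struct client_obd *cli, bool force)
	{
		/* forced reclaim is never interrupted; a voluntary scan
		 * keeps going only while it is the sole shrinker */
		return force || atomic_read(&cli->cl_lru_shrinkers) <= 1;
	}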
-long osc_lru_reclaim(struct client_obd *cli)
+/**
+ * Reclaim LRU pages by an IO thread. The caller wants to reclaim at least
+ * \a npages of LRU slots. For performance reasons it's better to drop LRU
+ * pages in batches, so the number requested is raised to at least
+ * max_pages_per_rpc.
+ */
+long osc_lru_reclaim(struct client_obd *cli, unsigned long npages)
{
struct cl_env_nest nest;
struct lu_env *env;
struct cl_client_cache *cache = cli->cl_cache;
- long rc = 0;
int max_scans;
+ long rc = 0;
ENTRY;
LASSERT(cache != NULL);
if (IS_ERR(env))
RETURN(rc);
- rc = osc_lru_shrink(env, cli, osc_cache_too_much(cli), false);
- if (rc != 0) {
- if (rc == -EBUSY)
- rc = 0;
-
- CDEBUG(D_CACHE, "%s: Free %ld pages from own LRU: %p.\n",
- cli->cl_import->imp_obd->obd_name, rc, cli);
+ npages = max_t(int, npages, cli->cl_max_pages_per_rpc);
+ CDEBUG(D_CACHE, "%s: start to reclaim %ld pages from LRU\n",
+ cli_name(cli), npages);
+ rc = osc_lru_shrink(env, cli, npages, true);
+ if (rc >= npages) {
+ CDEBUG(D_CACHE, "%s: reclaimed %ld/%ld pages from LRU\n",
+ cli_name(cli), rc, npages);
+ if (osc_cache_too_much(cli) > 0)
+ ptlrpcd_queue_work(cli->cl_lru_work);
GOTO(out, rc);
+ } else if (rc > 0) {
+ npages -= rc;
}
- CDEBUG(D_CACHE, "%s: cli %p no free slots, pages: %ld, busy: %ld.\n",
- cli->cl_import->imp_obd->obd_name, cli,
- atomic_long_read(&cli->cl_lru_in_list),
- atomic_long_read(&cli->cl_lru_busy));
+ CDEBUG(D_CACHE, "%s: cli %p no free slots, pages: %ld/%ld, want: %ld\n",
+ cli_name(cli), cli, atomic_long_read(&cli->cl_lru_in_list),
+ atomic_long_read(&cli->cl_lru_busy), npages);
/* Reclaim LRU slots from other client_obd as it can't free enough
* from its own. This should rarely happen. */
cl_lru_osc);
CDEBUG(D_CACHE, "%s: cli %p LRU pages: %ld, busy: %ld.\n",
- cli->cl_import->imp_obd->obd_name, cli,
+ cli_name(cli), cli,
atomic_long_read(&cli->cl_lru_in_list),
atomic_long_read(&cli->cl_lru_busy));
if (osc_cache_too_much(cli) > 0) {
spin_unlock(&cache->ccc_lru_lock);
- rc = osc_lru_shrink(env, cli, osc_cache_too_much(cli),
- true);
+ rc = osc_lru_shrink(env, cli, npages, true);
spin_lock(&cache->ccc_lru_lock);
- if (rc != 0)
+ if (rc >= npages)
break;
+ if (rc > 0)
+ npages -= rc;
}
}
spin_unlock(&cache->ccc_lru_lock);
out:
cl_env_nested_put(&nest, env);
CDEBUG(D_CACHE, "%s: cli %p freed %ld pages.\n",
- cli->cl_import->imp_obd->obd_name, cli, rc);
+ cli_name(cli), cli, rc);
return rc;
}
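Reclaim is thus a two-phase operation: force-shrink the caller's own LRU first, and only if still short, walk the file system's shared ccc_lru list and shrink sibling OSCs. In outline (helper names hypothetical):

	long want = max_t(long, npages, cli->cl_max_pages_per_rpc);
	long got  = shrink_own_lru(env, cli, want);		/* phase 1 */

	if (got < want)	/* still short: steal slots from siblings */
		got += shrink_sibling_oscs(env, cli->cl_cache, want - got);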
LASSERT(atomic_long_read(cli->cl_lru_left) >= 0);
while (!atomic_long_add_unless(cli->cl_lru_left, -1, 0)) {
-
/* run out of LRU spaces, try to drop some by itself */
- rc = osc_lru_reclaim(cli);
+ rc = osc_lru_reclaim(cli, 1);
if (rc < 0)
break;
if (rc > 0)
if (unstable_count == 0)
wake_up_all(&cli->cl_cache->ccc_unstable_waitq);
- if (osc_cache_too_much(cli))
+ if (waitqueue_active(&osc_lru_waitq))
(void)ptlrpcd_queue_work(cli->cl_lru_work);
}
CDEBUG(D_CACHE,
"%s: cli: %p unstable pages: %lu, osc unstable pages: %lu\n",
- cli->cl_import->imp_obd->obd_name, cli,
- unstable_nr, osc_unstable_count);
+ cli_name(cli), cli, unstable_nr, osc_unstable_count);
/* If the LRU slots are in shortage - 25% remaining AND this OSC
* has one full RPC window of unstable pages, it's a good chance
}
CDEBUG(D_QUOTA, "%s: setdq to insert for %s %d (%d)\n",
- cli->cl_import->imp_obd->obd_name,
+ cli_name(cli),
type == USRQUOTA ? "user" : "group",
qid[type], rc);
} else {
OBD_SLAB_FREE_PTR(oqi, osc_quota_kmem);
CDEBUG(D_QUOTA, "%s: setdq to remove for %s %d (%p)\n",
- cli->cl_import->imp_obd->obd_name,
+ cli_name(cli),
type == USRQUOTA ? "user" : "group",
qid[type], oqi);
}
oa->o_undirty = 0;
} else if (unlikely(atomic_long_read(&obd_dirty_pages) -
atomic_long_read(&obd_dirty_transit_pages) >
- (obd_max_dirty_pages + 1))) {
+ (long)(obd_max_dirty_pages + 1))) {
/* The atomic_read() allowing the atomic_inc() are
* not covered by a lock thus they may safely race and trip
* this CERROR() unless we add in a small fudge factor (+1). */
- CERROR("%s: dirty %ld - %ld > system dirty_max %lu\n",
- cli->cl_import->imp_obd->obd_name,
- atomic_long_read(&obd_dirty_pages),
+ CERROR("%s: dirty %ld - %ld > system dirty_max %ld\n",
+ cli_name(cli), atomic_long_read(&obd_dirty_pages),
atomic_long_read(&obd_dirty_transit_pages),
obd_max_dirty_pages);
oa->o_undirty = 0;
static int osc_add_shrink_grant(struct client_obd *client)
{
- int rc;
+ int rc;
- rc = ptlrpc_add_timeout_client(client->cl_grant_shrink_interval,
- TIMEOUT_GRANT,
- osc_grant_shrink_grant_cb, NULL,
- &client->cl_grant_shrink_list);
- if (rc) {
- CERROR("add grant client %s error %d\n",
- client->cl_import->imp_obd->obd_name, rc);
- return rc;
- }
- CDEBUG(D_CACHE, "add grant client %s \n",
- client->cl_import->imp_obd->obd_name);
- osc_update_next_shrink(client);
- return 0;
+ rc = ptlrpc_add_timeout_client(client->cl_grant_shrink_interval,
+ TIMEOUT_GRANT,
+ osc_grant_shrink_grant_cb, NULL,
+ &client->cl_grant_shrink_list);
+ if (rc) {
+ CERROR("add grant client %s error %d\n", cli_name(client), rc);
+ return rc;
+ }
+ CDEBUG(D_CACHE, "add grant client %s\n", cli_name(client));
+ osc_update_next_shrink(client);
+ return 0;
}
static int osc_del_shrink_grant(struct client_obd *client)
if (cli->cl_avail_grant < 0) {
CWARN("%s: available grant < 0: avail/ocd/dirty %ld/%u/%ld\n",
- cli->cl_import->imp_obd->obd_name, cli->cl_avail_grant,
+ cli_name(cli), cli->cl_avail_grant,
ocd->ocd_grant, cli->cl_dirty_pages << PAGE_CACHE_SHIFT);
/* workaround for servers which do not have the patch from
* LU-2679 */
spin_unlock(&cli->cl_loi_list_lock);
CDEBUG(D_CACHE, "%s, setting cl_avail_grant: %ld cl_lost_grant: %ld."
- "chunk bits: %d.\n", cli->cl_import->imp_obd->obd_name,
- cli->cl_avail_grant, cli->cl_lost_grant, cli->cl_chunkbits);
+ "chunk bits: %d.\n", cli_name(cli), cli->cl_avail_grant,
+ cli->cl_lost_grant, cli->cl_chunkbits);
if (ocd->ocd_connect_flags & OBD_CONNECT_GRANT_SHRINK &&
list_empty(&cli->cl_grant_shrink_list))