void ccc_inode_lsm_put(struct inode *inode, struct lov_stripe_md *lsm);
/**
- * Data structures for LRU management on lustre client mount
+ * Data structure managing a client's cached clean pages. An LRU of
+ * pages is maintained, along with other statistics.
*/
-struct cl_client_lru {
- cfs_atomic_t ccl_users; /* how many users(OSCs) of this data */
- cfs_atomic_t ccl_page_left;
- unsigned long ccl_page_max;
- cfs_list_t ccl_list; /* entities for lru - actually osc list */
- cfs_spinlock_t ccl_lock; /* lock for list */
- unsigned int ccl_reclaim_count; /* statistics */
+struct cl_client_cache {
+ cfs_atomic_t ccc_users; /* # of users (OSCs) of this data */
+ cfs_list_t ccc_lru; /* LRU list of cached clean pages */
+ cfs_spinlock_t ccc_lru_lock; /* lock for list */
+ cfs_atomic_t ccc_lru_left; /* # of LRU entries available */
+ unsigned long ccc_lru_max; /* Max # of LRU entries possible */
+ unsigned int ccc_lru_shrinkers; /* # of threads reclaiming */
};
#endif /*LCLIENT_H */
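For orientation, the ownership model implied by the hunks below: llite embeds one cl_client_cache per mount (ll_sb_info.ll_cache) and hands a pointer to it to every OSC via KEY_CACHE_SET; each OSC bumps ccc_users and links itself onto ccc_lru. A minimal user-space sketch of that attach step, with plain C11/pthread types standing in for the cfs_* primitives (all names here are illustrative, not Lustre API):

#include <pthread.h>
#include <stdatomic.h>

struct osc;                               /* stand-in for client_obd */

struct client_cache {                     /* stand-in for cl_client_cache */
        atomic_int       users;           /* # of OSCs attached */
        atomic_long      lru_left;        /* free LRU slots */
        unsigned long    lru_max;         /* total LRU slots */
        pthread_mutex_t  lru_lock;        /* protects the OSC list */
        struct osc      *lru_head;        /* OSCs sharing this cache */
};

struct osc {
        struct client_cache *cache;       /* shared; owned by the mount */
        struct osc          *lru_next;
};

/* What the OSC's KEY_CACHE_SET handler boils down to: take a reference
 * and join the cache's list of OSCs. Detach reverses both steps. */
static void osc_attach_cache(struct osc *o, struct client_cache *c)
{
        o->cache = c;
        atomic_fetch_add(&c->users, 1);
        pthread_mutex_lock(&c->lru_lock);
        o->lru_next = c->lru_head;
        c->lru_head = o;
        pthread_mutex_unlock(&c->lru_lock);
}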
struct obd_histogram cl_write_offset_hist;
/* lru for osc caching pages */
- struct cl_client_lru *cl_lru;
- cfs_list_t cl_lru_osc; /* member of cl_lru->ccl_list */
+ struct cl_client_cache *cl_cache;
+ cfs_list_t cl_lru_osc; /* member of cl_cache->ccc_lru */
cfs_atomic_t *cl_lru_left;
cfs_atomic_t cl_lru_busy;
cfs_atomic_t cl_lru_shrinkers;
cfs_proc_dir_entry_t *lov_pool_proc_entry;
enum lustre_sec_part lov_sp_me;
- /* cached LRU data from upper layer */
- void *lov_lru;
+ /* Cached LRU pages from upper layer */
+ void *lov_cache;
};
struct lmv_tgt_desc {
#define KEY_CONNECT_FLAG "connect_flags"
#define KEY_SYNC_LOCK_CANCEL "sync_lock_cancel"
-#define KEY_LRU_SET "lru_set"
-#define KEY_LRU_SHRINK "lru_shrink"
+#define KEY_CACHE_SET "cache_set"
+#define KEY_CACHE_LRU_SHRINK "cache_lru_shrink"
struct lu_context;
struct lprocfs_stats *ll_stats; /* lprocfs stats counter */
- struct cl_client_lru ll_lru;
+ struct cl_client_cache ll_cache;
struct lprocfs_stats *ll_ra_stats;
}
/* initialize lru data */
- cfs_atomic_set(&sbi->ll_lru.ccl_users, 0);
- sbi->ll_lru.ccl_page_max = lru_page_max;
- cfs_atomic_set(&sbi->ll_lru.ccl_page_left, lru_page_max);
- cfs_spin_lock_init(&sbi->ll_lru.ccl_lock);
- CFS_INIT_LIST_HEAD(&sbi->ll_lru.ccl_list);
+ cfs_atomic_set(&sbi->ll_cache.ccc_users, 0);
+ sbi->ll_cache.ccc_lru_max = lru_page_max;
+ cfs_atomic_set(&sbi->ll_cache.ccc_lru_left, lru_page_max);
+ cfs_spin_lock_init(&sbi->ll_cache.ccc_lru_lock);
+ CFS_INIT_LIST_HEAD(&sbi->ll_cache.ccc_lru);
sbi->ll_ra_info.ra_max_pages_per_file = min(pages / 32,
SBI_DEFAULT_READAHEAD_MAX);
NULL);
cl_sb_init(sb);
- err = obd_set_info_async(NULL, sbi->ll_dt_exp, sizeof(KEY_LRU_SET),
- KEY_LRU_SET, sizeof(sbi->ll_lru),
- &sbi->ll_lru, NULL);
+ err = obd_set_info_async(NULL, sbi->ll_dt_exp, sizeof(KEY_CACHE_SET),
+ KEY_CACHE_SET, sizeof(sbi->ll_cache),
+ &sbi->ll_cache, NULL);
sb->s_root = d_alloc_root(root);
#ifdef HAVE_DCACHE_LOCK
static int ll_rd_max_cached_mb(char *page, char **start, off_t off,
int count, int *eof, void *data)
{
- struct super_block *sb = data;
- struct ll_sb_info *sbi = ll_s2sbi(sb);
- struct cl_client_lru *lru = &sbi->ll_lru;
+ struct super_block *sb = data;
+ struct ll_sb_info *sbi = ll_s2sbi(sb);
+ struct cl_client_cache *cache = &sbi->ll_cache;
int shift = 20 - CFS_PAGE_SHIFT;
int max_cached_mb;
int unused_mb;
*eof = 1;
- max_cached_mb = lru->ccl_page_max >> shift;
- unused_mb = cfs_atomic_read(&lru->ccl_page_left) >> shift;
+ max_cached_mb = cache->ccc_lru_max >> shift;
+ unused_mb = cfs_atomic_read(&cache->ccc_lru_left) >> shift;
return snprintf(page, count,
"users: %d\n"
"max_cached_mb: %d\n"
"used_mb: %d\n"
"unused_mb: %d\n"
"reclaim_count: %u\n",
- cfs_atomic_read(&lru->ccl_users),
+ cfs_atomic_read(&cache->ccc_users),
max_cached_mb,
max_cached_mb - unused_mb,
unused_mb,
- lru->ccl_reclaim_count);
+ cache->ccc_lru_shrinkers);
}
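The MB figures above are derived by shifting page counts right by (20 - CFS_PAGE_SHIFT), i.e. dividing by the number of pages per MiB. A quick stand-alone check of that arithmetic, assuming 4 KiB pages (the page size and counter values are illustrative, not from the source):

/* Worked example of the pages -> MiB conversion used in
 * ll_rd_max_cached_mb(), assuming 4 KiB pages (PAGE_SHIFT = 12). */
#include <stdio.h>

int main(void)
{
        const int page_shift = 12;               /* 4 KiB pages (assumed) */
        const int shift      = 20 - page_shift;  /* 2^8 = 256 pages per MiB */
        unsigned long lru_max  = 65536;          /* example ccc_lru_max */
        unsigned long lru_left = 16384;          /* example free entries */

        printf("max_cached_mb: %lu\n", lru_max >> shift);    /* 256 */
        printf("unused_mb:     %lu\n", lru_left >> shift);   /* 64  */
        return 0;
}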
static int ll_wr_max_cached_mb(struct file *file, const char *buffer,
{
struct super_block *sb = data;
struct ll_sb_info *sbi = ll_s2sbi(sb);
- struct cl_client_lru *lru = &sbi->ll_lru;
+ struct cl_client_cache *cache = &sbi->ll_cache;
int mult, rc, pages_number;
int diff = 0;
int nrpages = 0;
RETURN(-ENODEV);
cfs_spin_lock(&sbi->ll_lock);
- diff = pages_number - lru->ccl_page_max;
+ diff = pages_number - cache->ccc_lru_max;
cfs_spin_unlock(&sbi->ll_lock);
/* easy - add more LRU slots. */
if (diff >= 0) {
- cfs_atomic_add(diff, &lru->ccl_page_left);
+ cfs_atomic_add(diff, &cache->ccc_lru_left);
GOTO(out, rc = 0);
}
do {
int ov, nv;
- ov = cfs_atomic_read(&lru->ccl_page_left);
+ ov = cfs_atomic_read(&cache->ccc_lru_left);
if (ov == 0)
break;
nv = ov > diff ? ov - diff : 0;
- rc = cfs_atomic_cmpxchg(&lru->ccl_page_left, ov, nv);
+ rc = cfs_atomic_cmpxchg(&cache->ccc_lru_left, ov, nv);
if (likely(ov == rc)) {
diff -= ov - nv;
nrpages += ov - nv;
/* difficult - have to ask OSCs to drop LRU slots. */
tmp = diff << 1;
rc = obd_set_info_async(NULL, sbi->ll_dt_exp,
- sizeof(KEY_LRU_SHRINK), KEY_LRU_SHRINK,
+ sizeof(KEY_CACHE_LRU_SHRINK),
+ KEY_CACHE_LRU_SHRINK,
sizeof(tmp), &tmp, NULL);
if (rc < 0)
break;
out:
if (rc >= 0) {
cfs_spin_lock(&sbi->ll_lock);
- lru->ccl_page_max = pages_number;
+ cache->ccc_lru_max = pages_number;
cfs_spin_unlock(&sbi->ll_lock);
rc = count;
} else {
- cfs_atomic_add(nrpages, &lru->ccl_page_left);
+ cfs_atomic_add(nrpages, &cache->ccc_lru_left);
}
return rc;
}
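The shrink path above first tries to take the needed slots directly out of ccc_lru_left with a compare-and-exchange loop (the hunk elides the loop's tail) before falling back to asking the OSCs to drop pages. A self-contained user-space analogue of that reservation loop, using C11 atomics in place of cfs_atomic_* (names are illustrative only):

/* User-space analogue of the slot-stealing loop in ll_wr_max_cached_mb():
 * take up to `want` entries from a shared free-slot counter without ever
 * driving it negative, retrying when another thread races us. */
#include <stdatomic.h>

static long take_lru_slots(atomic_long *left, long want)
{
        long taken = 0;

        while (taken < want) {
                long ov = atomic_load(left);
                long nv;

                if (ov == 0)
                        break;                    /* nothing left to grab */
                nv = ov > want - taken ? ov - (want - taken) : 0;
                /* only commit if nobody changed the counter under us */
                if (atomic_compare_exchange_strong(left, &ov, nv))
                        taken += ov - nv;
        }
        return taken;
}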
if (!tgt->ltd_exp)
GOTO(out, rc = 0);
- if (lov->lov_lru != NULL) {
+ if (lov->lov_cache != NULL) {
rc = obd_set_info_async(NULL, tgt->ltd_exp,
- sizeof(KEY_LRU_SET), KEY_LRU_SET,
- sizeof(struct cl_client_lru), lov->lov_lru,
+ sizeof(KEY_CACHE_SET), KEY_CACHE_SET,
+ sizeof(struct cl_client_cache), lov->lov_cache,
NULL);
if (rc < 0)
GOTO(out, rc);
mds_con = 1;
} else if (KEY_IS(KEY_CAPA_KEY)) {
capa = 1;
- } else if (KEY_IS(KEY_LRU_SET)) {
- LASSERT(lov->lov_lru == NULL);
- lov->lov_lru = val;
+ } else if (KEY_IS(KEY_CACHE_SET)) {
+ LASSERT(lov->lov_cache == NULL);
+ lov->lov_cache = val;
do_inactive = 1;
}
* Return how many LRU pages should be freed. */
static int osc_cache_too_much(struct client_obd *cli)
{
- struct cl_client_lru *lru = cli->cl_lru;
+ struct cl_client_cache *cache = cli->cl_cache;
int pages = cfs_atomic_read(&cli->cl_lru_in_list) >> 1;
if (cfs_atomic_read(&osc_lru_waiters) > 0 &&
/* if it's going to run out of LRU slots, we should free some, but not
 * too much to maintain fairness among OSCs. */
- if (cfs_atomic_read(cli->cl_lru_left) < lru->ccl_page_max >> 4) {
- unsigned long budget;
+ if (cfs_atomic_read(cli->cl_lru_left) < cache->ccc_lru_max >> 4) {
+ unsigned long tmp;
- budget = lru->ccl_page_max / cfs_atomic_read(&lru->ccl_users);
- if (pages > budget)
+ tmp = cache->ccc_lru_max / cfs_atomic_read(&cache->ccc_users);
+ if (pages > tmp)
return min(pages, lru_shrink_max);
return pages > lru_shrink_min ? lru_shrink_min : 0;
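The check above gives each OSC an equal share of the mount-wide LRU capacity and only asks it to shed pages once it exceeds that share. A small stand-alone restatement of that budget logic, with the example numbers invented for illustration:

/* Hypothetical restatement of the budget check in osc_cache_too_much():
 * an OSC is over budget once it caches more than lru_max / users pages. */
static int osc_over_budget(unsigned long lru_max, unsigned int users,
                           unsigned long pages_in_lru)
{
        unsigned long budget = lru_max / users;   /* fair share per OSC */

        /* e.g. lru_max = 65536 and users = 4 give a 16384-page budget;
         * an OSC with 40000 cached pages would then offer half of them
         * (20000), capped at lru_shrink_max. */
        return pages_in_lru > budget;
}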
static int osc_lru_reclaim(struct client_obd *cli)
{
- struct cl_client_lru *lru = cli->cl_lru;
+ struct cl_client_cache *cache = cli->cl_cache;
struct client_obd *victim;
struct client_obd *tmp;
int rc;
- LASSERT(lru != NULL);
- LASSERT(!cfs_list_empty(&lru->ccl_list));
+ LASSERT(cache != NULL);
+ LASSERT(!cfs_list_empty(&cache->ccc_lru));
rc = osc_lru_shrink(cli, lru_shrink_min);
if (rc > 0) {
/* Reclaim LRU slots from other client_obd as it can't free enough
* from its own. This should rarely happen. */
- cfs_spin_lock(&lru->ccl_lock);
- lru->ccl_reclaim_count++;
- cfs_list_move_tail(&cli->cl_lru_osc, &lru->ccl_list);
- cfs_list_for_each_entry_safe(victim, tmp, &lru->ccl_list, cl_lru_osc) {
+ cfs_spin_lock(&cache->ccc_lru_lock);
+ cache->ccc_lru_shrinkers++;
+ cfs_list_move_tail(&cli->cl_lru_osc, &cache->ccc_lru);
+ cfs_list_for_each_entry_safe(victim, tmp, &cache->ccc_lru, cl_lru_osc) {
if (victim == cli)
break;
cfs_atomic_read(&victim->cl_lru_in_list),
cfs_atomic_read(&victim->cl_lru_busy));
- cfs_list_move_tail(&victim->cl_lru_osc, &lru->ccl_list);
+ cfs_list_move_tail(&victim->cl_lru_osc, &cache->ccc_lru);
if (cfs_atomic_read(&victim->cl_lru_in_list) > 0)
break;
}
- cfs_spin_unlock(&lru->ccl_lock);
+ cfs_spin_unlock(&cache->ccc_lru_lock);
if (victim == cli) {
CDEBUG(D_CACHE, "%s: can't get any free LRU slots.\n",
cli->cl_import->imp_obd->obd_name);
int rc = 0;
ENTRY;
- if (cli->cl_lru == NULL) /* shall not be in LRU */
+ if (cli->cl_cache == NULL) /* shall not be in LRU */
RETURN(0);
LASSERT(cfs_atomic_read(cli->cl_lru_left) >= 0);
RETURN(0);
}
- if (KEY_IS(KEY_LRU_SET)) {
+ if (KEY_IS(KEY_CACHE_SET)) {
struct client_obd *cli = &obd->u.cli;
- LASSERT(cli->cl_lru == NULL); /* only once */
- cli->cl_lru = (struct cl_client_lru *)val;
- cfs_atomic_inc(&cli->cl_lru->ccl_users);
- cli->cl_lru_left = &cli->cl_lru->ccl_page_left;
+ LASSERT(cli->cl_cache == NULL); /* only once */
+ cli->cl_cache = (struct cl_client_cache *)val;
+ cfs_atomic_inc(&cli->cl_cache->ccc_users);
+ cli->cl_lru_left = &cli->cl_cache->ccc_lru_left;
/* add this osc into entity list */
LASSERT(cfs_list_empty(&cli->cl_lru_osc));
- cfs_spin_lock(&cli->cl_lru->ccl_lock);
- cfs_list_add(&cli->cl_lru_osc, &cli->cl_lru->ccl_list);
- cfs_spin_unlock(&cli->cl_lru->ccl_lock);
+ cfs_spin_lock(&cli->cl_cache->ccc_lru_lock);
+ cfs_list_add(&cli->cl_lru_osc, &cli->cl_cache->ccc_lru);
+ cfs_spin_unlock(&cli->cl_cache->ccc_lru_lock);
RETURN(0);
}
- if (KEY_IS(KEY_LRU_SHRINK)) {
+ if (KEY_IS(KEY_CACHE_LRU_SHRINK)) {
struct client_obd *cli = &obd->u.cli;
int nr = cfs_atomic_read(&cli->cl_lru_in_list) >> 1;
int target = *(int *)val;
ENTRY;
/* lru cleanup */
- if (cli->cl_lru != NULL) {
- LASSERT(cfs_atomic_read(&cli->cl_lru->ccl_users) > 0);
- cfs_spin_lock(&cli->cl_lru->ccl_lock);
+ if (cli->cl_cache != NULL) {
+ LASSERT(cfs_atomic_read(&cli->cl_cache->ccc_users) > 0);
+ cfs_spin_lock(&cli->cl_cache->ccc_lru_lock);
cfs_list_del_init(&cli->cl_lru_osc);
- cfs_spin_unlock(&cli->cl_lru->ccl_lock);
+ cfs_spin_unlock(&cli->cl_cache->ccc_lru_lock);
cli->cl_lru_left = NULL;
- cfs_atomic_dec(&cli->cl_lru->ccl_users);
- cli->cl_lru = NULL;
+ cfs_atomic_dec(&cli->cl_cache->ccc_users);
+ cli->cl_cache = NULL;
}
/* free memory of osc quota cache */