X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Fobdclass%2Flu_object.c;h=e04654c962c082d5bb41f5b7e4b5ef89eeffd528;hb=522c1eb4d2f5faf1fa87be07d9617df1439fc0d6;hp=a51dc417b1efc249a2ef19a8039611d205a11dd7;hpb=d8278c699434fd7975609e121a1a75820595a601;p=fs%2Flustre-release.git

diff --git a/lustre/obdclass/lu_object.c b/lustre/obdclass/lu_object.c
index a51dc41..e04654c 100644
--- a/lustre/obdclass/lu_object.c
+++ b/lustre/obdclass/lu_object.c
@@ -27,7 +27,7 @@
  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  *
- * Copyright (c) 2011, 2014, Intel Corporation.
+ * Copyright (c) 2011, 2015, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -69,6 +69,7 @@ enum {
 
 #define LU_SITE_BITS_MIN        12
 #define LU_SITE_BITS_MAX        24
+#define LU_SITE_BITS_MAX_CL     19
 /**
  * total 256 buckets, we don't want too many buckets because:
  * - consume too much memory
@@ -86,6 +87,7 @@ CFS_MODULE_PARM(lu_cache_nr, "l", long, 0644,
                 "Maximum number of objects in lu_object cache");
 
 static void lu_object_free(const struct lu_env *env, struct lu_object *o);
+static __u32 ls_stats_read(struct lprocfs_stats *stats, int idx);
 
 /**
  * Decrease reference counter on object. If last reference is freed, return
@@ -154,6 +156,10 @@ void lu_object_put(const struct lu_env *env, struct lu_object *o)
                         LASSERT(list_empty(&top->loh_lru));
                         list_add_tail(&top->loh_lru, &bkt->lsb_lru);
                         bkt->lsb_lru_len++;
+                        lprocfs_counter_incr(site->ls_stats, LU_SS_LRU_LEN);
+                        CDEBUG(D_INODE, "Add %p to site lru. hash: %p, bkt: %p, "
+                               "lru_len: %ld\n",
+                               o, site->ls_obj_hash, bkt, bkt->lsb_lru_len);
                         cfs_hash_bd_unlock(site->ls_obj_hash, &bd, 1);
                         return;
                 }
@@ -202,7 +208,8 @@ void lu_object_unhash(const struct lu_env *env, struct lu_object *o)
         top = o->lo_header;
         set_bit(LU_OBJECT_HEARD_BANSHEE, &top->loh_flags);
         if (!test_and_set_bit(LU_OBJECT_UNHASHED, &top->loh_flags)) {
-                struct cfs_hash *obj_hash = o->lo_dev->ld_site->ls_obj_hash;
+                struct lu_site *site = o->lo_dev->ld_site;
+                struct cfs_hash *obj_hash = site->ls_obj_hash;
                 struct cfs_hash_bd bd;
 
                 cfs_hash_bd_get_and_lock(obj_hash, &top->loh_fid, &bd, 1);
@@ -212,6 +219,7 @@ void lu_object_unhash(const struct lu_env *env, struct lu_object *o)
                         list_del_init(&top->loh_lru);
                         bkt = cfs_hash_bd_extra_get(obj_hash, &bd);
                         bkt->lsb_lru_len--;
+                        lprocfs_counter_decr(site->ls_stats, LU_SS_LRU_LEN);
                 }
                 cfs_hash_bd_del_locked(obj_hash, &bd, &top->loh_hash);
                 cfs_hash_bd_unlock(obj_hash, &bd, 1);
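The two hunks above, together with the lu_site_purge() and htable_lookup() hunks below, establish an invariant: every list_add/list_del on a bucket LRU is paired with an lprocfs_counter_incr/decr of LU_SS_LRU_LEN, so the site-wide statistic always equals the sum of the per-bucket lsb_lru_len values. A minimal user-space sketch of that pairing (types and names simplified and hypothetical, not the kernel structures):

    #include <assert.h>
    #include <stdio.h>

    #define NBKT 4

    struct site {
            long bkt_lru_len[NBKT];  /* per-bucket lengths (lsb_lru_len) */
            long lru_len_stat;       /* site-wide stat (LU_SS_LRU_LEN) */
    };

    static void lru_add(struct site *s, int bkt)
    {
            s->bkt_lru_len[bkt]++;   /* list_add_tail(&loh_lru, ...) */
            s->lru_len_stat++;       /* lprocfs_counter_incr(..., LU_SS_LRU_LEN) */
    }

    static void lru_del(struct site *s, int bkt)
    {
            s->bkt_lru_len[bkt]--;   /* list_del_init(&loh_lru) */
            s->lru_len_stat--;       /* lprocfs_counter_decr(..., LU_SS_LRU_LEN) */
    }

    int main(void)
    {
            struct site s = { { 0 }, 0 };
            long sum = 0;
            int i;

            lru_add(&s, 0);
            lru_add(&s, 1);
            lru_del(&s, 0);
            for (i = 0; i < NBKT; i++)
                    sum += s.bkt_lru_len[i];
            assert(sum == s.lru_len_stat);  /* invariant holds at every step */
            printf("lru_len = %ld\n", s.lru_len_stat);
            return 0;
    }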
@@ -350,7 +358,7 @@ int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr)
         struct cfs_hash_bd              bd2;
         struct list_head                dispose;
         int                             did_sth;
-        unsigned int                    start;
+        unsigned int                    start = 0;
         int                             count;
         int                             bnr;
         unsigned int                    i;
@@ -363,7 +371,8 @@ int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr)
          * Under LRU list lock, scan LRU list and move unreferenced objects to
          * the dispose list, removing them from LRU and hash table.
          */
-        start = s->ls_purge_start;
+        if (nr != ~0)
+                start = s->ls_purge_start;
         bnr = (nr == ~0) ? -1 : nr / (int)CFS_HASH_NBKT(s->ls_obj_hash) + 1;
 again:
         /*
@@ -389,6 +398,7 @@ int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr)
                                                        &bd2, &h->loh_hash);
                         list_move(&h->loh_lru, &dispose);
                         bkt->lsb_lru_len--;
+                        lprocfs_counter_decr(s->ls_stats, LU_SS_LRU_LEN);
                         if (did_sth == 0)
                                 did_sth = 1;
 
@@ -604,6 +614,7 @@ static struct lu_object *htable_lookup(struct lu_site *s,
         if (!list_empty(&h->loh_lru)) {
                 list_del_init(&h->loh_lru);
                 bkt->lsb_lru_len--;
+                lprocfs_counter_decr(s->ls_stats, LU_SS_LRU_LEN);
         }
         return lu_object_top(h);
 }
@@ -911,6 +922,7 @@ static unsigned long lu_htable_order(struct lu_device *top)
 {
         unsigned long cache_size;
         unsigned long bits;
+        unsigned long bits_max = LU_SITE_BITS_MAX;
 
         /*
          * For ZFS based OSDs the cache should be disabled by default.  This
@@ -924,6 +936,9 @@ static unsigned long lu_htable_order(struct lu_device *top)
                 return LU_SITE_BITS_MIN;
         }
 
+        if (strcmp(top->ld_type->ldt_name, LUSTRE_VVP_NAME) == 0)
+                bits_max = LU_SITE_BITS_MAX_CL;
+
         /*
          * Calculate hash table size, assuming that we want reasonable
          * performance when 20% of total memory is occupied by cache of
@@ -954,7 +969,8 @@ static unsigned long lu_htable_order(struct lu_device *top)
         for (bits = 1; (1 << bits) < cache_size; ++bits) {
                 ;
         }
-        return bits;
+
+        return clamp_t(typeof(bits), bits, LU_SITE_BITS_MIN, bits_max);
 }
 
 static unsigned lu_obj_hop_hash(struct cfs_hash *hs,
@@ -1050,10 +1066,8 @@ int lu_site_init(struct lu_site *s, struct lu_device *top)
         memset(s, 0, sizeof *s);
         mutex_init(&s->ls_purge_mutex);
-        bits = lu_htable_order(top);
         snprintf(name, sizeof(name), "lu_site_%s", top->ld_type->ldt_name);
-        for (bits = clamp_t(typeof(bits), bits,
-                            LU_SITE_BITS_MIN, LU_SITE_BITS_MAX);
+        for (bits = lu_htable_order(top);
              bits >= LU_SITE_BITS_MIN; bits--) {
                 s->ls_obj_hash = cfs_hash_create(name, bits, bits,
                                                  bits - LU_SITE_BKT_BITS,
@@ -1098,6 +1112,12 @@ int lu_site_init(struct lu_site *s, struct lu_device *top)
                              0, "cache_death_race", "cache_death_race");
         lprocfs_counter_init(s->ls_stats, LU_SS_LRU_PURGED,
                              0, "lru_purged", "lru_purged");
+        /*
+         * Unlike other counters, lru_len can be decremented so
+         * need lc_sum instead of just lc_count
+         */
+        lprocfs_counter_init(s->ls_stats, LU_SS_LRU_LEN,
+                             LPROCFS_CNTR_AVGMINMAX, "lru_len", "lru_len");
 
         INIT_LIST_HEAD(&s->ls_linkage);
         s->ls_top_dev = top;
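With the hunks above, lu_htable_order() now returns an already-clamped order (the clamp_t() formerly in lu_site_init() moved into it), and client (VVP) sites are capped at LU_SITE_BITS_MAX_CL = 19 bits instead of 24. A standalone sketch of the sizing logic; the cache_size inputs here are made-up values, not the kernel's totalram-based estimate:

    #include <stdio.h>

    #define LU_SITE_BITS_MIN    12
    #define LU_SITE_BITS_MAX    24
    #define LU_SITE_BITS_MAX_CL 19  /* lower cap for client (VVP) sites */

    static unsigned long htable_order(unsigned long cache_size, int is_client)
    {
            unsigned long bits_max = is_client ? LU_SITE_BITS_MAX_CL
                                               : LU_SITE_BITS_MAX;
            unsigned long bits;

            /* smallest power-of-two order covering the target size */
            for (bits = 1; (1UL << bits) < cache_size; ++bits)
                    ;
            /* open-coded clamp_t(typeof(bits), bits, MIN, bits_max) */
            if (bits < LU_SITE_BITS_MIN)
                    bits = LU_SITE_BITS_MIN;
            if (bits > bits_max)
                    bits = bits_max;
            return bits;
    }

    int main(void)
    {
            printf("server: %lu bits\n", htable_order(1UL << 22, 0)); /* 22 */
            printf("client: %lu bits\n", htable_order(1UL << 22, 1)); /* 19 */
            return 0;
    }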
@@ -1364,7 +1384,7 @@ enum {
 
 static struct lu_context_key *lu_keys[LU_CONTEXT_KEY_NR] = { NULL, };
 
-static DEFINE_SPINLOCK(lu_keys_guard);
+DEFINE_RWLOCK(lu_keys_guard);
 static atomic_t lu_key_initing_cnt = ATOMIC_INIT(0);
 
 /**
@@ -1389,7 +1409,7 @@ int lu_context_key_register(struct lu_context_key *key)
         LASSERT(key->lct_owner != NULL);
 
         result = -ENFILE;
-        spin_lock(&lu_keys_guard);
+        write_lock(&lu_keys_guard);
         for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
                 if (lu_keys[i] == NULL) {
                         key->lct_index = i;
@@ -1401,7 +1421,7 @@ int lu_context_key_register(struct lu_context_key *key)
                         break;
                 }
         }
-        spin_unlock(&lu_keys_guard);
+        write_unlock(&lu_keys_guard);
         return result;
 }
 EXPORT_SYMBOL(lu_context_key_register);
@@ -1440,7 +1460,7 @@ void lu_context_key_degister(struct lu_context_key *key)
         lu_context_key_quiesce(key);
 
         ++key_set_version;
-        spin_lock(&lu_keys_guard);
+        write_lock(&lu_keys_guard);
         key_fini(&lu_shrink_env.le_ctx, key->lct_index);
 
         /**
@@ -1448,18 +1468,18 @@ void lu_context_key_degister(struct lu_context_key *key)
          * run lu_context_key::lct_fini() method.
          */
         while (atomic_read(&key->lct_used) > 1) {
-                spin_unlock(&lu_keys_guard);
+                write_unlock(&lu_keys_guard);
                 CDEBUG(D_INFO, "lu_context_key_degister: \"%s\" %p, %d\n",
                        key->lct_owner ? key->lct_owner->name : "", key,
                        atomic_read(&key->lct_used));
                 schedule();
-                spin_lock(&lu_keys_guard);
+                write_lock(&lu_keys_guard);
         }
         if (lu_keys[key->lct_index]) {
                 lu_keys[key->lct_index] = NULL;
                 lu_ref_fini(&key->lct_reference);
         }
-        spin_unlock(&lu_keys_guard);
+        write_unlock(&lu_keys_guard);
 
         LASSERTF(atomic_read(&key->lct_used) == 1,
                  "key has instances: %d\n",
@@ -1584,7 +1604,7 @@ void lu_context_key_quiesce(struct lu_context_key *key)
                 /*
                  * XXX memory barrier has to go here.
                  */
-                spin_lock(&lu_keys_guard);
+                write_lock(&lu_keys_guard);
                 key->lct_tags |= LCT_QUIESCENT;
 
                 /**
@@ -1592,20 +1612,20 @@ void lu_context_key_quiesce(struct lu_context_key *key)
                  * have completed.
                  */
                 while (atomic_read(&lu_key_initing_cnt) > 0) {
-                        spin_unlock(&lu_keys_guard);
+                        write_unlock(&lu_keys_guard);
                         CDEBUG(D_INFO, "lu_context_key_quiesce: \"%s\""
                                " %p, %d (%d)\n",
                                key->lct_owner ? key->lct_owner->name : "",
                                key, atomic_read(&key->lct_used),
                                atomic_read(&lu_key_initing_cnt));
                         schedule();
-                        spin_lock(&lu_keys_guard);
+                        write_lock(&lu_keys_guard);
                 }
 
                 list_for_each_entry(ctx, &lu_context_remembered,
                                     lc_remember)
                         key_fini(ctx, key->lct_index);
-                spin_unlock(&lu_keys_guard);
+                write_unlock(&lu_keys_guard);
                 ++key_set_version;
         }
 }
@@ -1643,9 +1663,9 @@ static int keys_fill(struct lu_context *ctx)
          * An atomic_t variable is still used, in order not to reacquire the
          * lock when decrementing the counter.
          */
-        spin_lock(&lu_keys_guard);
+        read_lock(&lu_keys_guard);
         atomic_inc(&lu_key_initing_cnt);
-        spin_unlock(&lu_keys_guard);
+        read_unlock(&lu_keys_guard);
 
         LINVRNT(ctx->lc_value != NULL);
         for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
@@ -1714,9 +1734,9 @@ int lu_context_init(struct lu_context *ctx, __u32 tags)
         ctx->lc_state = LCS_INITIALIZED;
         ctx->lc_tags = tags;
         if (tags & LCT_REMEMBER) {
-                spin_lock(&lu_keys_guard);
+                write_lock(&lu_keys_guard);
                 list_add(&ctx->lc_remember, &lu_context_remembered);
-                spin_unlock(&lu_keys_guard);
+                write_unlock(&lu_keys_guard);
         } else {
                 INIT_LIST_HEAD(&ctx->lc_remember);
         }
@@ -1742,10 +1762,10 @@ void lu_context_fini(struct lu_context *ctx)
                 keys_fini(ctx);
         } else {
                 /* could race with key degister */
-                spin_lock(&lu_keys_guard);
+                write_lock(&lu_keys_guard);
                 keys_fini(ctx);
                 list_del_init(&ctx->lc_remember);
-                spin_unlock(&lu_keys_guard);
+                write_unlock(&lu_keys_guard);
         }
 }
 EXPORT_SYMBOL(lu_context_fini);
@@ -1773,7 +1793,7 @@ void lu_context_exit(struct lu_context *ctx)
                 for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
                         /* could race with key quiescency */
                         if (ctx->lc_tags & LCT_REMEMBER)
-                                spin_lock(&lu_keys_guard);
+                                read_lock(&lu_keys_guard);
                         if (ctx->lc_value[i] != NULL) {
                                 struct lu_context_key *key;
 
@@ -1784,7 +1804,7 @@ void lu_context_exit(struct lu_context *ctx)
                                        key, ctx->lc_value[i]);
                         }
                         if (ctx->lc_tags & LCT_REMEMBER)
-                                spin_unlock(&lu_keys_guard);
+                                read_unlock(&lu_keys_guard);
                 }
         }
 }
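The conversion above follows the usual rwlock split: paths that only read lu_keys[] (keys_fill(), lu_context_exit()) now take lu_keys_guard shared and can proceed in parallel, while paths that mutate the key table or the remembered-contexts list (register, degister, quiesce, init/fini, tags updates) take it exclusive. Roughly this shape, modeled with POSIX rwlocks (names illustrative, not the kernel API):

    #include <pthread.h>

    #define KEY_NR 16

    /* stand-ins for lu_keys[] and lu_keys_guard */
    static pthread_rwlock_t keys_guard = PTHREAD_RWLOCK_INITIALIZER;
    static void *keys[KEY_NR];

    /* mutators lock exclusive, as write_lock(&lu_keys_guard) does */
    static void key_register(int i, void *key)
    {
            pthread_rwlock_wrlock(&keys_guard);
            keys[i] = key;
            pthread_rwlock_unlock(&keys_guard);
    }

    /* hot read-only paths lock shared, as read_lock(&lu_keys_guard) does */
    static void *key_lookup(int i)
    {
            void *k;

            pthread_rwlock_rdlock(&keys_guard);
            k = keys[i];    /* many readers may run here concurrently */
            pthread_rwlock_unlock(&keys_guard);
            return k;
    }

    int main(void)
    {
            int dummy;

            key_register(3, &dummy);
            return key_lookup(3) == &dummy ? 0 : 1;
    }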
@@ -1812,37 +1832,37 @@ __u32 lu_session_tags_default = 0;
 
 void lu_context_tags_update(__u32 tags)
 {
-        spin_lock(&lu_keys_guard);
+        write_lock(&lu_keys_guard);
         lu_context_tags_default |= tags;
         key_set_version++;
-        spin_unlock(&lu_keys_guard);
+        write_unlock(&lu_keys_guard);
 }
 EXPORT_SYMBOL(lu_context_tags_update);
 
 void lu_context_tags_clear(__u32 tags)
 {
-        spin_lock(&lu_keys_guard);
+        write_lock(&lu_keys_guard);
         lu_context_tags_default &= ~tags;
         key_set_version++;
-        spin_unlock(&lu_keys_guard);
+        write_unlock(&lu_keys_guard);
 }
 EXPORT_SYMBOL(lu_context_tags_clear);
 
 void lu_session_tags_update(__u32 tags)
 {
-        spin_lock(&lu_keys_guard);
+        write_lock(&lu_keys_guard);
         lu_session_tags_default |= tags;
         key_set_version++;
-        spin_unlock(&lu_keys_guard);
+        write_unlock(&lu_keys_guard);
 }
 EXPORT_SYMBOL(lu_session_tags_update);
 
 void lu_session_tags_clear(__u32 tags)
 {
-        spin_lock(&lu_keys_guard);
+        write_lock(&lu_keys_guard);
         lu_session_tags_default &= ~tags;
         key_set_version++;
-        spin_unlock(&lu_keys_guard);
+        write_unlock(&lu_keys_guard);
 }
 EXPORT_SYMBOL(lu_session_tags_clear);
 
@@ -1944,10 +1964,22 @@ static void lu_site_stats_get(struct cfs_hash *hs,
         }
 }
 
+/*
+ * lu_cache_shrink_count returns the number of cached objects that are
+ * candidates to be freed by shrink_slab(). A counter, which tracks
+ * the number of items in the site's lru, is maintained in the per cpu
+ * stats of each site. The counter is incremented when an object is added
+ * to a site's lru and decremented when one is removed. The number of
+ * free-able objects is the sum of all per cpu counters for all sites.
+ *
+ * Using a per cpu counter is a compromise solution to concurrent access:
+ * lu_object_put() can update the counter without locking the site and
+ * lu_cache_shrink_count can sum the counters without locking each
+ * ls_obj_hash bucket.
+ */
 static unsigned long lu_cache_shrink_count(struct shrinker *sk,
                                            struct shrink_control *sc)
 {
-        lu_site_stats_t stats;
         struct lu_site *s;
         struct lu_site *tmp;
         unsigned long cached = 0;
@@ -1957,14 +1989,14 @@ static unsigned long lu_cache_shrink_count(struct shrinker *sk,
 
         mutex_lock(&lu_sites_guard);
         list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) {
-                memset(&stats, 0, sizeof(stats));
-                lu_site_stats_get(s->ls_obj_hash, &stats, 0);
-                cached += stats.lss_total - stats.lss_busy;
+                cached += ls_stats_read(s->ls_stats, LU_SS_LRU_LEN);
         }
         mutex_unlock(&lu_sites_guard);
 
         cached = (cached / 100) * sysctl_vfs_cache_pressure;
-        CDEBUG(D_INODE, "%ld objects cached\n", cached);
+        CDEBUG(D_INODE, "%ld objects cached, cache pressure %d\n",
+               cached, sysctl_vfs_cache_pressure);
+
         return cached;
 }
 
@@ -2038,11 +2070,10 @@ static int lu_cache_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
 
         CDEBUG(D_INODE, "Shrink %lu objects\n", scv.nr_to_scan);
 
-        lu_cache_shrink_scan(shrinker, &scv);
+        if (scv.nr_to_scan != 0)
+                lu_cache_shrink_scan(shrinker, &scv);
 
         cached = lu_cache_shrink_count(shrinker, &scv);
-        if (scv.nr_to_scan == 0)
-                CDEBUG(D_INODE, "%d objects cached\n", cached);
 
         return cached;
 }
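The point of the count path: shrink_slab() may invoke the count callback with nr_to_scan == 0 purely to poll, so reading the maintained lru_len counter makes that poll O(1) instead of a walk over every ls_obj_hash bucket, and the old-style entry point above now skips the scan entirely for such polls. A toy model of the count/scan split (user-space, hypothetical names; the scan loop stands in for lu_site_purge()):

    #include <stdio.h>

    static long lru_len_stat;            /* kept in sync as in the hunks above */
    static int vfs_cache_pressure = 100; /* mirrors sysctl_vfs_cache_pressure */

    static unsigned long cache_shrink_count(void)
    {
            /* O(1) read; replaces the lu_site_stats_get() bucket walk */
            return (unsigned long)(lru_len_stat * vfs_cache_pressure / 100);
    }

    static unsigned long cache_shrink_scan(unsigned long nr_to_scan)
    {
            unsigned long freed = 0;

            /* stand-in for purging nr_to_scan objects off the LRU */
            while (nr_to_scan-- && lru_len_stat > 0) {
                    lru_len_stat--;
                    freed++;
            }
            return freed;
    }

    int main(void)
    {
            lru_len_stat = 1000;
            printf("count: %lu\n", cache_shrink_count());   /* poll is cheap */
            printf("freed: %lu\n", cache_shrink_scan(64));
            printf("count: %lu\n", cache_shrink_count());
            return 0;
    }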
@@ -2170,12 +2201,19 @@ void lu_global_fini(void)
 
 static __u32 ls_stats_read(struct lprocfs_stats *stats, int idx)
 {
 #ifdef CONFIG_PROC_FS
-        struct lprocfs_counter ret;
+        struct lprocfs_counter ret;
 
-        lprocfs_stats_collect(stats, idx, &ret);
-        return (__u32)ret.lc_count;
+        lprocfs_stats_collect(stats, idx, &ret);
+        if (idx == LU_SS_LRU_LEN)
+                /*
+                 * protect against counter on cpu A being decremented
+                 * before counter is incremented on cpu B; unlikely
+                 */
+                return (__u32)((ret.lc_sum > 0) ? ret.lc_sum : 0);
+        else
+                return (__u32)ret.lc_count;
 #else
-        return 0;
+        return 0;
 #endif
 }
 
@@ -2190,7 +2228,7 @@ int lu_site_stats_seq_print(const struct lu_site *s, struct seq_file *m)
         memset(&stats, 0, sizeof(stats));
         lu_site_stats_get(s->ls_obj_hash, &stats, 1);
 
-        return seq_printf(m, "%d/%d %d/%d %d %d %d %d %d %d %d\n",
+        return seq_printf(m, "%d/%d %d/%d %d %d %d %d %d %d %d %d\n",
                           stats.lss_busy,
                           stats.lss_total,
                           stats.lss_populated,
@@ -2201,31 +2239,11 @@ int lu_site_stats_seq_print(const struct lu_site *s, struct seq_file *m)
                           ls_stats_read(s->ls_stats, LU_SS_CACHE_MISS),
                           ls_stats_read(s->ls_stats, LU_SS_CACHE_RACE),
                           ls_stats_read(s->ls_stats, LU_SS_CACHE_DEATH_RACE),
-                          ls_stats_read(s->ls_stats, LU_SS_LRU_PURGED));
+                          ls_stats_read(s->ls_stats, LU_SS_LRU_PURGED),
+                          ls_stats_read(s->ls_stats, LU_SS_LRU_LEN));
 }
 EXPORT_SYMBOL(lu_site_stats_seq_print);
 
-int lu_site_stats_print(const struct lu_site *s, char *page, int count)
-{
-        lu_site_stats_t stats;
-
-        memset(&stats, 0, sizeof(stats));
-        lu_site_stats_get(s->ls_obj_hash, &stats, 1);
-
-        return snprintf(page, count, "%d/%d %d/%d %d %d %d %d %d %d %d\n",
-                        stats.lss_busy,
-                        stats.lss_total,
-                        stats.lss_populated,
-                        CFS_HASH_NHLIST(s->ls_obj_hash),
-                        stats.lss_max_search,
-                        ls_stats_read(s->ls_stats, LU_SS_CREATED),
-                        ls_stats_read(s->ls_stats, LU_SS_CACHE_HIT),
-                        ls_stats_read(s->ls_stats, LU_SS_CACHE_MISS),
-                        ls_stats_read(s->ls_stats, LU_SS_CACHE_RACE),
-                        ls_stats_read(s->ls_stats, LU_SS_CACHE_DEATH_RACE),
-                        ls_stats_read(s->ls_stats, LU_SS_LRU_PURGED));
-}
-
 /**
  * Helper function to initialize a number of kmem slab caches at once.
  */
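The lc_sum > 0 test in ls_stats_read() above exists because, with per-cpu counters, the decrement for an object can land on a different CPU than its increment, so a snapshot of the summed deltas can be transiently negative; the read side clamps that to zero rather than returning a huge unsigned value. A small model of that read (assuming a fixed CPU count; a plain loop stands in for lprocfs_stats_collect()):

    #include <stdio.h>

    #define NR_CPUS 4

    static long percpu_delta[NR_CPUS];  /* signed per-cpu lru_len deltas */

    static unsigned int lru_len_read(void)
    {
            long sum = 0;
            int cpu;

            /* sum per-cpu contributions, as lprocfs_stats_collect() does */
            for (cpu = 0; cpu < NR_CPUS; cpu++)
                    sum += percpu_delta[cpu];
            /* the (ret.lc_sum > 0) guard: clamp transient negatives to 0 */
            return sum > 0 ? (unsigned int)sum : 0;
    }

    int main(void)
    {
            percpu_delta[0] = -1;   /* decrement observed before its increment */
            printf("lru_len = %u\n", lru_len_read());   /* 0, not a huge value */
            percpu_delta[1] = 5;
            printf("lru_len = %u\n", lru_len_read());   /* 4 */
            return 0;
    }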