X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Fobdclass%2Flu_object.c;h=e04654c962c082d5bb41f5b7e4b5ef89eeffd528;hb=522c1eb4d2f5faf1fa87be07d9617df1439fc0d6;hp=75e422a9b86d1b0a3d8ea3b8588991082d2799a0;hpb=81f7a3ceea4039f8f81d3aa009079aa27b3fb1b2;p=fs%2Flustre-release.git

diff --git a/lustre/obdclass/lu_object.c b/lustre/obdclass/lu_object.c
index 75e422a..e04654c 100644
--- a/lustre/obdclass/lu_object.c
+++ b/lustre/obdclass/lu_object.c
@@ -27,7 +27,7 @@
  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  *
- * Copyright (c) 2011, 2013, Intel Corporation.
+ * Copyright (c) 2011, 2015, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -69,6 +69,7 @@ enum {
 
 #define LU_SITE_BITS_MIN 12
 #define LU_SITE_BITS_MAX 24
+#define LU_SITE_BITS_MAX_CL 19
 /**
  * total 256 buckets, we don't want too many buckets because:
  * - consume too much memory
@@ -86,6 +87,7 @@ CFS_MODULE_PARM(lu_cache_nr, "l", long, 0644,
         "Maximum number of objects in lu_object cache");
 
 static void lu_object_free(const struct lu_env *env, struct lu_object *o);
+static __u32 ls_stats_read(struct lprocfs_stats *stats, int idx);
 
 /**
  * Decrease reference counter on object. If last reference is freed, return
@@ -98,7 +100,7 @@ void lu_object_put(const struct lu_env *env, struct lu_object *o)
         struct lu_object_header *top;
         struct lu_site *site;
         struct lu_object *orig;
-        cfs_hash_bd_t bd;
+        struct cfs_hash_bd bd;
         const struct lu_fid *fid;
 
         top = o->lo_header;
@@ -140,8 +142,6 @@ void lu_object_put(const struct lu_env *env, struct lu_object *o)
                         return;
                 }
 
-                LASSERT(bkt->lsb_busy > 0);
-                bkt->lsb_busy--;
                 /*
                  * When last reference is released, iterate over object
                  * layers, and notify them that object is no longer busy.
@@ -151,15 +151,21 @@ void lu_object_put(const struct lu_env *env, struct lu_object *o)
                         o->lo_ops->loo_object_release(env, o);
         }
 
-        if (!lu_object_is_dying(top)) {
+        if (!lu_object_is_dying(top) &&
+            (lu_object_exists(orig) || lu_object_is_cl(orig))) {
                 LASSERT(list_empty(&top->loh_lru));
                 list_add_tail(&top->loh_lru, &bkt->lsb_lru);
+                bkt->lsb_lru_len++;
+                lprocfs_counter_incr(site->ls_stats, LU_SS_LRU_LEN);
+                CDEBUG(D_INODE, "Add %p to site lru. hash: %p, bkt: %p, "
+                       "lru_len: %ld\n",
+                       o, site->ls_obj_hash, bkt, bkt->lsb_lru_len);
                 cfs_hash_bd_unlock(site->ls_obj_hash, &bd, 1);
                 return;
         }
 
         /*
-         * If object is dying (will not be cached), removed it
+         * If object is dying (will not be cached) then remove it
          * from hash table and LRU.
          *
         * This is done with hash table and LRU lists locked. As the only
@@ -202,11 +208,19 @@ void lu_object_unhash(const struct lu_env *env, struct lu_object *o)
         top = o->lo_header;
         set_bit(LU_OBJECT_HEARD_BANSHEE, &top->loh_flags);
         if (!test_and_set_bit(LU_OBJECT_UNHASHED, &top->loh_flags)) {
-                cfs_hash_t *obj_hash = o->lo_dev->ld_site->ls_obj_hash;
-                cfs_hash_bd_t bd;
+                struct lu_site *site = o->lo_dev->ld_site;
+                struct cfs_hash *obj_hash = site->ls_obj_hash;
+                struct cfs_hash_bd bd;
 
                 cfs_hash_bd_get_and_lock(obj_hash, &top->loh_fid, &bd, 1);
-                list_del_init(&top->loh_lru);
+                if (!list_empty(&top->loh_lru)) {
+                        struct lu_site_bkt_data *bkt;
+
+                        list_del_init(&top->loh_lru);
+                        bkt = cfs_hash_bd_extra_get(obj_hash, &bd);
+                        bkt->lsb_lru_len--;
+                        lprocfs_counter_decr(site->ls_stats, LU_SS_LRU_LEN);
+                }
                 cfs_hash_bd_del_locked(obj_hash, &bd, &top->loh_hash);
                 cfs_hash_bd_unlock(obj_hash, &bd, 1);
         }
@@ -340,11 +354,11 @@ int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr)
         struct lu_object_header *h;
         struct lu_object_header *temp;
         struct lu_site_bkt_data *bkt;
-        cfs_hash_bd_t bd;
-        cfs_hash_bd_t bd2;
+        struct cfs_hash_bd bd;
+        struct cfs_hash_bd bd2;
         struct list_head dispose;
         int did_sth;
-        unsigned int start;
+        unsigned int start = 0;
         int count;
         int bnr;
         unsigned int i;
@@ -357,7 +371,8 @@ int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr)
          * Under LRU list lock, scan LRU list and move unreferenced objects to
          * the dispose list, removing them from LRU and hash table.
          */
-        start = s->ls_purge_start;
+        if (nr != ~0)
+                start = s->ls_purge_start;
         bnr = (nr == ~0) ? -1 : nr / (int)CFS_HASH_NBKT(s->ls_obj_hash) + 1;
  again:
         /*
@@ -382,6 +397,8 @@ int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr)
                         cfs_hash_bd_del_locked(s->ls_obj_hash,
                                                &bd2, &h->loh_hash);
                         list_move(&h->loh_lru, &dispose);
+                        bkt->lsb_lru_len--;
+                        lprocfs_counter_decr(s->ls_stats, LU_SS_LRU_LEN);
                         if (did_sth == 0)
                                 did_sth = 1;
 
@@ -461,7 +478,7 @@ LU_KEY_INIT_FINI(lu_global, struct lu_cdebug_data);
  * Key, holding temporary buffer. This key is registered very early by
  * lu_global_init().
  */
-struct lu_context_key lu_global_key = {
+static struct lu_context_key lu_global_key = {
         .lct_tags = LCT_MD_THREAD | LCT_DT_THREAD |
                     LCT_MG_THREAD | LCT_CL_THREAD | LCT_LOCAL,
         .lct_init = lu_global_key_init,
@@ -565,10 +582,9 @@ int lu_object_invariant(const struct lu_object *o)
         }
         return 1;
 }
-EXPORT_SYMBOL(lu_object_invariant);
 
 static struct lu_object *htable_lookup(struct lu_site *s,
-                                       cfs_hash_bd_t *bd,
+                                       struct cfs_hash_bd *bd,
                                        const struct lu_fid *f,
                                        wait_queue_t *waiter,
                                        __u64 *version)
@@ -595,7 +611,11 @@ static struct lu_object *htable_lookup(struct lu_site *s,
         if (likely(!lu_object_is_dying(h))) {
                 cfs_hash_get(s->ls_obj_hash, hnode);
                 lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_HIT);
-                list_del_init(&h->loh_lru);
+                if (!list_empty(&h->loh_lru)) {
+                        list_del_init(&h->loh_lru);
+                        bkt->lsb_lru_len--;
+                        lprocfs_counter_decr(s->ls_stats, LU_SS_LRU_LEN);
+                }
                 return lu_object_top(h);
         }
 
@@ -606,7 +626,7 @@ static struct lu_object *htable_lookup(struct lu_site *s,
          */
 
         if (likely(waiter != NULL)) {
-                init_waitqueue_entry_current(waiter);
+                init_waitqueue_entry(waiter, current);
                 add_wait_queue(&bkt->lsb_marche_funebre, waiter);
                 set_current_state(TASK_UNINTERRUPTIBLE);
                 lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_DEATH_RACE);
@@ -615,31 +635,6 @@ static struct lu_object *htable_lookup(struct lu_site *s,
         return ERR_PTR(-EAGAIN);
 }
 
-static struct lu_object *htable_lookup_nowait(struct lu_site *s,
-                                              cfs_hash_bd_t *bd,
-                                              const struct lu_fid *f)
-{
-        struct hlist_node *hnode;
-        struct lu_object_header *h;
-
-        /* cfs_hash_bd_peek_locked is a somehow "internal" function
-         * of cfs_hash, it doesn't add refcount on object. */
-        hnode = cfs_hash_bd_peek_locked(s->ls_obj_hash, bd, (void *)f);
-        if (hnode == NULL) {
-                lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_MISS);
-                return ERR_PTR(-ENOENT);
-        }
-
-        h = container_of0(hnode, struct lu_object_header, loh_hash);
-        if (unlikely(lu_object_is_dying(h)))
-                return ERR_PTR(-ENOENT);
-
-        cfs_hash_get(s->ls_obj_hash, hnode);
-        lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_HIT);
-        list_del_init(&h->loh_lru);
-        return lu_object_top(h);
-}
-
 /**
  * Search cache for an object with the fid \a f. If such object is found,
  * return it. Otherwise, create new object, insert it into cache and return
@@ -683,9 +678,8 @@ static struct lu_object *lu_object_new(const struct lu_env *env,
                                        const struct lu_object_conf *conf)
 {
         struct lu_object *o;
-        cfs_hash_t *hs;
-        cfs_hash_bd_t bd;
-        struct lu_site_bkt_data *bkt;
+        struct cfs_hash *hs;
+        struct cfs_hash_bd bd;
 
         o = lu_object_alloc(env, dev, f, conf);
         if (unlikely(IS_ERR(o)))
@@ -693,9 +687,7 @@ static struct lu_object *lu_object_new(const struct lu_env *env,
 
         hs = dev->ld_site->ls_obj_hash;
         cfs_hash_bd_get_and_lock(hs, (void *)f, &bd, 1);
-        bkt = cfs_hash_bd_extra_get(hs, &bd);
         cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash);
-        bkt->lsb_busy++;
         cfs_hash_bd_unlock(hs, &bd, 1);
 
         lu_object_limit(env, dev);
@@ -715,8 +707,8 @@ static struct lu_object *lu_object_find_try(const struct lu_env *env,
         struct lu_object *o;
         struct lu_object *shadow;
         struct lu_site *s;
-        cfs_hash_t *hs;
-        cfs_hash_bd_t bd;
+        struct cfs_hash *hs;
+        struct cfs_hash_bd bd;
         __u64 version = 0;
 
         /*
@@ -764,11 +756,7 @@ static struct lu_object *lu_object_find_try(const struct lu_env *env,
 
         shadow = htable_lookup(s, &bd, f, waiter, &version);
         if (likely(IS_ERR(shadow) && PTR_ERR(shadow) == -ENOENT)) {
-                struct lu_site_bkt_data *bkt;
-
-                bkt = cfs_hash_bd_extra_get(hs, &bd);
                 cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash);
-                bkt->lsb_busy++;
                 cfs_hash_bd_unlock(hs, &bd, 1);
 
                 lu_object_limit(env, dev);
@@ -796,13 +784,10 @@ struct lu_object *lu_object_find_at(const struct lu_env *env,
         struct lu_object *obj;
         wait_queue_t wait;
 
-        while (1) {
-                if (conf != NULL && conf->loc_flags & LOC_F_NOWAIT) {
-                        obj = lu_object_find_try(env, dev, f, conf, NULL);
-
-                        return obj;
-                }
+        if (conf != NULL && conf->loc_flags & LOC_F_NOWAIT)
+                return lu_object_find_try(env, dev, f, conf, NULL);
 
+        while (1) {
                 obj = lu_object_find_try(env, dev, f, conf, &wait);
                 if (obj != ERR_PTR(-EAGAIN))
                         return obj;
@@ -810,7 +795,7 @@ struct lu_object *lu_object_find_at(const struct lu_env *env,
                  * lu_object_find_try() already added waiter into the
                  * wait queue.
                  */
-                waitq_wait(&wait, TASK_UNINTERRUPTIBLE);
+                schedule();
                 bkt = lu_site_bkt_from_fid(dev->ld_site, (void *)f);
                 remove_wait_queue(&bkt->lsb_marche_funebre, &wait);
         }
@@ -818,30 +803,6 @@ EXPORT_SYMBOL(lu_object_find_at);
 
 /**
- * Try to find the object in cache without waiting for the dead object
- * to be released nor allocating object if no cached one was found.
- *
- * The found object will be set as LU_OBJECT_HEARD_BANSHEE for purging.
- */
-void lu_object_purge(const struct lu_env *env, struct lu_device *dev,
-                     const struct lu_fid *f)
-{
-        struct lu_site *s = dev->ld_site;
-        cfs_hash_t *hs = s->ls_obj_hash;
-        cfs_hash_bd_t bd;
-        struct lu_object *o;
-
-        cfs_hash_bd_get_and_lock(hs, f, &bd, 1);
-        o = htable_lookup_nowait(s, &bd, f);
-        cfs_hash_bd_unlock(hs, &bd, 1);
-        if (!IS_ERR(o)) {
-                set_bit(LU_OBJECT_HEARD_BANSHEE, &o->lo_header->loh_flags);
-                lu_object_put(env, o);
-        }
-}
-EXPORT_SYMBOL(lu_object_purge);
-
-/**
  * Find object with given fid, and return its slice belonging to given device.
  */
 struct lu_object *lu_object_find_slice(const struct lu_env *env,
@@ -918,7 +879,7 @@ struct lu_site_print_arg {
 };
 
 static int
-lu_site_obj_print(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+lu_site_obj_print(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                   struct hlist_node *hnode, void *data)
 {
         struct lu_site_print_arg *arg = (struct lu_site_print_arg *)data;
@@ -961,6 +922,7 @@ static unsigned long lu_htable_order(struct lu_device *top)
 {
         unsigned long cache_size;
         unsigned long bits;
+        unsigned long bits_max = LU_SITE_BITS_MAX;
 
         /*
          * For ZFS based OSDs the cache should be disabled by default. This
@@ -974,6 +936,9 @@ static unsigned long lu_htable_order(struct lu_device *top)
                 return LU_SITE_BITS_MIN;
         }
 
+        if (strcmp(top->ld_type->ldt_name, LUSTRE_VVP_NAME) == 0)
+                bits_max = LU_SITE_BITS_MAX_CL;
+
         /*
          * Calculate hash table size, assuming that we want reasonable
          * performance when 20% of total memory is occupied by cache of
@@ -1004,10 +969,11 @@ static unsigned long lu_htable_order(struct lu_device *top)
         for (bits = 1; (1 << bits) < cache_size; ++bits) {
                 ;
         }
-        return bits;
+
+        return clamp_t(typeof(bits), bits, LU_SITE_BITS_MIN, bits_max);
 }
 
-static unsigned lu_obj_hop_hash(cfs_hash_t *hs,
+static unsigned lu_obj_hop_hash(struct cfs_hash *hs,
                                 const void *key, unsigned mask)
 {
         struct lu_fid *fid = (struct lu_fid *)key;
@@ -1047,27 +1013,20 @@ static int lu_obj_hop_keycmp(const void *key, struct hlist_node *hnode)
         return lu_fid_eq(&h->loh_fid, (struct lu_fid *)key);
 }
 
-static void lu_obj_hop_get(cfs_hash_t *hs, struct hlist_node *hnode)
+static void lu_obj_hop_get(struct cfs_hash *hs, struct hlist_node *hnode)
 {
         struct lu_object_header *h;
 
         h = hlist_entry(hnode, struct lu_object_header, loh_hash);
-        if (atomic_add_return(1, &h->loh_ref) == 1) {
-                struct lu_site_bkt_data *bkt;
-                cfs_hash_bd_t bd;
-
-                cfs_hash_bd_get(hs, &h->loh_fid, &bd);
-                bkt = cfs_hash_bd_extra_get(hs, &bd);
-                bkt->lsb_busy++;
-        }
+        atomic_inc(&h->loh_ref);
 }
 
-static void lu_obj_hop_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
+static void lu_obj_hop_put_locked(struct cfs_hash *hs, struct hlist_node *hnode)
 {
         LBUG(); /* we should never called it */
 }
 
-cfs_hash_ops_t lu_site_hash_ops = {
+static struct cfs_hash_ops lu_site_hash_ops = {
         .hs_hash = lu_obj_hop_hash,
         .hs_key = lu_obj_hop_key,
         .hs_keycmp = lu_obj_hop_keycmp,
@@ -1099,7 +1058,7 @@ EXPORT_SYMBOL(lu_dev_del_linkage);
 int lu_site_init(struct lu_site *s, struct lu_device *top)
 {
         struct lu_site_bkt_data *bkt;
-        cfs_hash_bd_t bd;
+        struct cfs_hash_bd bd;
         char name[16];
         unsigned long bits;
         unsigned int i;
@@ -1107,10 +1066,8 @@ int lu_site_init(struct lu_site *s, struct lu_device *top)
 
         memset(s, 0, sizeof *s);
         mutex_init(&s->ls_purge_mutex);
-        bits = lu_htable_order(top);
         snprintf(name, sizeof(name), "lu_site_%s", top->ld_type->ldt_name);
-        for (bits = clamp_t(typeof(bits), bits,
-                            LU_SITE_BITS_MIN, LU_SITE_BITS_MAX);
+        for (bits = lu_htable_order(top);
              bits >= LU_SITE_BITS_MIN; bits--) {
                 s->ls_obj_hash = cfs_hash_create(name, bits, bits,
                                                  bits - LU_SITE_BKT_BITS,
@@ -1155,6 +1112,12 @@ int lu_site_init(struct lu_site *s, struct lu_device *top)
                              0, "cache_death_race", "cache_death_race");
         lprocfs_counter_init(s->ls_stats, LU_SS_LRU_PURGED,
                              0, "lru_purged", "lru_purged");
+        /*
+         * Unlike other counters, lru_len can be decremented so
+         * need lc_sum instead of just lc_count
+         */
+        lprocfs_counter_init(s->ls_stats, LU_SS_LRU_LEN,
+                             LPROCFS_CNTR_AVGMINMAX, "lru_len", "lru_len");
 
         INIT_LIST_HEAD(&s->ls_linkage);
         s->ls_top_dev = top;
@@ -1411,7 +1374,6 @@ void lu_stack_fini(const struct lu_env *env, struct lu_device *top)
                 }
         }
 }
-EXPORT_SYMBOL(lu_stack_fini);
 
 enum {
         /**
@@ -1422,7 +1384,8 @@ enum {
 
 static struct lu_context_key *lu_keys[LU_CONTEXT_KEY_NR] = { NULL, };
 
-static DEFINE_SPINLOCK(lu_keys_guard);
+DEFINE_RWLOCK(lu_keys_guard);
+static atomic_t lu_key_initing_cnt = ATOMIC_INIT(0);
 
 /**
  * Global counter incremented whenever key is registered, unregistered,
@@ -1446,7 +1409,7 @@ int lu_context_key_register(struct lu_context_key *key)
         LASSERT(key->lct_owner != NULL);
 
         result = -ENFILE;
-        spin_lock(&lu_keys_guard);
+        write_lock(&lu_keys_guard);
         for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
                 if (lu_keys[i] == NULL) {
                         key->lct_index = i;
@@ -1458,7 +1421,7 @@ int lu_context_key_register(struct lu_context_key *key)
                         break;
                 }
         }
-        spin_unlock(&lu_keys_guard);
+        write_unlock(&lu_keys_guard);
         return result;
 }
 EXPORT_SYMBOL(lu_context_key_register);
@@ -1497,13 +1460,26 @@ void lu_context_key_degister(struct lu_context_key *key)
         lu_context_key_quiesce(key);
 
         ++key_set_version;
-        spin_lock(&lu_keys_guard);
+        write_lock(&lu_keys_guard);
         key_fini(&lu_shrink_env.le_ctx, key->lct_index);
+
+        /**
+         * Wait until all transient contexts referencing this key have
+         * run lu_context_key::lct_fini() method.
+         */
+        while (atomic_read(&key->lct_used) > 1) {
+                write_unlock(&lu_keys_guard);
+                CDEBUG(D_INFO, "lu_context_key_degister: \"%s\" %p, %d\n",
+                       key->lct_owner ? key->lct_owner->name : "", key,
+                       atomic_read(&key->lct_used));
+                schedule();
+                write_lock(&lu_keys_guard);
+        }
         if (lu_keys[key->lct_index]) {
                 lu_keys[key->lct_index] = NULL;
                 lu_ref_fini(&key->lct_reference);
         }
-        spin_unlock(&lu_keys_guard);
+        write_unlock(&lu_keys_guard);
 
         LASSERTF(atomic_read(&key->lct_used) == 1,
                  "key has instances: %d\n",
@@ -1625,26 +1601,40 @@ void lu_context_key_quiesce(struct lu_context_key *key)
                  * XXX layering violation.
                  */
                 cl_env_cache_purge(~0);
-                key->lct_tags |= LCT_QUIESCENT;
                 /*
                  * XXX memory barrier has to go here.
                  */
-                spin_lock(&lu_keys_guard);
+                write_lock(&lu_keys_guard);
+                key->lct_tags |= LCT_QUIESCENT;
+
+                /**
+                 * Wait until all lu_context_key::lct_init() methods
+                 * have completed.
+                 */
+                while (atomic_read(&lu_key_initing_cnt) > 0) {
+                        write_unlock(&lu_keys_guard);
+                        CDEBUG(D_INFO, "lu_context_key_quiesce: \"%s\""
+                               " %p, %d (%d)\n",
+                               key->lct_owner ? key->lct_owner->name : "",
+                               key, atomic_read(&key->lct_used),
+                               atomic_read(&lu_key_initing_cnt));
+                        schedule();
+                        write_lock(&lu_keys_guard);
+                }
+
                 list_for_each_entry(ctx, &lu_context_remembered, lc_remember)
                         key_fini(ctx, key->lct_index);
-                spin_unlock(&lu_keys_guard);
+                write_unlock(&lu_keys_guard);
                 ++key_set_version;
         }
 }
-EXPORT_SYMBOL(lu_context_key_quiesce);
 
 void lu_context_key_revive(struct lu_context_key *key)
 {
         key->lct_tags &= ~LCT_QUIESCENT;
         ++key_set_version;
 }
-EXPORT_SYMBOL(lu_context_key_revive);
 
 static void keys_fini(struct lu_context *ctx)
 {
@@ -1664,6 +1654,19 @@ static int keys_fill(struct lu_context *ctx)
 {
         unsigned int i;
 
+        /*
+         * A serialisation with lu_context_key_quiesce() is needed, but some
+         * "key->lct_init()" are calling kernel memory allocation routine and
+         * can't be called while holding a spin_lock.
+         * "lu_keys_guard" is held while incrementing "lu_key_initing_cnt"
+         * to ensure the start of the serialisation.
+         * An atomic_t variable is still used, in order not to reacquire the
+         * lock when decrementing the counter.
+         */
+        read_lock(&lu_keys_guard);
+        atomic_inc(&lu_key_initing_cnt);
+        read_unlock(&lu_keys_guard);
+
         LINVRNT(ctx->lc_value != NULL);
         for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
                 struct lu_context_key *key;
@@ -1681,13 +1684,19 @@ static int keys_fill(struct lu_context *ctx)
                         LINVRNT(key->lct_init != NULL);
                         LINVRNT(key->lct_index == i);
 
-                        value = key->lct_init(ctx, key);
-                        if (unlikely(IS_ERR(value)))
-                                return PTR_ERR(value);
-
                         LASSERT(key->lct_owner != NULL);
-                        if (!(ctx->lc_tags & LCT_NOREF))
-                                try_module_get(key->lct_owner);
+                        if (!(ctx->lc_tags & LCT_NOREF) &&
+                            try_module_get(key->lct_owner) == 0) {
+                                /* module is unloading, skip this key */
+                                continue;
+                        }
+
+                        value = key->lct_init(ctx, key);
+                        if (unlikely(IS_ERR(value))) {
+                                atomic_dec(&lu_key_initing_cnt);
+                                return PTR_ERR(value);
+                        }
+
                         lu_ref_add_atomic(&key->lct_reference, "ctx", ctx);
                         atomic_inc(&key->lct_used);
                         /*
@@ -1701,6 +1710,7 @@ static int keys_fill(struct lu_context *ctx)
                 }
                 ctx->lc_version = key_set_version;
         }
+        atomic_dec(&lu_key_initing_cnt);
         return 0;
 }
 
@@ -1724,9 +1734,9 @@ int lu_context_init(struct lu_context *ctx, __u32 tags)
         ctx->lc_state = LCS_INITIALIZED;
         ctx->lc_tags = tags;
         if (tags & LCT_REMEMBER) {
-                spin_lock(&lu_keys_guard);
+                write_lock(&lu_keys_guard);
                 list_add(&ctx->lc_remember, &lu_context_remembered);
-                spin_unlock(&lu_keys_guard);
+                write_unlock(&lu_keys_guard);
         } else {
                 INIT_LIST_HEAD(&ctx->lc_remember);
         }
@@ -1752,10 +1762,10 @@ void lu_context_fini(struct lu_context *ctx)
                 keys_fini(ctx);
         } else {
                 /* could race with key degister */
-                spin_lock(&lu_keys_guard);
+                write_lock(&lu_keys_guard);
                 keys_fini(ctx);
                 list_del_init(&ctx->lc_remember);
-                spin_unlock(&lu_keys_guard);
+                write_unlock(&lu_keys_guard);
         }
 }
 EXPORT_SYMBOL(lu_context_fini);
@@ -1781,15 +1791,20 @@ void lu_context_exit(struct lu_context *ctx)
         ctx->lc_state = LCS_LEFT;
         if (ctx->lc_tags & LCT_HAS_EXIT && ctx->lc_value != NULL) {
                 for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
-                        if (ctx->lc_value[i] != NULL) {
-                                struct lu_context_key *key;
-
-                                key = lu_keys[i];
-                                LASSERT(key != NULL);
-                                if (key->lct_exit != NULL)
-                                        key->lct_exit(ctx,
-                                                      key, ctx->lc_value[i]);
-                        }
+                        /* could race with key quiescency */
+                        if (ctx->lc_tags & LCT_REMEMBER)
+                                read_lock(&lu_keys_guard);
+                        if (ctx->lc_value[i] != NULL) {
+                                struct lu_context_key *key;
+
+                                key = lu_keys[i];
+                                LASSERT(key != NULL);
+                                if (key->lct_exit != NULL)
+                                        key->lct_exit(ctx,
+                                                      key, ctx->lc_value[i]);
+                        }
+                        if (ctx->lc_tags & LCT_REMEMBER)
+                                read_unlock(&lu_keys_guard);
                 }
         }
 }
@@ -1804,7 +1819,6 @@ int lu_context_refill(struct lu_context *ctx)
 {
         return likely(ctx->lc_version == key_set_version) ?
                 0 : keys_fill(ctx);
 }
-EXPORT_SYMBOL(lu_context_refill);
 
 /**
  * lu_ctx_tags/lu_ses_tags will be updated if there are new types of
@@ -1818,37 +1832,37 @@ __u32 lu_session_tags_default = 0;
 
 void lu_context_tags_update(__u32 tags)
 {
-        spin_lock(&lu_keys_guard);
+        write_lock(&lu_keys_guard);
         lu_context_tags_default |= tags;
         key_set_version++;
-        spin_unlock(&lu_keys_guard);
+        write_unlock(&lu_keys_guard);
 }
 EXPORT_SYMBOL(lu_context_tags_update);
 
 void lu_context_tags_clear(__u32 tags)
 {
-        spin_lock(&lu_keys_guard);
+        write_lock(&lu_keys_guard);
         lu_context_tags_default &= ~tags;
         key_set_version++;
-        spin_unlock(&lu_keys_guard);
+        write_unlock(&lu_keys_guard);
 }
 EXPORT_SYMBOL(lu_context_tags_clear);
 
 void lu_session_tags_update(__u32 tags)
 {
-        spin_lock(&lu_keys_guard);
+        write_lock(&lu_keys_guard);
         lu_session_tags_default |= tags;
         key_set_version++;
-        spin_unlock(&lu_keys_guard);
+        write_unlock(&lu_keys_guard);
 }
 EXPORT_SYMBOL(lu_session_tags_update);
 
 void lu_session_tags_clear(__u32 tags)
 {
-        spin_lock(&lu_keys_guard);
+        write_lock(&lu_keys_guard);
         lu_session_tags_default &= ~tags;
         key_set_version++;
-        spin_unlock(&lu_keys_guard);
+        write_unlock(&lu_keys_guard);
 }
 EXPORT_SYMBOL(lu_session_tags_clear);
 
@@ -1920,10 +1934,10 @@ typedef struct lu_site_stats{
         unsigned lss_busy;
 } lu_site_stats_t;
 
-static void lu_site_stats_get(cfs_hash_t *hs,
+static void lu_site_stats_get(struct cfs_hash *hs,
                               lu_site_stats_t *stats, int populated)
 {
-        cfs_hash_bd_t bd;
+        struct cfs_hash_bd bd;
         unsigned int i;
 
         cfs_hash_for_each_bucket(hs, &bd, i) {
@@ -1931,7 +1945,8 @@ static void lu_site_stats_get(cfs_hash_t *hs,
                 struct hlist_head *hhead;
 
                 cfs_hash_bd_lock(hs, &bd, 1);
-                stats->lss_busy += bkt->lsb_busy;
+                stats->lss_busy +=
+                        cfs_hash_bd_count_get(&bd) - bkt->lsb_lru_len;
                 stats->lss_total += cfs_hash_bd_count_get(&bd);
                 stats->lss_max_search = max((int)stats->lss_max_search,
                                             cfs_hash_bd_depmax_get(&bd));
@@ -1949,10 +1964,22 @@ static void lu_site_stats_get(cfs_hash_t *hs,
         }
 }
 
+/*
+ * lu_cache_shrink_count returns the number of cached objects that are
+ * candidates to be freed by shrink_slab(). A counter, which tracks
+ * the number of items in the site's lru, is maintained in the per cpu
+ * stats of each site. The counter is incremented when an object is added
+ * to a site's lru and decremented when one is removed. The number of
+ * free-able objects is the sum of all per cpu counters for all sites.
+ *
+ * Using a per cpu counter is a compromise solution to concurrent access:
+ * lu_object_put() can update the counter without locking the site and
+ * lu_cache_shrink_count can sum the counters without locking each
+ * ls_obj_hash bucket.
+ */
 static unsigned long lu_cache_shrink_count(struct shrinker *sk,
                                            struct shrink_control *sc)
 {
-        lu_site_stats_t stats;
         struct lu_site *s;
         struct lu_site *tmp;
         unsigned long cached = 0;
@@ -1962,14 +1989,14 @@ static unsigned long lu_cache_shrink_count(struct shrinker *sk,
 
         mutex_lock(&lu_sites_guard);
         list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) {
-                memset(&stats, 0, sizeof(stats));
-                lu_site_stats_get(s->ls_obj_hash, &stats, 0);
-                cached += stats.lss_total - stats.lss_busy;
+                cached += ls_stats_read(s->ls_stats, LU_SS_LRU_LEN);
         }
         mutex_unlock(&lu_sites_guard);
 
         cached = (cached / 100) * sysctl_vfs_cache_pressure;
-        CDEBUG(D_INODE, "%ld objects cached\n", cached);
+        CDEBUG(D_INODE, "%ld objects cached, cache pressure %d\n",
+               cached, sysctl_vfs_cache_pressure);
+
         return cached;
 }
@@ -2026,7 +2053,7 @@ static unsigned long lu_cache_shrink_scan(struct shrinker *sk,
  * is safe to take the lu_sites_guard lock.
  *
  * Ideally we should accurately return the remaining number of cached
- * objects without taking the lu_sites_guard lock, but this is not 
+ * objects without taking the lu_sites_guard lock, but this is not
  * possible in the current implementation.
  */
 static int lu_cache_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
@@ -2043,11 +2070,10 @@ static int lu_cache_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
 
         CDEBUG(D_INODE, "Shrink %lu objects\n", scv.nr_to_scan);
 
-        lu_cache_shrink_scan(shrinker, &scv);
+        if (scv.nr_to_scan != 0)
+                lu_cache_shrink_scan(shrinker, &scv);
 
         cached = lu_cache_shrink_count(shrinker, &scv);
-        if (scv.nr_to_scan == 0)
-                CDEBUG(D_INODE, "%d objects cached\n", cached);
         return cached;
 }
@@ -2061,13 +2087,13 @@ static int lu_cache_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
 /**
  * Environment to be used in debugger, contains all tags.
  */
-struct lu_env lu_debugging_env;
+static struct lu_env lu_debugging_env;
 
 /**
  * Debugging printer function using printk().
  */
 int lu_printk_printer(const struct lu_env *env,
-                      void *unused, const char *format, ...)
+                      void *unused, const char *format, ...)
 {
         va_list args;
@@ -2101,7 +2127,6 @@ void lu_context_keys_dump(void)
                 }
         }
 }
-EXPORT_SYMBOL(lu_context_keys_dump);
 
 /**
  * Initialization of global lu_* data.
@@ -2175,13 +2200,20 @@ void lu_global_fini(void)
 
 static __u32 ls_stats_read(struct lprocfs_stats *stats, int idx)
 {
-#ifdef LPROCFS
-        struct lprocfs_counter ret;
+#ifdef CONFIG_PROC_FS
+        struct lprocfs_counter ret;
 
-        lprocfs_stats_collect(stats, idx, &ret);
-        return (__u32)ret.lc_count;
+        lprocfs_stats_collect(stats, idx, &ret);
+        if (idx == LU_SS_LRU_LEN)
+                /*
+                 * protect against counter on cpu A being decremented
+                 * before counter is incremented on cpu B; unlikely
+                 */
+                return (__u32)((ret.lc_sum > 0) ? ret.lc_sum : 0);
+        else
+                return (__u32)ret.lc_count;
 #else
-        return 0;
+        return 0;
 #endif
 }
@@ -2196,7 +2228,7 @@ int lu_site_stats_seq_print(const struct lu_site *s, struct seq_file *m)
         memset(&stats, 0, sizeof(stats));
         lu_site_stats_get(s->ls_obj_hash, &stats, 1);
 
-        return seq_printf(m, "%d/%d %d/%d %d %d %d %d %d %d %d\n",
+        return seq_printf(m, "%d/%d %d/%d %d %d %d %d %d %d %d %d\n",
                           stats.lss_busy,
                           stats.lss_total,
                           stats.lss_populated,
@@ -2207,32 +2239,11 @@ int lu_site_stats_seq_print(const struct lu_site *s, struct seq_file *m)
                           ls_stats_read(s->ls_stats, LU_SS_CACHE_MISS),
                           ls_stats_read(s->ls_stats, LU_SS_CACHE_RACE),
                           ls_stats_read(s->ls_stats, LU_SS_CACHE_DEATH_RACE),
-                          ls_stats_read(s->ls_stats, LU_SS_LRU_PURGED));
+                          ls_stats_read(s->ls_stats, LU_SS_LRU_PURGED),
+                          ls_stats_read(s->ls_stats, LU_SS_LRU_LEN));
 }
 EXPORT_SYMBOL(lu_site_stats_seq_print);
 
-int lu_site_stats_print(const struct lu_site *s, char *page, int count)
-{
-        lu_site_stats_t stats;
-
-        memset(&stats, 0, sizeof(stats));
-        lu_site_stats_get(s->ls_obj_hash, &stats, 1);
-
-        return snprintf(page, count, "%d/%d %d/%d %d %d %d %d %d %d %d\n",
-                        stats.lss_busy,
-                        stats.lss_total,
-                        stats.lss_populated,
-                        CFS_HASH_NHLIST(s->ls_obj_hash),
-                        stats.lss_max_search,
-                        ls_stats_read(s->ls_stats, LU_SS_CREATED),
-                        ls_stats_read(s->ls_stats, LU_SS_CACHE_HIT),
-                        ls_stats_read(s->ls_stats, LU_SS_CACHE_MISS),
-                        ls_stats_read(s->ls_stats, LU_SS_CACHE_RACE),
-                        ls_stats_read(s->ls_stats, LU_SS_CACHE_DEATH_RACE),
-                        ls_stats_read(s->ls_stats, LU_SS_LRU_PURGED));
-}
-EXPORT_SYMBOL(lu_site_stats_print);
-
 /**
  * Helper function to initialize a number of kmem slab caches at once.
@@ -2280,11 +2291,10 @@ void lu_object_assign_fid(const struct lu_env *env, struct lu_object *o,
 {
         struct lu_site *s = o->lo_dev->ld_site;
         struct lu_fid *old = &o->lo_header->loh_fid;
-        struct lu_site_bkt_data *bkt;
         struct lu_object *shadow;
         wait_queue_t waiter;
-        cfs_hash_t *hs;
-        cfs_hash_bd_t bd;
+        struct cfs_hash *hs;
+        struct cfs_hash_bd bd;
         __u64 version = 0;
 
         LASSERT(fid_is_zero(old));
@@ -2295,9 +2305,7 @@ void lu_object_assign_fid(const struct lu_env *env, struct lu_object *o,
         /* supposed to be unique */
         LASSERT(IS_ERR(shadow) && PTR_ERR(shadow) == -ENOENT);
         *old = *fid;
-        bkt = cfs_hash_bd_extra_get(hs, &bd);
         cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash);
-        bkt->lsb_busy++;
         cfs_hash_bd_unlock(hs, &bd, 1);
 }
 EXPORT_SYMBOL(lu_object_assign_fid);
@@ -2396,5 +2404,3 @@ int lu_buf_check_and_grow(struct lu_buf *buf, size_t len)
         buf->lb_len = len;
         return 0;
 }
-EXPORT_SYMBOL(lu_buf_check_and_grow);
-
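
Illustration of the read side of the new lru_len counter (not part of the patch): ls_stats_read() above sums per-cpu deltas (lc_sum), and because an object can be added to a site's LRU on one CPU and removed on another, the summed value can transiently appear negative, so it is clamped at zero. The self-contained C sketch below mirrors only that clamping logic; the names fake_percpu_counter and lru_len_read are invented stand-ins, not the real lprocfs types or API.

/* sketch.c - illustrative only; fake_percpu_counter stands in for the
 * per-cpu lprocfs counter used by LU_SS_LRU_LEN in the patch above. */
#include <stdio.h>

struct fake_percpu_counter {
        long long sum;          /* per-cpu increments minus decrements */
};

/* Sum the per-cpu deltas and clamp the result at zero, mirroring the
 * (ret.lc_sum > 0) ? ret.lc_sum : 0 expression in ls_stats_read(). */
static unsigned int lru_len_read(const struct fake_percpu_counter *c, int ncpus)
{
        long long sum = 0;
        int i;

        for (i = 0; i < ncpus; i++)
                sum += c[i].sum;
        return (unsigned int)(sum > 0 ? sum : 0);
}

int main(void)
{
        /* cpu0 added two objects to the LRU; cpu1 removed one of them */
        struct fake_percpu_counter counters[2] = { { 2 }, { -1 } };

        printf("lru_len = %u\n", lru_len_read(counters, 2)); /* prints 1 */
        return 0;
}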