X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Fobdclass%2Flu_object.c;h=cc890293501711ada8716f996aa10bb232c8319f;hb=e58f8d609a81576eaf5bc9d0fa53bef274a01bf;hp=fe4333186399e2ae0ae95f89bf6e97295cb8ae13;hpb=0123baecc4e2050447f8c4f48f5b33a6d3c524a8;p=fs%2Flustre-release.git diff --git a/lustre/obdclass/lu_object.c b/lustre/obdclass/lu_object.c index fe43331..cc89029 100644 --- a/lustre/obdclass/lu_object.c +++ b/lustre/obdclass/lu_object.c @@ -15,11 +15,7 @@ * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. + * http://www.gnu.org/licenses/gpl-2.0.html * * GPL HEADER END */ @@ -27,7 +23,7 @@ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. * Use is subject to license terms. * - * Copyright (c) 2011, 2013, Intel Corporation. + * Copyright (c) 2011, 2016, Intel Corporation. */ /* * This file is part of Lustre, http://www.lustre.org/ @@ -44,37 +40,32 @@ #define DEBUG_SUBSYSTEM S_CLASS +#include +#include #include - -#ifdef __KERNEL__ -# include -#endif - -/* hash_long() */ -#include +#include /* hash_long() */ #include #include #include #include #include #include -#include - -extern spinlock_t obd_types_lock; enum { LU_CACHE_PERCENT_MAX = 50, LU_CACHE_PERCENT_DEFAULT = 20 }; -#define LU_CACHE_NR_MAX_ADJUST 128 +#define LU_CACHE_NR_MAX_ADJUST 512 #define LU_CACHE_NR_UNLIMITED -1 #define LU_CACHE_NR_DEFAULT LU_CACHE_NR_UNLIMITED #define LU_CACHE_NR_LDISKFS_LIMIT LU_CACHE_NR_UNLIMITED -#define LU_CACHE_NR_ZFS_LIMIT 256 +/** This is set to roughly (20 * OSS_NTHRS_MAX) to prevent thrashing */ +#define LU_CACHE_NR_ZFS_LIMIT 10240 #define LU_SITE_BITS_MIN 12 #define LU_SITE_BITS_MAX 24 +#define LU_SITE_BITS_MAX_CL 19 /** * total 256 buckets, we don't want too many buckets because: * - consume too much memory @@ -84,14 +75,15 @@ enum { static unsigned int lu_cache_percent = LU_CACHE_PERCENT_DEFAULT; -CFS_MODULE_PARM(lu_cache_percent, "i", int, 0644, - "Percentage of memory to be used as lu_object cache"); +module_param(lu_cache_percent, int, 0644); +MODULE_PARM_DESC(lu_cache_percent, "Percentage of memory to be used as lu_object cache"); static long lu_cache_nr = LU_CACHE_NR_DEFAULT; -CFS_MODULE_PARM(lu_cache_nr, "l", long, 0644, - "Maximum number of objects in lu_object cache"); +module_param(lu_cache_nr, long, 0644); +MODULE_PARM_DESC(lu_cache_nr, "Maximum number of objects in lu_object cache"); static void lu_object_free(const struct lu_env *env, struct lu_object *o); +static __u32 ls_stats_read(struct lprocfs_stats *stats, int idx); /** * Decrease reference counter on object. 
If last reference is freed, return @@ -104,7 +96,7 @@ void lu_object_put(const struct lu_env *env, struct lu_object *o) struct lu_object_header *top; struct lu_site *site; struct lu_object *orig; - cfs_hash_bd_t bd; + struct cfs_hash_bd bd; const struct lu_fid *fid; top = o->lo_header; @@ -120,10 +112,10 @@ void lu_object_put(const struct lu_env *env, struct lu_object *o) if (fid_is_zero(fid)) { LASSERT(top->loh_hash.next == NULL && top->loh_hash.pprev == NULL); - LASSERT(cfs_list_empty(&top->loh_lru)); + LASSERT(list_empty(&top->loh_lru)); if (!atomic_dec_and_test(&top->loh_ref)) return; - cfs_list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) { + list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) { if (o->lo_ops->loo_object_release != NULL) o->lo_ops->loo_object_release(env, o); } @@ -146,26 +138,30 @@ void lu_object_put(const struct lu_env *env, struct lu_object *o) return; } - LASSERT(bkt->lsb_busy > 0); - bkt->lsb_busy--; /* * When last reference is released, iterate over object * layers, and notify them that object is no longer busy. */ - cfs_list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) { + list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) { if (o->lo_ops->loo_object_release != NULL) o->lo_ops->loo_object_release(env, o); } - if (!lu_object_is_dying(top)) { - LASSERT(cfs_list_empty(&top->loh_lru)); - cfs_list_add_tail(&top->loh_lru, &bkt->lsb_lru); + if (!lu_object_is_dying(top) && + (lu_object_exists(orig) || lu_object_is_cl(orig))) { + LASSERT(list_empty(&top->loh_lru)); + list_add_tail(&top->loh_lru, &bkt->lsb_lru); + bkt->lsb_lru_len++; + percpu_counter_inc(&site->ls_lru_len_counter); + CDEBUG(D_INODE, "Add %p to site lru. hash: %p, bkt: %p, " + "lru_len: %ld\n", + o, site->ls_obj_hash, bkt, bkt->lsb_lru_len); cfs_hash_bd_unlock(site->ls_obj_hash, &bd, 1); return; } /* - * If object is dying (will not be cached), removed it + * If object is dying (will not be cached) then remove it * from hash table and LRU. * * This is done with hash table and LRU lists locked. 
As the only @@ -208,11 +204,19 @@ void lu_object_unhash(const struct lu_env *env, struct lu_object *o) top = o->lo_header; set_bit(LU_OBJECT_HEARD_BANSHEE, &top->loh_flags); if (!test_and_set_bit(LU_OBJECT_UNHASHED, &top->loh_flags)) { - cfs_hash_t *obj_hash = o->lo_dev->ld_site->ls_obj_hash; - cfs_hash_bd_t bd; + struct lu_site *site = o->lo_dev->ld_site; + struct cfs_hash *obj_hash = site->ls_obj_hash; + struct cfs_hash_bd bd; cfs_hash_bd_get_and_lock(obj_hash, &top->loh_fid, &bd, 1); - cfs_list_del_init(&top->loh_lru); + if (!list_empty(&top->loh_lru)) { + struct lu_site_bkt_data *bkt; + + list_del_init(&top->loh_lru); + bkt = cfs_hash_bd_extra_get(obj_hash, &bd); + bkt->lsb_lru_len--; + percpu_counter_dec(&site->ls_lru_len_counter); + } cfs_hash_bd_del_locked(obj_hash, &bd, &top->loh_hash); cfs_hash_bd_unlock(obj_hash, &bd, 1); } @@ -232,7 +236,7 @@ static struct lu_object *lu_object_alloc(const struct lu_env *env, { struct lu_object *scan; struct lu_object *top; - cfs_list_t *layers; + struct list_head *layers; unsigned int init_mask = 0; unsigned int init_flag; int clean; @@ -262,7 +266,7 @@ static struct lu_object *lu_object_alloc(const struct lu_env *env, */ clean = 1; init_flag = 1; - cfs_list_for_each_entry(scan, layers, lo_linkage) { + list_for_each_entry(scan, layers, lo_linkage) { if (init_mask & init_flag) goto next; clean = 0; @@ -278,7 +282,7 @@ next: } } while (!clean); - cfs_list_for_each_entry_reverse(scan, layers, lo_linkage) { + list_for_each_entry_reverse(scan, layers, lo_linkage) { if (scan->lo_ops->loo_object_start != NULL) { result = scan->lo_ops->loo_object_start(env, scan); if (result != 0) { @@ -297,11 +301,11 @@ next: */ static void lu_object_free(const struct lu_env *env, struct lu_object *o) { - struct lu_site_bkt_data *bkt; - struct lu_site *site; - struct lu_object *scan; - cfs_list_t *layers; - cfs_list_t splice; + struct lu_site_bkt_data *bkt; + struct lu_site *site; + struct lu_object *scan; + struct list_head *layers; + struct list_head splice; site = o->lo_dev->ld_site; layers = &o->lo_header->loh_layers; @@ -309,7 +313,7 @@ static void lu_object_free(const struct lu_env *env, struct lu_object *o) /* * First call ->loo_object_delete() method to release all resources. */ - cfs_list_for_each_entry_reverse(scan, layers, lo_linkage) { + list_for_each_entry_reverse(scan, layers, lo_linkage) { if (scan->lo_ops->loo_object_delete != NULL) scan->lo_ops->loo_object_delete(env, scan); } @@ -320,16 +324,16 @@ static void lu_object_free(const struct lu_env *env, struct lu_object *o) * necessary, because lu_object_header is freed together with the * top-level slice. */ - CFS_INIT_LIST_HEAD(&splice); - cfs_list_splice_init(layers, &splice); - while (!cfs_list_empty(&splice)) { + INIT_LIST_HEAD(&splice); + list_splice_init(layers, &splice); + while (!list_empty(&splice)) { /* * Free layers in bottom-to-top order, so that object header * lives as long as possible and ->loo_object_free() methods * can look at its contents. */ o = container_of0(splice.prev, struct lu_object, lo_linkage); - cfs_list_del_init(&o->lo_linkage); + list_del_init(&o->lo_linkage); LASSERT(o->lo_ops->loo_object_free != NULL); o->lo_ops->loo_object_free(env, o); } @@ -340,32 +344,45 @@ static void lu_object_free(const struct lu_env *env, struct lu_object *o) /** * Free \a nr objects from the cold end of the site LRU list. 
+ * if canblock is 0, then don't block awaiting for another + * instance of lu_site_purge() to complete */ -int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr) +int lu_site_purge_objects(const struct lu_env *env, struct lu_site *s, + int nr, int canblock) { struct lu_object_header *h; struct lu_object_header *temp; struct lu_site_bkt_data *bkt; - cfs_hash_bd_t bd; - cfs_hash_bd_t bd2; - cfs_list_t dispose; - int did_sth; - int start; + struct cfs_hash_bd bd; + struct cfs_hash_bd bd2; + struct list_head dispose; + int did_sth; + unsigned int start = 0; int count; int bnr; - int i; + unsigned int i; if (OBD_FAIL_CHECK(OBD_FAIL_OBD_NO_LRU)) RETURN(0); - CFS_INIT_LIST_HEAD(&dispose); + INIT_LIST_HEAD(&dispose); /* * Under LRU list lock, scan LRU list and move unreferenced objects to * the dispose list, removing them from LRU and hash table. */ - start = s->ls_purge_start; - bnr = (nr == ~0) ? -1 : nr / CFS_HASH_NBKT(s->ls_obj_hash) + 1; + if (nr != ~0) + start = s->ls_purge_start; + bnr = (nr == ~0) ? -1 : nr / (int)CFS_HASH_NBKT(s->ls_obj_hash) + 1; again: + /* + * It doesn't make any sense to make purge threads parallel, that can + * only bring troubles to us. See LU-5331. + */ + if (canblock != 0) + mutex_lock(&s->ls_purge_mutex); + else if (mutex_trylock(&s->ls_purge_mutex) == 0) + goto out; + did_sth = 0; cfs_hash_for_each_bucket(s->ls_obj_hash, &bd, i) { if (i < start) @@ -374,7 +391,7 @@ int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr) cfs_hash_bd_lock(s->ls_obj_hash, &bd, 1); bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, &bd); - cfs_list_for_each_entry_safe(h, temp, &bkt->lsb_lru, loh_lru) { + list_for_each_entry_safe(h, temp, &bkt->lsb_lru, loh_lru) { LASSERT(atomic_read(&h->loh_ref) == 0); cfs_hash_bd_get(s->ls_obj_hash, &h->loh_fid, &bd2); @@ -382,7 +399,9 @@ int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr) cfs_hash_bd_del_locked(s->ls_obj_hash, &bd2, &h->loh_hash); - cfs_list_move(&h->loh_lru, &dispose); + list_move(&h->loh_lru, &dispose); + bkt->lsb_lru_len--; + percpu_counter_dec(&s->ls_lru_len_counter); if (did_sth == 0) did_sth = 1; @@ -399,17 +418,18 @@ int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr) * Free everything on the dispose list. This is safe against * races due to the reasons described in lu_object_put(). */ - while (!cfs_list_empty(&dispose)) { - h = container_of0(dispose.next, - struct lu_object_header, loh_lru); - cfs_list_del_init(&h->loh_lru); - lu_object_free(env, lu_object_top(h)); - lprocfs_counter_incr(s->ls_stats, LU_SS_LRU_PURGED); - } + while (!list_empty(&dispose)) { + h = container_of0(dispose.next, + struct lu_object_header, loh_lru); + list_del_init(&h->loh_lru); + lu_object_free(env, lu_object_top(h)); + lprocfs_counter_incr(s->ls_stats, LU_SS_LRU_PURGED); + } if (nr == 0) break; } + mutex_unlock(&s->ls_purge_mutex); if (nr != 0 && did_sth && start != 0) { start = 0; /* restart from the first bucket */ @@ -418,9 +438,10 @@ int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr) /* race on s->ls_purge_start, but nobody cares */ s->ls_purge_start = i % CFS_HASH_NBKT(s->ls_obj_hash); +out: return nr; } -EXPORT_SYMBOL(lu_site_purge); +EXPORT_SYMBOL(lu_site_purge_objects); /* * Object printing. @@ -461,7 +482,7 @@ LU_KEY_INIT_FINI(lu_global, struct lu_cdebug_data); * Key, holding temporary buffer. This key is registered very early by * lu_global_init(). 
*/ -struct lu_context_key lu_global_key = { +static struct lu_context_key lu_global_key = { .lct_tags = LCT_MD_THREAD | LCT_DT_THREAD | LCT_MG_THREAD | LCT_CL_THREAD | LCT_LOCAL, .lct_init = lu_global_key_init, @@ -493,8 +514,8 @@ int lu_cdebug_printer(const struct lu_env *env, vsnprintf(key->lck_area + used, ARRAY_SIZE(key->lck_area) - used, format, args); if (complete) { - if (cfs_cdebug_show(msgdata->msg_mask, msgdata->msg_subsys)) - libcfs_debug_msg(msgdata, "%s", key->lck_area); + if (cfs_cdebug_show(msgdata->msg_mask, msgdata->msg_subsys)) + libcfs_debug_msg(msgdata, "%s\n", key->lck_area); key->lck_area[0] = 0; } va_end(args); @@ -509,13 +530,13 @@ void lu_object_header_print(const struct lu_env *env, void *cookie, lu_printer_t printer, const struct lu_object_header *hdr) { - (*printer)(env, cookie, "header@%p[%#lx, %d, "DFID"%s%s%s]", + (*printer)(env, cookie, "header@%p[%#lx, %d, "DFID"%s%s%s]", hdr, hdr->loh_flags, atomic_read(&hdr->loh_ref), - PFID(&hdr->loh_fid), - cfs_hlist_unhashed(&hdr->loh_hash) ? "" : " hash", - cfs_list_empty((cfs_list_t *)&hdr->loh_lru) ? \ - "" : " lru", - hdr->loh_attr & LOHA_EXISTS ? " exist":""); + PFID(&hdr->loh_fid), + hlist_unhashed(&hdr->loh_hash) ? "" : " hash", + list_empty((struct list_head *)&hdr->loh_lru) ? \ + "" : " lru", + hdr->loh_attr & LOHA_EXISTS ? " exist" : ""); } EXPORT_SYMBOL(lu_object_header_print); @@ -533,7 +554,7 @@ void lu_object_print(const struct lu_env *env, void *cookie, lu_object_header_print(env, cookie, printer, top); (*printer)(env, cookie, "{\n"); - cfs_list_for_each_entry(o, &top->loh_layers, lo_linkage) { + list_for_each_entry(o, &top->loh_layers, lo_linkage) { /* * print `.' \a depth times followed by type name and address */ @@ -558,25 +579,24 @@ int lu_object_invariant(const struct lu_object *o) struct lu_object_header *top; top = o->lo_header; - cfs_list_for_each_entry(o, &top->loh_layers, lo_linkage) { + list_for_each_entry(o, &top->loh_layers, lo_linkage) { if (o->lo_ops->loo_object_invariant != NULL && !o->lo_ops->loo_object_invariant(o)) return 0; } return 1; } -EXPORT_SYMBOL(lu_object_invariant); static struct lu_object *htable_lookup(struct lu_site *s, - cfs_hash_bd_t *bd, + struct cfs_hash_bd *bd, const struct lu_fid *f, wait_queue_t *waiter, __u64 *version) { - struct lu_site_bkt_data *bkt; - struct lu_object_header *h; - cfs_hlist_node_t *hnode; - __u64 ver = cfs_hash_bd_version_get(bd); + struct lu_site_bkt_data *bkt; + struct lu_object_header *h; + struct hlist_node *hnode; + __u64 ver = cfs_hash_bd_version_get(bd); if (*version == ver) return ERR_PTR(-ENOENT); @@ -595,7 +615,11 @@ static struct lu_object *htable_lookup(struct lu_site *s, if (likely(!lu_object_is_dying(h))) { cfs_hash_get(s->ls_obj_hash, hnode); lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_HIT); - cfs_list_del_init(&h->loh_lru); + if (!list_empty(&h->loh_lru)) { + list_del_init(&h->loh_lru); + bkt->lsb_lru_len--; + percpu_counter_dec(&s->ls_lru_len_counter); + } return lu_object_top(h); } @@ -605,36 +629,14 @@ static struct lu_object *htable_lookup(struct lu_site *s, * drained), and moreover, lookup has to wait until object is freed. 
*/ - init_waitqueue_entry_current(waiter); - add_wait_queue(&bkt->lsb_marche_funebre, waiter); - set_current_state(TASK_UNINTERRUPTIBLE); - lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_DEATH_RACE); - return ERR_PTR(-EAGAIN); -} - -static struct lu_object *htable_lookup_nowait(struct lu_site *s, - cfs_hash_bd_t *bd, - const struct lu_fid *f) -{ - cfs_hlist_node_t *hnode; - struct lu_object_header *h; - - /* cfs_hash_bd_peek_locked is a somehow "internal" function - * of cfs_hash, it doesn't add refcount on object. */ - hnode = cfs_hash_bd_peek_locked(s->ls_obj_hash, bd, (void *)f); - if (hnode == NULL) { - lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_MISS); - return ERR_PTR(-ENOENT); + if (likely(waiter != NULL)) { + init_waitqueue_entry(waiter, current); + add_wait_queue(&bkt->lsb_marche_funebre, waiter); + set_current_state(TASK_UNINTERRUPTIBLE); + lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_DEATH_RACE); } - h = container_of0(hnode, struct lu_object_header, loh_hash); - if (unlikely(lu_object_is_dying(h))) - return ERR_PTR(-ENOENT); - - cfs_hash_get(s->ls_obj_hash, hnode); - lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_HIT); - cfs_list_del_init(&h->loh_lru); - return lu_object_top(h); + return ERR_PTR(-EAGAIN); } /** @@ -667,11 +669,11 @@ static void lu_object_limit(const struct lu_env *env, size = cfs_hash_size_get(dev->ld_site->ls_obj_hash); nr = (__u64)lu_cache_nr; - if (size > nr) - lu_site_purge(env, dev->ld_site, - MIN(size - nr, LU_CACHE_NR_MAX_ADJUST)); + if (size <= nr) + return; - return; + lu_site_purge_objects(env, dev->ld_site, + MIN(size - nr, LU_CACHE_NR_MAX_ADJUST), 0); } static struct lu_object *lu_object_new(const struct lu_env *env, @@ -680,9 +682,8 @@ static struct lu_object *lu_object_new(const struct lu_env *env, const struct lu_object_conf *conf) { struct lu_object *o; - cfs_hash_t *hs; - cfs_hash_bd_t bd; - struct lu_site_bkt_data *bkt; + struct cfs_hash *hs; + struct cfs_hash_bd bd; o = lu_object_alloc(env, dev, f, conf); if (unlikely(IS_ERR(o))) @@ -690,9 +691,7 @@ static struct lu_object *lu_object_new(const struct lu_env *env, hs = dev->ld_site->ls_obj_hash; cfs_hash_bd_get_and_lock(hs, (void *)f, &bd, 1); - bkt = cfs_hash_bd_extra_get(hs, &bd); cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash); - bkt->lsb_busy++; cfs_hash_bd_unlock(hs, &bd, 1); lu_object_limit(env, dev); @@ -712,8 +711,8 @@ static struct lu_object *lu_object_find_try(const struct lu_env *env, struct lu_object *o; struct lu_object *shadow; struct lu_site *s; - cfs_hash_t *hs; - cfs_hash_bd_t bd; + struct cfs_hash *hs; + struct cfs_hash_bd bd; __u64 version = 0; /* @@ -761,11 +760,7 @@ static struct lu_object *lu_object_find_try(const struct lu_env *env, shadow = htable_lookup(s, &bd, f, waiter, &version); if (likely(IS_ERR(shadow) && PTR_ERR(shadow) == -ENOENT)) { - struct lu_site_bkt_data *bkt; - - bkt = cfs_hash_bd_extra_get(hs, &bd); cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash); - bkt->lsb_busy++; cfs_hash_bd_unlock(hs, &bd, 1); lu_object_limit(env, dev); @@ -793,6 +788,9 @@ struct lu_object *lu_object_find_at(const struct lu_env *env, struct lu_object *obj; wait_queue_t wait; + if (conf != NULL && conf->loc_flags & LOC_F_NOWAIT) + return lu_object_find_try(env, dev, f, conf, NULL); + while (1) { obj = lu_object_find_try(env, dev, f, conf, &wait); if (obj != ERR_PTR(-EAGAIN)) @@ -801,7 +799,7 @@ struct lu_object *lu_object_find_at(const struct lu_env *env, * lu_object_find_try() already added waiter into the * wait queue. 
*/ - waitq_wait(&wait, TASK_UNINTERRUPTIBLE); + schedule(); bkt = lu_site_bkt_from_fid(dev->ld_site, (void *)f); remove_wait_queue(&bkt->lsb_marche_funebre, &wait); } @@ -809,30 +807,6 @@ struct lu_object *lu_object_find_at(const struct lu_env *env, EXPORT_SYMBOL(lu_object_find_at); /** - * Try to find the object in cache without waiting for the dead object - * to be released nor allocating object if no cached one was found. - * - * The found object will be set as LU_OBJECT_HEARD_BANSHEE for purging. - */ -void lu_object_purge(const struct lu_env *env, struct lu_device *dev, - const struct lu_fid *f) -{ - struct lu_site *s = dev->ld_site; - cfs_hash_t *hs = s->ls_obj_hash; - cfs_hash_bd_t bd; - struct lu_object *o; - - cfs_hash_bd_get_and_lock(hs, f, &bd, 1); - o = htable_lookup_nowait(s, &bd, f); - cfs_hash_bd_unlock(hs, &bd, 1); - if (!IS_ERR(o)) { - set_bit(LU_OBJECT_HEARD_BANSHEE, &o->lo_header->loh_flags); - lu_object_put(env, o); - } -} -EXPORT_SYMBOL(lu_object_purge); - -/** * Find object with given fid, and return its slice belonging to given device. */ struct lu_object *lu_object_find_slice(const struct lu_env *env, @@ -840,49 +814,37 @@ struct lu_object *lu_object_find_slice(const struct lu_env *env, const struct lu_fid *f, const struct lu_object_conf *conf) { - struct lu_object *top; - struct lu_object *obj; + struct lu_object *top; + struct lu_object *obj; + + top = lu_object_find(env, dev, f, conf); + if (IS_ERR(top)) + return top; + + obj = lu_object_locate(top->lo_header, dev->ld_type); + if (unlikely(obj == NULL)) { + lu_object_put(env, top); + obj = ERR_PTR(-ENOENT); + } - top = lu_object_find(env, dev, f, conf); - if (!IS_ERR(top)) { - obj = lu_object_locate(top->lo_header, dev->ld_type); - if (obj == NULL) - lu_object_put(env, top); - } else - obj = top; - return obj; + return obj; } EXPORT_SYMBOL(lu_object_find_slice); -/** - * Global list of all device types. - */ -static CFS_LIST_HEAD(lu_device_types); - int lu_device_type_init(struct lu_device_type *ldt) { int result = 0; atomic_set(&ldt->ldt_device_nr, 0); - INIT_LIST_HEAD(&ldt->ldt_linkage); if (ldt->ldt_ops->ldto_init) result = ldt->ldt_ops->ldto_init(ldt); - if (result == 0) { - spin_lock(&obd_types_lock); - list_add(&ldt->ldt_linkage, &lu_device_types); - spin_unlock(&obd_types_lock); - } - return result; } EXPORT_SYMBOL(lu_device_type_init); void lu_device_type_fini(struct lu_device_type *ldt) { - spin_lock(&obd_types_lock); - list_del_init(&ldt->ldt_linkage); - spin_unlock(&obd_types_lock); if (ldt->ldt_ops->ldto_fini) ldt->ldt_ops->ldto_fini(ldt); } @@ -891,8 +853,8 @@ EXPORT_SYMBOL(lu_device_type_fini); /** * Global list of all sites on this node */ -static CFS_LIST_HEAD(lu_sites); -static DEFINE_MUTEX(lu_sites_guard); +static LIST_HEAD(lu_sites); +static DECLARE_RWSEM(lu_sites_guard); /** * Global environment used by site shrinker. 
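The lu_object_find_at()/htable_lookup() hunks above replace the old waitq_wait() call with plain schedule(): a lookup that races with a dying object parks itself on the bucket's lsb_marche_funebre wait queue, sleeps, and retries once the freeing path wakes it; the new LOC_F_NOWAIT flag lets callers skip the wait entirely by passing a NULL waiter. The shape of that wait-and-retry handshake can be sketched in portable C, with a POSIX condition variable standing in for the kernel wait queue — a minimal sketch only, and the demo_* names are illustrative, not Lustre's:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for one hash bucket: its lock plus the "marche funebre"
     * wait queue on which lookups wait for a dying object to be freed. */
    struct demo_bkt {
            pthread_mutex_t lock;
            pthread_cond_t  marche_funebre;
            bool            dying_present;  /* a zombie with this fid still exists */
    };

    /* Mirrors the lu_object_find_at() loop: when the lookup hits an object
     * that is dying but not yet freed (-EAGAIN), sleep on the bucket's wait
     * queue and retry after the freeing path signals it. */
    static void demo_find(struct demo_bkt *bkt)
    {
            pthread_mutex_lock(&bkt->lock);
            while (bkt->dying_present)
                    pthread_cond_wait(&bkt->marche_funebre, &bkt->lock);
            /* ...safe to insert or return the object here... */
            pthread_mutex_unlock(&bkt->lock);
    }

    /* Freeing path: drop the zombie, then wake every queued lookup. */
    static void demo_free_dying(struct demo_bkt *bkt)
    {
            pthread_mutex_lock(&bkt->lock);
            bkt->dying_present = false;
            pthread_cond_broadcast(&bkt->marche_funebre);
            pthread_mutex_unlock(&bkt->lock);
    }

    static void *finder(void *arg)
    {
            demo_find(arg);
            puts("lookup retried and completed");
            return NULL;
    }

    int main(void)
    {
            struct demo_bkt bkt = {
                    .lock           = PTHREAD_MUTEX_INITIALIZER,
                    .marche_funebre = PTHREAD_COND_INITIALIZER,
                    .dying_present  = true,
            };
            pthread_t t;

            pthread_create(&t, NULL, finder, &bkt);
            demo_free_dying(&bkt);  /* may run before or after the wait starts */
            pthread_join(t, NULL);
            return 0;
    }

Because the condition is re-checked under the lock, a wakeup that arrives before the finder sleeps is never lost — the same property the kernel version gets from re-running htable_lookup() after schedule().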
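The last change in that hunk turns the global lu_sites_guard from a mutex (DEFINE_MUTEX) into an rw_semaphore (DECLARE_RWSEM). Later hunks show why: lu_cache_shrink_count() only reads per-site counters, so it can take the lock shared with down_read(), while site setup/teardown and lu_cache_shrink_scan() still take it exclusive with down_write(). Here is a minimal userspace sketch of the same reader/writer split, with a POSIX rwlock in place of the kernel rwsem; the demo_* names are illustrative:

    #include <pthread.h>
    #include <stdio.h>

    /* Illustrative stand-ins for struct lu_site and the global site list. */
    struct demo_site {
            long lru_len;              /* what the count callback sums */
            struct demo_site *next;
    };

    static struct demo_site *demo_sites;                             /* like lu_sites */
    static pthread_rwlock_t demo_guard = PTHREAD_RWLOCK_INITIALIZER; /* like lu_sites_guard */

    /* Count path: shared lock only, so many CPUs can run it at once. */
    static long demo_shrink_count(void)
    {
            struct demo_site *s;
            long cached = 0;

            pthread_rwlock_rdlock(&demo_guard);
            for (s = demo_sites; s != NULL; s = s->next)
                    cached += s->lru_len;
            pthread_rwlock_unlock(&demo_guard);
            return cached;
    }

    /* Registration path: exclusive lock, like down_write(&lu_sites_guard). */
    static void demo_site_register(struct demo_site *s)
    {
            pthread_rwlock_wrlock(&demo_guard);
            s->next = demo_sites;
            demo_sites = s;
            pthread_rwlock_unlock(&demo_guard);
    }

    int main(void)
    {
            struct demo_site a = { .lru_len = 3 }, b = { .lru_len = 7 };

            demo_site_register(&a);
            demo_site_register(&b);
            printf("cached objects: %ld\n", demo_shrink_count());
            return 0;
    }

The payoff in the patch is that the frequently-called count path no longer serializes against purge and teardown.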
@@ -906,24 +868,24 @@ struct lu_site_print_arg { }; static int -lu_site_obj_print(cfs_hash_t *hs, cfs_hash_bd_t *bd, - cfs_hlist_node_t *hnode, void *data) -{ - struct lu_site_print_arg *arg = (struct lu_site_print_arg *)data; - struct lu_object_header *h; - - h = cfs_hlist_entry(hnode, struct lu_object_header, loh_hash); - if (!cfs_list_empty(&h->loh_layers)) { - const struct lu_object *o; - - o = lu_object_top(h); - lu_object_print(arg->lsp_env, arg->lsp_cookie, - arg->lsp_printer, o); - } else { - lu_object_header_print(arg->lsp_env, arg->lsp_cookie, - arg->lsp_printer, h); - } - return 0; +lu_site_obj_print(struct cfs_hash *hs, struct cfs_hash_bd *bd, + struct hlist_node *hnode, void *data) +{ + struct lu_site_print_arg *arg = (struct lu_site_print_arg *)data; + struct lu_object_header *h; + + h = hlist_entry(hnode, struct lu_object_header, loh_hash); + if (!list_empty(&h->loh_layers)) { + const struct lu_object *o; + + o = lu_object_top(h); + lu_object_print(arg->lsp_env, arg->lsp_cookie, + arg->lsp_printer, o); + } else { + lu_object_header_print(arg->lsp_env, arg->lsp_cookie, + arg->lsp_printer, h); + } + return 0; } /** @@ -945,10 +907,11 @@ EXPORT_SYMBOL(lu_site_print); /** * Return desired hash table order. */ -static int lu_htable_order(struct lu_device *top) +static unsigned long lu_htable_order(struct lu_device *top) { - unsigned long cache_size; - int bits; + unsigned long cache_size; + unsigned long bits; + unsigned long bits_max = LU_SITE_BITS_MAX; /* * For ZFS based OSDs the cache should be disabled by default. This @@ -962,6 +925,9 @@ static int lu_htable_order(struct lu_device *top) return LU_SITE_BITS_MIN; } + if (strcmp(top->ld_type->ldt_name, LUSTRE_VVP_NAME) == 0) + bits_max = LU_SITE_BITS_MAX_CL; + /* * Calculate hash table size, assuming that we want reasonable * performance when 20% of total memory is occupied by cache of @@ -973,8 +939,8 @@ static int lu_htable_order(struct lu_device *top) #if BITS_PER_LONG == 32 /* limit hashtable size for lowmem systems to low RAM */ - if (cache_size > 1 << (30 - PAGE_CACHE_SHIFT)) - cache_size = 1 << (30 - PAGE_CACHE_SHIFT) * 3 / 4; + if (cache_size > 1 << (30 - PAGE_SHIFT)) + cache_size = 1 << (30 - PAGE_SHIFT) * 3 / 4; #endif /* clear off unreasonable cache setting. 
*/ @@ -987,15 +953,16 @@ static int lu_htable_order(struct lu_device *top) lu_cache_percent = LU_CACHE_PERCENT_DEFAULT; } cache_size = cache_size / 100 * lu_cache_percent * - (PAGE_CACHE_SIZE / 1024); + (PAGE_SIZE / 1024); for (bits = 1; (1 << bits) < cache_size; ++bits) { ; } - return bits; + + return clamp_t(typeof(bits), bits, LU_SITE_BITS_MIN, bits_max); } -static unsigned lu_obj_hop_hash(cfs_hash_t *hs, +static unsigned lu_obj_hop_hash(struct cfs_hash *hs, const void *key, unsigned mask) { struct lu_fid *fid = (struct lu_fid *)key; @@ -1014,48 +981,41 @@ static unsigned lu_obj_hop_hash(cfs_hash_t *hs, return hash & mask; } -static void *lu_obj_hop_object(cfs_hlist_node_t *hnode) +static void *lu_obj_hop_object(struct hlist_node *hnode) { - return cfs_hlist_entry(hnode, struct lu_object_header, loh_hash); + return hlist_entry(hnode, struct lu_object_header, loh_hash); } -static void *lu_obj_hop_key(cfs_hlist_node_t *hnode) +static void *lu_obj_hop_key(struct hlist_node *hnode) { - struct lu_object_header *h; + struct lu_object_header *h; - h = cfs_hlist_entry(hnode, struct lu_object_header, loh_hash); - return &h->loh_fid; + h = hlist_entry(hnode, struct lu_object_header, loh_hash); + return &h->loh_fid; } -static int lu_obj_hop_keycmp(const void *key, cfs_hlist_node_t *hnode) +static int lu_obj_hop_keycmp(const void *key, struct hlist_node *hnode) { - struct lu_object_header *h; + struct lu_object_header *h; - h = cfs_hlist_entry(hnode, struct lu_object_header, loh_hash); - return lu_fid_eq(&h->loh_fid, (struct lu_fid *)key); + h = hlist_entry(hnode, struct lu_object_header, loh_hash); + return lu_fid_eq(&h->loh_fid, (struct lu_fid *)key); } -static void lu_obj_hop_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode) +static void lu_obj_hop_get(struct cfs_hash *hs, struct hlist_node *hnode) { - struct lu_object_header *h; - - h = cfs_hlist_entry(hnode, struct lu_object_header, loh_hash); - if (atomic_add_return(1, &h->loh_ref) == 1) { - struct lu_site_bkt_data *bkt; - cfs_hash_bd_t bd; + struct lu_object_header *h; - cfs_hash_bd_get(hs, &h->loh_fid, &bd); - bkt = cfs_hash_bd_extra_get(hs, &bd); - bkt->lsb_busy++; - } + h = hlist_entry(hnode, struct lu_object_header, loh_hash); + atomic_inc(&h->loh_ref); } -static void lu_obj_hop_put_locked(cfs_hash_t *hs, cfs_hlist_node_t *hnode) +static void lu_obj_hop_put_locked(struct cfs_hash *hs, struct hlist_node *hnode) { LBUG(); /* we should never called it */ } -cfs_hash_ops_t lu_site_hash_ops = { +static struct cfs_hash_ops lu_site_hash_ops = { .hs_hash = lu_obj_hop_hash, .hs_key = lu_obj_hop_key, .hs_keycmp = lu_obj_hop_keycmp, @@ -1067,8 +1027,8 @@ cfs_hash_ops_t lu_site_hash_ops = { void lu_dev_add_linkage(struct lu_site *s, struct lu_device *d) { spin_lock(&s->ls_ld_lock); - if (cfs_list_empty(&d->ld_linkage)) - cfs_list_add(&d->ld_linkage, &s->ls_ld_linkage); + if (list_empty(&d->ld_linkage)) + list_add(&d->ld_linkage, &s->ls_ld_linkage); spin_unlock(&s->ls_ld_lock); } EXPORT_SYMBOL(lu_dev_add_linkage); @@ -1076,7 +1036,7 @@ EXPORT_SYMBOL(lu_dev_add_linkage); void lu_dev_del_linkage(struct lu_site *s, struct lu_device *d) { spin_lock(&s->ls_ld_lock); - cfs_list_del_init(&d->ld_linkage); + list_del_init(&d->ld_linkage); spin_unlock(&s->ls_ld_lock); } EXPORT_SYMBOL(lu_dev_del_linkage); @@ -1087,16 +1047,26 @@ EXPORT_SYMBOL(lu_dev_del_linkage); int lu_site_init(struct lu_site *s, struct lu_device *top) { struct lu_site_bkt_data *bkt; - cfs_hash_bd_t bd; + struct cfs_hash_bd bd; char name[16]; - int bits; - int i; + unsigned long bits; + unsigned 
int i; + int rc; ENTRY; memset(s, 0, sizeof *s); - bits = lu_htable_order(top); - snprintf(name, 16, "lu_site_%s", top->ld_type->ldt_name); - for (bits = min(max(LU_SITE_BITS_MIN, bits), LU_SITE_BITS_MAX); + mutex_init(&s->ls_purge_mutex); + +#ifdef HAVE_PERCPU_COUNTER_INIT_GFP_FLAG + rc = percpu_counter_init(&s->ls_lru_len_counter, 0, GFP_NOFS); +#else + rc = percpu_counter_init(&s->ls_lru_len_counter, 0); +#endif + if (rc) + return -ENOMEM; + + snprintf(name, sizeof(name), "lu_site_%s", top->ld_type->ldt_name); + for (bits = lu_htable_order(top); bits >= LU_SITE_BITS_MIN; bits--) { s->ls_obj_hash = cfs_hash_create(name, bits, bits, bits - LU_SITE_BKT_BITS, @@ -1112,13 +1082,13 @@ int lu_site_init(struct lu_site *s, struct lu_device *top) } if (s->ls_obj_hash == NULL) { - CERROR("failed to create lu_site hash with bits: %d\n", bits); + CERROR("failed to create lu_site hash with bits: %lu\n", bits); return -ENOMEM; } cfs_hash_for_each_bucket(s->ls_obj_hash, &bd, i) { bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, &bd); - CFS_INIT_LIST_HEAD(&bkt->lsb_lru); + INIT_LIST_HEAD(&bkt->lsb_lru); init_waitqueue_head(&bkt->lsb_marche_funebre); } @@ -1142,13 +1112,13 @@ int lu_site_init(struct lu_site *s, struct lu_device *top) lprocfs_counter_init(s->ls_stats, LU_SS_LRU_PURGED, 0, "lru_purged", "lru_purged"); - CFS_INIT_LIST_HEAD(&s->ls_linkage); + INIT_LIST_HEAD(&s->ls_linkage); s->ls_top_dev = top; top->ld_site = s; lu_device_get(top); lu_ref_add(&top->ld_reference, "site-top", s); - CFS_INIT_LIST_HEAD(&s->ls_ld_linkage); + INIT_LIST_HEAD(&s->ls_ld_linkage); spin_lock_init(&s->ls_ld_lock); lu_dev_add_linkage(s, top); @@ -1162,9 +1132,11 @@ EXPORT_SYMBOL(lu_site_init); */ void lu_site_fini(struct lu_site *s) { - mutex_lock(&lu_sites_guard); - cfs_list_del_init(&s->ls_linkage); - mutex_unlock(&lu_sites_guard); + down_write(&lu_sites_guard); + list_del_init(&s->ls_linkage); + up_write(&lu_sites_guard); + + percpu_counter_destroy(&s->ls_lru_len_counter); if (s->ls_obj_hash != NULL) { cfs_hash_putref(s->ls_obj_hash); @@ -1189,11 +1161,11 @@ EXPORT_SYMBOL(lu_site_fini); int lu_site_init_finish(struct lu_site *s) { int result; - mutex_lock(&lu_sites_guard); + down_write(&lu_sites_guard); result = lu_context_refill(&lu_shrink_env.le_ctx); if (result == 0) - cfs_list_add(&s->ls_linkage, &lu_sites); - mutex_unlock(&lu_sites_guard); + list_add(&s->ls_linkage, &lu_sites); + up_write(&lu_sites_guard); return result; } EXPORT_SYMBOL(lu_site_init_finish); @@ -1270,7 +1242,7 @@ int lu_object_init(struct lu_object *o, struct lu_object_header *h, o->lo_dev = d; lu_device_get(d); lu_ref_add_at(&d->ld_reference, &o->lo_dev_ref, "lu_object", o); - CFS_INIT_LIST_HEAD(&o->lo_linkage); + INIT_LIST_HEAD(&o->lo_linkage); return 0; } @@ -1283,7 +1255,7 @@ void lu_object_fini(struct lu_object *o) { struct lu_device *dev = o->lo_dev; - LASSERT(cfs_list_empty(&o->lo_linkage)); + LASSERT(list_empty(&o->lo_linkage)); if (dev != NULL) { lu_ref_del_at(&dev->ld_reference, &o->lo_dev_ref, @@ -1302,7 +1274,7 @@ EXPORT_SYMBOL(lu_object_fini); */ void lu_object_add_top(struct lu_object_header *h, struct lu_object *o) { - cfs_list_move(&o->lo_linkage, &h->loh_layers); + list_move(&o->lo_linkage, &h->loh_layers); } EXPORT_SYMBOL(lu_object_add_top); @@ -1314,7 +1286,7 @@ EXPORT_SYMBOL(lu_object_add_top); */ void lu_object_add(struct lu_object *before, struct lu_object *o) { - cfs_list_move(&o->lo_linkage, &before->lo_linkage); + list_move(&o->lo_linkage, &before->lo_linkage); } EXPORT_SYMBOL(lu_object_add); @@ -1325,9 +1297,9 @@ int 
lu_object_header_init(struct lu_object_header *h) { memset(h, 0, sizeof *h); atomic_set(&h->loh_ref, 1); - CFS_INIT_HLIST_NODE(&h->loh_hash); - CFS_INIT_LIST_HEAD(&h->loh_lru); - CFS_INIT_LIST_HEAD(&h->loh_layers); + INIT_HLIST_NODE(&h->loh_hash); + INIT_LIST_HEAD(&h->loh_lru); + INIT_LIST_HEAD(&h->loh_layers); lu_ref_init(&h->loh_reference); return 0; } @@ -1338,9 +1310,9 @@ EXPORT_SYMBOL(lu_object_header_init); */ void lu_object_header_fini(struct lu_object_header *h) { - LASSERT(cfs_list_empty(&h->loh_layers)); - LASSERT(cfs_list_empty(&h->loh_lru)); - LASSERT(cfs_hlist_unhashed(&h->loh_hash)); + LASSERT(list_empty(&h->loh_layers)); + LASSERT(list_empty(&h->loh_lru)); + LASSERT(hlist_unhashed(&h->loh_hash)); lu_ref_fini(&h->loh_reference); } EXPORT_SYMBOL(lu_object_header_fini); @@ -1352,18 +1324,16 @@ EXPORT_SYMBOL(lu_object_header_fini); struct lu_object *lu_object_locate(struct lu_object_header *h, const struct lu_device_type *dtype) { - struct lu_object *o; + struct lu_object *o; - cfs_list_for_each_entry(o, &h->loh_layers, lo_linkage) { - if (o->lo_dev->ld_type == dtype) - return o; - } - return NULL; + list_for_each_entry(o, &h->loh_layers, lo_linkage) { + if (o->lo_dev->ld_type == dtype) + return o; + } + return NULL; } EXPORT_SYMBOL(lu_object_locate); - - /** * Finalize and free devices in the device stack. * @@ -1399,7 +1369,6 @@ void lu_stack_fini(const struct lu_env *env, struct lu_device *top) } } } -EXPORT_SYMBOL(lu_stack_fini); enum { /** @@ -1410,7 +1379,8 @@ enum { static struct lu_context_key *lu_keys[LU_CONTEXT_KEY_NR] = { NULL, }; -static DEFINE_SPINLOCK(lu_keys_guard); +DEFINE_RWLOCK(lu_keys_guard); +static atomic_t lu_key_initing_cnt = ATOMIC_INIT(0); /** * Global counter incremented whenever key is registered, unregistered, @@ -1425,8 +1395,8 @@ static unsigned key_set_version = 0; */ int lu_context_key_register(struct lu_context_key *key) { - int result; - int i; + int result; + unsigned int i; LASSERT(key->lct_init != NULL); LASSERT(key->lct_fini != NULL); @@ -1434,7 +1404,7 @@ int lu_context_key_register(struct lu_context_key *key) LASSERT(key->lct_owner != NULL); result = -ENFILE; - spin_lock(&lu_keys_guard); + write_lock(&lu_keys_guard); for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) { if (lu_keys[i] == NULL) { key->lct_index = i; @@ -1446,7 +1416,7 @@ int lu_context_key_register(struct lu_context_key *key) break; } } - spin_unlock(&lu_keys_guard); + write_unlock(&lu_keys_guard); return result; } EXPORT_SYMBOL(lu_context_key_register); @@ -1485,13 +1455,26 @@ void lu_context_key_degister(struct lu_context_key *key) lu_context_key_quiesce(key); ++key_set_version; - spin_lock(&lu_keys_guard); + write_lock(&lu_keys_guard); key_fini(&lu_shrink_env.le_ctx, key->lct_index); + + /** + * Wait until all transient contexts referencing this key have + * run lu_context_key::lct_fini() method. + */ + while (atomic_read(&key->lct_used) > 1) { + write_unlock(&lu_keys_guard); + CDEBUG(D_INFO, "lu_context_key_degister: \"%s\" %p, %d\n", + key->lct_owner ? key->lct_owner->name : "", key, + atomic_read(&key->lct_used)); + schedule(); + write_lock(&lu_keys_guard); + } if (lu_keys[key->lct_index]) { lu_keys[key->lct_index] = NULL; lu_ref_fini(&key->lct_reference); } - spin_unlock(&lu_keys_guard); + write_unlock(&lu_keys_guard); LASSERTF(atomic_read(&key->lct_used) == 1, "key has instances: %d\n", @@ -1596,7 +1579,7 @@ EXPORT_SYMBOL(lu_context_key_get); /** * List of remembered contexts. XXX document me. 
*/ -static CFS_LIST_HEAD(lu_context_remembered); +static LIST_HEAD(lu_context_remembered); /** * Destroy \a key in all remembered contexts. This is used to destroy key @@ -1605,38 +1588,50 @@ static CFS_LIST_HEAD(lu_context_remembered); */ void lu_context_key_quiesce(struct lu_context_key *key) { - struct lu_context *ctx; - extern unsigned cl_env_cache_purge(unsigned nr); + struct lu_context *ctx; - if (!(key->lct_tags & LCT_QUIESCENT)) { - /* - * XXX layering violation. - */ - cl_env_cache_purge(~0); - key->lct_tags |= LCT_QUIESCENT; + if (!(key->lct_tags & LCT_QUIESCENT)) { /* * XXX memory barrier has to go here. */ - spin_lock(&lu_keys_guard); - cfs_list_for_each_entry(ctx, &lu_context_remembered, - lc_remember) + write_lock(&lu_keys_guard); + key->lct_tags |= LCT_QUIESCENT; + + /** + * Wait until all lu_context_key::lct_init() methods + * have completed. + */ + while (atomic_read(&lu_key_initing_cnt) > 0) { + write_unlock(&lu_keys_guard); + CDEBUG(D_INFO, "lu_context_key_quiesce: \"%s\"" + " %p, %d (%d)\n", + key->lct_owner ? key->lct_owner->name : "", + key, atomic_read(&key->lct_used), + atomic_read(&lu_key_initing_cnt)); + schedule(); + write_lock(&lu_keys_guard); + } + + list_for_each_entry(ctx, &lu_context_remembered, + lc_remember) key_fini(ctx, key->lct_index); - spin_unlock(&lu_keys_guard); + ++key_set_version; + write_unlock(&lu_keys_guard); } } -EXPORT_SYMBOL(lu_context_key_quiesce); void lu_context_key_revive(struct lu_context_key *key) { - key->lct_tags &= ~LCT_QUIESCENT; - ++key_set_version; + write_lock(&lu_keys_guard); + key->lct_tags &= ~LCT_QUIESCENT; + ++key_set_version; + write_unlock(&lu_keys_guard); } -EXPORT_SYMBOL(lu_context_key_revive); static void keys_fini(struct lu_context *ctx) { - int i; + unsigned int i; if (ctx->lc_value == NULL) return; @@ -1650,46 +1645,77 @@ static void keys_fini(struct lu_context *ctx) static int keys_fill(struct lu_context *ctx) { - int i; - - LINVRNT(ctx->lc_value != NULL); - for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) { - struct lu_context_key *key; + unsigned int i; + unsigned pre_version; - key = lu_keys[i]; - if (ctx->lc_value[i] == NULL && key != NULL && - (key->lct_tags & ctx->lc_tags) && - /* - * Don't create values for a LCT_QUIESCENT key, as this - * will pin module owning a key. - */ - !(key->lct_tags & LCT_QUIESCENT)) { - void *value; - - LINVRNT(key->lct_init != NULL); - LINVRNT(key->lct_index == i); - - value = key->lct_init(ctx, key); - if (unlikely(IS_ERR(value))) - return PTR_ERR(value); + /* + * A serialisation with lu_context_key_quiesce() is needed, but some + * "key->lct_init()" are calling kernel memory allocation routine and + * can't be called while holding a spin_lock. + * "lu_keys_guard" is held while incrementing "lu_key_initing_cnt" + * to ensure the start of the serialisation. + * An atomic_t variable is still used, in order not to reacquire the + * lock when decrementing the counter. + */ + read_lock(&lu_keys_guard); + atomic_inc(&lu_key_initing_cnt); + pre_version = key_set_version; + read_unlock(&lu_keys_guard); + +refill: + LINVRNT(ctx->lc_value != NULL); + for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) { + struct lu_context_key *key; + + key = lu_keys[i]; + if (ctx->lc_value[i] == NULL && key != NULL && + (key->lct_tags & ctx->lc_tags) && + /* + * Don't create values for a LCT_QUIESCENT key, as this + * will pin module owning a key. 
+ */ + !(key->lct_tags & LCT_QUIESCENT)) { + void *value; + + LINVRNT(key->lct_init != NULL); + LINVRNT(key->lct_index == i); LASSERT(key->lct_owner != NULL); - if (!(ctx->lc_tags & LCT_NOREF)) - try_module_get(key->lct_owner); + if (!(ctx->lc_tags & LCT_NOREF) && + try_module_get(key->lct_owner) == 0) { + /* module is unloading, skip this key */ + continue; + } + + value = key->lct_init(ctx, key); + if (unlikely(IS_ERR(value))) { + atomic_dec(&lu_key_initing_cnt); + return PTR_ERR(value); + } + lu_ref_add_atomic(&key->lct_reference, "ctx", ctx); atomic_inc(&key->lct_used); - /* - * This is the only place in the code, where an - * element of ctx->lc_value[] array is set to non-NULL - * value. - */ - ctx->lc_value[i] = value; - if (key->lct_exit != NULL) - ctx->lc_tags |= LCT_HAS_EXIT; - } - ctx->lc_version = key_set_version; - } - return 0; + /* + * This is the only place in the code, where an + * element of ctx->lc_value[] array is set to non-NULL + * value. + */ + ctx->lc_value[i] = value; + if (key->lct_exit != NULL) + ctx->lc_tags |= LCT_HAS_EXIT; + } + } + + read_lock(&lu_keys_guard); + if (pre_version != key_set_version) { + pre_version = key_set_version; + read_unlock(&lu_keys_guard); + goto refill; + } + + atomic_dec(&lu_key_initing_cnt); + read_unlock(&lu_keys_guard); + return 0; } static int keys_init(struct lu_context *ctx) @@ -1712,11 +1738,11 @@ int lu_context_init(struct lu_context *ctx, __u32 tags) ctx->lc_state = LCS_INITIALIZED; ctx->lc_tags = tags; if (tags & LCT_REMEMBER) { - spin_lock(&lu_keys_guard); - cfs_list_add(&ctx->lc_remember, &lu_context_remembered); - spin_unlock(&lu_keys_guard); + write_lock(&lu_keys_guard); + list_add(&ctx->lc_remember, &lu_context_remembered); + write_unlock(&lu_keys_guard); } else { - CFS_INIT_LIST_HEAD(&ctx->lc_remember); + INIT_LIST_HEAD(&ctx->lc_remember); } rc = keys_init(ctx); @@ -1736,14 +1762,14 @@ void lu_context_fini(struct lu_context *ctx) ctx->lc_state = LCS_FINALIZED; if ((ctx->lc_tags & LCT_REMEMBER) == 0) { - LASSERT(cfs_list_empty(&ctx->lc_remember)); + LASSERT(list_empty(&ctx->lc_remember)); keys_fini(ctx); } else { /* could race with key degister */ - spin_lock(&lu_keys_guard); + write_lock(&lu_keys_guard); keys_fini(ctx); - cfs_list_del_init(&ctx->lc_remember); - spin_unlock(&lu_keys_guard); + list_del_init(&ctx->lc_remember); + write_unlock(&lu_keys_guard); } } EXPORT_SYMBOL(lu_context_fini); @@ -1763,22 +1789,29 @@ EXPORT_SYMBOL(lu_context_enter); */ void lu_context_exit(struct lu_context *ctx) { - int i; + unsigned int i; LINVRNT(ctx->lc_state == LCS_ENTERED); ctx->lc_state = LCS_LEFT; if (ctx->lc_tags & LCT_HAS_EXIT && ctx->lc_value != NULL) { + /* could race with key quiescency */ + if (ctx->lc_tags & LCT_REMEMBER) + read_lock(&lu_keys_guard); + for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) { - if (ctx->lc_value[i] != NULL) { - struct lu_context_key *key; - - key = lu_keys[i]; - LASSERT(key != NULL); - if (key->lct_exit != NULL) - key->lct_exit(ctx, - key, ctx->lc_value[i]); - } + if (ctx->lc_value[i] != NULL) { + struct lu_context_key *key; + + key = lu_keys[i]; + LASSERT(key != NULL); + if (key->lct_exit != NULL) + key->lct_exit(ctx, + key, ctx->lc_value[i]); + } } + + if (ctx->lc_tags & LCT_REMEMBER) + read_unlock(&lu_keys_guard); } } EXPORT_SYMBOL(lu_context_exit); @@ -1792,7 +1825,6 @@ int lu_context_refill(struct lu_context *ctx) { return likely(ctx->lc_version == key_set_version) ? 
0 : keys_fill(ctx); } -EXPORT_SYMBOL(lu_context_refill); /** * lu_ctx_tags/lu_ses_tags will be updated if there are new types of @@ -1806,37 +1838,37 @@ __u32 lu_session_tags_default = 0; void lu_context_tags_update(__u32 tags) { - spin_lock(&lu_keys_guard); + write_lock(&lu_keys_guard); lu_context_tags_default |= tags; key_set_version++; - spin_unlock(&lu_keys_guard); + write_unlock(&lu_keys_guard); } EXPORT_SYMBOL(lu_context_tags_update); void lu_context_tags_clear(__u32 tags) { - spin_lock(&lu_keys_guard); + write_lock(&lu_keys_guard); lu_context_tags_default &= ~tags; key_set_version++; - spin_unlock(&lu_keys_guard); + write_unlock(&lu_keys_guard); } EXPORT_SYMBOL(lu_context_tags_clear); void lu_session_tags_update(__u32 tags) { - spin_lock(&lu_keys_guard); + write_lock(&lu_keys_guard); lu_session_tags_default |= tags; key_set_version++; - spin_unlock(&lu_keys_guard); + write_unlock(&lu_keys_guard); } EXPORT_SYMBOL(lu_session_tags_update); void lu_session_tags_clear(__u32 tags) { - spin_lock(&lu_keys_guard); + write_lock(&lu_keys_guard); lu_session_tags_default &= ~tags; key_set_version++; - spin_unlock(&lu_keys_guard); + write_unlock(&lu_keys_guard); } EXPORT_SYMBOL(lu_session_tags_clear); @@ -1908,18 +1940,19 @@ typedef struct lu_site_stats{ unsigned lss_busy; } lu_site_stats_t; -static void lu_site_stats_get(cfs_hash_t *hs, +static void lu_site_stats_get(struct cfs_hash *hs, lu_site_stats_t *stats, int populated) { - cfs_hash_bd_t bd; - int i; + struct cfs_hash_bd bd; + unsigned int i; cfs_hash_for_each_bucket(hs, &bd, i) { struct lu_site_bkt_data *bkt = cfs_hash_bd_extra_get(hs, &bd); - cfs_hlist_head_t *hhead; + struct hlist_head *hhead; cfs_hash_bd_lock(hs, &bd, 1); - stats->lss_busy += bkt->lsb_busy; + stats->lss_busy += + cfs_hash_bd_count_get(&bd) - bkt->lsb_lru_len; stats->lss_total += cfs_hash_bd_count_get(&bd); stats->lss_max_search = max((int)stats->lss_max_search, cfs_hash_bd_depmax_get(&bd)); @@ -1929,19 +1962,33 @@ static void lu_site_stats_get(cfs_hash_t *hs, } cfs_hash_bd_for_each_hlist(hs, &bd, hhead) { - if (!cfs_hlist_empty(hhead)) + if (!hlist_empty(hhead)) stats->lss_populated++; } cfs_hash_bd_unlock(hs, &bd, 1); } } -#ifdef __KERNEL__ +/* + * lu_cache_shrink_count() returns an approximate number of cached objects + * that can be freed by shrink_slab(). A counter, which tracks the + * number of items in the site's lru, is maintained in a percpu_counter + * for each site. The percpu values are incremented and decremented as + * objects are added or removed from the lru. The percpu values are summed + * and saved whenever a percpu value exceeds a threshold. Thus the saved, + * summed value at any given time may not accurately reflect the current + * lru length. But this value is sufficiently accurate for the needs of + * a shrinker. + * + * Using a per cpu counter is a compromise solution to concurrent access: + * lu_object_put() can update the counter without locking the site and + * lu_cache_shrink_count can sum the counters without locking each + * ls_obj_hash bucket. 
+ */ static unsigned long lu_cache_shrink_count(struct shrinker *sk, struct shrink_control *sc) { - lu_site_stats_t stats; struct lu_site *s; struct lu_site *tmp; unsigned long cached = 0; @@ -1949,16 +1996,15 @@ static unsigned long lu_cache_shrink_count(struct shrinker *sk, if (!(sc->gfp_mask & __GFP_FS)) return 0; - mutex_lock(&lu_sites_guard); - list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) { - memset(&stats, 0, sizeof(stats)); - lu_site_stats_get(s->ls_obj_hash, &stats, 0); - cached += stats.lss_total - stats.lss_busy; - } - mutex_unlock(&lu_sites_guard); + down_read(&lu_sites_guard); + list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) + cached += percpu_counter_read_positive(&s->ls_lru_len_counter); + up_read(&lu_sites_guard); cached = (cached / 100) * sysctl_vfs_cache_pressure; - CDEBUG(D_INODE, "%ld objects cached\n", cached); + CDEBUG(D_INODE, "%ld objects cached, cache pressure %d\n", + cached, sysctl_vfs_cache_pressure); + return cached; } @@ -1984,7 +2030,7 @@ static unsigned long lu_cache_shrink_scan(struct shrinker *sk, */ return SHRINK_STOP; - mutex_lock(&lu_sites_guard); + down_write(&lu_sites_guard); list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) { remain = lu_site_purge(&lu_shrink_env, s, remain); /* @@ -1994,7 +2040,7 @@ static unsigned long lu_cache_shrink_scan(struct shrinker *sk, list_move_tail(&s->ls_linkage, &splice); } list_splice(&splice, lu_sites.prev); - mutex_unlock(&lu_sites_guard); + up_write(&lu_sites_guard); return sc->nr_to_scan - remain; } @@ -2015,7 +2061,7 @@ static unsigned long lu_cache_shrink_scan(struct shrinker *sk, * is safe to take the lu_sites_guard lock. * * Ideally we should accurately return the remaining number of cached - * objects without taking the lu_sites_guard lock, but this is not + * objects without taking the lu_sites_guard lock, but this is not * possible in the current implementation. */ static int lu_cache_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask)) @@ -2032,11 +2078,10 @@ static int lu_cache_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask)) CDEBUG(D_INODE, "Shrink %lu objects\n", scv.nr_to_scan); - lu_cache_shrink_scan(shrinker, &scv); + if (scv.nr_to_scan != 0) + lu_cache_shrink_scan(shrinker, &scv); cached = lu_cache_shrink_count(shrinker, &scv); - if (scv.nr_to_scan == 0) - CDEBUG(D_INODE, "%d objects cached\n", cached); return cached; } @@ -2050,13 +2095,13 @@ static int lu_cache_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask)) /** * Environment to be used in debugger, contains all tags. */ -struct lu_env lu_debugging_env; +static struct lu_env lu_debugging_env; /** * Debugging printer function using printk(). */ int lu_printk_printer(const struct lu_env *env, - void *unused, const char *format, ...) + void *unused, const char *format, ...) { va_list args; @@ -2073,7 +2118,7 @@ int lu_debugging_setup(void) void lu_context_keys_dump(void) { - int i; + unsigned int i; for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) { struct lu_context_key *key; @@ -2090,8 +2135,6 @@ void lu_context_keys_dump(void) } } } -EXPORT_SYMBOL(lu_context_keys_dump); -#endif /* __KERNEL__ */ /** * Initialization of global lu_* data. @@ -2118,9 +2161,9 @@ int lu_global_init(void) * conservatively. This should not be too bad, because this * environment is global. 
*/ - mutex_lock(&lu_sites_guard); + down_write(&lu_sites_guard); result = lu_env_init(&lu_shrink_env, LCT_SHRINKER); - mutex_unlock(&lu_sites_guard); + up_write(&lu_sites_guard); if (result != 0) return result; @@ -2152,22 +2195,22 @@ void lu_global_fini(void) * Tear shrinker environment down _after_ de-registering * lu_global_key, because the latter has a value in the former. */ - mutex_lock(&lu_sites_guard); + down_write(&lu_sites_guard); lu_env_fini(&lu_shrink_env); - mutex_unlock(&lu_sites_guard); + up_write(&lu_sites_guard); lu_ref_global_fini(); } static __u32 ls_stats_read(struct lprocfs_stats *stats, int idx) { -#ifdef LPROCFS - struct lprocfs_counter ret; +#ifdef CONFIG_PROC_FS + struct lprocfs_counter ret; - lprocfs_stats_collect(stats, idx, &ret); - return (__u32)ret.lc_count; + lprocfs_stats_collect(stats, idx, &ret); + return (__u32)ret.lc_count; #else - return 0; + return 0; #endif } @@ -2182,43 +2225,22 @@ int lu_site_stats_seq_print(const struct lu_site *s, struct seq_file *m) memset(&stats, 0, sizeof(stats)); lu_site_stats_get(s->ls_obj_hash, &stats, 1); - return seq_printf(m, "%d/%d %d/%d %d %d %d %d %d %d %d\n", - stats.lss_busy, - stats.lss_total, - stats.lss_populated, - CFS_HASH_NHLIST(s->ls_obj_hash), - stats.lss_max_search, - ls_stats_read(s->ls_stats, LU_SS_CREATED), - ls_stats_read(s->ls_stats, LU_SS_CACHE_HIT), - ls_stats_read(s->ls_stats, LU_SS_CACHE_MISS), - ls_stats_read(s->ls_stats, LU_SS_CACHE_RACE), - ls_stats_read(s->ls_stats, LU_SS_CACHE_DEATH_RACE), - ls_stats_read(s->ls_stats, LU_SS_LRU_PURGED)); + seq_printf(m, "%d/%d %d/%d %d %d %d %d %d %d %d\n", + stats.lss_busy, + stats.lss_total, + stats.lss_populated, + CFS_HASH_NHLIST(s->ls_obj_hash), + stats.lss_max_search, + ls_stats_read(s->ls_stats, LU_SS_CREATED), + ls_stats_read(s->ls_stats, LU_SS_CACHE_HIT), + ls_stats_read(s->ls_stats, LU_SS_CACHE_MISS), + ls_stats_read(s->ls_stats, LU_SS_CACHE_RACE), + ls_stats_read(s->ls_stats, LU_SS_CACHE_DEATH_RACE), + ls_stats_read(s->ls_stats, LU_SS_LRU_PURGED)); + return 0; } EXPORT_SYMBOL(lu_site_stats_seq_print); -int lu_site_stats_print(const struct lu_site *s, char *page, int count) -{ - lu_site_stats_t stats; - - memset(&stats, 0, sizeof(stats)); - lu_site_stats_get(s->ls_obj_hash, &stats, 1); - - return snprintf(page, count, "%d/%d %d/%d %d %d %d %d %d %d %d\n", - stats.lss_busy, - stats.lss_total, - stats.lss_populated, - CFS_HASH_NHLIST(s->ls_obj_hash), - stats.lss_max_search, - ls_stats_read(s->ls_stats, LU_SS_CREATED), - ls_stats_read(s->ls_stats, LU_SS_CACHE_HIT), - ls_stats_read(s->ls_stats, LU_SS_CACHE_MISS), - ls_stats_read(s->ls_stats, LU_SS_CACHE_RACE), - ls_stats_read(s->ls_stats, LU_SS_CACHE_DEATH_RACE), - ls_stats_read(s->ls_stats, LU_SS_LRU_PURGED)); -} -EXPORT_SYMBOL(lu_site_stats_print); - /** * Helper function to initialize a number of kmem slab caches at once. 
*/ @@ -2266,24 +2288,26 @@ void lu_object_assign_fid(const struct lu_env *env, struct lu_object *o, { struct lu_site *s = o->lo_dev->ld_site; struct lu_fid *old = &o->lo_header->loh_fid; - struct lu_site_bkt_data *bkt; - struct lu_object *shadow; - wait_queue_t waiter; - cfs_hash_t *hs; - cfs_hash_bd_t bd; - __u64 version = 0; + struct cfs_hash *hs; + struct cfs_hash_bd bd; LASSERT(fid_is_zero(old)); + /* supposed to be unique */ hs = s->ls_obj_hash; cfs_hash_bd_get_and_lock(hs, (void *)fid, &bd, 1); - shadow = htable_lookup(s, &bd, fid, &waiter, &version); - /* supposed to be unique */ - LASSERT(IS_ERR(shadow) && PTR_ERR(shadow) == -ENOENT); +#ifdef CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK + { + __u64 version = 0; + wait_queue_t waiter; + struct lu_object *shadow; + shadow = htable_lookup(s, &bd, fid, &waiter, &version); + /* supposed to be unique */ + LASSERT(IS_ERR(shadow) && PTR_ERR(shadow) == -ENOENT); + } +#endif *old = *fid; - bkt = cfs_hash_bd_extra_get(hs, &bd); cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash); - bkt->lsb_busy++; cfs_hash_bd_unlock(hs, &bd, 1); } EXPORT_SYMBOL(lu_object_assign_fid); @@ -2325,7 +2349,7 @@ void lu_buf_free(struct lu_buf *buf) } EXPORT_SYMBOL(lu_buf_free); -void lu_buf_alloc(struct lu_buf *buf, int size) +void lu_buf_alloc(struct lu_buf *buf, size_t size) { LASSERT(buf); LASSERT(buf->lb_buf == NULL); @@ -2336,14 +2360,14 @@ void lu_buf_alloc(struct lu_buf *buf, int size) } EXPORT_SYMBOL(lu_buf_alloc); -void lu_buf_realloc(struct lu_buf *buf, int size) +void lu_buf_realloc(struct lu_buf *buf, size_t size) { lu_buf_free(buf); lu_buf_alloc(buf, size); } EXPORT_SYMBOL(lu_buf_realloc); -struct lu_buf *lu_buf_check_and_alloc(struct lu_buf *buf, int len) +struct lu_buf *lu_buf_check_and_alloc(struct lu_buf *buf, size_t len) { if (buf->lb_buf == NULL && buf->lb_len == 0) lu_buf_alloc(buf, len); @@ -2361,7 +2385,7 @@ EXPORT_SYMBOL(lu_buf_check_and_alloc); * old buffer remains unchanged on error * \retval 0 or -ENOMEM */ -int lu_buf_check_and_grow(struct lu_buf *buf, int len) +int lu_buf_check_and_grow(struct lu_buf *buf, size_t len) { char *ptr; @@ -2383,4 +2407,3 @@ int lu_buf_check_and_grow(struct lu_buf *buf, int len) return 0; } EXPORT_SYMBOL(lu_buf_check_and_grow); -
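The closing hunks widen the lu_buf helpers from int to size_t lengths. The body of lu_buf_check_and_grow() sits mostly outside this diff, but its documented contract — return 0 when the buffer is already large enough, otherwise allocate the bigger buffer, copy the old contents across, and leave the old buffer unchanged on -ENOMEM — can be sketched in plain C, with malloc/free standing in for the kernel allocators the real code uses:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Minimal stand-in for struct lu_buf: a pointer plus its length. */
    struct demo_buf {
            void   *lb_buf;
            size_t  lb_len;
    };

    /* Grow the buffer to at least `len` bytes, preserving its contents.
     * On allocation failure the old buffer is left untouched (-ENOMEM),
     * matching the contract documented for lu_buf_check_and_grow(). */
    static int demo_buf_check_and_grow(struct demo_buf *buf, size_t len)
    {
            char *ptr;

            if (len <= buf->lb_len)
                    return 0;       /* already large enough */

            ptr = malloc(len);
            if (ptr == NULL)
                    return -ENOMEM;

            /* Copy the old contents, then swap the buffers. */
            memcpy(ptr, buf->lb_buf, buf->lb_len);
            free(buf->lb_buf);
            buf->lb_buf = ptr;
            buf->lb_len = len;
            return 0;
    }

    int main(void)
    {
            struct demo_buf buf = { .lb_buf = malloc(4), .lb_len = 4 };

            memcpy(buf.lb_buf, "fid", 4);
            if (demo_buf_check_and_grow(&buf, 64) == 0)
                    printf("grown to %zu bytes, still \"%s\"\n",
                           buf.lb_len, (char *)buf.lb_buf);
            free(buf.lb_buf);
            return 0;
    }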
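A recurring theme of this patch is dropping the per-bucket lsb_busy bookkeeping in favor of a percpu_counter, ls_lru_len_counter, which lu_object_put() and htable_lookup() adjust with percpu_counter_inc()/percpu_counter_dec() and lu_cache_shrink_count() reads with percpu_counter_read_positive(). As the comment added above lu_cache_shrink_count() explains, the summed value may be slightly stale, which is accurate enough for a shrinker. The idea can be mimicked in userspace with one relaxed atomic slot per CPU — a sketch only; the kernel's percpu_counter additionally folds per-CPU deltas into a shared count once they exceed a batch threshold:

    #include <stdatomic.h>
    #include <stdio.h>

    #define NR_SLOTS 8      /* stand-in for the number of CPUs */

    /* One slot per "CPU": increments stay CPU-local, so they don't bounce
     * a single cache line between cores the way one shared atomic would. */
    static atomic_long slots[NR_SLOTS];

    /* Like percpu_counter_inc()/percpu_counter_dec() on the LRU length. */
    static void lru_len_add(unsigned int cpu, long delta)
    {
            atomic_fetch_add_explicit(&slots[cpu % NR_SLOTS], delta,
                                      memory_order_relaxed);
    }

    /* Like percpu_counter_read_positive(): an approximate, non-negative
     * sum. The total can be momentarily stale, which a shrinker tolerates. */
    static long lru_len_read_positive(void)
    {
            long sum = 0;

            for (int i = 0; i < NR_SLOTS; i++)
                    sum += atomic_load_explicit(&slots[i],
                                                memory_order_relaxed);
            return sum > 0 ? sum : 0;
    }

    int main(void)
    {
            lru_len_add(0, +1);     /* object parked on the LRU */
            lru_len_add(3, +1);
            lru_len_add(0, -1);     /* object reclaimed or looked up again */
            printf("approximate lru length: %ld\n", lru_len_read_positive());
            return 0;
    }

This is the same trade the patch makes: writers never contend on a global lock, and the reader accepts an approximate answer in exchange.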