X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Fobdclass%2Flu_object.c;h=3993c1871babc7c5b9155db91b35d807437139f8;hb=8ca651d636c37339cc50bffa4532b7cbd0ead570;hp=a0f93ec5116533bb63519eb99d5db25fe55cde4a;hpb=4aae7fae328fbb395601a8d323b831a3f12c7206;p=fs%2Flustre-release.git diff --git a/lustre/obdclass/lu_object.c b/lustre/obdclass/lu_object.c index a0f93ec..3993c18 100644 --- a/lustre/obdclass/lu_object.c +++ b/lustre/obdclass/lu_object.c @@ -26,8 +26,10 @@ * GPL HEADER END */ /* - * Copyright 2008 Sun Microsystems, Inc. All rights reserved + * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. * Use is subject to license terms. + * + * Copyright (c) 2011, 2012, Whamcloud, Inc. */ /* * This file is part of Lustre, http://www.lustre.org/ @@ -73,60 +75,67 @@ static void lu_object_free(const struct lu_env *env, struct lu_object *o); */ void lu_object_put(const struct lu_env *env, struct lu_object *o) { + struct lu_site_bkt_data *bkt; struct lu_object_header *top; struct lu_site *site; struct lu_object *orig; - int kill_it; + cfs_hash_bd_t bd; - top = o->lo_header; + top = o->lo_header; site = o->lo_dev->ld_site; orig = o; - kill_it = 0; - cfs_write_lock(&site->ls_guard); - if (cfs_atomic_dec_and_test(&top->loh_ref)) { - /* - * When last reference is released, iterate over object - * layers, and notify them that object is no longer busy. - */ - cfs_list_for_each_entry_reverse(o, &top->loh_layers, - lo_linkage) { - if (o->lo_ops->loo_object_release != NULL) - o->lo_ops->loo_object_release(env, o); - } - -- site->ls_busy; + + cfs_hash_bd_get(site->ls_obj_hash, &top->loh_fid, &bd); + bkt = cfs_hash_bd_extra_get(site->ls_obj_hash, &bd); + + if (!cfs_hash_bd_dec_and_lock(site->ls_obj_hash, &bd, &top->loh_ref)) { if (lu_object_is_dying(top)) { + /* - * If object is dying (will not be cached), removed it - * from hash table and LRU. - * - * This is done with hash table and LRU lists - * locked. As the only way to acquire first reference - * to previously unreferenced object is through - * hash-table lookup (lu_object_find()), or LRU - * scanning (lu_site_purge()), that are done under - * hash-table and LRU lock, no race with concurrent - * object lookup is possible and we can safely destroy - * object below. + * somebody may be waiting for this, currently only + * used for cl_object, see cl_object_put_last(). */ - cfs_hlist_del_init(&top->loh_hash); - cfs_list_del_init(&top->loh_lru); - -- site->ls_total; - kill_it = 1; + cfs_waitq_broadcast(&bkt->lsb_marche_funebre); } - } else if (lu_object_is_dying(top)) { - /* - * somebody may be waiting for this, currently only used - * for cl_object, see cl_object_put_last(). - */ - cfs_waitq_broadcast(&site->ls_marche_funebre); + return; } - cfs_write_unlock(&site->ls_guard); - if (kill_it) - /* - * Object was already removed from hash and lru above, can - * kill it. - */ - lu_object_free(env, orig); + + LASSERT(bkt->lsb_busy > 0); + bkt->lsb_busy--; + /* + * When last reference is released, iterate over object + * layers, and notify them that object is no longer busy. + */ + cfs_list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) { + if (o->lo_ops->loo_object_release != NULL) + o->lo_ops->loo_object_release(env, o); + } + + if (!lu_object_is_dying(top)) { + LASSERT(cfs_list_empty(&top->loh_lru)); + cfs_list_add_tail(&top->loh_lru, &bkt->lsb_lru); + cfs_hash_bd_unlock(site->ls_obj_hash, &bd, 1); + return; + } + + /* + * If object is dying (will not be cached), removed it + * from hash table and LRU. 
+ * + * This is done with hash table and LRU lists locked. As the only + * way to acquire first reference to previously unreferenced + * object is through hash-table lookup (lu_object_find()), + * or LRU scanning (lu_site_purge()), that are done under hash-table + * and LRU lock, no race with concurrent object lookup is possible + * and we can safely destroy object below. + */ + cfs_hash_bd_del_locked(site->ls_obj_hash, &bd, &top->loh_hash); + cfs_hash_bd_unlock(site->ls_obj_hash, &bd, 1); + /* + * Object was already removed from hash and lru above, can + * kill it. + */ + lu_object_free(env, orig); } EXPORT_SYMBOL(lu_object_put); @@ -160,7 +169,7 @@ static struct lu_object *lu_object_alloc(const struct lu_env *env, * after this point. */ LASSERT(fid_is_igif(f) || fid_ver(f) == 0); - top->lo_header->loh_fid = *f; + top->lo_header->loh_fid = *f; layers = &top->lo_header->loh_layers; do { /* @@ -192,7 +201,7 @@ static struct lu_object *lu_object_alloc(const struct lu_env *env, } } - dev->ld_site->ls_stats.s_created ++; + lprocfs_counter_incr(dev->ld_site->ls_stats, LU_SS_CREATED); RETURN(top); } @@ -201,13 +210,15 @@ static struct lu_object *lu_object_alloc(const struct lu_env *env, */ static void lu_object_free(const struct lu_env *env, struct lu_object *o) { - cfs_list_t splice; - struct lu_object *scan; - struct lu_site *site; - cfs_list_t *layers; + struct lu_site_bkt_data *bkt; + struct lu_site *site; + struct lu_object *scan; + cfs_list_t *layers; + cfs_list_t splice; site = o->lo_dev->ld_site; layers = &o->lo_header->loh_layers; + bkt = lu_site_bkt_from_fid(site, &o->lo_header->loh_fid); /* * First call ->loo_object_delete() method to release all resources. */ @@ -235,7 +246,9 @@ static void lu_object_free(const struct lu_env *env, struct lu_object *o) LASSERT(o->lo_ops->loo_object_free != NULL); o->lo_ops->loo_object_free(env, o); } - cfs_waitq_broadcast(&site->ls_marche_funebre); + + if (cfs_waitq_active(&bkt->lsb_marche_funebre)) + cfs_waitq_broadcast(&bkt->lsb_marche_funebre); } /** @@ -243,47 +256,78 @@ static void lu_object_free(const struct lu_env *env, struct lu_object *o) */ int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr) { - cfs_list_t dispose; struct lu_object_header *h; struct lu_object_header *temp; + struct lu_site_bkt_data *bkt; + cfs_hash_bd_t bd; + cfs_hash_bd_t bd2; + cfs_list_t dispose; + int did_sth; + int start; + int count; + int bnr; + int i; CFS_INIT_LIST_HEAD(&dispose); /* * Under LRU list lock, scan LRU list and move unreferenced objects to * the dispose list, removing them from LRU and hash table. */ - cfs_write_lock(&s->ls_guard); - cfs_list_for_each_entry_safe(h, temp, &s->ls_lru, loh_lru) { + start = s->ls_purge_start; + bnr = (nr == ~0) ? 
-1 : nr / CFS_HASH_NBKT(s->ls_obj_hash) + 1; + again: + did_sth = 0; + cfs_hash_for_each_bucket(s->ls_obj_hash, &bd, i) { + if (i < start) + continue; + count = bnr; + cfs_hash_bd_lock(s->ls_obj_hash, &bd, 1); + bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, &bd); + + cfs_list_for_each_entry_safe(h, temp, &bkt->lsb_lru, loh_lru) { + LASSERT(cfs_atomic_read(&h->loh_ref) == 0); + + cfs_hash_bd_get(s->ls_obj_hash, &h->loh_fid, &bd2); + LASSERT(bd.bd_bucket == bd2.bd_bucket); + + cfs_hash_bd_del_locked(s->ls_obj_hash, + &bd2, &h->loh_hash); + cfs_list_move(&h->loh_lru, &dispose); + if (did_sth == 0) + did_sth = 1; + + if (nr != ~0 && --nr == 0) + break; + + if (count > 0 && --count == 0) + break; + + } + cfs_hash_bd_unlock(s->ls_obj_hash, &bd, 1); + cfs_cond_resched(); /* - * Objects are sorted in lru order, and "busy" objects (ones - * with h->loh_ref > 0) naturally tend to live near hot end - * that we scan last. Unfortunately, sites usually have small - * (less then ten) number of busy yet rarely accessed objects - * (some global objects, accessed directly through pointers, - * bypassing hash table). Currently algorithm scans them over - * and over again. Probably we should move busy objects out of - * LRU, or we can live with that. + * Free everything on the dispose list. This is safe against + * races due to the reasons described in lu_object_put(). */ - if (nr-- == 0) + while (!cfs_list_empty(&dispose)) { + h = container_of0(dispose.next, + struct lu_object_header, loh_lru); + cfs_list_del_init(&h->loh_lru); + lu_object_free(env, lu_object_top(h)); + lprocfs_counter_incr(s->ls_stats, LU_SS_LRU_PURGED); + } + + if (nr == 0) break; - if (cfs_atomic_read(&h->loh_ref) > 0) - continue; - cfs_hlist_del_init(&h->loh_hash); - cfs_list_move(&h->loh_lru, &dispose); - s->ls_total --; } - cfs_write_unlock(&s->ls_guard); - /* - * Free everything on the dispose list. This is safe against races due - * to the reasons described in lu_object_put(). - */ - while (!cfs_list_empty(&dispose)) { - h = container_of0(dispose.next, - struct lu_object_header, loh_lru); - cfs_list_del_init(&h->loh_lru); - lu_object_free(env, lu_object_top(h)); - s->ls_stats.s_lru_purged ++; + + if (nr != 0 && did_sth && start != 0) { + start = 0; /* restart from the first bucket */ + goto again; } + /* race on s->ls_purge_start, but nobody cares */ + s->ls_purge_start = i % CFS_HASH_NBKT(s->ls_obj_hash); + return nr; } EXPORT_SYMBOL(lu_site_purge); @@ -310,7 +354,7 @@ enum { * * XXX overflow is not handled correctly. */ - LU_CDEBUG_LINE = 256 + LU_CDEBUG_LINE = 512 }; struct lu_cdebug_data { @@ -339,8 +383,8 @@ struct lu_context_key lu_global_key = { int lu_cdebug_printer(const struct lu_env *env, void *cookie, const char *format, ...) 
{ - struct lu_cdebug_print_info *info = cookie; - struct lu_cdebug_data *key; + struct libcfs_debug_msg_data *msgdata = cookie; + struct lu_cdebug_data *key; int used; int complete; va_list args; @@ -358,10 +402,8 @@ int lu_cdebug_printer(const struct lu_env *env, vsnprintf(key->lck_area + used, ARRAY_SIZE(key->lck_area) - used, format, args); if (complete) { - if (cfs_cdebug_show(info->lpi_mask, info->lpi_subsys)) - libcfs_debug_msg(NULL, info->lpi_subsys, info->lpi_mask, - (char *)info->lpi_file, info->lpi_fn, - info->lpi_line, "%s", key->lck_area); + if (cfs_cdebug_show(msgdata->msg_mask, msgdata->msg_subsys)) + libcfs_debug_msg(msgdata, "%s", key->lck_area); key->lck_area[0] = 0; } va_end(args); @@ -433,52 +475,48 @@ int lu_object_invariant(const struct lu_object *o) EXPORT_SYMBOL(lu_object_invariant); static struct lu_object *htable_lookup(struct lu_site *s, - const cfs_hlist_head_t *bucket, + cfs_hash_bd_t *bd, const struct lu_fid *f, - cfs_waitlink_t *waiter) + cfs_waitlink_t *waiter, + __u64 *version) { + struct lu_site_bkt_data *bkt; struct lu_object_header *h; - cfs_hlist_node_t *scan; - - cfs_hlist_for_each_entry(h, scan, bucket, loh_hash) { - s->ls_stats.s_cache_check ++; - if (likely(lu_fid_eq(&h->loh_fid, f))) { - if (unlikely(lu_object_is_dying(h))) { - /* - * Lookup found an object being destroyed; - * this object cannot be returned (to assure - * that references to dying objects are - * eventually drained), and moreover, lookup - * has to wait until object is freed. - */ - cfs_waitlink_init(waiter); - cfs_waitq_add(&s->ls_marche_funebre, waiter); - cfs_set_current_state(CFS_TASK_UNINT); - s->ls_stats.s_cache_death_race ++; - return ERR_PTR(-EAGAIN); - } - /* bump reference count... */ - if (cfs_atomic_add_return(1, &h->loh_ref) == 1) - ++ s->ls_busy; - /* and move to the head of the LRU */ - /* - * XXX temporary disable this to measure effects of - * read-write locking. - */ - /* list_move_tail(&h->loh_lru, &s->ls_lru); */ - s->ls_stats.s_cache_hit ++; - return lu_object_top(h); - } + cfs_hlist_node_t *hnode; + __u64 ver = cfs_hash_bd_version_get(bd); + + if (*version == ver) + return NULL; + + *version = ver; + bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, bd); + /* cfs_hash_bd_lookup_intent is a somehow "internal" function + * of cfs_hash, but we don't want refcount on object right now */ + hnode = cfs_hash_bd_lookup_locked(s->ls_obj_hash, bd, (void *)f); + if (hnode == NULL) { + lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_MISS); + return NULL; } - s->ls_stats.s_cache_miss ++; - return NULL; -} -static __u32 fid_hash(const struct lu_fid *f, int bits) -{ - /* all objects with same id and different versions will belong to same - * collisions list. */ - return cfs_hash_long(fid_flatten(f), bits); + h = container_of0(hnode, struct lu_object_header, loh_hash); + if (likely(!lu_object_is_dying(h))) { + lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_HIT); + cfs_list_del_init(&h->loh_lru); + return lu_object_top(h); + } + + /* + * Lookup found an object being destroyed this object cannot be + * returned (to assure that references to dying objects are eventually + * drained), and moreover, lookup has to wait until object is freed. 
+ */ + cfs_atomic_dec(&h->loh_ref); + + cfs_waitlink_init(waiter); + cfs_waitq_add(&bkt->lsb_marche_funebre, waiter); + cfs_set_current_state(CFS_TASK_UNINT); + lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_DEATH_RACE); + return ERR_PTR(-EAGAIN); } /** @@ -494,6 +532,29 @@ struct lu_object *lu_object_find(const struct lu_env *env, } EXPORT_SYMBOL(lu_object_find); +static struct lu_object *lu_object_new(const struct lu_env *env, + struct lu_device *dev, + const struct lu_fid *f, + const struct lu_object_conf *conf) +{ + struct lu_object *o; + cfs_hash_t *hs; + cfs_hash_bd_t bd; + struct lu_site_bkt_data *bkt; + + o = lu_object_alloc(env, dev, f, conf); + if (unlikely(IS_ERR(o))) + return o; + + hs = dev->ld_site->ls_obj_hash; + cfs_hash_bd_get_and_lock(hs, (void *)f, &bd, 1); + bkt = cfs_hash_bd_extra_get(hs, &bd); + cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash); + bkt->lsb_busy++; + cfs_hash_bd_unlock(hs, &bd, 1); + return o; +} + /** * Core logic of lu_object_find*() functions. */ @@ -503,10 +564,12 @@ static struct lu_object *lu_object_find_try(const struct lu_env *env, const struct lu_object_conf *conf, cfs_waitlink_t *waiter) { - struct lu_site *s; struct lu_object *o; struct lu_object *shadow; - cfs_hlist_head_t *bucket; + struct lu_site *s; + cfs_hash_t *hs; + cfs_hash_bd_t bd; + __u64 version = 0; /* * This uses standard index maintenance protocol: @@ -521,17 +584,21 @@ static struct lu_object *lu_object_find_try(const struct lu_env *env, * - unlock index; * - return object. * + * For "LOC_F_NEW" case, we are sure the object is new established. + * It is unnecessary to perform lookup-alloc-lookup-insert, instead, + * just alloc and insert directly. + * * If dying object is found during index search, add @waiter to the * site wait-queue and return ERR_PTR(-EAGAIN). 
*/ - - s = dev->ld_site; - bucket = s->ls_hash + fid_hash(f, s->ls_hash_bits); - - cfs_read_lock(&s->ls_guard); - o = htable_lookup(s, bucket, f, waiter); - cfs_read_unlock(&s->ls_guard); - + if (conf != NULL && conf->loc_flags & LOC_F_NEW) + return lu_object_new(env, dev, f, conf); + + s = dev->ld_site; + hs = s->ls_obj_hash; + cfs_hash_bd_get_and_lock(hs, (void *)f, &bd, 1); + o = htable_lookup(s, &bd, f, waiter, &version); + cfs_hash_bd_unlock(hs, &bd, 1); if (o != NULL) return o; @@ -545,20 +612,22 @@ static struct lu_object *lu_object_find_try(const struct lu_env *env, LASSERT(lu_fid_eq(lu_object_fid(o), f)); - cfs_write_lock(&s->ls_guard); - shadow = htable_lookup(s, bucket, f, waiter); + cfs_hash_bd_lock(hs, &bd, 1); + + shadow = htable_lookup(s, &bd, f, waiter, &version); if (likely(shadow == NULL)) { - cfs_hlist_add_head(&o->lo_header->loh_hash, bucket); - cfs_list_add_tail(&o->lo_header->loh_lru, &s->ls_lru); - ++ s->ls_busy; - ++ s->ls_total; - shadow = o; - o = NULL; - } else - s->ls_stats.s_cache_race ++; - cfs_write_unlock(&s->ls_guard); - if (o != NULL) - lu_object_free(env, o); + struct lu_site_bkt_data *bkt; + + bkt = cfs_hash_bd_extra_get(hs, &bd); + cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash); + bkt->lsb_busy++; + cfs_hash_bd_unlock(hs, &bd, 1); + return o; + } + + lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_RACE); + cfs_hash_bd_unlock(hs, &bd, 1); + lu_object_free(env, o); return shadow; } @@ -572,22 +641,22 @@ struct lu_object *lu_object_find_at(const struct lu_env *env, const struct lu_fid *f, const struct lu_object_conf *conf) { - struct lu_object *obj; - cfs_waitlink_t wait; + struct lu_site_bkt_data *bkt; + struct lu_object *obj; + cfs_waitlink_t wait; while (1) { obj = lu_object_find_try(env, dev, f, conf, &wait); - if (obj == ERR_PTR(-EAGAIN)) { - /* - * lu_object_find_try() already added waiter into the - * wait queue. - */ - cfs_waitq_wait(&wait, CFS_TASK_UNINT); - cfs_waitq_del(&dev->ld_site->ls_marche_funebre, &wait); - } else - break; + if (obj != ERR_PTR(-EAGAIN)) + return obj; + /* + * lu_object_find_try() already added waiter into the + * wait queue. + */ + cfs_waitq_wait(&wait, CFS_TASK_UNINT); + bkt = lu_site_bkt_from_fid(dev->ld_site, (void *)f); + cfs_waitq_del(&bkt->lsb_marche_funebre, &wait); } - return obj; } EXPORT_SYMBOL(lu_object_find_at); @@ -652,45 +721,65 @@ EXPORT_SYMBOL(lu_types_stop); * Global list of all sites on this node */ static CFS_LIST_HEAD(lu_sites); -static CFS_DECLARE_MUTEX(lu_sites_guard); +static CFS_DEFINE_MUTEX(lu_sites_guard); /** * Global environment used by site shrinker. */ static struct lu_env lu_shrink_env; +struct lu_site_print_arg { + struct lu_env *lsp_env; + void *lsp_cookie; + lu_printer_t lsp_printer; +}; + +static int +lu_site_obj_print(cfs_hash_t *hs, cfs_hash_bd_t *bd, + cfs_hlist_node_t *hnode, void *data) +{ + struct lu_site_print_arg *arg = (struct lu_site_print_arg *)data; + struct lu_object_header *h; + + h = cfs_hlist_entry(hnode, struct lu_object_header, loh_hash); + if (!cfs_list_empty(&h->loh_layers)) { + const struct lu_object *o; + + o = lu_object_top(h); + lu_object_print(arg->lsp_env, arg->lsp_cookie, + arg->lsp_printer, o); + } else { + lu_object_header_print(arg->lsp_env, arg->lsp_cookie, + arg->lsp_printer, h); + } + return 0; +} + /** * Print all objects in \a s. 
*/ void lu_site_print(const struct lu_env *env, struct lu_site *s, void *cookie, lu_printer_t printer) { - int i; - - for (i = 0; i < s->ls_hash_size; ++i) { - struct lu_object_header *h; - cfs_hlist_node_t *scan; - - cfs_read_lock(&s->ls_guard); - cfs_hlist_for_each_entry(h, scan, &s->ls_hash[i], loh_hash) { + struct lu_site_print_arg arg = { + .lsp_env = (struct lu_env *)env, + .lsp_cookie = cookie, + .lsp_printer = printer, + }; - if (!cfs_list_empty(&h->loh_layers)) { - const struct lu_object *obj; - - obj = lu_object_top(h); - lu_object_print(env, cookie, printer, obj); - } else - lu_object_header_print(env, cookie, printer, h); - } - cfs_read_unlock(&s->ls_guard); - } + cfs_hash_for_each(s->ls_obj_hash, lu_site_obj_print, &arg); } EXPORT_SYMBOL(lu_site_print); enum { - LU_CACHE_PERCENT = 20, + LU_CACHE_PERCENT_MAX = 50, + LU_CACHE_PERCENT_DEFAULT = 20 }; +static unsigned int lu_cache_percent = LU_CACHE_PERCENT_DEFAULT; +CFS_MODULE_PARM(lu_cache_percent, "i", int, 0644, + "Percentage of memory to be used as lu_object cache"); + /** * Return desired hash table order. */ @@ -714,7 +803,16 @@ static int lu_htable_order(void) cache_size = 1 << (30 - CFS_PAGE_SHIFT) * 3 / 4; #endif - cache_size = cache_size / 100 * LU_CACHE_PERCENT * + /* clear off unreasonable cache setting. */ + if (lu_cache_percent == 0 || lu_cache_percent > LU_CACHE_PERCENT_MAX) { + CWARN("obdclass: invalid lu_cache_percent: %u, it must be in" + " the range of (0, %u]. Will use default value: %u.\n", + lu_cache_percent, LU_CACHE_PERCENT_MAX, + LU_CACHE_PERCENT_DEFAULT); + + lu_cache_percent = LU_CACHE_PERCENT_DEFAULT; + } + cache_size = cache_size / 100 * lu_cache_percent * (CFS_PAGE_SIZE / 1024); for (bits = 1; (1 << bits) < cache_size; ++bits) { @@ -723,45 +821,156 @@ static int lu_htable_order(void) return bits; } -static cfs_lock_class_key_t lu_site_guard_class; +static unsigned lu_obj_hop_hash(cfs_hash_t *hs, + const void *key, unsigned mask) +{ + struct lu_fid *fid = (struct lu_fid *)key; + __u32 hash; + + hash = fid_flatten32(fid); + hash += (hash >> 4) + (hash << 12); /* mixing oid and seq */ + hash = cfs_hash_long(hash, hs->hs_bkt_bits); + + /* give me another random factor */ + hash -= cfs_hash_long((unsigned long)hs, fid_oid(fid) % 11 + 3); + + hash <<= hs->hs_cur_bits - hs->hs_bkt_bits; + hash |= (fid_seq(fid) + fid_oid(fid)) & (CFS_HASH_NBKT(hs) - 1); + + return hash & mask; +} + +static void *lu_obj_hop_object(cfs_hlist_node_t *hnode) +{ + return cfs_hlist_entry(hnode, struct lu_object_header, loh_hash); +} + +static void *lu_obj_hop_key(cfs_hlist_node_t *hnode) +{ + struct lu_object_header *h; + + h = cfs_hlist_entry(hnode, struct lu_object_header, loh_hash); + return &h->loh_fid; +} + +static int lu_obj_hop_keycmp(const void *key, cfs_hlist_node_t *hnode) +{ + struct lu_object_header *h; + + h = cfs_hlist_entry(hnode, struct lu_object_header, loh_hash); + return lu_fid_eq(&h->loh_fid, (struct lu_fid *)key); +} + +static void lu_obj_hop_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode) +{ + struct lu_object_header *h; + + h = cfs_hlist_entry(hnode, struct lu_object_header, loh_hash); + if (cfs_atomic_add_return(1, &h->loh_ref) == 1) { + struct lu_site_bkt_data *bkt; + cfs_hash_bd_t bd; + + cfs_hash_bd_get(hs, &h->loh_fid, &bd); + bkt = cfs_hash_bd_extra_get(hs, &bd); + bkt->lsb_busy++; + } +} + +static void lu_obj_hop_put_locked(cfs_hash_t *hs, cfs_hlist_node_t *hnode) +{ + LBUG(); /* we should never called it */ +} + +cfs_hash_ops_t lu_site_hash_ops = { + .hs_hash = lu_obj_hop_hash, + .hs_key = 
lu_obj_hop_key, + .hs_keycmp = lu_obj_hop_keycmp, + .hs_object = lu_obj_hop_object, + .hs_get = lu_obj_hop_get, + .hs_put_locked = lu_obj_hop_put_locked, +}; /** * Initialize site \a s, with \a d as the top level device. */ +#define LU_SITE_BITS_MIN 12 +#define LU_SITE_BITS_MAX 24 +/** + * total 256 buckets, we don't want too many buckets because: + * - consume too much memory + * - avoid unbalanced LRU list + */ +#define LU_SITE_BKT_BITS 8 + int lu_site_init(struct lu_site *s, struct lu_device *top) { + struct lu_site_bkt_data *bkt; + cfs_hash_bd_t bd; + char name[16]; int bits; - int size; int i; ENTRY; memset(s, 0, sizeof *s); - cfs_rwlock_init(&s->ls_guard); - cfs_lockdep_set_class(&s->ls_guard, &lu_site_guard_class); - CFS_INIT_LIST_HEAD(&s->ls_lru); + bits = lu_htable_order(); + snprintf(name, 16, "lu_site_%s", top->ld_type->ldt_name); + for (bits = min(max(LU_SITE_BITS_MIN, bits), LU_SITE_BITS_MAX); + bits >= LU_SITE_BITS_MIN; bits--) { + s->ls_obj_hash = cfs_hash_create(name, bits, bits, + bits - LU_SITE_BKT_BITS, + sizeof(*bkt), 0, 0, + &lu_site_hash_ops, + CFS_HASH_SPIN_BKTLOCK | + CFS_HASH_NO_ITEMREF | + CFS_HASH_DEPTH | + CFS_HASH_ASSERT_EMPTY); + if (s->ls_obj_hash != NULL) + break; + } + + if (s->ls_obj_hash == NULL) { + CERROR("failed to create lu_site hash with bits: %d\n", bits); + return -ENOMEM; + } + + cfs_hash_for_each_bucket(s->ls_obj_hash, &bd, i) { + bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, &bd); + CFS_INIT_LIST_HEAD(&bkt->lsb_lru); + cfs_waitq_init(&bkt->lsb_marche_funebre); + } + + s->ls_stats = lprocfs_alloc_stats(LU_SS_LAST_STAT, 0); + if (s->ls_stats == NULL) { + cfs_hash_putref(s->ls_obj_hash); + s->ls_obj_hash = NULL; + return -ENOMEM; + } + + lprocfs_counter_init(s->ls_stats, LU_SS_CREATED, + 0, "created", "created"); + lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_HIT, + 0, "cache_hit", "cache_hit"); + lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_MISS, + 0, "cache_miss", "cache_miss"); + lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_RACE, + 0, "cache_race", "cache_race"); + lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_DEATH_RACE, + 0, "cache_death_race", "cache_death_race"); + lprocfs_counter_init(s->ls_stats, LU_SS_LRU_PURGED, + 0, "lru_purged", "lru_purged"); + CFS_INIT_LIST_HEAD(&s->ls_linkage); - cfs_waitq_init(&s->ls_marche_funebre); s->ls_top_dev = top; top->ld_site = s; lu_device_get(top); lu_ref_add(&top->ld_reference, "site-top", s); - for (bits = lu_htable_order(), size = 1 << bits; - (s->ls_hash = - cfs_alloc_large(size * sizeof s->ls_hash[0])) == NULL; - --bits, size >>= 1) { - /* - * Scale hash table down, until allocation succeeds. 
- */ - ; - } - - s->ls_hash_size = size; - s->ls_hash_bits = bits; - s->ls_hash_mask = size - 1; + CFS_INIT_LIST_HEAD(&s->ls_ld_linkage); + cfs_spin_lock_init(&s->ls_ld_lock); - for (i = 0; i < size; i++) - CFS_INIT_HLIST_HEAD(&s->ls_hash[i]); + cfs_spin_lock(&s->ls_ld_lock); + cfs_list_add(&top->ld_linkage, &s->ls_ld_linkage); + cfs_spin_unlock(&s->ls_ld_lock); RETURN(0); } @@ -772,26 +981,24 @@ EXPORT_SYMBOL(lu_site_init); */ void lu_site_fini(struct lu_site *s) { - LASSERT(cfs_list_empty(&s->ls_lru)); - LASSERT(s->ls_total == 0); - - cfs_down(&lu_sites_guard); + cfs_mutex_lock(&lu_sites_guard); cfs_list_del_init(&s->ls_linkage); - cfs_up(&lu_sites_guard); - - if (s->ls_hash != NULL) { - int i; - for (i = 0; i < s->ls_hash_size; i++) - LASSERT(cfs_hlist_empty(&s->ls_hash[i])); - cfs_free_large(s->ls_hash); - s->ls_hash = NULL; + cfs_mutex_unlock(&lu_sites_guard); + + if (s->ls_obj_hash != NULL) { + cfs_hash_putref(s->ls_obj_hash); + s->ls_obj_hash = NULL; } + if (s->ls_top_dev != NULL) { s->ls_top_dev->ld_site = NULL; lu_ref_del(&s->ls_top_dev->ld_reference, "site-top", s); lu_device_put(s->ls_top_dev); s->ls_top_dev = NULL; } + + if (s->ls_stats != NULL) + lprocfs_free_stats(&s->ls_stats); } EXPORT_SYMBOL(lu_site_fini); @@ -801,11 +1008,11 @@ EXPORT_SYMBOL(lu_site_fini); int lu_site_init_finish(struct lu_site *s) { int result; - cfs_down(&lu_sites_guard); + cfs_mutex_lock(&lu_sites_guard); result = lu_context_refill(&lu_shrink_env.le_ctx); if (result == 0) cfs_list_add(&s->ls_linkage, &lu_sites); - cfs_up(&lu_sites_guard); + cfs_mutex_unlock(&lu_sites_guard); return result; } EXPORT_SYMBOL(lu_site_init_finish); @@ -840,6 +1047,7 @@ int lu_device_init(struct lu_device *d, struct lu_device_type *t) cfs_atomic_set(&d->ld_ref, 0); d->ld_type = t; lu_ref_init(&d->ld_reference); + CFS_INIT_LIST_HEAD(&d->ld_linkage); return 0; } EXPORT_SYMBOL(lu_device_init); @@ -994,13 +1202,13 @@ void lu_stack_fini(const struct lu_env *env, struct lu_device *top) /* purge again. */ lu_site_purge(env, site, ~0); - if (!cfs_list_empty(&site->ls_lru) || site->ls_total != 0) { + if (!cfs_hash_is_empty(site->ls_obj_hash)) { /* * Uh-oh, objects still exist. */ - static DECLARE_LU_CDEBUG_PRINT_INFO(cookie, D_ERROR); + LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_ERROR, NULL); - lu_site_print(env, site, &cookie, lu_cdebug_printer); + lu_site_print(env, site, &msgdata, lu_cdebug_printer); } for (scan = top; scan != NULL; scan = next) { @@ -1268,6 +1476,7 @@ static int keys_fill(struct lu_context *ctx) { int i; + LINVRNT(ctx->lc_value != NULL); for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) { struct lu_context_key *key; @@ -1391,15 +1600,61 @@ EXPORT_SYMBOL(lu_context_exit); /** * Allocate for context all missing keys that were registered after context - * creation. + * creation. key_set_version is only changed in rare cases when modules + * are loaded and removed. */ int lu_context_refill(struct lu_context *ctx) { - LINVRNT(ctx->lc_value != NULL); - return ctx->lc_version == key_set_version ? 0 : keys_fill(ctx); + return likely(ctx->lc_version == key_set_version) ? 0 : keys_fill(ctx); } EXPORT_SYMBOL(lu_context_refill); +/** + * lu_ctx_tags/lu_ses_tags will be updated if there are new types of + * obd being added. Currently, this is only used on client side, specifically + * for echo device client, for other stack (like ptlrpc threads), context are + * predefined when the lu_device type are registered, during the module probe + * phase. 
+ */ +__u32 lu_context_tags_default = 0; +__u32 lu_session_tags_default = 0; + +void lu_context_tags_update(__u32 tags) +{ + cfs_spin_lock(&lu_keys_guard); + lu_context_tags_default |= tags; + key_set_version ++; + cfs_spin_unlock(&lu_keys_guard); +} +EXPORT_SYMBOL(lu_context_tags_update); + +void lu_context_tags_clear(__u32 tags) +{ + cfs_spin_lock(&lu_keys_guard); + lu_context_tags_default &= ~tags; + key_set_version ++; + cfs_spin_unlock(&lu_keys_guard); +} +EXPORT_SYMBOL(lu_context_tags_clear); + +void lu_session_tags_update(__u32 tags) +{ + cfs_spin_lock(&lu_keys_guard); + lu_session_tags_default |= tags; + key_set_version ++; + cfs_spin_unlock(&lu_keys_guard); +} +EXPORT_SYMBOL(lu_session_tags_update); + +void lu_session_tags_clear(__u32 tags) +{ + cfs_spin_lock(&lu_keys_guard); + lu_session_tags_default &= ~tags; + key_set_version ++; + cfs_spin_unlock(&lu_keys_guard); +} +EXPORT_SYMBOL(lu_session_tags_clear); + int lu_env_init(struct lu_env *env, __u32 tags) { int result; @@ -1431,26 +1686,91 @@ int lu_env_refill(struct lu_env *env) } EXPORT_SYMBOL(lu_env_refill); +/** + * Currently, this API will only be used by echo client. + * Because echo client and normal lustre client will share + * same cl_env cache. So echo client needs to refresh + * the env context after it get one from the cache, especially + * when normal client and echo client co-exist in the same client. + */ +int lu_env_refill_by_tags(struct lu_env *env, __u32 ctags, + __u32 stags) +{ + int result; + + if ((env->le_ctx.lc_tags & ctags) != ctags) { + env->le_ctx.lc_version = 0; + env->le_ctx.lc_tags |= ctags; + } + + if (env->le_ses && (env->le_ses->lc_tags & stags) != stags) { + env->le_ses->lc_version = 0; + env->le_ses->lc_tags |= stags; + } + + result = lu_env_refill(env); + + return result; +} +EXPORT_SYMBOL(lu_env_refill_by_tags); + static struct cfs_shrinker *lu_site_shrinker = NULL; +typedef struct lu_site_stats{ + unsigned lss_populated; + unsigned lss_max_search; + unsigned lss_total; + unsigned lss_busy; +} lu_site_stats_t; + +static void lu_site_stats_get(cfs_hash_t *hs, + lu_site_stats_t *stats, int populated) +{ + cfs_hash_bd_t bd; + int i; + + cfs_hash_for_each_bucket(hs, &bd, i) { + struct lu_site_bkt_data *bkt = cfs_hash_bd_extra_get(hs, &bd); + cfs_hlist_head_t *hhead; + + cfs_hash_bd_lock(hs, &bd, 1); + stats->lss_busy += bkt->lsb_busy; + stats->lss_total += cfs_hash_bd_count_get(&bd); + stats->lss_max_search = max((int)stats->lss_max_search, + cfs_hash_bd_depmax_get(&bd)); + if (!populated) { + cfs_hash_bd_unlock(hs, &bd, 1); + continue; + } + + cfs_hash_bd_for_each_hlist(hs, &bd, hhead) { + if (!cfs_hlist_empty(hhead)) + stats->lss_populated++; + } + cfs_hash_bd_unlock(hs, &bd, 1); + } +} + #ifdef __KERNEL__ -static int lu_cache_shrink(int nr, unsigned int gfp_mask) + +static int lu_cache_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask)) { + lu_site_stats_t stats; struct lu_site *s; struct lu_site *tmp; int cached = 0; - int remain = nr; + int remain = shrink_param(sc, nr_to_scan); CFS_LIST_HEAD(splice); - if (nr != 0) { - if (!(gfp_mask & __GFP_FS)) + if (remain != 0) { + if (!(shrink_param(sc, gfp_mask) & __GFP_FS)) return -1; - CDEBUG(D_INODE, "Shrink %d objects\n", nr); + CDEBUG(D_INODE, "Shrink %d objects\n", remain); } - cfs_down(&lu_sites_guard); + cfs_mutex_lock(&lu_sites_guard); cfs_list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) { - if (nr != 0) { + if (shrink_param(sc, nr_to_scan) != 0) { remain = lu_site_purge(&lu_shrink_env, s, remain); /* * Move just shrunk site to the tail of 
site list to @@ -1458,17 +1778,18 @@ static int lu_cache_shrink(int nr, unsigned int gfp_mask) */ cfs_list_move_tail(&s->ls_linkage, &splice); } - cfs_read_lock(&s->ls_guard); - cached += s->ls_total - s->ls_busy; - cfs_read_unlock(&s->ls_guard); - if (nr && remain <= 0) + + memset(&stats, 0, sizeof(stats)); + lu_site_stats_get(s->ls_obj_hash, &stats, 0); + cached += stats.lss_total - stats.lss_busy; + if (shrink_param(sc, nr_to_scan) && remain <= 0) break; } cfs_list_splice(&splice, lu_sites.prev); - cfs_up(&lu_sites_guard); + cfs_mutex_unlock(&lu_sites_guard); cached = (cached / 100) * sysctl_vfs_cache_pressure; - if (nr == 0) + if (shrink_param(sc, nr_to_scan) == 0) CDEBUG(D_INODE, "%d objects cached\n", cached); return cached; } @@ -1510,7 +1831,7 @@ void lu_context_keys_dump(void) key = lu_keys[i]; if (key != NULL) { - CERROR("[%i]: %p %x (%p,%p,%p) %i %i \"%s\"@%p\n", + CERROR("[%d]: %p %x (%p,%p,%p) %d %d \"%s\"@%p\n", i, key, key->lct_tags, key->lct_init, key->lct_fini, key->lct_exit, key->lct_index, cfs_atomic_read(&key->lct_used), @@ -1546,7 +1867,7 @@ int lu_global_init(void) { int result; - CDEBUG(D_CONSOLE, "Lustre LU module (%p).\n", &lu_keys); + CDEBUG(D_INFO, "Lustre LU module (%p).\n", &lu_keys); result = lu_ref_global_init(); if (result != 0) @@ -1561,9 +1882,9 @@ int lu_global_init(void) * conservatively. This should not be too bad, because this * environment is global. */ - cfs_down(&lu_sites_guard); + cfs_mutex_lock(&lu_sites_guard); result = lu_env_init(&lu_shrink_env, LCT_SHRINKER); - cfs_up(&lu_sites_guard); + cfs_mutex_unlock(&lu_sites_guard); if (result != 0) return result; @@ -1617,9 +1938,9 @@ void lu_global_fini(void) * Tear shrinker environment down _after_ de-registering * lu_global_key, because the latter has a value in the former. */ - cfs_down(&lu_sites_guard); + cfs_mutex_lock(&lu_sites_guard); lu_env_fini(&lu_shrink_env); - cfs_up(&lu_sites_guard); + cfs_mutex_unlock(&lu_sites_guard); lu_ref_global_fini(); } @@ -1630,34 +1951,41 @@ struct lu_buf LU_BUF_NULL = { }; EXPORT_SYMBOL(LU_BUF_NULL); +static __u32 ls_stats_read(struct lprocfs_stats *stats, int idx) +{ +#ifdef LPROCFS + struct lprocfs_counter ret; + + lprocfs_stats_collect(stats, idx, &ret); + return (__u32)ret.lc_count; +#else + return 0; +#endif +} + /** * Output site statistical counters into a buffer. Suitable for * lprocfs_rd_*()-style functions. */ int lu_site_stats_print(const struct lu_site *s, char *page, int count) { - int i; - int populated; + lu_site_stats_t stats; - /* - * How many hash buckets are not-empty? Don't bother with locks: it's - * an estimation anyway. 
- */ - for (i = 0, populated = 0; i < s->ls_hash_size; i++) - populated += !cfs_hlist_empty(&s->ls_hash[i]); - - return snprintf(page, count, "%d %d %d/%d %d %d %d %d %d %d %d\n", - s->ls_total, - s->ls_busy, - populated, - s->ls_hash_size, - s->ls_stats.s_created, - s->ls_stats.s_cache_hit, - s->ls_stats.s_cache_miss, - s->ls_stats.s_cache_check, - s->ls_stats.s_cache_race, - s->ls_stats.s_cache_death_race, - s->ls_stats.s_lru_purged); + memset(&stats, 0, sizeof(stats)); + lu_site_stats_get(s->ls_obj_hash, &stats, 1); + + return snprintf(page, count, "%d/%d %d/%d %d %d %d %d %d %d %d\n", + stats.lss_busy, + stats.lss_total, + stats.lss_populated, + CFS_HASH_NHLIST(s->ls_obj_hash), + stats.lss_max_search, + ls_stats_read(s->ls_stats, LU_SS_CREATED), + ls_stats_read(s->ls_stats, LU_SS_CACHE_HIT), + ls_stats_read(s->ls_stats, LU_SS_CACHE_MISS), + ls_stats_read(s->ls_stats, LU_SS_CACHE_RACE), + ls_stats_read(s->ls_stats, LU_SS_CACHE_DEATH_RACE), + ls_stats_read(s->ls_stats, LU_SS_LRU_PURGED)); } EXPORT_SYMBOL(lu_site_stats_print); @@ -1674,13 +2002,16 @@ EXPORT_SYMBOL(lu_time_names); int lu_kmem_init(struct lu_kmem_descr *caches) { int result; + struct lu_kmem_descr *iter = caches; - for (result = 0; caches->ckd_cache != NULL; ++caches) { - *caches->ckd_cache = cfs_mem_cache_create(caches->ckd_name, - caches->ckd_size, - 0, 0); - if (*caches->ckd_cache == NULL) { + for (result = 0; iter->ckd_cache != NULL; ++iter) { + *iter->ckd_cache = cfs_mem_cache_create(iter->ckd_name, + iter->ckd_size, + 0, 0); + if (*iter->ckd_cache == NULL) { result = -ENOMEM; + /* free all previously allocated caches */ + lu_kmem_fini(caches); break; } }
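
The final hunk above reworks lu_kmem_init() to walk the descriptor array with a separate iterator and to call lu_kmem_fini(caches) when a cache allocation fails, so every cache created before the failure point is torn down instead of leaked. Below is a minimal, self-contained sketch of that unwind-on-partial-failure pattern, not Lustre code: the names (kmem_descr, kmem_init, kmem_fini) are hypothetical, plain malloc/free stand in for the cfs_mem_cache_* calls, and it assumes the target cache pointers start out NULL (e.g. static storage), as they do for the descriptor arrays this function is used with.

#include <errno.h>
#include <stdlib.h>

struct kmem_descr {
	void       **ckd_cache;	/* where the created cache is stored  */
	const char  *ckd_name;	/* human-readable label (unused here) */
	size_t       ckd_size;	/* size of the objects in this cache  */
};

/* Destroy every cache that was successfully created.  Entries that were
 * never reached are still NULL, so they are skipped. */
static void kmem_fini(struct kmem_descr *caches)
{
	for (; caches->ckd_cache != NULL; ++caches) {
		if (*caches->ckd_cache != NULL) {
			free(*caches->ckd_cache);
			*caches->ckd_cache = NULL;
		}
	}
}

/* Walk the NULL-terminated descriptor array with a separate iterator; on
 * the first failed allocation, free all previously allocated caches (the
 * same unwind the patched lu_kmem_init() performs) and return -ENOMEM. */
static int kmem_init(struct kmem_descr *caches)
{
	struct kmem_descr *iter;

	for (iter = caches; iter->ckd_cache != NULL; ++iter) {
		*iter->ckd_cache = malloc(iter->ckd_size);
		if (*iter->ckd_cache == NULL) {
			kmem_fini(caches);
			return -ENOMEM;
		}
	}
	return 0;
}

A typical caller keeps the cache pointers in static storage and terminates the array with a NULL ckd_cache entry, for example:

static void *foo_cache;
static void *bar_cache;

static struct kmem_descr caches[] = {
	{ &foo_cache, "foo_cache", 128 },
	{ &bar_cache, "bar_cache", 256 },
	{ NULL,       NULL,          0 }
};
/* rc = kmem_init(caches); ... kmem_fini(caches); */

Because the initializer unwinds through the same fini helper the caller would use, the caller sees either a fully initialized set of caches or none at all.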