X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Fobdclass%2Flu_object.c;h=3e17c051ceed592338ec65410bf80ae30263dba6;hp=f673e9dd629345a9b8f680344a9c38fb2a7450f0;hb=d11360f4cc5d38cd748a97ca05e10121353ae616;hpb=2711e4428bc9b1574202ccd8c566557fb102424a diff --git a/lustre/obdclass/lu_object.c b/lustre/obdclass/lu_object.c index f673e9d..3e17c05 100644 --- a/lustre/obdclass/lu_object.c +++ b/lustre/obdclass/lu_object.c @@ -27,7 +27,7 @@ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. * Use is subject to license terms. * - * Copyright (c) 2011, 2013, Intel Corporation. + * Copyright (c) 2011, 2014, Intel Corporation. */ /* * This file is part of Lustre, http://www.lustre.org/ @@ -45,13 +45,8 @@ #define DEBUG_SUBSYSTEM S_CLASS #include - -#ifdef __KERNEL__ -# include -#endif - -/* hash_long() */ -#include +#include +#include /* hash_long() */ #include #include #include @@ -60,6 +55,36 @@ #include #include +enum { + LU_CACHE_PERCENT_MAX = 50, + LU_CACHE_PERCENT_DEFAULT = 20 +}; + +#define LU_CACHE_NR_MAX_ADJUST 128 +#define LU_CACHE_NR_UNLIMITED -1 +#define LU_CACHE_NR_DEFAULT LU_CACHE_NR_UNLIMITED +#define LU_CACHE_NR_LDISKFS_LIMIT LU_CACHE_NR_UNLIMITED +/** This is set to roughly (20 * OSS_NTHRS_MAX) to prevent thrashing */ +#define LU_CACHE_NR_ZFS_LIMIT 10240 + +#define LU_SITE_BITS_MIN 12 +#define LU_SITE_BITS_MAX 24 +/** + * total 256 buckets, we don't want too many buckets because: + * - consume too much memory + * - avoid unbalanced LRU list + */ +#define LU_SITE_BKT_BITS 8 + + +static unsigned int lu_cache_percent = LU_CACHE_PERCENT_DEFAULT; +CFS_MODULE_PARM(lu_cache_percent, "i", int, 0644, + "Percentage of memory to be used as lu_object cache"); + +static long lu_cache_nr = LU_CACHE_NR_DEFAULT; +CFS_MODULE_PARM(lu_cache_nr, "l", long, 0644, + "Maximum number of objects in lu_object cache"); + static void lu_object_free(const struct lu_env *env, struct lu_object *o); /** @@ -89,10 +114,10 @@ void lu_object_put(const struct lu_env *env, struct lu_object *o) if (fid_is_zero(fid)) { LASSERT(top->loh_hash.next == NULL && top->loh_hash.pprev == NULL); - LASSERT(cfs_list_empty(&top->loh_lru)); - if (!cfs_atomic_dec_and_test(&top->loh_ref)) + LASSERT(list_empty(&top->loh_lru)); + if (!atomic_dec_and_test(&top->loh_ref)) return; - cfs_list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) { + list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) { if (o->lo_ops->loo_object_release != NULL) o->lo_ops->loo_object_release(env, o); } @@ -103,17 +128,17 @@ void lu_object_put(const struct lu_env *env, struct lu_object *o) cfs_hash_bd_get(site->ls_obj_hash, &top->loh_fid, &bd); bkt = cfs_hash_bd_extra_get(site->ls_obj_hash, &bd); - if (!cfs_hash_bd_dec_and_lock(site->ls_obj_hash, &bd, &top->loh_ref)) { - if (lu_object_is_dying(top)) { + if (!cfs_hash_bd_dec_and_lock(site->ls_obj_hash, &bd, &top->loh_ref)) { + if (lu_object_is_dying(top)) { - /* - * somebody may be waiting for this, currently only - * used for cl_object, see cl_object_put_last(). - */ - cfs_waitq_broadcast(&bkt->lsb_marche_funebre); - } - return; - } + /* + * somebody may be waiting for this, currently only + * used for cl_object, see cl_object_put_last(). 
+ */ + wake_up_all(&bkt->lsb_marche_funebre); + } + return; + } LASSERT(bkt->lsb_busy > 0); bkt->lsb_busy--; @@ -121,14 +146,15 @@ void lu_object_put(const struct lu_env *env, struct lu_object *o) * When last reference is released, iterate over object * layers, and notify them that object is no longer busy. */ - cfs_list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) { + list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) { if (o->lo_ops->loo_object_release != NULL) o->lo_ops->loo_object_release(env, o); } - if (!lu_object_is_dying(top)) { - LASSERT(cfs_list_empty(&top->loh_lru)); - cfs_list_add_tail(&top->loh_lru, &bkt->lsb_lru); + if (!lu_object_is_dying(top) && + (lu_object_exists(orig) || lu_object_is_cl(orig))) { + LASSERT(list_empty(&top->loh_lru)); + list_add_tail(&top->loh_lru, &bkt->lsb_lru); cfs_hash_bd_unlock(site->ls_obj_hash, &bd, 1); return; } @@ -181,7 +207,7 @@ void lu_object_unhash(const struct lu_env *env, struct lu_object *o) cfs_hash_bd_t bd; cfs_hash_bd_get_and_lock(obj_hash, &top->loh_fid, &bd, 1); - cfs_list_del_init(&top->loh_lru); + list_del_init(&top->loh_lru); cfs_hash_bd_del_locked(obj_hash, &bd, &top->loh_hash); cfs_hash_bd_unlock(obj_hash, &bd, 1); } @@ -195,16 +221,18 @@ EXPORT_SYMBOL(lu_object_unhash); * struct lu_device_operations definition. */ static struct lu_object *lu_object_alloc(const struct lu_env *env, - struct lu_device *dev, - const struct lu_fid *f, - const struct lu_object_conf *conf) -{ - struct lu_object *scan; - struct lu_object *top; - cfs_list_t *layers; - int clean; - int result; - ENTRY; + struct lu_device *dev, + const struct lu_fid *f, + const struct lu_object_conf *conf) +{ + struct lu_object *scan; + struct lu_object *top; + struct list_head *layers; + unsigned int init_mask = 0; + unsigned int init_flag; + int clean; + int result; + ENTRY; /* * Create top-level object slice. This will also create @@ -221,27 +249,31 @@ static struct lu_object *lu_object_alloc(const struct lu_env *env, */ top->lo_header->loh_fid = *f; layers = &top->lo_header->loh_layers; - do { - /* - * Call ->loo_object_init() repeatedly, until no more new - * object slices are created. - */ - clean = 1; - cfs_list_for_each_entry(scan, layers, lo_linkage) { - if (scan->lo_flags & LU_OBJECT_ALLOCATED) - continue; - clean = 0; - scan->lo_header = top->lo_header; - result = scan->lo_ops->loo_object_init(env, scan, conf); - if (result != 0) { - lu_object_free(env, top); - RETURN(ERR_PTR(result)); - } - scan->lo_flags |= LU_OBJECT_ALLOCATED; - } - } while (!clean); - cfs_list_for_each_entry_reverse(scan, layers, lo_linkage) { + do { + /* + * Call ->loo_object_init() repeatedly, until no more new + * object slices are created. 
+ */ + clean = 1; + init_flag = 1; + list_for_each_entry(scan, layers, lo_linkage) { + if (init_mask & init_flag) + goto next; + clean = 0; + scan->lo_header = top->lo_header; + result = scan->lo_ops->loo_object_init(env, scan, conf); + if (result != 0) { + lu_object_free(env, top); + RETURN(ERR_PTR(result)); + } + init_mask |= init_flag; +next: + init_flag <<= 1; + } + } while (!clean); + + list_for_each_entry_reverse(scan, layers, lo_linkage) { if (scan->lo_ops->loo_object_start != NULL) { result = scan->lo_ops->loo_object_start(env, scan); if (result != 0) { @@ -260,11 +292,11 @@ static struct lu_object *lu_object_alloc(const struct lu_env *env, */ static void lu_object_free(const struct lu_env *env, struct lu_object *o) { - struct lu_site_bkt_data *bkt; - struct lu_site *site; - struct lu_object *scan; - cfs_list_t *layers; - cfs_list_t splice; + struct lu_site_bkt_data *bkt; + struct lu_site *site; + struct lu_object *scan; + struct list_head *layers; + struct list_head splice; site = o->lo_dev->ld_site; layers = &o->lo_header->loh_layers; @@ -272,7 +304,7 @@ static void lu_object_free(const struct lu_env *env, struct lu_object *o) /* * First call ->loo_object_delete() method to release all resources. */ - cfs_list_for_each_entry_reverse(scan, layers, lo_linkage) { + list_for_each_entry_reverse(scan, layers, lo_linkage) { if (scan->lo_ops->loo_object_delete != NULL) scan->lo_ops->loo_object_delete(env, scan); } @@ -283,22 +315,22 @@ static void lu_object_free(const struct lu_env *env, struct lu_object *o) * necessary, because lu_object_header is freed together with the * top-level slice. */ - CFS_INIT_LIST_HEAD(&splice); - cfs_list_splice_init(layers, &splice); - while (!cfs_list_empty(&splice)) { - /* - * Free layers in bottom-to-top order, so that object header - * lives as long as possible and ->loo_object_free() methods - * can look at its contents. - */ - o = container_of0(splice.prev, struct lu_object, lo_linkage); - cfs_list_del_init(&o->lo_linkage); - LASSERT(o->lo_ops->loo_object_free != NULL); - o->lo_ops->loo_object_free(env, o); - } + INIT_LIST_HEAD(&splice); + list_splice_init(layers, &splice); + while (!list_empty(&splice)) { + /* + * Free layers in bottom-to-top order, so that object header + * lives as long as possible and ->loo_object_free() methods + * can look at its contents. + */ + o = container_of0(splice.prev, struct lu_object, lo_linkage); + list_del_init(&o->lo_linkage); + LASSERT(o->lo_ops->loo_object_free != NULL); + o->lo_ops->loo_object_free(env, o); + } - if (cfs_waitq_active(&bkt->lsb_marche_funebre)) - cfs_waitq_broadcast(&bkt->lsb_marche_funebre); + if (waitqueue_active(&bkt->lsb_marche_funebre)) + wake_up_all(&bkt->lsb_marche_funebre); } /** @@ -311,24 +343,29 @@ int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr) struct lu_site_bkt_data *bkt; cfs_hash_bd_t bd; cfs_hash_bd_t bd2; - cfs_list_t dispose; - int did_sth; - int start; + struct list_head dispose; + int did_sth; + unsigned int start; int count; int bnr; - int i; + unsigned int i; if (OBD_FAIL_CHECK(OBD_FAIL_OBD_NO_LRU)) RETURN(0); - CFS_INIT_LIST_HEAD(&dispose); + INIT_LIST_HEAD(&dispose); /* * Under LRU list lock, scan LRU list and move unreferenced objects to * the dispose list, removing them from LRU and hash table. */ start = s->ls_purge_start; - bnr = (nr == ~0) ? -1 : nr / CFS_HASH_NBKT(s->ls_obj_hash) + 1; + bnr = (nr == ~0) ? 
-1 : nr / (int)CFS_HASH_NBKT(s->ls_obj_hash) + 1; again: + /* + * It doesn't make any sense to make purge threads parallel, that can + * only bring troubles to us. See LU-5331. + */ + mutex_lock(&s->ls_purge_mutex); did_sth = 0; cfs_hash_for_each_bucket(s->ls_obj_hash, &bd, i) { if (i < start) @@ -337,15 +374,15 @@ int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr) cfs_hash_bd_lock(s->ls_obj_hash, &bd, 1); bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, &bd); - cfs_list_for_each_entry_safe(h, temp, &bkt->lsb_lru, loh_lru) { - LASSERT(cfs_atomic_read(&h->loh_ref) == 0); + list_for_each_entry_safe(h, temp, &bkt->lsb_lru, loh_lru) { + LASSERT(atomic_read(&h->loh_ref) == 0); cfs_hash_bd_get(s->ls_obj_hash, &h->loh_fid, &bd2); LASSERT(bd.bd_bucket == bd2.bd_bucket); cfs_hash_bd_del_locked(s->ls_obj_hash, &bd2, &h->loh_hash); - cfs_list_move(&h->loh_lru, &dispose); + list_move(&h->loh_lru, &dispose); if (did_sth == 0) did_sth = 1; @@ -355,24 +392,25 @@ int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr) if (count > 0 && --count == 0) break; - } - cfs_hash_bd_unlock(s->ls_obj_hash, &bd, 1); - cfs_cond_resched(); - /* - * Free everything on the dispose list. This is safe against - * races due to the reasons described in lu_object_put(). - */ - while (!cfs_list_empty(&dispose)) { - h = container_of0(dispose.next, - struct lu_object_header, loh_lru); - cfs_list_del_init(&h->loh_lru); - lu_object_free(env, lu_object_top(h)); - lprocfs_counter_incr(s->ls_stats, LU_SS_LRU_PURGED); - } + } + cfs_hash_bd_unlock(s->ls_obj_hash, &bd, 1); + cond_resched(); + /* + * Free everything on the dispose list. This is safe against + * races due to the reasons described in lu_object_put(). + */ + while (!list_empty(&dispose)) { + h = container_of0(dispose.next, + struct lu_object_header, loh_lru); + list_del_init(&h->loh_lru); + lu_object_free(env, lu_object_top(h)); + lprocfs_counter_incr(s->ls_stats, LU_SS_LRU_PURGED); + } if (nr == 0) break; } + mutex_unlock(&s->ls_purge_mutex); if (nr != 0 && did_sth && start != 0) { start = 0; /* restart from the first bucket */ @@ -424,11 +462,11 @@ LU_KEY_INIT_FINI(lu_global, struct lu_cdebug_data); * Key, holding temporary buffer. This key is registered very early by * lu_global_init(). */ -struct lu_context_key lu_global_key = { - .lct_tags = LCT_MD_THREAD | LCT_DT_THREAD | - LCT_MG_THREAD | LCT_CL_THREAD, - .lct_init = lu_global_key_init, - .lct_fini = lu_global_key_fini +static struct lu_context_key lu_global_key = { + .lct_tags = LCT_MD_THREAD | LCT_DT_THREAD | + LCT_MG_THREAD | LCT_CL_THREAD | LCT_LOCAL, + .lct_init = lu_global_key_init, + .lct_fini = lu_global_key_fini }; /** @@ -456,8 +494,8 @@ int lu_cdebug_printer(const struct lu_env *env, vsnprintf(key->lck_area + used, ARRAY_SIZE(key->lck_area) - used, format, args); if (complete) { - if (cfs_cdebug_show(msgdata->msg_mask, msgdata->msg_subsys)) - libcfs_debug_msg(msgdata, "%s", key->lck_area); + if (cfs_cdebug_show(msgdata->msg_mask, msgdata->msg_subsys)) + libcfs_debug_msg(msgdata, "%s\n", key->lck_area); key->lck_area[0] = 0; } va_end(args); @@ -472,13 +510,13 @@ void lu_object_header_print(const struct lu_env *env, void *cookie, lu_printer_t printer, const struct lu_object_header *hdr) { - (*printer)(env, cookie, "header@%p[%#lx, %d, "DFID"%s%s%s]", - hdr, hdr->loh_flags, cfs_atomic_read(&hdr->loh_ref), - PFID(&hdr->loh_fid), - cfs_hlist_unhashed(&hdr->loh_hash) ? "" : " hash", - cfs_list_empty((cfs_list_t *)&hdr->loh_lru) ? \ - "" : " lru", - hdr->loh_attr & LOHA_EXISTS ? 
" exist":""); + (*printer)(env, cookie, "header@%p[%#lx, %d, "DFID"%s%s%s]", + hdr, hdr->loh_flags, atomic_read(&hdr->loh_ref), + PFID(&hdr->loh_fid), + hlist_unhashed(&hdr->loh_hash) ? "" : " hash", + list_empty((struct list_head *)&hdr->loh_lru) ? \ + "" : " lru", + hdr->loh_attr & LOHA_EXISTS ? " exist" : ""); } EXPORT_SYMBOL(lu_object_header_print); @@ -486,28 +524,30 @@ EXPORT_SYMBOL(lu_object_header_print); * Print human readable representation of the \a o to the \a printer. */ void lu_object_print(const struct lu_env *env, void *cookie, - lu_printer_t printer, const struct lu_object *o) + lu_printer_t printer, const struct lu_object *o) { - static const char ruler[] = "........................................"; - struct lu_object_header *top; - int depth; + static const char ruler[] = "........................................"; + struct lu_object_header *top; + int depth = 4; - top = o->lo_header; - lu_object_header_print(env, cookie, printer, top); - (*printer)(env, cookie, "{ \n"); - cfs_list_for_each_entry(o, &top->loh_layers, lo_linkage) { - depth = o->lo_depth + 4; + top = o->lo_header; + lu_object_header_print(env, cookie, printer, top); + (*printer)(env, cookie, "{\n"); - /* - * print `.' \a depth times followed by type name and address - */ - (*printer)(env, cookie, "%*.*s%s@%p", depth, depth, ruler, - o->lo_dev->ld_type->ldt_name, o); - if (o->lo_ops->loo_object_print != NULL) - o->lo_ops->loo_object_print(env, cookie, printer, o); - (*printer)(env, cookie, "\n"); - } - (*printer)(env, cookie, "} header@%p\n", top); + list_for_each_entry(o, &top->loh_layers, lo_linkage) { + /* + * print `.' \a depth times followed by type name and address + */ + (*printer)(env, cookie, "%*.*s%s@%p", depth, depth, ruler, + o->lo_dev->ld_type->ldt_name, o); + + if (o->lo_ops->loo_object_print != NULL) + (*o->lo_ops->loo_object_print)(env, cookie, printer, o); + + (*printer)(env, cookie, "\n"); + } + + (*printer)(env, cookie, "} header@%p\n", top); } EXPORT_SYMBOL(lu_object_print); @@ -519,7 +559,7 @@ int lu_object_invariant(const struct lu_object *o) struct lu_object_header *top; top = o->lo_header; - cfs_list_for_each_entry(o, &top->loh_layers, lo_linkage) { + list_for_each_entry(o, &top->loh_layers, lo_linkage) { if (o->lo_ops->loo_object_invariant != NULL && !o->lo_ops->loo_object_invariant(o)) return 0; @@ -529,18 +569,18 @@ int lu_object_invariant(const struct lu_object *o) EXPORT_SYMBOL(lu_object_invariant); static struct lu_object *htable_lookup(struct lu_site *s, - cfs_hash_bd_t *bd, - const struct lu_fid *f, - cfs_waitlink_t *waiter, - __u64 *version) + cfs_hash_bd_t *bd, + const struct lu_fid *f, + wait_queue_t *waiter, + __u64 *version) { - struct lu_site_bkt_data *bkt; - struct lu_object_header *h; - cfs_hlist_node_t *hnode; - __u64 ver = cfs_hash_bd_version_get(bd); + struct lu_site_bkt_data *bkt; + struct lu_object_header *h; + struct hlist_node *hnode; + __u64 ver = cfs_hash_bd_version_get(bd); if (*version == ver) - return NULL; + return ERR_PTR(-ENOENT); *version = ver; bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, bd); @@ -549,14 +589,14 @@ static struct lu_object *htable_lookup(struct lu_site *s, hnode = cfs_hash_bd_peek_locked(s->ls_obj_hash, bd, (void *)f); if (hnode == NULL) { lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_MISS); - return NULL; + return ERR_PTR(-ENOENT); } h = container_of0(hnode, struct lu_object_header, loh_hash); if (likely(!lu_object_is_dying(h))) { cfs_hash_get(s->ls_obj_hash, hnode); lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_HIT); - 
cfs_list_del_init(&h->loh_lru); + list_del_init(&h->loh_lru); return lu_object_top(h); } @@ -566,11 +606,14 @@ static struct lu_object *htable_lookup(struct lu_site *s, * drained), and moreover, lookup has to wait until object is freed. */ - cfs_waitlink_init(waiter); - cfs_waitq_add(&bkt->lsb_marche_funebre, waiter); - cfs_set_current_state(CFS_TASK_UNINT); - lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_DEATH_RACE); - return ERR_PTR(-EAGAIN); + if (likely(waiter != NULL)) { + init_waitqueue_entry_current(waiter); + add_wait_queue(&bkt->lsb_marche_funebre, waiter); + set_current_state(TASK_UNINTERRUPTIBLE); + lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_DEATH_RACE); + } + + return ERR_PTR(-EAGAIN); } /** @@ -586,6 +629,30 @@ struct lu_object *lu_object_find(const struct lu_env *env, } EXPORT_SYMBOL(lu_object_find); +/* + * Limit the lu_object cache to a maximum of lu_cache_nr objects. Because + * the calculation for the number of objects to reclaim is not covered by + * a lock the maximum number of objects is capped by LU_CACHE_MAX_ADJUST. + * This ensures that many concurrent threads will not accidentally purge + * the entire cache. + */ +static void lu_object_limit(const struct lu_env *env, + struct lu_device *dev) +{ + __u64 size, nr; + + if (lu_cache_nr == LU_CACHE_NR_UNLIMITED) + return; + + size = cfs_hash_size_get(dev->ld_site->ls_obj_hash); + nr = (__u64)lu_cache_nr; + if (size > nr) + lu_site_purge(env, dev->ld_site, + MIN(size - nr, LU_CACHE_NR_MAX_ADJUST)); + + return; +} + static struct lu_object *lu_object_new(const struct lu_env *env, struct lu_device *dev, const struct lu_fid *f, @@ -606,6 +673,9 @@ static struct lu_object *lu_object_new(const struct lu_env *env, cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash); bkt->lsb_busy++; cfs_hash_bd_unlock(hs, &bd, 1); + + lu_object_limit(env, dev); + return o; } @@ -613,17 +683,17 @@ static struct lu_object *lu_object_new(const struct lu_env *env, * Core logic of lu_object_find*() functions. */ static struct lu_object *lu_object_find_try(const struct lu_env *env, - struct lu_device *dev, - const struct lu_fid *f, - const struct lu_object_conf *conf, - cfs_waitlink_t *waiter) -{ - struct lu_object *o; - struct lu_object *shadow; - struct lu_site *s; - cfs_hash_t *hs; - cfs_hash_bd_t bd; - __u64 version = 0; + struct lu_device *dev, + const struct lu_fid *f, + const struct lu_object_conf *conf, + wait_queue_t *waiter) +{ + struct lu_object *o; + struct lu_object *shadow; + struct lu_site *s; + cfs_hash_t *hs; + cfs_hash_bd_t bd; + __u64 version = 0; /* * This uses standard index maintenance protocol: @@ -653,7 +723,7 @@ static struct lu_object *lu_object_find_try(const struct lu_env *env, cfs_hash_bd_get_and_lock(hs, (void *)f, &bd, 1); o = htable_lookup(s, &bd, f, waiter, &version); cfs_hash_bd_unlock(hs, &bd, 1); - if (o != NULL) + if (!IS_ERR(o) || PTR_ERR(o) != -ENOENT) return o; /* @@ -669,13 +739,16 @@ static struct lu_object *lu_object_find_try(const struct lu_env *env, cfs_hash_bd_lock(hs, &bd, 1); shadow = htable_lookup(s, &bd, f, waiter, &version); - if (likely(shadow == NULL)) { + if (likely(IS_ERR(shadow) && PTR_ERR(shadow) == -ENOENT)) { struct lu_site_bkt_data *bkt; bkt = cfs_hash_bd_extra_get(hs, &bd); cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash); bkt->lsb_busy++; cfs_hash_bd_unlock(hs, &bd, 1); + + lu_object_limit(env, dev); + return o; } @@ -691,26 +764,29 @@ static struct lu_object *lu_object_find_try(const struct lu_env *env, * objects of different "stacking" to be created within the same site. 
*/ struct lu_object *lu_object_find_at(const struct lu_env *env, - struct lu_device *dev, - const struct lu_fid *f, - const struct lu_object_conf *conf) -{ - struct lu_site_bkt_data *bkt; - struct lu_object *obj; - cfs_waitlink_t wait; - - while (1) { - obj = lu_object_find_try(env, dev, f, conf, &wait); - if (obj != ERR_PTR(-EAGAIN)) - return obj; - /* - * lu_object_find_try() already added waiter into the - * wait queue. - */ - cfs_waitq_wait(&wait, CFS_TASK_UNINT); - bkt = lu_site_bkt_from_fid(dev->ld_site, (void *)f); - cfs_waitq_del(&bkt->lsb_marche_funebre, &wait); - } + struct lu_device *dev, + const struct lu_fid *f, + const struct lu_object_conf *conf) +{ + struct lu_site_bkt_data *bkt; + struct lu_object *obj; + wait_queue_t wait; + + if (conf != NULL && conf->loc_flags & LOC_F_NOWAIT) + return lu_object_find_try(env, dev, f, conf, NULL); + + while (1) { + obj = lu_object_find_try(env, dev, f, conf, &wait); + if (obj != ERR_PTR(-EAGAIN)) + return obj; + /* + * lu_object_find_try() already added waiter into the + * wait queue. + */ + waitq_wait(&wait, TASK_UNINTERRUPTIBLE); + bkt = lu_site_bkt_from_fid(dev->ld_site, (void *)f); + remove_wait_queue(&bkt->lsb_marche_funebre, &wait); + } } EXPORT_SYMBOL(lu_object_find_at); @@ -722,61 +798,61 @@ struct lu_object *lu_object_find_slice(const struct lu_env *env, const struct lu_fid *f, const struct lu_object_conf *conf) { - struct lu_object *top; - struct lu_object *obj; + struct lu_object *top; + struct lu_object *obj; + + top = lu_object_find(env, dev, f, conf); + if (IS_ERR(top)) + return top; + + obj = lu_object_locate(top->lo_header, dev->ld_type); + if (unlikely(obj == NULL)) { + lu_object_put(env, top); + obj = ERR_PTR(-ENOENT); + } - top = lu_object_find(env, dev, f, conf); - if (!IS_ERR(top)) { - obj = lu_object_locate(top->lo_header, dev->ld_type); - if (obj == NULL) - lu_object_put(env, top); - } else - obj = top; - return obj; + return obj; } EXPORT_SYMBOL(lu_object_find_slice); /** * Global list of all device types. 
*/ -static CFS_LIST_HEAD(lu_device_types); +static struct list_head lu_device_types; int lu_device_type_init(struct lu_device_type *ldt) { int result = 0; - CFS_INIT_LIST_HEAD(&ldt->ldt_linkage); + atomic_set(&ldt->ldt_device_nr, 0); + INIT_LIST_HEAD(&ldt->ldt_linkage); if (ldt->ldt_ops->ldto_init) result = ldt->ldt_ops->ldto_init(ldt); - if (result == 0) - cfs_list_add(&ldt->ldt_linkage, &lu_device_types); + + if (result == 0) { + spin_lock(&obd_types_lock); + list_add(&ldt->ldt_linkage, &lu_device_types); + spin_unlock(&obd_types_lock); + } + return result; } EXPORT_SYMBOL(lu_device_type_init); void lu_device_type_fini(struct lu_device_type *ldt) { - cfs_list_del_init(&ldt->ldt_linkage); + spin_lock(&obd_types_lock); + list_del_init(&ldt->ldt_linkage); + spin_unlock(&obd_types_lock); if (ldt->ldt_ops->ldto_fini) ldt->ldt_ops->ldto_fini(ldt); } EXPORT_SYMBOL(lu_device_type_fini); -void lu_types_stop(void) -{ - struct lu_device_type *ldt; - - cfs_list_for_each_entry(ldt, &lu_device_types, ldt_linkage) { - if (ldt->ldt_device_nr == 0 && ldt->ldt_ops->ldto_stop) - ldt->ldt_ops->ldto_stop(ldt); - } -} -EXPORT_SYMBOL(lu_types_stop); - /** * Global list of all sites on this node */ -static CFS_LIST_HEAD(lu_sites); +static struct list_head lu_sites; static DEFINE_MUTEX(lu_sites_guard); /** @@ -792,23 +868,23 @@ struct lu_site_print_arg { static int lu_site_obj_print(cfs_hash_t *hs, cfs_hash_bd_t *bd, - cfs_hlist_node_t *hnode, void *data) + struct hlist_node *hnode, void *data) { - struct lu_site_print_arg *arg = (struct lu_site_print_arg *)data; - struct lu_object_header *h; + struct lu_site_print_arg *arg = (struct lu_site_print_arg *)data; + struct lu_object_header *h; - h = cfs_hlist_entry(hnode, struct lu_object_header, loh_hash); - if (!cfs_list_empty(&h->loh_layers)) { - const struct lu_object *o; + h = hlist_entry(hnode, struct lu_object_header, loh_hash); + if (!list_empty(&h->loh_layers)) { + const struct lu_object *o; - o = lu_object_top(h); - lu_object_print(arg->lsp_env, arg->lsp_cookie, - arg->lsp_printer, o); - } else { - lu_object_header_print(arg->lsp_env, arg->lsp_cookie, - arg->lsp_printer, h); - } - return 0; + o = lu_object_top(h); + lu_object_print(arg->lsp_env, arg->lsp_cookie, + arg->lsp_printer, o); + } else { + lu_object_header_print(arg->lsp_env, arg->lsp_cookie, + arg->lsp_printer, h); + } + return 0; } /** @@ -827,22 +903,25 @@ void lu_site_print(const struct lu_env *env, struct lu_site *s, void *cookie, } EXPORT_SYMBOL(lu_site_print); -enum { - LU_CACHE_PERCENT_MAX = 50, - LU_CACHE_PERCENT_DEFAULT = 20 -}; - -static unsigned int lu_cache_percent = LU_CACHE_PERCENT_DEFAULT; -CFS_MODULE_PARM(lu_cache_percent, "i", int, 0644, - "Percentage of memory to be used as lu_object cache"); - /** * Return desired hash table order. */ -static int lu_htable_order(void) +static unsigned long lu_htable_order(struct lu_device *top) { - unsigned long cache_size; - int bits; + unsigned long cache_size; + unsigned long bits; + + /* + * For ZFS based OSDs the cache should be disabled by default. This + * allows the ZFS ARC maximum flexibility in determining what buffers + * to cache. If Lustre has objects or buffer which it wants to ensure + * always stay cached it must maintain a hold on them. 
+ */ + if (strcmp(top->ld_type->ldt_name, LUSTRE_OSD_ZFS_NAME) == 0) { + lu_cache_percent = 1; + lu_cache_nr = LU_CACHE_NR_ZFS_LIMIT; + return LU_SITE_BITS_MIN; + } /* * Calculate hash table size, assuming that we want reasonable @@ -851,12 +930,12 @@ static int lu_htable_order(void) * * Size of lu_object is (arbitrary) taken as 1K (together with inode). */ - cache_size = cfs_num_physpages; + cache_size = totalram_pages; #if BITS_PER_LONG == 32 /* limit hashtable size for lowmem systems to low RAM */ - if (cache_size > 1 << (30 - CFS_PAGE_SHIFT)) - cache_size = 1 << (30 - CFS_PAGE_SHIFT) * 3 / 4; + if (cache_size > 1 << (30 - PAGE_CACHE_SHIFT)) + cache_size = 1 << (30 - PAGE_CACHE_SHIFT) * 3 / 4; #endif /* clear off unreasonable cache setting. */ @@ -869,7 +948,7 @@ static int lu_htable_order(void) lu_cache_percent = LU_CACHE_PERCENT_DEFAULT; } cache_size = cache_size / 100 * lu_cache_percent * - (CFS_PAGE_SIZE / 1024); + (PAGE_CACHE_SIZE / 1024); for (bits = 1; (1 << bits) < cache_size; ++bits) { ; @@ -878,66 +957,66 @@ static int lu_htable_order(void) } static unsigned lu_obj_hop_hash(cfs_hash_t *hs, - const void *key, unsigned mask) + const void *key, unsigned mask) { - struct lu_fid *fid = (struct lu_fid *)key; - __u32 hash; + struct lu_fid *fid = (struct lu_fid *)key; + __u32 hash; - hash = fid_flatten32(fid); - hash += (hash >> 4) + (hash << 12); /* mixing oid and seq */ - hash = cfs_hash_long(hash, hs->hs_bkt_bits); + hash = fid_flatten32(fid); + hash += (hash >> 4) + (hash << 12); /* mixing oid and seq */ + hash = hash_long(hash, hs->hs_bkt_bits); - /* give me another random factor */ - hash -= cfs_hash_long((unsigned long)hs, fid_oid(fid) % 11 + 3); + /* give me another random factor */ + hash -= hash_long((unsigned long)hs, fid_oid(fid) % 11 + 3); - hash <<= hs->hs_cur_bits - hs->hs_bkt_bits; - hash |= (fid_seq(fid) + fid_oid(fid)) & (CFS_HASH_NBKT(hs) - 1); + hash <<= hs->hs_cur_bits - hs->hs_bkt_bits; + hash |= (fid_seq(fid) + fid_oid(fid)) & (CFS_HASH_NBKT(hs) - 1); - return hash & mask; + return hash & mask; } -static void *lu_obj_hop_object(cfs_hlist_node_t *hnode) +static void *lu_obj_hop_object(struct hlist_node *hnode) { - return cfs_hlist_entry(hnode, struct lu_object_header, loh_hash); + return hlist_entry(hnode, struct lu_object_header, loh_hash); } -static void *lu_obj_hop_key(cfs_hlist_node_t *hnode) +static void *lu_obj_hop_key(struct hlist_node *hnode) { - struct lu_object_header *h; + struct lu_object_header *h; - h = cfs_hlist_entry(hnode, struct lu_object_header, loh_hash); - return &h->loh_fid; + h = hlist_entry(hnode, struct lu_object_header, loh_hash); + return &h->loh_fid; } -static int lu_obj_hop_keycmp(const void *key, cfs_hlist_node_t *hnode) +static int lu_obj_hop_keycmp(const void *key, struct hlist_node *hnode) { - struct lu_object_header *h; + struct lu_object_header *h; - h = cfs_hlist_entry(hnode, struct lu_object_header, loh_hash); - return lu_fid_eq(&h->loh_fid, (struct lu_fid *)key); + h = hlist_entry(hnode, struct lu_object_header, loh_hash); + return lu_fid_eq(&h->loh_fid, (struct lu_fid *)key); } -static void lu_obj_hop_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode) +static void lu_obj_hop_get(cfs_hash_t *hs, struct hlist_node *hnode) { - struct lu_object_header *h; + struct lu_object_header *h; - h = cfs_hlist_entry(hnode, struct lu_object_header, loh_hash); - if (cfs_atomic_add_return(1, &h->loh_ref) == 1) { - struct lu_site_bkt_data *bkt; - cfs_hash_bd_t bd; + h = hlist_entry(hnode, struct lu_object_header, loh_hash); + if 
(atomic_add_return(1, &h->loh_ref) == 1) { + struct lu_site_bkt_data *bkt; + cfs_hash_bd_t bd; - cfs_hash_bd_get(hs, &h->loh_fid, &bd); - bkt = cfs_hash_bd_extra_get(hs, &bd); - bkt->lsb_busy++; - } + cfs_hash_bd_get(hs, &h->loh_fid, &bd); + bkt = cfs_hash_bd_extra_get(hs, &bd); + bkt->lsb_busy++; + } } -static void lu_obj_hop_put_locked(cfs_hash_t *hs, cfs_hlist_node_t *hnode) +static void lu_obj_hop_put_locked(cfs_hash_t *hs, struct hlist_node *hnode) { LBUG(); /* we should never called it */ } -cfs_hash_ops_t lu_site_hash_ops = { +static cfs_hash_ops_t lu_site_hash_ops = { .hs_hash = lu_obj_hop_hash, .hs_key = lu_obj_hop_key, .hs_keycmp = lu_obj_hop_keycmp, @@ -949,8 +1028,8 @@ cfs_hash_ops_t lu_site_hash_ops = { void lu_dev_add_linkage(struct lu_site *s, struct lu_device *d) { spin_lock(&s->ls_ld_lock); - if (cfs_list_empty(&d->ld_linkage)) - cfs_list_add(&d->ld_linkage, &s->ls_ld_linkage); + if (list_empty(&d->ld_linkage)) + list_add(&d->ld_linkage, &s->ls_ld_linkage); spin_unlock(&s->ls_ld_lock); } EXPORT_SYMBOL(lu_dev_add_linkage); @@ -958,59 +1037,53 @@ EXPORT_SYMBOL(lu_dev_add_linkage); void lu_dev_del_linkage(struct lu_site *s, struct lu_device *d) { spin_lock(&s->ls_ld_lock); - cfs_list_del_init(&d->ld_linkage); + list_del_init(&d->ld_linkage); spin_unlock(&s->ls_ld_lock); } EXPORT_SYMBOL(lu_dev_del_linkage); /** - * Initialize site \a s, with \a d as the top level device. - */ -#define LU_SITE_BITS_MIN 12 -#define LU_SITE_BITS_MAX 24 -/** - * total 256 buckets, we don't want too many buckets because: - * - consume too much memory - * - avoid unbalanced LRU list - */ -#define LU_SITE_BKT_BITS 8 - + * Initialize site \a s, with \a d as the top level device. + */ int lu_site_init(struct lu_site *s, struct lu_device *top) { - struct lu_site_bkt_data *bkt; - cfs_hash_bd_t bd; - char name[16]; - int bits; - int i; - ENTRY; - - memset(s, 0, sizeof *s); - bits = lu_htable_order(); - snprintf(name, 16, "lu_site_%s", top->ld_type->ldt_name); - for (bits = min(max(LU_SITE_BITS_MIN, bits), LU_SITE_BITS_MAX); - bits >= LU_SITE_BITS_MIN; bits--) { - s->ls_obj_hash = cfs_hash_create(name, bits, bits, - bits - LU_SITE_BKT_BITS, - sizeof(*bkt), 0, 0, - &lu_site_hash_ops, - CFS_HASH_SPIN_BKTLOCK | - CFS_HASH_NO_ITEMREF | - CFS_HASH_DEPTH | - CFS_HASH_ASSERT_EMPTY); - if (s->ls_obj_hash != NULL) - break; - } + struct lu_site_bkt_data *bkt; + cfs_hash_bd_t bd; + char name[16]; + unsigned long bits; + unsigned int i; + ENTRY; + + memset(s, 0, sizeof *s); + mutex_init(&s->ls_purge_mutex); + bits = lu_htable_order(top); + snprintf(name, sizeof(name), "lu_site_%s", top->ld_type->ldt_name); + for (bits = clamp_t(typeof(bits), bits, + LU_SITE_BITS_MIN, LU_SITE_BITS_MAX); + bits >= LU_SITE_BITS_MIN; bits--) { + s->ls_obj_hash = cfs_hash_create(name, bits, bits, + bits - LU_SITE_BKT_BITS, + sizeof(*bkt), 0, 0, + &lu_site_hash_ops, + CFS_HASH_SPIN_BKTLOCK | + CFS_HASH_NO_ITEMREF | + CFS_HASH_DEPTH | + CFS_HASH_ASSERT_EMPTY | + CFS_HASH_COUNTER); + if (s->ls_obj_hash != NULL) + break; + } - if (s->ls_obj_hash == NULL) { - CERROR("failed to create lu_site hash with bits: %d\n", bits); - return -ENOMEM; - } + if (s->ls_obj_hash == NULL) { + CERROR("failed to create lu_site hash with bits: %lu\n", bits); + return -ENOMEM; + } - cfs_hash_for_each_bucket(s->ls_obj_hash, &bd, i) { - bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, &bd); - CFS_INIT_LIST_HEAD(&bkt->lsb_lru); - cfs_waitq_init(&bkt->lsb_marche_funebre); - } + cfs_hash_for_each_bucket(s->ls_obj_hash, &bd, i) { + bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, 
&bd); + INIT_LIST_HEAD(&bkt->lsb_lru); + init_waitqueue_head(&bkt->lsb_marche_funebre); + } s->ls_stats = lprocfs_alloc_stats(LU_SS_LAST_STAT, 0); if (s->ls_stats == NULL) { @@ -1032,13 +1105,13 @@ int lu_site_init(struct lu_site *s, struct lu_device *top) lprocfs_counter_init(s->ls_stats, LU_SS_LRU_PURGED, 0, "lru_purged", "lru_purged"); - CFS_INIT_LIST_HEAD(&s->ls_linkage); + INIT_LIST_HEAD(&s->ls_linkage); s->ls_top_dev = top; top->ld_site = s; lu_device_get(top); lu_ref_add(&top->ld_reference, "site-top", s); - CFS_INIT_LIST_HEAD(&s->ls_ld_linkage); + INIT_LIST_HEAD(&s->ls_ld_linkage); spin_lock_init(&s->ls_ld_lock); lu_dev_add_linkage(s, top); @@ -1053,7 +1126,7 @@ EXPORT_SYMBOL(lu_site_init); void lu_site_fini(struct lu_site *s) { mutex_lock(&lu_sites_guard); - cfs_list_del_init(&s->ls_linkage); + list_del_init(&s->ls_linkage); mutex_unlock(&lu_sites_guard); if (s->ls_obj_hash != NULL) { @@ -1082,7 +1155,7 @@ int lu_site_init_finish(struct lu_site *s) mutex_lock(&lu_sites_guard); result = lu_context_refill(&lu_shrink_env.le_ctx); if (result == 0) - cfs_list_add(&s->ls_linkage, &lu_sites); + list_add(&s->ls_linkage, &lu_sites); mutex_unlock(&lu_sites_guard); return result; } @@ -1093,7 +1166,7 @@ EXPORT_SYMBOL(lu_site_init_finish); */ void lu_device_get(struct lu_device *d) { - cfs_atomic_inc(&d->ld_ref); + atomic_inc(&d->ld_ref); } EXPORT_SYMBOL(lu_device_get); @@ -1102,8 +1175,8 @@ EXPORT_SYMBOL(lu_device_get); */ void lu_device_put(struct lu_device *d) { - LASSERT(cfs_atomic_read(&d->ld_ref) > 0); - cfs_atomic_dec(&d->ld_ref); + LASSERT(atomic_read(&d->ld_ref) > 0); + atomic_dec(&d->ld_ref); } EXPORT_SYMBOL(lu_device_put); @@ -1112,14 +1185,16 @@ EXPORT_SYMBOL(lu_device_put); */ int lu_device_init(struct lu_device *d, struct lu_device_type *t) { - if (t->ldt_device_nr++ == 0 && t->ldt_ops->ldto_start != NULL) - t->ldt_ops->ldto_start(t); - memset(d, 0, sizeof *d); - cfs_atomic_set(&d->ld_ref, 0); - d->ld_type = t; - lu_ref_init(&d->ld_reference); - CFS_INIT_LIST_HEAD(&d->ld_linkage); - return 0; + if (atomic_inc_return(&t->ldt_device_nr) == 1 && + t->ldt_ops->ldto_start != NULL) + t->ldt_ops->ldto_start(t); + + memset(d, 0, sizeof *d); + d->ld_type = t; + lu_ref_init(&d->ld_reference); + INIT_LIST_HEAD(&d->ld_linkage); + + return 0; } EXPORT_SYMBOL(lu_device_init); @@ -1128,20 +1203,21 @@ EXPORT_SYMBOL(lu_device_init); */ void lu_device_fini(struct lu_device *d) { - struct lu_device_type *t; + struct lu_device_type *t = d->ld_type; - t = d->ld_type; - if (d->ld_obd != NULL) { - d->ld_obd->obd_lu_dev = NULL; - d->ld_obd = NULL; - } + if (d->ld_obd != NULL) { + d->ld_obd->obd_lu_dev = NULL; + d->ld_obd = NULL; + } - lu_ref_fini(&d->ld_reference); - LASSERTF(cfs_atomic_read(&d->ld_ref) == 0, - "Refcount is %u\n", cfs_atomic_read(&d->ld_ref)); - LASSERT(t->ldt_device_nr > 0); - if (--t->ldt_device_nr == 0 && t->ldt_ops->ldto_stop != NULL) - t->ldt_ops->ldto_stop(t); + lu_ref_fini(&d->ld_reference); + LASSERTF(atomic_read(&d->ld_ref) == 0, + "Refcount is %u\n", atomic_read(&d->ld_ref)); + LASSERT(atomic_read(&t->ldt_device_nr) > 0); + + if (atomic_dec_and_test(&t->ldt_device_nr) && + t->ldt_ops->ldto_stop != NULL) + t->ldt_ops->ldto_stop(t); } EXPORT_SYMBOL(lu_device_fini); @@ -1157,7 +1233,7 @@ int lu_object_init(struct lu_object *o, struct lu_object_header *h, o->lo_dev = d; lu_device_get(d); lu_ref_add_at(&d->ld_reference, &o->lo_dev_ref, "lu_object", o); - CFS_INIT_LIST_HEAD(&o->lo_linkage); + INIT_LIST_HEAD(&o->lo_linkage); return 0; } @@ -1170,7 +1246,7 @@ void 
lu_object_fini(struct lu_object *o) { struct lu_device *dev = o->lo_dev; - LASSERT(cfs_list_empty(&o->lo_linkage)); + LASSERT(list_empty(&o->lo_linkage)); if (dev != NULL) { lu_ref_del_at(&dev->ld_reference, &o->lo_dev_ref, @@ -1189,7 +1265,7 @@ EXPORT_SYMBOL(lu_object_fini); */ void lu_object_add_top(struct lu_object_header *h, struct lu_object *o) { - cfs_list_move(&o->lo_linkage, &h->loh_layers); + list_move(&o->lo_linkage, &h->loh_layers); } EXPORT_SYMBOL(lu_object_add_top); @@ -1201,7 +1277,7 @@ EXPORT_SYMBOL(lu_object_add_top); */ void lu_object_add(struct lu_object *before, struct lu_object *o) { - cfs_list_move(&o->lo_linkage, &before->lo_linkage); + list_move(&o->lo_linkage, &before->lo_linkage); } EXPORT_SYMBOL(lu_object_add); @@ -1211,10 +1287,10 @@ EXPORT_SYMBOL(lu_object_add); int lu_object_header_init(struct lu_object_header *h) { memset(h, 0, sizeof *h); - cfs_atomic_set(&h->loh_ref, 1); - CFS_INIT_HLIST_NODE(&h->loh_hash); - CFS_INIT_LIST_HEAD(&h->loh_lru); - CFS_INIT_LIST_HEAD(&h->loh_layers); + atomic_set(&h->loh_ref, 1); + INIT_HLIST_NODE(&h->loh_hash); + INIT_LIST_HEAD(&h->loh_lru); + INIT_LIST_HEAD(&h->loh_layers); lu_ref_init(&h->loh_reference); return 0; } @@ -1225,9 +1301,9 @@ EXPORT_SYMBOL(lu_object_header_init); */ void lu_object_header_fini(struct lu_object_header *h) { - LASSERT(cfs_list_empty(&h->loh_layers)); - LASSERT(cfs_list_empty(&h->loh_lru)); - LASSERT(cfs_hlist_unhashed(&h->loh_hash)); + LASSERT(list_empty(&h->loh_layers)); + LASSERT(list_empty(&h->loh_lru)); + LASSERT(hlist_unhashed(&h->loh_hash)); lu_ref_fini(&h->loh_reference); } EXPORT_SYMBOL(lu_object_header_fini); @@ -1239,18 +1315,16 @@ EXPORT_SYMBOL(lu_object_header_fini); struct lu_object *lu_object_locate(struct lu_object_header *h, const struct lu_device_type *dtype) { - struct lu_object *o; + struct lu_object *o; - cfs_list_for_each_entry(o, &h->loh_layers, lo_linkage) { - if (o->lo_dev->ld_type == dtype) - return o; - } - return NULL; + list_for_each_entry(o, &h->loh_layers, lo_linkage) { + if (o->lo_dev->ld_type == dtype) + return o; + } + return NULL; } EXPORT_SYMBOL(lu_object_locate); - - /** * Finalize and free devices in the device stack. 
* @@ -1312,8 +1386,8 @@ static unsigned key_set_version = 0; */ int lu_context_key_register(struct lu_context_key *key) { - int result; - int i; + int result; + unsigned int i; LASSERT(key->lct_init != NULL); LASSERT(key->lct_fini != NULL); @@ -1325,7 +1399,7 @@ int lu_context_key_register(struct lu_context_key *key) for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) { if (lu_keys[i] == NULL) { key->lct_index = i; - cfs_atomic_set(&key->lct_used, 1); + atomic_set(&key->lct_used, 1); lu_keys[i] = key; lu_ref_init(&key->lct_reference); result = 0; @@ -1346,16 +1420,16 @@ static void key_fini(struct lu_context *ctx, int index) key = lu_keys[index]; LASSERT(key != NULL); LASSERT(key->lct_fini != NULL); - LASSERT(cfs_atomic_read(&key->lct_used) > 1); + LASSERT(atomic_read(&key->lct_used) > 1); key->lct_fini(ctx, key, ctx->lc_value[index]); lu_ref_del(&key->lct_reference, "ctx", ctx); - cfs_atomic_dec(&key->lct_used); + atomic_dec(&key->lct_used); LASSERT(key->lct_owner != NULL); if ((ctx->lc_tags & LCT_NOREF) == 0) { - LINVRNT(cfs_module_refcount(key->lct_owner) > 0); - cfs_module_put(key->lct_owner); + LINVRNT(module_refcount(key->lct_owner) > 0); + module_put(key->lct_owner); } ctx->lc_value[index] = NULL; } @@ -1366,7 +1440,7 @@ static void key_fini(struct lu_context *ctx, int index) */ void lu_context_key_degister(struct lu_context_key *key) { - LASSERT(cfs_atomic_read(&key->lct_used) >= 1); + LASSERT(atomic_read(&key->lct_used) >= 1); LINVRNT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys)); lu_context_key_quiesce(key); @@ -1380,9 +1454,9 @@ void lu_context_key_degister(struct lu_context_key *key) } spin_unlock(&lu_keys_guard); - LASSERTF(cfs_atomic_read(&key->lct_used) == 1, + LASSERTF(atomic_read(&key->lct_used) == 1, "key has instances: %d\n", - cfs_atomic_read(&key->lct_used)); + atomic_read(&key->lct_used)); } EXPORT_SYMBOL(lu_context_key_degister); @@ -1483,7 +1557,7 @@ EXPORT_SYMBOL(lu_context_key_get); /** * List of remembered contexts. XXX document me. */ -static CFS_LIST_HEAD(lu_context_remembered); +static struct list_head lu_context_remembered; /** * Destroy \a key in all remembered contexts. This is used to destroy key @@ -1493,18 +1567,20 @@ static CFS_LIST_HEAD(lu_context_remembered); void lu_context_key_quiesce(struct lu_context_key *key) { struct lu_context *ctx; + extern unsigned cl_env_cache_purge(unsigned nr); if (!(key->lct_tags & LCT_QUIESCENT)) { /* * XXX layering violation. */ + cl_env_cache_purge(~0); key->lct_tags |= LCT_QUIESCENT; /* * XXX memory barrier has to go here. 
*/ spin_lock(&lu_keys_guard); - cfs_list_for_each_entry(ctx, &lu_context_remembered, - lc_remember) + list_for_each_entry(ctx, &lu_context_remembered, + lc_remember) key_fini(ctx, key->lct_index); spin_unlock(&lu_keys_guard); ++key_set_version; @@ -1521,7 +1597,7 @@ EXPORT_SYMBOL(lu_context_key_revive); static void keys_fini(struct lu_context *ctx) { - int i; + unsigned int i; if (ctx->lc_value == NULL) return; @@ -1535,7 +1611,7 @@ static void keys_fini(struct lu_context *ctx) static int keys_fill(struct lu_context *ctx) { - int i; + unsigned int i; LINVRNT(ctx->lc_value != NULL); for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) { @@ -1558,11 +1634,11 @@ static int keys_fill(struct lu_context *ctx) if (unlikely(IS_ERR(value))) return PTR_ERR(value); - LASSERT(key->lct_owner != NULL); - if (!(ctx->lc_tags & LCT_NOREF)) - cfs_try_module_get(key->lct_owner); - lu_ref_add_atomic(&key->lct_reference, "ctx", ctx); - cfs_atomic_inc(&key->lct_used); + LASSERT(key->lct_owner != NULL); + if (!(ctx->lc_tags & LCT_NOREF)) + try_module_get(key->lct_owner); + lu_ref_add_atomic(&key->lct_reference, "ctx", ctx); + atomic_inc(&key->lct_used); /* * This is the only place in the code, where an * element of ctx->lc_value[] array is set to non-NULL @@ -1598,10 +1674,10 @@ int lu_context_init(struct lu_context *ctx, __u32 tags) ctx->lc_tags = tags; if (tags & LCT_REMEMBER) { spin_lock(&lu_keys_guard); - cfs_list_add(&ctx->lc_remember, &lu_context_remembered); + list_add(&ctx->lc_remember, &lu_context_remembered); spin_unlock(&lu_keys_guard); } else { - CFS_INIT_LIST_HEAD(&ctx->lc_remember); + INIT_LIST_HEAD(&ctx->lc_remember); } rc = keys_init(ctx); @@ -1621,13 +1697,13 @@ void lu_context_fini(struct lu_context *ctx) ctx->lc_state = LCS_FINALIZED; if ((ctx->lc_tags & LCT_REMEMBER) == 0) { - LASSERT(cfs_list_empty(&ctx->lc_remember)); + LASSERT(list_empty(&ctx->lc_remember)); keys_fini(ctx); } else { /* could race with key degister */ spin_lock(&lu_keys_guard); keys_fini(ctx); - cfs_list_del_init(&ctx->lc_remember); + list_del_init(&ctx->lc_remember); spin_unlock(&lu_keys_guard); } } @@ -1648,7 +1724,7 @@ EXPORT_SYMBOL(lu_context_enter); */ void lu_context_exit(struct lu_context *ctx) { - int i; + unsigned int i; LINVRNT(ctx->lc_state == LCS_ENTERED); ctx->lc_state = LCS_LEFT; @@ -1784,7 +1860,7 @@ int lu_env_refill_by_tags(struct lu_env *env, __u32 ctags, } EXPORT_SYMBOL(lu_env_refill_by_tags); -static struct cfs_shrinker *lu_site_shrinker = NULL; +static struct shrinker *lu_site_shrinker; typedef struct lu_site_stats{ unsigned lss_populated; @@ -1796,12 +1872,12 @@ typedef struct lu_site_stats{ static void lu_site_stats_get(cfs_hash_t *hs, lu_site_stats_t *stats, int populated) { - cfs_hash_bd_t bd; - int i; + cfs_hash_bd_t bd; + unsigned int i; cfs_hash_for_each_bucket(hs, &bd, i) { struct lu_site_bkt_data *bkt = cfs_hash_bd_extra_get(hs, &bd); - cfs_hlist_head_t *hhead; + struct hlist_head *hhead; cfs_hash_bd_lock(hs, &bd, 1); stats->lss_busy += bkt->lsb_busy; @@ -1814,15 +1890,76 @@ static void lu_site_stats_get(cfs_hash_t *hs, } cfs_hash_bd_for_each_hlist(hs, &bd, hhead) { - if (!cfs_hlist_empty(hhead)) + if (!hlist_empty(hhead)) stats->lss_populated++; } cfs_hash_bd_unlock(hs, &bd, 1); } } -#ifdef __KERNEL__ +static unsigned long lu_cache_shrink_count(struct shrinker *sk, + struct shrink_control *sc) +{ + lu_site_stats_t stats; + struct lu_site *s; + struct lu_site *tmp; + unsigned long cached = 0; + + if (!(sc->gfp_mask & __GFP_FS)) + return 0; + + mutex_lock(&lu_sites_guard); + list_for_each_entry_safe(s, 
tmp, &lu_sites, ls_linkage) { + memset(&stats, 0, sizeof(stats)); + lu_site_stats_get(s->ls_obj_hash, &stats, 0); + cached += stats.lss_total - stats.lss_busy; + } + mutex_unlock(&lu_sites_guard); + + cached = (cached / 100) * sysctl_vfs_cache_pressure; + CDEBUG(D_INODE, "%ld objects cached\n", cached); + return cached; +} + +static unsigned long lu_cache_shrink_scan(struct shrinker *sk, + struct shrink_control *sc) +{ + struct lu_site *s; + struct lu_site *tmp; + unsigned long remain = sc->nr_to_scan; + LIST_HEAD(splice); + + if (!(sc->gfp_mask & __GFP_FS)) + /* We must not take the lu_sites_guard lock when + * __GFP_FS is *not* set because of the deadlock + * possibility detailed above. Additionally, + * since we cannot determine the number of + * objects in the cache without taking this + * lock, we're in a particularly tough spot. As + * a result, we'll just lie and say our cache is + * empty. This _should_ be ok, as we can't + * reclaim objects when __GFP_FS is *not* set + * anyways. + */ + return SHRINK_STOP; + + mutex_lock(&lu_sites_guard); + list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) { + remain = lu_site_purge(&lu_shrink_env, s, remain); + /* + * Move just shrunk site to the tail of site list to + * assure shrinking fairness. + */ + list_move_tail(&s->ls_linkage, &splice); + } + list_splice(&splice, lu_sites.prev); + mutex_unlock(&lu_sites_guard); + + return sc->nr_to_scan - remain; +} + +#ifndef HAVE_SHRINKER_COUNT /* * There exists a potential lock inversion deadlock scenario when using * Lustre on top of ZFS. This occurs between one of ZFS's @@ -1843,59 +1980,29 @@ static void lu_site_stats_get(cfs_hash_t *hs, */ static int lu_cache_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask)) { - lu_site_stats_t stats; - struct lu_site *s; - struct lu_site *tmp; int cached = 0; - int remain = shrink_param(sc, nr_to_scan); - CFS_LIST_HEAD(splice); - - if (!(shrink_param(sc, gfp_mask) & __GFP_FS)) { - if (remain != 0) - return -1; - else - /* We must not take the lu_sites_guard lock when - * __GFP_FS is *not* set because of the deadlock - * possibility detailed above. Additionally, - * since we cannot determine the number of - * objects in the cache without taking this - * lock, we're in a particularly tough spot. As - * a result, we'll just lie and say our cache is - * empty. This _should_ be ok, as we can't - * reclaim objects when __GFP_FS is *not* set - * anyways. - */ - return 0; - } + struct shrink_control scv = { + .nr_to_scan = shrink_param(sc, nr_to_scan), + .gfp_mask = shrink_param(sc, gfp_mask) + }; +#if !defined(HAVE_SHRINKER_WANT_SHRINK_PTR) && !defined(HAVE_SHRINK_CONTROL) + struct shrinker* shrinker = NULL; +#endif - CDEBUG(D_INODE, "Shrink %d objects\n", remain); - mutex_lock(&lu_sites_guard); - cfs_list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) { - if (shrink_param(sc, nr_to_scan) != 0) { - remain = lu_site_purge(&lu_shrink_env, s, remain); - /* - * Move just shrunk site to the tail of site list to - * assure shrinking fairness. 
- */ - cfs_list_move_tail(&s->ls_linkage, &splice); - } + CDEBUG(D_INODE, "Shrink %lu objects\n", scv.nr_to_scan); - memset(&stats, 0, sizeof(stats)); - lu_site_stats_get(s->ls_obj_hash, &stats, 0); - cached += stats.lss_total - stats.lss_busy; - if (shrink_param(sc, nr_to_scan) && remain <= 0) - break; - } - cfs_list_splice(&splice, lu_sites.prev); - mutex_unlock(&lu_sites_guard); + lu_cache_shrink_scan(shrinker, &scv); - cached = (cached / 100) * sysctl_vfs_cache_pressure; - if (shrink_param(sc, nr_to_scan) == 0) - CDEBUG(D_INODE, "%d objects cached\n", cached); - return cached; + cached = lu_cache_shrink_count(shrinker, &scv); + if (scv.nr_to_scan == 0) + CDEBUG(D_INODE, "%d objects cached\n", cached); + return cached; } +#endif /* HAVE_SHRINKER_COUNT */ + + /* * Debugging stuff. */ @@ -1903,13 +2010,13 @@ static int lu_cache_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask)) /** * Environment to be used in debugger, contains all tags. */ -struct lu_env lu_debugging_env; +static struct lu_env lu_debugging_env; /** * Debugging printer function using printk(). */ int lu_printk_printer(const struct lu_env *env, - void *unused, const char *format, ...) + void *unused, const char *format, ...) { va_list args; @@ -1926,7 +2033,7 @@ int lu_debugging_setup(void) void lu_context_keys_dump(void) { - int i; + unsigned int i; for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) { struct lu_context_key *key; @@ -1936,7 +2043,7 @@ void lu_context_keys_dump(void) CERROR("[%d]: %p %x (%p,%p,%p) %d %d \"%s\"@%p\n", i, key, key->lct_tags, key->lct_init, key->lct_fini, key->lct_exit, - key->lct_index, cfs_atomic_read(&key->lct_used), + key->lct_index, atomic_read(&key->lct_used), key->lct_owner ? key->lct_owner->name : "", key->lct_owner); lu_ref_print(&key->lct_reference); @@ -1944,12 +2051,6 @@ void lu_context_keys_dump(void) } } EXPORT_SYMBOL(lu_context_keys_dump); -#else /* !__KERNEL__ */ -static int lu_cache_shrink(int nr, unsigned int gfp_mask) -{ - return 0; -} -#endif /* __KERNEL__ */ /** * Initialization of global lu_* data. @@ -1957,9 +2058,15 @@ static int lu_cache_shrink(int nr, unsigned int gfp_mask) int lu_global_init(void) { int result; + DEF_SHRINKER_VAR(shvar, lu_cache_shrink, + lu_cache_shrink_count, lu_cache_shrink_scan); CDEBUG(D_INFO, "Lustre LU module (%p).\n", &lu_keys); + INIT_LIST_HEAD(&lu_device_types); + INIT_LIST_HEAD(&lu_context_remembered); + INIT_LIST_HEAD(&lu_sites); + result = lu_ref_global_init(); if (result != 0) return result; @@ -1985,7 +2092,7 @@ int lu_global_init(void) * inode, one for ea. Unfortunately setting this high value results in * lu_object/inode cache consuming all the memory. */ - lu_site_shrinker = cfs_set_shrinker(CFS_DEFAULT_SEEKS, lu_cache_shrink); + lu_site_shrinker = set_shrinker(DEFAULT_SEEKS, &shvar); if (lu_site_shrinker == NULL) return -ENOMEM; @@ -1998,7 +2105,7 @@ int lu_global_init(void) void lu_global_fini(void) { if (lu_site_shrinker != NULL) { - cfs_remove_shrinker(lu_site_shrinker); + remove_shrinker(lu_site_shrinker); lu_site_shrinker = NULL; } @@ -2017,7 +2124,7 @@ void lu_global_fini(void) static __u32 ls_stats_read(struct lprocfs_stats *stats, int idx) { -#ifdef LPROCFS +#ifdef CONFIG_PROC_FS struct lprocfs_counter ret; lprocfs_stats_collect(stats, idx, &ret); @@ -2031,6 +2138,28 @@ static __u32 ls_stats_read(struct lprocfs_stats *stats, int idx) * Output site statistical counters into a buffer. Suitable for * lprocfs_rd_*()-style functions. 
*/ +int lu_site_stats_seq_print(const struct lu_site *s, struct seq_file *m) +{ + lu_site_stats_t stats; + + memset(&stats, 0, sizeof(stats)); + lu_site_stats_get(s->ls_obj_hash, &stats, 1); + + return seq_printf(m, "%d/%d %d/%d %d %d %d %d %d %d %d\n", + stats.lss_busy, + stats.lss_total, + stats.lss_populated, + CFS_HASH_NHLIST(s->ls_obj_hash), + stats.lss_max_search, + ls_stats_read(s->ls_stats, LU_SS_CREATED), + ls_stats_read(s->ls_stats, LU_SS_CACHE_HIT), + ls_stats_read(s->ls_stats, LU_SS_CACHE_MISS), + ls_stats_read(s->ls_stats, LU_SS_CACHE_RACE), + ls_stats_read(s->ls_stats, LU_SS_CACHE_DEATH_RACE), + ls_stats_read(s->ls_stats, LU_SS_LRU_PURGED)); +} +EXPORT_SYMBOL(lu_site_stats_seq_print); + int lu_site_stats_print(const struct lu_site *s, char *page, int count) { lu_site_stats_t stats; @@ -2062,9 +2191,9 @@ int lu_kmem_init(struct lu_kmem_descr *caches) struct lu_kmem_descr *iter = caches; for (result = 0; iter->ckd_cache != NULL; ++iter) { - *iter->ckd_cache = cfs_mem_cache_create(iter->ckd_name, - iter->ckd_size, - 0, 0); + *iter->ckd_cache = kmem_cache_create(iter->ckd_name, + iter->ckd_size, + 0, 0, NULL); if (*iter->ckd_cache == NULL) { result = -ENOMEM; /* free all previously allocated caches */ @@ -2082,13 +2211,9 @@ EXPORT_SYMBOL(lu_kmem_init); */ void lu_kmem_fini(struct lu_kmem_descr *caches) { - int rc; - for (; caches->ckd_cache != NULL; ++caches) { if (*caches->ckd_cache != NULL) { - rc = cfs_mem_cache_destroy(*caches->ckd_cache); - LASSERTF(rc == 0, "couldn't destroy %s slab\n", - caches->ckd_name); + kmem_cache_destroy(*caches->ckd_cache); *caches->ckd_cache = NULL; } } @@ -2106,7 +2231,7 @@ void lu_object_assign_fid(const struct lu_env *env, struct lu_object *o, struct lu_fid *old = &o->lo_header->loh_fid; struct lu_site_bkt_data *bkt; struct lu_object *shadow; - cfs_waitlink_t waiter; + wait_queue_t waiter; cfs_hash_t *hs; cfs_hash_bd_t bd; __u64 version = 0; @@ -2117,7 +2242,7 @@ void lu_object_assign_fid(const struct lu_env *env, struct lu_object *o, cfs_hash_bd_get_and_lock(hs, (void *)fid, &bd, 1); shadow = htable_lookup(s, &bd, fid, &waiter, &version); /* supposed to be unique */ - LASSERT(shadow == NULL); + LASSERT(IS_ERR(shadow) && PTR_ERR(shadow) == -ENOENT); *old = *fid; bkt = cfs_hash_bd_extra_get(hs, &bd); cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash); @@ -2163,7 +2288,7 @@ void lu_buf_free(struct lu_buf *buf) } EXPORT_SYMBOL(lu_buf_free); -void lu_buf_alloc(struct lu_buf *buf, int size) +void lu_buf_alloc(struct lu_buf *buf, size_t size) { LASSERT(buf); LASSERT(buf->lb_buf == NULL); @@ -2174,14 +2299,14 @@ void lu_buf_alloc(struct lu_buf *buf, int size) } EXPORT_SYMBOL(lu_buf_alloc); -void lu_buf_realloc(struct lu_buf *buf, int size) +void lu_buf_realloc(struct lu_buf *buf, size_t size) { lu_buf_free(buf); lu_buf_alloc(buf, size); } EXPORT_SYMBOL(lu_buf_realloc); -struct lu_buf *lu_buf_check_and_alloc(struct lu_buf *buf, int len) +struct lu_buf *lu_buf_check_and_alloc(struct lu_buf *buf, size_t len) { if (buf->lb_buf == NULL && buf->lb_len == 0) lu_buf_alloc(buf, len); @@ -2199,7 +2324,7 @@ EXPORT_SYMBOL(lu_buf_check_and_alloc); * old buffer remains unchanged on error * \retval 0 or -ENOMEM */ -int lu_buf_check_and_grow(struct lu_buf *buf, int len) +int lu_buf_check_and_grow(struct lu_buf *buf, size_t len) { char *ptr; @@ -2221,4 +2346,3 @@ int lu_buf_check_and_grow(struct lu_buf *buf, int len) return 0; } EXPORT_SYMBOL(lu_buf_check_and_grow); -
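
The hunks above replace the old single-callback cache shrinker with the split lu_cache_shrink_count()/lu_cache_shrink_scan() pair while keeping the __GFP_FS guard that avoids the ZFS lock-inversion deadlock described in the retained comment. As an illustrative aside, not part of the patch, the sketch below shows the same guard pattern expressed against the stock count/scan shrinker interface of kernels that provide it natively; the example_* names and the atomic counter standing in for the lu_object cache are placeholders, and the actual patch registers through Lustre's DEF_SHRINKER_VAR()/set_shrinker() compatibility wrappers so the same source also builds where only the legacy single-callback interface exists.

/*
 * Illustrative sketch only (assumes a kernel with the split count/scan
 * shrinker API; names are placeholders, not Lustre symbols).  Both callbacks
 * honour the __GFP_FS guard, so an allocation made from inside ZFS, which
 * clears __GFP_FS, can never re-enter the cache locks and trigger the
 * inversion described in the comment above lu_cache_shrink().
 */
#include <linux/atomic.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/shrinker.h>

static atomic_long_t example_cached = ATOMIC_LONG_INIT(0);

static unsigned long example_cache_count(struct shrinker *sk,
					 struct shrink_control *sc)
{
	if (!(sc->gfp_mask & __GFP_FS))
		return 0;			/* claim the cache is empty */
	return atomic_long_read(&example_cached);
}

static unsigned long example_cache_scan(struct shrinker *sk,
					struct shrink_control *sc)
{
	unsigned long nr;

	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;		/* refuse to take cache locks */

	nr = min_t(unsigned long, sc->nr_to_scan,
		   atomic_long_read(&example_cached));
	atomic_long_sub(nr, &example_cached);	/* stand-in for lu_site_purge() */
	return nr;				/* objects actually freed */
}

static struct shrinker example_cache_shrinker = {
	.count_objects	= example_cache_count,
	.scan_objects	= example_cache_scan,
	.seeks		= DEFAULT_SEEKS,
};

On such kernels the structure would simply be registered with register_shrinker() at module init and torn down with unregister_shrinker(); the patch instead goes through the libcfs set_shrinker()/remove_shrinker() helpers and the HAVE_SHRINKER_COUNT conditional so older kernels fall back to the legacy lu_cache_shrink() entry point shown in the diff.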