X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Fobdclass%2Flu_object.c;h=638442148d84b50b60f3211d8368c6c7f421fccf;hp=fe5ee834726080412073cb8d7efbbfb6ef715924;hb=555eb580e547fb263faee0a4d88482789171211b;hpb=6e3ec5812ebd1b5ecf7cae584f429b013ffe7431;ds=sidebyside diff --git a/lustre/obdclass/lu_object.c b/lustre/obdclass/lu_object.c index fe5ee83..6384421 100644 --- a/lustre/obdclass/lu_object.c +++ b/lustre/obdclass/lu_object.c @@ -1,6 +1,4 @@ -/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*- - * vim:expandtab:shiftwidth=8:tabstop=8: - * +/* * GPL HEADER START * * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. @@ -26,8 +24,10 @@ * GPL HEADER END */ /* - * Copyright 2008 Sun Microsystems, Inc. All rights reserved + * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. * Use is subject to license terms. + * + * Copyright (c) 2011, 2013, Intel Corporation. */ /* * This file is part of Lustre, http://www.lustre.org/ @@ -43,26 +43,47 @@ */ #define DEBUG_SUBSYSTEM S_CLASS -#ifndef EXPORT_SYMTAB -# define EXPORT_SYMTAB -#endif #include - -#ifdef __KERNEL__ -# include -#endif - -/* hash_long() */ -#include +#include +#include /* hash_long() */ #include #include #include #include #include +#include #include -/* lu_time_global_{init,fini}() */ -#include + +enum { + LU_CACHE_PERCENT_MAX = 50, + LU_CACHE_PERCENT_DEFAULT = 20 +}; + +#define LU_CACHE_NR_MAX_ADJUST 128 +#define LU_CACHE_NR_UNLIMITED -1 +#define LU_CACHE_NR_DEFAULT LU_CACHE_NR_UNLIMITED +#define LU_CACHE_NR_LDISKFS_LIMIT LU_CACHE_NR_UNLIMITED +/** This is set to roughly (20 * OSS_NTHRS_MAX) to prevent thrashing */ +#define LU_CACHE_NR_ZFS_LIMIT 10240 + +#define LU_SITE_BITS_MIN 12 +#define LU_SITE_BITS_MAX 24 +/** + * total 256 buckets, we don't want too many buckets because: + * - consume too much memory + * - avoid unbalanced LRU list + */ +#define LU_SITE_BKT_BITS 8 + + +static unsigned int lu_cache_percent = LU_CACHE_PERCENT_DEFAULT; +CFS_MODULE_PARM(lu_cache_percent, "i", int, 0644, + "Percentage of memory to be used as lu_object cache"); + +static long lu_cache_nr = LU_CACHE_NR_DEFAULT; +CFS_MODULE_PARM(lu_cache_nr, "l", long, 0644, + "Maximum number of objects in lu_object cache"); static void lu_object_free(const struct lu_env *env, struct lu_object *o); @@ -73,110 +94,185 @@ static void lu_object_free(const struct lu_env *env, struct lu_object *o); */ void lu_object_put(const struct lu_env *env, struct lu_object *o) { + struct lu_site_bkt_data *bkt; struct lu_object_header *top; struct lu_site *site; struct lu_object *orig; - int kill_it; + cfs_hash_bd_t bd; + const struct lu_fid *fid; - top = o->lo_header; + top = o->lo_header; site = o->lo_dev->ld_site; orig = o; - kill_it = 0; - cfs_write_lock(&site->ls_guard); - if (cfs_atomic_dec_and_test(&top->loh_ref)) { - /* - * When last reference is released, iterate over object - * layers, and notify them that object is no longer busy. - */ - cfs_list_for_each_entry_reverse(o, &top->loh_layers, - lo_linkage) { - if (o->lo_ops->loo_object_release != NULL) - o->lo_ops->loo_object_release(env, o); - } - -- site->ls_busy; - if (lu_object_is_dying(top)) { - /* - * If object is dying (will not be cached), removed it - * from hash table and LRU. - * - * This is done with hash table and LRU lists - * locked. 
As the only way to acquire first reference - * to previously unreferenced object is through - * hash-table lookup (lu_object_find()), or LRU - * scanning (lu_site_purge()), that are done under - * hash-table and LRU lock, no race with concurrent - * object lookup is possible and we can safely destroy - * object below. - */ - cfs_hlist_del_init(&top->loh_hash); - cfs_list_del_init(&top->loh_lru); - -- site->ls_total; - kill_it = 1; - } + + /* + * till we have full fids-on-OST implemented anonymous objects + * are possible in OSP. such an object isn't listed in the site + * so we should not remove it from the site. + */ + fid = lu_object_fid(o); + if (fid_is_zero(fid)) { + LASSERT(top->loh_hash.next == NULL + && top->loh_hash.pprev == NULL); + LASSERT(list_empty(&top->loh_lru)); + if (!atomic_dec_and_test(&top->loh_ref)) + return; + list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) { + if (o->lo_ops->loo_object_release != NULL) + o->lo_ops->loo_object_release(env, o); + } + lu_object_free(env, orig); + return; + } + + cfs_hash_bd_get(site->ls_obj_hash, &top->loh_fid, &bd); + bkt = cfs_hash_bd_extra_get(site->ls_obj_hash, &bd); + + if (!cfs_hash_bd_dec_and_lock(site->ls_obj_hash, &bd, &top->loh_ref)) { + if (lu_object_is_dying(top)) { + + /* + * somebody may be waiting for this, currently only + * used for cl_object, see cl_object_put_last(). + */ + wake_up_all(&bkt->lsb_marche_funebre); + } + return; + } + + LASSERT(bkt->lsb_busy > 0); + bkt->lsb_busy--; + /* + * When last reference is released, iterate over object + * layers, and notify them that object is no longer busy. + */ + list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) { + if (o->lo_ops->loo_object_release != NULL) + o->lo_ops->loo_object_release(env, o); } - cfs_write_unlock(&site->ls_guard); - if (kill_it) - /* - * Object was already removed from hash and lru above, can - * kill it. - */ - lu_object_free(env, orig); + + if (!lu_object_is_dying(top)) { + LASSERT(list_empty(&top->loh_lru)); + list_add_tail(&top->loh_lru, &bkt->lsb_lru); + cfs_hash_bd_unlock(site->ls_obj_hash, &bd, 1); + return; + } + + /* + * If object is dying (will not be cached), removed it + * from hash table and LRU. + * + * This is done with hash table and LRU lists locked. As the only + * way to acquire first reference to previously unreferenced + * object is through hash-table lookup (lu_object_find()), + * or LRU scanning (lu_site_purge()), that are done under hash-table + * and LRU lock, no race with concurrent object lookup is possible + * and we can safely destroy object below. + */ + if (!test_and_set_bit(LU_OBJECT_UNHASHED, &top->loh_flags)) + cfs_hash_bd_del_locked(site->ls_obj_hash, &bd, &top->loh_hash); + cfs_hash_bd_unlock(site->ls_obj_hash, &bd, 1); + /* + * Object was already removed from hash and lru above, can + * kill it. + */ + lu_object_free(env, orig); } EXPORT_SYMBOL(lu_object_put); /** + * Put object and don't keep in cache. This is temporary solution for + * multi-site objects when its layering is not constant. + */ +void lu_object_put_nocache(const struct lu_env *env, struct lu_object *o) +{ + set_bit(LU_OBJECT_HEARD_BANSHEE, &o->lo_header->loh_flags); + return lu_object_put(env, o); +} +EXPORT_SYMBOL(lu_object_put_nocache); + +/** + * Kill the object and take it out of LRU cache. + * Currently used by client code for layout change. 
+ */ +void lu_object_unhash(const struct lu_env *env, struct lu_object *o) +{ + struct lu_object_header *top; + + top = o->lo_header; + set_bit(LU_OBJECT_HEARD_BANSHEE, &top->loh_flags); + if (!test_and_set_bit(LU_OBJECT_UNHASHED, &top->loh_flags)) { + cfs_hash_t *obj_hash = o->lo_dev->ld_site->ls_obj_hash; + cfs_hash_bd_t bd; + + cfs_hash_bd_get_and_lock(obj_hash, &top->loh_fid, &bd, 1); + list_del_init(&top->loh_lru); + cfs_hash_bd_del_locked(obj_hash, &bd, &top->loh_hash); + cfs_hash_bd_unlock(obj_hash, &bd, 1); + } +} +EXPORT_SYMBOL(lu_object_unhash); + +/** * Allocate new object. * * This follows object creation protocol, described in the comment within * struct lu_device_operations definition. */ static struct lu_object *lu_object_alloc(const struct lu_env *env, - struct lu_device *dev, - const struct lu_fid *f, - const struct lu_object_conf *conf) -{ - struct lu_object *scan; - struct lu_object *top; - cfs_list_t *layers; - int clean; - int result; - ENTRY; - - /* - * Create top-level object slice. This will also create - * lu_object_header. - */ - top = dev->ld_ops->ldo_object_alloc(env, NULL, dev); - if (top == NULL) - RETURN(ERR_PTR(-ENOMEM)); + struct lu_device *dev, + const struct lu_fid *f, + const struct lu_object_conf *conf) +{ + struct lu_object *scan; + struct lu_object *top; + struct list_head *layers; + unsigned int init_mask = 0; + unsigned int init_flag; + int clean; + int result; + ENTRY; + + /* + * Create top-level object slice. This will also create + * lu_object_header. + */ + top = dev->ld_ops->ldo_object_alloc(env, NULL, dev); + if (top == NULL) + RETURN(ERR_PTR(-ENOMEM)); + if (IS_ERR(top)) + RETURN(top); /* * This is the only place where object fid is assigned. It's constant * after this point. */ - LASSERT(fid_is_igif(f) || fid_ver(f) == 0); - top->lo_header->loh_fid = *f; + top->lo_header->loh_fid = *f; layers = &top->lo_header->loh_layers; - do { - /* - * Call ->loo_object_init() repeatedly, until no more new - * object slices are created. - */ - clean = 1; - cfs_list_for_each_entry(scan, layers, lo_linkage) { - if (scan->lo_flags & LU_OBJECT_ALLOCATED) - continue; - clean = 0; - scan->lo_header = top->lo_header; - result = scan->lo_ops->loo_object_init(env, scan, conf); - if (result != 0) { - lu_object_free(env, top); - RETURN(ERR_PTR(result)); - } - scan->lo_flags |= LU_OBJECT_ALLOCATED; - } - } while (!clean); - cfs_list_for_each_entry_reverse(scan, layers, lo_linkage) { + do { + /* + * Call ->loo_object_init() repeatedly, until no more new + * object slices are created. 
+ */ + clean = 1; + init_flag = 1; + list_for_each_entry(scan, layers, lo_linkage) { + if (init_mask & init_flag) + goto next; + clean = 0; + scan->lo_header = top->lo_header; + result = scan->lo_ops->loo_object_init(env, scan, conf); + if (result != 0) { + lu_object_free(env, top); + RETURN(ERR_PTR(result)); + } + init_mask |= init_flag; +next: + init_flag <<= 1; + } + } while (!clean); + + list_for_each_entry_reverse(scan, layers, lo_linkage) { if (scan->lo_ops->loo_object_start != NULL) { result = scan->lo_ops->loo_object_start(env, scan); if (result != 0) { @@ -186,7 +282,7 @@ static struct lu_object *lu_object_alloc(const struct lu_env *env, } } - dev->ld_site->ls_stats.s_created ++; + lprocfs_counter_incr(dev->ld_site->ls_stats, LU_SS_CREATED); RETURN(top); } @@ -195,17 +291,19 @@ static struct lu_object *lu_object_alloc(const struct lu_env *env, */ static void lu_object_free(const struct lu_env *env, struct lu_object *o) { - cfs_list_t splice; - struct lu_object *scan; - struct lu_site *site; - cfs_list_t *layers; + struct lu_site_bkt_data *bkt; + struct lu_site *site; + struct lu_object *scan; + struct list_head *layers; + struct list_head splice; site = o->lo_dev->ld_site; layers = &o->lo_header->loh_layers; + bkt = lu_site_bkt_from_fid(site, &o->lo_header->loh_fid); /* * First call ->loo_object_delete() method to release all resources. */ - cfs_list_for_each_entry_reverse(scan, layers, lo_linkage) { + list_for_each_entry_reverse(scan, layers, lo_linkage) { if (scan->lo_ops->loo_object_delete != NULL) scan->lo_ops->loo_object_delete(env, scan); } @@ -216,20 +314,22 @@ static void lu_object_free(const struct lu_env *env, struct lu_object *o) * necessary, because lu_object_header is freed together with the * top-level slice. */ - CFS_INIT_LIST_HEAD(&splice); - cfs_list_splice_init(layers, &splice); - while (!cfs_list_empty(&splice)) { - /* - * Free layers in bottom-to-top order, so that object header - * lives as long as possible and ->loo_object_free() methods - * can look at its contents. - */ - o = container_of0(splice.prev, struct lu_object, lo_linkage); - cfs_list_del_init(&o->lo_linkage); - LASSERT(o->lo_ops->loo_object_free != NULL); - o->lo_ops->loo_object_free(env, o); - } - cfs_waitq_broadcast(&site->ls_marche_funebre); + INIT_LIST_HEAD(&splice); + list_splice_init(layers, &splice); + while (!list_empty(&splice)) { + /* + * Free layers in bottom-to-top order, so that object header + * lives as long as possible and ->loo_object_free() methods + * can look at its contents. + */ + o = container_of0(splice.prev, struct lu_object, lo_linkage); + list_del_init(&o->lo_linkage); + LASSERT(o->lo_ops->loo_object_free != NULL); + o->lo_ops->loo_object_free(env, o); + } + + if (waitqueue_active(&bkt->lsb_marche_funebre)) + wake_up_all(&bkt->lsb_marche_funebre); } /** @@ -237,47 +337,87 @@ static void lu_object_free(const struct lu_env *env, struct lu_object *o) */ int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr) { - cfs_list_t dispose; struct lu_object_header *h; struct lu_object_header *temp; - - CFS_INIT_LIST_HEAD(&dispose); + struct lu_site_bkt_data *bkt; + cfs_hash_bd_t bd; + cfs_hash_bd_t bd2; + struct list_head dispose; + int did_sth; + int start; + int count; + int bnr; + unsigned int i; + + if (OBD_FAIL_CHECK(OBD_FAIL_OBD_NO_LRU)) + RETURN(0); + + INIT_LIST_HEAD(&dispose); /* * Under LRU list lock, scan LRU list and move unreferenced objects to * the dispose list, removing them from LRU and hash table. 
*/ - cfs_write_lock(&s->ls_guard); - cfs_list_for_each_entry_safe(h, temp, &s->ls_lru, loh_lru) { - /* - * Objects are sorted in lru order, and "busy" objects (ones - * with h->loh_ref > 0) naturally tend to live near hot end - * that we scan last. Unfortunately, sites usually have small - * (less then ten) number of busy yet rarely accessed objects - * (some global objects, accessed directly through pointers, - * bypassing hash table). Currently algorithm scans them over - * and over again. Probably we should move busy objects out of - * LRU, or we can live with that. - */ - if (nr-- == 0) - break; - if (cfs_atomic_read(&h->loh_ref) > 0) + start = s->ls_purge_start; + bnr = (nr == ~0) ? -1 : nr / CFS_HASH_NBKT(s->ls_obj_hash) + 1; + again: + /* + * It doesn't make any sense to make purge threads parallel, that can + * only bring troubles to us. See LU-5331. + */ + mutex_lock(&s->ls_purge_mutex); + did_sth = 0; + cfs_hash_for_each_bucket(s->ls_obj_hash, &bd, i) { + if (i < start) continue; - cfs_hlist_del_init(&h->loh_hash); - cfs_list_move(&h->loh_lru, &dispose); - s->ls_total --; + count = bnr; + cfs_hash_bd_lock(s->ls_obj_hash, &bd, 1); + bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, &bd); + + list_for_each_entry_safe(h, temp, &bkt->lsb_lru, loh_lru) { + LASSERT(atomic_read(&h->loh_ref) == 0); + + cfs_hash_bd_get(s->ls_obj_hash, &h->loh_fid, &bd2); + LASSERT(bd.bd_bucket == bd2.bd_bucket); + + cfs_hash_bd_del_locked(s->ls_obj_hash, + &bd2, &h->loh_hash); + list_move(&h->loh_lru, &dispose); + if (did_sth == 0) + did_sth = 1; + + if (nr != ~0 && --nr == 0) + break; + + if (count > 0 && --count == 0) + break; + + } + cfs_hash_bd_unlock(s->ls_obj_hash, &bd, 1); + cond_resched(); + /* + * Free everything on the dispose list. This is safe against + * races due to the reasons described in lu_object_put(). + */ + while (!list_empty(&dispose)) { + h = container_of0(dispose.next, + struct lu_object_header, loh_lru); + list_del_init(&h->loh_lru); + lu_object_free(env, lu_object_top(h)); + lprocfs_counter_incr(s->ls_stats, LU_SS_LRU_PURGED); + } + + if (nr == 0) + break; } - cfs_write_unlock(&s->ls_guard); - /* - * Free everything on the dispose list. This is safe against races due - * to the reasons described in lu_object_put(). - */ - while (!cfs_list_empty(&dispose)) { - h = container_of0(dispose.next, - struct lu_object_header, loh_lru); - cfs_list_del_init(&h->loh_lru); - lu_object_free(env, lu_object_top(h)); - s->ls_stats.s_lru_purged ++; + mutex_unlock(&s->ls_purge_mutex); + + if (nr != 0 && did_sth && start != 0) { + start = 0; /* restart from the first bucket */ + goto again; } + /* race on s->ls_purge_start, but nobody cares */ + s->ls_purge_start = i % CFS_HASH_NBKT(s->ls_obj_hash); + return nr; } EXPORT_SYMBOL(lu_site_purge); @@ -304,7 +444,7 @@ enum { * * XXX overflow is not handled correctly. */ - LU_CDEBUG_LINE = 256 + LU_CDEBUG_LINE = 512 }; struct lu_cdebug_data { @@ -322,9 +462,10 @@ LU_KEY_INIT_FINI(lu_global, struct lu_cdebug_data); * lu_global_init(). */ struct lu_context_key lu_global_key = { - .lct_tags = LCT_MD_THREAD|LCT_DT_THREAD|LCT_CL_THREAD, - .lct_init = lu_global_key_init, - .lct_fini = lu_global_key_fini + .lct_tags = LCT_MD_THREAD | LCT_DT_THREAD | + LCT_MG_THREAD | LCT_CL_THREAD | LCT_LOCAL, + .lct_init = lu_global_key_init, + .lct_fini = lu_global_key_fini }; /** @@ -333,8 +474,8 @@ struct lu_context_key lu_global_key = { int lu_cdebug_printer(const struct lu_env *env, void *cookie, const char *format, ...) 
{ - struct lu_cdebug_print_info *info = cookie; - struct lu_cdebug_data *key; + struct libcfs_debug_msg_data *msgdata = cookie; + struct lu_cdebug_data *key; int used; int complete; va_list args; @@ -352,10 +493,8 @@ int lu_cdebug_printer(const struct lu_env *env, vsnprintf(key->lck_area + used, ARRAY_SIZE(key->lck_area) - used, format, args); if (complete) { - if (cfs_cdebug_show(info->lpi_mask, info->lpi_subsys)) - libcfs_debug_msg(NULL, info->lpi_subsys, info->lpi_mask, - (char *)info->lpi_file, info->lpi_fn, - info->lpi_line, "%s", key->lck_area); + if (cfs_cdebug_show(msgdata->msg_mask, msgdata->msg_subsys)) + libcfs_debug_msg(msgdata, "%s\n", key->lck_area); key->lck_area[0] = 0; } va_end(args); @@ -370,13 +509,13 @@ void lu_object_header_print(const struct lu_env *env, void *cookie, lu_printer_t printer, const struct lu_object_header *hdr) { - (*printer)(env, cookie, "header@%p[%#lx, %d, "DFID"%s%s%s]", - hdr, hdr->loh_flags, cfs_atomic_read(&hdr->loh_ref), - PFID(&hdr->loh_fid), - cfs_hlist_unhashed(&hdr->loh_hash) ? "" : " hash", - cfs_list_empty((cfs_list_t *)&hdr->loh_lru) ? \ - "" : " lru", - hdr->loh_attr & LOHA_EXISTS ? " exist":""); + (*printer)(env, cookie, "header@%p[%#lx, %d, "DFID"%s%s%s]", + hdr, hdr->loh_flags, atomic_read(&hdr->loh_ref), + PFID(&hdr->loh_fid), + hlist_unhashed(&hdr->loh_hash) ? "" : " hash", + list_empty((struct list_head *)&hdr->loh_lru) ? \ + "" : " lru", + hdr->loh_attr & LOHA_EXISTS ? " exist" : ""); } EXPORT_SYMBOL(lu_object_header_print); @@ -384,28 +523,30 @@ EXPORT_SYMBOL(lu_object_header_print); * Print human readable representation of the \a o to the \a printer. */ void lu_object_print(const struct lu_env *env, void *cookie, - lu_printer_t printer, const struct lu_object *o) + lu_printer_t printer, const struct lu_object *o) { - static const char ruler[] = "........................................"; - struct lu_object_header *top; - int depth; + static const char ruler[] = "........................................"; + struct lu_object_header *top; + int depth = 4; - top = o->lo_header; - lu_object_header_print(env, cookie, printer, top); - (*printer)(env, cookie, "{ \n"); - cfs_list_for_each_entry(o, &top->loh_layers, lo_linkage) { - depth = o->lo_depth + 4; + top = o->lo_header; + lu_object_header_print(env, cookie, printer, top); + (*printer)(env, cookie, "{\n"); - /* - * print `.' \a depth times followed by type name and address - */ - (*printer)(env, cookie, "%*.*s%s@%p", depth, depth, ruler, - o->lo_dev->ld_type->ldt_name, o); - if (o->lo_ops->loo_object_print != NULL) - o->lo_ops->loo_object_print(env, cookie, printer, o); - (*printer)(env, cookie, "\n"); - } - (*printer)(env, cookie, "} header@%p\n", top); + list_for_each_entry(o, &top->loh_layers, lo_linkage) { + /* + * print `.' 
\a depth times followed by type name and address + */ + (*printer)(env, cookie, "%*.*s%s@%p", depth, depth, ruler, + o->lo_dev->ld_type->ldt_name, o); + + if (o->lo_ops->loo_object_print != NULL) + (*o->lo_ops->loo_object_print)(env, cookie, printer, o); + + (*printer)(env, cookie, "\n"); + } + + (*printer)(env, cookie, "} header@%p\n", top); } EXPORT_SYMBOL(lu_object_print); @@ -417,7 +558,7 @@ int lu_object_invariant(const struct lu_object *o) struct lu_object_header *top; top = o->lo_header; - cfs_list_for_each_entry(o, &top->loh_layers, lo_linkage) { + list_for_each_entry(o, &top->loh_layers, lo_linkage) { if (o->lo_ops->loo_object_invariant != NULL && !o->lo_ops->loo_object_invariant(o)) return 0; @@ -427,52 +568,76 @@ int lu_object_invariant(const struct lu_object *o) EXPORT_SYMBOL(lu_object_invariant); static struct lu_object *htable_lookup(struct lu_site *s, - const cfs_hlist_head_t *bucket, - const struct lu_fid *f, - cfs_waitlink_t *waiter) -{ - struct lu_object_header *h; - cfs_hlist_node_t *scan; - - cfs_hlist_for_each_entry(h, scan, bucket, loh_hash) { - s->ls_stats.s_cache_check ++; - if (likely(lu_fid_eq(&h->loh_fid, f))) { - if (unlikely(lu_object_is_dying(h))) { - /* - * Lookup found an object being destroyed; - * this object cannot be returned (to assure - * that references to dying objects are - * eventually drained), and moreover, lookup - * has to wait until object is freed. - */ - cfs_waitlink_init(waiter); - cfs_waitq_add(&s->ls_marche_funebre, waiter); - cfs_set_current_state(CFS_TASK_UNINT); - s->ls_stats.s_cache_death_race ++; - return ERR_PTR(-EAGAIN); - } - /* bump reference count... */ - if (cfs_atomic_add_return(1, &h->loh_ref) == 1) - ++ s->ls_busy; - /* and move to the head of the LRU */ - /* - * XXX temporary disable this to measure effects of - * read-write locking. - */ - /* list_move_tail(&h->loh_lru, &s->ls_lru); */ - s->ls_stats.s_cache_hit ++; - return lu_object_top(h); - } + cfs_hash_bd_t *bd, + const struct lu_fid *f, + wait_queue_t *waiter, + __u64 *version) +{ + struct lu_site_bkt_data *bkt; + struct lu_object_header *h; + struct hlist_node *hnode; + __u64 ver = cfs_hash_bd_version_get(bd); + + if (*version == ver) + return ERR_PTR(-ENOENT); + + *version = ver; + bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, bd); + /* cfs_hash_bd_peek_locked is a somehow "internal" function + * of cfs_hash, it doesn't add refcount on object. */ + hnode = cfs_hash_bd_peek_locked(s->ls_obj_hash, bd, (void *)f); + if (hnode == NULL) { + lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_MISS); + return ERR_PTR(-ENOENT); } - s->ls_stats.s_cache_miss ++; - return NULL; + + h = container_of0(hnode, struct lu_object_header, loh_hash); + if (likely(!lu_object_is_dying(h))) { + cfs_hash_get(s->ls_obj_hash, hnode); + lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_HIT); + list_del_init(&h->loh_lru); + return lu_object_top(h); + } + + /* + * Lookup found an object being destroyed this object cannot be + * returned (to assure that references to dying objects are eventually + * drained), and moreover, lookup has to wait until object is freed. 
+ */ + + if (likely(waiter != NULL)) { + init_waitqueue_entry_current(waiter); + add_wait_queue(&bkt->lsb_marche_funebre, waiter); + set_current_state(TASK_UNINTERRUPTIBLE); + lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_DEATH_RACE); + } + + return ERR_PTR(-EAGAIN); } -static __u32 fid_hash(const struct lu_fid *f, int bits) +static struct lu_object *htable_lookup_nowait(struct lu_site *s, + cfs_hash_bd_t *bd, + const struct lu_fid *f) { - /* all objects with same id and different versions will belong to same - * collisions list. */ - return cfs_hash_long(fid_flatten(f), bits); + struct hlist_node *hnode; + struct lu_object_header *h; + + /* cfs_hash_bd_peek_locked is a somehow "internal" function + * of cfs_hash, it doesn't add refcount on object. */ + hnode = cfs_hash_bd_peek_locked(s->ls_obj_hash, bd, (void *)f); + if (hnode == NULL) { + lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_MISS); + return ERR_PTR(-ENOENT); + } + + h = container_of0(hnode, struct lu_object_header, loh_hash); + if (unlikely(lu_object_is_dying(h))) + return ERR_PTR(-ENOENT); + + cfs_hash_get(s->ls_obj_hash, hnode); + lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_HIT); + list_del_init(&h->loh_lru); + return lu_object_top(h); } /** @@ -488,19 +653,71 @@ struct lu_object *lu_object_find(const struct lu_env *env, } EXPORT_SYMBOL(lu_object_find); +/* + * Limit the lu_object cache to a maximum of lu_cache_nr objects. Because + * the calculation for the number of objects to reclaim is not covered by + * a lock the maximum number of objects is capped by LU_CACHE_MAX_ADJUST. + * This ensures that many concurrent threads will not accidentally purge + * the entire cache. + */ +static void lu_object_limit(const struct lu_env *env, + struct lu_device *dev) +{ + __u64 size, nr; + + if (lu_cache_nr == LU_CACHE_NR_UNLIMITED) + return; + + size = cfs_hash_size_get(dev->ld_site->ls_obj_hash); + nr = (__u64)lu_cache_nr; + if (size > nr) + lu_site_purge(env, dev->ld_site, + MIN(size - nr, LU_CACHE_NR_MAX_ADJUST)); + + return; +} + +static struct lu_object *lu_object_new(const struct lu_env *env, + struct lu_device *dev, + const struct lu_fid *f, + const struct lu_object_conf *conf) +{ + struct lu_object *o; + cfs_hash_t *hs; + cfs_hash_bd_t bd; + struct lu_site_bkt_data *bkt; + + o = lu_object_alloc(env, dev, f, conf); + if (unlikely(IS_ERR(o))) + return o; + + hs = dev->ld_site->ls_obj_hash; + cfs_hash_bd_get_and_lock(hs, (void *)f, &bd, 1); + bkt = cfs_hash_bd_extra_get(hs, &bd); + cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash); + bkt->lsb_busy++; + cfs_hash_bd_unlock(hs, &bd, 1); + + lu_object_limit(env, dev); + + return o; +} + /** * Core logic of lu_object_find*() functions. */ static struct lu_object *lu_object_find_try(const struct lu_env *env, - struct lu_device *dev, - const struct lu_fid *f, - const struct lu_object_conf *conf, - cfs_waitlink_t *waiter) -{ - struct lu_site *s; - struct lu_object *o; - struct lu_object *shadow; - cfs_hlist_head_t *bucket; + struct lu_device *dev, + const struct lu_fid *f, + const struct lu_object_conf *conf, + wait_queue_t *waiter) +{ + struct lu_object *o; + struct lu_object *shadow; + struct lu_site *s; + cfs_hash_t *hs; + cfs_hash_bd_t bd; + __u64 version = 0; /* * This uses standard index maintenance protocol: @@ -515,18 +732,22 @@ static struct lu_object *lu_object_find_try(const struct lu_env *env, * - unlock index; * - return object. * + * For "LOC_F_NEW" case, we are sure the object is new established. 
+ * It is unnecessary to perform lookup-alloc-lookup-insert, instead, + * just alloc and insert directly. + * * If dying object is found during index search, add @waiter to the * site wait-queue and return ERR_PTR(-EAGAIN). */ - - s = dev->ld_site; - bucket = s->ls_hash + fid_hash(f, s->ls_hash_bits); - - cfs_read_lock(&s->ls_guard); - o = htable_lookup(s, bucket, f, waiter); - cfs_read_unlock(&s->ls_guard); - - if (o != NULL) + if (conf != NULL && conf->loc_flags & LOC_F_NEW) + return lu_object_new(env, dev, f, conf); + + s = dev->ld_site; + hs = s->ls_obj_hash; + cfs_hash_bd_get_and_lock(hs, (void *)f, &bd, 1); + o = htable_lookup(s, &bd, f, waiter, &version); + cfs_hash_bd_unlock(hs, &bd, 1); + if (!IS_ERR(o) || PTR_ERR(o) != -ENOENT) return o; /* @@ -539,20 +760,25 @@ static struct lu_object *lu_object_find_try(const struct lu_env *env, LASSERT(lu_fid_eq(lu_object_fid(o), f)); - cfs_write_lock(&s->ls_guard); - shadow = htable_lookup(s, bucket, f, waiter); - if (likely(shadow == NULL)) { - cfs_hlist_add_head(&o->lo_header->loh_hash, bucket); - cfs_list_add_tail(&o->lo_header->loh_lru, &s->ls_lru); - ++ s->ls_busy; - ++ s->ls_total; - shadow = o; - o = NULL; - } else - s->ls_stats.s_cache_race ++; - cfs_write_unlock(&s->ls_guard); - if (o != NULL) - lu_object_free(env, o); + cfs_hash_bd_lock(hs, &bd, 1); + + shadow = htable_lookup(s, &bd, f, waiter, &version); + if (likely(IS_ERR(shadow) && PTR_ERR(shadow) == -ENOENT)) { + struct lu_site_bkt_data *bkt; + + bkt = cfs_hash_bd_extra_get(hs, &bd); + cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash); + bkt->lsb_busy++; + cfs_hash_bd_unlock(hs, &bd, 1); + + lu_object_limit(env, dev); + + return o; + } + + lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_RACE); + cfs_hash_bd_unlock(hs, &bd, 1); + lu_object_free(env, o); return shadow; } @@ -562,28 +788,58 @@ static struct lu_object *lu_object_find_try(const struct lu_env *env, * objects of different "stacking" to be created within the same site. */ struct lu_object *lu_object_find_at(const struct lu_env *env, - struct lu_device *dev, - const struct lu_fid *f, - const struct lu_object_conf *conf) + struct lu_device *dev, + const struct lu_fid *f, + const struct lu_object_conf *conf) +{ + struct lu_site_bkt_data *bkt; + struct lu_object *obj; + wait_queue_t wait; + + while (1) { + if (conf != NULL && conf->loc_flags & LOC_F_NOWAIT) { + obj = lu_object_find_try(env, dev, f, conf, NULL); + + return obj; + } + + obj = lu_object_find_try(env, dev, f, conf, &wait); + if (obj != ERR_PTR(-EAGAIN)) + return obj; + /* + * lu_object_find_try() already added waiter into the + * wait queue. + */ + waitq_wait(&wait, TASK_UNINTERRUPTIBLE); + bkt = lu_site_bkt_from_fid(dev->ld_site, (void *)f); + remove_wait_queue(&bkt->lsb_marche_funebre, &wait); + } +} +EXPORT_SYMBOL(lu_object_find_at); + +/** + * Try to find the object in cache without waiting for the dead object + * to be released nor allocating object if no cached one was found. + * + * The found object will be set as LU_OBJECT_HEARD_BANSHEE for purging. + */ +void lu_object_purge(const struct lu_env *env, struct lu_device *dev, + const struct lu_fid *f) { - struct lu_object *obj; - cfs_waitlink_t wait; + struct lu_site *s = dev->ld_site; + cfs_hash_t *hs = s->ls_obj_hash; + cfs_hash_bd_t bd; + struct lu_object *o; - while (1) { - obj = lu_object_find_try(env, dev, f, conf, &wait); - if (obj == ERR_PTR(-EAGAIN)) { - /* - * lu_object_find_try() already added waiter into the - * wait queue. 
- */ - cfs_waitq_wait(&wait, CFS_TASK_UNINT); - cfs_waitq_del(&dev->ld_site->ls_marche_funebre, &wait); - } else - break; - } - return obj; + cfs_hash_bd_get_and_lock(hs, f, &bd, 1); + o = htable_lookup_nowait(s, &bd, f); + cfs_hash_bd_unlock(hs, &bd, 1); + if (!IS_ERR(o)) { + set_bit(LU_OBJECT_HEARD_BANSHEE, &o->lo_header->loh_flags); + lu_object_put(env, o); + } } -EXPORT_SYMBOL(lu_object_find_at); +EXPORT_SYMBOL(lu_object_purge); /** * Find object with given fid, and return its slice belonging to given device. @@ -610,88 +866,110 @@ EXPORT_SYMBOL(lu_object_find_slice); /** * Global list of all device types. */ -static CFS_LIST_HEAD(lu_device_types); +static struct list_head lu_device_types; int lu_device_type_init(struct lu_device_type *ldt) { - int result; + int result = 0; - CFS_INIT_LIST_HEAD(&ldt->ldt_linkage); - result = ldt->ldt_ops->ldto_init(ldt); - if (result == 0) - cfs_list_add(&ldt->ldt_linkage, &lu_device_types); - return result; + atomic_set(&ldt->ldt_device_nr, 0); + INIT_LIST_HEAD(&ldt->ldt_linkage); + if (ldt->ldt_ops->ldto_init) + result = ldt->ldt_ops->ldto_init(ldt); + + if (result == 0) { + spin_lock(&obd_types_lock); + list_add(&ldt->ldt_linkage, &lu_device_types); + spin_unlock(&obd_types_lock); + } + + return result; } EXPORT_SYMBOL(lu_device_type_init); void lu_device_type_fini(struct lu_device_type *ldt) { - cfs_list_del_init(&ldt->ldt_linkage); - ldt->ldt_ops->ldto_fini(ldt); + spin_lock(&obd_types_lock); + list_del_init(&ldt->ldt_linkage); + spin_unlock(&obd_types_lock); + if (ldt->ldt_ops->ldto_fini) + ldt->ldt_ops->ldto_fini(ldt); } EXPORT_SYMBOL(lu_device_type_fini); -void lu_types_stop(void) -{ - struct lu_device_type *ldt; - - cfs_list_for_each_entry(ldt, &lu_device_types, ldt_linkage) { - if (ldt->ldt_device_nr == 0) - ldt->ldt_ops->ldto_stop(ldt); - } -} -EXPORT_SYMBOL(lu_types_stop); - /** * Global list of all sites on this node */ -static CFS_LIST_HEAD(lu_sites); -static CFS_DECLARE_MUTEX(lu_sites_guard); +static struct list_head lu_sites; +static DEFINE_MUTEX(lu_sites_guard); /** * Global environment used by site shrinker. */ static struct lu_env lu_shrink_env; +struct lu_site_print_arg { + struct lu_env *lsp_env; + void *lsp_cookie; + lu_printer_t lsp_printer; +}; + +static int +lu_site_obj_print(cfs_hash_t *hs, cfs_hash_bd_t *bd, + struct hlist_node *hnode, void *data) +{ + struct lu_site_print_arg *arg = (struct lu_site_print_arg *)data; + struct lu_object_header *h; + + h = hlist_entry(hnode, struct lu_object_header, loh_hash); + if (!list_empty(&h->loh_layers)) { + const struct lu_object *o; + + o = lu_object_top(h); + lu_object_print(arg->lsp_env, arg->lsp_cookie, + arg->lsp_printer, o); + } else { + lu_object_header_print(arg->lsp_env, arg->lsp_cookie, + arg->lsp_printer, h); + } + return 0; +} + /** * Print all objects in \a s. 
*/ void lu_site_print(const struct lu_env *env, struct lu_site *s, void *cookie, lu_printer_t printer) { - int i; - - for (i = 0; i < s->ls_hash_size; ++i) { - struct lu_object_header *h; - cfs_hlist_node_t *scan; + struct lu_site_print_arg arg = { + .lsp_env = (struct lu_env *)env, + .lsp_cookie = cookie, + .lsp_printer = printer, + }; - cfs_read_lock(&s->ls_guard); - cfs_hlist_for_each_entry(h, scan, &s->ls_hash[i], loh_hash) { - - if (!cfs_list_empty(&h->loh_layers)) { - const struct lu_object *obj; - - obj = lu_object_top(h); - lu_object_print(env, cookie, printer, obj); - } else - lu_object_header_print(env, cookie, printer, h); - } - cfs_read_unlock(&s->ls_guard); - } + cfs_hash_for_each(s->ls_obj_hash, lu_site_obj_print, &arg); } EXPORT_SYMBOL(lu_site_print); -enum { - LU_CACHE_PERCENT = 20, -}; - /** * Return desired hash table order. */ -static int lu_htable_order(void) -{ - unsigned long cache_size; - int bits; +static unsigned int lu_htable_order(struct lu_device *top) +{ + unsigned long cache_size; + unsigned int bits; + + /* + * For ZFS based OSDs the cache should be disabled by default. This + * allows the ZFS ARC maximum flexibility in determining what buffers + * to cache. If Lustre has objects or buffer which it wants to ensure + * always stay cached it must maintain a hold on them. + */ + if (strcmp(top->ld_type->ldt_name, LUSTRE_OSD_ZFS_NAME) == 0) { + lu_cache_percent = 1; + lu_cache_nr = LU_CACHE_NR_ZFS_LIMIT; + return LU_SITE_BITS_MIN; + } /* * Calculate hash table size, assuming that we want reasonable @@ -700,16 +978,25 @@ static int lu_htable_order(void) * * Size of lu_object is (arbitrary) taken as 1K (together with inode). */ - cache_size = cfs_num_physpages; + cache_size = totalram_pages; #if BITS_PER_LONG == 32 /* limit hashtable size for lowmem systems to low RAM */ - if (cache_size > 1 << (30 - CFS_PAGE_SHIFT)) - cache_size = 1 << (30 - CFS_PAGE_SHIFT) * 3 / 4; + if (cache_size > 1 << (30 - PAGE_CACHE_SHIFT)) + cache_size = 1 << (30 - PAGE_CACHE_SHIFT) * 3 / 4; #endif - cache_size = cache_size / 100 * LU_CACHE_PERCENT * - (CFS_PAGE_SIZE / 1024); + /* clear off unreasonable cache setting. */ + if (lu_cache_percent == 0 || lu_cache_percent > LU_CACHE_PERCENT_MAX) { + CWARN("obdclass: invalid lu_cache_percent: %u, it must be in" + " the range of (0, %u]. 
Will use default value: %u.\n", + lu_cache_percent, LU_CACHE_PERCENT_MAX, + LU_CACHE_PERCENT_DEFAULT); + + lu_cache_percent = LU_CACHE_PERCENT_DEFAULT; + } + cache_size = cache_size / 100 * lu_cache_percent * + (PAGE_CACHE_SIZE / 1024); for (bits = 1; (1 << bits) < cache_size; ++bits) { ; @@ -717,47 +1004,167 @@ static int lu_htable_order(void) return bits; } -static cfs_lock_class_key_t lu_site_guard_class; +static unsigned lu_obj_hop_hash(cfs_hash_t *hs, + const void *key, unsigned mask) +{ + struct lu_fid *fid = (struct lu_fid *)key; + __u32 hash; + + hash = fid_flatten32(fid); + hash += (hash >> 4) + (hash << 12); /* mixing oid and seq */ + hash = hash_long(hash, hs->hs_bkt_bits); + + /* give me another random factor */ + hash -= hash_long((unsigned long)hs, fid_oid(fid) % 11 + 3); + + hash <<= hs->hs_cur_bits - hs->hs_bkt_bits; + hash |= (fid_seq(fid) + fid_oid(fid)) & (CFS_HASH_NBKT(hs) - 1); + + return hash & mask; +} + +static void *lu_obj_hop_object(struct hlist_node *hnode) +{ + return hlist_entry(hnode, struct lu_object_header, loh_hash); +} + +static void *lu_obj_hop_key(struct hlist_node *hnode) +{ + struct lu_object_header *h; + + h = hlist_entry(hnode, struct lu_object_header, loh_hash); + return &h->loh_fid; +} + +static int lu_obj_hop_keycmp(const void *key, struct hlist_node *hnode) +{ + struct lu_object_header *h; + + h = hlist_entry(hnode, struct lu_object_header, loh_hash); + return lu_fid_eq(&h->loh_fid, (struct lu_fid *)key); +} + +static void lu_obj_hop_get(cfs_hash_t *hs, struct hlist_node *hnode) +{ + struct lu_object_header *h; + + h = hlist_entry(hnode, struct lu_object_header, loh_hash); + if (atomic_add_return(1, &h->loh_ref) == 1) { + struct lu_site_bkt_data *bkt; + cfs_hash_bd_t bd; + + cfs_hash_bd_get(hs, &h->loh_fid, &bd); + bkt = cfs_hash_bd_extra_get(hs, &bd); + bkt->lsb_busy++; + } +} + +static void lu_obj_hop_put_locked(cfs_hash_t *hs, struct hlist_node *hnode) +{ + LBUG(); /* we should never called it */ +} + +cfs_hash_ops_t lu_site_hash_ops = { + .hs_hash = lu_obj_hop_hash, + .hs_key = lu_obj_hop_key, + .hs_keycmp = lu_obj_hop_keycmp, + .hs_object = lu_obj_hop_object, + .hs_get = lu_obj_hop_get, + .hs_put_locked = lu_obj_hop_put_locked, +}; + +void lu_dev_add_linkage(struct lu_site *s, struct lu_device *d) +{ + spin_lock(&s->ls_ld_lock); + if (list_empty(&d->ld_linkage)) + list_add(&d->ld_linkage, &s->ls_ld_linkage); + spin_unlock(&s->ls_ld_lock); +} +EXPORT_SYMBOL(lu_dev_add_linkage); + +void lu_dev_del_linkage(struct lu_site *s, struct lu_device *d) +{ + spin_lock(&s->ls_ld_lock); + list_del_init(&d->ld_linkage); + spin_unlock(&s->ls_ld_lock); +} +EXPORT_SYMBOL(lu_dev_del_linkage); /** - * Initialize site \a s, with \a d as the top level device. - */ + * Initialize site \a s, with \a d as the top level device. 
+ */ int lu_site_init(struct lu_site *s, struct lu_device *top) { - int bits; - int size; - int i; - ENTRY; - - memset(s, 0, sizeof *s); - cfs_rwlock_init(&s->ls_guard); - cfs_lockdep_set_class(&s->ls_guard, &lu_site_guard_class); - CFS_INIT_LIST_HEAD(&s->ls_lru); - CFS_INIT_LIST_HEAD(&s->ls_linkage); - cfs_waitq_init(&s->ls_marche_funebre); + struct lu_site_bkt_data *bkt; + cfs_hash_bd_t bd; + char name[16]; + unsigned int bits; + unsigned int i; + ENTRY; + + memset(s, 0, sizeof *s); + mutex_init(&s->ls_purge_mutex); + bits = lu_htable_order(top); + snprintf(name, 16, "lu_site_%s", top->ld_type->ldt_name); + for (bits = clamp_t(typeof(bits), bits, + LU_SITE_BITS_MIN, LU_SITE_BITS_MAX); + bits >= LU_SITE_BITS_MIN; bits--) { + s->ls_obj_hash = cfs_hash_create(name, bits, bits, + bits - LU_SITE_BKT_BITS, + sizeof(*bkt), 0, 0, + &lu_site_hash_ops, + CFS_HASH_SPIN_BKTLOCK | + CFS_HASH_NO_ITEMREF | + CFS_HASH_DEPTH | + CFS_HASH_ASSERT_EMPTY | + CFS_HASH_COUNTER); + if (s->ls_obj_hash != NULL) + break; + } + + if (s->ls_obj_hash == NULL) { + CERROR("failed to create lu_site hash with bits: %d\n", bits); + return -ENOMEM; + } + + cfs_hash_for_each_bucket(s->ls_obj_hash, &bd, i) { + bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, &bd); + INIT_LIST_HEAD(&bkt->lsb_lru); + init_waitqueue_head(&bkt->lsb_marche_funebre); + } + + s->ls_stats = lprocfs_alloc_stats(LU_SS_LAST_STAT, 0); + if (s->ls_stats == NULL) { + cfs_hash_putref(s->ls_obj_hash); + s->ls_obj_hash = NULL; + return -ENOMEM; + } + + lprocfs_counter_init(s->ls_stats, LU_SS_CREATED, + 0, "created", "created"); + lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_HIT, + 0, "cache_hit", "cache_hit"); + lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_MISS, + 0, "cache_miss", "cache_miss"); + lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_RACE, + 0, "cache_race", "cache_race"); + lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_DEATH_RACE, + 0, "cache_death_race", "cache_death_race"); + lprocfs_counter_init(s->ls_stats, LU_SS_LRU_PURGED, + 0, "lru_purged", "lru_purged"); + + INIT_LIST_HEAD(&s->ls_linkage); s->ls_top_dev = top; top->ld_site = s; lu_device_get(top); lu_ref_add(&top->ld_reference, "site-top", s); - for (bits = lu_htable_order(), size = 1 << bits; - (s->ls_hash = - cfs_alloc_large(size * sizeof s->ls_hash[0])) == NULL; - --bits, size >>= 1) { - /* - * Scale hash table down, until allocation succeeds. 
- */ - ; - } - - s->ls_hash_size = size; - s->ls_hash_bits = bits; - s->ls_hash_mask = size - 1; + INIT_LIST_HEAD(&s->ls_ld_linkage); + spin_lock_init(&s->ls_ld_lock); - for (i = 0; i < size; i++) - CFS_INIT_HLIST_HEAD(&s->ls_hash[i]); + lu_dev_add_linkage(s, top); - RETURN(0); + RETURN(0); } EXPORT_SYMBOL(lu_site_init); @@ -766,26 +1173,24 @@ EXPORT_SYMBOL(lu_site_init); */ void lu_site_fini(struct lu_site *s) { - LASSERT(cfs_list_empty(&s->ls_lru)); - LASSERT(s->ls_total == 0); - - cfs_down(&lu_sites_guard); - cfs_list_del_init(&s->ls_linkage); - cfs_up(&lu_sites_guard); + mutex_lock(&lu_sites_guard); + list_del_init(&s->ls_linkage); + mutex_unlock(&lu_sites_guard); - if (s->ls_hash != NULL) { - int i; - for (i = 0; i < s->ls_hash_size; i++) - LASSERT(cfs_hlist_empty(&s->ls_hash[i])); - cfs_free_large(s->ls_hash); - s->ls_hash = NULL; + if (s->ls_obj_hash != NULL) { + cfs_hash_putref(s->ls_obj_hash); + s->ls_obj_hash = NULL; } + if (s->ls_top_dev != NULL) { s->ls_top_dev->ld_site = NULL; lu_ref_del(&s->ls_top_dev->ld_reference, "site-top", s); lu_device_put(s->ls_top_dev); s->ls_top_dev = NULL; } + + if (s->ls_stats != NULL) + lprocfs_free_stats(&s->ls_stats); } EXPORT_SYMBOL(lu_site_fini); @@ -795,11 +1200,11 @@ EXPORT_SYMBOL(lu_site_fini); int lu_site_init_finish(struct lu_site *s) { int result; - cfs_down(&lu_sites_guard); + mutex_lock(&lu_sites_guard); result = lu_context_refill(&lu_shrink_env.le_ctx); if (result == 0) - cfs_list_add(&s->ls_linkage, &lu_sites); - cfs_up(&lu_sites_guard); + list_add(&s->ls_linkage, &lu_sites); + mutex_unlock(&lu_sites_guard); return result; } EXPORT_SYMBOL(lu_site_init_finish); @@ -809,7 +1214,7 @@ EXPORT_SYMBOL(lu_site_init_finish); */ void lu_device_get(struct lu_device *d) { - cfs_atomic_inc(&d->ld_ref); + atomic_inc(&d->ld_ref); } EXPORT_SYMBOL(lu_device_get); @@ -818,8 +1223,8 @@ EXPORT_SYMBOL(lu_device_get); */ void lu_device_put(struct lu_device *d) { - LASSERT(cfs_atomic_read(&d->ld_ref) > 0); - cfs_atomic_dec(&d->ld_ref); + LASSERT(atomic_read(&d->ld_ref) > 0); + atomic_dec(&d->ld_ref); } EXPORT_SYMBOL(lu_device_put); @@ -828,13 +1233,16 @@ EXPORT_SYMBOL(lu_device_put); */ int lu_device_init(struct lu_device *d, struct lu_device_type *t) { - if (t->ldt_device_nr++ == 0 && t->ldt_ops->ldto_start != NULL) - t->ldt_ops->ldto_start(t); - memset(d, 0, sizeof *d); - cfs_atomic_set(&d->ld_ref, 0); - d->ld_type = t; - lu_ref_init(&d->ld_reference); - return 0; + if (atomic_inc_return(&t->ldt_device_nr) == 1 && + t->ldt_ops->ldto_start != NULL) + t->ldt_ops->ldto_start(t); + + memset(d, 0, sizeof *d); + d->ld_type = t; + lu_ref_init(&d->ld_reference); + INIT_LIST_HEAD(&d->ld_linkage); + + return 0; } EXPORT_SYMBOL(lu_device_init); @@ -843,20 +1251,21 @@ EXPORT_SYMBOL(lu_device_init); */ void lu_device_fini(struct lu_device *d) { - struct lu_device_type *t; + struct lu_device_type *t = d->ld_type; - t = d->ld_type; - if (d->ld_obd != NULL) { - d->ld_obd->obd_lu_dev = NULL; - d->ld_obd = NULL; - } + if (d->ld_obd != NULL) { + d->ld_obd->obd_lu_dev = NULL; + d->ld_obd = NULL; + } - lu_ref_fini(&d->ld_reference); - LASSERTF(cfs_atomic_read(&d->ld_ref) == 0, - "Refcount is %u\n", cfs_atomic_read(&d->ld_ref)); - LASSERT(t->ldt_device_nr > 0); - if (--t->ldt_device_nr == 0 && t->ldt_ops->ldto_stop != NULL) - t->ldt_ops->ldto_stop(t); + lu_ref_fini(&d->ld_reference); + LASSERTF(atomic_read(&d->ld_ref) == 0, + "Refcount is %u\n", atomic_read(&d->ld_ref)); + LASSERT(atomic_read(&t->ldt_device_nr) > 0); + + if (atomic_dec_and_test(&t->ldt_device_nr) && + 
t->ldt_ops->ldto_stop != NULL) + t->ldt_ops->ldto_stop(t); } EXPORT_SYMBOL(lu_device_fini); @@ -864,16 +1273,17 @@ EXPORT_SYMBOL(lu_device_fini); * Initialize object \a o that is part of compound object \a h and was created * by device \a d. */ -int lu_object_init(struct lu_object *o, - struct lu_object_header *h, struct lu_device *d) +int lu_object_init(struct lu_object *o, struct lu_object_header *h, + struct lu_device *d) { - memset(o, 0, sizeof *o); - o->lo_header = h; - o->lo_dev = d; - lu_device_get(d); - o->lo_dev_ref = lu_ref_add(&d->ld_reference, "lu_object", o); - CFS_INIT_LIST_HEAD(&o->lo_linkage); - return 0; + memset(o, 0, sizeof(*o)); + o->lo_header = h; + o->lo_dev = d; + lu_device_get(d); + lu_ref_add_at(&d->ld_reference, &o->lo_dev_ref, "lu_object", o); + INIT_LIST_HEAD(&o->lo_linkage); + + return 0; } EXPORT_SYMBOL(lu_object_init); @@ -882,16 +1292,16 @@ EXPORT_SYMBOL(lu_object_init); */ void lu_object_fini(struct lu_object *o) { - struct lu_device *dev = o->lo_dev; + struct lu_device *dev = o->lo_dev; - LASSERT(cfs_list_empty(&o->lo_linkage)); + LASSERT(list_empty(&o->lo_linkage)); - if (dev != NULL) { - lu_ref_del_at(&dev->ld_reference, - o->lo_dev_ref , "lu_object", o); - lu_device_put(dev); - o->lo_dev = NULL; - } + if (dev != NULL) { + lu_ref_del_at(&dev->ld_reference, &o->lo_dev_ref, + "lu_object", o); + lu_device_put(dev); + o->lo_dev = NULL; + } } EXPORT_SYMBOL(lu_object_fini); @@ -903,7 +1313,7 @@ EXPORT_SYMBOL(lu_object_fini); */ void lu_object_add_top(struct lu_object_header *h, struct lu_object *o) { - cfs_list_move(&o->lo_linkage, &h->loh_layers); + list_move(&o->lo_linkage, &h->loh_layers); } EXPORT_SYMBOL(lu_object_add_top); @@ -915,7 +1325,7 @@ EXPORT_SYMBOL(lu_object_add_top); */ void lu_object_add(struct lu_object *before, struct lu_object *o) { - cfs_list_move(&o->lo_linkage, &before->lo_linkage); + list_move(&o->lo_linkage, &before->lo_linkage); } EXPORT_SYMBOL(lu_object_add); @@ -925,10 +1335,10 @@ EXPORT_SYMBOL(lu_object_add); int lu_object_header_init(struct lu_object_header *h) { memset(h, 0, sizeof *h); - cfs_atomic_set(&h->loh_ref, 1); - CFS_INIT_HLIST_NODE(&h->loh_hash); - CFS_INIT_LIST_HEAD(&h->loh_lru); - CFS_INIT_LIST_HEAD(&h->loh_layers); + atomic_set(&h->loh_ref, 1); + INIT_HLIST_NODE(&h->loh_hash); + INIT_LIST_HEAD(&h->loh_lru); + INIT_LIST_HEAD(&h->loh_layers); lu_ref_init(&h->loh_reference); return 0; } @@ -939,9 +1349,9 @@ EXPORT_SYMBOL(lu_object_header_init); */ void lu_object_header_fini(struct lu_object_header *h) { - LASSERT(cfs_list_empty(&h->loh_layers)); - LASSERT(cfs_list_empty(&h->loh_lru)); - LASSERT(cfs_hlist_unhashed(&h->loh_hash)); + LASSERT(list_empty(&h->loh_layers)); + LASSERT(list_empty(&h->loh_lru)); + LASSERT(hlist_unhashed(&h->loh_hash)); lu_ref_fini(&h->loh_reference); } EXPORT_SYMBOL(lu_object_header_fini); @@ -953,18 +1363,16 @@ EXPORT_SYMBOL(lu_object_header_fini); struct lu_object *lu_object_locate(struct lu_object_header *h, const struct lu_device_type *dtype) { - struct lu_object *o; + struct lu_object *o; - cfs_list_for_each_entry(o, &h->loh_layers, lo_linkage) { - if (o->lo_dev->ld_type == dtype) - return o; - } - return NULL; + list_for_each_entry(o, &h->loh_layers, lo_linkage) { + if (o->lo_dev->ld_type == dtype) + return o; + } + return NULL; } EXPORT_SYMBOL(lu_object_locate); - - /** * Finalize and free devices in the device stack. * @@ -988,15 +1396,6 @@ void lu_stack_fini(const struct lu_env *env, struct lu_device *top) /* purge again. 
*/ lu_site_purge(env, site, ~0); - if (!cfs_list_empty(&site->ls_lru) || site->ls_total != 0) { - /* - * Uh-oh, objects still exist. - */ - static DECLARE_LU_CDEBUG_PRINT_INFO(cookie, D_ERROR); - - lu_site_print(env, site, &cookie, lu_cdebug_printer); - } - for (scan = top; scan != NULL; scan = next) { const struct lu_device_type *ldt = scan->ld_type; struct obd_type *type; @@ -1015,12 +1414,12 @@ enum { /** * Maximal number of tld slots. */ - LU_CONTEXT_KEY_NR = 32 + LU_CONTEXT_KEY_NR = 40 }; static struct lu_context_key *lu_keys[LU_CONTEXT_KEY_NR] = { NULL, }; -static cfs_spinlock_t lu_keys_guard = CFS_SPIN_LOCK_UNLOCKED; +static DEFINE_SPINLOCK(lu_keys_guard); /** * Global counter incremented whenever key is registered, unregistered, @@ -1035,8 +1434,8 @@ static unsigned key_set_version = 0; */ int lu_context_key_register(struct lu_context_key *key) { - int result; - int i; + int result; + unsigned int i; LASSERT(key->lct_init != NULL); LASSERT(key->lct_fini != NULL); @@ -1044,11 +1443,11 @@ int lu_context_key_register(struct lu_context_key *key) LASSERT(key->lct_owner != NULL); result = -ENFILE; - cfs_spin_lock(&lu_keys_guard); + spin_lock(&lu_keys_guard); for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) { if (lu_keys[i] == NULL) { key->lct_index = i; - cfs_atomic_set(&key->lct_used, 1); + atomic_set(&key->lct_used, 1); lu_keys[i] = key; lu_ref_init(&key->lct_reference); result = 0; @@ -1056,8 +1455,8 @@ int lu_context_key_register(struct lu_context_key *key) break; } } - cfs_spin_unlock(&lu_keys_guard); - return result; + spin_unlock(&lu_keys_guard); + return result; } EXPORT_SYMBOL(lu_context_key_register); @@ -1069,18 +1468,19 @@ static void key_fini(struct lu_context *ctx, int index) key = lu_keys[index]; LASSERT(key != NULL); LASSERT(key->lct_fini != NULL); - LASSERT(cfs_atomic_read(&key->lct_used) > 1); + LASSERT(atomic_read(&key->lct_used) > 1); key->lct_fini(ctx, key, ctx->lc_value[index]); lu_ref_del(&key->lct_reference, "ctx", ctx); - cfs_atomic_dec(&key->lct_used); - LASSERT(key->lct_owner != NULL); - if (!(ctx->lc_tags & LCT_NOREF)) { - LASSERT(cfs_module_refcount(key->lct_owner) > 0); - cfs_module_put(key->lct_owner); - } - ctx->lc_value[index] = NULL; - } + atomic_dec(&key->lct_used); + + LASSERT(key->lct_owner != NULL); + if ((ctx->lc_tags & LCT_NOREF) == 0) { + LINVRNT(module_refcount(key->lct_owner) > 0); + module_put(key->lct_owner); + } + ctx->lc_value[index] = NULL; + } } /** @@ -1088,23 +1488,23 @@ static void key_fini(struct lu_context *ctx, int index) */ void lu_context_key_degister(struct lu_context_key *key) { - LASSERT(cfs_atomic_read(&key->lct_used) >= 1); - LINVRNT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys)); + LASSERT(atomic_read(&key->lct_used) >= 1); + LINVRNT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys)); - lu_context_key_quiesce(key); + lu_context_key_quiesce(key); - ++key_set_version; - cfs_spin_lock(&lu_keys_guard); - key_fini(&lu_shrink_env.le_ctx, key->lct_index); - if (lu_keys[key->lct_index]) { - lu_keys[key->lct_index] = NULL; - lu_ref_fini(&key->lct_reference); - } - cfs_spin_unlock(&lu_keys_guard); + ++key_set_version; + spin_lock(&lu_keys_guard); + key_fini(&lu_shrink_env.le_ctx, key->lct_index); + if (lu_keys[key->lct_index]) { + lu_keys[key->lct_index] = NULL; + lu_ref_fini(&key->lct_reference); + } + spin_unlock(&lu_keys_guard); - LASSERTF(cfs_atomic_read(&key->lct_used) == 1, - "key has instances: %d\n", - cfs_atomic_read(&key->lct_used)); + LASSERTF(atomic_read(&key->lct_used) == 1, + "key has instances: %d\n", + 
atomic_read(&key->lct_used)); } EXPORT_SYMBOL(lu_context_key_degister); @@ -1205,7 +1605,7 @@ EXPORT_SYMBOL(lu_context_key_get); /** * List of remembered contexts. XXX document me. */ -static CFS_LIST_HEAD(lu_context_remembered); +static struct list_head lu_context_remembered; /** * Destroy \a key in all remembered contexts. This is used to destroy key @@ -1226,13 +1626,13 @@ void lu_context_key_quiesce(struct lu_context_key *key) /* * XXX memory barrier has to go here. */ - cfs_spin_lock(&lu_keys_guard); - cfs_list_for_each_entry(ctx, &lu_context_remembered, - lc_remember) - key_fini(ctx, key->lct_index); - cfs_spin_unlock(&lu_keys_guard); - ++key_set_version; - } + spin_lock(&lu_keys_guard); + list_for_each_entry(ctx, &lu_context_remembered, + lc_remember) + key_fini(ctx, key->lct_index); + spin_unlock(&lu_keys_guard); + ++key_set_version; + } } EXPORT_SYMBOL(lu_context_key_quiesce); @@ -1245,23 +1645,23 @@ EXPORT_SYMBOL(lu_context_key_revive); static void keys_fini(struct lu_context *ctx) { - int i; + unsigned int i; - cfs_spin_lock(&lu_keys_guard); - if (ctx->lc_value != NULL) { - for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) - key_fini(ctx, i); - OBD_FREE(ctx->lc_value, - ARRAY_SIZE(lu_keys) * sizeof ctx->lc_value[0]); - ctx->lc_value = NULL; - } - cfs_spin_unlock(&lu_keys_guard); + if (ctx->lc_value == NULL) + return; + + for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) + key_fini(ctx, i); + + OBD_FREE(ctx->lc_value, ARRAY_SIZE(lu_keys) * sizeof ctx->lc_value[0]); + ctx->lc_value = NULL; } static int keys_fill(struct lu_context *ctx) { - int i; + unsigned int i; + LINVRNT(ctx->lc_value != NULL); for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) { struct lu_context_key *key; @@ -1282,11 +1682,11 @@ static int keys_fill(struct lu_context *ctx) if (unlikely(IS_ERR(value))) return PTR_ERR(value); - LASSERT(key->lct_owner != NULL); - if (!(ctx->lc_tags & LCT_NOREF)) - cfs_try_module_get(key->lct_owner); - lu_ref_add_atomic(&key->lct_reference, "ctx", ctx); - cfs_atomic_inc(&key->lct_used); + LASSERT(key->lct_owner != NULL); + if (!(ctx->lc_tags & LCT_NOREF)) + try_module_get(key->lct_owner); + lu_ref_add_atomic(&key->lct_reference, "ctx", ctx); + atomic_inc(&key->lct_used); /* * This is the only place in the code, where an * element of ctx->lc_value[] array is set to non-NULL @@ -1303,17 +1703,11 @@ static int keys_fill(struct lu_context *ctx) static int keys_init(struct lu_context *ctx) { - int result; - - OBD_ALLOC(ctx->lc_value, ARRAY_SIZE(lu_keys) * sizeof ctx->lc_value[0]); - if (likely(ctx->lc_value != NULL)) - result = keys_fill(ctx); - else - result = -ENOMEM; + OBD_ALLOC(ctx->lc_value, ARRAY_SIZE(lu_keys) * sizeof ctx->lc_value[0]); + if (likely(ctx->lc_value != NULL)) + return keys_fill(ctx); - if (result != 0) - keys_fini(ctx); - return result; + return -ENOMEM; } /** @@ -1321,16 +1715,24 @@ static int keys_init(struct lu_context *ctx) */ int lu_context_init(struct lu_context *ctx, __u32 tags) { - memset(ctx, 0, sizeof *ctx); - ctx->lc_state = LCS_INITIALIZED; - ctx->lc_tags = tags; - if (tags & LCT_REMEMBER) { - cfs_spin_lock(&lu_keys_guard); - cfs_list_add(&ctx->lc_remember, &lu_context_remembered); - cfs_spin_unlock(&lu_keys_guard); - } else - CFS_INIT_LIST_HEAD(&ctx->lc_remember); - return keys_init(ctx); + int rc; + + memset(ctx, 0, sizeof *ctx); + ctx->lc_state = LCS_INITIALIZED; + ctx->lc_tags = tags; + if (tags & LCT_REMEMBER) { + spin_lock(&lu_keys_guard); + list_add(&ctx->lc_remember, &lu_context_remembered); + spin_unlock(&lu_keys_guard); + } else { + INIT_LIST_HEAD(&ctx->lc_remember); 
+ } + + rc = keys_init(ctx); + if (rc != 0) + lu_context_fini(ctx); + + return rc; } EXPORT_SYMBOL(lu_context_init); @@ -1339,12 +1741,19 @@ EXPORT_SYMBOL(lu_context_init); */ void lu_context_fini(struct lu_context *ctx) { - LINVRNT(ctx->lc_state == LCS_INITIALIZED || ctx->lc_state == LCS_LEFT); - ctx->lc_state = LCS_FINALIZED; - keys_fini(ctx); - cfs_spin_lock(&lu_keys_guard); - cfs_list_del_init(&ctx->lc_remember); - cfs_spin_unlock(&lu_keys_guard); + LINVRNT(ctx->lc_state == LCS_INITIALIZED || ctx->lc_state == LCS_LEFT); + ctx->lc_state = LCS_FINALIZED; + + if ((ctx->lc_tags & LCT_REMEMBER) == 0) { + LASSERT(list_empty(&ctx->lc_remember)); + keys_fini(ctx); + + } else { /* could race with key degister */ + spin_lock(&lu_keys_guard); + keys_fini(ctx); + list_del_init(&ctx->lc_remember); + spin_unlock(&lu_keys_guard); + } } EXPORT_SYMBOL(lu_context_fini); @@ -1363,7 +1772,7 @@ EXPORT_SYMBOL(lu_context_enter); */ void lu_context_exit(struct lu_context *ctx) { - int i; + unsigned int i; LINVRNT(ctx->lc_state == LCS_ENTERED); ctx->lc_state = LCS_LEFT; @@ -1385,15 +1794,61 @@ EXPORT_SYMBOL(lu_context_exit); /** * Allocate for context all missing keys that were registered after context - * creation. + * creation. key_set_version is only changed in rare cases when modules + * are loaded and removed. */ int lu_context_refill(struct lu_context *ctx) { - LINVRNT(ctx->lc_value != NULL); - return ctx->lc_version == key_set_version ? 0 : keys_fill(ctx); + return likely(ctx->lc_version == key_set_version) ? 0 : keys_fill(ctx); } EXPORT_SYMBOL(lu_context_refill); +/** + * lu_ctx_tags/lu_ses_tags will be updated if there are new types of + * obd being added. Currently, this is only used on client side, specifically + * for echo device client, for other stack (like ptlrpc threads), context are + * predefined when the lu_device type are registered, during the module probe + * phase. + */ +__u32 lu_context_tags_default = 0; +__u32 lu_session_tags_default = 0; + +void lu_context_tags_update(__u32 tags) +{ + spin_lock(&lu_keys_guard); + lu_context_tags_default |= tags; + key_set_version++; + spin_unlock(&lu_keys_guard); +} +EXPORT_SYMBOL(lu_context_tags_update); + +void lu_context_tags_clear(__u32 tags) +{ + spin_lock(&lu_keys_guard); + lu_context_tags_default &= ~tags; + key_set_version++; + spin_unlock(&lu_keys_guard); +} +EXPORT_SYMBOL(lu_context_tags_clear); + +void lu_session_tags_update(__u32 tags) +{ + spin_lock(&lu_keys_guard); + lu_session_tags_default |= tags; + key_set_version++; + spin_unlock(&lu_keys_guard); +} +EXPORT_SYMBOL(lu_session_tags_update); + +void lu_session_tags_clear(__u32 tags) +{ + spin_lock(&lu_keys_guard); + lu_session_tags_default &= ~tags; + key_set_version++; + spin_unlock(&lu_keys_guard); +} +EXPORT_SYMBOL(lu_session_tags_clear); + int lu_env_init(struct lu_env *env, __u32 tags) { int result; @@ -1425,48 +1880,177 @@ int lu_env_refill(struct lu_env *env) } EXPORT_SYMBOL(lu_env_refill); -static struct cfs_shrinker *lu_site_shrinker = NULL; - -#ifdef __KERNEL__ -static int lu_cache_shrink(int nr, unsigned int gfp_mask) +/** + * Currently, this API will only be used by echo client. + * Because echo client and normal lustre client will share + * same cl_env cache. So echo client needs to refresh + * the env context after it get one from the cache, especially + * when normal client and echo client co-exist in the same client. 
+ */ +int lu_env_refill_by_tags(struct lu_env *env, __u32 ctags, + __u32 stags) { - struct lu_site *s; - struct lu_site *tmp; - int cached = 0; - int remain = nr; - CFS_LIST_HEAD(splice); + int result; - if (nr != 0) { - if (!(gfp_mask & __GFP_FS)) - return -1; - CDEBUG(D_INODE, "Shrink %d objects\n", nr); + if ((env->le_ctx.lc_tags & ctags) != ctags) { + env->le_ctx.lc_version = 0; + env->le_ctx.lc_tags |= ctags; } - cfs_down(&lu_sites_guard); - cfs_list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) { - if (nr != 0) { - remain = lu_site_purge(&lu_shrink_env, s, remain); - /* - * Move just shrunk site to the tail of site list to - * assure shrinking fairness. - */ - cfs_list_move_tail(&s->ls_linkage, &splice); + if (env->le_ses && (env->le_ses->lc_tags & stags) != stags) { + env->le_ses->lc_version = 0; + env->le_ses->lc_tags |= stags; + } + + result = lu_env_refill(env); + + return result; +} +EXPORT_SYMBOL(lu_env_refill_by_tags); + +static struct shrinker *lu_site_shrinker; + +typedef struct lu_site_stats{ + unsigned lss_populated; + unsigned lss_max_search; + unsigned lss_total; + unsigned lss_busy; +} lu_site_stats_t; + +static void lu_site_stats_get(cfs_hash_t *hs, + lu_site_stats_t *stats, int populated) +{ + cfs_hash_bd_t bd; + unsigned int i; + + cfs_hash_for_each_bucket(hs, &bd, i) { + struct lu_site_bkt_data *bkt = cfs_hash_bd_extra_get(hs, &bd); + struct hlist_head *hhead; + + cfs_hash_bd_lock(hs, &bd, 1); + stats->lss_busy += bkt->lsb_busy; + stats->lss_total += cfs_hash_bd_count_get(&bd); + stats->lss_max_search = max((int)stats->lss_max_search, + cfs_hash_bd_depmax_get(&bd)); + if (!populated) { + cfs_hash_bd_unlock(hs, &bd, 1); + continue; } - cfs_read_lock(&s->ls_guard); - cached += s->ls_total - s->ls_busy; - cfs_read_unlock(&s->ls_guard); - if (nr && remain <= 0) - break; + + cfs_hash_bd_for_each_hlist(hs, &bd, hhead) { + if (!hlist_empty(hhead)) + stats->lss_populated++; + } + cfs_hash_bd_unlock(hs, &bd, 1); } - cfs_list_splice(&splice, lu_sites.prev); - cfs_up(&lu_sites_guard); +} + - cached = (cached / 100) * sysctl_vfs_cache_pressure; - if (nr == 0) - CDEBUG(D_INODE, "%d objects cached\n", cached); - return cached; +static unsigned long lu_cache_shrink_count(struct shrinker *sk, + struct shrink_control *sc) +{ + lu_site_stats_t stats; + struct lu_site *s; + struct lu_site *tmp; + unsigned long cached = 0; + + if (!(sc->gfp_mask & __GFP_FS)) + return 0; + + mutex_lock(&lu_sites_guard); + list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) { + memset(&stats, 0, sizeof(stats)); + lu_site_stats_get(s->ls_obj_hash, &stats, 0); + cached += stats.lss_total - stats.lss_busy; + } + mutex_unlock(&lu_sites_guard); + + cached = (cached / 100) * sysctl_vfs_cache_pressure; + CDEBUG(D_INODE, "%ld objects cached\n", cached); + return cached; +} + +static unsigned long lu_cache_shrink_scan(struct shrinker *sk, + struct shrink_control *sc) +{ + struct lu_site *s; + struct lu_site *tmp; + unsigned long remain = sc->nr_to_scan; + LIST_HEAD(splice); + + if (!(sc->gfp_mask & __GFP_FS)) + /* We must not take the lu_sites_guard lock when + * __GFP_FS is *not* set because of the deadlock + * possibility detailed above. Additionally, + * since we cannot determine the number of + * objects in the cache without taking this + * lock, we're in a particularly tough spot. As + * a result, we'll just lie and say our cache is + * empty. This _should_ be ok, as we can't + * reclaim objects when __GFP_FS is *not* set + * anyways. 
+ */ + return SHRINK_STOP; + + mutex_lock(&lu_sites_guard); + list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) { + remain = lu_site_purge(&lu_shrink_env, s, remain); + /* + * Move just shrunk site to the tail of site list to + * assure shrinking fairness. + */ + list_move_tail(&s->ls_linkage, &splice); + } + list_splice(&splice, lu_sites.prev); + mutex_unlock(&lu_sites_guard); + + return sc->nr_to_scan - remain; +} + +#ifndef HAVE_SHRINKER_COUNT +/* + * There exists a potential lock inversion deadlock scenario when using + * Lustre on top of ZFS. This occurs between one of ZFS's + * buf_hash_table.ht_lock's, and Lustre's lu_sites_guard lock. Essentially, + * thread A will take the lu_sites_guard lock and sleep on the ht_lock, + * while thread B will take the ht_lock and sleep on the lu_sites_guard + * lock. Obviously neither thread will wake and drop their respective hold + * on their lock. + * + * To prevent this from happening we must ensure the lu_sites_guard lock is + * not taken while down this code path. ZFS reliably does not set the + * __GFP_FS bit in its code paths, so this can be used to determine if it + * is safe to take the lu_sites_guard lock. + * + * Ideally we should accurately return the remaining number of cached + * objects without taking the lu_sites_guard lock, but this is not + * possible in the current implementation. + */ +static int lu_cache_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask)) +{ + int cached = 0; + struct shrink_control scv = { + .nr_to_scan = shrink_param(sc, nr_to_scan), + .gfp_mask = shrink_param(sc, gfp_mask) + }; +#if !defined(HAVE_SHRINKER_WANT_SHRINK_PTR) && !defined(HAVE_SHRINK_CONTROL) + struct shrinker* shrinker = NULL; +#endif + + + CDEBUG(D_INODE, "Shrink %lu objects\n", scv.nr_to_scan); + + lu_cache_shrink_scan(shrinker, &scv); + + cached = lu_cache_shrink_count(shrinker, &scv); + if (scv.nr_to_scan == 0) + CDEBUG(D_INODE, "%d objects cached\n", cached); + return cached; } +#endif /* HAVE_SHRINKER_COUNT */ + + /* * Debugging stuff. */ @@ -1490,24 +2074,24 @@ int lu_printk_printer(const struct lu_env *env, return 0; } -void lu_debugging_setup(void) +int lu_debugging_setup(void) { - lu_env_init(&lu_debugging_env, ~0); + return lu_env_init(&lu_debugging_env, ~0); } void lu_context_keys_dump(void) { - int i; + unsigned int i; for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) { struct lu_context_key *key; key = lu_keys[i]; if (key != NULL) { - CERROR("[%i]: %p %x (%p,%p,%p) %i %i \"%s\"@%p\n", + CERROR("[%d]: %p %x (%p,%p,%p) %d %d \"%s\"@%p\n", i, key, key->lct_tags, key->lct_init, key->lct_fini, key->lct_exit, - key->lct_index, cfs_atomic_read(&key->lct_used), + key->lct_index, atomic_read(&key->lct_used), key->lct_owner ? key->lct_owner->name : "", key->lct_owner); lu_ref_print(&key->lct_reference); @@ -1515,23 +2099,6 @@ void lu_context_keys_dump(void) } } EXPORT_SYMBOL(lu_context_keys_dump); -#else /* !__KERNEL__ */ -static int lu_cache_shrink(int nr, unsigned int gfp_mask) -{ - return 0; -} -#endif /* __KERNEL__ */ - -int cl_global_init(void); -void cl_global_fini(void); -int lu_ref_global_init(void); -void lu_ref_global_fini(void); - -int dt_global_init(void); -void dt_global_fini(void); - -int llo_global_init(void); -void llo_global_fini(void); /** * Initialization of global lu_* data. 
@@ -1539,8 +2106,14 @@ void llo_global_fini(void); int lu_global_init(void) { int result; + DEF_SHRINKER_VAR(shvar, lu_cache_shrink, + lu_cache_shrink_count, lu_cache_shrink_scan); + + CDEBUG(D_INFO, "Lustre LU module (%p).\n", &lu_keys); - CDEBUG(D_CONSOLE, "Lustre LU module (%p).\n", &lu_keys); + INIT_LIST_HEAD(&lu_device_types); + INIT_LIST_HEAD(&lu_context_remembered); + INIT_LIST_HEAD(&lu_sites); result = lu_ref_global_init(); if (result != 0) @@ -1550,14 +2123,15 @@ int lu_global_init(void) result = lu_context_key_register(&lu_global_key); if (result != 0) return result; + /* * At this level, we don't know what tags are needed, so allocate them * conservatively. This should not be too bad, because this * environment is global. */ - cfs_down(&lu_sites_guard); + mutex_lock(&lu_sites_guard); result = lu_env_init(&lu_shrink_env, LCT_SHRINKER); - cfs_up(&lu_sites_guard); + mutex_unlock(&lu_sites_guard); if (result != 0) return result; @@ -1566,26 +2140,10 @@ int lu_global_init(void) * inode, one for ea. Unfortunately setting this high value results in * lu_object/inode cache consuming all the memory. */ - lu_site_shrinker = cfs_set_shrinker(CFS_DEFAULT_SEEKS, lu_cache_shrink); + lu_site_shrinker = set_shrinker(DEFAULT_SEEKS, &shvar); if (lu_site_shrinker == NULL) return -ENOMEM; - result = lu_time_global_init(); - if (result) - GOTO(out, result); - -#ifdef __KERNEL__ - result = dt_global_init(); - if (result) - GOTO(out, result); - - result = llo_global_init(); - if (result) - GOTO(out, result); -#endif - result = cl_global_init(); -out: - return result; } @@ -1594,87 +2152,100 @@ out: */ void lu_global_fini(void) { - cl_global_fini(); -#ifdef __KERNEL__ - llo_global_fini(); - dt_global_fini(); -#endif - lu_time_global_fini(); if (lu_site_shrinker != NULL) { - cfs_remove_shrinker(lu_site_shrinker); + remove_shrinker(lu_site_shrinker); lu_site_shrinker = NULL; } - lu_context_key_degister(&lu_global_key); + lu_context_key_degister(&lu_global_key); /* * Tear shrinker environment down _after_ de-registering * lu_global_key, because the latter has a value in the former. */ - cfs_down(&lu_sites_guard); + mutex_lock(&lu_sites_guard); lu_env_fini(&lu_shrink_env); - cfs_up(&lu_sites_guard); + mutex_unlock(&lu_sites_guard); lu_ref_global_fini(); } -struct lu_buf LU_BUF_NULL = { - .lb_buf = NULL, - .lb_len = 0 -}; -EXPORT_SYMBOL(LU_BUF_NULL); +static __u32 ls_stats_read(struct lprocfs_stats *stats, int idx) +{ +#ifdef LPROCFS + struct lprocfs_counter ret; + + lprocfs_stats_collect(stats, idx, &ret); + return (__u32)ret.lc_count; +#else + return 0; +#endif +} /** * Output site statistical counters into a buffer. Suitable for * lprocfs_rd_*()-style functions. 
*/ +int lu_site_stats_seq_print(const struct lu_site *s, struct seq_file *m) +{ + lu_site_stats_t stats; + + memset(&stats, 0, sizeof(stats)); + lu_site_stats_get(s->ls_obj_hash, &stats, 1); + + return seq_printf(m, "%d/%d %d/%d %d %d %d %d %d %d %d\n", + stats.lss_busy, + stats.lss_total, + stats.lss_populated, + CFS_HASH_NHLIST(s->ls_obj_hash), + stats.lss_max_search, + ls_stats_read(s->ls_stats, LU_SS_CREATED), + ls_stats_read(s->ls_stats, LU_SS_CACHE_HIT), + ls_stats_read(s->ls_stats, LU_SS_CACHE_MISS), + ls_stats_read(s->ls_stats, LU_SS_CACHE_RACE), + ls_stats_read(s->ls_stats, LU_SS_CACHE_DEATH_RACE), + ls_stats_read(s->ls_stats, LU_SS_LRU_PURGED)); +} +EXPORT_SYMBOL(lu_site_stats_seq_print); + int lu_site_stats_print(const struct lu_site *s, char *page, int count) { - int i; - int populated; + lu_site_stats_t stats; - /* - * How many hash buckets are not-empty? Don't bother with locks: it's - * an estimation anyway. - */ - for (i = 0, populated = 0; i < s->ls_hash_size; i++) - populated += !cfs_hlist_empty(&s->ls_hash[i]); - - return snprintf(page, count, "%d %d %d/%d %d %d %d %d %d %d %d\n", - s->ls_total, - s->ls_busy, - populated, - s->ls_hash_size, - s->ls_stats.s_created, - s->ls_stats.s_cache_hit, - s->ls_stats.s_cache_miss, - s->ls_stats.s_cache_check, - s->ls_stats.s_cache_race, - s->ls_stats.s_cache_death_race, - s->ls_stats.s_lru_purged); + memset(&stats, 0, sizeof(stats)); + lu_site_stats_get(s->ls_obj_hash, &stats, 1); + + return snprintf(page, count, "%d/%d %d/%d %d %d %d %d %d %d %d\n", + stats.lss_busy, + stats.lss_total, + stats.lss_populated, + CFS_HASH_NHLIST(s->ls_obj_hash), + stats.lss_max_search, + ls_stats_read(s->ls_stats, LU_SS_CREATED), + ls_stats_read(s->ls_stats, LU_SS_CACHE_HIT), + ls_stats_read(s->ls_stats, LU_SS_CACHE_MISS), + ls_stats_read(s->ls_stats, LU_SS_CACHE_RACE), + ls_stats_read(s->ls_stats, LU_SS_CACHE_DEATH_RACE), + ls_stats_read(s->ls_stats, LU_SS_LRU_PURGED)); } EXPORT_SYMBOL(lu_site_stats_print); -const char *lu_time_names[LU_TIME_NR] = { - [LU_TIME_FIND_LOOKUP] = "find_lookup", - [LU_TIME_FIND_ALLOC] = "find_alloc", - [LU_TIME_FIND_INSERT] = "find_insert" -}; -EXPORT_SYMBOL(lu_time_names); - /** * Helper function to initialize a number of kmem slab caches at once. 
*/ int lu_kmem_init(struct lu_kmem_descr *caches) { int result; + struct lu_kmem_descr *iter = caches; - for (result = 0; caches->ckd_cache != NULL; ++caches) { - *caches->ckd_cache = cfs_mem_cache_create(caches->ckd_name, - caches->ckd_size, - 0, 0); - if (*caches->ckd_cache == NULL) { + for (result = 0; iter->ckd_cache != NULL; ++iter) { + *iter->ckd_cache = kmem_cache_create(iter->ckd_name, + iter->ckd_size, + 0, 0, NULL); + if (*iter->ckd_cache == NULL) { result = -ENOMEM; + /* free all previously allocated caches */ + lu_kmem_fini(caches); break; } } @@ -1688,15 +2259,139 @@ EXPORT_SYMBOL(lu_kmem_init); */ void lu_kmem_fini(struct lu_kmem_descr *caches) { - int rc; - for (; caches->ckd_cache != NULL; ++caches) { if (*caches->ckd_cache != NULL) { - rc = cfs_mem_cache_destroy(*caches->ckd_cache); - LASSERTF(rc == 0, "couldn't destroy %s slab\n", - caches->ckd_name); + kmem_cache_destroy(*caches->ckd_cache); *caches->ckd_cache = NULL; } } } EXPORT_SYMBOL(lu_kmem_fini); + +/** + * Temporary solution to be able to assign fid in ->do_create() + * till we have fully-functional OST fids + */ +void lu_object_assign_fid(const struct lu_env *env, struct lu_object *o, + const struct lu_fid *fid) +{ + struct lu_site *s = o->lo_dev->ld_site; + struct lu_fid *old = &o->lo_header->loh_fid; + struct lu_site_bkt_data *bkt; + struct lu_object *shadow; + wait_queue_t waiter; + cfs_hash_t *hs; + cfs_hash_bd_t bd; + __u64 version = 0; + + LASSERT(fid_is_zero(old)); + + hs = s->ls_obj_hash; + cfs_hash_bd_get_and_lock(hs, (void *)fid, &bd, 1); + shadow = htable_lookup(s, &bd, fid, &waiter, &version); + /* supposed to be unique */ + LASSERT(IS_ERR(shadow) && PTR_ERR(shadow) == -ENOENT); + *old = *fid; + bkt = cfs_hash_bd_extra_get(hs, &bd); + cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash); + bkt->lsb_busy++; + cfs_hash_bd_unlock(hs, &bd, 1); +} +EXPORT_SYMBOL(lu_object_assign_fid); + +/** + * allocates object with 0 (non-assiged) fid + * XXX: temporary solution to be able to assign fid in ->do_create() + * till we have fully-functional OST fids + */ +struct lu_object *lu_object_anon(const struct lu_env *env, + struct lu_device *dev, + const struct lu_object_conf *conf) +{ + struct lu_fid fid; + struct lu_object *o; + + fid_zero(&fid); + o = lu_object_alloc(env, dev, &fid, conf); + + return o; +} +EXPORT_SYMBOL(lu_object_anon); + +struct lu_buf LU_BUF_NULL = { + .lb_buf = NULL, + .lb_len = 0 +}; +EXPORT_SYMBOL(LU_BUF_NULL); + +void lu_buf_free(struct lu_buf *buf) +{ + LASSERT(buf); + if (buf->lb_buf) { + LASSERT(buf->lb_len > 0); + OBD_FREE_LARGE(buf->lb_buf, buf->lb_len); + buf->lb_buf = NULL; + buf->lb_len = 0; + } +} +EXPORT_SYMBOL(lu_buf_free); + +void lu_buf_alloc(struct lu_buf *buf, size_t size) +{ + LASSERT(buf); + LASSERT(buf->lb_buf == NULL); + LASSERT(buf->lb_len == 0); + OBD_ALLOC_LARGE(buf->lb_buf, size); + if (likely(buf->lb_buf)) + buf->lb_len = size; +} +EXPORT_SYMBOL(lu_buf_alloc); + +void lu_buf_realloc(struct lu_buf *buf, size_t size) +{ + lu_buf_free(buf); + lu_buf_alloc(buf, size); +} +EXPORT_SYMBOL(lu_buf_realloc); + +struct lu_buf *lu_buf_check_and_alloc(struct lu_buf *buf, size_t len) +{ + if (buf->lb_buf == NULL && buf->lb_len == 0) + lu_buf_alloc(buf, len); + + if ((len > buf->lb_len) && (buf->lb_buf != NULL)) + lu_buf_realloc(buf, len); + + return buf; +} +EXPORT_SYMBOL(lu_buf_check_and_alloc); + +/** + * Increase the size of the \a buf. 
+ * preserves old data in buffer + * old buffer remains unchanged on error + * \retval 0 or -ENOMEM + */ +int lu_buf_check_and_grow(struct lu_buf *buf, size_t len) +{ + char *ptr; + + if (len <= buf->lb_len) + return 0; + + OBD_ALLOC_LARGE(ptr, len); + if (ptr == NULL) + return -ENOMEM; + + /* Free the old buf */ + if (buf->lb_buf != NULL) { + memcpy(ptr, buf->lb_buf, buf->lb_len); + OBD_FREE_LARGE(buf->lb_buf, buf->lb_len); + } + + buf->lb_buf = ptr; + buf->lb_len = len; + return 0; +} +EXPORT_SYMBOL(lu_buf_check_and_grow); +
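The hunk above ends with the grow-only buffer helpers (lu_buf_alloc(), lu_buf_check_and_alloc(), lu_buf_check_and_grow()). A minimal user-space sketch of the same contract follows; it is illustrative only, uses malloc()/free() in place of OBD_ALLOC_LARGE()/OBD_FREE_LARGE(), and the demo_buf names are not Lustre identifiers.

#include <errno.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative stand-in for struct lu_buf: a pointer plus its length. */
struct demo_buf {
        char   *db_buf;
        size_t  db_len;
};

/*
 * Same contract as lu_buf_check_and_grow() above:
 * - a request no larger than the current size is a no-op,
 * - on growth the old contents are copied into the new allocation,
 * - on allocation failure the old buffer is left untouched.
 */
static int demo_buf_check_and_grow(struct demo_buf *buf, size_t len)
{
        char *ptr;

        if (len <= buf->db_len)
                return 0;

        ptr = malloc(len);
        if (ptr == NULL)
                return -ENOMEM;         /* old buffer is still valid */

        if (buf->db_buf != NULL) {
                memcpy(ptr, buf->db_buf, buf->db_len);
                free(buf->db_buf);
        }

        buf->db_buf = ptr;
        buf->db_len = len;
        return 0;
}

The key property, carried over from lu_buf_check_and_grow() in the patch, is that a failed allocation leaves the old buffer and length unchanged, so a caller can keep working with the smaller buffer or fail gracefully.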
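lu_kmem_init()/lu_kmem_fini() walk a NULL-terminated descriptor table, and the change above makes lu_kmem_init() unwind the caches it has already created when a later kmem_cache_create() fails. Below is a stand-alone analogue of that rollback pattern, with malloc() standing in for slab caches; the demo_* names are purely illustrative and not Lustre identifiers.

#include <stdlib.h>

/* Illustrative analogue of struct lu_kmem_descr: where to store the
 * created cache handle, a name, and the object size. */
struct demo_kmem_descr {
        void       **dkd_cache;
        const char  *dkd_name;
        size_t       dkd_size;
};

static void demo_kmem_fini(struct demo_kmem_descr *caches)
{
        for (; caches->dkd_cache != NULL; ++caches) {
                free(*caches->dkd_cache);
                *caches->dkd_cache = NULL;
        }
}

/* Walk the NULL-terminated table; on the first failure, roll back every
 * entry created so far, mirroring the fixed lu_kmem_init() above. */
static int demo_kmem_init(struct demo_kmem_descr *caches)
{
        struct demo_kmem_descr *iter;

        for (iter = caches; iter->dkd_cache != NULL; ++iter) {
                *iter->dkd_cache = malloc(iter->dkd_size);
                if (*iter->dkd_cache == NULL) {
                        demo_kmem_fini(caches);
                        return -1;
                }
        }
        return 0;
}

A caller terminates the table with an entry whose cache pointer is NULL, just as the struct lu_kmem_descr tables are terminated in the Lustre sources.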
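The shrinker rework above splits the old single callback into lu_cache_shrink_count() (a cheap estimate of what could be reclaimed) and lu_cache_shrink_scan() (the part that actually purges), with the HAVE_SHRINKER_COUNT fallback composing the two for older kernels. The sketch below mirrors that split on a toy cache; it is not the kernel shrinker API, it ignores the __GFP_FS guard discussed in the patch, and every demo_* name is invented for illustration.

/* Illustrative cache: ds_total objects, of which ds_busy are pinned. */
struct demo_site {
        unsigned long ds_total;
        unsigned long ds_busy;
};

/* "count" half: side-effect-free estimate, scaled the same way as
 * lu_cache_shrink_count() scales by sysctl_vfs_cache_pressure. */
static unsigned long demo_shrink_count(struct demo_site *s,
                                       unsigned int cache_pressure)
{
        unsigned long cached = s->ds_total - s->ds_busy;

        return cached / 100 * cache_pressure;
}

/* "scan" half: drop up to nr_to_scan idle objects and report how many
 * were actually freed, mirroring lu_cache_shrink_scan(). */
static unsigned long demo_shrink_scan(struct demo_site *s,
                                      unsigned long nr_to_scan)
{
        unsigned long idle  = s->ds_total - s->ds_busy;
        unsigned long freed = idle < nr_to_scan ? idle : nr_to_scan;

        s->ds_total -= freed;
        return freed;
}

/* Legacy single-entry-point style (cf. the HAVE_SHRINKER_COUNT fallback):
 * a nonzero request scans first, then the reclaimable estimate is returned. */
static long demo_shrink(struct demo_site *s, unsigned long nr_to_scan,
                        unsigned int cache_pressure)
{
        if (nr_to_scan != 0)
                demo_shrink_scan(s, nr_to_scan);
        return (long)demo_shrink_count(s, cache_pressure);
}

Keeping the count half free of side effects is what lets newer kernels poll many shrinkers cheaply and invoke the scan half only on caches that are worth reclaiming from.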
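The new lu_context_tags_update()/lu_context_tags_clear() helpers and the relaxed lu_context_refill() rely on a version handshake: every change to the global key set bumps key_set_version under lu_keys_guard, and a context refills its values only when its cached version is stale. Below is a rough user-space analogue of that handshake, assuming a pthread mutex in place of the kernel spinlock; the demo_* names are not part of Lustre.

#include <pthread.h>

static pthread_mutex_t demo_keys_guard = PTHREAD_MUTEX_INITIALIZER;
static unsigned int demo_key_set_version;
static unsigned int demo_default_tags;

struct demo_ctx {
        unsigned int dc_tags;
        unsigned int dc_version;    /* version this context was filled at */
};

/* cf. lu_context_tags_update(): widen the default tag set and invalidate
 * every context by bumping the global version under the guard lock. */
static void demo_tags_update(unsigned int tags)
{
        pthread_mutex_lock(&demo_keys_guard);
        demo_default_tags |= tags;
        demo_key_set_version++;
        pthread_mutex_unlock(&demo_keys_guard);
}

/* cf. lu_context_refill(): fast path when nothing was registered since the
 * last fill; the slow path would re-populate and re-stamp the context. */
static int demo_ctx_refill(struct demo_ctx *ctx)
{
        if (ctx->dc_version == demo_key_set_version)
                return 0;

        /* ... allocate values for any newly registered keys here ... */
        ctx->dc_version = demo_key_set_version;
        return 0;
}

As the updated comment above lu_context_refill() notes, key_set_version changes only when modules are loaded or removed, so the version-match fast path is by far the common case.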