X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Fobdclass%2Flu_object.c;h=cc43d7c64d6e4ae711f1eb8a17d5999102160614;hp=e65ddfa20d84b995645a66c851f4df31095f29c4;hb=8701e7e4b5ec1b34700c95b9b6588f4745730b72;hpb=70e80ade90af09300396706b8910e196a7928520 diff --git a/lustre/obdclass/lu_object.c b/lustre/obdclass/lu_object.c index e65ddfa..cc43d7c 100644 --- a/lustre/obdclass/lu_object.c +++ b/lustre/obdclass/lu_object.c @@ -1,6 +1,4 @@ -/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*- - * vim:expandtab:shiftwidth=8:tabstop=8: - * +/* * GPL HEADER START * * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. @@ -16,8 +14,8 @@ * in the LICENSE file that accompanied this code). * * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see [sun.com URL with a - * copy of GPLv2]. + * version 2 along with this program; If not, see + * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf * * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or @@ -26,8 +24,10 @@ * GPL HEADER END */ /* - * Copyright 2008 Sun Microsystems, Inc. All rights reserved + * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. * Use is subject to license terms. + * + * Copyright (c) 2011, 2013, Intel Corporation. */ /* * This file is part of Lustre, http://www.lustre.org/ @@ -39,140 +39,245 @@ * These are the only exported functions, they provide some generic * infrastructure for managing object devices * - * Author: Nikita Danilov + * Author: Nikita Danilov */ #define DEBUG_SUBSYSTEM S_CLASS -#ifndef EXPORT_SYMTAB -# define EXPORT_SYMTAB + +#include + +#ifdef __KERNEL__ +# include #endif -#include -#include -/* nr_free_pages() */ -#include /* hash_long() */ -#include +#include +#include #include #include #include #include +#include #include -/* lu_time_global_{init,fini}() */ -#include + +enum { + LU_CACHE_PERCENT_MAX = 50, + LU_CACHE_PERCENT_DEFAULT = 20 +}; + +#define LU_CACHE_NR_MAX_ADJUST 128 +#define LU_CACHE_NR_UNLIMITED -1 +#define LU_CACHE_NR_DEFAULT LU_CACHE_NR_UNLIMITED +#define LU_CACHE_NR_LDISKFS_LIMIT LU_CACHE_NR_UNLIMITED +/** This is set to roughly (20 * OSS_NTHRS_MAX) to prevent thrashing */ +#define LU_CACHE_NR_ZFS_LIMIT 10240 + +#define LU_SITE_BITS_MIN 12 +#define LU_SITE_BITS_MAX 24 +/** + * total 256 buckets, we don't want too many buckets because: + * - consume too much memory + * - avoid unbalanced LRU list + */ +#define LU_SITE_BKT_BITS 8 + + +static unsigned int lu_cache_percent = LU_CACHE_PERCENT_DEFAULT; +CFS_MODULE_PARM(lu_cache_percent, "i", int, 0644, + "Percentage of memory to be used as lu_object cache"); + +static long lu_cache_nr = LU_CACHE_NR_DEFAULT; +CFS_MODULE_PARM(lu_cache_nr, "l", long, 0644, + "Maximum number of objects in lu_object cache"); static void lu_object_free(const struct lu_env *env, struct lu_object *o); -/* +/** * Decrease reference counter on object. If last reference is freed, return * object to the cache, unless lu_object_is_dying(o) holds. In the latter * case, free object immediately. 
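 *
 * For illustration, a minimal sketch of the usual pairing with
 * lu_object_find() (illustrative only; assumes a valid \a env, device
 * \a dev and fid \a f):
 *
 *	o = lu_object_find(env, dev, f, NULL);
 *	if (!IS_ERR(o)) {
 *		... use the object ...
 *		lu_object_put(env, o);
 *	}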
*/ void lu_object_put(const struct lu_env *env, struct lu_object *o) { + struct lu_site_bkt_data *bkt; struct lu_object_header *top; struct lu_site *site; struct lu_object *orig; - int kill_it; + cfs_hash_bd_t bd; + const struct lu_fid *fid; - top = o->lo_header; + top = o->lo_header; site = o->lo_dev->ld_site; orig = o; - kill_it = 0; - write_lock(&site->ls_guard); - if (atomic_dec_and_test(&top->loh_ref)) { - /* - * When last reference is released, iterate over object - * layers, and notify them that object is no longer busy. - */ - list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) { - if (o->lo_ops->loo_object_release != NULL) - o->lo_ops->loo_object_release(env, o); - } - -- site->ls_busy; - if (lu_object_is_dying(top)) { - /* - * If object is dying (will not be cached), removed it - * from hash table and LRU. - * - * This is done with hash table and LRU lists - * locked. As the only way to acquire first reference - * to previously unreferenced object is through - * hash-table lookup (lu_object_find()), or LRU - * scanning (lu_site_purge()), that are done under - * hash-table and LRU lock, no race with concurrent - * object lookup is possible and we can safely destroy - * object below. - */ - hlist_del_init(&top->loh_hash); - list_del_init(&top->loh_lru); - -- site->ls_total; - kill_it = 1; - } + + /* + * till we have full fids-on-OST implemented anonymous objects + * are possible in OSP. such an object isn't listed in the site + * so we should not remove it from the site. + */ + fid = lu_object_fid(o); + if (fid_is_zero(fid)) { + LASSERT(top->loh_hash.next == NULL + && top->loh_hash.pprev == NULL); + LASSERT(list_empty(&top->loh_lru)); + if (!atomic_dec_and_test(&top->loh_ref)) + return; + list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) { + if (o->lo_ops->loo_object_release != NULL) + o->lo_ops->loo_object_release(env, o); + } + lu_object_free(env, orig); + return; + } + + cfs_hash_bd_get(site->ls_obj_hash, &top->loh_fid, &bd); + bkt = cfs_hash_bd_extra_get(site->ls_obj_hash, &bd); + + if (!cfs_hash_bd_dec_and_lock(site->ls_obj_hash, &bd, &top->loh_ref)) { + if (lu_object_is_dying(top)) { + + /* + * somebody may be waiting for this, currently only + * used for cl_object, see cl_object_put_last(). + */ + wake_up_all(&bkt->lsb_marche_funebre); + } + return; + } + + LASSERT(bkt->lsb_busy > 0); + bkt->lsb_busy--; + /* + * When last reference is released, iterate over object + * layers, and notify them that object is no longer busy. + */ + list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) { + if (o->lo_ops->loo_object_release != NULL) + o->lo_ops->loo_object_release(env, o); } - write_unlock(&site->ls_guard); - if (kill_it) - /* - * Object was already removed from hash and lru above, can - * kill it. - */ - lu_object_free(env, orig); + + if (!lu_object_is_dying(top)) { + LASSERT(list_empty(&top->loh_lru)); + list_add_tail(&top->loh_lru, &bkt->lsb_lru); + cfs_hash_bd_unlock(site->ls_obj_hash, &bd, 1); + return; + } + + /* + * If object is dying (will not be cached), removed it + * from hash table and LRU. + * + * This is done with hash table and LRU lists locked. As the only + * way to acquire first reference to previously unreferenced + * object is through hash-table lookup (lu_object_find()), + * or LRU scanning (lu_site_purge()), that are done under hash-table + * and LRU lock, no race with concurrent object lookup is possible + * and we can safely destroy object below. 
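+ *
+ * Note that the LU_OBJECT_UNHASHED bit tested below keeps the hash
+ * removal from being done twice: lu_object_unhash() may already have
+ * taken the header out of the table, leaving only the final free here.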
+ */ + if (!test_and_set_bit(LU_OBJECT_UNHASHED, &top->loh_flags)) + cfs_hash_bd_del_locked(site->ls_obj_hash, &bd, &top->loh_hash); + cfs_hash_bd_unlock(site->ls_obj_hash, &bd, 1); + /* + * Object was already removed from hash and lru above, can + * kill it. + */ + lu_object_free(env, orig); } EXPORT_SYMBOL(lu_object_put); -/* +/** + * Put object and don't keep in cache. This is temporary solution for + * multi-site objects when its layering is not constant. + */ +void lu_object_put_nocache(const struct lu_env *env, struct lu_object *o) +{ + set_bit(LU_OBJECT_HEARD_BANSHEE, &o->lo_header->loh_flags); + return lu_object_put(env, o); +} +EXPORT_SYMBOL(lu_object_put_nocache); + +/** + * Kill the object and take it out of LRU cache. + * Currently used by client code for layout change. + */ +void lu_object_unhash(const struct lu_env *env, struct lu_object *o) +{ + struct lu_object_header *top; + + top = o->lo_header; + set_bit(LU_OBJECT_HEARD_BANSHEE, &top->loh_flags); + if (!test_and_set_bit(LU_OBJECT_UNHASHED, &top->loh_flags)) { + cfs_hash_t *obj_hash = o->lo_dev->ld_site->ls_obj_hash; + cfs_hash_bd_t bd; + + cfs_hash_bd_get_and_lock(obj_hash, &top->loh_fid, &bd, 1); + list_del_init(&top->loh_lru); + cfs_hash_bd_del_locked(obj_hash, &bd, &top->loh_hash); + cfs_hash_bd_unlock(obj_hash, &bd, 1); + } +} +EXPORT_SYMBOL(lu_object_unhash); + +/** * Allocate new object. * * This follows object creation protocol, described in the comment within * struct lu_device_operations definition. */ static struct lu_object *lu_object_alloc(const struct lu_env *env, - struct lu_site *s, - const struct lu_fid *f) + struct lu_device *dev, + const struct lu_fid *f, + const struct lu_object_conf *conf) { - struct lu_object *scan; - struct lu_object *top; - struct list_head *layers; - int clean; - int result; - ENTRY; - - /* - * Create top-level object slice. This will also create - * lu_object_header. - */ - top = s->ls_top_dev->ld_ops->ldo_object_alloc(env, - NULL, s->ls_top_dev); - if (top == NULL) - RETURN(ERR_PTR(-ENOMEM)); + struct lu_object *scan; + struct lu_object *top; + struct list_head *layers; + unsigned int init_mask = 0; + unsigned int init_flag; + int clean; + int result; + ENTRY; + + /* + * Create top-level object slice. This will also create + * lu_object_header. + */ + top = dev->ld_ops->ldo_object_alloc(env, NULL, dev); + if (top == NULL) + RETURN(ERR_PTR(-ENOMEM)); + if (IS_ERR(top)) + RETURN(top); /* * This is the only place where object fid is assigned. It's constant * after this point. */ - LASSERT(fid_is_igif(f) || fid_ver(f) == 0); - top->lo_header->loh_fid = *f; + top->lo_header->loh_fid = *f; layers = &top->lo_header->loh_layers; - do { - /* - * Call ->loo_object_init() repeatedly, until no more new - * object slices are created. - */ - clean = 1; - list_for_each_entry(scan, layers, lo_linkage) { - if (scan->lo_flags & LU_OBJECT_ALLOCATED) - continue; - clean = 0; - scan->lo_header = top->lo_header; - result = scan->lo_ops->loo_object_init(env, scan); - if (result != 0) { - lu_object_free(env, top); - RETURN(ERR_PTR(result)); - } - scan->lo_flags |= LU_OBJECT_ALLOCATED; - } - } while (!clean); - list_for_each_entry_reverse(scan, layers, lo_linkage) { + do { + /* + * Call ->loo_object_init() repeatedly, until no more new + * object slices are created. 
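+ *
+ * Each layer already initialized on a previous pass is remembered by
+ * one bit in \a init_mask (\a init_flag is the bit for the layer
+ * currently being visited), so repeated passes only initialize the
+ * slices that were newly added.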
+ */ + clean = 1; + init_flag = 1; + list_for_each_entry(scan, layers, lo_linkage) { + if (init_mask & init_flag) + goto next; + clean = 0; + scan->lo_header = top->lo_header; + result = scan->lo_ops->loo_object_init(env, scan, conf); + if (result != 0) { + lu_object_free(env, top); + RETURN(ERR_PTR(result)); + } + init_mask |= init_flag; +next: + init_flag <<= 1; + } + } while (!clean); + + list_for_each_entry_reverse(scan, layers, lo_linkage) { if (scan->lo_ops->loo_object_start != NULL) { result = scan->lo_ops->loo_object_start(env, scan); if (result != 0) { @@ -182,23 +287,28 @@ static struct lu_object *lu_object_alloc(const struct lu_env *env, } } - s->ls_stats.s_created ++; + lprocfs_counter_incr(dev->ld_site->ls_stats, LU_SS_CREATED); RETURN(top); } -/* - * Free object. +/** + * Free an object. */ static void lu_object_free(const struct lu_env *env, struct lu_object *o) { - struct list_head splice; - struct lu_object *scan; - + struct lu_site_bkt_data *bkt; + struct lu_site *site; + struct lu_object *scan; + struct list_head *layers; + struct list_head splice; + + site = o->lo_dev->ld_site; + layers = &o->lo_header->loh_layers; + bkt = lu_site_bkt_from_fid(site, &o->lo_header->loh_fid); /* * First call ->loo_object_delete() method to release all resources. */ - list_for_each_entry_reverse(scan, - &o->lo_header->loh_layers, lo_linkage) { + list_for_each_entry_reverse(scan, layers, lo_linkage) { if (scan->lo_ops->loo_object_delete != NULL) scan->lo_ops->loo_object_delete(env, scan); } @@ -209,62 +319,110 @@ static void lu_object_free(const struct lu_env *env, struct lu_object *o) * necessary, because lu_object_header is freed together with the * top-level slice. */ - CFS_INIT_LIST_HEAD(&splice); - list_splice_init(&o->lo_header->loh_layers, &splice); - while (!list_empty(&splice)) { - o = container_of0(splice.next, struct lu_object, lo_linkage); - list_del_init(&o->lo_linkage); - LASSERT(o->lo_ops->loo_object_free != NULL); - o->lo_ops->loo_object_free(env, o); - } + INIT_LIST_HEAD(&splice); + list_splice_init(layers, &splice); + while (!list_empty(&splice)) { + /* + * Free layers in bottom-to-top order, so that object header + * lives as long as possible and ->loo_object_free() methods + * can look at its contents. + */ + o = container_of0(splice.prev, struct lu_object, lo_linkage); + list_del_init(&o->lo_linkage); + LASSERT(o->lo_ops->loo_object_free != NULL); + o->lo_ops->loo_object_free(env, o); + } + + if (waitqueue_active(&bkt->lsb_marche_funebre)) + wake_up_all(&bkt->lsb_marche_funebre); } -/* - * Free @nr objects from the cold end of the site LRU list. +/** + * Free \a nr objects from the cold end of the site LRU list. */ int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr) { - struct list_head dispose; struct lu_object_header *h; struct lu_object_header *temp; - - CFS_INIT_LIST_HEAD(&dispose); + struct lu_site_bkt_data *bkt; + cfs_hash_bd_t bd; + cfs_hash_bd_t bd2; + struct list_head dispose; + int did_sth; + int start; + int count; + int bnr; + int i; + + if (OBD_FAIL_CHECK(OBD_FAIL_OBD_NO_LRU)) + RETURN(0); + + INIT_LIST_HEAD(&dispose); /* * Under LRU list lock, scan LRU list and move unreferenced objects to * the dispose list, removing them from LRU and hash table. */ - write_lock(&s->ls_guard); - list_for_each_entry_safe(h, temp, &s->ls_lru, loh_lru) { - /* - * Objects are sorted in lru order, and "busy" objects (ones - * with h->loh_ref > 0) naturally tend to live near hot end - * that we scan last. 
Unfortunately, sites usually have small - * (less then ten) number of busy yet rarely accessed objects - * (some global objects, accessed directly through pointers, - * bypassing hash table). Currently algorithm scans them over - * and over again. Probably we should move busy objects out of - * LRU, or we can live with that. - */ - if (nr-- == 0) - break; - if (atomic_read(&h->loh_ref) > 0) + start = s->ls_purge_start; + bnr = (nr == ~0) ? -1 : nr / CFS_HASH_NBKT(s->ls_obj_hash) + 1; + again: + /* + * It doesn't make any sense to make purge threads parallel, that can + * only bring troubles to us. See LU-5331. + */ + mutex_lock(&s->ls_purge_mutex); + did_sth = 0; + cfs_hash_for_each_bucket(s->ls_obj_hash, &bd, i) { + if (i < start) continue; - hlist_del_init(&h->loh_hash); - list_move(&h->loh_lru, &dispose); - s->ls_total --; + count = bnr; + cfs_hash_bd_lock(s->ls_obj_hash, &bd, 1); + bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, &bd); + + list_for_each_entry_safe(h, temp, &bkt->lsb_lru, loh_lru) { + LASSERT(atomic_read(&h->loh_ref) == 0); + + cfs_hash_bd_get(s->ls_obj_hash, &h->loh_fid, &bd2); + LASSERT(bd.bd_bucket == bd2.bd_bucket); + + cfs_hash_bd_del_locked(s->ls_obj_hash, + &bd2, &h->loh_hash); + list_move(&h->loh_lru, &dispose); + if (did_sth == 0) + did_sth = 1; + + if (nr != ~0 && --nr == 0) + break; + + if (count > 0 && --count == 0) + break; + + } + cfs_hash_bd_unlock(s->ls_obj_hash, &bd, 1); + cond_resched(); + /* + * Free everything on the dispose list. This is safe against + * races due to the reasons described in lu_object_put(). + */ + while (!list_empty(&dispose)) { + h = container_of0(dispose.next, + struct lu_object_header, loh_lru); + list_del_init(&h->loh_lru); + lu_object_free(env, lu_object_top(h)); + lprocfs_counter_incr(s->ls_stats, LU_SS_LRU_PURGED); + } + + if (nr == 0) + break; } - write_unlock(&s->ls_guard); - /* - * Free everything on the dispose list. This is safe against races due - * to the reasons described in lu_object_put(). - */ - while (!list_empty(&dispose)) { - h = container_of0(dispose.next, - struct lu_object_header, loh_lru); - list_del_init(&h->loh_lru); - lu_object_free(env, lu_object_top(h)); - s->ls_stats.s_lru_purged ++; + mutex_unlock(&s->ls_purge_mutex); + + if (nr != 0 && did_sth && start != 0) { + start = 0; /* restart from the first bucket */ + goto again; } + /* race on s->ls_purge_start, but nobody cares */ + s->ls_purge_start = i % CFS_HASH_NBKT(s->ls_obj_hash); + return nr; } EXPORT_SYMBOL(lu_site_purge); @@ -286,49 +444,46 @@ EXPORT_SYMBOL(lu_site_purge); */ enum { - /* + /** * Maximal line size. * * XXX overflow is not handled correctly. */ - LU_CDEBUG_LINE = 256 + LU_CDEBUG_LINE = 512 }; struct lu_cdebug_data { - /* + /** * Temporary buffer. */ char lck_area[LU_CDEBUG_LINE]; - /* - * fid staging area used by dt_store_open(). - */ - struct lu_fid_pack lck_pack; }; /* context key constructor/destructor: lu_global_key_init, lu_global_key_fini */ LU_KEY_INIT_FINI(lu_global, struct lu_cdebug_data); -/* +/** * Key, holding temporary buffer. This key is registered very early by * lu_global_init(). */ struct lu_context_key lu_global_key = { - .lct_tags = LCT_MD_THREAD|LCT_DT_THREAD|LCT_CL_THREAD, - .lct_init = lu_global_key_init, - .lct_fini = lu_global_key_fini + .lct_tags = LCT_MD_THREAD | LCT_DT_THREAD | + LCT_MG_THREAD | LCT_CL_THREAD | LCT_LOCAL, + .lct_init = lu_global_key_init, + .lct_fini = lu_global_key_fini }; -/* +/** * Printer function emitting messages through libcfs_debug_msg(). 
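 *
 * For illustration, an object dump is typically routed through this
 * printer as in the sketch below (assumes \a msgdata is an initialized
 * struct libcfs_debug_msg_data):
 *
 *	lu_object_print(env, &msgdata, lu_cdebug_printer, o);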
*/ int lu_cdebug_printer(const struct lu_env *env, void *cookie, const char *format, ...) { - struct lu_cdebug_print_info *info = cookie; - struct lu_cdebug_data *key; + struct libcfs_debug_msg_data *msgdata = cookie; + struct lu_cdebug_data *key; int used; int complete; - va_list args; + va_list args; va_start(args, format); @@ -343,9 +498,8 @@ int lu_cdebug_printer(const struct lu_env *env, vsnprintf(key->lck_area + used, ARRAY_SIZE(key->lck_area) - used, format, args); if (complete) { - libcfs_debug_msg(NULL, info->lpi_subsys, info->lpi_mask, - (char *)info->lpi_file, info->lpi_fn, - info->lpi_line, "%s", key->lck_area); + if (cfs_cdebug_show(msgdata->msg_mask, msgdata->msg_subsys)) + libcfs_debug_msg(msgdata, "%s\n", key->lck_area); key->lck_area[0] = 0; } va_end(args); @@ -353,48 +507,55 @@ int lu_cdebug_printer(const struct lu_env *env, } EXPORT_SYMBOL(lu_cdebug_printer); -/* +/** * Print object header. */ -static void lu_object_header_print(const struct lu_env *env, - void *cookie, lu_printer_t printer, - const struct lu_object_header *hdr) +void lu_object_header_print(const struct lu_env *env, void *cookie, + lu_printer_t printer, + const struct lu_object_header *hdr) { - (*printer)(env, cookie, "header@%p[%#lx, %d, "DFID"%s%s%s]", - hdr, hdr->loh_flags, atomic_read(&hdr->loh_ref), - PFID(&hdr->loh_fid), - hlist_unhashed(&hdr->loh_hash) ? "" : " hash", - list_empty(&hdr->loh_lru) ? "" : " lru", - hdr->loh_attr & LOHA_EXISTS ? " exist":""); + (*printer)(env, cookie, "header@%p[%#lx, %d, "DFID"%s%s%s]", + hdr, hdr->loh_flags, atomic_read(&hdr->loh_ref), + PFID(&hdr->loh_fid), + hlist_unhashed(&hdr->loh_hash) ? "" : " hash", + list_empty((struct list_head *)&hdr->loh_lru) ? \ + "" : " lru", + hdr->loh_attr & LOHA_EXISTS ? " exist" : ""); } +EXPORT_SYMBOL(lu_object_header_print); -/* - * Print human readable representation of the @o to the @printer. +/** + * Print human readable representation of the \a o to the \a printer. */ void lu_object_print(const struct lu_env *env, void *cookie, - lu_printer_t printer, const struct lu_object *o) + lu_printer_t printer, const struct lu_object *o) { - static const char ruler[] = "........................................"; - struct lu_object_header *top; - int depth; + static const char ruler[] = "........................................"; + struct lu_object_header *top; + int depth = 4; - top = o->lo_header; - lu_object_header_print(env, cookie, printer, top); - (*printer)(env, cookie, "\n"); - list_for_each_entry(o, &top->loh_layers, lo_linkage) { - depth = o->lo_depth + 4; - LASSERT(o->lo_ops->loo_object_print != NULL); - /* - * print `.' @depth times. - */ - (*printer)(env, cookie, "%*.*s", depth, depth, ruler); - o->lo_ops->loo_object_print(env, cookie, printer, o); - (*printer)(env, cookie, "\n"); - } + top = o->lo_header; + lu_object_header_print(env, cookie, printer, top); + (*printer)(env, cookie, "{\n"); + + list_for_each_entry(o, &top->loh_layers, lo_linkage) { + /* + * print `.' \a depth times followed by type name and address + */ + (*printer)(env, cookie, "%*.*s%s@%p", depth, depth, ruler, + o->lo_dev->ld_type->ldt_name, o); + + if (o->lo_ops->loo_object_print != NULL) + (*o->lo_ops->loo_object_print)(env, cookie, printer, o); + + (*printer)(env, cookie, "\n"); + } + + (*printer)(env, cookie, "} header@%p\n", top); } EXPORT_SYMBOL(lu_object_print); -/* +/** * Check object consistency. 
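 *
 * Returns 0 as soon as any layer's ->loo_object_invariant() fails and
 * non-zero otherwise, so callers typically wrap it in an assertion
 * such as LINVRNT().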
*/ int lu_object_invariant(const struct lu_object *o) @@ -402,7 +563,7 @@ int lu_object_invariant(const struct lu_object *o) struct lu_object_header *top; top = o->lo_header; - list_for_each_entry(o, &top->loh_layers, lo_linkage) { + list_for_each_entry(o, &top->loh_layers, lo_linkage) { if (o->lo_ops->loo_object_invariant != NULL && !o->lo_ops->loo_object_invariant(o)) return 0; @@ -412,51 +573,153 @@ int lu_object_invariant(const struct lu_object *o) EXPORT_SYMBOL(lu_object_invariant); static struct lu_object *htable_lookup(struct lu_site *s, - const struct hlist_head *bucket, - const struct lu_fid *f) + cfs_hash_bd_t *bd, + const struct lu_fid *f, + wait_queue_t *waiter, + __u64 *version) { - struct lu_object_header *h; - struct hlist_node *scan; - - hlist_for_each_entry(h, scan, bucket, loh_hash) { - s->ls_stats.s_cache_check ++; - if (likely(lu_fid_eq(&h->loh_fid, f) && - !lu_object_is_dying(h))) { - /* bump reference count... */ - if (atomic_add_return(1, &h->loh_ref) == 1) - ++ s->ls_busy; - /* and move to the head of the LRU */ - /* - * XXX temporary disable this to measure effects of - * read-write locking. - */ - /* list_move_tail(&h->loh_lru, &s->ls_lru); */ - s->ls_stats.s_cache_hit ++; - return lu_object_top(h); - } + struct lu_site_bkt_data *bkt; + struct lu_object_header *h; + struct hlist_node *hnode; + __u64 ver = cfs_hash_bd_version_get(bd); + + if (*version == ver) + return ERR_PTR(-ENOENT); + + *version = ver; + bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, bd); + /* cfs_hash_bd_peek_locked is a somehow "internal" function + * of cfs_hash, it doesn't add refcount on object. */ + hnode = cfs_hash_bd_peek_locked(s->ls_obj_hash, bd, (void *)f); + if (hnode == NULL) { + lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_MISS); + return ERR_PTR(-ENOENT); } - s->ls_stats.s_cache_miss ++; - return NULL; + + h = container_of0(hnode, struct lu_object_header, loh_hash); + if (likely(!lu_object_is_dying(h))) { + cfs_hash_get(s->ls_obj_hash, hnode); + lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_HIT); + list_del_init(&h->loh_lru); + return lu_object_top(h); + } + + /* + * Lookup found an object being destroyed this object cannot be + * returned (to assure that references to dying objects are eventually + * drained), and moreover, lookup has to wait until object is freed. + */ + + init_waitqueue_entry_current(waiter); + add_wait_queue(&bkt->lsb_marche_funebre, waiter); + set_current_state(TASK_UNINTERRUPTIBLE); + lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_DEATH_RACE); + return ERR_PTR(-EAGAIN); } -static __u32 fid_hash(const struct lu_fid *f, int bits) +static struct lu_object *htable_lookup_nowait(struct lu_site *s, + cfs_hash_bd_t *bd, + const struct lu_fid *f) { - /* all objects with same id and different versions will belong to same - * collisions list. */ - return hash_long(fid_flatten(f), bits); + struct hlist_node *hnode; + struct lu_object_header *h; + + /* cfs_hash_bd_peek_locked is a somehow "internal" function + * of cfs_hash, it doesn't add refcount on object. */ + hnode = cfs_hash_bd_peek_locked(s->ls_obj_hash, bd, (void *)f); + if (hnode == NULL) { + lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_MISS); + return ERR_PTR(-ENOENT); + } + + h = container_of0(hnode, struct lu_object_header, loh_hash); + if (unlikely(lu_object_is_dying(h))) + return ERR_PTR(-ENOENT); + + cfs_hash_get(s->ls_obj_hash, hnode); + lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_HIT); + list_del_init(&h->loh_lru); + return lu_object_top(h); } -/* - * Search cache for an object with the fid @f. 
If such object is found, return - * it. Otherwise, create new object, insert it into cache and return it. In - * any case, additional reference is acquired on the returned object. +/** + * Search cache for an object with the fid \a f. If such object is found, + * return it. Otherwise, create new object, insert it into cache and return + * it. In any case, additional reference is acquired on the returned object. */ struct lu_object *lu_object_find(const struct lu_env *env, - struct lu_site *s, const struct lu_fid *f) + struct lu_device *dev, const struct lu_fid *f, + const struct lu_object_conf *conf) +{ + return lu_object_find_at(env, dev->ld_site->ls_top_dev, f, conf); +} +EXPORT_SYMBOL(lu_object_find); + +/* + * Limit the lu_object cache to a maximum of lu_cache_nr objects. Because + * the calculation for the number of objects to reclaim is not covered by + * a lock the maximum number of objects is capped by LU_CACHE_MAX_ADJUST. + * This ensures that many concurrent threads will not accidentally purge + * the entire cache. + */ +static void lu_object_limit(const struct lu_env *env, + struct lu_device *dev) +{ + __u64 size, nr; + + if (lu_cache_nr == LU_CACHE_NR_UNLIMITED) + return; + + size = cfs_hash_size_get(dev->ld_site->ls_obj_hash); + nr = (__u64)lu_cache_nr; + if (size > nr) + lu_site_purge(env, dev->ld_site, + MIN(size - nr, LU_CACHE_NR_MAX_ADJUST)); + + return; +} + +static struct lu_object *lu_object_new(const struct lu_env *env, + struct lu_device *dev, + const struct lu_fid *f, + const struct lu_object_conf *conf) { - struct lu_object *o; - struct lu_object *shadow; - struct hlist_head *bucket; + struct lu_object *o; + cfs_hash_t *hs; + cfs_hash_bd_t bd; + struct lu_site_bkt_data *bkt; + + o = lu_object_alloc(env, dev, f, conf); + if (unlikely(IS_ERR(o))) + return o; + + hs = dev->ld_site->ls_obj_hash; + cfs_hash_bd_get_and_lock(hs, (void *)f, &bd, 1); + bkt = cfs_hash_bd_extra_get(hs, &bd); + cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash); + bkt->lsb_busy++; + cfs_hash_bd_unlock(hs, &bd, 1); + + lu_object_limit(env, dev); + + return o; +} + +/** + * Core logic of lu_object_find*() functions. + */ +static struct lu_object *lu_object_find_try(const struct lu_env *env, + struct lu_device *dev, + const struct lu_fid *f, + const struct lu_object_conf *conf, + wait_queue_t *waiter) +{ + struct lu_object *o; + struct lu_object *shadow; + struct lu_site *s; + cfs_hash_t *hs; + cfs_hash_bd_t bd; + __u64 version = 0; /* * This uses standard index maintenance protocol: @@ -470,96 +733,240 @@ struct lu_object *lu_object_find(const struct lu_env *env, * object just allocated. * - unlock index; * - return object. + * + * For "LOC_F_NEW" case, we are sure the object is new established. + * It is unnecessary to perform lookup-alloc-lookup-insert, instead, + * just alloc and insert directly. + * + * If dying object is found during index search, add @waiter to the + * site wait-queue and return ERR_PTR(-EAGAIN). */ - - bucket = s->ls_hash + fid_hash(f, s->ls_hash_bits); - - read_lock(&s->ls_guard); - o = htable_lookup(s, bucket, f); - read_unlock(&s->ls_guard); - - if (o != NULL) + if (conf != NULL && conf->loc_flags & LOC_F_NEW) + return lu_object_new(env, dev, f, conf); + + s = dev->ld_site; + hs = s->ls_obj_hash; + cfs_hash_bd_get_and_lock(hs, (void *)f, &bd, 1); + o = htable_lookup(s, &bd, f, waiter, &version); + cfs_hash_bd_unlock(hs, &bd, 1); + if (!IS_ERR(o) || PTR_ERR(o) != -ENOENT) return o; /* * Allocate new object. 
This may result in rather complicated * operations, including fld queries, inode loading, etc. */ - o = lu_object_alloc(env, s, f); + o = lu_object_alloc(env, dev, f, conf); if (unlikely(IS_ERR(o))) return o; LASSERT(lu_fid_eq(lu_object_fid(o), f)); - write_lock(&s->ls_guard); - shadow = htable_lookup(s, bucket, f); - if (likely(shadow == NULL)) { - hlist_add_head(&o->lo_header->loh_hash, bucket); - list_add_tail(&o->lo_header->loh_lru, &s->ls_lru); - ++ s->ls_busy; - ++ s->ls_total; - shadow = o; - o = NULL; - } else - s->ls_stats.s_cache_race ++; - write_unlock(&s->ls_guard); - if (o != NULL) - lu_object_free(env, o); + cfs_hash_bd_lock(hs, &bd, 1); + + shadow = htable_lookup(s, &bd, f, waiter, &version); + if (likely(IS_ERR(shadow) && PTR_ERR(shadow) == -ENOENT)) { + struct lu_site_bkt_data *bkt; + + bkt = cfs_hash_bd_extra_get(hs, &bd); + cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash); + bkt->lsb_busy++; + cfs_hash_bd_unlock(hs, &bd, 1); + + lu_object_limit(env, dev); + + return o; + } + + lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_RACE); + cfs_hash_bd_unlock(hs, &bd, 1); + lu_object_free(env, o); return shadow; } -EXPORT_SYMBOL(lu_object_find); -/* +/** + * Much like lu_object_find(), but top level device of object is specifically + * \a dev rather than top level device of the site. This interface allows + * objects of different "stacking" to be created within the same site. + */ +struct lu_object *lu_object_find_at(const struct lu_env *env, + struct lu_device *dev, + const struct lu_fid *f, + const struct lu_object_conf *conf) +{ + struct lu_site_bkt_data *bkt; + struct lu_object *obj; + wait_queue_t wait; + + while (1) { + obj = lu_object_find_try(env, dev, f, conf, &wait); + if (obj != ERR_PTR(-EAGAIN)) + return obj; + /* + * lu_object_find_try() already added waiter into the + * wait queue. + */ + waitq_wait(&wait, TASK_UNINTERRUPTIBLE); + bkt = lu_site_bkt_from_fid(dev->ld_site, (void *)f); + remove_wait_queue(&bkt->lsb_marche_funebre, &wait); + } +} +EXPORT_SYMBOL(lu_object_find_at); + +/** + * Try to find the object in cache without waiting for the dead object + * to be released nor allocating object if no cached one was found. + * + * The found object will be set as LU_OBJECT_HEARD_BANSHEE for purging. + */ +void lu_object_purge(const struct lu_env *env, struct lu_device *dev, + const struct lu_fid *f) +{ + struct lu_site *s = dev->ld_site; + cfs_hash_t *hs = s->ls_obj_hash; + cfs_hash_bd_t bd; + struct lu_object *o; + + cfs_hash_bd_get_and_lock(hs, f, &bd, 1); + o = htable_lookup_nowait(s, &bd, f); + cfs_hash_bd_unlock(hs, &bd, 1); + if (!IS_ERR(o)) { + set_bit(LU_OBJECT_HEARD_BANSHEE, &o->lo_header->loh_flags); + lu_object_put(env, o); + } +} +EXPORT_SYMBOL(lu_object_purge); + +/** + * Find object with given fid, and return its slice belonging to given device. + */ +struct lu_object *lu_object_find_slice(const struct lu_env *env, + struct lu_device *dev, + const struct lu_fid *f, + const struct lu_object_conf *conf) +{ + struct lu_object *top; + struct lu_object *obj; + + top = lu_object_find(env, dev, f, conf); + if (!IS_ERR(top)) { + obj = lu_object_locate(top->lo_header, dev->ld_type); + if (obj == NULL) + lu_object_put(env, top); + } else + obj = top; + return obj; +} +EXPORT_SYMBOL(lu_object_find_slice); + +/** + * Global list of all device types. 
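+ *
+ * For illustration (sketch only, \a my_type is hypothetical): a module
+ * providing a device type calls lu_device_type_init(&my_type) at load
+ * time and lu_device_type_fini(&my_type) on unload; both update this
+ * list under obd_types_lock.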
+ */ +static struct list_head lu_device_types; + +int lu_device_type_init(struct lu_device_type *ldt) +{ + int result = 0; + + atomic_set(&ldt->ldt_device_nr, 0); + INIT_LIST_HEAD(&ldt->ldt_linkage); + if (ldt->ldt_ops->ldto_init) + result = ldt->ldt_ops->ldto_init(ldt); + + if (result == 0) { + spin_lock(&obd_types_lock); + list_add(&ldt->ldt_linkage, &lu_device_types); + spin_unlock(&obd_types_lock); + } + + return result; +} +EXPORT_SYMBOL(lu_device_type_init); + +void lu_device_type_fini(struct lu_device_type *ldt) +{ + spin_lock(&obd_types_lock); + list_del_init(&ldt->ldt_linkage); + spin_unlock(&obd_types_lock); + if (ldt->ldt_ops->ldto_fini) + ldt->ldt_ops->ldto_fini(ldt); +} +EXPORT_SYMBOL(lu_device_type_fini); + +/** * Global list of all sites on this node */ -static CFS_LIST_HEAD(lu_sites); -static DECLARE_MUTEX(lu_sites_guard); +static struct list_head lu_sites; +static DEFINE_MUTEX(lu_sites_guard); -/* +/** * Global environment used by site shrinker. */ static struct lu_env lu_shrink_env; -/* - * Print all objects in @s. +struct lu_site_print_arg { + struct lu_env *lsp_env; + void *lsp_cookie; + lu_printer_t lsp_printer; +}; + +static int +lu_site_obj_print(cfs_hash_t *hs, cfs_hash_bd_t *bd, + struct hlist_node *hnode, void *data) +{ + struct lu_site_print_arg *arg = (struct lu_site_print_arg *)data; + struct lu_object_header *h; + + h = hlist_entry(hnode, struct lu_object_header, loh_hash); + if (!list_empty(&h->loh_layers)) { + const struct lu_object *o; + + o = lu_object_top(h); + lu_object_print(arg->lsp_env, arg->lsp_cookie, + arg->lsp_printer, o); + } else { + lu_object_header_print(arg->lsp_env, arg->lsp_cookie, + arg->lsp_printer, h); + } + return 0; +} + +/** + * Print all objects in \a s. */ void lu_site_print(const struct lu_env *env, struct lu_site *s, void *cookie, lu_printer_t printer) { - int i; + struct lu_site_print_arg arg = { + .lsp_env = (struct lu_env *)env, + .lsp_cookie = cookie, + .lsp_printer = printer, + }; - for (i = 0; i < s->ls_hash_size; ++i) { - struct lu_object_header *h; - struct hlist_node *scan; - - read_lock(&s->ls_guard); - hlist_for_each_entry(h, scan, &s->ls_hash[i], loh_hash) { - - if (!list_empty(&h->loh_layers)) { - const struct lu_object *obj; - - obj = lu_object_top(h); - lu_object_print(env, cookie, printer, obj); - } else - lu_object_header_print(env, cookie, printer, h); - } - read_unlock(&s->ls_guard); - } + cfs_hash_for_each(s->ls_obj_hash, lu_site_obj_print, &arg); } EXPORT_SYMBOL(lu_site_print); -enum { - LU_CACHE_PERCENT = 20, -}; - -/* +/** * Return desired hash table order. */ -static int lu_htable_order(void) +static int lu_htable_order(struct lu_device *top) { unsigned long cache_size; int bits; + /* + * For ZFS based OSDs the cache should be disabled by default. This + * allows the ZFS ARC maximum flexibility in determining what buffers + * to cache. If Lustre has objects or buffer which it wants to ensure + * always stay cached it must maintain a hold on them. + */ + if (strcmp(top->ld_type->ldt_name, LUSTRE_OSD_ZFS_NAME) == 0) { + lu_cache_percent = 1; + lu_cache_nr = LU_CACHE_NR_ZFS_LIMIT; + return LU_SITE_BITS_MIN; + } + /* * Calculate hash table size, assuming that we want reasonable * performance when 20% of total memory is occupied by cache of @@ -567,16 +974,25 @@ static int lu_htable_order(void) * * Size of lu_object is (arbitrary) taken as 1K (together with inode). 
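 *
 * As a rough worked example (assuming 4KiB pages and the default
 * lu_cache_percent of 20): a node with 16GiB of RAM has
 * totalram_pages = 4194304, so cache_size below becomes about
 * 4194304 / 100 * 20 * (4096 / 1024) = ~3.3 million objects and the
 * loop yields bits = 22, which the caller then clamps to the
 * [LU_SITE_BITS_MIN, LU_SITE_BITS_MAX] range.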
*/ - cache_size = num_physpages; + cache_size = totalram_pages; #if BITS_PER_LONG == 32 /* limit hashtable size for lowmem systems to low RAM */ - if (cache_size > 1 << (30 - CFS_PAGE_SHIFT)) - cache_size = 1 << (30 - CFS_PAGE_SHIFT) * 3 / 4; + if (cache_size > 1 << (30 - PAGE_CACHE_SHIFT)) + cache_size = 1 << (30 - PAGE_CACHE_SHIFT) * 3 / 4; #endif - cache_size = cache_size / 100 * LU_CACHE_PERCENT * - (CFS_PAGE_SIZE / 1024); + /* clear off unreasonable cache setting. */ + if (lu_cache_percent == 0 || lu_cache_percent > LU_CACHE_PERCENT_MAX) { + CWARN("obdclass: invalid lu_cache_percent: %u, it must be in" + " the range of (0, %u]. Will use default value: %u.\n", + lu_cache_percent, LU_CACHE_PERCENT_MAX, + LU_CACHE_PERCENT_DEFAULT); + + lu_cache_percent = LU_CACHE_PERCENT_DEFAULT; + } + cache_size = cache_size / 100 * lu_cache_percent * + (PAGE_CACHE_SIZE / 1024); for (bits = 1; (1 << bits) < cache_size; ++bits) { ; @@ -584,232 +1000,379 @@ static int lu_htable_order(void) return bits; } -/* - * Initialize site @s, with @d as the top level device. - */ +static unsigned lu_obj_hop_hash(cfs_hash_t *hs, + const void *key, unsigned mask) +{ + struct lu_fid *fid = (struct lu_fid *)key; + __u32 hash; + + hash = fid_flatten32(fid); + hash += (hash >> 4) + (hash << 12); /* mixing oid and seq */ + hash = hash_long(hash, hs->hs_bkt_bits); + + /* give me another random factor */ + hash -= hash_long((unsigned long)hs, fid_oid(fid) % 11 + 3); + + hash <<= hs->hs_cur_bits - hs->hs_bkt_bits; + hash |= (fid_seq(fid) + fid_oid(fid)) & (CFS_HASH_NBKT(hs) - 1); + + return hash & mask; +} + +static void *lu_obj_hop_object(struct hlist_node *hnode) +{ + return hlist_entry(hnode, struct lu_object_header, loh_hash); +} + +static void *lu_obj_hop_key(struct hlist_node *hnode) +{ + struct lu_object_header *h; + + h = hlist_entry(hnode, struct lu_object_header, loh_hash); + return &h->loh_fid; +} + +static int lu_obj_hop_keycmp(const void *key, struct hlist_node *hnode) +{ + struct lu_object_header *h; + + h = hlist_entry(hnode, struct lu_object_header, loh_hash); + return lu_fid_eq(&h->loh_fid, (struct lu_fid *)key); +} + +static void lu_obj_hop_get(cfs_hash_t *hs, struct hlist_node *hnode) +{ + struct lu_object_header *h; + + h = hlist_entry(hnode, struct lu_object_header, loh_hash); + if (atomic_add_return(1, &h->loh_ref) == 1) { + struct lu_site_bkt_data *bkt; + cfs_hash_bd_t bd; + + cfs_hash_bd_get(hs, &h->loh_fid, &bd); + bkt = cfs_hash_bd_extra_get(hs, &bd); + bkt->lsb_busy++; + } +} + +static void lu_obj_hop_put_locked(cfs_hash_t *hs, struct hlist_node *hnode) +{ + LBUG(); /* we should never called it */ +} + +cfs_hash_ops_t lu_site_hash_ops = { + .hs_hash = lu_obj_hop_hash, + .hs_key = lu_obj_hop_key, + .hs_keycmp = lu_obj_hop_keycmp, + .hs_object = lu_obj_hop_object, + .hs_get = lu_obj_hop_get, + .hs_put_locked = lu_obj_hop_put_locked, +}; + +void lu_dev_add_linkage(struct lu_site *s, struct lu_device *d) +{ + spin_lock(&s->ls_ld_lock); + if (list_empty(&d->ld_linkage)) + list_add(&d->ld_linkage, &s->ls_ld_linkage); + spin_unlock(&s->ls_ld_lock); +} +EXPORT_SYMBOL(lu_dev_add_linkage); + +void lu_dev_del_linkage(struct lu_site *s, struct lu_device *d) +{ + spin_lock(&s->ls_ld_lock); + list_del_init(&d->ld_linkage); + spin_unlock(&s->ls_ld_lock); +} +EXPORT_SYMBOL(lu_dev_del_linkage); + +/** + * Initialize site \a s, with \a d as the top level device. 
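+ *
+ * For illustration, the expected setup/teardown sequence (sketch,
+ * error handling omitted):
+ *
+ *	rc = lu_site_init(s, top);
+ *	if (rc == 0)
+ *		rc = lu_site_init_finish(s);
+ *	...
+ *	lu_site_fini(s);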
+ */ int lu_site_init(struct lu_site *s, struct lu_device *top) { - int bits; - int size; - int i; - ENTRY; + struct lu_site_bkt_data *bkt; + cfs_hash_bd_t bd; + char name[16]; + int bits; + int i; + ENTRY; + + INIT_LIST_HEAD(&lu_sites); + + memset(s, 0, sizeof *s); + mutex_init(&s->ls_purge_mutex); + bits = lu_htable_order(top); + snprintf(name, 16, "lu_site_%s", top->ld_type->ldt_name); + for (bits = min(max(LU_SITE_BITS_MIN, bits), LU_SITE_BITS_MAX); + bits >= LU_SITE_BITS_MIN; bits--) { + s->ls_obj_hash = cfs_hash_create(name, bits, bits, + bits - LU_SITE_BKT_BITS, + sizeof(*bkt), 0, 0, + &lu_site_hash_ops, + CFS_HASH_SPIN_BKTLOCK | + CFS_HASH_NO_ITEMREF | + CFS_HASH_DEPTH | + CFS_HASH_ASSERT_EMPTY | + CFS_HASH_COUNTER); + if (s->ls_obj_hash != NULL) + break; + } + + if (s->ls_obj_hash == NULL) { + CERROR("failed to create lu_site hash with bits: %d\n", bits); + return -ENOMEM; + } + + cfs_hash_for_each_bucket(s->ls_obj_hash, &bd, i) { + bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, &bd); + INIT_LIST_HEAD(&bkt->lsb_lru); + init_waitqueue_head(&bkt->lsb_marche_funebre); + } + + s->ls_stats = lprocfs_alloc_stats(LU_SS_LAST_STAT, 0); + if (s->ls_stats == NULL) { + cfs_hash_putref(s->ls_obj_hash); + s->ls_obj_hash = NULL; + return -ENOMEM; + } - memset(s, 0, sizeof *s); - rwlock_init(&s->ls_guard); - CFS_INIT_LIST_HEAD(&s->ls_lru); - CFS_INIT_LIST_HEAD(&s->ls_linkage); + lprocfs_counter_init(s->ls_stats, LU_SS_CREATED, + 0, "created", "created"); + lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_HIT, + 0, "cache_hit", "cache_hit"); + lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_MISS, + 0, "cache_miss", "cache_miss"); + lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_RACE, + 0, "cache_race", "cache_race"); + lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_DEATH_RACE, + 0, "cache_death_race", "cache_death_race"); + lprocfs_counter_init(s->ls_stats, LU_SS_LRU_PURGED, + 0, "lru_purged", "lru_purged"); + + INIT_LIST_HEAD(&s->ls_linkage); s->ls_top_dev = top; top->ld_site = s; lu_device_get(top); + lu_ref_add(&top->ld_reference, "site-top", s); - for (bits = lu_htable_order(), size = 1 << bits; - (s->ls_hash = - cfs_alloc_large(size * sizeof s->ls_hash[0])) == NULL; - --bits, size >>= 1) { - /* - * Scale hash table down, until allocation succeeds. - */ - ; - } + INIT_LIST_HEAD(&s->ls_ld_linkage); + spin_lock_init(&s->ls_ld_lock); - s->ls_hash_size = size; - s->ls_hash_bits = bits; - s->ls_hash_mask = size - 1; + lu_dev_add_linkage(s, top); - for (i = 0; i < size; i++) - INIT_HLIST_HEAD(&s->ls_hash[i]); - - RETURN(0); + RETURN(0); } EXPORT_SYMBOL(lu_site_init); -/* - * Finalize @s and release its resources. +/** + * Finalize \a s and release its resources. 
*/ void lu_site_fini(struct lu_site *s) { - LASSERT(list_empty(&s->ls_lru)); - LASSERT(s->ls_total == 0); + mutex_lock(&lu_sites_guard); + list_del_init(&s->ls_linkage); + mutex_unlock(&lu_sites_guard); - down(&lu_sites_guard); - list_del_init(&s->ls_linkage); - up(&lu_sites_guard); - - if (s->ls_hash != NULL) { - int i; - for (i = 0; i < s->ls_hash_size; i++) - LASSERT(hlist_empty(&s->ls_hash[i])); - cfs_free_large(s->ls_hash); - s->ls_hash = NULL; + if (s->ls_obj_hash != NULL) { + cfs_hash_putref(s->ls_obj_hash); + s->ls_obj_hash = NULL; } + if (s->ls_top_dev != NULL) { s->ls_top_dev->ld_site = NULL; + lu_ref_del(&s->ls_top_dev->ld_reference, "site-top", s); lu_device_put(s->ls_top_dev); s->ls_top_dev = NULL; } + + if (s->ls_stats != NULL) + lprocfs_free_stats(&s->ls_stats); } EXPORT_SYMBOL(lu_site_fini); -/* +/** * Called when initialization of stack for this site is completed. */ int lu_site_init_finish(struct lu_site *s) { int result; - down(&lu_sites_guard); + mutex_lock(&lu_sites_guard); result = lu_context_refill(&lu_shrink_env.le_ctx); if (result == 0) - list_add(&s->ls_linkage, &lu_sites); - up(&lu_sites_guard); + list_add(&s->ls_linkage, &lu_sites); + mutex_unlock(&lu_sites_guard); return result; } EXPORT_SYMBOL(lu_site_init_finish); -/* - * Acquire additional reference on device @d +/** + * Acquire additional reference on device \a d */ void lu_device_get(struct lu_device *d) { - atomic_inc(&d->ld_ref); + atomic_inc(&d->ld_ref); } EXPORT_SYMBOL(lu_device_get); -/* - * Release reference on device @d. +/** + * Release reference on device \a d. */ void lu_device_put(struct lu_device *d) { - atomic_dec(&d->ld_ref); + LASSERT(atomic_read(&d->ld_ref) > 0); + atomic_dec(&d->ld_ref); } EXPORT_SYMBOL(lu_device_put); -/* - * Initialize device @d of type @t. +/** + * Initialize device \a d of type \a t. */ int lu_device_init(struct lu_device *d, struct lu_device_type *t) { - memset(d, 0, sizeof *d); - atomic_set(&d->ld_ref, 0); - d->ld_type = t; - return 0; + if (atomic_inc_return(&t->ldt_device_nr) == 1 && + t->ldt_ops->ldto_start != NULL) + t->ldt_ops->ldto_start(t); + + memset(d, 0, sizeof *d); + d->ld_type = t; + lu_ref_init(&d->ld_reference); + INIT_LIST_HEAD(&d->ld_linkage); + + return 0; } EXPORT_SYMBOL(lu_device_init); -/* - * Finalize device @d. +/** + * Finalize device \a d. */ void lu_device_fini(struct lu_device *d) { - if (d->ld_obd != NULL) - /* finish lprocfs */ - lprocfs_obd_cleanup(d->ld_obd); + struct lu_device_type *t = d->ld_type; + + if (d->ld_obd != NULL) { + d->ld_obd->obd_lu_dev = NULL; + d->ld_obd = NULL; + } - LASSERTF(atomic_read(&d->ld_ref) == 0, - "Refcount is %u\n", atomic_read(&d->ld_ref)); + lu_ref_fini(&d->ld_reference); + LASSERTF(atomic_read(&d->ld_ref) == 0, + "Refcount is %u\n", atomic_read(&d->ld_ref)); + LASSERT(atomic_read(&t->ldt_device_nr) > 0); + + if (atomic_dec_and_test(&t->ldt_device_nr) && + t->ldt_ops->ldto_stop != NULL) + t->ldt_ops->ldto_stop(t); } EXPORT_SYMBOL(lu_device_fini); -/* - * Initialize object @o that is part of compound object @h and was created by - * device @d. +/** + * Initialize object \a o that is part of compound object \a h and was created + * by device \a d. 
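 *
 * For illustration (sketch only, \a slice and its \a lo member are
 * hypothetical): a ->ldo_object_alloc() implementation typically calls
 * lu_object_init(&slice->lo, h, d) on the freshly allocated slice and
 * then links it into the compound object with lu_object_add_top() or
 * lu_object_add().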
*/ -int lu_object_init(struct lu_object *o, - struct lu_object_header *h, struct lu_device *d) +int lu_object_init(struct lu_object *o, struct lu_object_header *h, + struct lu_device *d) { - memset(o, 0, sizeof *o); - o->lo_header = h; - o->lo_dev = d; - lu_device_get(d); - CFS_INIT_LIST_HEAD(&o->lo_linkage); - return 0; + memset(o, 0, sizeof(*o)); + o->lo_header = h; + o->lo_dev = d; + lu_device_get(d); + lu_ref_add_at(&d->ld_reference, &o->lo_dev_ref, "lu_object", o); + INIT_LIST_HEAD(&o->lo_linkage); + + return 0; } EXPORT_SYMBOL(lu_object_init); -/* +/** * Finalize object and release its resources. */ void lu_object_fini(struct lu_object *o) { - LASSERT(list_empty(&o->lo_linkage)); + struct lu_device *dev = o->lo_dev; - if (o->lo_dev != NULL) { - lu_device_put(o->lo_dev); - o->lo_dev = NULL; - } + LASSERT(list_empty(&o->lo_linkage)); + + if (dev != NULL) { + lu_ref_del_at(&dev->ld_reference, &o->lo_dev_ref, + "lu_object", o); + lu_device_put(dev); + o->lo_dev = NULL; + } } EXPORT_SYMBOL(lu_object_fini); -/* - * Add object @o as first layer of compound object @h +/** + * Add object \a o as first layer of compound object \a h * * This is typically called by the ->ldo_object_alloc() method of top-level * device. */ void lu_object_add_top(struct lu_object_header *h, struct lu_object *o) { - list_move(&o->lo_linkage, &h->loh_layers); + list_move(&o->lo_linkage, &h->loh_layers); } EXPORT_SYMBOL(lu_object_add_top); -/* - * Add object @o as a layer of compound object, going after @before.1 +/** + * Add object \a o as a layer of compound object, going after \a before. * - * This is typically called by the ->ldo_object_alloc() method of - * @before->lo_dev. + * This is typically called by the ->ldo_object_alloc() method of \a + * before->lo_dev. */ void lu_object_add(struct lu_object *before, struct lu_object *o) { - list_move(&o->lo_linkage, &before->lo_linkage); + list_move(&o->lo_linkage, &before->lo_linkage); } EXPORT_SYMBOL(lu_object_add); -/* +/** * Initialize compound object. */ int lu_object_header_init(struct lu_object_header *h) { memset(h, 0, sizeof *h); - atomic_set(&h->loh_ref, 1); - INIT_HLIST_NODE(&h->loh_hash); - CFS_INIT_LIST_HEAD(&h->loh_lru); - CFS_INIT_LIST_HEAD(&h->loh_layers); + atomic_set(&h->loh_ref, 1); + INIT_HLIST_NODE(&h->loh_hash); + INIT_LIST_HEAD(&h->loh_lru); + INIT_LIST_HEAD(&h->loh_layers); + lu_ref_init(&h->loh_reference); return 0; } EXPORT_SYMBOL(lu_object_header_init); -/* +/** * Finalize compound object. */ void lu_object_header_fini(struct lu_object_header *h) { - LASSERT(list_empty(&h->loh_layers)); - LASSERT(list_empty(&h->loh_lru)); - LASSERT(hlist_unhashed(&h->loh_hash)); + LASSERT(list_empty(&h->loh_layers)); + LASSERT(list_empty(&h->loh_lru)); + LASSERT(hlist_unhashed(&h->loh_hash)); + lu_ref_fini(&h->loh_reference); } EXPORT_SYMBOL(lu_object_header_fini); -/* +/** * Given a compound object, find its slice, corresponding to the device type - * @dtype. + * \a dtype. */ struct lu_object *lu_object_locate(struct lu_object_header *h, - struct lu_device_type *dtype) + const struct lu_device_type *dtype) { - struct lu_object *o; + struct lu_object *o; - list_for_each_entry(o, &h->loh_layers, lo_linkage) { - if (o->lo_dev->ld_type == dtype) - return o; - } - return NULL; + list_for_each_entry(o, &h->loh_layers, lo_linkage) { + if (o->lo_dev->ld_type == dtype) + return o; + } + return NULL; } EXPORT_SYMBOL(lu_object_locate); - - -/* +/** * Finalize and free devices in the device stack. 
- * + * * Finalize device stack by purging object cache, and calling * lu_device_type_operations::ldto_device_fini() and * lu_device_type_operations::ldto_device_free() on all devices in the stack. @@ -823,45 +1386,47 @@ void lu_stack_fini(const struct lu_env *env, struct lu_device *top) lu_site_purge(env, site, ~0); for (scan = top; scan != NULL; scan = next) { next = scan->ld_type->ldt_ops->ldto_device_fini(env, scan); + lu_ref_del(&scan->ld_reference, "lu-stack", &lu_site_init); lu_device_put(scan); } /* purge again. */ lu_site_purge(env, site, ~0); - if (!list_empty(&site->ls_lru) || site->ls_total != 0) { - /* - * Uh-oh, objects still exist. - */ - static DECLARE_LU_CDEBUG_PRINT_INFO(cookie, D_ERROR); - - lu_site_print(env, site, &cookie, lu_cdebug_printer); - } - for (scan = top; scan != NULL; scan = next) { const struct lu_device_type *ldt = scan->ld_type; struct obd_type *type; next = ldt->ldt_ops->ldto_device_free(env, scan); type = ldt->ldt_obd_type; - type->typ_refcnt--; - class_put_type(type); + if (type != NULL) { + type->typ_refcnt--; + class_put_type(type); + } } } EXPORT_SYMBOL(lu_stack_fini); enum { - /* + /** * Maximal number of tld slots. */ - LU_CONTEXT_KEY_NR = 16 + LU_CONTEXT_KEY_NR = 40 }; static struct lu_context_key *lu_keys[LU_CONTEXT_KEY_NR] = { NULL, }; -static spinlock_t lu_keys_guard = SPIN_LOCK_UNLOCKED; +static DEFINE_SPINLOCK(lu_keys_guard); -/* +/** + * Global counter incremented whenever key is registered, unregistered, + * revived or quiesced. This is used to void unnecessary calls to + * lu_context_refill(). No locking is provided, as initialization and shutdown + * are supposed to be externally serialized. + */ +static unsigned key_set_version = 0; + +/** * Register new key. */ int lu_context_key_register(struct lu_context_key *key) @@ -875,171 +1440,340 @@ int lu_context_key_register(struct lu_context_key *key) LASSERT(key->lct_owner != NULL); result = -ENFILE; - spin_lock(&lu_keys_guard); + spin_lock(&lu_keys_guard); for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) { if (lu_keys[i] == NULL) { key->lct_index = i; - atomic_set(&key->lct_used, 1); + atomic_set(&key->lct_used, 1); lu_keys[i] = key; + lu_ref_init(&key->lct_reference); result = 0; + ++key_set_version; break; } } - spin_unlock(&lu_keys_guard); - return result; + spin_unlock(&lu_keys_guard); + return result; } EXPORT_SYMBOL(lu_context_key_register); static void key_fini(struct lu_context *ctx, int index) { - if (ctx->lc_value[index] != NULL) { + if (ctx->lc_value != NULL && ctx->lc_value[index] != NULL) { struct lu_context_key *key; key = lu_keys[index]; LASSERT(key != NULL); LASSERT(key->lct_fini != NULL); - LASSERT(atomic_read(&key->lct_used) > 1); + LASSERT(atomic_read(&key->lct_used) > 1); key->lct_fini(ctx, key, ctx->lc_value[index]); - atomic_dec(&key->lct_used); - LASSERT(key->lct_owner != NULL); - if (!(ctx->lc_tags & LCT_NOREF)) { - LASSERT(module_refcount(key->lct_owner) > 0); - module_put(key->lct_owner); - } - ctx->lc_value[index] = NULL; - } + lu_ref_del(&key->lct_reference, "ctx", ctx); + atomic_dec(&key->lct_used); + + LASSERT(key->lct_owner != NULL); + if ((ctx->lc_tags & LCT_NOREF) == 0) { + LINVRNT(module_refcount(key->lct_owner) > 0); + module_put(key->lct_owner); + } + ctx->lc_value[index] = NULL; + } } -/* +/** * Deregister key. 
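 *
 * For illustration (sketch only, \a my_key is hypothetical): a key is
 * usually set up with LU_CONTEXT_KEY_INIT() and lu_context_key_register()
 * when its module loads, and torn down with lu_context_key_degister()
 * on unload, once no context holds a value for it.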
*/ void lu_context_key_degister(struct lu_context_key *key) { - LASSERT(atomic_read(&key->lct_used) >= 1); - LASSERT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys)); + LASSERT(atomic_read(&key->lct_used) >= 1); + LINVRNT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys)); + + lu_context_key_quiesce(key); + + ++key_set_version; + spin_lock(&lu_keys_guard); + key_fini(&lu_shrink_env.le_ctx, key->lct_index); + if (lu_keys[key->lct_index]) { + lu_keys[key->lct_index] = NULL; + lu_ref_fini(&key->lct_reference); + } + spin_unlock(&lu_keys_guard); + + LASSERTF(atomic_read(&key->lct_used) == 1, + "key has instances: %d\n", + atomic_read(&key->lct_used)); +} +EXPORT_SYMBOL(lu_context_key_degister); + +/** + * Register a number of keys. This has to be called after all keys have been + * initialized by a call to LU_CONTEXT_KEY_INIT(). + */ +int lu_context_key_register_many(struct lu_context_key *k, ...) +{ + struct lu_context_key *key = k; + va_list args; + int result; + + va_start(args, k); + do { + result = lu_context_key_register(key); + if (result) + break; + key = va_arg(args, struct lu_context_key *); + } while (key != NULL); + va_end(args); - key_fini(&lu_shrink_env.le_ctx, key->lct_index); + if (result != 0) { + va_start(args, k); + while (k != key) { + lu_context_key_degister(k); + k = va_arg(args, struct lu_context_key *); + } + va_end(args); + } - if (atomic_read(&key->lct_used) > 1) - CERROR("key has instances.\n"); - spin_lock(&lu_keys_guard); - lu_keys[key->lct_index] = NULL; - spin_unlock(&lu_keys_guard); + return result; } -EXPORT_SYMBOL(lu_context_key_degister); +EXPORT_SYMBOL(lu_context_key_register_many); -/* - * Return value associated with key @key in context @ctx. +/** + * De-register a number of keys. This is a dual to + * lu_context_key_register_many(). + */ +void lu_context_key_degister_many(struct lu_context_key *k, ...) +{ + va_list args; + + va_start(args, k); + do { + lu_context_key_degister(k); + k = va_arg(args, struct lu_context_key*); + } while (k != NULL); + va_end(args); +} +EXPORT_SYMBOL(lu_context_key_degister_many); + +/** + * Revive a number of keys. + */ +void lu_context_key_revive_many(struct lu_context_key *k, ...) +{ + va_list args; + + va_start(args, k); + do { + lu_context_key_revive(k); + k = va_arg(args, struct lu_context_key*); + } while (k != NULL); + va_end(args); +} +EXPORT_SYMBOL(lu_context_key_revive_many); + +/** + * Quiescent a number of keys. + */ +void lu_context_key_quiesce_many(struct lu_context_key *k, ...) +{ + va_list args; + + va_start(args, k); + do { + lu_context_key_quiesce(k); + k = va_arg(args, struct lu_context_key*); + } while (k != NULL); + va_end(args); +} +EXPORT_SYMBOL(lu_context_key_quiesce_many); + +/** + * Return value associated with key \a key in context \a ctx. */ void *lu_context_key_get(const struct lu_context *ctx, - struct lu_context_key *key) + const struct lu_context_key *key) { - LASSERT(ctx->lc_state == LCS_ENTERED); - LASSERT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys)); + LINVRNT(ctx->lc_state == LCS_ENTERED); + LINVRNT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys)); + LASSERT(lu_keys[key->lct_index] == key); return ctx->lc_value[key->lct_index]; } EXPORT_SYMBOL(lu_context_key_get); +/** + * List of remembered contexts. XXX document me. + */ +static struct list_head lu_context_remembered; + +/** + * Destroy \a key in all remembered contexts. 
This is used to destroy key + * values in "shared" contexts (like service threads), when a module owning + * the key is about to be unloaded. + */ +void lu_context_key_quiesce(struct lu_context_key *key) +{ + struct lu_context *ctx; + extern unsigned cl_env_cache_purge(unsigned nr); + + if (!(key->lct_tags & LCT_QUIESCENT)) { + /* + * XXX layering violation. + */ + cl_env_cache_purge(~0); + key->lct_tags |= LCT_QUIESCENT; + /* + * XXX memory barrier has to go here. + */ + spin_lock(&lu_keys_guard); + list_for_each_entry(ctx, &lu_context_remembered, + lc_remember) + key_fini(ctx, key->lct_index); + spin_unlock(&lu_keys_guard); + ++key_set_version; + } +} +EXPORT_SYMBOL(lu_context_key_quiesce); + +void lu_context_key_revive(struct lu_context_key *key) +{ + key->lct_tags &= ~LCT_QUIESCENT; + ++key_set_version; +} +EXPORT_SYMBOL(lu_context_key_revive); + static void keys_fini(struct lu_context *ctx) { - int i; + int i; - if (ctx->lc_value != NULL) { - for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) - key_fini(ctx, i); - OBD_FREE(ctx->lc_value, - ARRAY_SIZE(lu_keys) * sizeof ctx->lc_value[0]); - ctx->lc_value = NULL; - } + if (ctx->lc_value == NULL) + return; + + for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) + key_fini(ctx, i); + + OBD_FREE(ctx->lc_value, ARRAY_SIZE(lu_keys) * sizeof ctx->lc_value[0]); + ctx->lc_value = NULL; } -static int keys_fill(const struct lu_context *ctx) +static int keys_fill(struct lu_context *ctx) { int i; + LINVRNT(ctx->lc_value != NULL); for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) { struct lu_context_key *key; key = lu_keys[i]; - if (ctx->lc_value[i] == NULL && - key != NULL && key->lct_tags & ctx->lc_tags) { + if (ctx->lc_value[i] == NULL && key != NULL && + (key->lct_tags & ctx->lc_tags) && + /* + * Don't create values for a LCT_QUIESCENT key, as this + * will pin module owning a key. + */ + !(key->lct_tags & LCT_QUIESCENT)) { void *value; - LASSERT(key->lct_init != NULL); - LASSERT(key->lct_index == i); + LINVRNT(key->lct_init != NULL); + LINVRNT(key->lct_index == i); value = key->lct_init(ctx, key); if (unlikely(IS_ERR(value))) return PTR_ERR(value); - LASSERT(key->lct_owner != NULL); - if (!(ctx->lc_tags & LCT_NOREF)) - try_module_get(key->lct_owner); - atomic_inc(&key->lct_used); + + LASSERT(key->lct_owner != NULL); + if (!(ctx->lc_tags & LCT_NOREF)) + try_module_get(key->lct_owner); + lu_ref_add_atomic(&key->lct_reference, "ctx", ctx); + atomic_inc(&key->lct_used); + /* + * This is the only place in the code, where an + * element of ctx->lc_value[] array is set to non-NULL + * value. + */ ctx->lc_value[i] = value; + if (key->lct_exit != NULL) + ctx->lc_tags |= LCT_HAS_EXIT; } + ctx->lc_version = key_set_version; } return 0; } static int keys_init(struct lu_context *ctx) { - int result; + OBD_ALLOC(ctx->lc_value, ARRAY_SIZE(lu_keys) * sizeof ctx->lc_value[0]); + if (likely(ctx->lc_value != NULL)) + return keys_fill(ctx); - OBD_ALLOC(ctx->lc_value, ARRAY_SIZE(lu_keys) * sizeof ctx->lc_value[0]); - if (likely(ctx->lc_value != NULL)) - result = keys_fill(ctx); - else - result = -ENOMEM; - - if (result != 0) - keys_fini(ctx); - return result; + return -ENOMEM; } -/* +/** * Initialize context data-structure. Create values for all keys. 
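 *
 * For illustration, the expected life cycle of a context (sketch, error
 * handling omitted):
 *
 *	lu_context_init(&ctx, LCT_MD_THREAD);
 *	lu_context_enter(&ctx);
 *	... use lu_context_key_get() ...
 *	lu_context_exit(&ctx);
 *	lu_context_fini(&ctx);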
*/ int lu_context_init(struct lu_context *ctx, __u32 tags) { - memset(ctx, 0, sizeof *ctx); - ctx->lc_state = LCS_INITIALIZED; - ctx->lc_tags = tags; - return keys_init(ctx); + int rc; + + memset(ctx, 0, sizeof *ctx); + ctx->lc_state = LCS_INITIALIZED; + ctx->lc_tags = tags; + if (tags & LCT_REMEMBER) { + spin_lock(&lu_keys_guard); + list_add(&ctx->lc_remember, &lu_context_remembered); + spin_unlock(&lu_keys_guard); + } else { + INIT_LIST_HEAD(&ctx->lc_remember); + } + + rc = keys_init(ctx); + if (rc != 0) + lu_context_fini(ctx); + + return rc; } EXPORT_SYMBOL(lu_context_init); -/* +/** * Finalize context data-structure. Destroy key values. */ void lu_context_fini(struct lu_context *ctx) { - LASSERT(ctx->lc_state == LCS_INITIALIZED || ctx->lc_state == LCS_LEFT); - ctx->lc_state = LCS_FINALIZED; - keys_fini(ctx); + LINVRNT(ctx->lc_state == LCS_INITIALIZED || ctx->lc_state == LCS_LEFT); + ctx->lc_state = LCS_FINALIZED; + + if ((ctx->lc_tags & LCT_REMEMBER) == 0) { + LASSERT(list_empty(&ctx->lc_remember)); + keys_fini(ctx); + + } else { /* could race with key degister */ + spin_lock(&lu_keys_guard); + keys_fini(ctx); + list_del_init(&ctx->lc_remember); + spin_unlock(&lu_keys_guard); + } } EXPORT_SYMBOL(lu_context_fini); -/* +/** * Called before entering context. */ void lu_context_enter(struct lu_context *ctx) { - LASSERT(ctx->lc_state == LCS_INITIALIZED || ctx->lc_state == LCS_LEFT); + LINVRNT(ctx->lc_state == LCS_INITIALIZED || ctx->lc_state == LCS_LEFT); ctx->lc_state = LCS_ENTERED; } EXPORT_SYMBOL(lu_context_enter); -/* - * Called after exiting from @ctx +/** + * Called after exiting from \a ctx */ void lu_context_exit(struct lu_context *ctx) { int i; - LASSERT(ctx->lc_state == LCS_ENTERED); + LINVRNT(ctx->lc_state == LCS_ENTERED); ctx->lc_state = LCS_LEFT; - if (ctx->lc_value != NULL) { + if (ctx->lc_tags & LCT_HAS_EXIT && ctx->lc_value != NULL) { for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) { if (ctx->lc_value[i] != NULL) { struct lu_context_key *key; @@ -1055,40 +1789,72 @@ void lu_context_exit(struct lu_context *ctx) } EXPORT_SYMBOL(lu_context_exit); -/* +/** * Allocate for context all missing keys that were registered after context - * creation. + * creation. key_set_version is only changed in rare cases when modules + * are loaded and removed. */ -int lu_context_refill(const struct lu_context *ctx) +int lu_context_refill(struct lu_context *ctx) { - LASSERT(ctx->lc_value != NULL); - return keys_fill(ctx); + return likely(ctx->lc_version == key_set_version) ? 0 : keys_fill(ctx); } EXPORT_SYMBOL(lu_context_refill); -static int lu_env_setup(struct lu_env *env, struct lu_context *ses, - __u32 tags, int noref) +/** + * lu_ctx_tags/lu_ses_tags will be updated if there are new types of + * obd being added. Currently, this is only used on client side, specifically + * for echo device client, for other stack (like ptlrpc threads), context are + * predefined when the lu_device type are registered, during the module probe + * phase. 
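+ *
+ * For illustration only (the tag values are assumptions): an echo-client
+ * style user could widen the defaults before creating new contexts and
+ * undo that on teardown:
+ * \code
+ *	lu_context_tags_update(LCT_DT_THREAD);
+ *	lu_session_tags_update(LCT_SESSION);
+ *	...
+ *	lu_session_tags_clear(LCT_SESSION);
+ *	lu_context_tags_clear(LCT_DT_THREAD);
+ * \endcode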
+ */ +__u32 lu_context_tags_default = 0; +__u32 lu_session_tags_default = 0; + +void lu_context_tags_update(__u32 tags) { - int result; + spin_lock(&lu_keys_guard); + lu_context_tags_default |= tags; + key_set_version++; + spin_unlock(&lu_keys_guard); +} +EXPORT_SYMBOL(lu_context_tags_update); - LASSERT(ergo(!noref, !(tags & LCT_NOREF))); +void lu_context_tags_clear(__u32 tags) +{ + spin_lock(&lu_keys_guard); + lu_context_tags_default &= ~tags; + key_set_version++; + spin_unlock(&lu_keys_guard); +} +EXPORT_SYMBOL(lu_context_tags_clear); - env->le_ses = ses; - result = lu_context_init(&env->le_ctx, tags); - if (likely(result == 0)) - lu_context_enter(&env->le_ctx); - return result; +void lu_session_tags_update(__u32 tags) +{ + spin_lock(&lu_keys_guard); + lu_session_tags_default |= tags; + key_set_version++; + spin_unlock(&lu_keys_guard); } +EXPORT_SYMBOL(lu_session_tags_update); -static int lu_env_init_noref(struct lu_env *env, struct lu_context *ses, - __u32 tags) +void lu_session_tags_clear(__u32 tags) { - return lu_env_setup(env, ses, tags, 1); + spin_lock(&lu_keys_guard); + lu_session_tags_default &= ~tags; + key_set_version++; + spin_unlock(&lu_keys_guard); } +EXPORT_SYMBOL(lu_session_tags_clear); -int lu_env_init(struct lu_env *env, struct lu_context *ses, __u32 tags) +int lu_env_init(struct lu_env *env, __u32 tags) { - return lu_env_setup(env, ses, tags, 0); + int result; + + env->le_ses = NULL; + result = lu_context_init(&env->le_ctx, tags); + if (likely(result == 0)) + lu_context_enter(&env->le_ctx); + return result; } EXPORT_SYMBOL(lu_env_init); @@ -1100,180 +1866,530 @@ void lu_env_fini(struct lu_env *env) } EXPORT_SYMBOL(lu_env_fini); -static int lu_cache_shrink(int nr, unsigned int gfp_mask) +int lu_env_refill(struct lu_env *env) { - struct lu_site *s; - struct lu_site *tmp; - int cached = 0; - int remain = nr; - CFS_LIST_HEAD(splice); + int result; - if (nr != 0 && !(gfp_mask & __GFP_FS)) - return -1; + result = lu_context_refill(&env->le_ctx); + if (result == 0 && env->le_ses != NULL) + result = lu_context_refill(env->le_ses); + return result; +} +EXPORT_SYMBOL(lu_env_refill); + +/** + * Currently, this API will only be used by echo client. + * Because echo client and normal lustre client will share + * same cl_env cache. So echo client needs to refresh + * the env context after it get one from the cache, especially + * when normal client and echo client co-exist in the same client. + */ +int lu_env_refill_by_tags(struct lu_env *env, __u32 ctags, + __u32 stags) +{ + int result; - down(&lu_sites_guard); - list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) { - if (nr != 0) { - remain = lu_site_purge(&lu_shrink_env, s, remain); - /* - * Move just shrunk site to the tail of site list to - * assure shrinking fairness. 
- */ - list_move_tail(&s->ls_linkage, &splice); + if ((env->le_ctx.lc_tags & ctags) != ctags) { + env->le_ctx.lc_version = 0; + env->le_ctx.lc_tags |= ctags; + } + + if (env->le_ses && (env->le_ses->lc_tags & stags) != stags) { + env->le_ses->lc_version = 0; + env->le_ses->lc_tags |= stags; + } + + result = lu_env_refill(env); + + return result; +} +EXPORT_SYMBOL(lu_env_refill_by_tags); + +static struct shrinker *lu_site_shrinker; + +typedef struct lu_site_stats{ + unsigned lss_populated; + unsigned lss_max_search; + unsigned lss_total; + unsigned lss_busy; +} lu_site_stats_t; + +static void lu_site_stats_get(cfs_hash_t *hs, + lu_site_stats_t *stats, int populated) +{ + cfs_hash_bd_t bd; + int i; + + cfs_hash_for_each_bucket(hs, &bd, i) { + struct lu_site_bkt_data *bkt = cfs_hash_bd_extra_get(hs, &bd); + struct hlist_head *hhead; + + cfs_hash_bd_lock(hs, &bd, 1); + stats->lss_busy += bkt->lsb_busy; + stats->lss_total += cfs_hash_bd_count_get(&bd); + stats->lss_max_search = max((int)stats->lss_max_search, + cfs_hash_bd_depmax_get(&bd)); + if (!populated) { + cfs_hash_bd_unlock(hs, &bd, 1); + continue; } - read_lock(&s->ls_guard); - cached += s->ls_total - s->ls_busy; - read_unlock(&s->ls_guard); - if (remain <= 0) - break; + + cfs_hash_bd_for_each_hlist(hs, &bd, hhead) { + if (!hlist_empty(hhead)) + stats->lss_populated++; + } + cfs_hash_bd_unlock(hs, &bd, 1); } - list_splice(&splice, lu_sites.prev); - up(&lu_sites_guard); - return cached; } -static struct shrinker *lu_site_shrinker = NULL; +#ifdef __KERNEL__ + +static unsigned long lu_cache_shrink_count(struct shrinker *sk, + struct shrink_control *sc) +{ + lu_site_stats_t stats; + struct lu_site *s; + struct lu_site *tmp; + unsigned long cached = 0; + + if (!(sc->gfp_mask & __GFP_FS)) + return 0; + + mutex_lock(&lu_sites_guard); + list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) { + memset(&stats, 0, sizeof(stats)); + lu_site_stats_get(s->ls_obj_hash, &stats, 0); + cached += stats.lss_total - stats.lss_busy; + } + mutex_unlock(&lu_sites_guard); + + cached = (cached / 100) * sysctl_vfs_cache_pressure; + CDEBUG(D_INODE, "%ld objects cached\n", cached); + return cached; +} + +static unsigned long lu_cache_shrink_scan(struct shrinker *sk, + struct shrink_control *sc) +{ + struct lu_site *s; + struct lu_site *tmp; + unsigned long remain = sc->nr_to_scan; + LIST_HEAD(splice); + + if (!(sc->gfp_mask & __GFP_FS)) + /* We must not take the lu_sites_guard lock when + * __GFP_FS is *not* set because of the deadlock + * possibility detailed above. Additionally, + * since we cannot determine the number of + * objects in the cache without taking this + * lock, we're in a particularly tough spot. As + * a result, we'll just lie and say our cache is + * empty. This _should_ be ok, as we can't + * reclaim objects when __GFP_FS is *not* set + * anyways. + */ + return SHRINK_STOP; + + mutex_lock(&lu_sites_guard); + list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) { + remain = lu_site_purge(&lu_shrink_env, s, remain); + /* + * Move just shrunk site to the tail of site list to + * assure shrinking fairness. + */ + list_move_tail(&s->ls_linkage, &splice); + } + list_splice(&splice, lu_sites.prev); + mutex_unlock(&lu_sites_guard); + + return sc->nr_to_scan - remain; +} + +#ifndef HAVE_SHRINKER_COUNT +/* + * There exists a potential lock inversion deadlock scenario when using + * Lustre on top of ZFS. This occurs between one of ZFS's + * buf_hash_table.ht_lock's, and Lustre's lu_sites_guard lock. 
Essentially, + * thread A will take the lu_sites_guard lock and sleep on the ht_lock, + * while thread B will take the ht_lock and sleep on the lu_sites_guard + * lock. Obviously neither thread will wake and drop their respective hold + * on their lock. + * + * To prevent this from happening we must ensure the lu_sites_guard lock is + * not taken while down this code path. ZFS reliably does not set the + * __GFP_FS bit in its code paths, so this can be used to determine if it + * is safe to take the lu_sites_guard lock. + * + * Ideally we should accurately return the remaining number of cached + * objects without taking the lu_sites_guard lock, but this is not + * possible in the current implementation. + */ +static int lu_cache_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask)) +{ + int cached = 0; + struct shrink_control scv = { + .nr_to_scan = shrink_param(sc, nr_to_scan), + .gfp_mask = shrink_param(sc, gfp_mask) + }; +#if !defined(HAVE_SHRINKER_WANT_SHRINK_PTR) && !defined(HAVE_SHRINK_CONTROL) + struct shrinker* shrinker = NULL; +#endif + + + CDEBUG(D_INODE, "Shrink %lu objects\n", scv.nr_to_scan); + + lu_cache_shrink_scan(shrinker, &scv); + + cached = lu_cache_shrink_count(shrinker, &scv); + if (scv.nr_to_scan == 0) + CDEBUG(D_INODE, "%d objects cached\n", cached); + return cached; +} + +#endif /* HAVE_SHRINKER_COUNT */ + /* + * Debugging stuff. + */ + +/** + * Environment to be used in debugger, contains all tags. + */ +struct lu_env lu_debugging_env; + +/** + * Debugging printer function using printk(). + */ +int lu_printk_printer(const struct lu_env *env, + void *unused, const char *format, ...) +{ + va_list args; + + va_start(args, format); + vprintk(format, args); + va_end(args); + return 0; +} + +int lu_debugging_setup(void) +{ + return lu_env_init(&lu_debugging_env, ~0); +} + +void lu_context_keys_dump(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) { + struct lu_context_key *key; + + key = lu_keys[i]; + if (key != NULL) { + CERROR("[%d]: %p %x (%p,%p,%p) %d %d \"%s\"@%p\n", + i, key, key->lct_tags, + key->lct_init, key->lct_fini, key->lct_exit, + key->lct_index, atomic_read(&key->lct_used), + key->lct_owner ? key->lct_owner->name : "", + key->lct_owner); + lu_ref_print(&key->lct_reference); + } + } +} +EXPORT_SYMBOL(lu_context_keys_dump); +#endif /* __KERNEL__ */ + +/** * Initialization of global lu_* data. */ int lu_global_init(void) { int result; + DEF_SHRINKER_VAR(shvar, lu_cache_shrink, + lu_cache_shrink_count, lu_cache_shrink_scan); + + CDEBUG(D_INFO, "Lustre LU module (%p).\n", &lu_keys); + + INIT_LIST_HEAD(&lu_device_types); + INIT_LIST_HEAD(&lu_context_remembered); + + result = lu_ref_global_init(); + if (result != 0) + return result; LU_CONTEXT_KEY_INIT(&lu_global_key); result = lu_context_key_register(&lu_global_key); - if (result == 0) { - /* - * At this level, we don't know what tags are needed, so - * allocate them conservatively. This should not be too bad, - * because this environment is global. - */ - down(&lu_sites_guard); - result = lu_env_init_noref(&lu_shrink_env, NULL, LCT_SHRINKER); - up(&lu_sites_guard); - if (result == 0) { - /* - * seeks estimation: 3 seeks to read a record from oi, - * one to read inode, one for ea. Unfortunately - * setting this high value results in lu_object/inode - * cache consuming all the memory. 
- */ - lu_site_shrinker = set_shrinker(DEFAULT_SEEKS, - lu_cache_shrink); - if (result == 0) - result = lu_time_global_init(); - } - } + if (result != 0) + return result; + + /* + * At this level, we don't know what tags are needed, so allocate them + * conservatively. This should not be too bad, because this + * environment is global. + */ + mutex_lock(&lu_sites_guard); + result = lu_env_init(&lu_shrink_env, LCT_SHRINKER); + mutex_unlock(&lu_sites_guard); + if (result != 0) + return result; + + /* + * seeks estimation: 3 seeks to read a record from oi, one to read + * inode, one for ea. Unfortunately setting this high value results in + * lu_object/inode cache consuming all the memory. + */ + lu_site_shrinker = set_shrinker(DEFAULT_SEEKS, &shvar); + if (lu_site_shrinker == NULL) + return -ENOMEM; + return result; } -/* +/** * Dual to lu_global_init(). */ void lu_global_fini(void) { - lu_time_global_fini(); if (lu_site_shrinker != NULL) { - remove_shrinker(lu_site_shrinker); + remove_shrinker(lu_site_shrinker); lu_site_shrinker = NULL; } - lu_context_key_degister(&lu_global_key); + lu_context_key_degister(&lu_global_key); /* * Tear shrinker environment down _after_ de-registering * lu_global_key, because the latter has a value in the former. */ - down(&lu_sites_guard); + mutex_lock(&lu_sites_guard); lu_env_fini(&lu_shrink_env); - up(&lu_sites_guard); + mutex_unlock(&lu_sites_guard); + + lu_ref_global_fini(); } -struct lu_buf LU_BUF_NULL = { - .lb_buf = NULL, - .lb_len = 0 -}; -EXPORT_SYMBOL(LU_BUF_NULL); +static __u32 ls_stats_read(struct lprocfs_stats *stats, int idx) +{ +#ifdef LPROCFS + struct lprocfs_counter ret; -/* - * XXX: Functions below logically belong to fid module, but they are used by - * dt_store_open(). Put them here until better place is found. + lprocfs_stats_collect(stats, idx, &ret); + return (__u32)ret.lc_count; +#else + return 0; +#endif +} + +/** + * Output site statistical counters into a buffer. Suitable for + * lprocfs_rd_*()-style functions. 
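+ *
+ * A sketch of a possible caller (hypothetical seq_file "show" callback;
+ * carrying the site pointer in m->private is an assumption made only for
+ * this illustration):
+ * \code
+ *	static int site_stats_seq_show(struct seq_file *m, void *data)
+ *	{
+ *		struct lu_site *s = m->private;
+ *
+ *		return lu_site_stats_seq_print(s, m);
+ *	}
+ * \endcode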
*/ +int lu_site_stats_seq_print(const struct lu_site *s, struct seq_file *m) +{ + lu_site_stats_t stats; + + memset(&stats, 0, sizeof(stats)); + lu_site_stats_get(s->ls_obj_hash, &stats, 1); + + return seq_printf(m, "%d/%d %d/%d %d %d %d %d %d %d %d\n", + stats.lss_busy, + stats.lss_total, + stats.lss_populated, + CFS_HASH_NHLIST(s->ls_obj_hash), + stats.lss_max_search, + ls_stats_read(s->ls_stats, LU_SS_CREATED), + ls_stats_read(s->ls_stats, LU_SS_CACHE_HIT), + ls_stats_read(s->ls_stats, LU_SS_CACHE_MISS), + ls_stats_read(s->ls_stats, LU_SS_CACHE_RACE), + ls_stats_read(s->ls_stats, LU_SS_CACHE_DEATH_RACE), + ls_stats_read(s->ls_stats, LU_SS_LRU_PURGED)); +} +EXPORT_SYMBOL(lu_site_stats_seq_print); -void fid_pack(struct lu_fid_pack *pack, const struct lu_fid *fid, - struct lu_fid *befider) +int lu_site_stats_print(const struct lu_site *s, char *page, int count) { - int recsize; - __u64 seq; - __u32 oid; + lu_site_stats_t stats; + + memset(&stats, 0, sizeof(stats)); + lu_site_stats_get(s->ls_obj_hash, &stats, 1); + + return snprintf(page, count, "%d/%d %d/%d %d %d %d %d %d %d %d\n", + stats.lss_busy, + stats.lss_total, + stats.lss_populated, + CFS_HASH_NHLIST(s->ls_obj_hash), + stats.lss_max_search, + ls_stats_read(s->ls_stats, LU_SS_CREATED), + ls_stats_read(s->ls_stats, LU_SS_CACHE_HIT), + ls_stats_read(s->ls_stats, LU_SS_CACHE_MISS), + ls_stats_read(s->ls_stats, LU_SS_CACHE_RACE), + ls_stats_read(s->ls_stats, LU_SS_CACHE_DEATH_RACE), + ls_stats_read(s->ls_stats, LU_SS_LRU_PURGED)); +} +EXPORT_SYMBOL(lu_site_stats_print); - seq = fid_seq(fid); - oid = fid_oid(fid); +/** + * Helper function to initialize a number of kmem slab caches at once. + */ +int lu_kmem_init(struct lu_kmem_descr *caches) +{ + int result; + struct lu_kmem_descr *iter = caches; + + for (result = 0; iter->ckd_cache != NULL; ++iter) { + *iter->ckd_cache = kmem_cache_create(iter->ckd_name, + iter->ckd_size, + 0, 0, NULL); + if (*iter->ckd_cache == NULL) { + result = -ENOMEM; + /* free all previously allocated caches */ + lu_kmem_fini(caches); + break; + } + } + return result; +} +EXPORT_SYMBOL(lu_kmem_init); - /* - * Two cases: compact 6 bytes representation for a common case, and - * full 17 byte representation for "unusual" fid. - */ +/** + * Helper function to finalize a number of kmem slab cached at once. Dual to + * lu_kmem_init(). + */ +void lu_kmem_fini(struct lu_kmem_descr *caches) +{ + for (; caches->ckd_cache != NULL; ++caches) { + if (*caches->ckd_cache != NULL) { + kmem_cache_destroy(*caches->ckd_cache); + *caches->ckd_cache = NULL; + } + } +} +EXPORT_SYMBOL(lu_kmem_fini); - /* - * Check that usual case is really usual. 
- */ - CLASSERT(LUSTRE_SEQ_MAX_WIDTH < 0xffffull); +/** + * Temporary solution to be able to assign fid in ->do_create() + * till we have fully-functional OST fids + */ +void lu_object_assign_fid(const struct lu_env *env, struct lu_object *o, + const struct lu_fid *fid) +{ + struct lu_site *s = o->lo_dev->ld_site; + struct lu_fid *old = &o->lo_header->loh_fid; + struct lu_site_bkt_data *bkt; + struct lu_object *shadow; + wait_queue_t waiter; + cfs_hash_t *hs; + cfs_hash_bd_t bd; + __u64 version = 0; + + LASSERT(fid_is_zero(old)); + + hs = s->ls_obj_hash; + cfs_hash_bd_get_and_lock(hs, (void *)fid, &bd, 1); + shadow = htable_lookup(s, &bd, fid, &waiter, &version); + /* supposed to be unique */ + LASSERT(IS_ERR(shadow) && PTR_ERR(shadow) == -ENOENT); + *old = *fid; + bkt = cfs_hash_bd_extra_get(hs, &bd); + cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash); + bkt->lsb_busy++; + cfs_hash_bd_unlock(hs, &bd, 1); +} +EXPORT_SYMBOL(lu_object_assign_fid); - if (fid_is_igif(fid) || - seq > 0xffffffull || oid > 0xffff || fid_ver(fid) != 0) { - fid_cpu_to_be(befider, fid); - recsize = sizeof *befider; - } else { - unsigned char *small_befider; +/** + * allocates object with 0 (non-assiged) fid + * XXX: temporary solution to be able to assign fid in ->do_create() + * till we have fully-functional OST fids + */ +struct lu_object *lu_object_anon(const struct lu_env *env, + struct lu_device *dev, + const struct lu_object_conf *conf) +{ + struct lu_fid fid; + struct lu_object *o; - small_befider = (char *)befider; + fid_zero(&fid); + o = lu_object_alloc(env, dev, &fid, conf); - small_befider[0] = seq >> 16; - small_befider[1] = seq >> 8; - small_befider[2] = seq; + return o; +} +EXPORT_SYMBOL(lu_object_anon); - small_befider[3] = oid >> 8; - small_befider[4] = oid; +struct lu_buf LU_BUF_NULL = { + .lb_buf = NULL, + .lb_len = 0 +}; +EXPORT_SYMBOL(LU_BUF_NULL); - recsize = 5; - } - memcpy(pack->fp_area, befider, recsize); - pack->fp_len = recsize + 1; +void lu_buf_free(struct lu_buf *buf) +{ + LASSERT(buf); + if (buf->lb_buf) { + LASSERT(buf->lb_len > 0); + OBD_FREE_LARGE(buf->lb_buf, buf->lb_len); + buf->lb_buf = NULL; + buf->lb_len = 0; + } } -EXPORT_SYMBOL(fid_pack); +EXPORT_SYMBOL(lu_buf_free); -int fid_unpack(const struct lu_fid_pack *pack, struct lu_fid *fid) +void lu_buf_alloc(struct lu_buf *buf, int size) { - int result; + LASSERT(buf); + LASSERT(buf->lb_buf == NULL); + LASSERT(buf->lb_len == 0); + OBD_ALLOC_LARGE(buf->lb_buf, size); + if (likely(buf->lb_buf)) + buf->lb_len = size; +} +EXPORT_SYMBOL(lu_buf_alloc); - result = 0; - switch (pack->fp_len) { - case sizeof *fid + 1: - memcpy(fid, pack->fp_area, sizeof *fid); - fid_be_to_cpu(fid, fid); - break; - case 6: { - const unsigned char *area; - - area = pack->fp_area; - fid->f_seq = (area[0] << 16) | (area[1] << 8) | area[2]; - fid->f_oid = (area[3] << 8) | area[4]; - fid->f_ver = 0; - break; - } - default: - CERROR("Unexpected packed fid size: %d\n", pack->fp_len); - result = -EIO; - } - return result; +void lu_buf_realloc(struct lu_buf *buf, int size) +{ + lu_buf_free(buf); + lu_buf_alloc(buf, size); } -EXPORT_SYMBOL(fid_unpack); +EXPORT_SYMBOL(lu_buf_realloc); + +struct lu_buf *lu_buf_check_and_alloc(struct lu_buf *buf, int len) +{ + if (buf->lb_buf == NULL && buf->lb_len == 0) + lu_buf_alloc(buf, len); + + if ((len > buf->lb_len) && (buf->lb_buf != NULL)) + lu_buf_realloc(buf, len); + + return buf; +} +EXPORT_SYMBOL(lu_buf_check_and_alloc); + +/** + * Increase the size of the \a buf. 
+ * Preserves old data in the buffer; the old buffer remains unchanged on
+ * error.
+ * \retval 0 or -ENOMEM
+ */
+int lu_buf_check_and_grow(struct lu_buf *buf, int len)
+{
+	char *ptr;
+
+	if (len <= buf->lb_len)
+		return 0;
+
+	OBD_ALLOC_LARGE(ptr, len);
+	if (ptr == NULL)
+		return -ENOMEM;
+
+	/* Copy the old data over and free the old buffer. */
+	if (buf->lb_buf != NULL) {
+		memcpy(ptr, buf->lb_buf, buf->lb_len);
+		OBD_FREE_LARGE(buf->lb_buf, buf->lb_len);
+	}
+
+	buf->lb_buf = ptr;
+	buf->lb_len = len;
+	return 0;
+}
+EXPORT_SYMBOL(lu_buf_check_and_grow);
-const char *lu_time_names[LU_TIME_NR] = {
-	[LU_TIME_FIND_LOOKUP] = "find_lookup",
-	[LU_TIME_FIND_ALLOC] = "find_alloc",
-	[LU_TIME_FIND_INSERT] = "find_insert"
-};
-EXPORT_SYMBOL(lu_time_names);
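+
+/*
+ * For illustration only: a hedged sketch of how the lu_buf helpers above
+ * might be used by a caller reading a variable-sized record. read_record(),
+ * obj and NEW_SIZE are hypothetical names introduced just for this example.
+ *
+ *	struct lu_buf buf = { .lb_buf = NULL, .lb_len = 0 };
+ *	int rc;
+ *
+ *	lu_buf_check_and_alloc(&buf, 512);
+ *	if (buf.lb_buf == NULL)
+ *		return -ENOMEM;
+ *
+ *	rc = read_record(obj, &buf);
+ *	if (rc == -ERANGE) {
+ *		rc = lu_buf_check_and_grow(&buf, NEW_SIZE);
+ *		if (rc == 0)
+ *			rc = read_record(obj, &buf);
+ *	}
+ *
+ *	lu_buf_free(&buf);
+ */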