X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Fobdclass%2Flu_object.c;h=ba45a0df8857fe2def735d1c86a2c0e2df9ed74c;hb=3e38436dc09097429e1ca1fbfa3ef4981e124715;hp=9eef99e9858dd2f349f80a2a7a8394df53481323;hpb=018e6e44fd52e12c9d4ff78ca1a5345b12577fd2;p=fs%2Flustre-release.git diff --git a/lustre/obdclass/lu_object.c b/lustre/obdclass/lu_object.c index 9eef99e..ba45a0d 100644 --- a/lustre/obdclass/lu_object.c +++ b/lustre/obdclass/lu_object.c @@ -27,7 +27,7 @@ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. * Use is subject to license terms. * - * Copyright (c) 2011, 2012, Whamcloud, Inc. + * Copyright (c) 2011, 2013, Intel Corporation. */ /* * This file is part of Lustre, http://www.lustre.org/ @@ -57,9 +57,8 @@ #include #include #include +#include #include -/* lu_time_global_{init,fini}() */ -#include static void lu_object_free(const struct lu_env *env, struct lu_object *o); @@ -75,11 +74,32 @@ void lu_object_put(const struct lu_env *env, struct lu_object *o) struct lu_site *site; struct lu_object *orig; cfs_hash_bd_t bd; + const struct lu_fid *fid; top = o->lo_header; site = o->lo_dev->ld_site; orig = o; + /* + * till we have full fids-on-OST implemented anonymous objects + * are possible in OSP. such an object isn't listed in the site + * so we should not remove it from the site. + */ + fid = lu_object_fid(o); + if (fid_is_zero(fid)) { + LASSERT(top->loh_hash.next == NULL + && top->loh_hash.pprev == NULL); + LASSERT(cfs_list_empty(&top->loh_lru)); + if (!cfs_atomic_dec_and_test(&top->loh_ref)) + return; + cfs_list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) { + if (o->lo_ops->loo_object_release != NULL) + o->lo_ops->loo_object_release(env, o); + } + lu_object_free(env, orig); + return; + } + cfs_hash_bd_get(site->ls_obj_hash, &top->loh_fid, &bd); bkt = cfs_hash_bd_extra_get(site->ls_obj_hash, &bd); @@ -124,7 +144,8 @@ void lu_object_put(const struct lu_env *env, struct lu_object *o) * and LRU lock, no race with concurrent object lookup is possible * and we can safely destroy object below. */ - cfs_hash_bd_del_locked(site->ls_obj_hash, &bd, &top->loh_hash); + if (!test_and_set_bit(LU_OBJECT_UNHASHED, &top->loh_flags)) + cfs_hash_bd_del_locked(site->ls_obj_hash, &bd, &top->loh_hash); cfs_hash_bd_unlock(site->ls_obj_hash, &bd, 1); /* * Object was already removed from hash and lru above, can @@ -140,13 +161,34 @@ EXPORT_SYMBOL(lu_object_put); */ void lu_object_put_nocache(const struct lu_env *env, struct lu_object *o) { - cfs_set_bit(LU_OBJECT_HEARD_BANSHEE, - &o->lo_header->loh_flags); + set_bit(LU_OBJECT_HEARD_BANSHEE, &o->lo_header->loh_flags); return lu_object_put(env, o); } EXPORT_SYMBOL(lu_object_put_nocache); /** + * Kill the object and take it out of LRU cache. + * Currently used by client code for layout change. + */ +void lu_object_unhash(const struct lu_env *env, struct lu_object *o) +{ + struct lu_object_header *top; + + top = o->lo_header; + set_bit(LU_OBJECT_HEARD_BANSHEE, &top->loh_flags); + if (!test_and_set_bit(LU_OBJECT_UNHASHED, &top->loh_flags)) { + cfs_hash_t *obj_hash = o->lo_dev->ld_site->ls_obj_hash; + cfs_hash_bd_t bd; + + cfs_hash_bd_get_and_lock(obj_hash, &top->loh_fid, &bd, 1); + cfs_list_del_init(&top->loh_lru); + cfs_hash_bd_del_locked(obj_hash, &bd, &top->loh_hash); + cfs_hash_bd_unlock(obj_hash, &bd, 1); + } +} +EXPORT_SYMBOL(lu_object_unhash); + +/** * Allocate new object. 
* * This follows object creation protocol, described in the comment within @@ -164,18 +206,19 @@ static struct lu_object *lu_object_alloc(const struct lu_env *env, int result; ENTRY; - /* - * Create top-level object slice. This will also create - * lu_object_header. - */ - top = dev->ld_ops->ldo_object_alloc(env, NULL, dev); - if (top == NULL) - RETURN(ERR_PTR(-ENOMEM)); + /* + * Create top-level object slice. This will also create + * lu_object_header. + */ + top = dev->ld_ops->ldo_object_alloc(env, NULL, dev); + if (top == NULL) + RETURN(ERR_PTR(-ENOMEM)); + if (IS_ERR(top)) + RETURN(top); /* * This is the only place where object fid is assigned. It's constant * after this point. */ - LASSERT(fid_is_igif(f) || fid_ver(f) == 0); top->lo_header->loh_fid = *f; layers = &top->lo_header->loh_layers; do { @@ -275,6 +318,9 @@ int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr) int bnr; int i; + if (OBD_FAIL_CHECK(OBD_FAIL_OBD_NO_LRU)) + RETURN(0); + CFS_INIT_LIST_HEAD(&dispose); /* * Under LRU list lock, scan LRU list and move unreferenced objects to @@ -379,10 +425,10 @@ LU_KEY_INIT_FINI(lu_global, struct lu_cdebug_data); * lu_global_init(). */ struct lu_context_key lu_global_key = { - .lct_tags = LCT_MD_THREAD | LCT_DT_THREAD | - LCT_MG_THREAD | LCT_CL_THREAD, - .lct_init = lu_global_key_init, - .lct_fini = lu_global_key_fini + .lct_tags = LCT_MD_THREAD | LCT_DT_THREAD | + LCT_MG_THREAD | LCT_CL_THREAD | LCT_LOCAL, + .lct_init = lu_global_key_init, + .lct_fini = lu_global_key_fini }; /** @@ -494,20 +540,21 @@ static struct lu_object *htable_lookup(struct lu_site *s, __u64 ver = cfs_hash_bd_version_get(bd); if (*version == ver) - return NULL; + return ERR_PTR(-ENOENT); *version = ver; bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, bd); - /* cfs_hash_bd_lookup_intent is a somehow "internal" function - * of cfs_hash, but we don't want refcount on object right now */ - hnode = cfs_hash_bd_lookup_locked(s->ls_obj_hash, bd, (void *)f); + /* cfs_hash_bd_peek_locked is a somehow "internal" function + * of cfs_hash, it doesn't add refcount on object. */ + hnode = cfs_hash_bd_peek_locked(s->ls_obj_hash, bd, (void *)f); if (hnode == NULL) { lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_MISS); - return NULL; + return ERR_PTR(-ENOENT); } h = container_of0(hnode, struct lu_object_header, loh_hash); if (likely(!lu_object_is_dying(h))) { + cfs_hash_get(s->ls_obj_hash, hnode); lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_HIT); cfs_list_del_init(&h->loh_lru); return lu_object_top(h); @@ -518,7 +565,6 @@ static struct lu_object *htable_lookup(struct lu_site *s, * returned (to assure that references to dying objects are eventually * drained), and moreover, lookup has to wait until object is freed. */ - cfs_atomic_dec(&h->loh_ref); cfs_waitlink_init(waiter); cfs_waitq_add(&bkt->lsb_marche_funebre, waiter); @@ -527,6 +573,31 @@ static struct lu_object *htable_lookup(struct lu_site *s, return ERR_PTR(-EAGAIN); } +static struct lu_object *htable_lookup_nowait(struct lu_site *s, + cfs_hash_bd_t *bd, + const struct lu_fid *f) +{ + cfs_hlist_node_t *hnode; + struct lu_object_header *h; + + /* cfs_hash_bd_peek_locked is a somehow "internal" function + * of cfs_hash, it doesn't add refcount on object. 
*/ + hnode = cfs_hash_bd_peek_locked(s->ls_obj_hash, bd, (void *)f); + if (hnode == NULL) { + lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_MISS); + return ERR_PTR(-ENOENT); + } + + h = container_of0(hnode, struct lu_object_header, loh_hash); + if (unlikely(lu_object_is_dying(h))) + return ERR_PTR(-ENOENT); + + cfs_hash_get(s->ls_obj_hash, hnode); + lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_HIT); + cfs_list_del_init(&h->loh_lru); + return lu_object_top(h); +} + /** * Search cache for an object with the fid \a f. If such object is found, * return it. Otherwise, create new object, insert it into cache and return @@ -607,7 +678,7 @@ static struct lu_object *lu_object_find_try(const struct lu_env *env, cfs_hash_bd_get_and_lock(hs, (void *)f, &bd, 1); o = htable_lookup(s, &bd, f, waiter, &version); cfs_hash_bd_unlock(hs, &bd, 1); - if (o != NULL) + if (!IS_ERR(o) || PTR_ERR(o) != -ENOENT) return o; /* @@ -623,7 +694,7 @@ static struct lu_object *lu_object_find_try(const struct lu_env *env, cfs_hash_bd_lock(hs, &bd, 1); shadow = htable_lookup(s, &bd, f, waiter, &version); - if (likely(shadow == NULL)) { + if (likely(IS_ERR(shadow) && PTR_ERR(shadow) == -ENOENT)) { struct lu_site_bkt_data *bkt; bkt = cfs_hash_bd_extra_get(hs, &bd); @@ -669,6 +740,30 @@ struct lu_object *lu_object_find_at(const struct lu_env *env, EXPORT_SYMBOL(lu_object_find_at); /** + * Try to find the object in cache without waiting for the dead object + * to be released nor allocating object if no cached one was found. + * + * The found object will be set as LU_OBJECT_HEARD_BANSHEE for purging. + */ +void lu_object_purge(const struct lu_env *env, struct lu_device *dev, + const struct lu_fid *f) +{ + struct lu_site *s = dev->ld_site; + cfs_hash_t *hs = s->ls_obj_hash; + cfs_hash_bd_t bd; + struct lu_object *o; + + cfs_hash_bd_get_and_lock(hs, f, &bd, 1); + o = htable_lookup_nowait(s, &bd, f); + cfs_hash_bd_unlock(hs, &bd, 1); + if (!IS_ERR(o)) { + set_bit(LU_OBJECT_HEARD_BANSHEE, &o->lo_header->loh_flags); + lu_object_put(env, o); + } +} +EXPORT_SYMBOL(lu_object_purge); + +/** * Find object with given fid, and return its slice belonging to given device. 
*/ struct lu_object *lu_object_find_slice(const struct lu_env *env, @@ -697,20 +792,22 @@ static CFS_LIST_HEAD(lu_device_types); int lu_device_type_init(struct lu_device_type *ldt) { - int result; + int result = 0; - CFS_INIT_LIST_HEAD(&ldt->ldt_linkage); - result = ldt->ldt_ops->ldto_init(ldt); - if (result == 0) - cfs_list_add(&ldt->ldt_linkage, &lu_device_types); - return result; + CFS_INIT_LIST_HEAD(&ldt->ldt_linkage); + if (ldt->ldt_ops->ldto_init) + result = ldt->ldt_ops->ldto_init(ldt); + if (result == 0) + cfs_list_add(&ldt->ldt_linkage, &lu_device_types); + return result; } EXPORT_SYMBOL(lu_device_type_init); void lu_device_type_fini(struct lu_device_type *ldt) { - cfs_list_del_init(&ldt->ldt_linkage); - ldt->ldt_ops->ldto_fini(ldt); + cfs_list_del_init(&ldt->ldt_linkage); + if (ldt->ldt_ops->ldto_fini) + ldt->ldt_ops->ldto_fini(ldt); } EXPORT_SYMBOL(lu_device_type_fini); @@ -718,10 +815,10 @@ void lu_types_stop(void) { struct lu_device_type *ldt; - cfs_list_for_each_entry(ldt, &lu_device_types, ldt_linkage) { - if (ldt->ldt_device_nr == 0) - ldt->ldt_ops->ldto_stop(ldt); - } + cfs_list_for_each_entry(ldt, &lu_device_types, ldt_linkage) { + if (ldt->ldt_device_nr == 0 && ldt->ldt_ops->ldto_stop) + ldt->ldt_ops->ldto_stop(ldt); + } } EXPORT_SYMBOL(lu_types_stop); @@ -729,7 +826,7 @@ EXPORT_SYMBOL(lu_types_stop); * Global list of all sites on this node */ static CFS_LIST_HEAD(lu_sites); -static CFS_DEFINE_MUTEX(lu_sites_guard); +static DEFINE_MUTEX(lu_sites_guard); /** * Global environment used by site shrinker. @@ -803,12 +900,12 @@ static int lu_htable_order(void) * * Size of lu_object is (arbitrary) taken as 1K (together with inode). */ - cache_size = cfs_num_physpages; + cache_size = num_physpages; #if BITS_PER_LONG == 32 /* limit hashtable size for lowmem systems to low RAM */ - if (cache_size > 1 << (30 - CFS_PAGE_SHIFT)) - cache_size = 1 << (30 - CFS_PAGE_SHIFT) * 3 / 4; + if (cache_size > 1 << (30 - PAGE_CACHE_SHIFT)) + cache_size = 1 << (30 - PAGE_CACHE_SHIFT) * 3 / 4; #endif /* clear off unreasonable cache setting. 
*/ @@ -821,7 +918,7 @@ static int lu_htable_order(void) lu_cache_percent = LU_CACHE_PERCENT_DEFAULT; } cache_size = cache_size / 100 * lu_cache_percent * - (CFS_PAGE_SIZE / 1024); + (PAGE_CACHE_SIZE / 1024); for (bits = 1; (1 << bits) < cache_size; ++bits) { ; @@ -900,18 +997,18 @@ cfs_hash_ops_t lu_site_hash_ops = { void lu_dev_add_linkage(struct lu_site *s, struct lu_device *d) { - cfs_spin_lock(&s->ls_ld_lock); + spin_lock(&s->ls_ld_lock); if (cfs_list_empty(&d->ld_linkage)) cfs_list_add(&d->ld_linkage, &s->ls_ld_linkage); - cfs_spin_unlock(&s->ls_ld_lock); + spin_unlock(&s->ls_ld_lock); } EXPORT_SYMBOL(lu_dev_add_linkage); void lu_dev_del_linkage(struct lu_site *s, struct lu_device *d) { - cfs_spin_lock(&s->ls_ld_lock); + spin_lock(&s->ls_ld_lock); cfs_list_del_init(&d->ld_linkage); - cfs_spin_unlock(&s->ls_ld_lock); + spin_unlock(&s->ls_ld_lock); } EXPORT_SYMBOL(lu_dev_del_linkage); @@ -991,11 +1088,11 @@ int lu_site_init(struct lu_site *s, struct lu_device *top) lu_ref_add(&top->ld_reference, "site-top", s); CFS_INIT_LIST_HEAD(&s->ls_ld_linkage); - cfs_spin_lock_init(&s->ls_ld_lock); + spin_lock_init(&s->ls_ld_lock); lu_dev_add_linkage(s, top); - RETURN(0); + RETURN(0); } EXPORT_SYMBOL(lu_site_init); @@ -1004,9 +1101,9 @@ EXPORT_SYMBOL(lu_site_init); */ void lu_site_fini(struct lu_site *s) { - cfs_mutex_lock(&lu_sites_guard); + mutex_lock(&lu_sites_guard); cfs_list_del_init(&s->ls_linkage); - cfs_mutex_unlock(&lu_sites_guard); + mutex_unlock(&lu_sites_guard); if (s->ls_obj_hash != NULL) { cfs_hash_putref(s->ls_obj_hash); @@ -1031,11 +1128,11 @@ EXPORT_SYMBOL(lu_site_fini); int lu_site_init_finish(struct lu_site *s) { int result; - cfs_mutex_lock(&lu_sites_guard); + mutex_lock(&lu_sites_guard); result = lu_context_refill(&lu_shrink_env.le_ctx); if (result == 0) cfs_list_add(&s->ls_linkage, &lu_sites); - cfs_mutex_unlock(&lu_sites_guard); + mutex_unlock(&lu_sites_guard); return result; } EXPORT_SYMBOL(lu_site_init_finish); @@ -1101,16 +1198,17 @@ EXPORT_SYMBOL(lu_device_fini); * Initialize object \a o that is part of compound object \a h and was created * by device \a d. */ -int lu_object_init(struct lu_object *o, - struct lu_object_header *h, struct lu_device *d) +int lu_object_init(struct lu_object *o, struct lu_object_header *h, + struct lu_device *d) { - memset(o, 0, sizeof *o); - o->lo_header = h; - o->lo_dev = d; - lu_device_get(d); - o->lo_dev_ref = lu_ref_add(&d->ld_reference, "lu_object", o); - CFS_INIT_LIST_HEAD(&o->lo_linkage); - return 0; + memset(o, 0, sizeof(*o)); + o->lo_header = h; + o->lo_dev = d; + lu_device_get(d); + lu_ref_add_at(&d->ld_reference, &o->lo_dev_ref, "lu_object", o); + CFS_INIT_LIST_HEAD(&o->lo_linkage); + + return 0; } EXPORT_SYMBOL(lu_object_init); @@ -1119,16 +1217,16 @@ EXPORT_SYMBOL(lu_object_init); */ void lu_object_fini(struct lu_object *o) { - struct lu_device *dev = o->lo_dev; + struct lu_device *dev = o->lo_dev; - LASSERT(cfs_list_empty(&o->lo_linkage)); + LASSERT(cfs_list_empty(&o->lo_linkage)); - if (dev != NULL) { - lu_ref_del_at(&dev->ld_reference, - o->lo_dev_ref , "lu_object", o); - lu_device_put(dev); - o->lo_dev = NULL; - } + if (dev != NULL) { + lu_ref_del_at(&dev->ld_reference, &o->lo_dev_ref, + "lu_object", o); + lu_device_put(dev); + o->lo_dev = NULL; + } } EXPORT_SYMBOL(lu_object_fini); @@ -1243,7 +1341,7 @@ enum { /** * Maximal number of tld slots. 
*/ - LU_CONTEXT_KEY_NR = 32 + LU_CONTEXT_KEY_NR = 40 }; static struct lu_context_key *lu_keys[LU_CONTEXT_KEY_NR] = { NULL, }; @@ -1272,7 +1370,7 @@ int lu_context_key_register(struct lu_context_key *key) LASSERT(key->lct_owner != NULL); result = -ENFILE; - cfs_spin_lock(&lu_keys_guard); + spin_lock(&lu_keys_guard); for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) { if (lu_keys[i] == NULL) { key->lct_index = i; @@ -1284,8 +1382,8 @@ int lu_context_key_register(struct lu_context_key *key) break; } } - cfs_spin_unlock(&lu_keys_guard); - return result; + spin_unlock(&lu_keys_guard); + return result; } EXPORT_SYMBOL(lu_context_key_register); @@ -1317,23 +1415,23 @@ static void key_fini(struct lu_context *ctx, int index) */ void lu_context_key_degister(struct lu_context_key *key) { - LASSERT(cfs_atomic_read(&key->lct_used) >= 1); - LINVRNT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys)); + LASSERT(cfs_atomic_read(&key->lct_used) >= 1); + LINVRNT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys)); - lu_context_key_quiesce(key); + lu_context_key_quiesce(key); - ++key_set_version; - cfs_spin_lock(&lu_keys_guard); - key_fini(&lu_shrink_env.le_ctx, key->lct_index); - if (lu_keys[key->lct_index]) { - lu_keys[key->lct_index] = NULL; - lu_ref_fini(&key->lct_reference); - } - cfs_spin_unlock(&lu_keys_guard); + ++key_set_version; + spin_lock(&lu_keys_guard); + key_fini(&lu_shrink_env.le_ctx, key->lct_index); + if (lu_keys[key->lct_index]) { + lu_keys[key->lct_index] = NULL; + lu_ref_fini(&key->lct_reference); + } + spin_unlock(&lu_keys_guard); - LASSERTF(cfs_atomic_read(&key->lct_used) == 1, - "key has instances: %d\n", - cfs_atomic_read(&key->lct_used)); + LASSERTF(cfs_atomic_read(&key->lct_used) == 1, + "key has instances: %d\n", + cfs_atomic_read(&key->lct_used)); } EXPORT_SYMBOL(lu_context_key_degister); @@ -1444,24 +1542,22 @@ static CFS_LIST_HEAD(lu_context_remembered); void lu_context_key_quiesce(struct lu_context_key *key) { struct lu_context *ctx; - extern unsigned cl_env_cache_purge(unsigned nr); if (!(key->lct_tags & LCT_QUIESCENT)) { /* * XXX layering violation. */ - cl_env_cache_purge(~0); key->lct_tags |= LCT_QUIESCENT; /* * XXX memory barrier has to go here. 
*/ - cfs_spin_lock(&lu_keys_guard); - cfs_list_for_each_entry(ctx, &lu_context_remembered, - lc_remember) - key_fini(ctx, key->lct_index); - cfs_spin_unlock(&lu_keys_guard); - ++key_set_version; - } + spin_lock(&lu_keys_guard); + cfs_list_for_each_entry(ctx, &lu_context_remembered, + lc_remember) + key_fini(ctx, key->lct_index); + spin_unlock(&lu_keys_guard); + ++key_set_version; + } } EXPORT_SYMBOL(lu_context_key_quiesce); @@ -1546,13 +1642,13 @@ int lu_context_init(struct lu_context *ctx, __u32 tags) { int rc; - memset(ctx, 0, sizeof *ctx); - ctx->lc_state = LCS_INITIALIZED; - ctx->lc_tags = tags; - if (tags & LCT_REMEMBER) { - cfs_spin_lock(&lu_keys_guard); - cfs_list_add(&ctx->lc_remember, &lu_context_remembered); - cfs_spin_unlock(&lu_keys_guard); + memset(ctx, 0, sizeof *ctx); + ctx->lc_state = LCS_INITIALIZED; + ctx->lc_tags = tags; + if (tags & LCT_REMEMBER) { + spin_lock(&lu_keys_guard); + cfs_list_add(&ctx->lc_remember, &lu_context_remembered); + spin_unlock(&lu_keys_guard); } else { CFS_INIT_LIST_HEAD(&ctx->lc_remember); } @@ -1578,10 +1674,10 @@ void lu_context_fini(struct lu_context *ctx) keys_fini(ctx); } else { /* could race with key degister */ - cfs_spin_lock(&lu_keys_guard); + spin_lock(&lu_keys_guard); keys_fini(ctx); cfs_list_del_init(&ctx->lc_remember); - cfs_spin_unlock(&lu_keys_guard); + spin_unlock(&lu_keys_guard); } } EXPORT_SYMBOL(lu_context_fini); @@ -1644,37 +1740,37 @@ __u32 lu_session_tags_default = 0; void lu_context_tags_update(__u32 tags) { - cfs_spin_lock(&lu_keys_guard); - lu_context_tags_default |= tags; - key_set_version ++; - cfs_spin_unlock(&lu_keys_guard); + spin_lock(&lu_keys_guard); + lu_context_tags_default |= tags; + key_set_version++; + spin_unlock(&lu_keys_guard); } EXPORT_SYMBOL(lu_context_tags_update); void lu_context_tags_clear(__u32 tags) { - cfs_spin_lock(&lu_keys_guard); - lu_context_tags_default &= ~tags; - key_set_version ++; - cfs_spin_unlock(&lu_keys_guard); + spin_lock(&lu_keys_guard); + lu_context_tags_default &= ~tags; + key_set_version++; + spin_unlock(&lu_keys_guard); } EXPORT_SYMBOL(lu_context_tags_clear); void lu_session_tags_update(__u32 tags) { - cfs_spin_lock(&lu_keys_guard); - lu_session_tags_default |= tags; - key_set_version ++; - cfs_spin_unlock(&lu_keys_guard); + spin_lock(&lu_keys_guard); + lu_session_tags_default |= tags; + key_set_version++; + spin_unlock(&lu_keys_guard); } EXPORT_SYMBOL(lu_session_tags_update); void lu_session_tags_clear(__u32 tags) { - cfs_spin_lock(&lu_keys_guard); - lu_session_tags_default &= ~tags; - key_set_version ++; - cfs_spin_unlock(&lu_keys_guard); + spin_lock(&lu_keys_guard); + lu_session_tags_default &= ~tags; + key_set_version++; + spin_unlock(&lu_keys_guard); } EXPORT_SYMBOL(lu_session_tags_clear); @@ -1737,7 +1833,7 @@ int lu_env_refill_by_tags(struct lu_env *env, __u32 ctags, } EXPORT_SYMBOL(lu_env_refill_by_tags); -static struct cfs_shrinker *lu_site_shrinker = NULL; +static struct shrinker *lu_site_shrinker; typedef struct lu_site_stats{ unsigned lss_populated; @@ -1776,6 +1872,24 @@ static void lu_site_stats_get(cfs_hash_t *hs, #ifdef __KERNEL__ +/* + * There exists a potential lock inversion deadlock scenario when using + * Lustre on top of ZFS. This occurs between one of ZFS's + * buf_hash_table.ht_lock's, and Lustre's lu_sites_guard lock. Essentially, + * thread A will take the lu_sites_guard lock and sleep on the ht_lock, + * while thread B will take the ht_lock and sleep on the lu_sites_guard + * lock. 
Obviously neither thread will wake and drop their respective hold + * on their lock. + * + * To prevent this from happening we must ensure the lu_sites_guard lock is + * not taken while down this code path. ZFS reliably does not set the + * __GFP_FS bit in its code paths, so this can be used to determine if it + * is safe to take the lu_sites_guard lock. + * + * Ideally we should accurately return the remaining number of cached + * objects without taking the lu_sites_guard lock, but this is not + * possible in the current implementation. + */ static int lu_cache_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask)) { lu_site_stats_t stats; @@ -1785,13 +1899,27 @@ static int lu_cache_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask)) int remain = shrink_param(sc, nr_to_scan); CFS_LIST_HEAD(splice); - if (remain != 0) { - if (!(shrink_param(sc, gfp_mask) & __GFP_FS)) + if (!(shrink_param(sc, gfp_mask) & __GFP_FS)) { + if (remain != 0) return -1; - CDEBUG(D_INODE, "Shrink %d objects\n", remain); + else + /* We must not take the lu_sites_guard lock when + * __GFP_FS is *not* set because of the deadlock + * possibility detailed above. Additionally, + * since we cannot determine the number of + * objects in the cache without taking this + * lock, we're in a particularly tough spot. As + * a result, we'll just lie and say our cache is + * empty. This _should_ be ok, as we can't + * reclaim objects when __GFP_FS is *not* set + * anyways. + */ + return 0; } - cfs_mutex_lock(&lu_sites_guard); + CDEBUG(D_INODE, "Shrink %d objects\n", remain); + + mutex_lock(&lu_sites_guard); cfs_list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) { if (shrink_param(sc, nr_to_scan) != 0) { remain = lu_site_purge(&lu_shrink_env, s, remain); @@ -1809,7 +1937,7 @@ static int lu_cache_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask)) break; } cfs_list_splice(&splice, lu_sites.prev); - cfs_mutex_unlock(&lu_sites_guard); + mutex_unlock(&lu_sites_guard); cached = (cached / 100) * sysctl_vfs_cache_pressure; if (shrink_param(sc, nr_to_scan) == 0) @@ -1840,9 +1968,9 @@ int lu_printk_printer(const struct lu_env *env, return 0; } -void lu_debugging_setup(void) +int lu_debugging_setup(void) { - lu_env_init(&lu_debugging_env, ~0); + return lu_env_init(&lu_debugging_env, ~0); } void lu_context_keys_dump(void) @@ -1872,17 +2000,6 @@ static int lu_cache_shrink(int nr, unsigned int gfp_mask) } #endif /* __KERNEL__ */ -int cl_global_init(void); -void cl_global_fini(void); -int lu_ref_global_init(void); -void lu_ref_global_fini(void); - -int dt_global_init(void); -void dt_global_fini(void); - -int llo_global_init(void); -void llo_global_fini(void); - /** * Initialization of global lu_* data. */ @@ -1900,14 +2017,15 @@ int lu_global_init(void) result = lu_context_key_register(&lu_global_key); if (result != 0) return result; + /* * At this level, we don't know what tags are needed, so allocate them * conservatively. This should not be too bad, because this * environment is global. */ - cfs_mutex_lock(&lu_sites_guard); + mutex_lock(&lu_sites_guard); result = lu_env_init(&lu_shrink_env, LCT_SHRINKER); - cfs_mutex_unlock(&lu_sites_guard); + mutex_unlock(&lu_sites_guard); if (result != 0) return result; @@ -1916,26 +2034,10 @@ int lu_global_init(void) * inode, one for ea. Unfortunately setting this high value results in * lu_object/inode cache consuming all the memory. 
*/ - lu_site_shrinker = cfs_set_shrinker(CFS_DEFAULT_SEEKS, lu_cache_shrink); + lu_site_shrinker = set_shrinker(DEFAULT_SEEKS, lu_cache_shrink); if (lu_site_shrinker == NULL) return -ENOMEM; - result = lu_time_global_init(); - if (result) - GOTO(out, result); - -#ifdef __KERNEL__ - result = dt_global_init(); - if (result) - GOTO(out, result); - - result = llo_global_init(); - if (result) - GOTO(out, result); -#endif - result = cl_global_init(); -out: - return result; } @@ -1944,36 +2046,24 @@ out: */ void lu_global_fini(void) { - cl_global_fini(); -#ifdef __KERNEL__ - llo_global_fini(); - dt_global_fini(); -#endif - lu_time_global_fini(); if (lu_site_shrinker != NULL) { - cfs_remove_shrinker(lu_site_shrinker); + remove_shrinker(lu_site_shrinker); lu_site_shrinker = NULL; } - lu_context_key_degister(&lu_global_key); + lu_context_key_degister(&lu_global_key); /* * Tear shrinker environment down _after_ de-registering * lu_global_key, because the latter has a value in the former. */ - cfs_mutex_lock(&lu_sites_guard); + mutex_lock(&lu_sites_guard); lu_env_fini(&lu_shrink_env); - cfs_mutex_unlock(&lu_sites_guard); + mutex_unlock(&lu_sites_guard); lu_ref_global_fini(); } -struct lu_buf LU_BUF_NULL = { - .lb_buf = NULL, - .lb_len = 0 -}; -EXPORT_SYMBOL(LU_BUF_NULL); - static __u32 ls_stats_read(struct lprocfs_stats *stats, int idx) { #ifdef LPROCFS @@ -2012,13 +2102,6 @@ int lu_site_stats_print(const struct lu_site *s, char *page, int count) } EXPORT_SYMBOL(lu_site_stats_print); -const char *lu_time_names[LU_TIME_NR] = { - [LU_TIME_FIND_LOOKUP] = "find_lookup", - [LU_TIME_FIND_ALLOC] = "find_alloc", - [LU_TIME_FIND_INSERT] = "find_insert" -}; -EXPORT_SYMBOL(lu_time_names); - /** * Helper function to initialize a number of kmem slab caches at once. */ @@ -2028,9 +2111,9 @@ int lu_kmem_init(struct lu_kmem_descr *caches) struct lu_kmem_descr *iter = caches; for (result = 0; iter->ckd_cache != NULL; ++iter) { - *iter->ckd_cache = cfs_mem_cache_create(iter->ckd_name, - iter->ckd_size, - 0, 0); + *iter->ckd_cache = kmem_cache_create(iter->ckd_name, + iter->ckd_size, + 0, 0, NULL); if (*iter->ckd_cache == NULL) { result = -ENOMEM; /* free all previously allocated caches */ @@ -2048,13 +2131,9 @@ EXPORT_SYMBOL(lu_kmem_init); */ void lu_kmem_fini(struct lu_kmem_descr *caches) { - int rc; - for (; caches->ckd_cache != NULL; ++caches) { if (*caches->ckd_cache != NULL) { - rc = cfs_mem_cache_destroy(*caches->ckd_cache); - LASSERTF(rc == 0, "couldn't destroy %s slab\n", - caches->ckd_name); + kmem_cache_destroy(*caches->ckd_cache); *caches->ckd_cache = NULL; } } @@ -2083,7 +2162,7 @@ void lu_object_assign_fid(const struct lu_env *env, struct lu_object *o, cfs_hash_bd_get_and_lock(hs, (void *)fid, &bd, 1); shadow = htable_lookup(s, &bd, fid, &waiter, &version); /* supposed to be unique */ - LASSERT(shadow == NULL); + LASSERT(IS_ERR(shadow) && PTR_ERR(shadow) == -ENOENT); *old = *fid; bkt = cfs_hash_bd_extra_get(hs, &bd); cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash); @@ -2091,3 +2170,100 @@ void lu_object_assign_fid(const struct lu_env *env, struct lu_object *o, cfs_hash_bd_unlock(hs, &bd, 1); } EXPORT_SYMBOL(lu_object_assign_fid); + +/** + * allocates object with 0 (non-assiged) fid + * XXX: temporary solution to be able to assign fid in ->do_create() + * till we have fully-functional OST fids + */ +struct lu_object *lu_object_anon(const struct lu_env *env, + struct lu_device *dev, + const struct lu_object_conf *conf) +{ + struct lu_fid fid; + struct lu_object *o; + + fid_zero(&fid); + o = 
lu_object_alloc(env, dev, &fid, conf); + + return o; +} +EXPORT_SYMBOL(lu_object_anon); + +struct lu_buf LU_BUF_NULL = { + .lb_buf = NULL, + .lb_len = 0 +}; +EXPORT_SYMBOL(LU_BUF_NULL); + +void lu_buf_free(struct lu_buf *buf) +{ + LASSERT(buf); + if (buf->lb_buf) { + LASSERT(buf->lb_len > 0); + OBD_FREE_LARGE(buf->lb_buf, buf->lb_len); + buf->lb_buf = NULL; + buf->lb_len = 0; + } +} +EXPORT_SYMBOL(lu_buf_free); + +void lu_buf_alloc(struct lu_buf *buf, int size) +{ + LASSERT(buf); + LASSERT(buf->lb_buf == NULL); + LASSERT(buf->lb_len == 0); + OBD_ALLOC_LARGE(buf->lb_buf, size); + if (likely(buf->lb_buf)) + buf->lb_len = size; +} +EXPORT_SYMBOL(lu_buf_alloc); + +void lu_buf_realloc(struct lu_buf *buf, int size) +{ + lu_buf_free(buf); + lu_buf_alloc(buf, size); +} +EXPORT_SYMBOL(lu_buf_realloc); + +struct lu_buf *lu_buf_check_and_alloc(struct lu_buf *buf, int len) +{ + if (buf->lb_buf == NULL && buf->lb_len == 0) + lu_buf_alloc(buf, len); + + if ((len > buf->lb_len) && (buf->lb_buf != NULL)) + lu_buf_realloc(buf, len); + + return buf; +} +EXPORT_SYMBOL(lu_buf_check_and_alloc); + +/** + * Increase the size of the \a buf. + * preserves old data in buffer + * old buffer remains unchanged on error + * \retval 0 or -ENOMEM + */ +int lu_buf_check_and_grow(struct lu_buf *buf, int len) +{ + char *ptr; + + if (len <= buf->lb_len) + return 0; + + OBD_ALLOC_LARGE(ptr, len); + if (ptr == NULL) + return -ENOMEM; + + /* Free the old buf */ + if (buf->lb_buf != NULL) { + memcpy(ptr, buf->lb_buf, buf->lb_len); + OBD_FREE_LARGE(buf->lb_buf, buf->lb_len); + } + + buf->lb_buf = ptr; + buf->lb_len = len; + return 0; +} +EXPORT_SYMBOL(lu_buf_check_and_grow); +
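
Note: the patch changes htable_lookup() to return ERR_PTR(-ENOENT) on a cache miss instead of NULL, so callers such as lu_object_find_try() can tell "not found" apart from "found but dying" (-EAGAIN). Below is a minimal stand-alone sketch of the ERR_PTR/IS_ERR/PTR_ERR idiom that the new return convention relies on; demo_obj and demo_lookup are hypothetical names, and the helpers are re-implemented only so the example compiles outside the kernel.

/* Minimal userspace sketch of the ERR_PTR/IS_ERR/PTR_ERR idiom used by the
 * reworked htable_lookup().  demo_obj and demo_lookup() are hypothetical;
 * the helpers mirror the kernel ones only so this builds stand-alone. */
#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

struct demo_obj { int fid; };
static struct demo_obj cached = { .fid = 42 };

/* Returns the object, ERR_PTR(-ENOENT) on a miss, or ERR_PTR(-EAGAIN)
 * when the cached object is dying and the caller must retry. */
static struct demo_obj *demo_lookup(int fid, int dying)
{
	if (fid != cached.fid)
		return ERR_PTR(-ENOENT);
	if (dying)
		return ERR_PTR(-EAGAIN);
	return &cached;
}

int main(void)
{
	struct demo_obj *o = demo_lookup(7, 0);

	/* Mirrors the new caller check: only fall through to allocation
	 * when the lookup reported -ENOENT, not on -EAGAIN. */
	if (!IS_ERR(o) || PTR_ERR(o) != -ENOENT)
		printf("hit or retry: %ld\n", IS_ERR(o) ? PTR_ERR(o) : 0L);
	else
		printf("miss: allocate a new object\n");
	return 0;
}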
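
Note: both lu_object_put() and the new lu_object_unhash() may now attempt to remove a header from the site hash, so the patch guards the removal with test_and_set_bit(LU_OBJECT_UNHASHED, ...), making it happen exactly once regardless of which path runs first. The sketch below reproduces that "first caller wins" pattern using a GCC/Clang atomic builtin in place of the kernel's test_and_set_bit; DEMO_UNHASHED and demo_unhash() are hypothetical names.

/* Sketch of the "unhash exactly once" guard added to lu_object_put() and
 * lu_object_unhash().  test_and_set_bit() is emulated with an atomic
 * builtin; DEMO_UNHASHED and demo_unhash() are illustrative names only. */
#include <stdio.h>

#define DEMO_UNHASHED 0

static unsigned long flags;

/* Atomically set bit nr and return its previous value. */
static int demo_test_and_set_bit(int nr, unsigned long *addr)
{
	unsigned long mask = 1UL << nr;
	unsigned long old = __atomic_fetch_or(addr, mask, __ATOMIC_SEQ_CST);

	return (old & mask) != 0;
}

static void demo_unhash(const char *who)
{
	if (!demo_test_and_set_bit(DEMO_UNHASHED, &flags))
		printf("%s: removed object from hash\n", who);
	else
		printf("%s: already unhashed, nothing to do\n", who);
}

int main(void)
{
	demo_unhash("lu_object_unhash");	/* first caller removes it */
	demo_unhash("lu_object_put");		/* second caller is a no-op */
	return 0;
}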
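
Note: the new lu_buf_check_and_grow() allocates the larger buffer first, copies the old contents, and only then frees the old allocation, so the caller's data survives a grow and the old buffer is left untouched on -ENOMEM. A stand-alone sketch of the same logic, with malloc/free standing in for OBD_ALLOC_LARGE/OBD_FREE_LARGE and a hypothetical demo_buf in place of struct lu_buf:

/* Grow-preserving-contents logic as in lu_buf_check_and_grow(): allocate,
 * copy, then free the old buffer, and leave it as-is on allocation failure.
 * malloc/free replace OBD_ALLOC_LARGE/OBD_FREE_LARGE; struct demo_buf is a
 * hypothetical stand-in for struct lu_buf. */
#include <errno.h>
#include <stdlib.h>
#include <string.h>

struct demo_buf {
	void	*db_buf;
	size_t	 db_len;
};

static int demo_buf_check_and_grow(struct demo_buf *buf, size_t len)
{
	char *ptr;

	if (len <= buf->db_len)
		return 0;		/* already big enough */

	ptr = malloc(len);
	if (ptr == NULL)
		return -ENOMEM;		/* old buffer is left unchanged */

	if (buf->db_buf != NULL) {
		memcpy(ptr, buf->db_buf, buf->db_len);
		free(buf->db_buf);
	}
	buf->db_buf = ptr;
	buf->db_len = len;
	return 0;
}

int main(void)
{
	struct demo_buf buf = { NULL, 0 };

	demo_buf_check_and_grow(&buf, 16);
	memcpy(buf.db_buf, "hello", 6);
	demo_buf_check_and_grow(&buf, 64);	/* "hello" survives the grow */
	free(buf.db_buf);
	return 0;
}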
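
Note: lu_htable_order() sizes the object hash from total RAM, assuming roughly 1 KiB per cached object, scaling by lu_cache_percent, and rounding the resulting object count up to the next power of two to obtain the hash order. A quick stand-alone sketch of that arithmetic; the page size and percentage are example inputs, not the kernel's runtime values.

/* Illustrative sketch of the lu_htable_order() sizing arithmetic: assume one
 * cached object costs ~1 KiB, reserve cache_percent of RAM for the cache, and
 * round up to a power-of-two number of hash buckets.  All inputs below are
 * example values. */
#include <stdio.h>

int main(void)
{
	unsigned long num_pages     = 4UL << 20;  /* e.g. 16 GiB of 4 KiB pages */
	unsigned long page_size     = 4096;
	unsigned long cache_percent = 20;         /* LU_CACHE_PERCENT_DEFAULT-style */
	unsigned long cache_size;
	int bits;

	/* objects the cache may hold: pages * percent/100 * (page / 1 KiB) */
	cache_size = num_pages / 100 * cache_percent * (page_size / 1024);

	for (bits = 1; (1UL << bits) < cache_size; ++bits)
		;

	printf("cache_size=%lu objects -> hash order %d (%lu buckets)\n",
	       cache_size, bits, 1UL << bits);
	return 0;
}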