* Use is subject to license terms.
*/
/*
+ * Copyright (c) 2011 Whamcloud, Inc.
+ */
+/*
* This file is part of Lustre, http://www.lustre.org/
* Lustre is a trademark of Sun Microsystems, Inc.
*
}
EXPORT_SYMBOL(lu_object_find);
+/**
+ * Allocate a new object for FID \a f and insert it into the site hash
+ * table directly, without a preliminary lookup.
+ *
+ * Used for the LOC_F_NEW case: the caller guarantees the object is newly
+ * created, so the usual lookup-alloc-lookup-insert sequence performed by
+ * lu_object_find() can be skipped.
+ *
+ * \retval the new object on success, or an ERR_PTR() if allocation fails.
+ */
+static struct lu_object *lu_object_new(const struct lu_env *env,
+ struct lu_device *dev,
+ const struct lu_fid *f,
+ const struct lu_object_conf *conf)
+{
+ struct lu_object *o;
+ cfs_hash_t *hs;
+ cfs_hash_bd_t bd;
+ struct lu_site_bkt_data *bkt;
+
+ o = lu_object_alloc(env, dev, f, conf);
+ if (unlikely(IS_ERR(o)))
+ return o;
+
+ hs = dev->ld_site->ls_obj_hash;
+ /* Lock the hash bucket for this FID, then add the object to both the
+ * hash chain and the bucket's LRU list, and bump the bucket busy count,
+ * all under the same bucket lock. */
+ cfs_hash_bd_get_and_lock(hs, (void *)f, &bd, 1);
+ bkt = cfs_hash_bd_extra_get(hs, &bd);
+ cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash);
+ cfs_list_add_tail(&o->lo_header->loh_lru, &bkt->lsb_lru);
+ bkt->lsb_busy++;
+ cfs_hash_bd_unlock(hs, &bd, 1);
+ return o;
+}
+
/**
* Core logic of lu_object_find*() functions.
*/
* - unlock index;
* - return object.
*
+ * For the "LOC_F_NEW" case, we are sure the object is newly created.
+ * It is unnecessary to perform lookup-alloc-lookup-insert; instead,
+ * just allocate and insert directly.
+ *
* If dying object is found during index search, add @waiter to the
* site wait-queue and return ERR_PTR(-EAGAIN).
*/
+ if (conf != NULL && conf->loc_flags & LOC_F_NEW)
+ return lu_object_new(env, dev, f, conf);
+
s = dev->ld_site;
hs = s->ls_obj_hash;
cfs_hash_bd_get_and_lock(hs, (void *)f, &bd, 1);
EXPORT_SYMBOL(lu_site_print);
enum {
- LU_CACHE_PERCENT = 20,
+ /* upper bound accepted for the lu_cache_percent module parameter */
+ LU_CACHE_PERCENT_MAX = 50,
+ /* value used when the parameter is unset or out of range */
+ LU_CACHE_PERCENT_DEFAULT = 20
};
+/* Percentage of total memory dedicated to the lu_object cache,
+ * settable as a module parameter (mode 0644). */
+static unsigned int lu_cache_percent = LU_CACHE_PERCENT_DEFAULT;
+CFS_MODULE_PARM(lu_cache_percent, "i", int, 0644,
+ "Percentage of memory to be used as lu_object cache");
+
/**
* Return desired hash table order.
*/
cache_size = 1 << (30 - CFS_PAGE_SHIFT) * 3 / 4;
#endif
- cache_size = cache_size / 100 * LU_CACHE_PERCENT *
+ /* Reject an unreasonable cache setting and fall back to the default. */
+ if (lu_cache_percent == 0 || lu_cache_percent > LU_CACHE_PERCENT_MAX) {
+ CWARN("obdclass: invalid lu_cache_percent: %u, it must be in"
+ " the range of (0, %u]. Will use default value: %u.\n",
+ lu_cache_percent, LU_CACHE_PERCENT_MAX,
+ LU_CACHE_PERCENT_DEFAULT);
+
+ lu_cache_percent = LU_CACHE_PERCENT_DEFAULT;
+ }
+ cache_size = cache_size / 100 * lu_cache_percent *
(CFS_PAGE_SIZE / 1024);
for (bits = 1; (1 << bits) < cache_size; ++bits) {
return bits;
}
-static unsigned lu_obj_hop_hash(cfs_hash_t *hs, void *key, unsigned mask)
+static unsigned lu_obj_hop_hash(cfs_hash_t *hs,
+ const void *key, unsigned mask)
{
struct lu_fid *fid = (struct lu_fid *)key;
unsigned hash;
return &h->loh_fid;
}
-static int lu_obj_hop_keycmp(void *key, cfs_hlist_node_t *hnode)
+static int lu_obj_hop_keycmp(const void *key, cfs_hlist_node_t *hnode)
{
struct lu_object_header *h;
}
#ifdef __KERNEL__
-static int KERN_SHRINKER(lu_cache_shrink)
+static int lu_cache_shrink(SHRINKER_FIRST_ARG int nr_to_scan,
+ unsigned int gfp_mask)
{
lu_site_stats_t stats;
struct lu_site *s;
{
int result;
- CDEBUG(D_CONSOLE, "Lustre LU module (%p).\n", &lu_keys);
+ CDEBUG(D_INFO, "Lustre LU module (%p).\n", &lu_keys);
result = lu_ref_global_init();
if (result != 0)
int lu_kmem_init(struct lu_kmem_descr *caches)
{
int result;
+ struct lu_kmem_descr *iter = caches;
- for (result = 0; caches->ckd_cache != NULL; ++caches) {
- *caches->ckd_cache = cfs_mem_cache_create(caches->ckd_name,
- caches->ckd_size,
- 0, 0);
- if (*caches->ckd_cache == NULL) {
+ for (result = 0; iter->ckd_cache != NULL; ++iter) {
+ *iter->ckd_cache = cfs_mem_cache_create(iter->ckd_name,
+ iter->ckd_size,
+ 0, 0);
+ if (*iter->ckd_cache == NULL) {
result = -ENOMEM;
+ /* free all previously allocated caches */
+ lu_kmem_fini(caches);
break;
}
}