LU-1017 handle -EAGAIN properly in lu_object_find_try()
diff --git a/lustre/obdclass/lu_object.c b/lustre/obdclass/lu_object.c
index 69aa691..6057254 100644
@@ -30,6 +30,9 @@
  * Use is subject to license terms.
  */
 /*
+ * Copyright (c) 2011 Whamcloud, Inc.
+ */
+/*
  * This file is part of Lustre, http://www.lustre.org/
  * Lustre is a trademark of Sun Microsystems, Inc.
  *
@@ -110,6 +113,8 @@ void lu_object_put(const struct lu_env *env, struct lu_object *o)
         }
 
         if (!lu_object_is_dying(top)) {
+                LASSERT(cfs_list_empty(&top->loh_lru));
+                cfs_list_add_tail(&top->loh_lru, &bkt->lsb_lru);
                 cfs_hash_bd_unlock(site->ls_obj_hash, &bd, 1);
                 return;
         }
@@ -126,7 +131,6 @@ void lu_object_put(const struct lu_env *env, struct lu_object *o)
          * and we can safely destroy object below.
          */
         cfs_hash_bd_del_locked(site->ls_obj_hash, &bd, &top->loh_hash);
-        cfs_list_del_init(&top->loh_lru);
         cfs_hash_bd_unlock(site->ls_obj_hash, &bd, 1);
         /*
          * Object was already removed from hash and lru above, can
@@ -282,20 +286,7 @@ int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr)
                 bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, &bd);
 
                 cfs_list_for_each_entry_safe(h, temp, &bkt->lsb_lru, loh_lru) {
-                        /*
-                         * Objects are sorted in lru order, and "busy"
-                         * objects (ones with h->loh_ref > 0) naturally tend to
-                         * live near hot end that we scan last. Unfortunately,
-                         * sites usually have small (less then ten) number of
-                         * busy yet rarely accessed objects (some global
-                         * objects, accessed directly through pointers,
-                         * bypassing hash table).
-                         * Currently algorithm scans them over and over again.
-                         * Probably we should move busy objects out of LRU,
-                         * or we can live with that.
-                         */
-                        if (cfs_atomic_read(&h->loh_ref) > 0)
-                                continue;
+                        LASSERT(cfs_atomic_read(&h->loh_ref) == 0);
 
                         cfs_hash_bd_get(s->ls_obj_hash, &h->loh_fid, &bd2);
                         LASSERT(bd.bd_bucket == bd2.bd_bucket);
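
With this change the LRU carries only idle objects: lu_object_put() parks an object on the LRU when its last reference is dropped, htable_lookup() takes it back off on a cache hit, and lu_object_find_try() no longer inserts new objects there at all. lu_site_purge() can therefore assert loh_ref == 0 instead of repeatedly skipping busy entries. A minimal user-space sketch of that invariant (not Lustre code; obj_get/obj_put/lru_purge and the singly linked LRU are illustrative stand-ins):

#include <assert.h>
#include <stdio.h>

struct obj {
        int         ref;
        int         on_lru;
        struct obj *lru_next;
};

static struct obj *lru_head;

static void obj_get(struct obj *o)
{
        /* cache hit: leave the LRU before handing out a reference */
        if (o->on_lru) {
                struct obj **p = &lru_head;

                while (*p != o)
                        p = &(*p)->lru_next;
                *p = o->lru_next;
                o->on_lru = 0;
        }
        o->ref++;
}

static void obj_put(struct obj *o)
{
        if (--o->ref == 0) {
                /* last reference dropped: park the object on the LRU */
                o->lru_next = lru_head;
                lru_head = o;
                o->on_lru = 1;
        }
}

static void lru_purge(void)
{
        while (lru_head != NULL) {
                struct obj *o = lru_head;

                assert(o->ref == 0);    /* mirrors the new LASSERT above */
                lru_head = o->lru_next;
                o->on_lru = 0;
        }
}

int main(void)
{
        struct obj a = { 0, 0, NULL };

        obj_get(&a);    /* busy: stays off the LRU */
        obj_put(&a);    /* idle: goes on the LRU */
        lru_purge();    /* every LRU entry is guaranteed idle */
        printf("a.ref=%d a.on_lru=%d\n", a.ref, a.on_lru);
        return 0;
}
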
@@ -364,7 +355,7 @@ enum {
          *
          * XXX overflow is not handled correctly.
          */
-        LU_CDEBUG_LINE = 256
+        LU_CDEBUG_LINE = 512
 };
 
 struct lu_cdebug_data {
@@ -513,6 +504,7 @@ static struct lu_object *htable_lookup(struct lu_site *s,
         h = container_of0(hnode, struct lu_object_header, loh_hash);
         if (likely(!lu_object_is_dying(h))) {
                 lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_HIT);
+                cfs_list_del_init(&h->loh_lru);
                 return lu_object_top(h);
         }
 
@@ -543,6 +535,29 @@ struct lu_object *lu_object_find(const struct lu_env *env,
 }
 EXPORT_SYMBOL(lu_object_find);
 
+static struct lu_object *lu_object_new(const struct lu_env *env,
+                                       struct lu_device *dev,
+                                       const struct lu_fid *f,
+                                       const struct lu_object_conf *conf)
+{
+        struct lu_object        *o;
+        cfs_hash_t              *hs;
+        cfs_hash_bd_t            bd;
+        struct lu_site_bkt_data *bkt;
+
+        o = lu_object_alloc(env, dev, f, conf);
+        if (unlikely(IS_ERR(o)))
+                return o;
+
+        hs = dev->ld_site->ls_obj_hash;
+        cfs_hash_bd_get_and_lock(hs, (void *)f, &bd, 1);
+        bkt = cfs_hash_bd_extra_get(hs, &bd);
+        cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash);
+        bkt->lsb_busy++;
+        cfs_hash_bd_unlock(hs, &bd, 1);
+        return o;
+}
+
 /**
  * Core logic of lu_object_find*() functions.
  */
@@ -572,9 +587,16 @@ static struct lu_object *lu_object_find_try(const struct lu_env *env,
          *     - unlock index;
          *     - return object.
          *
+         * In the "LOC_F_NEW" case we know the object is newly created, so
+         * there is no need to perform the lookup-alloc-lookup-insert dance;
+         * just allocate and insert it directly.
+         *
          * If dying object is found during index search, add @waiter to the
          * site wait-queue and return ERR_PTR(-EAGAIN).
          */
+        if (conf != NULL && conf->loc_flags & LOC_F_NEW)
+                return lu_object_new(env, dev, f, conf);
+
         s  = dev->ld_site;
         hs = s->ls_obj_hash;
         cfs_hash_bd_get_and_lock(hs, (void *)f, &bd, 1);
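
The fast path is selected by the caller through the lookup configuration. A hypothetical caller sketch (error handling trimmed; env, dev and fid are assumed to be set up already), for the case where the caller knows the FID cannot yet be cached, e.g. an object it has just created:

        struct lu_object_conf conf = { .loc_flags = LOC_F_NEW };
        struct lu_object *o;

        o = lu_object_find(env, dev, fid, &conf);
        if (IS_ERR(o))
                return PTR_ERR(o);
        /* ... use the object ... */
        lu_object_put(env, o);
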
@@ -596,21 +618,22 @@ static struct lu_object *lu_object_find_try(const struct lu_env *env,
         cfs_hash_bd_lock(hs, &bd, 1);
 
         shadow = htable_lookup(s, &bd, f, waiter, &version);
-        if (likely(shadow == NULL)) {
+        if (shadow == NULL) {
                 struct lu_site_bkt_data *bkt;
 
                 bkt = cfs_hash_bd_extra_get(hs, &bd);
                 cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash);
-                cfs_list_add_tail(&o->lo_header->loh_lru, &bkt->lsb_lru);
                 bkt->lsb_busy++;
                 cfs_hash_bd_unlock(hs, &bd, 1);
                 return o;
+        } else {
+                if (!cfs_list_empty(&shadow->lo_header->loh_lru))
+                        cfs_list_del_init(&shadow->lo_header->loh_lru);
+                lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_RACE);
+                cfs_hash_bd_unlock(hs, &bd, 1);
+                lu_object_free(env, o);
+                return shadow;
         }
-
-        lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_RACE);
-        cfs_hash_bd_unlock(hs, &bd, 1);
-        lu_object_free(env, o);
-        return shadow;
 }
 
 /**
@@ -754,9 +777,14 @@ void lu_site_print(const struct lu_env *env, struct lu_site *s, void *cookie,
 EXPORT_SYMBOL(lu_site_print);
 
 enum {
-        LU_CACHE_PERCENT   = 20,
+        LU_CACHE_PERCENT_MAX     = 50,
+        LU_CACHE_PERCENT_DEFAULT = 20
 };
 
+static unsigned int lu_cache_percent = LU_CACHE_PERCENT_DEFAULT;
+CFS_MODULE_PARM(lu_cache_percent, "i", int, 0644,
+                "Percentage of memory to be used as lu_object cache");
+
 /**
  * Return desired hash table order.
  */
@@ -780,7 +808,16 @@ static int lu_htable_order(void)
                 cache_size = 1 << (30 - CFS_PAGE_SHIFT) * 3 / 4;
 #endif
 
-        cache_size = cache_size / 100 * LU_CACHE_PERCENT *
+        /* reject an unreasonable cache percentage setting. */
+        if (lu_cache_percent == 0 || lu_cache_percent > LU_CACHE_PERCENT_MAX) {
+                CWARN("obdclass: invalid lu_cache_percent: %u, it must be in"
+                      " the range of (0, %u]. Will use default value: %u.\n",
+                      lu_cache_percent, LU_CACHE_PERCENT_MAX,
+                      LU_CACHE_PERCENT_DEFAULT);
+
+                lu_cache_percent = LU_CACHE_PERCENT_DEFAULT;
+        }
+        cache_size = cache_size / 100 * lu_cache_percent *
                 (CFS_PAGE_SIZE / 1024);
 
         for (bits = 1; (1 << bits) < cache_size; ++bits) {
@@ -789,13 +826,22 @@ static int lu_htable_order(void)
         return bits;
 }
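
The hash-table order derived from lu_cache_percent can be sanity-checked outside the kernel; below is a small user-space sketch of the same arithmetic (the page count and page size are example inputs, not the kernel's values):

#include <stdio.h>

int main(void)
{
        unsigned long total_pages      = 1UL << 20;  /* e.g. 4 GB of 4 KB pages */
        unsigned long page_size        = 4096;
        unsigned int  lu_cache_percent = 20;         /* the default */
        unsigned long cache_size;
        int bits;

        /* same formula as lu_htable_order(): a percentage of RAM, in KB units */
        cache_size = total_pages / 100 * lu_cache_percent * (page_size / 1024);

        for (bits = 1; (1UL << bits) < cache_size; ++bits)
                ;
        printf("cache_size=%lu -> hash table order %d\n", cache_size, bits);
        return 0;
}

In the kernel the resulting order is then clamped to the [LU_SITE_BITS_MIN, LU_SITE_BITS_MAX] range in lu_site_init().
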
 
-static unsigned lu_obj_hop_hash(cfs_hash_t *hs, void *key, unsigned mask)
+static unsigned lu_obj_hop_hash(cfs_hash_t *hs,
+                                const void *key, unsigned mask)
 {
         struct lu_fid  *fid = (struct lu_fid *)key;
-        unsigned        hash;
+        __u32           hash;
+
+        hash = fid_flatten32(fid);
+        hash += (hash >> 4) + (hash << 12); /* mixing oid and seq */
+        hash = cfs_hash_long(hash, hs->hs_bkt_bits);
+
+        /* give me another random factor */
+        hash -= cfs_hash_long((unsigned long)hs, fid_oid(fid) % 11 + 3);
+
+        hash <<= hs->hs_cur_bits - hs->hs_bkt_bits;
+        hash |= (fid_seq(fid) + fid_oid(fid)) & (CFS_HASH_NBKT(hs) - 1);
 
-        hash = (fid_seq(fid) + fid_oid(fid)) & (CFS_HASH_NBKT(hs) - 1);
-        hash += fid_hash(fid, hs->hs_bkt_bits) << hs->hs_bkt_bits;
         return hash & mask;
 }
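
The reworked hash keeps the bucket index in the low bits (so related FIDs still land in a bounded number of buckets) while the bits above it come from a well-mixed 32-bit hash; the extra perturbation by the hash-table pointer is omitted here. A user-space sketch of the same shape, with fid_flatten32() and cfs_hash_long() replaced by simplified stand-ins:

#include <stdio.h>
#include <stdint.h>

#define BKT_BITS  8U                 /* stand-in for hs->hs_bkt_bits */
#define CUR_BITS  16U                /* stand-in for hs->hs_cur_bits */
#define NBKT      (1U << BKT_BITS)   /* stand-in for CFS_HASH_NBKT(hs) */

/* simplified stand-in for cfs_hash_long(): golden-ratio multiply */
static uint32_t hash32(uint32_t val, unsigned int bits)
{
        return (uint32_t)(val * 0x9e370001UL) >> (32 - bits);
}

static uint32_t fid_hop_hash(uint32_t seq, uint32_t oid, uint32_t mask)
{
        uint32_t hash = seq ^ oid;              /* stand-in for fid_flatten32() */

        hash += (hash >> 4) + (hash << 12);     /* mixing oid and seq */
        hash  = hash32(hash, BKT_BITS);

        hash <<= CUR_BITS - BKT_BITS;           /* high bits: slot in the table */
        hash |= (seq + oid) & (NBKT - 1);       /* low bits: bucket index */
        return hash & mask;
}

int main(void)
{
        uint32_t mask = (1U << CUR_BITS) - 1;
        uint32_t oid;

        for (oid = 0; oid < 4; oid++)
                printf("seq=0x100 oid=%u -> 0x%04x\n",
                       oid, fid_hop_hash(0x100, oid, mask));
        return 0;
}
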
 
@@ -812,7 +858,7 @@ static void *lu_obj_hop_key(cfs_hlist_node_t *hnode)
         return &h->loh_fid;
 }
 
-static int lu_obj_hop_keycmp(void *key, cfs_hlist_node_t *hnode)
+static int lu_obj_hop_keycmp(const void *key, cfs_hlist_node_t *hnode)
 {
         struct lu_object_header *h;
 
@@ -853,27 +899,29 @@ cfs_hash_ops_t lu_site_hash_ops = {
  * Initialize site \a s, with \a d as the top level device.
  */
 #define LU_SITE_BITS_MIN    12
-#define LU_SITE_BITS_MAX    23
+#define LU_SITE_BITS_MAX    24
 /**
- * total 128 buckets, we don't want too many buckets because:
+ * 256 buckets in total; we don't want too many buckets because:
  * - consume too much memory
  * - avoid unbalanced LRU list
  */
-#define LU_SITE_BKT_BITS    7
+#define LU_SITE_BKT_BITS    8
 
 int lu_site_init(struct lu_site *s, struct lu_device *top)
 {
         struct lu_site_bkt_data *bkt;
         cfs_hash_bd_t bd;
+        char name[16];
         int bits;
         int i;
         ENTRY;
 
         memset(s, 0, sizeof *s);
         bits = lu_htable_order();
+        snprintf(name, 16, "lu_site_%s", top->ld_type->ldt_name);
         for (bits = min(max(LU_SITE_BITS_MIN, bits), LU_SITE_BITS_MAX);
              bits >= LU_SITE_BITS_MIN; bits--) {
-                s->ls_obj_hash = cfs_hash_create("lu_site", bits, bits,
+                s->ls_obj_hash = cfs_hash_create(name, bits, bits,
                                                  bits - LU_SITE_BKT_BITS,
                                                  sizeof(*bkt), 0, 0,
                                                  &lu_site_hash_ops,
@@ -922,6 +970,13 @@ int lu_site_init(struct lu_site *s, struct lu_device *top)
         lu_device_get(top);
         lu_ref_add(&top->ld_reference, "site-top", s);
 
+        CFS_INIT_LIST_HEAD(&s->ls_ld_linkage);
+        cfs_spin_lock_init(&s->ls_ld_lock);
+
+        cfs_spin_lock(&s->ls_ld_lock);
+        cfs_list_add(&top->ld_linkage, &s->ls_ld_linkage);
+        cfs_spin_unlock(&s->ls_ld_lock);
+
         RETURN(0);
 }
 EXPORT_SYMBOL(lu_site_init);
@@ -997,6 +1052,7 @@ int lu_device_init(struct lu_device *d, struct lu_device_type *t)
         cfs_atomic_set(&d->ld_ref, 0);
         d->ld_type = t;
         lu_ref_init(&d->ld_reference);
+        CFS_INIT_LIST_HEAD(&d->ld_linkage);
         return 0;
 }
 EXPORT_SYMBOL(lu_device_init);
@@ -1557,6 +1613,52 @@ int lu_context_refill(struct lu_context *ctx)
 }
 EXPORT_SYMBOL(lu_context_refill);
 
+/**
+ * lu_ctx_tags/lu_ses_tags are updated when new types of obd devices are
+ * added. Currently this is only used on the client side, specifically by
+ * the echo device client; for other stacks (such as ptlrpc threads) the
+ * contexts are predefined when the lu_device types are registered, during
+ * the module probe phase.
+ */
+__u32 lu_context_tags_default = 0;
+__u32 lu_session_tags_default = 0;
+
+void lu_context_tags_update(__u32 tags)
+{
+        cfs_spin_lock(&lu_keys_guard);
+        lu_context_tags_default |= tags;
+        key_set_version ++;
+        cfs_spin_unlock(&lu_keys_guard);
+}
+EXPORT_SYMBOL(lu_context_tags_update);
+
+void lu_context_tags_clear(__u32 tags)
+{
+        cfs_spin_lock(&lu_keys_guard);
+        lu_context_tags_default &= ~tags;
+        key_set_version ++;
+        cfs_spin_unlock(&lu_keys_guard);
+}
+EXPORT_SYMBOL(lu_context_tags_clear);
+
+void lu_session_tags_update(__u32 tags)
+{
+        cfs_spin_lock(&lu_keys_guard);
+        lu_session_tags_default |= tags;
+        key_set_version ++;
+        cfs_spin_unlock(&lu_keys_guard);
+}
+EXPORT_SYMBOL(lu_session_tags_update);
+
+void lu_session_tags_clear(__u32 tags)
+{
+        cfs_spin_lock(&lu_keys_guard);
+        lu_session_tags_default &= ~tags;
+        key_set_version ++;
+        cfs_spin_unlock(&lu_keys_guard);
+}
+EXPORT_SYMBOL(lu_session_tags_clear);
+
 int lu_env_init(struct lu_env *env, __u32 tags)
 {
         int result;
@@ -1588,6 +1690,34 @@ int lu_env_refill(struct lu_env *env)
 }
 EXPORT_SYMBOL(lu_env_refill);
 
+/**
+ * Currently, this API is only used by the echo client.
+ * Because the echo client and the normal lustre client share
+ * the same cl_env cache, the echo client needs to refresh
+ * the env context after it gets one from the cache, especially
+ * when a normal client and an echo client co-exist on the same node.
+ */
+int lu_env_refill_by_tags(struct lu_env *env, __u32 ctags,
+                          __u32 stags)
+{
+        int    result;
+
+        if ((env->le_ctx.lc_tags & ctags) != ctags) {
+                env->le_ctx.lc_version = 0;
+                env->le_ctx.lc_tags |= ctags;
+        }
+
+        if (env->le_ses && (env->le_ses->lc_tags & stags) != stags) {
+                env->le_ses->lc_version = 0;
+                env->le_ses->lc_tags |= stags;
+        }
+
+        result = lu_env_refill(env);
+
+        return result;
+}
+EXPORT_SYMBOL(lu_env_refill_by_tags);
+
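
A hypothetical caller holding a cached env (echo-client style) would refresh it as below; the exact tag values to pass depend on the device stack in use, the default masks are only one plausible choice:

        /* refresh a cached env so it carries the currently required tags */
        rc = lu_env_refill_by_tags(env, lu_context_tags_default,
                                   lu_session_tags_default);
        if (rc != 0)
                return rc;
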
 static struct cfs_shrinker *lu_site_shrinker = NULL;
 
 typedef struct lu_site_stats{
@@ -1626,24 +1756,25 @@ static void lu_site_stats_get(cfs_hash_t *hs,
 }
 
 #ifdef __KERNEL__
-static int KERN_SHRINKER(lu_cache_shrink)
+
+static int lu_cache_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
 {
         lu_site_stats_t stats;
         struct lu_site *s;
         struct lu_site *tmp;
         int cached = 0;
-        int remain = nr_to_scan;
+        int remain = shrink_param(sc, nr_to_scan);
         CFS_LIST_HEAD(splice);
 
-        if (nr_to_scan != 0) {
-                if (!(gfp_mask & __GFP_FS))
+        if (remain != 0) {
+                if (!(shrink_param(sc, gfp_mask) & __GFP_FS))
                         return -1;
-                CDEBUG(D_INODE, "Shrink %d objects\n", nr_to_scan);
+                CDEBUG(D_INODE, "Shrink %d objects\n", remain);
         }
 
         cfs_down(&lu_sites_guard);
         cfs_list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) {
-                if (nr_to_scan != 0) {
+                if (shrink_param(sc, nr_to_scan) != 0) {
                         remain = lu_site_purge(&lu_shrink_env, s, remain);
                         /*
                          * Move just shrunk site to the tail of site list to
@@ -1655,14 +1786,14 @@ static int KERN_SHRINKER(lu_cache_shrink)
                 memset(&stats, 0, sizeof(stats));
                 lu_site_stats_get(s->ls_obj_hash, &stats, 0);
                 cached += stats.lss_total - stats.lss_busy;
-                if (nr_to_scan && remain <= 0)
+                if (shrink_param(sc, nr_to_scan) && remain <= 0)
                         break;
         }
         cfs_list_splice(&splice, lu_sites.prev);
         cfs_up(&lu_sites_guard);
 
         cached = (cached / 100) * sysctl_vfs_cache_pressure;
-        if (nr_to_scan == 0)
+        if (shrink_param(sc, nr_to_scan) == 0)
                 CDEBUG(D_INODE, "%d objects cached\n", cached);
         return cached;
 }
@@ -1740,7 +1871,7 @@ int lu_global_init(void)
 {
         int result;
 
-        CDEBUG(D_CONSOLE, "Lustre LU module (%p).\n", &lu_keys);
+        CDEBUG(D_INFO, "Lustre LU module (%p).\n", &lu_keys);
 
         result = lu_ref_global_init();
         if (result != 0)
@@ -1875,13 +2006,16 @@ EXPORT_SYMBOL(lu_time_names);
 int lu_kmem_init(struct lu_kmem_descr *caches)
 {
         int result;
+        struct lu_kmem_descr *iter = caches;
 
-        for (result = 0; caches->ckd_cache != NULL; ++caches) {
-                *caches->ckd_cache = cfs_mem_cache_create(caches->ckd_name,
-                                                          caches->ckd_size,
-                                                          0, 0);
-                if (*caches->ckd_cache == NULL) {
+        for (result = 0; iter->ckd_cache != NULL; ++iter) {
+                *iter->ckd_cache = cfs_mem_cache_create(iter->ckd_name,
+                                                        iter->ckd_size,
+                                                        0, 0);
+                if (*iter->ckd_cache == NULL) {
                         result = -ENOMEM;
+                        /* free all previously allocated caches */
+                        lu_kmem_fini(caches);
                         break;
                 }
         }
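
The fix applies the usual unwind-on-partial-failure pattern for table-driven initialisation; here is a user-space analogue (malloc standing in for cfs_mem_cache_create, free for the cache destructor, and the names are illustrative, not Lustre's):

#include <stdlib.h>

struct kmem_descr {
        void      **cache;   /* where the created "cache" is stored */
        size_t      size;
};

static void kmem_fini(struct kmem_descr *caches)
{
        for (; caches->cache != NULL; ++caches) {
                free(*caches->cache);   /* free(NULL) is a no-op */
                *caches->cache = NULL;
        }
}

static int kmem_init(struct kmem_descr *caches)
{
        struct kmem_descr *iter;

        for (iter = caches; iter->cache != NULL; ++iter) {
                *iter->cache = malloc(iter->size);
                if (*iter->cache == NULL) {
                        /* unwind from the start, as lu_kmem_fini() does */
                        kmem_fini(caches);
                        return -1;
                }
        }
        return 0;
}

int main(void)
{
        static void *a_cache, *b_cache;          /* zero-initialised */
        struct kmem_descr caches[] = {
                { &a_cache, 128 },
                { &b_cache, 256 },
                { NULL, 0 }                      /* sentinel, like ckd_cache == NULL */
        };

        return kmem_init(caches);
}
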