LU-1017 handle -EAGAIN properly in lu_object_find_try()
[fs/lustre-release.git] / lustre/obdclass/lu_object.c
index f0e9535..6057254 100644
@@ -113,6 +113,8 @@ void lu_object_put(const struct lu_env *env, struct lu_object *o)
         }
 
         if (!lu_object_is_dying(top)) {
+                LASSERT(cfs_list_empty(&top->loh_lru));
+                cfs_list_add_tail(&top->loh_lru, &bkt->lsb_lru);
                 cfs_hash_bd_unlock(site->ls_obj_hash, &bd, 1);
                 return;
         }
@@ -129,7 +131,6 @@ void lu_object_put(const struct lu_env *env, struct lu_object *o)
          * and we can safely destroy object below.
          */
         cfs_hash_bd_del_locked(site->ls_obj_hash, &bd, &top->loh_hash);
-        cfs_list_del_init(&top->loh_lru);
         cfs_hash_bd_unlock(site->ls_obj_hash, &bd, 1);
         /*
          * Object was already removed from hash and lru above, can
@@ -285,20 +286,7 @@ int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr)
                 bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, &bd);
 
                 cfs_list_for_each_entry_safe(h, temp, &bkt->lsb_lru, loh_lru) {
-                        /*
-                         * Objects are sorted in lru order, and "busy"
-                         * objects (ones with h->loh_ref > 0) naturally tend to
-                         * live near hot end that we scan last. Unfortunately,
-                         * sites usually have small (less then ten) number of
-                         * busy yet rarely accessed objects (some global
-                         * objects, accessed directly through pointers,
-                         * bypassing hash table).
-                         * Currently algorithm scans them over and over again.
-                         * Probably we should move busy objects out of LRU,
-                         * or we can live with that.
-                         */
-                        if (cfs_atomic_read(&h->loh_ref) > 0)
-                                continue;
+                        LASSERT(cfs_atomic_read(&h->loh_ref) == 0);
 
                         cfs_hash_bd_get(s->ls_obj_hash, &h->loh_fid, &bd2);
                         LASSERT(bd.bd_bucket == bd2.bd_bucket);
@@ -367,7 +355,7 @@ enum {
          *
          * XXX overflow is not handled correctly.
          */
-        LU_CDEBUG_LINE = 256
+        LU_CDEBUG_LINE = 512
 };
 
 struct lu_cdebug_data {
@@ -516,6 +504,7 @@ static struct lu_object *htable_lookup(struct lu_site *s,
         h = container_of0(hnode, struct lu_object_header, loh_hash);
         if (likely(!lu_object_is_dying(h))) {
                 lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_HIT);
+                cfs_list_del_init(&h->loh_lru);
                 return lu_object_top(h);
         }
 
@@ -564,7 +553,6 @@ static struct lu_object *lu_object_new(const struct lu_env *env,
         cfs_hash_bd_get_and_lock(hs, (void *)f, &bd, 1);
         bkt = cfs_hash_bd_extra_get(hs, &bd);
         cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash);
-        cfs_list_add_tail(&o->lo_header->loh_lru, &bkt->lsb_lru);
         bkt->lsb_busy++;
         cfs_hash_bd_unlock(hs, &bd, 1);
         return o;
@@ -630,21 +618,22 @@ static struct lu_object *lu_object_find_try(const struct lu_env *env,
         cfs_hash_bd_lock(hs, &bd, 1);
 
         shadow = htable_lookup(s, &bd, f, waiter, &version);
-        if (likely(shadow == NULL)) {
+        if (shadow == NULL) {
                 struct lu_site_bkt_data *bkt;
 
                 bkt = cfs_hash_bd_extra_get(hs, &bd);
                 cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash);
-                cfs_list_add_tail(&o->lo_header->loh_lru, &bkt->lsb_lru);
                 bkt->lsb_busy++;
                 cfs_hash_bd_unlock(hs, &bd, 1);
                 return o;
+        } else {
+                if (!cfs_list_empty(&shadow->lo_header->loh_lru))
+                        cfs_list_del_init(&shadow->lo_header->loh_lru);
+                lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_RACE);
+                cfs_hash_bd_unlock(hs, &bd, 1);
+                lu_object_free(env, o);
+                return shadow;
         }
-
-        lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_RACE);
-        cfs_hash_bd_unlock(hs, &bd, 1);
-        lu_object_free(env, o);
-        return shadow;
 }
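
Taken together with the earlier hunks, this changes the LRU policy: an object now sits on its bucket's lsb_lru list only while it is idle. htable_lookup() unlinks it on a cache hit, lu_object_new() and lu_object_find_try() no longer add freshly hashed (busy) objects, and lu_object_put() re-adds the object once its last reference is dropped and it is not dying. That is why lu_site_purge() can assert loh_ref == 0 for everything it finds on the LRU instead of skipping busy entries. A minimal user-space sketch of the invariant, with the LRU reduced to an on_lru flag and all names illustrative rather than the Lustre API:

    #include <assert.h>
    #include <stdio.h>

    struct obj {
            int ref;        /* loh_ref analogue                          */
            int on_lru;     /* set only while ref == 0 (object is idle)  */
    };

    static void cache_get(struct obj *o)       /* htable_lookup() cache hit */
    {
            if (o->ref++ == 0)
                    o->on_lru = 0;             /* busy objects leave the LRU */
    }

    static void cache_put(struct obj *o)       /* lu_object_put()            */
    {
            assert(o->ref > 0);
            if (--o->ref == 0)
                    o->on_lru = 1;             /* idle again: back on the LRU */
    }

    static int purge(struct obj *objs, int n)  /* lu_site_purge() analogue   */
    {
            int i, freed = 0;

            for (i = 0; i < n; i++) {
                    if (!objs[i].on_lru)
                            continue;              /* not cached as idle          */
                    assert(objs[i].ref == 0);      /* LRU never holds busy objects */
                    objs[i].on_lru = 0;
                    freed++;
            }
            return freed;
    }

    int main(void)
    {
            struct obj objs[2] = { { 0, 0 }, { 0, 0 } };

            cache_get(&objs[0]);               /* busy: never put on the LRU   */
            cache_put(&objs[0]);               /* idle: now eligible for purge */
            cache_get(&objs[1]);               /* stays busy                   */
            printf("purged %d of 2 objects\n", purge(objs, 2));
            return 0;
    }

Holding only idle objects keeps the purge scan proportional to the number of reclaimable objects, instead of repeatedly walking the handful of busy, rarely accessed objects that the removed comment in lu_site_purge() described.
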
 
 /**
@@ -841,10 +830,18 @@ static unsigned lu_obj_hop_hash(cfs_hash_t *hs,
                                 const void *key, unsigned mask)
 {
         struct lu_fid  *fid = (struct lu_fid *)key;
-        unsigned        hash;
+        __u32           hash;
+
+        hash = fid_flatten32(fid);
+        hash += (hash >> 4) + (hash << 12); /* mixing oid and seq */
+        hash = cfs_hash_long(hash, hs->hs_bkt_bits);
+
+        /* give me another random factor */
+        hash -= cfs_hash_long((unsigned long)hs, fid_oid(fid) % 11 + 3);
+
+        hash <<= hs->hs_cur_bits - hs->hs_bkt_bits;
+        hash |= (fid_seq(fid) + fid_oid(fid)) & (CFS_HASH_NBKT(hs) - 1);
 
-        hash = (fid_seq(fid) + fid_oid(fid)) & (CFS_HASH_NBKT(hs) - 1);
-        hash += fid_hash(fid, hs->hs_bkt_bits) << hs->hs_bkt_bits;
         return hash & mask;
 }
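
The replacement lu_obj_hop_hash() flattens the FID to 32 bits, mixes it, folds it with cfs_hash_long(), subtracts a per-site term derived from the hash-table pointer, and then packs the result so that the low bits (masked with CFS_HASH_NBKT(hs) - 1, which suggests they select the bucket) come straight from seq + oid while the mixed value fills the remaining bits. A stand-alone sketch of that bit layout; CUR_BITS, BKT_BITS, hash_long32() and the fid struct are illustrative stand-ins, not the libcfs helpers, and the per-site randomising term is omitted:

    #include <stdint.h>
    #include <stdio.h>

    #define CUR_BITS   12                      /* total hash bits (LU_SITE_BITS_MIN) */
    #define BKT_BITS   (CUR_BITS - 8)          /* per-bucket bits; 2^8 = 256 buckets */
    #define NBKT       (1U << (CUR_BITS - BKT_BITS))

    struct fid { uint64_t seq; uint32_t oid; };

    /* Knuth-style multiplicative fold to 'bits' bits (stand-in for cfs_hash_long()) */
    static uint32_t hash_long32(uint64_t val, unsigned int bits)
    {
            return (uint32_t)((val * 0x9e3779b97f4a7c15ULL) >> (64 - bits));
    }

    static uint32_t obj_hash(const struct fid *fid, uint32_t mask)
    {
            uint32_t hash = (uint32_t)fid->seq ^ fid->oid;  /* fid_flatten32()-ish */

            hash += (hash >> 4) + (hash << 12);             /* mix oid and seq     */
            hash  = hash_long32(hash, BKT_BITS);
            /* (the per-site randomising term from the patch is omitted here)      */
            hash <<= CUR_BITS - BKT_BITS;                   /* keep low bits free  */
            hash  |= ((uint32_t)fid->seq + fid->oid) & (NBKT - 1);
            return hash & mask;
    }

    int main(void)
    {
            struct fid f = { .seq = 0x200000400ULL, .oid = 1 };
            uint32_t   mask = (1U << CUR_BITS) - 1;
            int        i;

            for (i = 0; i < 4; i++, f.oid++)    /* consecutive OIDs, different buckets */
                    printf("oid %u -> bucket %u, hash %#x\n", (unsigned)f.oid,
                           (unsigned)(obj_hash(&f, mask) & (NBKT - 1)),
                           (unsigned)obj_hash(&f, mask));
            return 0;
    }

Deriving the bucket-selecting bits directly from seq + oid spreads objects with consecutive OIDs across neighbouring buckets, which helps keep the per-bucket LRU lists balanced.
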
 
@@ -902,27 +899,29 @@ cfs_hash_ops_t lu_site_hash_ops = {
  * Initialize site \a s, with \a d as the top level device.
  */
 #define LU_SITE_BITS_MIN    12
-#define LU_SITE_BITS_MAX    23
+#define LU_SITE_BITS_MAX    24
 /**
- * total 128 buckets, we don't want too many buckets because:
+ * total 256 buckets, we don't want too many buckets because:
  * - consume too much memory
  * - avoid unbalanced LRU list
  */
-#define LU_SITE_BKT_BITS    7
+#define LU_SITE_BKT_BITS    8
 
 int lu_site_init(struct lu_site *s, struct lu_device *top)
 {
         struct lu_site_bkt_data *bkt;
         cfs_hash_bd_t bd;
+        char name[16];
         int bits;
         int i;
         ENTRY;
 
         memset(s, 0, sizeof *s);
         bits = lu_htable_order();
+        snprintf(name, 16, "lu_site_%s", top->ld_type->ldt_name);
         for (bits = min(max(LU_SITE_BITS_MIN, bits), LU_SITE_BITS_MAX);
              bits >= LU_SITE_BITS_MIN; bits--) {
-                s->ls_obj_hash = cfs_hash_create("lu_site", bits, bits,
+                s->ls_obj_hash = cfs_hash_create(name, bits, bits,
                                                  bits - LU_SITE_BKT_BITS,
                                                  sizeof(*bkt), 0, 0,
                                                  &lu_site_hash_ops,
@@ -971,6 +970,13 @@ int lu_site_init(struct lu_site *s, struct lu_device *top)
         lu_device_get(top);
         lu_ref_add(&top->ld_reference, "site-top", s);
 
+        CFS_INIT_LIST_HEAD(&s->ls_ld_linkage);
+        cfs_spin_lock_init(&s->ls_ld_lock);
+
+        cfs_spin_lock(&s->ls_ld_lock);
+        cfs_list_add(&top->ld_linkage, &s->ls_ld_linkage);
+        cfs_spin_unlock(&s->ls_ld_lock);
+
         RETURN(0);
 }
 EXPORT_SYMBOL(lu_site_init);
@@ -1046,6 +1052,7 @@ int lu_device_init(struct lu_device *d, struct lu_device_type *t)
         cfs_atomic_set(&d->ld_ref, 0);
         d->ld_type = t;
         lu_ref_init(&d->ld_reference);
+        CFS_INIT_LIST_HEAD(&d->ld_linkage);
         return 0;
 }
 EXPORT_SYMBOL(lu_device_init);
@@ -1606,6 +1613,52 @@ int lu_context_refill(struct lu_context *ctx)
 }
 EXPORT_SYMBOL(lu_context_refill);
 
+/**
+ * lu_ctx_tags/lu_ses_tags will be updated when new types of obd are
+ * added. Currently this is only used on the client side, specifically
+ * for the echo device client; for other stacks (like ptlrpc threads),
+ * contexts are predefined when the lu_device type is registered, during
+ * the module probe phase.
+ */
+__u32 lu_context_tags_default = 0;
+__u32 lu_session_tags_default = 0;
+
+void lu_context_tags_update(__u32 tags)
+{
+        cfs_spin_lock(&lu_keys_guard);
+        lu_context_tags_default |= tags;
+        key_set_version ++;
+        cfs_spin_unlock(&lu_keys_guard);
+}
+EXPORT_SYMBOL(lu_context_tags_update);
+
+void lu_context_tags_clear(__u32 tags)
+{
+        cfs_spin_lock(&lu_keys_guard);
+        lu_context_tags_default &= ~tags;
+        key_set_version ++;
+        cfs_spin_unlock(&lu_keys_guard);
+}
+EXPORT_SYMBOL(lu_context_tags_clear);
+
+void lu_session_tags_update(__u32 tags)
+{
+        cfs_spin_lock(&lu_keys_guard);
+        lu_session_tags_default |= tags;
+        key_set_version ++;
+        cfs_spin_unlock(&lu_keys_guard);
+}
+EXPORT_SYMBOL(lu_session_tags_update);
+
+void lu_session_tags_clear(__u32 tags)
+{
+        cfs_spin_lock(&lu_keys_guard);
+        lu_session_tags_default &= ~tags;
+        key_set_version ++;
+        cfs_spin_unlock(&lu_keys_guard);
+}
+EXPORT_SYMBOL(lu_session_tags_clear);
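
These helpers widen a default tag mask and bump key_set_version under lu_keys_guard; already-initialized contexts then notice the stale version on their next lu_context_refill()/lu_env_refill() and re-create the keys they are now missing. A minimal stand-alone sketch of that lazy-refill idea, with all names illustrative rather than the Lustre API:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t keys_guard = PTHREAD_MUTEX_INITIALIZER;
    static unsigned int    key_set_version;
    static unsigned int    context_tags_default;

    struct context {
            unsigned int tags;
            unsigned int version;
    };

    static void context_tags_update(unsigned int tags) /* lu_context_tags_update() */
    {
            pthread_mutex_lock(&keys_guard);
            context_tags_default |= tags;
            key_set_version++;
            pthread_mutex_unlock(&keys_guard);
    }

    static void context_refill(struct context *ctx)    /* lu_context_refill()      */
    {
            if (ctx->version == key_set_version)
                    return;                            /* nothing changed          */
            ctx->tags   |= context_tags_default;       /* pick up the new tags     */
            ctx->version = key_set_version;            /* ...and re-key here       */
            printf("context refilled, tags now %#x\n", ctx->tags);
    }

    int main(void)
    {
            struct context ctx = { .tags = 0x1, .version = 0 };

            context_tags_update(0x2);   /* e.g. an echo client registering itself  */
            context_refill(&ctx);       /* stale version: keys recreated lazily    */
            return 0;
    }

lu_env_refill_by_tags() below builds on the same mechanism, clearing lc_version explicitly when a caller needs tags the env does not yet carry.
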
+
 int lu_env_init(struct lu_env *env, __u32 tags)
 {
         int result;
@@ -1637,6 +1690,34 @@ int lu_env_refill(struct lu_env *env)
 }
 EXPORT_SYMBOL(lu_env_refill);
 
+/**
+ * Currently, this API is only used by the echo client, because the
+ * echo client and the normal lustre client share the same cl_env
+ * cache. The echo client therefore needs to refresh the env context
+ * after it gets one from the cache, especially when the normal client
+ * and the echo client co-exist on the same client.
+ */
+int lu_env_refill_by_tags(struct lu_env *env, __u32 ctags,
+                          __u32 stags)
+{
+        int    result;
+
+        if ((env->le_ctx.lc_tags & ctags) != ctags) {
+                env->le_ctx.lc_version = 0;
+                env->le_ctx.lc_tags |= ctags;
+        }
+
+        if (env->le_ses && (env->le_ses->lc_tags & stags) != stags) {
+                env->le_ses->lc_version = 0;
+                env->le_ses->lc_tags |= stags;
+        }
+
+        result = lu_env_refill(env);
+
+        return result;
+}
+EXPORT_SYMBOL(lu_env_refill_by_tags);
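
The version is cleared only for a context that is actually missing some of the requested tags, so an env that already carries them is not refilled needlessly. A stand-alone sketch of that decision logic; the structures and helpers here are illustrative stand-ins, not the Lustre API:

    #include <stdio.h>

    struct ctx { unsigned int tags; unsigned int version; };
    struct env { struct ctx le_ctx; struct ctx *le_ses; };

    static int env_refill(struct env *env)         /* stand-in for lu_env_refill() */
    {
            if (env->le_ctx.version == 0 ||
                (env->le_ses != NULL && env->le_ses->version == 0))
                    printf("refilling env keys\n"); /* real code re-creates keys   */
            return 0;
    }

    static int env_refill_by_tags(struct env *env, unsigned int ctags,
                                  unsigned int stags)
    {
            if ((env->le_ctx.tags & ctags) != ctags) {
                    env->le_ctx.version = 0;        /* mark thread context stale   */
                    env->le_ctx.tags   |= ctags;
            }
            if (env->le_ses != NULL && (env->le_ses->tags & stags) != stags) {
                    env->le_ses->version = 0;       /* mark session context stale  */
                    env->le_ses->tags   |= stags;
            }
            return env_refill(env);
    }

    int main(void)
    {
            struct ctx ses = { .tags = 0x1, .version = 7 };
            struct env env = { .le_ctx = { .tags = 0x1, .version = 7 },
                               .le_ses = &ses };

            /* an echo-client-style caller asking for extra tags on a cached env */
            return env_refill_by_tags(&env, 0x1 | 0x4, 0x1);
    }

In the real function the subsequent lu_env_refill() then re-creates the per-tag keys for both the thread context (le_ctx) and the session context (le_ses).
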
+
 static struct cfs_shrinker *lu_site_shrinker = NULL;
 
 typedef struct lu_site_stats{
@@ -1675,25 +1756,25 @@ static void lu_site_stats_get(cfs_hash_t *hs,
 }
 
 #ifdef __KERNEL__
-static int lu_cache_shrink(SHRINKER_FIRST_ARG int nr_to_scan,
-                           unsigned int gfp_mask)
+
+static int lu_cache_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
 {
         lu_site_stats_t stats;
         struct lu_site *s;
         struct lu_site *tmp;
         int cached = 0;
-        int remain = nr_to_scan;
+        int remain = shrink_param(sc, nr_to_scan);
         CFS_LIST_HEAD(splice);
 
-        if (nr_to_scan != 0) {
-                if (!(gfp_mask & __GFP_FS))
+        if (remain != 0) {
+                if (!(shrink_param(sc, gfp_mask) & __GFP_FS))
                         return -1;
-                CDEBUG(D_INODE, "Shrink %d objects\n", nr_to_scan);
+                CDEBUG(D_INODE, "Shrink %d objects\n", remain);
         }
 
         cfs_down(&lu_sites_guard);
         cfs_list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) {
-                if (nr_to_scan != 0) {
+                if (shrink_param(sc, nr_to_scan) != 0) {
                         remain = lu_site_purge(&lu_shrink_env, s, remain);
                         /*
                          * Move just shrunk site to the tail of site list to
@@ -1705,14 +1786,14 @@ static int lu_cache_shrink(SHRINKER_FIRST_ARG int nr_to_scan,
                 memset(&stats, 0, sizeof(stats));
                 lu_site_stats_get(s->ls_obj_hash, &stats, 0);
                 cached += stats.lss_total - stats.lss_busy;
-                if (nr_to_scan && remain <= 0)
+                if (shrink_param(sc, nr_to_scan) && remain <= 0)
                         break;
         }
         cfs_list_splice(&splice, lu_sites.prev);
         cfs_up(&lu_sites_guard);
 
         cached = (cached / 100) * sysctl_vfs_cache_pressure;
-        if (nr_to_scan == 0)
+        if (shrink_param(sc, nr_to_scan) == 0)
                 CDEBUG(D_INODE, "%d objects cached\n", cached);
         return cached;
 }
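
SHRINKER_ARGS() and shrink_param() are compatibility macros from the libcfs layer, not defined in this file: newer kernels hand the shrinker callback a struct shrink_control, while older ones pass bare nr_to_scan/gfp_mask arguments, and the macros let lu_cache_shrink() be written once for both. A sketch of how such macros can look; the guard name HAVE_SHRINK_CONTROL and the exact expansions are assumptions for illustration, not copied from libcfs:

    /* Sketch only: compatibility macros in this spirit live in libcfs,
     * outside lu_object.c.  The guard macro name is an assumption. */
    #ifdef HAVE_SHRINK_CONTROL                      /* newer kernels          */
    #define SHRINKER_ARGS(sc, nr_to_scan, gfp_mask) \
            struct shrinker *shrinker, struct shrink_control *sc
    #define shrink_param(sc, var)   ((sc)->var)
    #else                                           /* older kernels          */
    #define SHRINKER_ARGS(sc, nr_to_scan, gfp_mask) \
            int nr_to_scan, unsigned int gfp_mask
    #define shrink_param(sc, var)   (var)
    #endif

With macros along these lines, shrink_param(sc, nr_to_scan) expands to either sc->nr_to_scan or plain nr_to_scan, which is why the patch replaces every direct use of the old arguments in lu_cache_shrink().
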