LU-5577 obdclass: lu_htable_order() return type to long
[fs/lustre-release.git] / lustre/obdclass/lu_object.c
index cb2da24..95d7192 100644
 #define DEBUG_SUBSYSTEM S_CLASS
 
 #include <libcfs/libcfs.h>
-
-#ifdef __KERNEL__
-# include <linux/module.h>
-#endif
-
-/* hash_long() */
-#include <libcfs/libcfs_hash.h>
+#include <linux/module.h>
+#include <libcfs/libcfs_hash.h> /* hash_long() */
 #include <obd_class.h>
 #include <obd_support.h>
 #include <lustre_disk.h>
 #include <lu_ref.h>
 #include <libcfs/list.h>
 
+enum {
+       LU_CACHE_PERCENT_MAX     = 50,
+       LU_CACHE_PERCENT_DEFAULT = 20
+};
+
+#define        LU_CACHE_NR_MAX_ADJUST          128
+#define        LU_CACHE_NR_UNLIMITED           -1
+#define        LU_CACHE_NR_DEFAULT             LU_CACHE_NR_UNLIMITED
+#define        LU_CACHE_NR_LDISKFS_LIMIT       LU_CACHE_NR_UNLIMITED
+/** This is set to roughly (20 * OSS_NTHRS_MAX) to prevent thrashing */
+#define        LU_CACHE_NR_ZFS_LIMIT           10240
+
+#define LU_SITE_BITS_MIN    12
+#define LU_SITE_BITS_MAX    24
+/**
+ * 256 buckets in total; we don't want too many buckets because they:
+ * - consume too much memory
+ * - lead to unbalanced LRU lists
+ */
+#define LU_SITE_BKT_BITS    8
+
+
+static unsigned int lu_cache_percent = LU_CACHE_PERCENT_DEFAULT;
+CFS_MODULE_PARM(lu_cache_percent, "i", int, 0644,
+               "Percentage of memory to be used as lu_object cache");
+
+static long lu_cache_nr = LU_CACHE_NR_DEFAULT;
+CFS_MODULE_PARM(lu_cache_nr, "l", long, 0644,
+               "Maximum number of objects in lu_object cache");
+
 static void lu_object_free(const struct lu_env *env, struct lu_object *o);
 
 /**
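
The two CFS_MODULE_PARM() declarations added above expose the new cache tunables as module parameters. A minimal sketch of the same thing in plain kernel terms follows, assuming the libcfs compatibility macro simply wraps module_param() and MODULE_PARM_DESC() on Linux; the sketch is illustrative, not a quote of the macro's expansion:

	/* hypothetical expansion of the CFS_MODULE_PARM() calls above */
	module_param(lu_cache_percent, uint, 0644);
	MODULE_PARM_DESC(lu_cache_percent,
			 "Percentage of memory to be used as lu_object cache");

	module_param(lu_cache_nr, long, 0644);
	MODULE_PARM_DESC(lu_cache_nr,
			 "Maximum number of objects in lu_object cache");

With mode 0644 both values can be set at load time (for example "options obdclass lu_cache_percent=30 lu_cache_nr=100000" in a modprobe configuration file, assuming this object ends up in obdclass.ko) or adjusted later under /sys/module/obdclass/parameters/.
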
@@ -89,10 +114,10 @@ void lu_object_put(const struct lu_env *env, struct lu_object *o)
        if (fid_is_zero(fid)) {
                LASSERT(top->loh_hash.next == NULL
                        && top->loh_hash.pprev == NULL);
-               LASSERT(cfs_list_empty(&top->loh_lru));
-               if (!cfs_atomic_dec_and_test(&top->loh_ref))
+               LASSERT(list_empty(&top->loh_lru));
+               if (!atomic_dec_and_test(&top->loh_ref))
                        return;
-               cfs_list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) {
+               list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) {
                        if (o->lo_ops->loo_object_release != NULL)
                                o->lo_ops->loo_object_release(env, o);
                }
@@ -103,17 +128,17 @@ void lu_object_put(const struct lu_env *env, struct lu_object *o)
         cfs_hash_bd_get(site->ls_obj_hash, &top->loh_fid, &bd);
         bkt = cfs_hash_bd_extra_get(site->ls_obj_hash, &bd);
 
-        if (!cfs_hash_bd_dec_and_lock(site->ls_obj_hash, &bd, &top->loh_ref)) {
-                if (lu_object_is_dying(top)) {
+       if (!cfs_hash_bd_dec_and_lock(site->ls_obj_hash, &bd, &top->loh_ref)) {
+               if (lu_object_is_dying(top)) {
 
-                        /*
-                         * somebody may be waiting for this, currently only
-                         * used for cl_object, see cl_object_put_last().
-                         */
-                        cfs_waitq_broadcast(&bkt->lsb_marche_funebre);
-                }
-                return;
-        }
+                       /*
+                        * somebody may be waiting for this, currently only
+                        * used for cl_object, see cl_object_put_last().
+                        */
+                       wake_up_all(&bkt->lsb_marche_funebre);
+               }
+               return;
+       }
 
         LASSERT(bkt->lsb_busy > 0);
         bkt->lsb_busy--;
@@ -121,14 +146,14 @@ void lu_object_put(const struct lu_env *env, struct lu_object *o)
          * When last reference is released, iterate over object
          * layers, and notify them that object is no longer busy.
          */
-        cfs_list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) {
+       list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) {
                 if (o->lo_ops->loo_object_release != NULL)
                         o->lo_ops->loo_object_release(env, o);
         }
 
         if (!lu_object_is_dying(top)) {
-                LASSERT(cfs_list_empty(&top->loh_lru));
-                cfs_list_add_tail(&top->loh_lru, &bkt->lsb_lru);
+               LASSERT(list_empty(&top->loh_lru));
+               list_add_tail(&top->loh_lru, &bkt->lsb_lru);
                 cfs_hash_bd_unlock(site->ls_obj_hash, &bd, 1);
                 return;
         }
@@ -181,7 +206,7 @@ void lu_object_unhash(const struct lu_env *env, struct lu_object *o)
                cfs_hash_bd_t bd;
 
                cfs_hash_bd_get_and_lock(obj_hash, &top->loh_fid, &bd, 1);
-               cfs_list_del_init(&top->loh_lru);
+               list_del_init(&top->loh_lru);
                cfs_hash_bd_del_locked(obj_hash, &bd, &top->loh_hash);
                cfs_hash_bd_unlock(obj_hash, &bd, 1);
        }
@@ -201,7 +226,7 @@ static struct lu_object *lu_object_alloc(const struct lu_env *env,
 {
        struct lu_object *scan;
        struct lu_object *top;
-       cfs_list_t *layers;
+       struct list_head *layers;
        unsigned int init_mask = 0;
        unsigned int init_flag;
        int clean;
@@ -231,7 +256,7 @@ static struct lu_object *lu_object_alloc(const struct lu_env *env,
                 */
                clean = 1;
                init_flag = 1;
-               cfs_list_for_each_entry(scan, layers, lo_linkage) {
+               list_for_each_entry(scan, layers, lo_linkage) {
                        if (init_mask & init_flag)
                                goto next;
                        clean = 0;
@@ -247,7 +272,7 @@ next:
                }
        } while (!clean);
 
-        cfs_list_for_each_entry_reverse(scan, layers, lo_linkage) {
+       list_for_each_entry_reverse(scan, layers, lo_linkage) {
                 if (scan->lo_ops->loo_object_start != NULL) {
                         result = scan->lo_ops->loo_object_start(env, scan);
                         if (result != 0) {
@@ -266,11 +291,11 @@ next:
  */
 static void lu_object_free(const struct lu_env *env, struct lu_object *o)
 {
-        struct lu_site_bkt_data *bkt;
-        struct lu_site          *site;
-        struct lu_object        *scan;
-        cfs_list_t              *layers;
-        cfs_list_t               splice;
+       struct lu_site_bkt_data *bkt;
+       struct lu_site          *site;
+       struct lu_object        *scan;
+       struct list_head        *layers;
+       struct list_head         splice;
 
         site   = o->lo_dev->ld_site;
         layers = &o->lo_header->loh_layers;
@@ -278,7 +303,7 @@ static void lu_object_free(const struct lu_env *env, struct lu_object *o)
         /*
          * First call ->loo_object_delete() method to release all resources.
          */
-        cfs_list_for_each_entry_reverse(scan, layers, lo_linkage) {
+       list_for_each_entry_reverse(scan, layers, lo_linkage) {
                 if (scan->lo_ops->loo_object_delete != NULL)
                         scan->lo_ops->loo_object_delete(env, scan);
         }
@@ -289,22 +314,22 @@ static void lu_object_free(const struct lu_env *env, struct lu_object *o)
          * necessary, because lu_object_header is freed together with the
          * top-level slice.
          */
-        CFS_INIT_LIST_HEAD(&splice);
-        cfs_list_splice_init(layers, &splice);
-        while (!cfs_list_empty(&splice)) {
-                /*
-                 * Free layers in bottom-to-top order, so that object header
-                 * lives as long as possible and ->loo_object_free() methods
-                 * can look at its contents.
-                 */
-                o = container_of0(splice.prev, struct lu_object, lo_linkage);
-                cfs_list_del_init(&o->lo_linkage);
-                LASSERT(o->lo_ops->loo_object_free != NULL);
-                o->lo_ops->loo_object_free(env, o);
-        }
+       INIT_LIST_HEAD(&splice);
+       list_splice_init(layers, &splice);
+       while (!list_empty(&splice)) {
+               /*
+                * Free layers in bottom-to-top order, so that object header
+                * lives as long as possible and ->loo_object_free() methods
+                * can look at its contents.
+                */
+               o = container_of0(splice.prev, struct lu_object, lo_linkage);
+               list_del_init(&o->lo_linkage);
+               LASSERT(o->lo_ops->loo_object_free != NULL);
+               o->lo_ops->loo_object_free(env, o);
+       }
 
-        if (cfs_waitq_active(&bkt->lsb_marche_funebre))
-                cfs_waitq_broadcast(&bkt->lsb_marche_funebre);
+       if (waitqueue_active(&bkt->lsb_marche_funebre))
+               wake_up_all(&bkt->lsb_marche_funebre);
 }
 
 /**
@@ -317,24 +342,29 @@ int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr)
         struct lu_site_bkt_data *bkt;
         cfs_hash_bd_t            bd;
         cfs_hash_bd_t            bd2;
-        cfs_list_t               dispose;
-        int                      did_sth;
-        int                      start;
+       struct list_head         dispose;
+       int                      did_sth;
+       unsigned int             start;
         int                      count;
         int                      bnr;
-        int                      i;
+       unsigned int             i;
 
        if (OBD_FAIL_CHECK(OBD_FAIL_OBD_NO_LRU))
                RETURN(0);
 
-        CFS_INIT_LIST_HEAD(&dispose);
+       INIT_LIST_HEAD(&dispose);
         /*
          * Under LRU list lock, scan LRU list and move unreferenced objects to
          * the dispose list, removing them from LRU and hash table.
          */
         start = s->ls_purge_start;
-        bnr = (nr == ~0) ? -1 : nr / CFS_HASH_NBKT(s->ls_obj_hash) + 1;
+       bnr = (nr == ~0) ? -1 : nr / (int)CFS_HASH_NBKT(s->ls_obj_hash) + 1;
  again:
+       /*
+        * It doesn't make any sense to make purge threads parallel, that can
+        * only bring troubles to us. See LU-5331.
+        */
+       mutex_lock(&s->ls_purge_mutex);
         did_sth = 0;
         cfs_hash_for_each_bucket(s->ls_obj_hash, &bd, i) {
                 if (i < start)
@@ -343,15 +373,15 @@ int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr)
                 cfs_hash_bd_lock(s->ls_obj_hash, &bd, 1);
                 bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, &bd);
 
-                cfs_list_for_each_entry_safe(h, temp, &bkt->lsb_lru, loh_lru) {
-                        LASSERT(cfs_atomic_read(&h->loh_ref) == 0);
+               list_for_each_entry_safe(h, temp, &bkt->lsb_lru, loh_lru) {
+                       LASSERT(atomic_read(&h->loh_ref) == 0);
 
                         cfs_hash_bd_get(s->ls_obj_hash, &h->loh_fid, &bd2);
                         LASSERT(bd.bd_bucket == bd2.bd_bucket);
 
                         cfs_hash_bd_del_locked(s->ls_obj_hash,
                                                &bd2, &h->loh_hash);
-                        cfs_list_move(&h->loh_lru, &dispose);
+                       list_move(&h->loh_lru, &dispose);
                         if (did_sth == 0)
                                 did_sth = 1;
 
@@ -361,24 +391,25 @@ int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr)
                         if (count > 0 && --count == 0)
                                 break;
 
-                }
-                cfs_hash_bd_unlock(s->ls_obj_hash, &bd, 1);
-                cfs_cond_resched();
-                /*
-                 * Free everything on the dispose list. This is safe against
-                 * races due to the reasons described in lu_object_put().
-                 */
-                while (!cfs_list_empty(&dispose)) {
-                        h = container_of0(dispose.next,
-                                          struct lu_object_header, loh_lru);
-                        cfs_list_del_init(&h->loh_lru);
-                        lu_object_free(env, lu_object_top(h));
-                        lprocfs_counter_incr(s->ls_stats, LU_SS_LRU_PURGED);
-                }
+               }
+               cfs_hash_bd_unlock(s->ls_obj_hash, &bd, 1);
+               cond_resched();
+               /*
+                * Free everything on the dispose list. This is safe against
+                * races due to the reasons described in lu_object_put().
+                */
+               while (!list_empty(&dispose)) {
+                       h = container_of0(dispose.next,
+                                         struct lu_object_header, loh_lru);
+                       list_del_init(&h->loh_lru);
+                       lu_object_free(env, lu_object_top(h));
+                       lprocfs_counter_incr(s->ls_stats, LU_SS_LRU_PURGED);
+               }
 
                 if (nr == 0)
                         break;
         }
+       mutex_unlock(&s->ls_purge_mutex);
 
         if (nr != 0 && did_sth && start != 0) {
                 start = 0; /* restart from the first bucket */
@@ -462,8 +493,8 @@ int lu_cdebug_printer(const struct lu_env *env,
         vsnprintf(key->lck_area + used,
                   ARRAY_SIZE(key->lck_area) - used, format, args);
         if (complete) {
-                if (cfs_cdebug_show(msgdata->msg_mask, msgdata->msg_subsys))
-                        libcfs_debug_msg(msgdata, "%s", key->lck_area);
+               if (cfs_cdebug_show(msgdata->msg_mask, msgdata->msg_subsys))
+                       libcfs_debug_msg(msgdata, "%s\n", key->lck_area);
                 key->lck_area[0] = 0;
         }
         va_end(args);
@@ -478,13 +509,13 @@ void lu_object_header_print(const struct lu_env *env, void *cookie,
                             lu_printer_t printer,
                             const struct lu_object_header *hdr)
 {
-        (*printer)(env, cookie, "header@%p[%#lx, %d, "DFID"%s%s%s]",
-                   hdr, hdr->loh_flags, cfs_atomic_read(&hdr->loh_ref),
-                   PFID(&hdr->loh_fid),
-                   cfs_hlist_unhashed(&hdr->loh_hash) ? "" : " hash",
-                   cfs_list_empty((cfs_list_t *)&hdr->loh_lru) ? \
-                   "" : " lru",
-                   hdr->loh_attr & LOHA_EXISTS ? " exist":"");
+       (*printer)(env, cookie, "header@%p[%#lx, %d, "DFID"%s%s%s]",
+                  hdr, hdr->loh_flags, atomic_read(&hdr->loh_ref),
+                  PFID(&hdr->loh_fid),
+                  hlist_unhashed(&hdr->loh_hash) ? "" : " hash",
+                  list_empty((struct list_head *)&hdr->loh_lru) ? \
+                  "" : " lru",
+                  hdr->loh_attr & LOHA_EXISTS ? " exist" : "");
 }
 EXPORT_SYMBOL(lu_object_header_print);
 
@@ -502,7 +533,7 @@ void lu_object_print(const struct lu_env *env, void *cookie,
        lu_object_header_print(env, cookie, printer, top);
        (*printer)(env, cookie, "{\n");
 
-       cfs_list_for_each_entry(o, &top->loh_layers, lo_linkage) {
+       list_for_each_entry(o, &top->loh_layers, lo_linkage) {
                /*
                 * print `.' \a depth times followed by type name and address
                 */
@@ -527,7 +558,7 @@ int lu_object_invariant(const struct lu_object *o)
         struct lu_object_header *top;
 
         top = o->lo_header;
-        cfs_list_for_each_entry(o, &top->loh_layers, lo_linkage) {
+       list_for_each_entry(o, &top->loh_layers, lo_linkage) {
                 if (o->lo_ops->loo_object_invariant != NULL &&
                     !o->lo_ops->loo_object_invariant(o))
                         return 0;
@@ -537,15 +568,15 @@ int lu_object_invariant(const struct lu_object *o)
 EXPORT_SYMBOL(lu_object_invariant);
 
 static struct lu_object *htable_lookup(struct lu_site *s,
-                                       cfs_hash_bd_t *bd,
-                                       const struct lu_fid *f,
-                                       cfs_waitlink_t *waiter,
-                                       __u64 *version)
+                                      cfs_hash_bd_t *bd,
+                                      const struct lu_fid *f,
+                                      wait_queue_t *waiter,
+                                      __u64 *version)
 {
-        struct lu_site_bkt_data *bkt;
-        struct lu_object_header *h;
-        cfs_hlist_node_t        *hnode;
-        __u64  ver = cfs_hash_bd_version_get(bd);
+       struct lu_site_bkt_data *bkt;
+       struct lu_object_header *h;
+       struct hlist_node       *hnode;
+       __u64  ver = cfs_hash_bd_version_get(bd);
 
         if (*version == ver)
                return ERR_PTR(-ENOENT);
@@ -564,7 +595,7 @@ static struct lu_object *htable_lookup(struct lu_site *s,
         if (likely(!lu_object_is_dying(h))) {
                cfs_hash_get(s->ls_obj_hash, hnode);
                 lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_HIT);
-                cfs_list_del_init(&h->loh_lru);
+               list_del_init(&h->loh_lru);
                 return lu_object_top(h);
         }
 
@@ -574,18 +605,21 @@ static struct lu_object *htable_lookup(struct lu_site *s,
          * drained), and moreover, lookup has to wait until object is freed.
          */
 
-        cfs_waitlink_init(waiter);
-        cfs_waitq_add(&bkt->lsb_marche_funebre, waiter);
-        cfs_set_current_state(CFS_TASK_UNINT);
-        lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_DEATH_RACE);
-        return ERR_PTR(-EAGAIN);
+       if (likely(waiter != NULL)) {
+               init_waitqueue_entry_current(waiter);
+               add_wait_queue(&bkt->lsb_marche_funebre, waiter);
+               set_current_state(TASK_UNINTERRUPTIBLE);
+               lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_DEATH_RACE);
+       }
+
+       return ERR_PTR(-EAGAIN);
 }
 
 static struct lu_object *htable_lookup_nowait(struct lu_site *s,
                                              cfs_hash_bd_t *bd,
                                              const struct lu_fid *f)
 {
-       cfs_hlist_node_t        *hnode;
+       struct hlist_node       *hnode;
        struct lu_object_header *h;
 
        /* cfs_hash_bd_peek_locked is a somehow "internal" function
@@ -602,7 +636,7 @@ static struct lu_object *htable_lookup_nowait(struct lu_site *s,
 
        cfs_hash_get(s->ls_obj_hash, hnode);
        lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_HIT);
-       cfs_list_del_init(&h->loh_lru);
+       list_del_init(&h->loh_lru);
        return lu_object_top(h);
 }
 
@@ -619,6 +653,30 @@ struct lu_object *lu_object_find(const struct lu_env *env,
 }
 EXPORT_SYMBOL(lu_object_find);
 
+/*
+ * Limit the lu_object cache to a maximum of lu_cache_nr objects.  Because
+ * the calculation of the number of objects to reclaim is not covered by
+ * a lock, the amount purged per call is capped by LU_CACHE_NR_MAX_ADJUST.
+ * This ensures that many concurrent threads will not accidentally purge
+ * the entire cache.
+ */
+static void lu_object_limit(const struct lu_env *env,
+                           struct lu_device *dev)
+{
+       __u64 size, nr;
+
+       if (lu_cache_nr == LU_CACHE_NR_UNLIMITED)
+               return;
+
+       size = cfs_hash_size_get(dev->ld_site->ls_obj_hash);
+       nr = (__u64)lu_cache_nr;
+       if (size > nr)
+               lu_site_purge(env, dev->ld_site,
+                             MIN(size - nr, LU_CACHE_NR_MAX_ADJUST));
+
+       return;
+}
+
 static struct lu_object *lu_object_new(const struct lu_env *env,
                                        struct lu_device *dev,
                                        const struct lu_fid *f,
@@ -639,6 +697,9 @@ static struct lu_object *lu_object_new(const struct lu_env *env,
         cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash);
         bkt->lsb_busy++;
         cfs_hash_bd_unlock(hs, &bd, 1);
+
+       lu_object_limit(env, dev);
+
         return o;
 }
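
To make the capping behaviour of lu_object_limit() concrete, here is a worked example with hypothetical numbers: with lu_cache_nr set to the ZFS limit of 10240 and cfs_hash_size_get() reporting 12000 objects in the site hash, one call purges MIN(12000 - 10240, 128) = 128 objects. Because each caller trims at most LU_CACHE_NR_MAX_ADJUST objects, concurrent threads entering through lu_object_new() or lu_object_find_try() shrink the cache back toward the limit gradually instead of draining it in a single pass.
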
 
@@ -646,17 +707,17 @@ static struct lu_object *lu_object_new(const struct lu_env *env,
  * Core logic of lu_object_find*() functions.
  */
 static struct lu_object *lu_object_find_try(const struct lu_env *env,
-                                            struct lu_device *dev,
-                                            const struct lu_fid *f,
-                                            const struct lu_object_conf *conf,
-                                            cfs_waitlink_t *waiter)
-{
-        struct lu_object      *o;
-        struct lu_object      *shadow;
-        struct lu_site        *s;
-        cfs_hash_t            *hs;
-        cfs_hash_bd_t          bd;
-        __u64                  version = 0;
+                                           struct lu_device *dev,
+                                           const struct lu_fid *f,
+                                           const struct lu_object_conf *conf,
+                                           wait_queue_t *waiter)
+{
+       struct lu_object      *o;
+       struct lu_object      *shadow;
+       struct lu_site        *s;
+       cfs_hash_t            *hs;
+       cfs_hash_bd_t          bd;
+       __u64                  version = 0;
 
         /*
          * This uses standard index maintenance protocol:
@@ -709,6 +770,9 @@ static struct lu_object *lu_object_find_try(const struct lu_env *env,
                 cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash);
                 bkt->lsb_busy++;
                 cfs_hash_bd_unlock(hs, &bd, 1);
+
+               lu_object_limit(env, dev);
+
                 return o;
         }
 
@@ -724,26 +788,32 @@ static struct lu_object *lu_object_find_try(const struct lu_env *env,
  * objects of different "stacking" to be created within the same site.
  */
 struct lu_object *lu_object_find_at(const struct lu_env *env,
-                                    struct lu_device *dev,
-                                    const struct lu_fid *f,
-                                    const struct lu_object_conf *conf)
+                                   struct lu_device *dev,
+                                   const struct lu_fid *f,
+                                   const struct lu_object_conf *conf)
 {
-        struct lu_site_bkt_data *bkt;
-        struct lu_object        *obj;
-        cfs_waitlink_t           wait;
+       struct lu_site_bkt_data *bkt;
+       struct lu_object        *obj;
+       wait_queue_t           wait;
 
-        while (1) {
-                obj = lu_object_find_try(env, dev, f, conf, &wait);
-                if (obj != ERR_PTR(-EAGAIN))
-                        return obj;
-                /*
-                 * lu_object_find_try() already added waiter into the
-                 * wait queue.
-                 */
-                cfs_waitq_wait(&wait, CFS_TASK_UNINT);
-                bkt = lu_site_bkt_from_fid(dev->ld_site, (void *)f);
-                cfs_waitq_del(&bkt->lsb_marche_funebre, &wait);
-        }
+       while (1) {
+               if (conf != NULL && conf->loc_flags & LOC_F_NOWAIT) {
+                       obj = lu_object_find_try(env, dev, f, conf, NULL);
+
+                       return obj;
+               }
+
+               obj = lu_object_find_try(env, dev, f, conf, &wait);
+               if (obj != ERR_PTR(-EAGAIN))
+                       return obj;
+               /*
+                * lu_object_find_try() already added waiter into the
+                * wait queue.
+                */
+               waitq_wait(&wait, TASK_UNINTERRUPTIBLE);
+               bkt = lu_site_bkt_from_fid(dev->ld_site, (void *)f);
+               remove_wait_queue(&bkt->lsb_marche_funebre, &wait);
+       }
 }
 EXPORT_SYMBOL(lu_object_find_at);
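
The LOC_F_NOWAIT branch added above changes the behaviour of lu_object_find_at(): instead of sleeping on the bucket's wait queue while a dying object is freed, it passes a NULL waiter down to lu_object_find_try() and returns ERR_PTR(-EAGAIN) to the caller. A hypothetical caller sketch (env, dev and fid are placeholder names, not identifiers from this patch):

	struct lu_object_conf conf = { .loc_flags = LOC_F_NOWAIT };
	struct lu_object *obj;

	obj = lu_object_find_at(env, dev, fid, &conf);
	if (IS_ERR(obj)) {
		if (PTR_ERR(obj) == -EAGAIN) {
			/* an object with this FID is still being freed;
			 * retry later instead of blocking here */
		}
		return PTR_ERR(obj);
	}
	/* ... use obj ... */
	lu_object_put(env, obj);
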
 
@@ -796,44 +866,41 @@ EXPORT_SYMBOL(lu_object_find_slice);
 /**
  * Global list of all device types.
  */
-static CFS_LIST_HEAD(lu_device_types);
+static struct list_head lu_device_types;
 
 int lu_device_type_init(struct lu_device_type *ldt)
 {
        int result = 0;
 
-       CFS_INIT_LIST_HEAD(&ldt->ldt_linkage);
+       atomic_set(&ldt->ldt_device_nr, 0);
+       INIT_LIST_HEAD(&ldt->ldt_linkage);
        if (ldt->ldt_ops->ldto_init)
                result = ldt->ldt_ops->ldto_init(ldt);
-       if (result == 0)
-               cfs_list_add(&ldt->ldt_linkage, &lu_device_types);
+
+       if (result == 0) {
+               spin_lock(&obd_types_lock);
+               list_add(&ldt->ldt_linkage, &lu_device_types);
+               spin_unlock(&obd_types_lock);
+       }
+
        return result;
 }
 EXPORT_SYMBOL(lu_device_type_init);
 
 void lu_device_type_fini(struct lu_device_type *ldt)
 {
-       cfs_list_del_init(&ldt->ldt_linkage);
+       spin_lock(&obd_types_lock);
+       list_del_init(&ldt->ldt_linkage);
+       spin_unlock(&obd_types_lock);
        if (ldt->ldt_ops->ldto_fini)
                ldt->ldt_ops->ldto_fini(ldt);
 }
 EXPORT_SYMBOL(lu_device_type_fini);
 
-void lu_types_stop(void)
-{
-        struct lu_device_type *ldt;
-
-       cfs_list_for_each_entry(ldt, &lu_device_types, ldt_linkage) {
-               if (ldt->ldt_device_nr == 0 && ldt->ldt_ops->ldto_stop)
-                       ldt->ldt_ops->ldto_stop(ldt);
-       }
-}
-EXPORT_SYMBOL(lu_types_stop);
-
 /**
  * Global list of all sites on this node
  */
-static CFS_LIST_HEAD(lu_sites);
+static struct list_head lu_sites;
 static DEFINE_MUTEX(lu_sites_guard);
 
 /**
@@ -849,23 +916,23 @@ struct lu_site_print_arg {
 
 static int
 lu_site_obj_print(cfs_hash_t *hs, cfs_hash_bd_t *bd,
-                  cfs_hlist_node_t *hnode, void *data)
+                 struct hlist_node *hnode, void *data)
 {
-        struct lu_site_print_arg *arg = (struct lu_site_print_arg *)data;
-        struct lu_object_header  *h;
+       struct lu_site_print_arg *arg = (struct lu_site_print_arg *)data;
+       struct lu_object_header  *h;
 
-        h = cfs_hlist_entry(hnode, struct lu_object_header, loh_hash);
-        if (!cfs_list_empty(&h->loh_layers)) {
-                const struct lu_object *o;
+       h = hlist_entry(hnode, struct lu_object_header, loh_hash);
+       if (!list_empty(&h->loh_layers)) {
+               const struct lu_object *o;
 
-                o = lu_object_top(h);
-                lu_object_print(arg->lsp_env, arg->lsp_cookie,
-                                arg->lsp_printer, o);
-        } else {
-                lu_object_header_print(arg->lsp_env, arg->lsp_cookie,
-                                       arg->lsp_printer, h);
-        }
-        return 0;
+               o = lu_object_top(h);
+               lu_object_print(arg->lsp_env, arg->lsp_cookie,
+                               arg->lsp_printer, o);
+       } else {
+               lu_object_header_print(arg->lsp_env, arg->lsp_cookie,
+                                      arg->lsp_printer, h);
+       }
+       return 0;
 }
 
 /**
@@ -884,22 +951,25 @@ void lu_site_print(const struct lu_env *env, struct lu_site *s, void *cookie,
 }
 EXPORT_SYMBOL(lu_site_print);
 
-enum {
-        LU_CACHE_PERCENT_MAX     = 50,
-        LU_CACHE_PERCENT_DEFAULT = 20
-};
-
-static unsigned int lu_cache_percent = LU_CACHE_PERCENT_DEFAULT;
-CFS_MODULE_PARM(lu_cache_percent, "i", int, 0644,
-                "Percentage of memory to be used as lu_object cache");
-
 /**
  * Return desired hash table order.
  */
-static int lu_htable_order(void)
+static unsigned long lu_htable_order(struct lu_device *top)
 {
-        unsigned long cache_size;
-        int bits;
+       unsigned long cache_size;
+       unsigned long bits;
+
+       /*
+        * For ZFS based OSDs the cache should be disabled by default.  This
+        * allows the ZFS ARC maximum flexibility in determining what buffers
+        * to cache.  If Lustre has objects or buffer which it wants to ensure
+        * always stay cached it must maintain a hold on them.
+        */
+       if (strcmp(top->ld_type->ldt_name, LUSTRE_OSD_ZFS_NAME) == 0) {
+               lu_cache_percent = 1;
+               lu_cache_nr = LU_CACHE_NR_ZFS_LIMIT;
+               return LU_SITE_BITS_MIN;
+       }
 
         /*
          * Calculate hash table size, assuming that we want reasonable
@@ -908,7 +978,7 @@ static int lu_htable_order(void)
          *
          * Size of lu_object is (arbitrary) taken as 1K (together with inode).
          */
-       cache_size = num_physpages;
+       cache_size = totalram_pages;
 
 #if BITS_PER_LONG == 32
         /* limit hashtable size for lowmem systems to low RAM */
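
To give a sense of the scale this computes (the order-selection loop itself falls outside the hunks shown here, so the exact formula is not quoted): on a 64-bit node with 16 GiB of RAM, totalram_pages is roughly 4 million 4 KiB pages; with the default lu_cache_percent of 20 and the assumed ~1 KiB per cached object, that budgets room for roughly 3.3 million objects, giving an order of about 22 bits. Whatever value comes back is clamped by lu_site_init() below into the [LU_SITE_BITS_MIN, LU_SITE_BITS_MAX] = [12, 24] range, and for ZFS-backed OSDs the new branch above skips the calculation entirely and returns LU_SITE_BITS_MIN, leaving caching to the ARC. These figures are illustrative only.
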
@@ -935,61 +1005,61 @@ static int lu_htable_order(void)
 }
 
 static unsigned lu_obj_hop_hash(cfs_hash_t *hs,
-                                const void *key, unsigned mask)
+                               const void *key, unsigned mask)
 {
-        struct lu_fid  *fid = (struct lu_fid *)key;
-        __u32           hash;
+       struct lu_fid  *fid = (struct lu_fid *)key;
+       __u32           hash;
 
-        hash = fid_flatten32(fid);
-        hash += (hash >> 4) + (hash << 12); /* mixing oid and seq */
-        hash = cfs_hash_long(hash, hs->hs_bkt_bits);
+       hash = fid_flatten32(fid);
+       hash += (hash >> 4) + (hash << 12); /* mixing oid and seq */
+       hash = hash_long(hash, hs->hs_bkt_bits);
 
-        /* give me another random factor */
-        hash -= cfs_hash_long((unsigned long)hs, fid_oid(fid) % 11 + 3);
+       /* give me another random factor */
+       hash -= hash_long((unsigned long)hs, fid_oid(fid) % 11 + 3);
 
-        hash <<= hs->hs_cur_bits - hs->hs_bkt_bits;
-        hash |= (fid_seq(fid) + fid_oid(fid)) & (CFS_HASH_NBKT(hs) - 1);
+       hash <<= hs->hs_cur_bits - hs->hs_bkt_bits;
+       hash |= (fid_seq(fid) + fid_oid(fid)) & (CFS_HASH_NBKT(hs) - 1);
 
-        return hash & mask;
+       return hash & mask;
 }
 
-static void *lu_obj_hop_object(cfs_hlist_node_t *hnode)
+static void *lu_obj_hop_object(struct hlist_node *hnode)
 {
-        return cfs_hlist_entry(hnode, struct lu_object_header, loh_hash);
+       return hlist_entry(hnode, struct lu_object_header, loh_hash);
 }
 
-static void *lu_obj_hop_key(cfs_hlist_node_t *hnode)
+static void *lu_obj_hop_key(struct hlist_node *hnode)
 {
-        struct lu_object_header *h;
+       struct lu_object_header *h;
 
-        h = cfs_hlist_entry(hnode, struct lu_object_header, loh_hash);
-        return &h->loh_fid;
+       h = hlist_entry(hnode, struct lu_object_header, loh_hash);
+       return &h->loh_fid;
 }
 
-static int lu_obj_hop_keycmp(const void *key, cfs_hlist_node_t *hnode)
+static int lu_obj_hop_keycmp(const void *key, struct hlist_node *hnode)
 {
-        struct lu_object_header *h;
+       struct lu_object_header *h;
 
-        h = cfs_hlist_entry(hnode, struct lu_object_header, loh_hash);
-        return lu_fid_eq(&h->loh_fid, (struct lu_fid *)key);
+       h = hlist_entry(hnode, struct lu_object_header, loh_hash);
+       return lu_fid_eq(&h->loh_fid, (struct lu_fid *)key);
 }
 
-static void lu_obj_hop_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+static void lu_obj_hop_get(cfs_hash_t *hs, struct hlist_node *hnode)
 {
-        struct lu_object_header *h;
+       struct lu_object_header *h;
 
-        h = cfs_hlist_entry(hnode, struct lu_object_header, loh_hash);
-        if (cfs_atomic_add_return(1, &h->loh_ref) == 1) {
-                struct lu_site_bkt_data *bkt;
-                cfs_hash_bd_t            bd;
+       h = hlist_entry(hnode, struct lu_object_header, loh_hash);
+       if (atomic_add_return(1, &h->loh_ref) == 1) {
+               struct lu_site_bkt_data *bkt;
+               cfs_hash_bd_t            bd;
 
-                cfs_hash_bd_get(hs, &h->loh_fid, &bd);
-                bkt = cfs_hash_bd_extra_get(hs, &bd);
-                bkt->lsb_busy++;
-        }
+               cfs_hash_bd_get(hs, &h->loh_fid, &bd);
+               bkt = cfs_hash_bd_extra_get(hs, &bd);
+               bkt->lsb_busy++;
+       }
 }
 
-static void lu_obj_hop_put_locked(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+static void lu_obj_hop_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
 {
         LBUG(); /* we should never call it */
 }
@@ -1006,8 +1076,8 @@ cfs_hash_ops_t lu_site_hash_ops = {
 void lu_dev_add_linkage(struct lu_site *s, struct lu_device *d)
 {
        spin_lock(&s->ls_ld_lock);
-       if (cfs_list_empty(&d->ld_linkage))
-               cfs_list_add(&d->ld_linkage, &s->ls_ld_linkage);
+       if (list_empty(&d->ld_linkage))
+               list_add(&d->ld_linkage, &s->ls_ld_linkage);
        spin_unlock(&s->ls_ld_lock);
 }
 EXPORT_SYMBOL(lu_dev_add_linkage);
@@ -1015,59 +1085,53 @@ EXPORT_SYMBOL(lu_dev_add_linkage);
 void lu_dev_del_linkage(struct lu_site *s, struct lu_device *d)
 {
        spin_lock(&s->ls_ld_lock);
-       cfs_list_del_init(&d->ld_linkage);
+       list_del_init(&d->ld_linkage);
        spin_unlock(&s->ls_ld_lock);
 }
 EXPORT_SYMBOL(lu_dev_del_linkage);
 
 /**
- * Initialize site \a s, with \a d as the top level device.
- */
-#define LU_SITE_BITS_MIN    12
-#define LU_SITE_BITS_MAX    24
-/**
- * total 256 buckets, we don't want too many buckets because:
- * - consume too much memory
- * - avoid unbalanced LRU list
- */
-#define LU_SITE_BKT_BITS    8
-
+  * Initialize site \a s, with \a d as the top level device.
+  */
 int lu_site_init(struct lu_site *s, struct lu_device *top)
 {
-        struct lu_site_bkt_data *bkt;
-        cfs_hash_bd_t bd;
-        char name[16];
-        int bits;
-        int i;
-        ENTRY;
-
-        memset(s, 0, sizeof *s);
-        bits = lu_htable_order();
-        snprintf(name, 16, "lu_site_%s", top->ld_type->ldt_name);
-        for (bits = min(max(LU_SITE_BITS_MIN, bits), LU_SITE_BITS_MAX);
-             bits >= LU_SITE_BITS_MIN; bits--) {
-                s->ls_obj_hash = cfs_hash_create(name, bits, bits,
-                                                 bits - LU_SITE_BKT_BITS,
-                                                 sizeof(*bkt), 0, 0,
-                                                 &lu_site_hash_ops,
-                                                 CFS_HASH_SPIN_BKTLOCK |
-                                                 CFS_HASH_NO_ITEMREF |
-                                                 CFS_HASH_DEPTH |
-                                                 CFS_HASH_ASSERT_EMPTY);
-                if (s->ls_obj_hash != NULL)
-                        break;
-        }
+       struct lu_site_bkt_data *bkt;
+       cfs_hash_bd_t bd;
+       char name[16];
+       unsigned long bits;
+       unsigned int i;
+       ENTRY;
 
-        if (s->ls_obj_hash == NULL) {
-                CERROR("failed to create lu_site hash with bits: %d\n", bits);
-                return -ENOMEM;
-        }
+       memset(s, 0, sizeof *s);
+       mutex_init(&s->ls_purge_mutex);
+       bits = lu_htable_order(top);
+       snprintf(name, sizeof(name), "lu_site_%s", top->ld_type->ldt_name);
+       for (bits = clamp_t(typeof(bits), bits,
+                           LU_SITE_BITS_MIN, LU_SITE_BITS_MAX);
+            bits >= LU_SITE_BITS_MIN; bits--) {
+               s->ls_obj_hash = cfs_hash_create(name, bits, bits,
+                                                bits - LU_SITE_BKT_BITS,
+                                                sizeof(*bkt), 0, 0,
+                                                &lu_site_hash_ops,
+                                                CFS_HASH_SPIN_BKTLOCK |
+                                                CFS_HASH_NO_ITEMREF |
+                                                CFS_HASH_DEPTH |
+                                                CFS_HASH_ASSERT_EMPTY |
+                                                CFS_HASH_COUNTER);
+               if (s->ls_obj_hash != NULL)
+                       break;
+       }
 
-        cfs_hash_for_each_bucket(s->ls_obj_hash, &bd, i) {
-                bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, &bd);
-                CFS_INIT_LIST_HEAD(&bkt->lsb_lru);
-                cfs_waitq_init(&bkt->lsb_marche_funebre);
-        }
+       if (s->ls_obj_hash == NULL) {
+               CERROR("failed to create lu_site hash with bits: %lu\n", bits);
+               return -ENOMEM;
+       }
+
+       cfs_hash_for_each_bucket(s->ls_obj_hash, &bd, i) {
+               bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, &bd);
+               INIT_LIST_HEAD(&bkt->lsb_lru);
+               init_waitqueue_head(&bkt->lsb_marche_funebre);
+       }
 
         s->ls_stats = lprocfs_alloc_stats(LU_SS_LAST_STAT, 0);
         if (s->ls_stats == NULL) {
@@ -1089,13 +1153,13 @@ int lu_site_init(struct lu_site *s, struct lu_device *top)
         lprocfs_counter_init(s->ls_stats, LU_SS_LRU_PURGED,
                              0, "lru_purged", "lru_purged");
 
-        CFS_INIT_LIST_HEAD(&s->ls_linkage);
+       INIT_LIST_HEAD(&s->ls_linkage);
         s->ls_top_dev = top;
         top->ld_site = s;
         lu_device_get(top);
         lu_ref_add(&top->ld_reference, "site-top", s);
 
-        CFS_INIT_LIST_HEAD(&s->ls_ld_linkage);
+       INIT_LIST_HEAD(&s->ls_ld_linkage);
        spin_lock_init(&s->ls_ld_lock);
 
        lu_dev_add_linkage(s, top);
@@ -1110,7 +1174,7 @@ EXPORT_SYMBOL(lu_site_init);
 void lu_site_fini(struct lu_site *s)
 {
        mutex_lock(&lu_sites_guard);
-        cfs_list_del_init(&s->ls_linkage);
+       list_del_init(&s->ls_linkage);
        mutex_unlock(&lu_sites_guard);
 
         if (s->ls_obj_hash != NULL) {
@@ -1139,7 +1203,7 @@ int lu_site_init_finish(struct lu_site *s)
        mutex_lock(&lu_sites_guard);
         result = lu_context_refill(&lu_shrink_env.le_ctx);
         if (result == 0)
-                cfs_list_add(&s->ls_linkage, &lu_sites);
+               list_add(&s->ls_linkage, &lu_sites);
        mutex_unlock(&lu_sites_guard);
         return result;
 }
@@ -1150,7 +1214,7 @@ EXPORT_SYMBOL(lu_site_init_finish);
  */
 void lu_device_get(struct lu_device *d)
 {
-        cfs_atomic_inc(&d->ld_ref);
+       atomic_inc(&d->ld_ref);
 }
 EXPORT_SYMBOL(lu_device_get);
 
@@ -1159,8 +1223,8 @@ EXPORT_SYMBOL(lu_device_get);
  */
 void lu_device_put(struct lu_device *d)
 {
-        LASSERT(cfs_atomic_read(&d->ld_ref) > 0);
-        cfs_atomic_dec(&d->ld_ref);
+       LASSERT(atomic_read(&d->ld_ref) > 0);
+       atomic_dec(&d->ld_ref);
 }
 EXPORT_SYMBOL(lu_device_put);
 
@@ -1169,14 +1233,16 @@ EXPORT_SYMBOL(lu_device_put);
  */
 int lu_device_init(struct lu_device *d, struct lu_device_type *t)
 {
-        if (t->ldt_device_nr++ == 0 && t->ldt_ops->ldto_start != NULL)
-                t->ldt_ops->ldto_start(t);
-        memset(d, 0, sizeof *d);
-        cfs_atomic_set(&d->ld_ref, 0);
-        d->ld_type = t;
-        lu_ref_init(&d->ld_reference);
-        CFS_INIT_LIST_HEAD(&d->ld_linkage);
-        return 0;
+       if (atomic_inc_return(&t->ldt_device_nr) == 1 &&
+           t->ldt_ops->ldto_start != NULL)
+               t->ldt_ops->ldto_start(t);
+
+       memset(d, 0, sizeof *d);
+       d->ld_type = t;
+       lu_ref_init(&d->ld_reference);
+       INIT_LIST_HEAD(&d->ld_linkage);
+
+       return 0;
 }
 EXPORT_SYMBOL(lu_device_init);
 
@@ -1185,20 +1251,21 @@ EXPORT_SYMBOL(lu_device_init);
  */
 void lu_device_fini(struct lu_device *d)
 {
-        struct lu_device_type *t;
+       struct lu_device_type *t = d->ld_type;
 
-        t = d->ld_type;
-        if (d->ld_obd != NULL) {
-                d->ld_obd->obd_lu_dev = NULL;
-                d->ld_obd = NULL;
-        }
+       if (d->ld_obd != NULL) {
+               d->ld_obd->obd_lu_dev = NULL;
+               d->ld_obd = NULL;
+       }
+
+       lu_ref_fini(&d->ld_reference);
+       LASSERTF(atomic_read(&d->ld_ref) == 0,
+                "Refcount is %u\n", atomic_read(&d->ld_ref));
+       LASSERT(atomic_read(&t->ldt_device_nr) > 0);
 
-        lu_ref_fini(&d->ld_reference);
-        LASSERTF(cfs_atomic_read(&d->ld_ref) == 0,
-                 "Refcount is %u\n", cfs_atomic_read(&d->ld_ref));
-        LASSERT(t->ldt_device_nr > 0);
-        if (--t->ldt_device_nr == 0 && t->ldt_ops->ldto_stop != NULL)
-                t->ldt_ops->ldto_stop(t);
+       if (atomic_dec_and_test(&t->ldt_device_nr) &&
+           t->ldt_ops->ldto_stop != NULL)
+               t->ldt_ops->ldto_stop(t);
 }
 EXPORT_SYMBOL(lu_device_fini);
 
@@ -1214,7 +1281,7 @@ int lu_object_init(struct lu_object *o, struct lu_object_header *h,
        o->lo_dev = d;
        lu_device_get(d);
        lu_ref_add_at(&d->ld_reference, &o->lo_dev_ref, "lu_object", o);
-       CFS_INIT_LIST_HEAD(&o->lo_linkage);
+       INIT_LIST_HEAD(&o->lo_linkage);
 
        return 0;
 }
@@ -1227,7 +1294,7 @@ void lu_object_fini(struct lu_object *o)
 {
        struct lu_device *dev = o->lo_dev;
 
-       LASSERT(cfs_list_empty(&o->lo_linkage));
+       LASSERT(list_empty(&o->lo_linkage));
 
        if (dev != NULL) {
                lu_ref_del_at(&dev->ld_reference, &o->lo_dev_ref,
@@ -1246,7 +1313,7 @@ EXPORT_SYMBOL(lu_object_fini);
  */
 void lu_object_add_top(struct lu_object_header *h, struct lu_object *o)
 {
-        cfs_list_move(&o->lo_linkage, &h->loh_layers);
+       list_move(&o->lo_linkage, &h->loh_layers);
 }
 EXPORT_SYMBOL(lu_object_add_top);
 
@@ -1258,7 +1325,7 @@ EXPORT_SYMBOL(lu_object_add_top);
  */
 void lu_object_add(struct lu_object *before, struct lu_object *o)
 {
-        cfs_list_move(&o->lo_linkage, &before->lo_linkage);
+       list_move(&o->lo_linkage, &before->lo_linkage);
 }
 EXPORT_SYMBOL(lu_object_add);
 
@@ -1268,10 +1335,10 @@ EXPORT_SYMBOL(lu_object_add);
 int lu_object_header_init(struct lu_object_header *h)
 {
         memset(h, 0, sizeof *h);
-        cfs_atomic_set(&h->loh_ref, 1);
-        CFS_INIT_HLIST_NODE(&h->loh_hash);
-        CFS_INIT_LIST_HEAD(&h->loh_lru);
-        CFS_INIT_LIST_HEAD(&h->loh_layers);
+       atomic_set(&h->loh_ref, 1);
+       INIT_HLIST_NODE(&h->loh_hash);
+       INIT_LIST_HEAD(&h->loh_lru);
+       INIT_LIST_HEAD(&h->loh_layers);
         lu_ref_init(&h->loh_reference);
         return 0;
 }
@@ -1282,9 +1349,9 @@ EXPORT_SYMBOL(lu_object_header_init);
  */
 void lu_object_header_fini(struct lu_object_header *h)
 {
-        LASSERT(cfs_list_empty(&h->loh_layers));
-        LASSERT(cfs_list_empty(&h->loh_lru));
-        LASSERT(cfs_hlist_unhashed(&h->loh_hash));
+       LASSERT(list_empty(&h->loh_layers));
+       LASSERT(list_empty(&h->loh_lru));
+       LASSERT(hlist_unhashed(&h->loh_hash));
         lu_ref_fini(&h->loh_reference);
 }
 EXPORT_SYMBOL(lu_object_header_fini);
@@ -1296,18 +1363,16 @@ EXPORT_SYMBOL(lu_object_header_fini);
 struct lu_object *lu_object_locate(struct lu_object_header *h,
                                    const struct lu_device_type *dtype)
 {
-        struct lu_object *o;
+       struct lu_object *o;
 
-        cfs_list_for_each_entry(o, &h->loh_layers, lo_linkage) {
-                if (o->lo_dev->ld_type == dtype)
-                        return o;
-        }
-        return NULL;
+       list_for_each_entry(o, &h->loh_layers, lo_linkage) {
+               if (o->lo_dev->ld_type == dtype)
+                       return o;
+       }
+       return NULL;
 }
 EXPORT_SYMBOL(lu_object_locate);
 
-
-
 /**
  * Finalize and free devices in the device stack.
  *
@@ -1369,8 +1434,8 @@ static unsigned key_set_version = 0;
  */
 int lu_context_key_register(struct lu_context_key *key)
 {
-        int result;
-        int i;
+       int result;
+       unsigned int i;
 
         LASSERT(key->lct_init != NULL);
         LASSERT(key->lct_fini != NULL);
@@ -1382,7 +1447,7 @@ int lu_context_key_register(struct lu_context_key *key)
         for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
                 if (lu_keys[i] == NULL) {
                         key->lct_index = i;
-                        cfs_atomic_set(&key->lct_used, 1);
+                       atomic_set(&key->lct_used, 1);
                         lu_keys[i] = key;
                         lu_ref_init(&key->lct_reference);
                         result = 0;
@@ -1403,16 +1468,16 @@ static void key_fini(struct lu_context *ctx, int index)
                 key = lu_keys[index];
                 LASSERT(key != NULL);
                 LASSERT(key->lct_fini != NULL);
-                LASSERT(cfs_atomic_read(&key->lct_used) > 1);
+               LASSERT(atomic_read(&key->lct_used) > 1);
 
                 key->lct_fini(ctx, key, ctx->lc_value[index]);
                 lu_ref_del(&key->lct_reference, "ctx", ctx);
-                cfs_atomic_dec(&key->lct_used);
+               atomic_dec(&key->lct_used);
 
                LASSERT(key->lct_owner != NULL);
                if ((ctx->lc_tags & LCT_NOREF) == 0) {
-                       LINVRNT(cfs_module_refcount(key->lct_owner) > 0);
-                       cfs_module_put(key->lct_owner);
+                       LINVRNT(module_refcount(key->lct_owner) > 0);
+                       module_put(key->lct_owner);
                }
                ctx->lc_value[index] = NULL;
        }
@@ -1423,7 +1488,7 @@ static void key_fini(struct lu_context *ctx, int index)
  */
 void lu_context_key_degister(struct lu_context_key *key)
 {
-       LASSERT(cfs_atomic_read(&key->lct_used) >= 1);
+       LASSERT(atomic_read(&key->lct_used) >= 1);
        LINVRNT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys));
 
        lu_context_key_quiesce(key);
@@ -1437,9 +1502,9 @@ void lu_context_key_degister(struct lu_context_key *key)
        }
        spin_unlock(&lu_keys_guard);
 
-       LASSERTF(cfs_atomic_read(&key->lct_used) == 1,
+       LASSERTF(atomic_read(&key->lct_used) == 1,
                 "key has instances: %d\n",
-                cfs_atomic_read(&key->lct_used));
+                atomic_read(&key->lct_used));
 }
 EXPORT_SYMBOL(lu_context_key_degister);
 
@@ -1540,7 +1605,7 @@ EXPORT_SYMBOL(lu_context_key_get);
 /**
  * List of remembered contexts. XXX document me.
  */
-static CFS_LIST_HEAD(lu_context_remembered);
+static struct list_head lu_context_remembered;
 
 /**
  * Destroy \a key in all remembered contexts. This is used to destroy key
@@ -1550,18 +1615,20 @@ static CFS_LIST_HEAD(lu_context_remembered);
 void lu_context_key_quiesce(struct lu_context_key *key)
 {
         struct lu_context *ctx;
+        extern unsigned cl_env_cache_purge(unsigned nr);
 
         if (!(key->lct_tags & LCT_QUIESCENT)) {
                 /*
                  * XXX layering violation.
                  */
+                cl_env_cache_purge(~0);
                 key->lct_tags |= LCT_QUIESCENT;
                 /*
                  * XXX memory barrier has to go here.
                  */
                spin_lock(&lu_keys_guard);
-               cfs_list_for_each_entry(ctx, &lu_context_remembered,
-                                       lc_remember)
+               list_for_each_entry(ctx, &lu_context_remembered,
+                                   lc_remember)
                        key_fini(ctx, key->lct_index);
                spin_unlock(&lu_keys_guard);
                ++key_set_version;
@@ -1578,7 +1645,7 @@ EXPORT_SYMBOL(lu_context_key_revive);
 
 static void keys_fini(struct lu_context *ctx)
 {
-       int     i;
+       unsigned int i;
 
        if (ctx->lc_value == NULL)
                return;
@@ -1592,7 +1659,7 @@ static void keys_fini(struct lu_context *ctx)
 
 static int keys_fill(struct lu_context *ctx)
 {
-        int i;
+       unsigned int i;
 
         LINVRNT(ctx->lc_value != NULL);
         for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
@@ -1615,11 +1682,11 @@ static int keys_fill(struct lu_context *ctx)
                         if (unlikely(IS_ERR(value)))
                                 return PTR_ERR(value);
 
-                        LASSERT(key->lct_owner != NULL);
-                        if (!(ctx->lc_tags & LCT_NOREF))
-                                cfs_try_module_get(key->lct_owner);
-                        lu_ref_add_atomic(&key->lct_reference, "ctx", ctx);
-                        cfs_atomic_inc(&key->lct_used);
+                       LASSERT(key->lct_owner != NULL);
+                       if (!(ctx->lc_tags & LCT_NOREF))
+                               try_module_get(key->lct_owner);
+                       lu_ref_add_atomic(&key->lct_reference, "ctx", ctx);
+                       atomic_inc(&key->lct_used);
                         /*
                          * This is the only place in the code, where an
                          * element of ctx->lc_value[] array is set to non-NULL
@@ -1655,10 +1722,10 @@ int lu_context_init(struct lu_context *ctx, __u32 tags)
        ctx->lc_tags = tags;
        if (tags & LCT_REMEMBER) {
                spin_lock(&lu_keys_guard);
-               cfs_list_add(&ctx->lc_remember, &lu_context_remembered);
+               list_add(&ctx->lc_remember, &lu_context_remembered);
                spin_unlock(&lu_keys_guard);
        } else {
-               CFS_INIT_LIST_HEAD(&ctx->lc_remember);
+               INIT_LIST_HEAD(&ctx->lc_remember);
        }
 
        rc = keys_init(ctx);
@@ -1678,13 +1745,13 @@ void lu_context_fini(struct lu_context *ctx)
        ctx->lc_state = LCS_FINALIZED;
 
        if ((ctx->lc_tags & LCT_REMEMBER) == 0) {
-               LASSERT(cfs_list_empty(&ctx->lc_remember));
+               LASSERT(list_empty(&ctx->lc_remember));
                keys_fini(ctx);
 
        } else { /* could race with key degister */
                spin_lock(&lu_keys_guard);
                keys_fini(ctx);
-               cfs_list_del_init(&ctx->lc_remember);
+               list_del_init(&ctx->lc_remember);
                spin_unlock(&lu_keys_guard);
        }
 }
@@ -1705,7 +1772,7 @@ EXPORT_SYMBOL(lu_context_enter);
  */
 void lu_context_exit(struct lu_context *ctx)
 {
-        int i;
+       unsigned int i;
 
         LINVRNT(ctx->lc_state == LCS_ENTERED);
         ctx->lc_state = LCS_LEFT;
@@ -1853,12 +1920,12 @@ typedef struct lu_site_stats{
 static void lu_site_stats_get(cfs_hash_t *hs,
                               lu_site_stats_t *stats, int populated)
 {
-        cfs_hash_bd_t bd;
-        int           i;
+       cfs_hash_bd_t bd;
+       unsigned int  i;
 
         cfs_hash_for_each_bucket(hs, &bd, i) {
                 struct lu_site_bkt_data *bkt = cfs_hash_bd_extra_get(hs, &bd);
-                cfs_hlist_head_t        *hhead;
+               struct hlist_head       *hhead;
 
                 cfs_hash_bd_lock(hs, &bd, 1);
                 stats->lss_busy  += bkt->lsb_busy;
@@ -1871,15 +1938,76 @@ static void lu_site_stats_get(cfs_hash_t *hs,
                 }
 
                 cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
-                        if (!cfs_hlist_empty(hhead))
+                       if (!hlist_empty(hhead))
                                 stats->lss_populated++;
                 }
                 cfs_hash_bd_unlock(hs, &bd, 1);
         }
 }
 
-#ifdef __KERNEL__
 
+static unsigned long lu_cache_shrink_count(struct shrinker *sk,
+                                          struct shrink_control *sc)
+{
+       lu_site_stats_t stats;
+       struct lu_site *s;
+       struct lu_site *tmp;
+       unsigned long cached = 0;
+
+       if (!(sc->gfp_mask & __GFP_FS))
+               return 0;
+
+       mutex_lock(&lu_sites_guard);
+       list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) {
+               memset(&stats, 0, sizeof(stats));
+               lu_site_stats_get(s->ls_obj_hash, &stats, 0);
+               cached += stats.lss_total - stats.lss_busy;
+       }
+       mutex_unlock(&lu_sites_guard);
+
+       cached = (cached / 100) * sysctl_vfs_cache_pressure;
+       CDEBUG(D_INODE, "%ld objects cached\n", cached);
+       return cached;
+}
+
+static unsigned long lu_cache_shrink_scan(struct shrinker *sk,
+                                         struct shrink_control *sc)
+{
+       struct lu_site *s;
+       struct lu_site *tmp;
+       unsigned long remain = sc->nr_to_scan;
+       LIST_HEAD(splice);
+
+       if (!(sc->gfp_mask & __GFP_FS))
+               /* We must not take the lu_sites_guard lock when
+                * __GFP_FS is *not* set because of the deadlock
+                * possibility detailed above. Additionally,
+                * since we cannot determine the number of
+                * objects in the cache without taking this
+                * lock, we're in a particularly tough spot. As
+                * a result, we'll just lie and say our cache is
+                * empty. This _should_ be ok, as we can't
+                * reclaim objects when __GFP_FS is *not* set
+                * anyways.
+                */
+               return SHRINK_STOP;
+
+       mutex_lock(&lu_sites_guard);
+       list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) {
+               remain = lu_site_purge(&lu_shrink_env, s, remain);
+               /*
+                * Move just shrunk site to the tail of site list to
+                * assure shrinking fairness.
+                */
+               list_move_tail(&s->ls_linkage, &splice);
+       }
+       list_splice(&splice, lu_sites.prev);
+       mutex_unlock(&lu_sites_guard);
+
+       return sc->nr_to_scan - remain;
+}
+
+#ifndef HAVE_SHRINKER_COUNT
 /*
  * There exists a potential lock inversion deadlock scenario when using
  * Lustre on top of ZFS. This occurs between one of ZFS's
@@ -1900,59 +2028,29 @@ static void lu_site_stats_get(cfs_hash_t *hs,
  */
 static int lu_cache_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
 {
-        lu_site_stats_t stats;
-        struct lu_site *s;
-        struct lu_site *tmp;
         int cached = 0;
-        int remain = shrink_param(sc, nr_to_scan);
-        CFS_LIST_HEAD(splice);
-
-       if (!(shrink_param(sc, gfp_mask) & __GFP_FS)) {
-               if (remain != 0)
-                        return -1;
-               else
-                       /* We must not take the lu_sites_guard lock when
-                        * __GFP_FS is *not* set because of the deadlock
-                        * possibility detailed above. Additionally,
-                        * since we cannot determine the number of
-                        * objects in the cache without taking this
-                        * lock, we're in a particularly tough spot. As
-                        * a result, we'll just lie and say our cache is
-                        * empty. This _should_ be ok, as we can't
-                        * reclaim objects when __GFP_FS is *not* set
-                        * anyways.
-                        */
-                       return 0;
-        }
+       struct shrink_control scv = {
+                .nr_to_scan = shrink_param(sc, nr_to_scan),
+                .gfp_mask   = shrink_param(sc, gfp_mask)
+       };
+#if !defined(HAVE_SHRINKER_WANT_SHRINK_PTR) && !defined(HAVE_SHRINK_CONTROL)
+       struct shrinker* shrinker = NULL;
+#endif
 
-       CDEBUG(D_INODE, "Shrink %d objects\n", remain);
 
-       mutex_lock(&lu_sites_guard);
-        cfs_list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) {
-                if (shrink_param(sc, nr_to_scan) != 0) {
-                        remain = lu_site_purge(&lu_shrink_env, s, remain);
-                        /*
-                         * Move just shrunk site to the tail of site list to
-                         * assure shrinking fairness.
-                         */
-                        cfs_list_move_tail(&s->ls_linkage, &splice);
-                }
+       CDEBUG(D_INODE, "Shrink %lu objects\n", scv.nr_to_scan);
 
-                memset(&stats, 0, sizeof(stats));
-                lu_site_stats_get(s->ls_obj_hash, &stats, 0);
-                cached += stats.lss_total - stats.lss_busy;
-                if (shrink_param(sc, nr_to_scan) && remain <= 0)
-                        break;
-        }
-        cfs_list_splice(&splice, lu_sites.prev);
-       mutex_unlock(&lu_sites_guard);
+       lu_cache_shrink_scan(shrinker, &scv);
 
-        cached = (cached / 100) * sysctl_vfs_cache_pressure;
-        if (shrink_param(sc, nr_to_scan) == 0)
-                CDEBUG(D_INODE, "%d objects cached\n", cached);
-        return cached;
+       cached = lu_cache_shrink_count(shrinker, &scv);
+       if (scv.nr_to_scan == 0)
+               CDEBUG(D_INODE, "%d objects cached\n", cached);
+       return cached;
 }
 
+#endif /* HAVE_SHRINKER_COUNT */
+
+
 /*
  * Debugging stuff.
  */
@@ -1983,7 +2081,7 @@ int lu_debugging_setup(void)
 
 void lu_context_keys_dump(void)
 {
-        int i;
+       unsigned int i;
 
         for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
                 struct lu_context_key *key;
@@ -1993,7 +2091,7 @@ void lu_context_keys_dump(void)
                         CERROR("[%d]: %p %x (%p,%p,%p) %d %d \"%s\"@%p\n",
                                i, key, key->lct_tags,
                                key->lct_init, key->lct_fini, key->lct_exit,
-                               key->lct_index, cfs_atomic_read(&key->lct_used),
+                              key->lct_index, atomic_read(&key->lct_used),
                                key->lct_owner ? key->lct_owner->name : "",
                                key->lct_owner);
                         lu_ref_print(&key->lct_reference);
@@ -2001,12 +2099,6 @@ void lu_context_keys_dump(void)
         }
 }
 EXPORT_SYMBOL(lu_context_keys_dump);
-#else  /* !__KERNEL__ */
-static int lu_cache_shrink(int nr, unsigned int gfp_mask)
-{
-        return 0;
-}
-#endif /* __KERNEL__ */
 
 /**
  * Initialization of global lu_* data.
@@ -2014,9 +2106,15 @@ static int lu_cache_shrink(int nr, unsigned int gfp_mask)
 int lu_global_init(void)
 {
         int result;
+       DEF_SHRINKER_VAR(shvar, lu_cache_shrink,
+                        lu_cache_shrink_count, lu_cache_shrink_scan);
 
         CDEBUG(D_INFO, "Lustre LU module (%p).\n", &lu_keys);
 
+       INIT_LIST_HEAD(&lu_device_types);
+       INIT_LIST_HEAD(&lu_context_remembered);
+       INIT_LIST_HEAD(&lu_sites);
+
         result = lu_ref_global_init();
         if (result != 0)
                 return result;
@@ -2042,7 +2140,7 @@ int lu_global_init(void)
          * inode, one for ea. Unfortunately setting this high value results in
          * lu_object/inode cache consuming all the memory.
          */
-       lu_site_shrinker = set_shrinker(DEFAULT_SEEKS, lu_cache_shrink);
+       lu_site_shrinker = set_shrinker(DEFAULT_SEEKS, &shvar);
         if (lu_site_shrinker == NULL)
                 return -ENOMEM;
 
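
DEF_SHRINKER_VAR() and set_shrinker() are Lustre compatibility helpers. On kernels that provide the split count/scan shrinker API (the HAVE_SHRINKER_COUNT case), they presumably reduce to something close to the following sketch; this is an assumption about the compat layer, not a quote of it:

	static struct shrinker lu_site_shrinker_def = {
		.count_objects	= lu_cache_shrink_count,
		.scan_objects	= lu_cache_shrink_scan,
		.seeks		= DEFAULT_SEEKS,
	};

	/* registered here in lu_global_init() ... */
	register_shrinker(&lu_site_shrinker_def);
	/* ... and torn down with unregister_shrinker() at module cleanup
	 * (not shown in these hunks) */

On older kernels the single-entry-point lu_cache_shrink() wrapper defined earlier takes over, translating the legacy callback into one lu_cache_shrink_scan() pass followed by a lu_cache_shrink_count().
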
@@ -2088,6 +2186,28 @@ static __u32 ls_stats_read(struct lprocfs_stats *stats, int idx)
  * Output site statistical counters into a buffer. Suitable for
  * lprocfs_rd_*()-style functions.
  */
+int lu_site_stats_seq_print(const struct lu_site *s, struct seq_file *m)
+{
+       lu_site_stats_t stats;
+
+       memset(&stats, 0, sizeof(stats));
+       lu_site_stats_get(s->ls_obj_hash, &stats, 1);
+
+       return seq_printf(m, "%d/%d %d/%d %d %d %d %d %d %d %d\n",
+                         stats.lss_busy,
+                         stats.lss_total,
+                         stats.lss_populated,
+                         CFS_HASH_NHLIST(s->ls_obj_hash),
+                         stats.lss_max_search,
+                         ls_stats_read(s->ls_stats, LU_SS_CREATED),
+                         ls_stats_read(s->ls_stats, LU_SS_CACHE_HIT),
+                         ls_stats_read(s->ls_stats, LU_SS_CACHE_MISS),
+                         ls_stats_read(s->ls_stats, LU_SS_CACHE_RACE),
+                         ls_stats_read(s->ls_stats, LU_SS_CACHE_DEATH_RACE),
+                         ls_stats_read(s->ls_stats, LU_SS_LRU_PURGED));
+}
+EXPORT_SYMBOL(lu_site_stats_seq_print);
+
 int lu_site_stats_print(const struct lu_site *s, char *page, int count)
 {
         lu_site_stats_t stats;
@@ -2159,7 +2279,7 @@ void lu_object_assign_fid(const struct lu_env *env, struct lu_object *o,
        struct lu_fid           *old = &o->lo_header->loh_fid;
        struct lu_site_bkt_data *bkt;
        struct lu_object        *shadow;
-       cfs_waitlink_t           waiter;
+       wait_queue_t             waiter;
        cfs_hash_t              *hs;
        cfs_hash_bd_t            bd;
        __u64                    version = 0;
@@ -2216,7 +2336,7 @@ void lu_buf_free(struct lu_buf *buf)
 }
 EXPORT_SYMBOL(lu_buf_free);
 
-void lu_buf_alloc(struct lu_buf *buf, int size)
+void lu_buf_alloc(struct lu_buf *buf, size_t size)
 {
        LASSERT(buf);
        LASSERT(buf->lb_buf == NULL);
@@ -2227,14 +2347,14 @@ void lu_buf_alloc(struct lu_buf *buf, int size)
 }
 EXPORT_SYMBOL(lu_buf_alloc);
 
-void lu_buf_realloc(struct lu_buf *buf, int size)
+void lu_buf_realloc(struct lu_buf *buf, size_t size)
 {
        lu_buf_free(buf);
        lu_buf_alloc(buf, size);
 }
 EXPORT_SYMBOL(lu_buf_realloc);
 
-struct lu_buf *lu_buf_check_and_alloc(struct lu_buf *buf, int len)
+struct lu_buf *lu_buf_check_and_alloc(struct lu_buf *buf, size_t len)
 {
        if (buf->lb_buf == NULL && buf->lb_len == 0)
                lu_buf_alloc(buf, len);
@@ -2252,7 +2372,7 @@ EXPORT_SYMBOL(lu_buf_check_and_alloc);
  * old buffer remains unchanged on error
  * \retval 0 or -ENOMEM
  */
-int lu_buf_check_and_grow(struct lu_buf *buf, int len)
+int lu_buf_check_and_grow(struct lu_buf *buf, size_t len)
 {
        char *ptr;