LU-3321 osc: Adjustment on osc LRU for performance
author     Jinshan Xiong <jinshan.xiong@intel.com>
           Mon, 30 Sep 2013 21:24:17 +0000 (14:24 -0700)
committer  Oleg Drokin <oleg.drokin@intel.com>
           Tue, 5 Nov 2013 01:50:49 +0000 (01:50 +0000)
Add and discard pages from LRU in batch.

Pages now enter the client LRU in one batch per finished extent via
osc_lru_add_batch() instead of one at a time from the read/write
completion handlers, and osc_lru_shrink() removes its victims from the
list while it already owns them, reducing cl_lru_list_lock traffic.

A write reserves its LRU slots up front in osc_io_rw_iter_init()
(capped at cl_max_pages_per_rpc * cl_max_rpcs_in_flight) and returns
the unused part in osc_io_rw_iter_fini(), so osc_lru_reserve() can
skip the per-page atomics on cl_lru_left in the common case.

osc_lru_shrink() gains a 'force' flag; non-forced callers back off
with -EBUSY if another thread is already shrinking this client's LRU.
osc_cache_too_much() now compares against a per-OSC budget
(ccc_lru_max / ccc_users), lru_shrink_max drops from 32M to 8M, and
the llite LRU budget becomes a flat half of total low memory.

Signed-off-by: Jinshan Xiong <jinshan.xiong@intel.com>
Change-Id: I90287b73c05066d420a31bff21866dfa1ffec665
Reviewed-on: http://review.whamcloud.com/7890
Tested-by: Jenkins
Tested-by: Maloo <hpdd-maloo@intel.com>
Reviewed-by: Niu Yawei <yawei.niu@intel.com>
Reviewed-by: Lai Siyao <lai.siyao@intel.com>
Reviewed-by: Oleg Drokin <oleg.drokin@intel.com>
libcfs/include/libcfs/list.h
lustre/llite/llite_lib.c
lustre/osc/lproc_osc.c
lustre/osc/osc_cache.c
lustre/osc/osc_cl_internal.h
lustre/osc/osc_internal.h
lustre/osc/osc_io.c
lustre/osc/osc_page.c
lustre/osc/osc_request.c

diff --git a/libcfs/include/libcfs/list.h b/libcfs/include/libcfs/list.h
index b747804..404d08a 100644
@@ -46,6 +46,7 @@ typedef struct list_head cfs_list_t;
 
 #define __cfs_list_splice(list, head)        __list_splice(list, head)
 #define cfs_list_splice(list, head)          list_splice(list, head)
+#define cfs_list_splice_tail(list, head)     list_splice_tail(list, head)
 
 #define cfs_list_splice_init(list, head)     list_splice_init(list, head)
 
@@ -305,6 +306,12 @@ static inline void cfs_list_splice(cfs_list_t *list,
                __cfs_list_splice(list, head);
 }
 
+static inline void cfs_list_splice_tail(cfs_list_t *list, cfs_list_t *head)
+{
+       if (!cfs_list_empty(list))
+               __cfs_list_splice(list, head->prev);
+}
+
 /**
  * Join two lists and reinitialise the emptied list.
  * \param list the new list to add.
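The new cfs_list_splice_tail() maps to the kernel's list_splice_tail() when real list_heads are in use; the userspace fallback gets the same effect by splicing the source list in front of head (i.e. at the tail), without reinitialising the source. A minimal standalone sketch of that splice-before-head trick, in plain C with illustrative names rather than the libcfs helpers:

#include <stdio.h>

struct node { struct node *prev, *next; int val; };

static void list_init(struct node *h) { h->prev = h->next = h; }

static void list_add_tail(struct node *n, struct node *h)
{
	n->prev = h->prev; n->next = h;
	h->prev->next = n; h->prev = n;
}

/* same effect as __cfs_list_splice(list, head->prev): insert every entry
 * of 'list' just before 'head', i.e. append them at the tail in order */
static void splice_tail(struct node *list, struct node *head)
{
	struct node *first = list->next, *last = list->prev;

	if (first == list)		/* source list is empty */
		return;
	first->prev = head->prev;
	head->prev->next = first;
	last->next = head;
	head->prev = last;
}

int main(void)
{
	struct node lru, batch, n[4];
	struct node *p;
	int i;

	list_init(&lru);
	list_init(&batch);
	for (i = 0; i < 2; i++) { n[i].val = i; list_add_tail(&n[i], &lru); }
	for (; i < 4; i++)      { n[i].val = i; list_add_tail(&n[i], &batch); }

	splice_tail(&batch, &lru);
	for (p = lru.next; p != &lru; p = p->next)
		printf("%d ", p->val);	/* prints: 0 1 2 3 */
	printf("\n");
	return 0;
}

As with list_splice_tail(), the source head still points into the spliced nodes afterwards; that is harmless in osc_lru_add_batch() below because the batch lives on a fresh on-stack CFS_LIST_HEAD for every call.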
diff --git a/lustre/llite/llite_lib.c b/lustre/llite/llite_lib.c
index 4f032c3..c6b85f1 100644
@@ -93,11 +93,7 @@ static struct ll_sb_info *ll_init_sbi(void)
 
         si_meminfo(&si);
         pages = si.totalram - si.totalhigh;
-       if (pages >> (20 - PAGE_CACHE_SHIFT) < 512) {
-               lru_page_max = pages / 2;
-       } else {
-               lru_page_max = (pages / 4) * 3;
-       }
+       lru_page_max = pages / 2;
 
        /* initialize ll_cache data */
        cfs_atomic_set(&sbi->ll_cache.ccc_users, 0);
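The llite change replaces the two-tier formula with a flat half of low memory. As a worked example (assuming 4 KiB pages, so PAGE_CACHE_SHIFT = 12): a client with 2 GiB of low RAM has pages = 524,288, and pages >> 8 = 2048 >= 512, so the old code granted 3/4 of it (393,216 pages, 1.5 GiB) to the page LRU, while the new code grants 262,144 pages (1 GiB). Only clients with less than 512 MiB of low memory behaved the same before and after.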
diff --git a/lustre/osc/lproc_osc.c b/lustre/osc/lproc_osc.c
index 0ea5719..e765e48 100644
@@ -195,7 +195,7 @@ static int osc_wr_cached_mb(struct file *file, const char *buffer,
 
        rc = cfs_atomic_read(&cli->cl_lru_in_list) - pages_number;
        if (rc > 0)
-               (void)osc_lru_shrink(cli, rc);
+               (void)osc_lru_shrink(cli, rc, true);
 
        return count;
 }
diff --git a/lustre/osc/osc_cache.c b/lustre/osc/osc_cache.c
index 4d97b3e..94cc695 100644
@@ -811,6 +811,8 @@ int osc_extent_finish(const struct lu_env *env, struct osc_extent *ext,
 
        ext->oe_rc = rc ?: ext->oe_nr_pages;
        EASSERT(ergo(rc == 0, ext->oe_state == OES_RPC), ext);
+
+       osc_lru_add_batch(cli, &ext->oe_pages);
        cfs_list_for_each_entry_safe(oap, tmp, &ext->oe_pages,
                                     oap_pending_item) {
                cfs_list_del_init(&oap->oap_rpc_item);
diff --git a/lustre/osc/osc_cl_internal.h b/lustre/osc/osc_cl_internal.h
index a8c2ec5..405db95 100644
@@ -80,6 +80,8 @@ struct osc_io {
         * page writeback from happening. */
        struct osc_extent *oi_trunc;
 
+       int oi_lru_reserved;
+
        struct obd_info    oi_info;
        struct obdo        oi_oa;
        struct osc_async_cbargs {
@@ -103,7 +105,7 @@ struct osc_session {
         struct osc_io       os_io;
 };
 
-#define OTI_PVEC_SIZE 64
+#define OTI_PVEC_SIZE 256
 struct osc_thread_info {
         struct ldlm_res_id      oti_resname;
         ldlm_policy_data_t      oti_policy;
@@ -368,21 +370,18 @@ struct osc_page {
         */
                              ops_in_lru:1,
        /**
-         * Set if the page must be transferred with OBD_BRW_SRVLOCK.
-         */
-                              ops_srvlock:1;
-       union {
-               /**
-                * lru page list. ops_inflight and ops_lru are exclusive so
-                * that they can share the same data.
-                */
-               cfs_list_t            ops_lru;
-               /**
-                * Linkage into a per-osc_object list of pages in flight. For
-                * debugging.
-                */
-               cfs_list_t            ops_inflight;
-       };
+        * Set if the page must be transferred with OBD_BRW_SRVLOCK.
+        */
+                             ops_srvlock:1;
+       /**
+        * lru page list. See osc_lru_{del|use}() in osc_page.c for usage.
+        */
+       cfs_list_t            ops_lru;
+       /**
+        * Linkage into a per-osc_object list of pages in flight. For
+        * debugging.
+        */
+       cfs_list_t            ops_inflight;
        /**
         * Thread that submitted this page for transfer. For debugging.
         */
@@ -434,6 +433,7 @@ void osc_index2policy  (ldlm_policy_data_t *policy, const struct cl_object *obj,
 int  osc_lvb_print     (const struct lu_env *env, void *cookie,
                         lu_printer_t p, const struct ost_lvb *lvb);
 
+void osc_lru_add_batch(struct client_obd *cli, cfs_list_t *list);
 void osc_page_submit(const struct lu_env *env, struct osc_page *opg,
                     enum cl_req_type crt, int brw_flags);
 int osc_cancel_async_page(const struct lu_env *env, struct osc_page *ops);
diff --git a/lustre/osc/osc_internal.h b/lustre/osc/osc_internal.h
index 40279d4..63210c6 100644
@@ -129,7 +129,8 @@ int osc_sync_base(struct obd_export *exp, struct obd_info *oinfo,
 int osc_process_config_base(struct obd_device *obd, struct lustre_cfg *cfg);
 int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
                  cfs_list_t *ext_list, int cmd, pdl_policy_t p);
-int osc_lru_shrink(struct client_obd *cli, int target);
+int osc_lru_shrink(struct client_obd *cli, int target, bool force);
+int osc_lru_reclaim(struct client_obd *cli);
 
 extern spinlock_t osc_ast_guard;
 
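With the new signature, non-forced calls to osc_lru_shrink() are opportunistic: if another thread is already shrinking this client's LRU they return -EBUSY rather than shrink concurrently, while forced callers (the proc handler and the set_info shrink path) always run. A hypothetical caller sketch, not taken from the patch, showing how that contract is meant to be used:

/* hypothetical helper, not part of this patch */
static void try_trim_lru(struct client_obd *cli, int target, bool urgent)
{
	int rc = osc_lru_shrink(cli, target, urgent);

	if (rc == -EBUSY && !urgent)
		rc = 0;		/* another shrinker is already running; fine */
	if (rc > 0)
		CDEBUG(D_CACHE, "freed %d LRU pages\n", rc);
}

This mirrors what osc_lru_reclaim() itself does below: it treats -EBUSY from its own non-forced shrink as success and moves on.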
diff --git a/lustre/osc/osc_io.c b/lustre/osc/osc_io.c
index bf93df5..bb5b6dd 100644
@@ -311,6 +311,56 @@ static int osc_io_commit_write(const struct lu_env *env,
         RETURN(0);
 }
 
+static int osc_io_rw_iter_init(const struct lu_env *env,
+                               const struct cl_io_slice *ios)
+{
+       struct cl_io *io = ios->cis_io;
+       struct osc_io *oio = osc_env_io(env);
+       struct osc_object *osc = cl2osc(ios->cis_obj);
+       struct client_obd *cli = osc_cli(osc);
+       unsigned long c;
+       unsigned int npages;
+       unsigned int max_pages;
+       ENTRY;
+
+       if (cl_io_is_append(io))
+               RETURN(0);
+
+       npages = io->u.ci_rw.crw_count >> PAGE_CACHE_SHIFT;
+       if (io->u.ci_rw.crw_pos & ~CFS_PAGE_MASK)
+               ++npages;
+
+       max_pages = cli->cl_max_pages_per_rpc * cli->cl_max_rpcs_in_flight;
+       if (npages > max_pages)
+               npages = max_pages;
+
+       c = cfs_atomic_read(cli->cl_lru_left);
+       if (c < npages && osc_lru_reclaim(cli) > 0)
+               c = cfs_atomic_read(cli->cl_lru_left);
+       while (c >= npages) {
+               if (c == cfs_atomic_cmpxchg(cli->cl_lru_left, c, c - npages)) {
+                       oio->oi_lru_reserved = npages;
+                       break;
+               }
+               c = cfs_atomic_read(cli->cl_lru_left);
+       }
+
+       RETURN(0);
+}
+
+static void osc_io_rw_iter_fini(const struct lu_env *env,
+                               const struct cl_io_slice *ios)
+{
+       struct osc_io *oio = osc_env_io(env);
+       struct osc_object *osc = cl2osc(ios->cis_obj);
+       struct client_obd *cli = osc_cli(osc);
+
+       if (oio->oi_lru_reserved > 0) {
+               cfs_atomic_add(oio->oi_lru_reserved, cli->cl_lru_left);
+               oio->oi_lru_reserved = 0;
+       }
+}
+
 static int osc_io_fault_start(const struct lu_env *env,
                               const struct cl_io_slice *ios)
 {
@@ -677,6 +727,8 @@ static const struct cl_io_operations osc_io_ops = {
                         .cio_fini   = osc_io_fini
                 },
                 [CIT_WRITE] = {
+                       .cio_iter_init = osc_io_rw_iter_init,
+                       .cio_iter_fini = osc_io_rw_iter_fini,
                         .cio_start  = osc_io_write_start,
                        .cio_end    = osc_io_end,
                         .cio_fini   = osc_io_fini
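osc_io_rw_iter_init() reserves LRU slots for the whole write up front: it sizes the request in pages, caps it at cl_max_pages_per_rpc * cl_max_rpcs_in_flight, and tries to subtract that many slots from cl_lru_left with a compare-and-swap loop; whatever the IO does not consume is handed back in osc_io_rw_iter_fini(). A standalone sketch of that reserve/release pattern using C11 atomics (names and values are illustrative; the kernel code uses cfs_atomic_cmpxchg() and also calls osc_lru_reclaim() when it comes up short):

#include <stdatomic.h>
#include <stdio.h>

static atomic_long lru_left = 1000;	/* shared pool of free LRU slots */

/* grab 'want' slots in one shot, or nothing at all */
static long reserve_slots(long want)
{
	long c = atomic_load(&lru_left);

	while (c >= want) {
		/* on failure, c is reloaded with the current counter value */
		if (atomic_compare_exchange_weak(&lru_left, &c, c - want))
			return want;
	}
	return 0;	/* pool too low; caller falls back to per-page reservation */
}

static void unreserve_slots(long unused)
{
	if (unused > 0)
		atomic_fetch_add(&lru_left, unused);
}

int main(void)
{
	long got = reserve_slots(256);

	printf("reserved %ld slots, %ld left\n", got, atomic_load(&lru_left));
	unreserve_slots(got);	/* here the pretend IO used none of them */
	printf("released them, %ld left\n", atomic_load(&lru_left));
	return 0;
}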
diff --git a/lustre/osc/osc_page.c b/lustre/osc/osc_page.c
index affd0d2..70a84cf 100644
@@ -42,8 +42,8 @@
 
 #include "osc_cl_internal.h"
 
-static void osc_lru_del(struct client_obd *cli, struct osc_page *opg, bool del);
-static void osc_lru_add(struct client_obd *cli, struct osc_page *opg);
+static void osc_lru_del(struct client_obd *cli, struct osc_page *opg);
+static void osc_lru_use(struct client_obd *cli, struct osc_page *opg);
 static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj,
                           struct osc_page *opg);
 
@@ -204,7 +204,7 @@ static void osc_page_transfer_add(const struct lu_env *env,
 
        /* ops_lru and ops_inflight share the same field, so take it from LRU
         * first and then use it as inflight. */
-       osc_lru_del(osc_cli(obj), opg, false);
+       osc_lru_use(osc_cli(obj), opg);
 
        spin_lock(&obj->oo_seatbelt);
        cfs_list_add(&opg->ops_inflight, &obj->oo_inflight[crt]);
@@ -320,21 +320,15 @@ static void osc_page_completion_read(const struct lu_env *env,
                                      int ioret)
 {
        struct osc_page   *opg = cl2osc_page(slice);
-       struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);
 
        if (likely(opg->ops_lock))
                osc_page_putref_lock(env, opg);
-       osc_lru_add(osc_cli(obj), opg);
 }
 
 static void osc_page_completion_write(const struct lu_env *env,
                                      const struct cl_page_slice *slice,
                                      int ioret)
 {
-       struct osc_page   *opg = cl2osc_page(slice);
-       struct osc_object *obj = cl2osc(slice->cpl_obj);
-
-       osc_lru_add(osc_cli(obj), opg);
 }
 
 static int osc_page_fail(const struct lu_env *env,
@@ -439,7 +433,8 @@ static void osc_page_delete(const struct lu_env *env,
        }
        spin_unlock(&obj->oo_seatbelt);
 
-       osc_lru_del(osc_cli(obj), opg, true);
+       osc_lru_del(osc_cli(obj), opg);
+
        EXIT;
 }
 
@@ -617,12 +612,11 @@ void osc_page_submit(const struct lu_env *env, struct osc_page *opg,
  */
 
 static CFS_DECL_WAITQ(osc_lru_waitq);
-static cfs_atomic_t osc_lru_waiters = CFS_ATOMIC_INIT(0);
 /* LRU pages are freed in batch mode. OSC should at least free this
  * number of pages to avoid running out of LRU budget, and.. */
-static const int lru_shrink_min = 2 << (20 - PAGE_CACHE_SHIFT);  /* 2M */
+static const int lru_shrink_min = 2 << (20 - PAGE_CACHE_SHIFT); /* 2M */
 /* free at most this number of pages, otherwise it will take too long to finish. */
-static const int lru_shrink_max = 32 << (20 - PAGE_CACHE_SHIFT); /* 32M */
+static const int lru_shrink_max = 8 << (20 - PAGE_CACHE_SHIFT); /* 8M */
 
 /* Check if we can free LRU slots from this OSC. If there are LRU waiters,
  * we should free slots aggressively. In this way, slots are freed in a steady
@@ -632,59 +626,124 @@ static const int lru_shrink_max = 32 << (20 - PAGE_CACHE_SHIFT); /* 32M */
 static int osc_cache_too_much(struct client_obd *cli)
 {
        struct cl_client_cache *cache = cli->cl_cache;
-       int pages = cfs_atomic_read(&cli->cl_lru_in_list) >> 1;
+       int pages = cfs_atomic_read(&cli->cl_lru_in_list);
+       unsigned long budget;
 
-       if (cfs_atomic_read(&osc_lru_waiters) > 0 &&
-           cfs_atomic_read(cli->cl_lru_left) < lru_shrink_max)
-               /* drop lru pages aggressively */
-               return min(pages, lru_shrink_max);
+       budget = cache->ccc_lru_max / cfs_atomic_read(&cache->ccc_users);
 
        /* if it's going to run out of LRU slots, we should free some, but not
         * too much, to maintain fairness among OSCs. */
        if (cfs_atomic_read(cli->cl_lru_left) < cache->ccc_lru_max >> 4) {
-               unsigned long tmp;
+               if (pages >= budget)
+                       return lru_shrink_max;
+               else if (pages >= budget / 2)
+                       return lru_shrink_min;
+       } else if (pages >= budget * 2)
+               return lru_shrink_min;
+       return 0;
+}
 
-               tmp = cache->ccc_lru_max / cfs_atomic_read(&cache->ccc_users);
-               if (pages > tmp)
-                       return min(pages, lru_shrink_max);
+void osc_lru_add_batch(struct client_obd *cli, cfs_list_t *plist)
+{
+       CFS_LIST_HEAD(lru);
+       struct osc_async_page *oap;
+       int npages = 0;
 
-               return pages > lru_shrink_min ? lru_shrink_min : 0;
+       cfs_list_for_each_entry(oap, plist, oap_pending_item) {
+               struct osc_page *opg = oap2osc_page(oap);
+
+               if (!opg->ops_in_lru)
+                       continue;
+
+               ++npages;
+               LASSERT(cfs_list_empty(&opg->ops_lru));
+               cfs_list_add(&opg->ops_lru, &lru);
        }
 
-       return 0;
+       if (npages > 0) {
+               client_obd_list_lock(&cli->cl_lru_list_lock);
+               cfs_list_splice_tail(&lru, &cli->cl_lru_list);
+               cfs_atomic_sub(npages, &cli->cl_lru_busy);
+               cfs_atomic_add(npages, &cli->cl_lru_in_list);
+               client_obd_list_unlock(&cli->cl_lru_list_lock);
+
+               /* XXX: May set force to be true for better performance */
+               osc_lru_shrink(cli, osc_cache_too_much(cli), false);
+       }
 }
 
-/* Return how many pages are not discarded in @pvec. */
-static int discard_pagevec(const struct lu_env *env, struct cl_io *io,
-                          struct cl_page **pvec, int max_index)
+static void __osc_lru_del(struct client_obd *cli, struct osc_page *opg)
 {
-       int count;
-       int i;
+       LASSERT(cfs_atomic_read(&cli->cl_lru_in_list) > 0);
+       cfs_list_del_init(&opg->ops_lru);
+       cfs_atomic_dec(&cli->cl_lru_in_list);
+}
 
-       for (count = 0, i = 0; i < max_index; i++) {
-               struct cl_page *page = pvec[i];
-               if (cl_page_own_try(env, io, page) == 0) {
-                       /* free LRU page only if nobody is using it.
-                        * This check is necessary to avoid freeing the pages
-                        * having already been removed from LRU and pinned
-                        * for IO. */
-                       if (!cl_page_in_use(page)) {
-                               cl_page_unmap(env, io, page);
-                               cl_page_discard(env, io, page);
-                               ++count;
-                       }
-                       cl_page_disown(env, io, page);
+/**
+ * Page is being destroyed. The page may not be in the LRU list if the
+ * transfer never finished (an error occurred).
+ */
+static void osc_lru_del(struct client_obd *cli, struct osc_page *opg)
+{
+       if (opg->ops_in_lru) {
+               client_obd_list_lock(&cli->cl_lru_list_lock);
+               if (!cfs_list_empty(&opg->ops_lru)) {
+                       __osc_lru_del(cli, opg);
+               } else {
+                       LASSERT(cfs_atomic_read(&cli->cl_lru_busy) > 0);
+                       cfs_atomic_dec(&cli->cl_lru_busy);
                }
-               cl_page_put(env, page);
-               pvec[i] = NULL;
+               client_obd_list_unlock(&cli->cl_lru_list_lock);
+
+               cfs_atomic_inc(cli->cl_lru_left);
+               /* this is a great place to release more LRU pages if
+                * this osc occupies too many LRU pages and kernel is
+                * stealing one of them. */
+               if (!memory_pressure_get())
+                       osc_lru_shrink(cli, osc_cache_too_much(cli), false);
+               wake_up(&osc_lru_waitq);
+       } else {
+               LASSERT(cfs_list_empty(&opg->ops_lru));
+       }
+}
+
+/**
+ * Delete page from the LRU list for redirty.
+ */
+static void osc_lru_use(struct client_obd *cli, struct osc_page *opg)
+{
+       /* If the page is being transferred for the first time,
+        * ops_lru should be empty */
+       if (opg->ops_in_lru && !cfs_list_empty(&opg->ops_lru)) {
+               client_obd_list_lock(&cli->cl_lru_list_lock);
+               __osc_lru_del(cli, opg);
+               client_obd_list_unlock(&cli->cl_lru_list_lock);
+               cfs_atomic_inc(&cli->cl_lru_busy);
        }
-       return max_index - count;
+}
+
+static void discard_pagevec(const struct lu_env *env, struct cl_io *io,
+                               struct cl_page **pvec, int max_index)
+{
+        int i;
+
+        for (i = 0; i < max_index; i++) {
+                struct cl_page *page = pvec[i];
+
+               LASSERT(cl_page_is_owned(page, io));
+               cl_page_unmap(env, io, page);
+               cl_page_discard(env, io, page);
+               cl_page_disown(env, io, page);
+                cl_page_put(env, page);
+
+                pvec[i] = NULL;
+        }
 }
 
 /**
  * Drop at most @target pages from the LRU.
  */
-int osc_lru_shrink(struct client_obd *cli, int target)
+int osc_lru_shrink(struct client_obd *cli, int target, bool force)
 {
        struct cl_env_nest nest;
        struct lu_env *env;
@@ -702,18 +761,30 @@ int osc_lru_shrink(struct client_obd *cli, int target)
        if (cfs_atomic_read(&cli->cl_lru_in_list) == 0 || target <= 0)
                RETURN(0);
 
+       if (!force) {
+               if (cfs_atomic_read(&cli->cl_lru_shrinkers) > 0)
+                       RETURN(-EBUSY);
+
+               if (cfs_atomic_inc_return(&cli->cl_lru_shrinkers) > 1) {
+                       cfs_atomic_dec(&cli->cl_lru_shrinkers);
+                       RETURN(-EBUSY);
+               }
+       } else {
+               cfs_atomic_inc(&cli->cl_lru_shrinkers);
+       }
+
        env = cl_env_nested_get(&nest);
        if (IS_ERR(env))
-               RETURN(PTR_ERR(env));
+               GOTO(out, rc = PTR_ERR(env));
 
        pvec = osc_env_info(env)->oti_pvec;
        io = &osc_env_info(env)->oti_io;
 
        client_obd_list_lock(&cli->cl_lru_list_lock);
-       cfs_atomic_inc(&cli->cl_lru_shrinkers);
        maxscan = min(target << 1, cfs_atomic_read(&cli->cl_lru_in_list));
        while (!cfs_list_empty(&cli->cl_lru_list)) {
                struct cl_page *page;
+               bool will_free = false;
 
                if (--maxscan < 0)
                        break;
@@ -734,7 +805,7 @@ int osc_lru_shrink(struct client_obd *cli, int target)
                        client_obd_list_unlock(&cli->cl_lru_list_lock);
 
                        if (clobj != NULL) {
-                               count -= discard_pagevec(env, io, pvec, index);
+                               discard_pagevec(env, io, pvec, index);
                                index = 0;
 
                                cl_io_fini(env, io);
@@ -756,94 +827,55 @@ int osc_lru_shrink(struct client_obd *cli, int target)
                        continue;
                }
 
-               /* move this page to the end of list as it will be discarded
-                * soon. The page will be finally removed from LRU list in
-                * osc_page_delete().  */
-               cfs_list_move_tail(&opg->ops_lru, &cli->cl_lru_list);
+               if (cl_page_own_try(env, io, page) == 0) {
+                       if (!cl_page_in_use_noref(page)) {
+                               /* remove it from lru list earlier to avoid
+                                * lock contention */
+                               __osc_lru_del(cli, opg);
+                               opg->ops_in_lru = 0; /* will be discarded */
+
+                               cl_page_get(page);
+                               will_free = true;
+                       } else {
+                               cl_page_disown(env, io, page);
+                       }
+               }
 
-               /* it's okay to grab a refcount here w/o holding lock because
-                * it has to grab cl_lru_list_lock to delete the page. */
-               cl_page_get(page);
-               pvec[index++] = page;
-               if (++count >= target)
-                       break;
+               if (!will_free) {
+                       cfs_list_move_tail(&opg->ops_lru, &cli->cl_lru_list);
+                       continue;
+               }
 
+               /* Don't discard or free the page while cl_lru_list_lock is held */
+               pvec[index++] = page;
                if (unlikely(index == OTI_PVEC_SIZE)) {
                        client_obd_list_unlock(&cli->cl_lru_list_lock);
-                       count -= discard_pagevec(env, io, pvec, index);
+                       discard_pagevec(env, io, pvec, index);
                        index = 0;
 
                        client_obd_list_lock(&cli->cl_lru_list_lock);
                }
+
+               if (++count >= target)
+                       break;
        }
        client_obd_list_unlock(&cli->cl_lru_list_lock);
 
        if (clobj != NULL) {
-               count -= discard_pagevec(env, io, pvec, index);
+               discard_pagevec(env, io, pvec, index);
 
                cl_io_fini(env, io);
                cl_object_put(env, clobj);
        }
        cl_env_nested_put(&nest, env);
 
+out:
        cfs_atomic_dec(&cli->cl_lru_shrinkers);
-       RETURN(count > 0 ? count : rc);
-}
-
-static void osc_lru_add(struct client_obd *cli, struct osc_page *opg)
-{
-       bool wakeup = false;
-
-       if (!opg->ops_in_lru)
-               return;
-
-       cfs_atomic_dec(&cli->cl_lru_busy);
-       client_obd_list_lock(&cli->cl_lru_list_lock);
-       if (cfs_list_empty(&opg->ops_lru)) {
-               cfs_list_move_tail(&opg->ops_lru, &cli->cl_lru_list);
-               cfs_atomic_inc_return(&cli->cl_lru_in_list);
-               wakeup = cfs_atomic_read(&osc_lru_waiters) > 0;
-       }
-       client_obd_list_unlock(&cli->cl_lru_list_lock);
-
-       if (wakeup) {
-               osc_lru_shrink(cli, osc_cache_too_much(cli));
+       if (count > 0) {
+               cfs_atomic_add(count, cli->cl_lru_left);
                wake_up_all(&osc_lru_waitq);
        }
-}
-
-/* delete page from LRUlist. The page can be deleted from LRUlist for two
- * reasons: redirtied or deleted from page cache. */
-static void osc_lru_del(struct client_obd *cli, struct osc_page *opg, bool del)
-{
-       if (opg->ops_in_lru) {
-               client_obd_list_lock(&cli->cl_lru_list_lock);
-               if (!cfs_list_empty(&opg->ops_lru)) {
-                       LASSERT(cfs_atomic_read(&cli->cl_lru_in_list) > 0);
-                       cfs_list_del_init(&opg->ops_lru);
-                       cfs_atomic_dec(&cli->cl_lru_in_list);
-                       if (!del)
-                               cfs_atomic_inc(&cli->cl_lru_busy);
-               } else if (del) {
-                       LASSERT(cfs_atomic_read(&cli->cl_lru_busy) > 0);
-                       cfs_atomic_dec(&cli->cl_lru_busy);
-               }
-               client_obd_list_unlock(&cli->cl_lru_list_lock);
-               if (del) {
-                       cfs_atomic_inc(cli->cl_lru_left);
-                       /* this is a great place to release more LRU pages if
-                        * this osc occupies too many LRU pages and kernel is
-                        * stealing one of them.
-                        * cl_lru_shrinkers is to avoid recursive call in case
-                        * we're already in the context of osc_lru_shrink(). */
-                       if (cfs_atomic_read(&cli->cl_lru_shrinkers) == 0 &&
-                           !memory_pressure_get())
-                               osc_lru_shrink(cli, osc_cache_too_much(cli));
-                       wake_up(&osc_lru_waitq);
-               }
-       } else {
-               LASSERT(cfs_list_empty(&opg->ops_lru));
-       }
+       RETURN(count > 0 ? count : rc);
 }
 
 static inline int max_to_shrink(struct client_obd *cli)
@@ -851,17 +883,20 @@ static inline int max_to_shrink(struct client_obd *cli)
        return min(cfs_atomic_read(&cli->cl_lru_in_list) >> 1, lru_shrink_max);
 }
 
-static int osc_lru_reclaim(struct client_obd *cli)
+int osc_lru_reclaim(struct client_obd *cli)
 {
        struct cl_client_cache *cache = cli->cl_cache;
        int max_scans;
-       int rc;
+       int rc = 0;
 
        LASSERT(cache != NULL);
        LASSERT(!cfs_list_empty(&cache->ccc_lru));
 
-       rc = osc_lru_shrink(cli, lru_shrink_min);
+       rc = osc_lru_shrink(cli, osc_cache_too_much(cli), false);
        if (rc != 0) {
+               if (rc == -EBUSY)
+                       rc = 0;
+
                CDEBUG(D_CACHE, "%s: Free %d pages from own LRU: %p.\n",
                        cli->cl_import->imp_obd->obd_name, rc, cli);
                return rc;
@@ -889,10 +924,10 @@ static int osc_lru_reclaim(struct client_obd *cli)
                        cfs_atomic_read(&cli->cl_lru_busy));
 
                cfs_list_move_tail(&cli->cl_lru_osc, &cache->ccc_lru);
-               if (cfs_atomic_read(&cli->cl_lru_in_list) > 0) {
+               if (osc_cache_too_much(cli) > 0) {
                        spin_unlock(&cache->ccc_lru_lock);
 
-                       rc = osc_lru_shrink(cli, max_to_shrink(cli));
+                       rc = osc_lru_shrink(cli, osc_cache_too_much(cli), true);
                        spin_lock(&cache->ccc_lru_lock);
                        if (rc != 0)
                                break;
@@ -909,6 +944,7 @@ static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj,
                           struct osc_page *opg)
 {
        struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
+       struct osc_io *oio = osc_env_io(env);
        struct client_obd *cli = osc_cli(obj);
        int rc = 0;
        ENTRY;
@@ -916,9 +952,13 @@ static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj,
        if (cli->cl_cache == NULL) /* shall not be in LRU */
                RETURN(0);
 
+       if (oio->oi_lru_reserved > 0) {
+               --oio->oi_lru_reserved;
+               goto out;
+       }
+
        LASSERT(cfs_atomic_read(cli->cl_lru_left) >= 0);
        while (!cfs_atomic_add_unless(cli->cl_lru_left, -1, 0)) {
-               int gen;
 
                /* run out of LRU spaces, try to drop some by itself */
                rc = osc_lru_reclaim(cli);
@@ -928,23 +968,14 @@ static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj,
                        continue;
 
                cond_resched();
-
-               /* slowest case, all of caching pages are busy, notifying
-                * other OSCs that we're lack of LRU slots. */
-               cfs_atomic_inc(&osc_lru_waiters);
-
-               gen = cfs_atomic_read(&cli->cl_lru_in_list);
                rc = l_wait_event(osc_lru_waitq,
-                               cfs_atomic_read(cli->cl_lru_left) > 0 ||
-                               (cfs_atomic_read(&cli->cl_lru_in_list) > 0 &&
-                                gen != cfs_atomic_read(&cli->cl_lru_in_list)),
+                               cfs_atomic_read(cli->cl_lru_left) > 0,
                                &lwi);
-
-               cfs_atomic_dec(&osc_lru_waiters);
                if (rc < 0)
                        break;
        }
 
+out:
        if (rc >= 0) {
                cfs_atomic_inc(&cli->cl_lru_busy);
                opg->ops_in_lru = 1;
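The rewritten osc_cache_too_much() works against a per-client budget instead of the old waiter count. As a worked example, assume 4 KiB pages, a global ccc_lru_max of 262,144 slots (1 GiB) shared by 4 OSCs, so budget = 65,536 pages per OSC, with lru_shrink_min = 512 pages (2 MiB) and lru_shrink_max = 2,048 pages (8 MiB). While more than 1/16 of the global slots (16,384) remain free, an OSC is only asked to trim lru_shrink_min once it holds at least twice its budget (131,072 pages). Once free slots fall below 1/16, it trims lru_shrink_min at half its budget (32,768 pages) and lru_shrink_max at a full budget (65,536 pages), which matters because osc_lru_reclaim() now only steals from clients whose osc_cache_too_much() is positive.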
diff --git a/lustre/osc/osc_request.c b/lustre/osc/osc_request.c
index 2aa6b30..9f144ed 100644
@@ -3233,7 +3233,7 @@ static int osc_set_info_async(const struct lu_env *env, struct obd_export *exp,
                int nr = cfs_atomic_read(&cli->cl_lru_in_list) >> 1;
                int target = *(int *)val;
 
-               nr = osc_lru_shrink(cli, min(nr, target));
+               nr = osc_lru_shrink(cli, min(nr, target), true);
                *(int *)val -= nr;
                RETURN(0);
        }
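For the cache-shrink request handled in this osc_set_info_async() hunk, each OSC gives up at most half of what it currently holds, forcibly, and subtracts what it actually freed from the caller's request. For example (illustrative numbers): an OSC with 10,000 pages on its LRU that is asked to shrink 8,000 calls osc_lru_shrink(cli, 5000, true); if all 5,000 are freed, *val drops to 3,000, and presumably the caller passes the remaining demand on to the next OSC.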