+/* Reclaim LRU page slots on behalf of @cli.
+ *
+ * First tries to shrink @cli's own LRU (up to lru_shrink_min pages).  If
+ * that frees nothing, walks the cache-wide list of client_obds under
+ * ccc_lru_lock and picks the first other OSC that has pages on its LRU as
+ * the victim, then shrinks up to half of the victim's LRU pages (capped at
+ * lru_shrink_max).
+ *
+ * Returns the number of pages freed (> 0), 0 if no free slots could be
+ * obtained, or a negative error code propagated from osc_lru_shrink().
+ *
+ * NOTE(review): osc_lru_shrink() on the victim runs after ccc_lru_lock is
+ * dropped; presumably the victim client_obd cannot disappear while on
+ * ccc_lru — confirm against the teardown path. */
+static int osc_lru_reclaim(struct client_obd *cli)
+{
+ struct cl_client_cache *cache = cli->cl_cache;
+ struct client_obd *victim;
+ struct client_obd *tmp;
+ int rc;
+
+ LASSERT(cache != NULL);
+ LASSERT(!cfs_list_empty(&cache->ccc_lru));
+
+ /* Fast path: free pages from our own LRU first. */
+ rc = osc_lru_shrink(cli, lru_shrink_min);
+ if (rc > 0) {
+ CDEBUG(D_CACHE, "%s: Free %d pages from own LRU: %p.\n",
+ cli->cl_import->imp_obd->obd_name, rc, cli);
+ return rc;
+ }
+
+ CDEBUG(D_CACHE, "%s: cli %p no free slots, pages: %d, busy: %d.\n",
+ cli->cl_import->imp_obd->obd_name, cli,
+ cfs_atomic_read(&cli->cl_lru_in_list),
+ cfs_atomic_read(&cli->cl_lru_busy));
+
+ /* Reclaim LRU slots from other client_obd as it can't free enough
+ * from its own. This should rarely happen. */
+ spin_lock(&cache->ccc_lru_lock);
+ cache->ccc_lru_shrinkers++;
+ /* Move @cli to the tail so it acts as the scan terminator: the walk
+ * below is guaranteed to reach @cli, so @victim is always assigned
+ * before the post-loop check. */
+ cfs_list_move_tail(&cli->cl_lru_osc, &cache->ccc_lru);
+ cfs_list_for_each_entry_safe(victim, tmp, &cache->ccc_lru, cl_lru_osc) {
+ if (victim == cli)
+ break;
+
+ CDEBUG(D_CACHE, "%s: cli %p LRU pages: %d, busy: %d.\n",
+ victim->cl_import->imp_obd->obd_name, victim,
+ cfs_atomic_read(&victim->cl_lru_in_list),
+ cfs_atomic_read(&victim->cl_lru_busy));
+
+ /* Rotate the candidate to the tail so repeated reclaims spread
+ * the cost across OSCs; stop at the first one with LRU pages. */
+ cfs_list_move_tail(&victim->cl_lru_osc, &cache->ccc_lru);
+ if (cfs_atomic_read(&victim->cl_lru_in_list) > 0)
+ break;
+ }
+ spin_unlock(&cache->ccc_lru_lock);
+ /* Walked all the way back to ourselves: nobody has reclaimable pages. */
+ if (victim == cli) {
+ CDEBUG(D_CACHE, "%s: can't get any free LRU slots.\n",
+ cli->cl_import->imp_obd->obd_name);
+ return 0;
+ }
+
+ /* Shrink at most half of the victim's LRU, bounded by lru_shrink_max. */
+ rc = osc_lru_shrink(victim,
+ min(cfs_atomic_read(&victim->cl_lru_in_list) >> 1,
+ lru_shrink_max));
+
+ CDEBUG(D_CACHE, "%s: Free %d pages from other cli: %p.\n",
+ cli->cl_import->imp_obd->obd_name, rc, victim);
+
+ return rc;
+}
+
+/* Reserve one LRU slot for page @opg before it is added to the cache.
+ *
+ * If the client is not attached to a shared cache (cl_cache == NULL) the
+ * page is not LRU-managed and 0 is returned immediately.  Otherwise this
+ * atomically consumes one slot from cl_lru_left; when no slot is free it
+ * first tries osc_lru_reclaim(), and as a last resort registers itself in
+ * osc_lru_waiters and sleeps (interruptibly) until either a slot becomes
+ * available or this client's LRU population changes.
+ *
+ * Returns 0 on success (opg->ops_in_lru set, cl_lru_busy incremented) or a
+ * negative error code (e.g. interrupted wait / reclaim failure). */
+static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj,
+ struct osc_page *opg)
+{
+ /* Interruptible wait: a signal aborts the reservation. */
+ struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
+ struct client_obd *cli = osc_cli(obj);
+ int rc = 0;
+ ENTRY;
+
+ if (cli->cl_cache == NULL) /* shall not be in LRU */
+ RETURN(0);
+
+ LASSERT(cfs_atomic_read(cli->cl_lru_left) >= 0);
+ /* Take one slot atomically; add_unless fails only when the counter
+ * is already 0, i.e. no free LRU slots remain. */
+ while (!cfs_atomic_add_unless(cli->cl_lru_left, -1, 0)) {
+ int gen;
+
+ /* run out of LRU spaces, try to drop some by itself */
+ rc = osc_lru_reclaim(cli);
+ if (rc < 0)
+ break;
+ if (rc > 0)
+ continue;
+
+ cfs_cond_resched();
+
+ /* slowest case, all of caching pages are busy, notifying
+ * other OSCs that we're lack of LRU slots. */
+ cfs_atomic_inc(&osc_lru_waiters);
+
+ /* Snapshot cl_lru_in_list; a change while sleeping means some
+ * of this client's pages moved in/out of the LRU, so it is
+ * worth retrying the reservation even without free slots. */
+ gen = cfs_atomic_read(&cli->cl_lru_in_list);
+ rc = l_wait_event(osc_lru_waitq,
+ cfs_atomic_read(cli->cl_lru_left) > 0 ||
+ (cfs_atomic_read(&cli->cl_lru_in_list) > 0 &&
+ gen != cfs_atomic_read(&cli->cl_lru_in_list)),
+ &lwi);
+
+ cfs_atomic_dec(&osc_lru_waiters);
+ if (rc < 0)
+ break;
+ }
+
+ if (rc >= 0) {
+ /* Slot acquired: account the page as busy until it actually
+ * lands on the LRU list. */
+ cfs_atomic_inc(&cli->cl_lru_busy);
+ opg->ops_in_lru = 1;
+ rc = 0;
+ }
+
+ RETURN(rc);