unsigned int ppp_waitqlen; /* wait queue length */
unsigned long ppp_pages_short; /* # of pages wanted by in-queue users */
unsigned int ppp_growing:1; /* set while adding pages */
+ unsigned int ppp_index; /* pool array index, used to determine
+ * pool element size */
/*
* indicating how idle the pools are, from 0 to MAX_IDLE_IDX
return 0;
}
-static void pool_release_free_pages(long npages, unsigned int pool_idx)
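+/*
+ * Release @npages free elements from @pool back to the kernel: whole pages
+ * for PAGES_POOL, OBD_FREE_LARGE buffers for the other pools.
+ */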
+static void pool_release_free_pages(long npages, struct ptlrpc_page_pool *pool)
{
int p_idx, g_idx;
int p_idx_max1, p_idx_max2;
- struct ptlrpc_page_pool *pool = page_pools[pool_idx];
LASSERT(npages > 0);
LASSERT(npages <= pool->ppp_free_pages);
LASSERT(pool->ppp_pools[p_idx]);
LASSERT(pool->ppp_pools[p_idx][g_idx] != NULL);
- if (pool_idx == PAGES_POOL)
+ if (pool->ppp_index == PAGES_POOL)
__free_page(pool->ppp_pools[p_idx][g_idx]);
else
OBD_FREE_LARGE(pool->ppp_pools[p_idx][g_idx],
- ELEMENT_SIZE(pool_idx));
+ ELEMENT_SIZE(pool->ppp_index));
pool->ppp_pools[p_idx][g_idx] = NULL;
sc->nr_to_scan = min_t(unsigned long, sc->nr_to_scan,
pool->ppp_free_pages - PTLRPC_MAX_BRW_PAGES);
if (sc->nr_to_scan > 0) {
- pool_release_free_pages(sc->nr_to_scan, pool_index);
+ pool_release_free_pages(sc->nr_to_scan, pool);
CDEBUG(D_SEC, "released %ld pages, %ld left\n",
(long)sc->nr_to_scan, pool->ppp_free_pages);
/*
* return how many pages cleaned up.
*/
-static unsigned long pool_cleanup(void ***pools, int npools, int pool_idx)
+static unsigned long pool_cleanup(void ***pools, int npools,
+ struct ptlrpc_page_pool *pool)
{
unsigned long cleaned = 0;
int i, j;
if (pools[i]) {
for (j = 0; j < PAGES_PER_POOL; j++) {
if (pools[i][j]) {
- if (pool_idx == PAGES_POOL) {
+ if (pool->ppp_index == PAGES_POOL) {
__free_page(pools[i][j]);
} else {
OBD_FREE_LARGE(pools[i][j],
- ELEMENT_SIZE(pool_idx));
+ ELEMENT_SIZE(pool->ppp_index));
}
cleaned++;
}
* the simplest way to avoid complexity. It's not frequently called.
*/
static void pool_insert(void ***pools, int npools, int npages,
- unsigned int pool_idx)
+ struct ptlrpc_page_pool *page_pool)
{
int freeslot;
int op_idx, np_idx, og_idx, ng_idx;
int cur_npools, end_npools;
- struct ptlrpc_page_pool *page_pool = page_pools[pool_idx];
LASSERT(npages > 0);
LASSERT(page_pool->ppp_total_pages+npages <= page_pool->ppp_max_pages);
}
#define POOL_INIT_SIZE (PTLRPC_MAX_BRW_SIZE / 4)
-static int pool_add_pages(int npages, int pool_index)
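+/*
+ * Allocate @npages new elements and insert them into @page_pool.
+ * Returns 0 on success, -ENOMEM if the allocations fail.
+ */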
+static int pool_add_pages(int npages, struct ptlrpc_page_pool *page_pool)
{
void ***pools;
int npools, alloced = 0;
int i, j, rc = -ENOMEM;
- struct ptlrpc_page_pool *page_pool = page_pools[pool_index];
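+	/* cache this pool's array index; it determines the pool element size */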
+ unsigned int pool_index = page_pool->ppp_index;
if (pool_index == PAGES_POOL) {
if (npages < POOL_INIT_SIZE >> PAGE_SHIFT)
}
LASSERT(alloced == npages);
- pool_insert(pools, npools, npages, pool_index);
+ pool_insert(pools, npools, npages, page_pool);
CDEBUG(D_SEC, "added %d pages into pools\n", npages);
OBD_FREE_PTR_ARRAY(pools, npools);
rc = 0;
out_pools:
if (rc) {
- pool_cleanup(pools, npools, pool_index);
+ pool_cleanup(pools, npools, page_pool);
}
out:
if (rc) {
return rc;
}
-static inline void pool_wakeup(unsigned int pool)
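+/* Wake any waiters on @pool; the caller must hold ppp_lock. */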
+static inline void pool_wakeup(struct ptlrpc_page_pool *pool)
{
- assert_spin_locked(&page_pools[pool]->ppp_lock);
+ assert_spin_locked(&pool->ppp_lock);
/* only call wake_up_all() if someone is actually waiting */
- if (unlikely(waitqueue_active(&page_pools[pool]->ppp_waitq)))
- wake_up_all(&page_pools[pool]->ppp_waitq);
+ if (unlikely(waitqueue_active(&pool->ppp_waitq)))
+ wake_up_all(&pool->ppp_waitq);
}
-static int pool_should_grow(int needed, unsigned int pool_index)
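+/* Decide whether @pool should grow to satisfy a request for @needed pages. */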
+static int pool_should_grow(int needed, struct ptlrpc_page_pool *pool)
{
- struct ptlrpc_page_pool *pool = page_pools[pool_index];
-
/*
* don't grow if someone else is growing the pools right now,
* or the pool has reached its full capacity
page_pool->ppp_st_missings++;
page_pool->ppp_pages_short += count;
- if (pool_should_grow(count, pool_idx)) {
+ if (pool_should_grow(count, page_pool)) {
page_pool->ppp_growing = 1;
spin_unlock(&page_pool->ppp_lock);
CDEBUG(D_SEC, "ppp_pages_short: %lu\n",
page_pool->ppp_pages_short);
- pool_add_pages(8, pool_idx);
+ pool_add_pages(8, page_pool);
spin_lock(&page_pool->ppp_lock);
page_pool->ppp_growing = 0;
- pool_wakeup(pool_idx);
+ pool_wakeup(page_pool);
} else {
if (page_pool->ppp_growing) {
if (++page_pool->ppp_waitqlen >
}
page_pool->ppp_free_pages += count;
- pool_wakeup(pool_idx);
+ pool_wakeup(page_pool);
out_unlock:
spin_unlock(&page_pool->ppp_lock);
/* ask for 1 page - so if the pool is empty, it will grow
* (this might also grow an in-use pool if it's full, which is fine)
*/
- if (pool_should_grow(1, PAGES_POOL)) {
+ if (pool_should_grow(1, pool)) {
pool->ppp_growing = 1;
spin_unlock(&pool->ppp_lock);
- pool_add_pages(PTLRPC_MAX_BRW_PAGES * 2, PAGES_POOL);
+ pool_add_pages(PTLRPC_MAX_BRW_PAGES * 2, pool);
spin_lock(&pool->ppp_lock);
pool->ppp_growing = 0;
- pool_wakeup(PAGES_POOL);
+ pool_wakeup(pool);
}
spin_unlock(&pool->ppp_lock);
return 0;
sizeof(*pool->ppp_pools));
}
-static inline void pool_free(unsigned int pool_index)
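+/* Release the array of pool pointers that pool_alloc() set up for @pool. */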
+static inline void pool_free(struct ptlrpc_page_pool *pool)
{
- struct ptlrpc_page_pool *pool = page_pools[pool_index];
-
LASSERT(pool->ppp_max_pools);
LASSERT(pool->ppp_pools);
pool->ppp_st_max_wait = ktime_set(0, 0);
pool_alloc(pool);
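+	/* remember this pool's slot so its element size can be derived later */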
+ pool->ppp_index = pool_index;
CDEBUG(D_SEC, "Allocated pool %i\n", pool_index);
if (pool->ppp_pools == NULL)
GOTO(fail, rc = -ENOMEM);
if (pool) {
unregister_shrinker(&pool->pool_shrinker);
if (pool->ppp_pools)
- pool_free(pool_index);
+ pool_free(pool);
OBD_FREE(pool, sizeof(**page_pools));
}
}
LASSERT(pool->ppp_total_pages == pool->ppp_free_pages);
npools = npages_to_npools(pool->ppp_total_pages);
- cleaned = pool_cleanup(pool->ppp_pools, npools, pool_index);
+ cleaned = pool_cleanup(pool->ppp_pools, npools, pool);
LASSERT(cleaned == pool->ppp_total_pages);
- pool_free(pool_index);
+ pool_free(pool);
if (pool->ppp_st_access > 0) {
CDEBUG(D_SEC,