ENTRY;
LASSERT(req->rq_phase == RQ_PHASE_NEW);
+
+ /* do not try to go further if there is not enough memory in enc_pool */
+ if (req->rq_sent && req->rq_bulk != NULL)
+ if (req->rq_bulk->bd_iov_count > get_free_pages_in_pool() &&
+ pool_is_at_full_capacity())
+ RETURN(-ENOMEM);
+
if (req->rq_sent && (req->rq_sent > cfs_time_current_sec()) &&
(!req->rq_generation_set ||
req->rq_import_generation == imp->imp_generation))
lustre_msg_get_opc(req->rq_reqmsg));
rc = ptl_send_rpc(req, 0);
+ if (rc == -ENOMEM) {
+ spin_lock(&imp->imp_lock);
+ if (!list_empty(&req->rq_list)) {
+ list_del_init(&req->rq_list);
+ atomic_dec(&req->rq_import->imp_inflight);
+ }
+ spin_unlock(&imp->imp_lock);
+ ptlrpc_rqphase_move(req, RQ_PHASE_NEW);
+ RETURN(rc);
+ }
if (rc) {
DEBUG_REQ(D_HA, req, "send failed (%d); expect timeout", rc);
spin_lock(&req->rq_lock);
}
rc = ptl_send_rpc(req, 0);
+ if (rc == -ENOMEM) {
+ spin_lock(&imp->imp_lock);
+ if (!list_empty(&req->rq_list))
+ list_del_init(&req->rq_list);
+ spin_unlock(&imp->imp_lock);
+ ptlrpc_rqphase_move(req, RQ_PHASE_NEW);
+ continue;
+ }
if (rc) {
DEBUG_REQ(D_HA, req,
"send failed: rc = %d", rc);
unsigned long epp_st_lowfree; /* lowest free pages reached */
unsigned int epp_st_max_wqlen; /* highest waitqueue length */
cfs_time_t epp_st_max_wait; /* in jeffies */
+ unsigned long epp_st_outofmem; /* # of out of mem requests */
/*
* pointers to pools
*/
"low free mark: %lu\n"
"max waitqueue depth: %u\n"
"max wait time: "CFS_TIME_T"/%lu\n"
+ "out of mem: %lu\n"
,
totalram_pages,
PAGES_PER_POOL,
page_pools.epp_st_lowfree,
page_pools.epp_st_max_wqlen,
page_pools.epp_st_max_wait,
- msecs_to_jiffies(MSEC_PER_SEC)
+ msecs_to_jiffies(MSEC_PER_SEC),
+ page_pools.epp_st_outofmem
);
spin_unlock(&page_pools.epp_lock);
}
/*
+ * Export the number of free pages currently available in the
+ * encryption page pool (enc_pool), for use by the RPC send path.
+ *
+ * NOTE(review): epp_free_pages is read here without taking
+ * epp_lock, so the returned value is only a racy snapshot; the
+ * caller (the -ENOMEM pre-check before sending a bulk request)
+ * uses it as a hint, not a guarantee.  The counter is also
+ * narrowed to int on return — assumes pool sizes stay well below
+ * INT_MAX, TODO confirm against epp_free_pages' declared type.
+ */
+int get_free_pages_in_pool(void)
+{
+ return page_pools.epp_free_pages;
+}
+EXPORT_SYMBOL(get_free_pages_in_pool);
+
+/*
+ * Let the outside world know whether the enc_pool has reached its
+ * configured maximum size: returns non-zero when epp_total_pages
+ * equals epp_max_pages, i.e. the pool cannot be grown any further.
+ *
+ * Combined with get_free_pages_in_pool(), this lets the send path
+ * fail a bulk request early with -ENOMEM instead of waiting on a
+ * pool that will never grow.
+ *
+ * NOTE(review): both fields are read without epp_lock — this is a
+ * racy snapshot, acceptable for its use as an advisory check.
+ */
+int pool_is_at_full_capacity(void)
+{
+ return (page_pools.epp_total_pages == page_pools.epp_max_pages);
+}
+EXPORT_SYMBOL(pool_is_at_full_capacity);
+
+/*
* we allocate the requested pages atomically.
*/
int sptlrpc_enc_pool_get_pages(struct ptlrpc_bulk_desc *desc)
enc_pools_wakeup();
} else {
- if (++page_pools.epp_waitqlen >
- page_pools.epp_st_max_wqlen)
- page_pools.epp_st_max_wqlen =
- page_pools.epp_waitqlen;
-
- set_current_state(TASK_UNINTERRUPTIBLE);
- init_waitqueue_entry(&waitlink, current);
- add_wait_queue(&page_pools.epp_waitq, &waitlink);
-
- spin_unlock(&page_pools.epp_lock);
- schedule();
- remove_wait_queue(&page_pools.epp_waitq, &waitlink);
- LASSERT(page_pools.epp_waitqlen > 0);
- spin_lock(&page_pools.epp_lock);
- page_pools.epp_waitqlen--;
+ if (page_pools.epp_growing) {
+ if (++page_pools.epp_waitqlen >
+ page_pools.epp_st_max_wqlen)
+ page_pools.epp_st_max_wqlen =
+ page_pools.epp_waitqlen;
+
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ init_waitqueue_entry(&waitlink, current);
+ add_wait_queue(&page_pools.epp_waitq,
+ &waitlink);
+
+ spin_unlock(&page_pools.epp_lock);
+ schedule();
+ remove_wait_queue(&page_pools.epp_waitq,
+ &waitlink);
+ LASSERT(page_pools.epp_waitqlen > 0);
+ spin_lock(&page_pools.epp_lock);
+ page_pools.epp_waitqlen--;
+ } else {
+ /* ptlrpcd thread should not sleep in that case,
+ * or deadlock may occur!
+ * Instead, return -ENOMEM so that upper layers
+ * will put request back in queue. */
+ page_pools.epp_st_outofmem++;
+ spin_unlock(&page_pools.epp_lock);
+ OBD_FREE(GET_ENC_KIOV(desc),
+ desc->bd_iov_count *
+ sizeof(*GET_ENC_KIOV(desc)));
+ GET_ENC_KIOV(desc) = NULL;
+ return -ENOMEM;
+ }
}
LASSERT(page_pools.epp_pages_short >= desc->bd_iov_count);
page_pools.epp_st_lowfree = 0;
page_pools.epp_st_max_wqlen = 0;
page_pools.epp_st_max_wait = 0;
+ page_pools.epp_st_outofmem = 0;
enc_pools_alloc();
if (page_pools.epp_pools == NULL)
CDEBUG(D_SEC,
"max pages %lu, grows %u, grow fails %u, shrinks %u, "
"access %lu, missing %lu, max qlen %u, max wait "
- CFS_TIME_T"/%lu\n",
+ CFS_TIME_T"/%lu, out of mem %lu\n",
page_pools.epp_st_max_pages, page_pools.epp_st_grows,
page_pools.epp_st_grow_fails,
page_pools.epp_st_shrinks, page_pools.epp_st_access,
page_pools.epp_st_missings, page_pools.epp_st_max_wqlen,
page_pools.epp_st_max_wait,
- msecs_to_jiffies(MSEC_PER_SEC));
+ msecs_to_jiffies(MSEC_PER_SEC),
+ page_pools.epp_st_outofmem);
}
}