Use kgnilnd_vzalloc() for copy buffer allocation in kgnilnd_rdma so
we don't stall allocating in low memory situations.
Clean up freeing of memory that uses kgnilnd_vzalloc to make sure we
call vfree.
Booted on a test node and verified we don't break anything. Ran IOR tests.
Test-Parameters: trivial
Signed-off-by: Chris Horn <hornc@cray.com>
Change-Id: Icb1dfe5f91f20195cd3a1093c57dc1157e127e9b
Reviewed-on: http://review.whamcloud.com/21154
Reviewed-by: James Shimek <jshimek@cray.com>
Reviewed-by: Chuck Fossen <chuckf@cray.com>
Reviewed-by: James Simmons <uja.ornl@yahoo.com>
Tested-by: Jenkins
Tested-by: Maloo <hpdd-maloo@intel.com>
Reviewed-by: Oleg Drokin <oleg.drokin@intel.com>
failed:
atomic_dec(&kgnilnd_data.kgn_nconns);
failed:
atomic_dec(&kgnilnd_data.kgn_nconns);
- LIBCFS_FREE(conn->gnc_tx_ref_table, GNILND_MAX_MSG_ID * sizeof(void *));
+ kgnilnd_vfree(conn->gnc_tx_ref_table,
+ GNILND_MAX_MSG_ID * sizeof(void *));
LIBCFS_FREE(conn, sizeof(*conn));
return rc;
}
LIBCFS_FREE(conn, sizeof(*conn));
return rc;
}
kgnilnd_peer_decref(conn->gnc_peer);
if (conn->gnc_tx_ref_table != NULL) {
kgnilnd_peer_decref(conn->gnc_peer);
if (conn->gnc_tx_ref_table != NULL) {
- LIBCFS_FREE(conn->gnc_tx_ref_table,
- GNILND_MAX_MSG_ID * sizeof(void *));
+ kgnilnd_vfree(conn->gnc_tx_ref_table,
+ GNILND_MAX_MSG_ID * sizeof(void *));
}
LIBCFS_FREE(conn, sizeof(*conn));
}
LIBCFS_FREE(conn, sizeof(*conn));
+/* Free a buffer allocated with kgnilnd_vzalloc(): decrement the libcfs
+ * kernel-memory accounting for 'size' bytes, then release the virtual
+ * mapping with vfree().
+ * NOTE(review): no NULL guard here — vfree(NULL) is a no-op, but
+ * libcfs_kmem_dec() would still subtract 'size'; callers in this patch
+ * check "ptr != NULL" before calling — confirm that holds everywhere.
+ */
+static inline void kgnilnd_vfree(void *ptr, int size)
+{
+	libcfs_kmem_dec(ptr, size);
+	vfree(ptr);
+}
+
/* Copied from DEBUG_REQ in Lustre - the dance is needed to save stack space */
extern void
/* Copied from DEBUG_REQ in Lustre - the dance is needed to save stack space */
extern void
/* Only free the buffer if we used it */
if (tx->tx_buffer_copy != NULL) {
/* Only free the buffer if we used it */
if (tx->tx_buffer_copy != NULL) {
- vfree(tx->tx_buffer_copy);
+ kgnilnd_vfree(tx->tx_buffer_copy, tx->tx_rdma_desc.length);
tx->tx_buffer_copy = NULL;
CDEBUG(D_MALLOC, "vfreed buffer2\n");
}
tx->tx_buffer_copy = NULL;
CDEBUG(D_MALLOC, "vfreed buffer2\n");
}
if (tx->tx_buffer_copy == NULL) {
/* Allocate the largest copy buffer we will need, this will prevent us from overwriting data
* and require at most we allocate a few extra bytes. */
if (tx->tx_buffer_copy == NULL) {
/* Allocate the largest copy buffer we will need, this will prevent us from overwriting data
* and require at most we allocate a few extra bytes. */
- tx->tx_buffer_copy = vmalloc(desc_nob);
+ tx->tx_buffer_copy = kgnilnd_vzalloc(desc_nob);
if (!tx->tx_buffer_copy) {
/* allocation of buffer failed nak the rdma */
if (!tx->tx_buffer_copy) {
/* allocation of buffer failed nak the rdma */
rc = kgnilnd_mem_register(conn->gnc_device->gnd_handle, (__u64)tx->tx_buffer_copy, desc_nob, NULL, GNI_MEM_READWRITE, &tx->tx_buffer_copy_map_key);
if (rc != GNI_RC_SUCCESS) {
/* Registration Failed nak rdma and kill the tx. */
rc = kgnilnd_mem_register(conn->gnc_device->gnd_handle, (__u64)tx->tx_buffer_copy, desc_nob, NULL, GNI_MEM_READWRITE, &tx->tx_buffer_copy_map_key);
if (rc != GNI_RC_SUCCESS) {
/* Registration Failed nak rdma and kill the tx. */
- vfree(tx->tx_buffer_copy);
+ kgnilnd_vfree(tx->tx_buffer_copy,
+ desc_nob);
tx->tx_buffer_copy = NULL;
kgnilnd_nak_rdma(tx->tx_conn, tx->tx_msg.gnm_type, -EFAULT, cookie, tx->tx_msg.gnm_srcnid);
kgnilnd_tx_done(tx, -EFAULT);
tx->tx_buffer_copy = NULL;
kgnilnd_nak_rdma(tx->tx_conn, tx->tx_msg.gnm_type, -EFAULT, cookie, tx->tx_msg.gnm_srcnid);
kgnilnd_tx_done(tx, -EFAULT);
kgnilnd_unmap_buffer(tx, 0);
if (tx->tx_buffer_copy != NULL) {
kgnilnd_unmap_buffer(tx, 0);
if (tx->tx_buffer_copy != NULL) {
- vfree(tx->tx_buffer_copy);
+ kgnilnd_vfree(tx->tx_buffer_copy, desc_nob);
tx->tx_buffer_copy = NULL;
}
tx->tx_buffer_copy = NULL;
}
LIBCFS_FREE(fma_blk->gnm_bit_array, BITS_TO_LONGS(num_mbox) * sizeof (unsigned long));
free_blk:
if (fma_blk->gnm_state == GNILND_FMABLK_VIRT) {
LIBCFS_FREE(fma_blk->gnm_bit_array, BITS_TO_LONGS(num_mbox) * sizeof (unsigned long));
free_blk:
if (fma_blk->gnm_state == GNILND_FMABLK_VIRT) {
- LIBCFS_FREE(fma_blk->gnm_block, fma_blk->gnm_blk_size);
+ kgnilnd_vfree(fma_blk->gnm_block, fma_blk->gnm_blk_size);
} else {
kmem_cache_free(kgnilnd_data.kgn_mbox_cache, fma_blk->gnm_block);
}
} else {
kmem_cache_free(kgnilnd_data.kgn_mbox_cache, fma_blk->gnm_block);
}
if (fma_blk->gnm_state == GNILND_FMABLK_PHYS) {
kmem_cache_free(kgnilnd_data.kgn_mbox_cache, fma_blk->gnm_block);
} else {
if (fma_blk->gnm_state == GNILND_FMABLK_PHYS) {
kmem_cache_free(kgnilnd_data.kgn_mbox_cache, fma_blk->gnm_block);
} else {
- LIBCFS_FREE(fma_blk->gnm_block, fma_blk->gnm_blk_size);
+ kgnilnd_vfree(fma_blk->gnm_block, fma_blk->gnm_blk_size);
}
fma_blk->gnm_state = GNILND_FMABLK_FREED;
}
fma_blk->gnm_state = GNILND_FMABLK_FREED;