"Set to 1 to drop asymmetrical route messages.");
#define LNET_TRANSACTION_TIMEOUT_NO_HEALTH_DEFAULT 50
-#define LNET_TRANSACTION_TIMEOUT_HEALTH_DEFAULT 10
+#define LNET_TRANSACTION_TIMEOUT_HEALTH_DEFAULT 50
unsigned lnet_transaction_timeout = LNET_TRANSACTION_TIMEOUT_HEALTH_DEFAULT;
static int transaction_to_set(const char *val, cfs_kernel_param_arg_t *kp);
MODULE_PARM_DESC(lnet_transaction_timeout,
"Maximum number of seconds to wait for a peer response.");
-#define LNET_RETRY_COUNT_HEALTH_DEFAULT 3
+#define LNET_RETRY_COUNT_HEALTH_DEFAULT 2
unsigned lnet_retry_count = LNET_RETRY_COUNT_HEALTH_DEFAULT;
static int retry_count_set(const char *val, cfs_kernel_param_arg_t *kp);
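/* Note on the two defaults above (illustrative sketch, not part of the
 * patch): LNet derives the per-attempt LND timeout by dividing the
 * transaction timeout across retry_count + 1 send attempts. Assuming
 * the usual helper (name and formula per the health code this patch
 * targets; treat both as assumptions):
 */
static void
lnet_set_lnd_timeout(void)
{
	lnet_lnd_timeout = (lnet_transaction_timeout - 1) /
			   (lnet_retry_count + 1);
}
/* With the new defaults each of the 3 attempts gets (50 - 1) / (2 + 1)
 * = 16 seconds; the old 10-second default left only ~2 seconds per
 * attempt.
 */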
#ifdef HAVE_KERNEL_PARAM_OPS
struct kmem_cache *lnet_mes_cachep; /* MEs kmem_cache */
struct kmem_cache *lnet_small_mds_cachep; /* <= LNET_SMALL_MD_SIZE bytes
* MDs kmem_cache */
+struct kmem_cache *lnet_rspt_cachep; /* response tracker cache */
+struct kmem_cache *lnet_msg_cachep; /* lnet_msg cache */
static int
-lnet_descriptor_setup(void)
+lnet_slab_setup(void)
{
/* create specific kmem_cache for MEs and small MDs (i.e., originally
* allocated in <size-xxx> kmem_cache).
if (!lnet_small_mds_cachep)
return -ENOMEM;
+ lnet_rspt_cachep = kmem_cache_create("lnet_rspt",
+ sizeof(struct lnet_rsp_tracker), 0, 0, NULL);
+ if (!lnet_rspt_cachep)
+ return -ENOMEM;
+
+ lnet_msg_cachep = kmem_cache_create("lnet_msg", sizeof(struct lnet_msg),
+ 0, 0, NULL);
+ if (!lnet_msg_cachep)
+ return -ENOMEM;
+
return 0;
}
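/* Usage note (sketch, with a hypothetical caller name): on a partial
 * failure lnet_slab_setup() returns -ENOMEM without unwinding the
 * caches it already created. That is safe because lnet_slab_cleanup()
 * below NULL-checks and resets every pointer, so callers simply pair
 * the two calls unconditionally:
 */
static int
lnet_slab_setup_example(void)
{
	int rc = lnet_slab_setup();

	if (rc != 0)
		lnet_slab_cleanup();	/* frees only what was created */
	return rc;
}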
static void
-lnet_descriptor_cleanup(void)
+lnet_slab_cleanup(void)
{
+ if (lnet_msg_cachep) {
+ kmem_cache_destroy(lnet_msg_cachep);
+ lnet_msg_cachep = NULL;
+ }
+
+ if (lnet_rspt_cachep) {
+ kmem_cache_destroy(lnet_rspt_cachep);
+ lnet_rspt_cachep = NULL;
+ }
if (lnet_small_mds_cachep) {
kmem_cache_destroy(lnet_small_mds_cachep);
LNetInvalidateEQHandle(&the_lnet.ln_mt_eqh);
init_completion(&the_lnet.ln_started);
- rc = lnet_descriptor_setup();
+ rc = lnet_slab_setup();
if (rc != 0)
goto failed;
the_lnet.ln_counters = NULL;
}
lnet_destroy_remote_nets_table();
- lnet_descriptor_cleanup();
+ lnet_slab_cleanup();
return 0;
}
void
lnet_ping_buffer_free(struct lnet_ping_buffer *pbuf)
{
- LASSERT(lnet_ping_buffer_numref(pbuf) == 0);
+ LASSERT(atomic_read(&pbuf->pb_refcnt) == 0);
LIBCFS_FREE(pbuf, LNET_PING_BUFFER_SIZE(pbuf->pb_nnis));
}
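/* For context on the atomic_read() conversions in this patch: the ping
 * buffer refcount is a plain atomic_t (pb_refcnt), and take/release
 * look roughly like this (minimal sketch, assuming the conventional
 * helper names):
 */
static inline void
lnet_ping_buffer_addref(struct lnet_ping_buffer *pbuf)
{
	atomic_inc(&pbuf->pb_refcnt);
}

static inline void
lnet_ping_buffer_decref(struct lnet_ping_buffer *pbuf)
{
	if (atomic_dec_and_test(&pbuf->pb_refcnt))
		lnet_ping_buffer_free(pbuf);	/* last ref frees it */
}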
fail_unlink_ping_me:
LNetMEUnlink(me);
fail_decref_ping_buffer:
- LASSERT(lnet_ping_buffer_numref(*ppbuf) == 1);
+ LASSERT(atomic_read(&(*ppbuf)->pb_refcnt) == 1);
lnet_ping_buffer_decref(*ppbuf);
*ppbuf = NULL;
fail_free_eq:
LNetInvalidateMDHandle(ping_mdh);
/* NB the MD could be busy; this just starts the unlink */
- while (lnet_ping_buffer_numref(pbuf) > 1) {
+ while (atomic_read(&pbuf->pb_refcnt) > 1) {
CDEBUG(D_NET, "Still waiting for ping data MD to unlink\n");
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(1));
+ schedule_timeout_uninterruptible(cfs_time_seconds(1));
}
cfs_restore_sigs(blocked);
LNetInvalidateMDHandle(&the_lnet.ln_push_target_md);
/* Wait for the unlink to complete. */
- while (lnet_ping_buffer_numref(the_lnet.ln_push_target) > 1) {
+ while (atomic_read(&the_lnet.ln_push_target->pb_refcnt) > 1) {
CDEBUG(D_NET, "Still waiting for ping data MD to unlink\n");
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(1));
+ schedule_timeout_uninterruptible(cfs_time_seconds(1));
}
lnet_ping_buffer_decref(the_lnet.ln_push_target);
"Waiting for zombie LNI %s\n",
libcfs_nid2str(ni->ni_nid));
}
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(1));
+ schedule_timeout_uninterruptible(cfs_time_seconds(1));
lnet_net_lock(LNET_LOCK_EX);
continue;
}
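/* The three conversions above are behavior-preserving:
 * schedule_timeout_uninterruptible() is the kernel's one-call spelling
 * of the removed two-line pair. Roughly (illustrative sketch mirroring
 * the scheduler helper):
 */
static signed long
schedule_timeout_uninterruptible_sketch(signed long timeout)
{
	__set_current_state(TASK_UNINTERRUPTIBLE);
	return schedule_timeout(timeout);
}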
lnet_shutdown_lndnets(void)
{
struct lnet_net *net;
- struct list_head resend;
+ LIST_HEAD(resend);
struct lnet_msg *msg, *tmp;
- INIT_LIST_HEAD(&resend);
-
/* NB called holding the global mutex */
/* All quiet on the API front */
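/* The LIST_HEAD() conversions in the hunks below are likewise purely
 * cosmetic: LIST_HEAD(name) declares and initializes the list head in
 * one step, replacing the declare-then-INIT_LIST_HEAD() pattern. For
 * illustration:
 */
static void
list_head_styles(void)
{
	LIST_HEAD(new_style);		/* initialized at definition */
	struct list_head old_style;

	INIT_LIST_HEAD(&old_style);	/* two-step equivalent */
}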
lnet_startup_lndnet(struct lnet_net *net, struct lnet_lnd_tunables *tun)
{
struct lnet_ni *ni;
- struct lnet_net *net_l = NULL;
- struct list_head local_ni_list;
- int rc;
- int ni_count = 0;
- __u32 lnd_type;
+ struct lnet_net *net_l = NULL;
+ LIST_HEAD(local_ni_list);
+ int rc;
+ int ni_count = 0;
+ __u32 lnd_type;
const struct lnet_lnd *lnd;
- int peer_timeout =
+ int peer_timeout =
net->net_tunables.lct_peer_timeout;
- int maxtxcredits =
+ int maxtxcredits =
net->net_tunables.lct_max_tx_credits;
- int peerrtrcredits =
+ int peerrtrcredits =
net->net_tunables.lct_peer_rtr_credits;
- INIT_LIST_HEAD(&local_ni_list);
-
/*
* make sure that this net is unique. If it isn't then
* we are adding interfaces to an already existing network, and
if (rc < 0)
goto failed1;
- LASSERT(ni->ni_net->net_tunables.lct_peer_timeout <= 0 ||
- ni->ni_net->net_lnd->lnd_query != NULL);
-
lnet_ni_addref(ni);
list_add_tail(&ni->ni_netlist, &local_ni_list);
int ni_count;
struct lnet_ping_buffer *pbuf;
struct lnet_handle_md ping_mdh;
- struct list_head net_head;
+ LIST_HEAD(net_head);
struct lnet_net *net;
- INIT_LIST_HEAD(&net_head);
-
mutex_lock(&the_lnet.ln_api_mutex);
CDEBUG(D_OTHER, "refs %d\n", the_lnet.ln_refcount);
struct lnet_net *net;
char *nets;
int rc;
- struct list_head net_head;
-
- INIT_LIST_HEAD(&net_head);
+ LIST_HEAD(net_head);
rc = lnet_parse_ip2nets(&nets, ip2nets);
if (rc < 0)
int
lnet_dyn_add_net(struct lnet_ioctl_config_data *conf)
{
- struct lnet_net *net;
- struct list_head net_head;
- int rc;
+ struct lnet_net *net;
+ LIST_HEAD(net_head);
+ int rc;
struct lnet_ioctl_config_lnd_tunables tun;
char *nets = conf->cfg_config_u.cfg_net.net_intf;
- INIT_LIST_HEAD(&net_head);
-
/* Create a net/ni structures for the network string */
rc = lnet_parse_networks(&net_head, nets, use_tcp_bonding);
if (rc <= 0)