* Author: Eric Barton <eric@bartonsoftware.com>
*/
-#include <linux/pci.h>
#include "socklnd.h"
+#include <linux/inetdevice.h>
static struct lnet_lnd the_ksocklnd;
struct ksock_nal_data ksocknal_data;
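/*
 * ksocknal_choose_scheduler_locked() - pick a scheduler for a new conn.
 * With per-CPT schedulers there is exactly one ksock_sched per CPT; if
 * the scheduler for the preferred cpt has no threads yet, fall back to
 * the first CPT that does, or return NULL if no scheduler has threads.
 */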
static struct ksock_sched *
ksocknal_choose_scheduler_locked(unsigned int cpt)
{
- struct ksock_sched_info *info = ksocknal_data.ksnd_sched_info[cpt];
- struct ksock_sched *sched;
+ struct ksock_sched *sched = ksocknal_data.ksnd_schedulers[cpt];
int i;
- if (info->ksi_nthreads == 0) {
- cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) {
- if (info->ksi_nthreads > 0) {
+ if (sched->kss_nthreads == 0) {
+ cfs_percpt_for_each(sched, i, ksocknal_data.ksnd_schedulers) {
+ if (sched->kss_nthreads > 0) {
CDEBUG(D_NET, "scheduler[%d] has no threads. selected scheduler[%d]\n",
- cpt, info->ksi_cpt);
- goto select_sched;
+ cpt, sched->kss_cpt);
+ return sched;
}
}
return NULL;
}
-select_sched:
- sched = &info->ksi_scheds[0];
- /*
- * NB: it's safe so far, but info->ksi_nthreads could be changed
- * at runtime when we have dynamic LNet configuration, then we
- * need to take care of this.
- */
- for (i = 1; i < info->ksi_nthreads; i++) {
- if (sched->kss_nconns > info->ksi_scheds[i].kss_nconns)
- sched = &info->ksi_scheds[i];
- }
-
return sched;
}
* The cpt might have changed if we ended up selecting a scheduler
* that is not native to this cpt. So use the scheduler's cpt instead.
*/
- cpt = sched->kss_info->ksi_cpt;
+ cpt = sched->kss_cpt;
sched->kss_nconns++;
conn->ksnc_scheduler = sched;
*/
CDEBUG(D_NET, "New conn %s p %d.x %pI4h -> %pI4h/%d"
- " incarnation:%lld sched[%d:%d]\n",
+ " incarnation:%lld sched[%d]\n",
libcfs_id2str(peerid), conn->ksnc_proto->pro_version,
&conn->ksnc_myipaddr, &conn->ksnc_ipaddr,
- conn->ksnc_port, incarnation, cpt,
- (int)(sched - &sched->kss_info->ksi_scheds[0]));
+ conn->ksnc_port, incarnation, cpt);
if (active) {
/* additional routes after interface exchange? */
&conn->ksnc_ipaddr, conn->ksnc_port,
conn->ksnc_rx_nob_wanted, conn->ksnc_rx_nob_left,
ktime_get_seconds() - last_rcv);
- lnet_finalize(conn->ksnc_cookie, -EIO);
+ if (conn->ksnc_lnet_msg)
+ conn->ksnc_lnet_msg->msg_health_status =
+ LNET_MSG_STATUS_REMOTE_ERROR;
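+ /* lnet_finalize() returns early for a NULL msg, so it is safe
+ * to call it unconditionally here */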
+ lnet_finalize(conn->ksnc_lnet_msg, -EIO);
break;
case SOCKNAL_RX_LNET_HEADER:
if (conn->ksnc_rx_started)
data->ioc_u32[1] = conn->ksnc_port;
data->ioc_u32[2] = conn->ksnc_myipaddr;
data->ioc_u32[3] = conn->ksnc_type;
- data->ioc_u32[4] = conn->ksnc_scheduler->kss_info->ksi_cpt;
+ data->ioc_u32[4] = conn->ksnc_scheduler->kss_cpt;
data->ioc_u32[5] = rxmem;
data->ioc_u32[6] = conn->ksnc_peer->ksnp_id.pid;
ksocknal_conn_decref(conn);
{
LASSERT (atomic_read(&ksocknal_data.ksnd_nactive_txs) == 0);
- if (ksocknal_data.ksnd_sched_info != NULL) {
- struct ksock_sched_info *info;
- int i;
-
- cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) {
- if (info->ksi_scheds != NULL) {
- LIBCFS_FREE(info->ksi_scheds,
- info->ksi_nthreads_max *
- sizeof(info->ksi_scheds[0]));
- }
- }
- cfs_percpt_free(ksocknal_data.ksnd_sched_info);
- }
+ if (ksocknal_data.ksnd_schedulers != NULL)
+ cfs_percpt_free(ksocknal_data.ksnd_schedulers);
LIBCFS_FREE (ksocknal_data.ksnd_peers,
sizeof(struct list_head) *
static void
ksocknal_base_shutdown(void)
{
- struct ksock_sched_info *info;
struct ksock_sched *sched;
int i;
- int j;
CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n",
atomic_read (&libcfs_kmemory));
LASSERT(list_empty(&ksocknal_data.ksnd_connd_connreqs));
LASSERT(list_empty(&ksocknal_data.ksnd_connd_routes));
- if (ksocknal_data.ksnd_sched_info != NULL) {
- cfs_percpt_for_each(info, i,
- ksocknal_data.ksnd_sched_info) {
- if (info->ksi_scheds == NULL)
- continue;
-
- for (j = 0; j < info->ksi_nthreads_max; j++) {
+ if (ksocknal_data.ksnd_schedulers != NULL) {
+ cfs_percpt_for_each(sched, i,
+ ksocknal_data.ksnd_schedulers) {
- sched = &info->ksi_scheds[j];
- LASSERT(list_empty(&sched->\
- kss_tx_conns));
- LASSERT(list_empty(&sched->\
- kss_rx_conns));
- LASSERT(list_empty(&sched-> \
- kss_zombie_noop_txs));
- LASSERT(sched->kss_nconns == 0);
- }
+ LASSERT(list_empty(&sched->kss_tx_conns));
+ LASSERT(list_empty(&sched->kss_rx_conns));
+ LASSERT(list_empty(&sched->kss_zombie_noop_txs));
+ LASSERT(sched->kss_nconns == 0);
}
}
wake_up_all(&ksocknal_data.ksnd_connd_waitq);
wake_up_all(&ksocknal_data.ksnd_reaper_waitq);
- if (ksocknal_data.ksnd_sched_info != NULL) {
- cfs_percpt_for_each(info, i,
- ksocknal_data.ksnd_sched_info) {
- if (info->ksi_scheds == NULL)
- continue;
-
- for (j = 0; j < info->ksi_nthreads_max; j++) {
- sched = &info->ksi_scheds[j];
+ if (ksocknal_data.ksnd_schedulers != NULL) {
+ cfs_percpt_for_each(sched, i,
+ ksocknal_data.ksnd_schedulers)
wake_up_all(&sched->kss_waitq);
- }
- }
}
i = 4;
static int
ksocknal_base_startup(void)
{
- struct ksock_sched_info *info;
- int rc;
- int i;
+ struct ksock_sched *sched;
+ int rc;
+ int i;
LASSERT (ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING);
LASSERT (ksocknal_data.ksnd_nnets == 0);
ksocknal_data.ksnd_init = SOCKNAL_INIT_DATA;
try_module_get(THIS_MODULE);
- ksocknal_data.ksnd_sched_info = cfs_percpt_alloc(lnet_cpt_table(),
- sizeof(*info));
- if (ksocknal_data.ksnd_sched_info == NULL)
+ /* Create a scheduler block per available CPT */
+ ksocknal_data.ksnd_schedulers = cfs_percpt_alloc(lnet_cpt_table(),
+ sizeof(*sched));
+ if (ksocknal_data.ksnd_schedulers == NULL)
goto failed;
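+ /* cfs_percpt_alloc() hands back zeroed per-CPT blocks, so every
+ * scheduler starts with kss_nthreads == 0; the threads themselves
+ * are created later by ksocknal_start_schedulers() */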
- cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) {
- struct ksock_sched *sched;
+ cfs_percpt_for_each(sched, i, ksocknal_data.ksnd_schedulers) {
int nthrs;
+ /*
+ * make sure not to allocate more threads than there are
+ * cores/CPUs in the CPT
+ */
nthrs = cfs_cpt_weight(lnet_cpt_table(), i);
if (*ksocknal_tunables.ksnd_nscheds > 0) {
nthrs = min(nthrs, *ksocknal_tunables.ksnd_nscheds);
} else {
- /* max to half of CPUs, assume another half should be
- * reserved for upper layer modules */
+ /*
+ * max to half of CPUs, assume another half should be
+ * reserved for upper layer modules
+ */
nthrs = min(max(SOCKNAL_NSCHEDS, nthrs >> 1), nthrs);
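/* e.g. assuming SOCKNAL_NSCHEDS is 3 (its value in socklnd.h), a
 * 16-core CPT with ksnd_nscheds unset gets
 * min(max(3, 16 >> 1), 16) = 8 scheduler threads */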
}
- info->ksi_nthreads_max = nthrs;
- info->ksi_cpt = i;
-
- if (nthrs != 0) {
- LIBCFS_CPT_ALLOC(info->ksi_scheds, lnet_cpt_table(), i,
- info->ksi_nthreads_max *
- sizeof(*sched));
- if (info->ksi_scheds == NULL)
- goto failed;
-
- for (; nthrs > 0; nthrs--) {
- sched = &info->ksi_scheds[nthrs - 1];
-
- sched->kss_info = info;
- spin_lock_init(&sched->kss_lock);
- INIT_LIST_HEAD(&sched->kss_rx_conns);
- INIT_LIST_HEAD(&sched->kss_tx_conns);
- INIT_LIST_HEAD(&sched->kss_zombie_noop_txs);
- init_waitqueue_head(&sched->kss_waitq);
- }
- }
+ sched->kss_nthreads_max = nthrs;
+ sched->kss_cpt = i;
+
+ spin_lock_init(&sched->kss_lock);
+ INIT_LIST_HEAD(&sched->kss_rx_conns);
+ INIT_LIST_HEAD(&sched->kss_tx_conns);
+ INIT_LIST_HEAD(&sched->kss_zombie_noop_txs);
+ init_waitqueue_head(&sched->kss_waitq);
}
ksocknal_data.ksnd_connd_starting = 0;
}
static int
-ksocknal_enumerate_interfaces(struct ksock_net *net)
+ksocknal_enumerate_interfaces(struct ksock_net *net, char *iname)
{
- char **names;
- int i;
- int j;
- int rc;
- int n;
-
- n = lnet_ipif_enumerate(&names);
- if (n <= 0) {
- CERROR("Can't enumerate interfaces: %d\n", n);
- return n;
- }
-
- for (i = j = 0; i < n; i++) {
- int up;
- __u32 ip;
- __u32 mask;
+ struct net_device *dev;
- if (!strcmp(names[i], "lo")) /* skip the loopback IF */
- continue;
+ rtnl_lock();
+ for_each_netdev(&init_net, dev) {
+ /* The iname specified by a userland configuration can
+ * map to an ifa_label, so always treat iname as an ifa_label.
+ * If iname is NULL then fall back to the net device name.
+ */
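+ /* e.g. an iname of "eth0:1" (an IP alias) matches the ifa_label
+ * on eth0 rather than naming a separate net_device */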
+ const char *name = iname ? iname : dev->name;
+ struct in_device *in_dev;
- rc = lnet_ipif_query(names[i], &up, &ip, &mask);
- if (rc != 0) {
- CWARN("Can't get interface %s info: %d\n",
- names[i], rc);
- continue;
- }
+ if (strcmp(dev->name, "lo") == 0) /* skip the loopback IF */
+ continue;
- if (!up) {
- CWARN("Ignoring interface %s (down)\n",
- names[i]);
- continue;
- }
+ if (!(dev_get_flags(dev) & IFF_UP)) {
+ CWARN("Ignoring interface %s (down)\n", dev->name);
+ continue;
+ }
- if (j == LNET_INTERFACES_NUM) {
- CWARN("Ignoring interface %s (too many interfaces)\n",
- names[i]);
+ in_dev = __in_dev_get_rtnl(dev);
+ if (!in_dev) {
+ CWARN("Interface %s has no IPv4 status.\n", dev->name);
continue;
}
- net->ksnn_interfaces[j].ksni_ipaddr = ip;
- net->ksnn_interfaces[j].ksni_netmask = mask;
- strlcpy(net->ksnn_interfaces[j].ksni_name,
- names[i], sizeof(net->ksnn_interfaces[j].ksni_name));
- j++;
- }
+ for_ifa(in_dev)
+ if (strcmp(name, ifa->ifa_label) == 0) {
+ int idx = net->ksnn_ninterfaces;
+ struct ksock_interface *ksi;
+
+ if (idx >= ARRAY_SIZE(net->ksnn_interfaces)) {
+ rtnl_unlock();
+ return -E2BIG;
+ }
- lnet_ipif_free_enumeration(names, n);
+ ksi = &net->ksnn_interfaces[idx];
+ ksi->ksni_ipaddr = ntohl(ifa->ifa_local);
+ ksi->ksni_netmask = ntohl(ifa->ifa_mask);
+ strlcpy(ksi->ksni_name,
+ name, sizeof(ksi->ksni_name));
+ net->ksnn_ninterfaces++;
+ break;
+ }
+ endfor_ifa(in_dev);
+ }
+ rtnl_unlock();
- if (j == 0)
+ if (net->ksnn_ninterfaces == 0)
CERROR("Can't find any usable interfaces\n");
- return j;
+ return net->ksnn_ninterfaces > 0 ? 0 : -ENOENT;
}
static int
}
static int
-ksocknal_start_schedulers(struct ksock_sched_info *info)
+ksocknal_start_schedulers(struct ksock_sched *sched)
{
int nthrs;
int rc = 0;
int i;
- if (info->ksi_nthreads == 0) {
+ if (sched->kss_nthreads == 0) {
if (*ksocknal_tunables.ksnd_nscheds > 0) {
- nthrs = info->ksi_nthreads_max;
+ nthrs = sched->kss_nthreads_max;
} else {
nthrs = cfs_cpt_weight(lnet_cpt_table(),
- info->ksi_cpt);
+ sched->kss_cpt);
nthrs = min(max(SOCKNAL_NSCHEDS, nthrs >> 1), nthrs);
nthrs = min(SOCKNAL_NSCHEDS_HIGH, nthrs);
}
- nthrs = min(nthrs, info->ksi_nthreads_max);
+ nthrs = min(nthrs, sched->kss_nthreads_max);
} else {
- LASSERT(info->ksi_nthreads <= info->ksi_nthreads_max);
+ LASSERT(sched->kss_nthreads <= sched->kss_nthreads_max);
/* increase two threads if there is new interface */
- nthrs = min(2, info->ksi_nthreads_max - info->ksi_nthreads);
+ nthrs = min(2, sched->kss_nthreads_max - sched->kss_nthreads);
}
for (i = 0; i < nthrs; i++) {
long id;
char name[20];
- struct ksock_sched *sched;
- id = KSOCK_THREAD_ID(info->ksi_cpt, info->ksi_nthreads + i);
- sched = &info->ksi_scheds[KSOCK_THREAD_SID(id)];
+ id = KSOCK_THREAD_ID(sched->kss_cpt, sched->kss_nthreads + i);
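+ /* KSOCK_THREAD_ID() packs the CPT and the per-CPT thread index
+ * into one id; KSOCK_THREAD_SID() (socklnd.h) recovers the index
+ * used in the thread name below */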
snprintf(name, sizeof(name), "socknal_sd%02d_%02d",
- info->ksi_cpt, (int)(sched - &info->ksi_scheds[0]));
+ sched->kss_cpt, (int)KSOCK_THREAD_SID(id));
rc = ksocknal_thread_start(ksocknal_scheduler,
(void *)id, name);
continue;
CERROR("Can't spawn thread %d for scheduler[%d]: %d\n",
- info->ksi_cpt, info->ksi_nthreads + i, rc);
+ sched->kss_cpt, (int)KSOCK_THREAD_SID(id), rc);
break;
}
- info->ksi_nthreads += i;
+ sched->kss_nthreads += i;
return rc;
}
return -EINVAL;
for (i = 0; i < ncpts; i++) {
- struct ksock_sched_info *info;
+ struct ksock_sched *sched;
int cpt = (cpts == NULL) ? i : cpts[i];
LASSERT(cpt < cfs_cpt_number(lnet_cpt_table()));
- info = ksocknal_data.ksnd_sched_info[cpt];
+ sched = ksocknal_data.ksnd_schedulers[cpt];
- if (!newif && info->ksi_nthreads > 0)
+ if (!newif && sched->kss_nthreads > 0)
continue;
- rc = ksocknal_start_schedulers(info);
+ rc = ksocknal_start_schedulers(sched);
if (rc != 0)
return rc;
}
ksocknal_startup(struct lnet_ni *ni)
{
struct ksock_net *net;
+ struct lnet_ioctl_config_lnd_cmn_tunables *net_tunables;
int rc;
int i;
struct net_device *net_dev;
spin_lock_init(&net->ksnn_lock);
net->ksnn_incarnation = ktime_get_real_ns();
ni->ni_data = net;
- if (!ni->ni_net->net_tunables_set) {
- ni->ni_net->net_tunables.lct_peer_timeout =
+ net_tunables = &ni->ni_net->net_tunables;
+
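+ /* a tunable of -1 means it was not set by the configuration,
+ * so fall back to the socklnd module defaults below */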
+ if (net_tunables->lct_peer_timeout == -1)
+ net_tunables->lct_peer_timeout =
*ksocknal_tunables.ksnd_peertimeout;
- ni->ni_net->net_tunables.lct_max_tx_credits =
+
+ if (net_tunables->lct_max_tx_credits == -1)
+ net_tunables->lct_max_tx_credits =
*ksocknal_tunables.ksnd_credits;
- ni->ni_net->net_tunables.lct_peer_tx_credits =
+
+ if (net_tunables->lct_peer_tx_credits == -1)
+ net_tunables->lct_peer_tx_credits =
*ksocknal_tunables.ksnd_peertxcredits;
- ni->ni_net->net_tunables.lct_peer_rtr_credits =
- *ksocknal_tunables.ksnd_peerrtrcredits;
- ni->ni_net->net_tunables_set = true;
- }
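+ /* never grant a peer more tx credits than the NI has in total */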
+ if (net_tunables->lct_peer_tx_credits >
+ net_tunables->lct_max_tx_credits)
+ net_tunables->lct_peer_tx_credits =
+ net_tunables->lct_max_tx_credits;
- if (ni->ni_interfaces[0] == NULL) {
- rc = ksocknal_enumerate_interfaces(net);
- if (rc <= 0)
- goto fail_1;
+ if (net_tunables->lct_peer_rtr_credits == -1)
+ net_tunables->lct_peer_rtr_credits =
+ *ksocknal_tunables.ksnd_peerrtrcredits;
- net->ksnn_ninterfaces = 1;
+ if (!ni->ni_interfaces[0]) {
+ rc = ksocknal_enumerate_interfaces(net, NULL);
+ if (rc < 0)
+ goto fail_1;
} else {
+ /* Before Multi-Rail, ksocklnd would manage
+ * multiple interfaces with its own tcp bonding.
+ * If we encounter an old configuration using
+ * this tcp bonding approach then we need to
+ * handle more than one entry in ni_interfaces.
+ *
+ * In a Multi-Rail configuration only ONE
+ * ni_interfaces entry should exist. Each IP alias
+ * should be mapped to its own 'struct lnet_ni'.
+ */
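+ /* e.g. a legacy module-parameter configuration such as
+ * networks="tcp(eth0,eth1)" (assumed old tcp-bonding syntax)
+ * yields several ni_interfaces entries, while Multi-Rail adds
+ * one NI per interface */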
for (i = 0; i < LNET_INTERFACES_NUM; i++) {
- int up;
+ int j;
- if (ni->ni_interfaces[i] == NULL)
+ if (!ni->ni_interfaces[i])
break;
- rc = lnet_ipif_query(ni->ni_interfaces[i], &up,
- &net->ksnn_interfaces[i].ksni_ipaddr,
- &net->ksnn_interfaces[i].ksni_netmask);
+ for (j = 0; j < net->ksnn_ninterfaces; j++) {
+ struct ksock_interface *ksi;
- if (rc != 0) {
- CERROR("Can't get interface %s info: %d\n",
- ni->ni_interfaces[i], rc);
- goto fail_1;
- }
+ ksi = &net->ksnn_interfaces[j];
- if (!up) {
- CERROR("Interface %s is down\n",
- ni->ni_interfaces[i]);
- goto fail_1;
+ if (strcmp(ni->ni_interfaces[i],
+ ksi->ksni_name) == 0) {
+ CERROR("found duplicate %s\n",
+ ksi->ksni_name);
+ rc = -EEXIST;
+ goto fail_1;
+ }
}
- strlcpy(net->ksnn_interfaces[i].ksni_name,
- ni->ni_interfaces[i],
- sizeof(net->ksnn_interfaces[i].ksni_name));
-
+ rc = ksocknal_enumerate_interfaces(net, ni->ni_interfaces[i]);
+ if (rc < 0)
+ goto fail_1;
}
- net->ksnn_ninterfaces = i;
}
net_dev = dev_get_by_name(&init_net,