X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lnet%2Fklnds%2Fsocklnd%2Fsocklnd.c;h=5d60a0a067656fea6b8ed91f2d16e976a8c4f997;hp=1e3674605bca00502db1d4e3afd4a22c96326a93;hb=1aae733c16161513b07d7f8cc046299e2de5aad3;hpb=5b5538e9e728292f1cb5501228a13b8f4787dd97

diff --git a/lnet/klnds/socklnd/socklnd.c b/lnet/klnds/socklnd/socklnd.c
index 1e36746..5d60a0a 100644
--- a/lnet/klnds/socklnd/socklnd.c
+++ b/lnet/klnds/socklnd/socklnd.c
@@ -37,8 +37,8 @@
  * Author: Eric Barton
  */

-#include 
 #include "socklnd.h"
+#include 

 static struct lnet_lnd the_ksocklnd;
 struct ksock_nal_data ksocknal_data;
@@ -665,33 +665,20 @@ ksocknal_get_conn_by_idx(struct lnet_ni *ni, int index)
 static struct ksock_sched *
 ksocknal_choose_scheduler_locked(unsigned int cpt)
 {
-	struct ksock_sched_info *info = ksocknal_data.ksnd_sched_info[cpt];
-	struct ksock_sched *sched;
+	struct ksock_sched *sched = ksocknal_data.ksnd_schedulers[cpt];
 	int i;

-	if (info->ksi_nthreads == 0) {
-		cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) {
-			if (info->ksi_nthreads > 0) {
+	if (sched->kss_nthreads == 0) {
+		cfs_percpt_for_each(sched, i, ksocknal_data.ksnd_schedulers) {
+			if (sched->kss_nthreads > 0) {
 				CDEBUG(D_NET, "scheduler[%d] has no threads. selected scheduler[%d]\n",
-				       cpt, info->ksi_cpt);
-				goto select_sched;
+				       cpt, sched->kss_cpt);
+				return sched;
 			}
 		}
 		return NULL;
 	}

-select_sched:
-	sched = &info->ksi_scheds[0];
-	/*
-	 * NB: it's safe so far, but info->ksi_nthreads could be changed
-	 * at runtime when we have dynamic LNet configuration, then we
-	 * need to take care of this.
-	 */
-	for (i = 1; i < info->ksi_nthreads; i++) {
-		if (sched->kss_nconns > info->ksi_scheds[i].kss_nconns)
-			sched = &info->ksi_scheds[i];
-	}
-
 	return sched;
 }
@@ -1280,7 +1267,7 @@ ksocknal_create_conn(struct lnet_ni *ni, struct ksock_route *route,
 	 * The cpt might have changed if we ended up selecting a non cpt
 	 * native scheduler. So use the scheduler's cpt instead.
 	 */
-	cpt = sched->kss_info->ksi_cpt;
+	cpt = sched->kss_cpt;
 	sched->kss_nconns++;
 	conn->ksnc_scheduler = sched;

@@ -1288,7 +1275,7 @@ ksocknal_create_conn(struct lnet_ni *ni, struct ksock_route *route,
 	/* Set the deadline for the outgoing HELLO to drain */
 	conn->ksnc_tx_bufnob = sock->sk->sk_wmem_queued;
 	conn->ksnc_tx_deadline = ktime_get_seconds() +
-				 *ksocknal_tunables.ksnd_timeout;
+				 lnet_get_lnd_timeout();
 	smp_mb();   /* order with adding to peer_ni's conn list */

 	list_add(&conn->ksnc_list, &peer_ni->ksnp_conns);
@@ -1319,11 +1306,10 @@ ksocknal_create_conn(struct lnet_ni *ni, struct ksock_route *route,
 	 */

 	CDEBUG(D_NET, "New conn %s p %d.x %pI4h -> %pI4h/%d"
-	       " incarnation:%lld sched[%d:%d]\n",
+	       " incarnation:%lld sched[%d]\n",
 	       libcfs_id2str(peerid), conn->ksnc_proto->pro_version,
 	       &conn->ksnc_myipaddr, &conn->ksnc_ipaddr,
-	       conn->ksnc_port, incarnation, cpt,
-	       (int)(sched - &sched->kss_info->ksi_scheds[0]));
+	       conn->ksnc_port, incarnation, cpt);

 	if (active) {
 		/* additional routes after interface exchange? */
@@ -1535,8 +1521,8 @@ ksocknal_peer_failed(struct ksock_peer_ni *peer_ni)
 	read_unlock(&ksocknal_data.ksnd_global_lock);

 	if (notify)
-		lnet_notify(peer_ni->ksnp_ni, peer_ni->ksnp_id.nid, 0,
-			    last_alive);
+		lnet_notify(peer_ni->ksnp_ni, peer_ni->ksnp_id.nid,
+			    false, false, last_alive);
 }

 void
@@ -1669,7 +1655,7 @@ ksocknal_destroy_conn(struct ksock_conn *conn)
 	switch (conn->ksnc_rx_state) {
 	case SOCKNAL_RX_LNET_PAYLOAD:
 		last_rcv = conn->ksnc_rx_deadline -
-			   *ksocknal_tunables.ksnd_timeout;
+			   lnet_get_lnd_timeout();
 		CERROR("Completing partial receive from %s[%d], "
 		       "ip %pI4h:%d, with error, wanted: %d, left: %d, "
 		       "last alive is %lld secs ago\n",
@@ -1677,7 +1663,10 @@ ksocknal_destroy_conn(struct ksock_conn *conn)
 		       &conn->ksnc_ipaddr, conn->ksnc_port,
 		       conn->ksnc_rx_nob_wanted, conn->ksnc_rx_nob_left,
 		       ktime_get_seconds() - last_rcv);
-		lnet_finalize(conn->ksnc_cookie, -EIO);
+		if (conn->ksnc_lnet_msg)
+			conn->ksnc_lnet_msg->msg_health_status =
+				LNET_MSG_STATUS_REMOTE_ERROR;
+		lnet_finalize(conn->ksnc_lnet_msg, -EIO);
 		break;
 	case SOCKNAL_RX_LNET_HEADER:
 		if (conn->ksnc_rx_started)
@@ -1792,7 +1781,7 @@ ksocknal_close_matching_conns(struct lnet_process_id id, __u32 ipaddr)
 }

 void
-ksocknal_notify(struct lnet_ni *ni, lnet_nid_t gw_nid, int alive)
+ksocknal_notify_gw_down(lnet_nid_t gw_nid)
 {
 	/* The router is telling me she's been notified of a change in
 	 * gateway state....
@@ -1802,17 +1791,14 @@ ksocknal_notify(struct lnet_ni *ni, lnet_nid_t gw_nid, int alive)
 		.pid = LNET_PID_ANY,
 	};

-	CDEBUG (D_NET, "gw %s %s\n", libcfs_nid2str(gw_nid),
-		alive ? "up" : "down");
+	CDEBUG(D_NET, "gw %s down\n", libcfs_nid2str(gw_nid));

-	if (!alive) {
-		/* If the gateway crashed, close all open connections... */
-		ksocknal_close_matching_conns (id, 0);
-		return;
-	}
+	/* If the gateway crashed, close all open connections... */
+	ksocknal_close_matching_conns(id, 0);
+	return;

-	/* ...otherwise do nothing. We can only establish new connections
-	 * if we have autroutes, and these connect on demand. */
+	/* We can only establish new connections
+	 * if we have autoroutes, and these connect on demand.
+	 */
 }

 void
@@ -1843,7 +1829,7 @@ ksocknal_query(struct lnet_ni *ni, lnet_nid_t nid, time64_t *when)
 		if (bufnob < conn->ksnc_tx_bufnob) {
 			/* something got ACKed */
 			conn->ksnc_tx_deadline = ktime_get_seconds() +
-						 *ksocknal_tunables.ksnd_timeout;
+						 lnet_get_lnd_timeout();
 			peer_ni->ksnp_last_alive = now;
 			conn->ksnc_tx_bufnob = bufnob;
 		}
@@ -2205,7 +2191,7 @@ ksocknal_ctl(struct lnet_ni *ni, unsigned int cmd, void *arg)
 		data->ioc_u32[1] = conn->ksnc_port;
 		data->ioc_u32[2] = conn->ksnc_myipaddr;
 		data->ioc_u32[3] = conn->ksnc_type;
-		data->ioc_u32[4] = conn->ksnc_scheduler->kss_info->ksi_cpt;
+		data->ioc_u32[4] = conn->ksnc_scheduler->kss_cpt;
 		data->ioc_u32[5] = rxmem;
 		data->ioc_u32[6] = conn->ksnc_peer->ksnp_id.pid;
 		ksocknal_conn_decref(conn);
@@ -2244,19 +2230,8 @@ ksocknal_free_buffers (void)
 {
 	LASSERT (atomic_read(&ksocknal_data.ksnd_nactive_txs) == 0);

-	if (ksocknal_data.ksnd_sched_info != NULL) {
-		struct ksock_sched_info *info;
-		int i;
-
-		cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) {
-			if (info->ksi_scheds != NULL) {
-				LIBCFS_FREE(info->ksi_scheds,
-					    info->ksi_nthreads_max *
-					    sizeof(info->ksi_scheds[0]));
-			}
-		}
-		cfs_percpt_free(ksocknal_data.ksnd_sched_info);
-	}
+	if (ksocknal_data.ksnd_schedulers != NULL)
+		cfs_percpt_free(ksocknal_data.ksnd_schedulers);

 	LIBCFS_FREE (ksocknal_data.ksnd_peers,
 		     sizeof(struct list_head) *
@@ -2285,10 +2260,8 @@ ksocknal_free_buffers (void)
 static void
 ksocknal_base_shutdown(void)
 {
-	struct ksock_sched_info *info;
 	struct ksock_sched *sched;
 	int i;
-	int j;

 	CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n",
 	       atomic_read (&libcfs_kmemory));
@@ -2311,23 +2284,14 @@ ksocknal_base_shutdown(void)
 		LASSERT(list_empty(&ksocknal_data.ksnd_connd_connreqs));
 		LASSERT(list_empty(&ksocknal_data.ksnd_connd_routes));

-		if (ksocknal_data.ksnd_sched_info != NULL) {
-			cfs_percpt_for_each(info, i,
-					    ksocknal_data.ksnd_sched_info) {
-				if (info->ksi_scheds == NULL)
-					continue;
-
-				for (j = 0; j < info->ksi_nthreads_max; j++) {
+		if (ksocknal_data.ksnd_schedulers != NULL) {
+			cfs_percpt_for_each(sched, i,
+					    ksocknal_data.ksnd_schedulers) {
-					sched = &info->ksi_scheds[j];
-					LASSERT(list_empty(&sched->\
-							   kss_tx_conns));
-					LASSERT(list_empty(&sched->\
-							   kss_rx_conns));
-					LASSERT(list_empty(&sched-> \
-						kss_zombie_noop_txs));
-					LASSERT(sched->kss_nconns == 0);
-				}
+				LASSERT(list_empty(&sched->kss_tx_conns));
+				LASSERT(list_empty(&sched->kss_rx_conns));
+				LASSERT(list_empty(&sched->kss_zombie_noop_txs));
+				LASSERT(sched->kss_nconns == 0);
 			}
 		}
@@ -2336,17 +2300,10 @@ ksocknal_base_shutdown(void)
 		wake_up_all(&ksocknal_data.ksnd_connd_waitq);
 		wake_up_all(&ksocknal_data.ksnd_reaper_waitq);

-		if (ksocknal_data.ksnd_sched_info != NULL) {
-			cfs_percpt_for_each(info, i,
-					    ksocknal_data.ksnd_sched_info) {
-				if (info->ksi_scheds == NULL)
-					continue;
-
-				for (j = 0; j < info->ksi_nthreads_max; j++) {
-					sched = &info->ksi_scheds[j];
+		if (ksocknal_data.ksnd_schedulers != NULL) {
+			cfs_percpt_for_each(sched, i,
+					    ksocknal_data.ksnd_schedulers)
 					wake_up_all(&sched->kss_waitq);
-				}
-			}
 		}

 		i = 4;
@@ -2379,9 +2336,9 @@ ksocknal_base_shutdown(void)
 static int
 ksocknal_base_startup(void)
 {
-	struct ksock_sched_info *info;
-	int			rc;
-	int			i;
+	struct ksock_sched *sched;
+	int rc;
+	int i;

 	LASSERT (ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING);
 	LASSERT (ksocknal_data.ksnd_nnets == 0);
@@ -2421,45 +2378,38 @@ ksocknal_base_startup(void)
 	ksocknal_data.ksnd_init = SOCKNAL_INIT_DATA;
 	try_module_get(THIS_MODULE);

-	ksocknal_data.ksnd_sched_info = cfs_percpt_alloc(lnet_cpt_table(),
-							 sizeof(*info));
-	if (ksocknal_data.ksnd_sched_info == NULL)
+	/* Create a scheduler block per available CPT */
+	ksocknal_data.ksnd_schedulers = cfs_percpt_alloc(lnet_cpt_table(),
+							 sizeof(*sched));
+	if (ksocknal_data.ksnd_schedulers == NULL)
 		goto failed;

-	cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) {
-		struct ksock_sched *sched;
+	cfs_percpt_for_each(sched, i, ksocknal_data.ksnd_schedulers) {
 		int nthrs;

+		/*
+		 * make sure not to allocate more threads than there are
+		 * cores/CPUs in the CPT
+		 */
 		nthrs = cfs_cpt_weight(lnet_cpt_table(), i);
 		if (*ksocknal_tunables.ksnd_nscheds > 0) {
 			nthrs = min(nthrs, *ksocknal_tunables.ksnd_nscheds);
 		} else {
-			/* max to half of CPUs, assume another half should be
-			 * reserved for upper layer modules */
+			/*
+			 * max to half of CPUs, assume another half should be
+			 * reserved for upper layer modules
+			 */
 			nthrs = min(max(SOCKNAL_NSCHEDS, nthrs >> 1), nthrs);
 		}

-		info->ksi_nthreads_max = nthrs;
-		info->ksi_cpt = i;
-
-		if (nthrs != 0) {
-			LIBCFS_CPT_ALLOC(info->ksi_scheds, lnet_cpt_table(), i,
-					 info->ksi_nthreads_max *
-					 sizeof(*sched));
-			if (info->ksi_scheds == NULL)
-				goto failed;
-
-			for (; nthrs > 0; nthrs--) {
-				sched = &info->ksi_scheds[nthrs - 1];
-
-				sched->kss_info = info;
-				spin_lock_init(&sched->kss_lock);
-				INIT_LIST_HEAD(&sched->kss_rx_conns);
-				INIT_LIST_HEAD(&sched->kss_tx_conns);
-				INIT_LIST_HEAD(&sched->kss_zombie_noop_txs);
-				init_waitqueue_head(&sched->kss_waitq);
-			}
-		}
+		sched->kss_nthreads_max = nthrs;
+		sched->kss_cpt = i;
+
+		spin_lock_init(&sched->kss_lock);
+		INIT_LIST_HEAD(&sched->kss_rx_conns);
+		INIT_LIST_HEAD(&sched->kss_tx_conns);
+		INIT_LIST_HEAD(&sched->kss_zombie_noop_txs);
+		init_waitqueue_head(&sched->kss_waitq);
 	}

 	ksocknal_data.ksnd_connd_starting = 0;
@@ -2618,60 +2568,59 @@ ksocknal_shutdown(struct lnet_ni *ni)
 }

 static int
-ksocknal_enumerate_interfaces(struct ksock_net *net)
+ksocknal_enumerate_interfaces(struct ksock_net *net, char *iname)
 {
-	char **names;
-	int i;
-	int j;
-	int rc;
-	int n;
-
-	n = lnet_ipif_enumerate(&names);
-	if (n <= 0) {
-		CERROR("Can't enumerate interfaces: %d\n", n);
-		return n;
-	}
-
-	for (i = j = 0; i < n; i++) {
-		int up;
-		__u32 ip;
-		__u32 mask;
+	struct net_device *dev;

-		if (!strcmp(names[i], "lo")) /* skip the loopback IF */
-			continue;
+	rtnl_lock();
+	for_each_netdev(&init_net, dev) {
+		/* The iname specified by a user-land configuration can
+		 * map to an ifa_label so always treat iname as an ifa_label.
+		 * If iname is NULL then fall back to the net device name.
+		 */
+		const char *name = iname ? iname : dev->name;
+		struct in_device *in_dev;

-		rc = lnet_ipif_query(names[i], &up, &ip, &mask);
-		if (rc != 0) {
-			CWARN("Can't get interface %s info: %d\n",
-			      names[i], rc);
-			continue;
-		}
+		if (strcmp(dev->name, "lo") == 0) /* skip the loopback IF */
+			continue;

-		if (!up) {
-			CWARN("Ignoring interface %s (down)\n",
-			      names[i]);
-			continue;
-		}
+		if (!(dev_get_flags(dev) & IFF_UP)) {
+			CWARN("Ignoring interface %s (down)\n", dev->name);
+			continue;
+		}

-		if (j == LNET_INTERFACES_NUM) {
-			CWARN("Ignoring interface %s (too many interfaces)\n",
-			      names[i]);
+		in_dev = __in_dev_get_rtnl(dev);
+		if (!in_dev) {
+			CWARN("Interface %s has no IPv4 status.\n", dev->name);
 			continue;
 		}

-		net->ksnn_interfaces[j].ksni_ipaddr = ip;
-		net->ksnn_interfaces[j].ksni_netmask = mask;
-		strlcpy(net->ksnn_interfaces[j].ksni_name,
-			names[i], sizeof(net->ksnn_interfaces[j].ksni_name));
-		j++;
-	}
+		for_ifa(in_dev)
+			if (strcmp(name, ifa->ifa_label) == 0) {
+				int idx = net->ksnn_ninterfaces;
+				struct ksock_interface *ksi;
+
+				if (idx >= ARRAY_SIZE(net->ksnn_interfaces)) {
+					rtnl_unlock();
+					return -E2BIG;
+				}

-	lnet_ipif_free_enumeration(names, n);
+				ksi = &net->ksnn_interfaces[idx];
+				ksi->ksni_ipaddr = ntohl(ifa->ifa_local);
+				ksi->ksni_netmask = ifa->ifa_mask;
+				strlcpy(ksi->ksni_name,
+					name, sizeof(ksi->ksni_name));
+				net->ksnn_ninterfaces++;
+				break;
+			}
+		endfor_ifa(in_dev);
+	}
+	rtnl_unlock();

-	if (j == 0)
+	if (net->ksnn_ninterfaces == 0)
 		CERROR("Can't find any usable interfaces\n");

-	return j;
+	return net->ksnn_ninterfaces > 0 ? 0 : -ENOENT;
 }

 static int
@@ -2717,37 +2666,35 @@ ksocknal_search_new_ipif(struct ksock_net *net)
 }

 static int
-ksocknal_start_schedulers(struct ksock_sched_info *info)
+ksocknal_start_schedulers(struct ksock_sched *sched)
 {
 	int	nthrs;
 	int	rc = 0;
 	int	i;

-	if (info->ksi_nthreads == 0) {
+	if (sched->kss_nthreads == 0) {
 		if (*ksocknal_tunables.ksnd_nscheds > 0) {
-			nthrs = info->ksi_nthreads_max;
+			nthrs = sched->kss_nthreads_max;
 		} else {
 			nthrs = cfs_cpt_weight(lnet_cpt_table(),
-					       info->ksi_cpt);
+					       sched->kss_cpt);
 			nthrs = min(max(SOCKNAL_NSCHEDS, nthrs >> 1), nthrs);
 			nthrs = min(SOCKNAL_NSCHEDS_HIGH, nthrs);
 		}
-		nthrs = min(nthrs, info->ksi_nthreads_max);
+		nthrs = min(nthrs, sched->kss_nthreads_max);
 	} else {
-		LASSERT(info->ksi_nthreads <= info->ksi_nthreads_max);
+		LASSERT(sched->kss_nthreads <= sched->kss_nthreads_max);
 		/* increase two threads if there is new interface */
-		nthrs = min(2, info->ksi_nthreads_max - info->ksi_nthreads);
+		nthrs = min(2, sched->kss_nthreads_max - sched->kss_nthreads);
 	}

 	for (i = 0; i < nthrs; i++) {
 		long id;
 		char name[20];
-		struct ksock_sched *sched;

-		id = KSOCK_THREAD_ID(info->ksi_cpt, info->ksi_nthreads + i);
-		sched = &info->ksi_scheds[KSOCK_THREAD_SID(id)];
+		id = KSOCK_THREAD_ID(sched->kss_cpt, sched->kss_nthreads + i);
 		snprintf(name, sizeof(name), "socknal_sd%02d_%02d",
-			 info->ksi_cpt, (int)(sched - &info->ksi_scheds[0]));
+			 sched->kss_cpt, (int)KSOCK_THREAD_SID(id));

 		rc = ksocknal_thread_start(ksocknal_scheduler,
 					   (void *)id, name);
@@ -2755,11 +2702,11 @@ ksocknal_start_schedulers(struct ksock_sched_info *info)
 			continue;

 		CERROR("Can't spawn thread %d for scheduler[%d]: %d\n",
-		       info->ksi_cpt, info->ksi_nthreads + i, rc);
+		       sched->kss_cpt, (int) KSOCK_THREAD_SID(id), rc);
 		break;
 	}

-	info->ksi_nthreads += i;
+	sched->kss_nthreads += i;
 	return rc;
 }
@@ -2774,16 +2721,16 @@ ksocknal_net_start_threads(struct ksock_net *net, __u32 *cpts, int ncpts)
 		return -EINVAL;

 	for (i = 0; i < ncpts; i++) {
-		struct ksock_sched_info *info;
+		struct ksock_sched *sched;
 		int cpt = (cpts == NULL) ? i : cpts[i];

 		LASSERT(cpt < cfs_cpt_number(lnet_cpt_table()));
-		info = ksocknal_data.ksnd_sched_info[cpt];
+		sched = ksocknal_data.ksnd_schedulers[cpt];

-		if (!newif && info->ksi_nthreads > 0)
+		if (!newif && sched->kss_nthreads > 0)
 			continue;

-		rc = ksocknal_start_schedulers(info);
+		rc = ksocknal_start_schedulers(sched);
 		if (rc != 0)
 			return rc;
 	}
@@ -2794,6 +2741,7 @@ int
 ksocknal_startup(struct lnet_ni *ni)
 {
 	struct ksock_net *net;
+	struct lnet_ioctl_config_lnd_cmn_tunables *net_tunables;
 	int rc;
 	int i;
 	struct net_device *net_dev;
@@ -2814,54 +2762,68 @@ ksocknal_startup(struct lnet_ni *ni)
 	spin_lock_init(&net->ksnn_lock);
 	net->ksnn_incarnation = ktime_get_real_ns();
 	ni->ni_data = net;
-	if (!ni->ni_net->net_tunables_set) {
-		ni->ni_net->net_tunables.lct_peer_timeout =
+	net_tunables = &ni->ni_net->net_tunables;
+
+	if (net_tunables->lct_peer_timeout == -1)
+		net_tunables->lct_peer_timeout =
 			*ksocknal_tunables.ksnd_peertimeout;
-		ni->ni_net->net_tunables.lct_max_tx_credits =
+
+	if (net_tunables->lct_max_tx_credits == -1)
+		net_tunables->lct_max_tx_credits =
 			*ksocknal_tunables.ksnd_credits;
-		ni->ni_net->net_tunables.lct_peer_tx_credits =
+
+	if (net_tunables->lct_peer_tx_credits == -1)
+		net_tunables->lct_peer_tx_credits =
 			*ksocknal_tunables.ksnd_peertxcredits;
-		ni->ni_net->net_tunables.lct_peer_rtr_credits =
-			*ksocknal_tunables.ksnd_peerrtrcredits;
-		ni->ni_net->net_tunables_set = true;
-	}
+	if (net_tunables->lct_peer_tx_credits >
+	    net_tunables->lct_max_tx_credits)
+		net_tunables->lct_peer_tx_credits =
+			net_tunables->lct_max_tx_credits;

-	if (ni->ni_interfaces[0] == NULL) {
-		rc = ksocknal_enumerate_interfaces(net);
-		if (rc <= 0)
-			goto fail_1;
+	if (net_tunables->lct_peer_rtr_credits == -1)
+		net_tunables->lct_peer_rtr_credits =
+			*ksocknal_tunables.ksnd_peerrtrcredits;

-		net->ksnn_ninterfaces = 1;
+	if (!ni->ni_interfaces[0]) {
+		rc = ksocknal_enumerate_interfaces(net, NULL);
+		if (rc < 0)
+			goto fail_1;
 	} else {
+		/* Before Multi-Rail ksocklnd would manage
+		 * multiple interfaces with its own tcp bonding.
+		 * If we encounter an old configuration using
+		 * this tcp bonding approach then we need to
+		 * handle more than one ni_interfaces.
+		 *
+		 * In Multi-Rail configuration only ONE ni_interface
+		 * should exist. Each IP alias should be mapped to
+		 * each 'struct lnet_ni'.
+		 */
 		for (i = 0; i < LNET_INTERFACES_NUM; i++) {
-			int up;
+			int j;

-			if (ni->ni_interfaces[i] == NULL)
+			if (!ni->ni_interfaces[i])
 				break;

-			rc = lnet_ipif_query(ni->ni_interfaces[i], &up,
-				&net->ksnn_interfaces[i].ksni_ipaddr,
-				&net->ksnn_interfaces[i].ksni_netmask);
+			for (j = 0; j < net->ksnn_ninterfaces; j++) {
+				struct ksock_interface *ksi;

-			if (rc != 0) {
-				CERROR("Can't get interface %s info: %d\n",
-				       ni->ni_interfaces[i], rc);
-				goto fail_1;
-			}
+				ksi = &net->ksnn_interfaces[j];

-			if (!up) {
-				CERROR("Interface %s is down\n",
-				       ni->ni_interfaces[i]);
-				goto fail_1;
+				if (strcmp(ni->ni_interfaces[i],
+					   ksi->ksni_name) == 0) {
+					CERROR("found duplicate %s\n",
+					       ksi->ksni_name);
+					rc = -EEXIST;
+					goto fail_1;
+				}
 			}

-			strlcpy(net->ksnn_interfaces[i].ksni_name,
-				ni->ni_interfaces[i],
-				sizeof(net->ksnn_interfaces[i].ksni_name));
-
+			rc = ksocknal_enumerate_interfaces(net, ni->ni_interfaces[i]);
+			if (rc < 0)
+				goto fail_1;
 		}
-		net->ksnn_ninterfaces = i;
 	}

 	net_dev = dev_get_by_name(&init_net,
@@ -2917,7 +2879,7 @@ static int __init ksocklnd_init(void)
 	the_ksocklnd.lnd_ctl = ksocknal_ctl;
 	the_ksocklnd.lnd_send = ksocknal_send;
 	the_ksocklnd.lnd_recv = ksocknal_recv;
-	the_ksocklnd.lnd_notify = ksocknal_notify;
+	the_ksocklnd.lnd_notify_peer_down = ksocknal_notify_gw_down;
 	the_ksocklnd.lnd_query = ksocknal_query;
 	the_ksocklnd.lnd_accept = ksocknal_accept;
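The scheduler refactor above collapses struct ksock_sched_info into struct ksock_sched itself: one scheduler per CPT instead of a per-CPT array of per-thread schedulers, so ksocknal_choose_scheduler_locked() no longer hunts for the least-loaded thread and instead returns the CPT's scheduler, falling back to any CPT whose scheduler actually has threads. A minimal userspace sketch of that fallback flow follows; the type and function names (struct sched, pick_scheduler) are illustrative stand-ins, not the kernel's:

#include <stdio.h>

#define NCPTS 4

struct sched {
	int cpt;       /* CPU partition this scheduler serves */
	int nthreads;  /* kss_nthreads analogue */
};

static struct sched scheds[NCPTS] = {
	{0, 0}, {1, 2}, {2, 2}, {3, 0},  /* CPTs 0 and 3 never started threads */
};

static struct sched *pick_scheduler(unsigned int cpt)
{
	struct sched *sched = &scheds[cpt];
	int i;

	if (sched->nthreads == 0) {
		/* requested CPT is empty: scan all CPTs for a live one */
		for (i = 0; i < NCPTS; i++) {
			if (scheds[i].nthreads > 0)
				return &scheds[i];
		}
		return NULL;  /* no scheduler anywhere has threads */
	}
	return sched;
}

int main(void)
{
	struct sched *s = pick_scheduler(0);

	if (s)  /* CPT 0 has no threads, so this falls back to CPT 1 */
		printf("asked for CPT 0, got scheduler on CPT %d\n", s->cpt);
	return 0;
}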
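The thread-count heuristic in ksocknal_base_startup() caps scheduler threads at half of a CPT's cores (reserving the rest for upper-layer modules) while keeping a floor of SOCKNAL_NSCHEDS and a ceiling of the core count; when the nscheds tunable is set it simply clamps the core count instead. A runnable sketch with worked numbers, assuming SOCKNAL_NSCHEDS is 3 (check socklnd.h for the real value):

#include <stdio.h>

#define SOCKNAL_NSCHEDS 3  /* assumed for illustration */

static int min_i(int a, int b) { return a < b ? a : b; }
static int max_i(int a, int b) { return a > b ? a : b; }

/* ncores: cfs_cpt_weight() analogue; tunable: ksnd_nscheds analogue (0 = unset) */
static int sched_nthreads(int ncores, int tunable)
{
	int nthrs = ncores;

	if (tunable > 0)
		nthrs = min_i(nthrs, tunable);
	else
		/* half the cores, but at least SOCKNAL_NSCHEDS,
		 * and never more threads than cores */
		nthrs = min_i(max_i(SOCKNAL_NSCHEDS, nthrs >> 1), nthrs);
	return nthrs;
}

int main(void)
{
	printf("16 cores, unset -> %d threads\n", sched_nthreads(16, 0)); /* 8 */
	printf(" 2 cores, unset -> %d threads\n", sched_nthreads(2, 0));  /* 2 */
	printf("16 cores, cap 4 -> %d threads\n", sched_nthreads(16, 4)); /* 4 */
	return 0;
}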
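Likewise, the new ksocknal_enumerate_interfaces() matches the configured name against each address's ifa_label rather than the device name, so an IP alias such as eth0:1 lands in its own ksock_interface slot, as the Multi-Rail comment above requires. The real code walks for_each_netdev() under rtnl_lock() and iterates addresses with for_ifa()/endfor_ifa(); the sketch below models only the label-matching and slot-capping logic in userspace, with simplified stand-in types and error values:

#include <stdio.h>
#include <string.h>

#define MAX_IFS 2  /* stand-in for ARRAY_SIZE(net->ksnn_interfaces) */

struct ifa { const char *label; unsigned int addr; };
struct ksi { char name[16]; unsigned int ipaddr; };

static struct ifa addrs[] = {      /* addresses hanging off one net_device */
	{ "eth0",   0xc0a80101 },  /* 192.168.1.1 */
	{ "eth0:1", 0xc0a80102 },  /* 192.168.1.2, an alias */
};

static struct ksi ifs[MAX_IFS];
static int nifs;

static int add_interface(const char *iname)
{
	size_t i;

	for (i = 0; i < sizeof(addrs) / sizeof(addrs[0]); i++) {
		if (strcmp(iname, addrs[i].label) != 0)
			continue;          /* match labels, not device names */
		if (nifs >= MAX_IFS)
			return -7;         /* -E2BIG analogue */
		ifs[nifs].ipaddr = addrs[i].addr;
		snprintf(ifs[nifs].name, sizeof(ifs[nifs].name), "%s", iname);
		nifs++;
		return 0;
	}
	return -2;                         /* -ENOENT analogue */
}

int main(void)
{
	add_interface("eth0");
	add_interface("eth0:1");  /* the alias gets its own slot */
	printf("%d interfaces; second is %s @ 0x%x\n",
	       nifs, ifs[1].name, ifs[1].ipaddr);
	return 0;
}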