#define DEBUG_SUBSYSTEM S_LNET
#include <lnet/lib-lnet.h>
+#include <lnet/lib-dlc.h>
#ifdef __KERNEL__
#include <linux/log2.h>
#endif
lnet_init_locks(void)
{
spin_lock_init(&the_lnet.ln_eq_wait_lock);
- cfs_waitq_init(&the_lnet.ln_eq_waitq);
+ init_waitqueue_head(&the_lnet.ln_eq_waitq);
mutex_init(&the_lnet.ln_lnd_mutex);
mutex_init(&the_lnet.ln_api_mutex);
}
char *
lnet_get_networks (void)
{
- static char default_networks[256];
- char *networks = getenv ("LNET_NETWORKS");
- char *ip2nets = getenv ("LNET_IP2NETS");
- char *str;
- char *sep;
- int len;
- int nob;
- int rc;
- cfs_list_t *tmp;
-
-#ifdef NOT_YET
- if (networks != NULL && ip2nets != NULL) {
- LCONSOLE_ERROR_MSG(0x103, "Please set EITHER 'LNET_NETWORKS' or"
- " 'LNET_IP2NETS' but not both at once\n");
- return NULL;
- }
+ static char default_networks[256];
+ char *networks = getenv("LNET_NETWORKS");
+ char *str;
+ char *sep;
+ int len;
+ int nob;
+ struct list_head *tmp;
- if (ip2nets != NULL) {
- rc = lnet_parse_ip2nets(&networks, ip2nets);
- return (rc == 0) ? networks : NULL;
- }
-#else
- SET_BUT_UNUSED(ip2nets);
- SET_BUT_UNUSED(rc);
-#endif
- if (networks != NULL)
- return networks;
+ if (networks != NULL)
+ return networks;
/* In userland, the default 'networks=' is the list of known net types */
-
len = sizeof(default_networks);
str = default_networks;
*str = 0;
sep = "";
- cfs_list_for_each (tmp, &the_lnet.ln_lnds) {
- lnd_t *lnd = cfs_list_entry(tmp, lnd_t, lnd_list);
-
- nob = snprintf(str, len, "%s%s", sep,
- libcfs_lnd2str(lnd->lnd_type));
- len -= nob;
- if (len < 0) {
- /* overflowed the string; leave it where it was */
- *str = 0;
- break;
- }
+ list_for_each(tmp, &the_lnet.ln_lnds) {
+ lnd_t *lnd = list_entry(tmp, lnd_t, lnd_list);
- str += nob;
- sep = ",";
- }
+ nob = snprintf(str, len, "%s%s", sep,
+ libcfs_lnd2str(lnd->lnd_type));
+ if (nob >= len) {
+ /* overflowed the string; leave it where it was */
+ *str = 0;
+ break;
+ }
+ len -= nob;
+ str += nob;
+ sep = ",";
+ }
- return default_networks;
+ return default_networks;
}
# ifndef HAVE_LIBPTHREAD
static int
lnet_create_remote_nets_table(void)
{
- int i;
- cfs_list_t *hash;
+ int i;
+ struct list_head *hash;
LASSERT(the_lnet.ln_remote_nets_hash == NULL);
LASSERT(the_lnet.ln_remote_nets_hbits > 0);
}
for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++)
- CFS_INIT_LIST_HEAD(&hash[i]);
+ INIT_LIST_HEAD(&hash[i]);
the_lnet.ln_remote_nets_hash = hash;
return 0;
}
static void
lnet_destroy_remote_nets_table(void)
{
- int i;
- cfs_list_t *hash;
+ int i;
if (the_lnet.ln_remote_nets_hash == NULL)
return;
for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++)
- LASSERT(cfs_list_empty(&the_lnet.ln_remote_nets_hash[i]));
+ LASSERT(list_empty(&the_lnet.ln_remote_nets_hash[i]));
LIBCFS_FREE(the_lnet.ln_remote_nets_hash,
- LNET_REMOTE_NETS_HASH_SIZE * sizeof(*hash));
+ LNET_REMOTE_NETS_HASH_SIZE *
+ sizeof(the_lnet.ln_remote_nets_hash[0]));
the_lnet.ln_remote_nets_hash = NULL;
}
lnd_t *
lnet_find_lnd_by_type (int type)
{
- lnd_t *lnd;
- cfs_list_t *tmp;
+ lnd_t *lnd;
+ struct list_head *tmp;
- /* holding lnd mutex */
- cfs_list_for_each (tmp, &the_lnet.ln_lnds) {
- lnd = cfs_list_entry(tmp, lnd_t, lnd_list);
+ /* holding lnd mutex */
+ list_for_each(tmp, &the_lnet.ln_lnds) {
+ lnd = list_entry(tmp, lnd_t, lnd_list);
- if ((int)lnd->lnd_type == type)
- return lnd;
- }
-
- return NULL;
+ if ((int)lnd->lnd_type == type)
+ return lnd;
+ }
+ return NULL;
}
void
lnet_register_lnd (lnd_t *lnd)
{
- LNET_MUTEX_LOCK(&the_lnet.ln_lnd_mutex);
+ LNET_MUTEX_LOCK(&the_lnet.ln_lnd_mutex);
- LASSERT (the_lnet.ln_init);
- LASSERT (libcfs_isknown_lnd(lnd->lnd_type));
- LASSERT (lnet_find_lnd_by_type(lnd->lnd_type) == NULL);
+ LASSERT(the_lnet.ln_init);
+ LASSERT(libcfs_isknown_lnd(lnd->lnd_type));
+ LASSERT(lnet_find_lnd_by_type(lnd->lnd_type) == NULL);
- cfs_list_add_tail (&lnd->lnd_list, &the_lnet.ln_lnds);
- lnd->lnd_refcount = 0;
+ list_add_tail(&lnd->lnd_list, &the_lnet.ln_lnds);
+ lnd->lnd_refcount = 0;
- CDEBUG(D_NET, "%s LND registered\n", libcfs_lnd2str(lnd->lnd_type));
+ CDEBUG(D_NET, "%s LND registered\n", libcfs_lnd2str(lnd->lnd_type));
- LNET_MUTEX_UNLOCK(&the_lnet.ln_lnd_mutex);
+ LNET_MUTEX_UNLOCK(&the_lnet.ln_lnd_mutex);
}
EXPORT_SYMBOL(lnet_register_lnd);
void
lnet_unregister_lnd (lnd_t *lnd)
{
- LNET_MUTEX_LOCK(&the_lnet.ln_lnd_mutex);
+ LNET_MUTEX_LOCK(&the_lnet.ln_lnd_mutex);
- LASSERT (the_lnet.ln_init);
- LASSERT (lnet_find_lnd_by_type(lnd->lnd_type) == lnd);
- LASSERT (lnd->lnd_refcount == 0);
+ LASSERT(the_lnet.ln_init);
+ LASSERT(lnet_find_lnd_by_type(lnd->lnd_type) == lnd);
+ LASSERT(lnd->lnd_refcount == 0);
- cfs_list_del (&lnd->lnd_list);
- CDEBUG(D_NET, "%s LND unregistered\n", libcfs_lnd2str(lnd->lnd_type));
+ list_del(&lnd->lnd_list);
+ CDEBUG(D_NET, "%s LND unregistered\n", libcfs_lnd2str(lnd->lnd_type));
- LNET_MUTEX_UNLOCK(&the_lnet.ln_lnd_mutex);
+ LNET_MUTEX_UNLOCK(&the_lnet.ln_lnd_mutex);
}
EXPORT_SYMBOL(lnet_unregister_lnd);
counters->send_count += ctr->send_count;
counters->recv_count += ctr->recv_count;
counters->route_count += ctr->route_count;
- counters->drop_length += ctr->drop_length;
+ counters->drop_count += ctr->drop_count;
counters->send_length += ctr->send_length;
counters->recv_length += ctr->recv_length;
counters->route_length += ctr->route_length;
#ifdef LNET_USE_LIB_FREELIST
int
-lnet_freelist_init (lnet_freelist_t *fl, int n, int size)
+lnet_freelist_init(lnet_freelist_t *fl, int n, int size)
{
char *space;
if (space == NULL)
return (-ENOMEM);
- CFS_INIT_LIST_HEAD (&fl->fl_list);
- fl->fl_objs = space;
- fl->fl_nobjs = n;
- fl->fl_objsize = size;
+ INIT_LIST_HEAD(&fl->fl_list);
+ fl->fl_objs = space;
+ fl->fl_nobjs = n;
+ fl->fl_objsize = size;
- do
- {
- memset (space, 0, size);
- cfs_list_add ((cfs_list_t *)space, &fl->fl_list);
- space += size;
- } while (--n != 0);
+ do {
+ list_add((struct list_head *)space, &fl->fl_list);
+ space += size;
+ } while (--n != 0);
- return (0);
+ return 0;
}
void
-lnet_freelist_fini (lnet_freelist_t *fl)
+lnet_freelist_fini(lnet_freelist_t *fl)
{
- cfs_list_t *el;
- int count;
+ struct list_head *el;
+ int count;
if (fl->fl_nobjs == 0)
return;
#endif /* LNET_USE_LIB_FREELIST */
-__u64
-lnet_create_interface_cookie (void)
+__u64 lnet_create_interface_cookie (void)
{
- /* NB the interface cookie in wire handles guards against delayed
- * replies and ACKs appearing valid after reboot. Initialisation time,
- * even if it's only implemented to millisecond resolution is probably
- * easily good enough. */
- struct timeval tv;
- __u64 cookie;
+ /* NB the interface cookie in wire handles guards against delayed
+ * replies and ACKs appearing valid after reboot. Initialisation time,
+ * even if it's only implemented to millisecond resolution is probably
+ * easily good enough. */
+ struct timeval tv;
+ __u64 cookie;
#ifndef __KERNEL__
- int rc = gettimeofday (&tv, NULL);
- LASSERT (rc == 0);
+ int rc = gettimeofday (&tv, NULL);
+ LASSERT (rc == 0);
#else
- cfs_gettimeofday(&tv);
+ do_gettimeofday(&tv);
#endif
- cookie = tv.tv_sec;
- cookie *= 1000000;
- cookie += tv.tv_usec;
- return cookie;
+ cookie = tv.tv_sec;
+ cookie *= 1000000;
+ cookie += tv.tv_usec;
+ return cookie;
}
static char *
if (rec->rec_type == 0) /* not set yet, it's uninitialized */
return;
- while (!cfs_list_empty(&rec->rec_active)) {
- cfs_list_t *e = rec->rec_active.next;
+ while (!list_empty(&rec->rec_active)) {
+ struct list_head *e = rec->rec_active.next;
- cfs_list_del_init(e);
+ list_del_init(e);
if (rec->rec_type == LNET_COOKIE_TYPE_EQ) {
- lnet_eq_free(cfs_list_entry(e, lnet_eq_t, eq_list));
+ lnet_eq_free(list_entry(e, lnet_eq_t, eq_list));
} else if (rec->rec_type == LNET_COOKIE_TYPE_MD) {
- lnet_md_free(cfs_list_entry(e, lnet_libmd_t, md_list));
+ lnet_md_free(list_entry(e, lnet_libmd_t, md_list));
} else { /* NB: Active MEs should be attached on portals */
LBUG();
LASSERT(rec->rec_type == 0);
rec->rec_type = type;
- CFS_INIT_LIST_HEAD(&rec->rec_active);
+ INIT_LIST_HEAD(&rec->rec_active);
#ifdef LNET_USE_LIB_FREELIST
memset(&rec->rec_freelist, 0, sizeof(rec->rec_freelist));
}
for (i = 0; i < LNET_LH_HASH_SIZE; i++)
- CFS_INIT_LIST_HEAD(&rec->rec_lh_hash[i]);
+ INIT_LIST_HEAD(&rec->rec_lh_hash[i]);
return 0;
lnet_res_lh_lookup(struct lnet_res_container *rec, __u64 cookie)
{
/* ALWAYS called with lnet_res_lock held */
- cfs_list_t *head;
+ struct list_head *head;
lnet_libhandle_t *lh;
unsigned int hash;
hash = cookie >> (LNET_COOKIE_TYPE_BITS + LNET_CPT_BITS);
head = &rec->rec_lh_hash[hash & LNET_LH_HASH_MASK];
- cfs_list_for_each_entry(lh, head, lh_hash_chain) {
+ list_for_each_entry(lh, head, lh_hash_chain) {
if (lh->lh_cookie == cookie)
return lh;
}
hash = (lh->lh_cookie >> ibits) & LNET_LH_HASH_MASK;
- cfs_list_add(&lh->lh_hash_chain, &rec->rec_lh_hash[hash]);
+ list_add(&lh->lh_hash_chain, &rec->rec_lh_hash[hash]);
}
#ifndef __KERNEL__
struct lnet_res_container **recs;
int rc = 0;
+ if (requested_pid == LNET_PID_ANY) {
+ /* Don't instantiate LNET just for me */
+ return -ENETDOWN;
+ }
+
LASSERT (the_lnet.ln_refcount == 0);
the_lnet.ln_routing = 0;
the_lnet.ln_pid = requested_pid;
#else
if (the_lnet.ln_server_mode_flag) {/* server case (uOSS) */
- LASSERT ((requested_pid & LNET_PID_USERFLAG) == 0);
+ LASSERT ((requested_pid & LNET_PID_USERFLAG) == 0);
- if (cfs_curproc_uid())/* Only root can run user-space server */
- return -EPERM;
- the_lnet.ln_pid = requested_pid;
+ if (current_uid() != 0) /* Only root can run user-space server */
+ return -EPERM;
+ the_lnet.ln_pid = requested_pid;
} else {/* client case (liblustre) */
}
#endif
- CFS_INIT_LIST_HEAD(&the_lnet.ln_test_peers);
- CFS_INIT_LIST_HEAD(&the_lnet.ln_nis);
- CFS_INIT_LIST_HEAD(&the_lnet.ln_nis_cpt);
- CFS_INIT_LIST_HEAD(&the_lnet.ln_nis_zombie);
- CFS_INIT_LIST_HEAD(&the_lnet.ln_routers);
+ INIT_LIST_HEAD(&the_lnet.ln_test_peers);
+ INIT_LIST_HEAD(&the_lnet.ln_nis);
+ INIT_LIST_HEAD(&the_lnet.ln_nis_cpt);
+ INIT_LIST_HEAD(&the_lnet.ln_nis_zombie);
+ INIT_LIST_HEAD(&the_lnet.ln_routers);
rc = lnet_create_remote_nets_table();
if (rc != 0)
int
lnet_unprepare (void)
{
- /* NB no LNET_LOCK since this is the last reference. All LND instances
- * have shut down already, so it is safe to unlink and free all
- * descriptors, even those that appear committed to a network op (eg MD
- * with non-zero pending count) */
+ /* NB no LNET_LOCK since this is the last reference. All LND instances
+ * have shut down already, so it is safe to unlink and free all
+ * descriptors, even those that appear committed to a network op (eg MD
+ * with non-zero pending count) */
lnet_fail_nid(LNET_NID_ANY, 0);
LASSERT(the_lnet.ln_refcount == 0);
- LASSERT(cfs_list_empty(&the_lnet.ln_test_peers));
- LASSERT(cfs_list_empty(&the_lnet.ln_nis));
- LASSERT(cfs_list_empty(&the_lnet.ln_nis_cpt));
- LASSERT(cfs_list_empty(&the_lnet.ln_nis_zombie));
+ LASSERT(list_empty(&the_lnet.ln_test_peers));
+ LASSERT(list_empty(&the_lnet.ln_nis));
+ LASSERT(list_empty(&the_lnet.ln_nis_cpt));
+ LASSERT(list_empty(&the_lnet.ln_nis_zombie));
lnet_portals_destroy();
lnet_msg_containers_destroy();
lnet_peer_tables_destroy();
- lnet_rtrpools_free();
+ lnet_rtrpools_free(0);
if (the_lnet.ln_counters != NULL) {
cfs_percpt_free(the_lnet.ln_counters);
lnet_ni_t *
lnet_net2ni_locked(__u32 net, int cpt)
{
- cfs_list_t *tmp;
- lnet_ni_t *ni;
+ struct list_head *tmp;
+ lnet_ni_t *ni;
LASSERT(cpt != LNET_LOCK_EX);
- cfs_list_for_each(tmp, &the_lnet.ln_nis) {
- ni = cfs_list_entry(tmp, lnet_ni_t, ni_list);
+ list_for_each(tmp, &the_lnet.ln_nis) {
+ ni = list_entry(tmp, lnet_ni_t, ni_list);
if (LNET_NIDNET(ni->ni_nid) == net) {
lnet_ni_addref_locked(ni, cpt);
if (number == 1)
return 0;
- val = cfs_hash_long(key, LNET_CPT_BITS);
+ val = hash_long(key, LNET_CPT_BITS);
/* NB: LNET_CP_NUMBER doesn't have to be PO2 */
if (val < number)
return val;
return 0; /* the only one */
/* take lnet_net_lock(any) would be OK */
- if (!cfs_list_empty(&the_lnet.ln_nis_cpt)) {
- cfs_list_for_each_entry(ni, &the_lnet.ln_nis_cpt, ni_cptlist) {
+ if (!list_empty(&the_lnet.ln_nis_cpt)) {
+ list_for_each_entry(ni, &the_lnet.ln_nis_cpt, ni_cptlist) {
if (LNET_NIDNET(ni->ni_nid) != LNET_NIDNET(nid))
continue;
if (LNET_CPT_NUMBER == 1)
return 0; /* the only one */
- if (cfs_list_empty(&the_lnet.ln_nis_cpt))
+ if (list_empty(&the_lnet.ln_nis_cpt))
return lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
cpt = lnet_net_lock_current();
lnet_ni_t *
lnet_nid2ni_locked(lnet_nid_t nid, int cpt)
{
- struct lnet_ni *ni;
- cfs_list_t *tmp;
+ struct lnet_ni *ni;
+ struct list_head *tmp;
LASSERT(cpt != LNET_LOCK_EX);
- cfs_list_for_each(tmp, &the_lnet.ln_nis) {
- ni = cfs_list_entry(tmp, lnet_ni_t, ni_list);
+ list_for_each(tmp, &the_lnet.ln_nis) {
+ ni = list_entry(tmp, lnet_ni_t, ni_list);
if (ni->ni_nid == nid) {
lnet_ni_addref_locked(ni, cpt);
lnet_count_acceptor_nis (void)
{
/* Return the # of NIs that need the acceptor. */
- int count = 0;
+ int count = 0;
#if defined(__KERNEL__) || defined(HAVE_LIBPTHREAD)
- cfs_list_t *tmp;
- struct lnet_ni *ni;
- int cpt;
+ struct list_head *tmp;
+ struct lnet_ni *ni;
+ int cpt;
cpt = lnet_net_lock_current();
- cfs_list_for_each(tmp, &the_lnet.ln_nis) {
- ni = cfs_list_entry(tmp, lnet_ni_t, ni_list);
+ list_for_each(tmp, &the_lnet.ln_nis) {
+ ni = list_entry(tmp, lnet_ni_t, ni_list);
if (ni->ni_lnd->lnd_accept != NULL)
count++;
return count;
}
-static int
-lnet_ni_tq_credits(lnet_ni_t *ni)
+static lnet_ping_info_t *
+lnet_ping_info_create(int num_ni)
{
- int credits;
+ lnet_ping_info_t *ping_info;
+ unsigned int infosz;
- LASSERT(ni->ni_ncpts >= 1);
-
- if (ni->ni_ncpts == 1)
- return ni->ni_maxtxcredits;
+ infosz = offsetof(lnet_ping_info_t, pi_ni[num_ni]);
+ LIBCFS_ALLOC(ping_info, infosz);
+ if (ping_info == NULL) {
+ CERROR("Can't allocate ping info[%d]\n", num_ni);
+ return NULL;
+ }
- credits = ni->ni_maxtxcredits / ni->ni_ncpts;
- credits = max(credits, 8 * ni->ni_peertxcredits);
- credits = min(credits, ni->ni_maxtxcredits);
+ ping_info->pi_nnis = num_ni;
+ ping_info->pi_pid = the_lnet.ln_pid;
+ ping_info->pi_magic = LNET_PROTO_PING_MAGIC;
+ ping_info->pi_features = LNET_PING_FEAT_NI_STATUS;
- return credits;
+ return ping_info;
}
-void
-lnet_shutdown_lndnis (void)
+static inline int
+lnet_get_ni_count(void)
{
- int i;
- int islo;
- lnet_ni_t *ni;
+ struct lnet_ni *ni;
+ int count = 0;
- /* NB called holding the global mutex */
+ lnet_net_lock(0);
- /* All quiet on the API front */
- LASSERT(!the_lnet.ln_shutdown);
- LASSERT(the_lnet.ln_refcount == 0);
- LASSERT(cfs_list_empty(&the_lnet.ln_nis_zombie));
+ list_for_each_entry(ni, &the_lnet.ln_nis, ni_list)
+ count++;
+
+ lnet_net_unlock(0);
+
+ return count;
+}
+
+/* Release a ping info allocated by lnet_ping_info_create(); the size
+ * is recomputed from pi_nnis, so pi_nnis must not have been changed
+ * since allocation. */
+static inline void
+lnet_ping_info_free(lnet_ping_info_t *pinfo)
+{
+	LIBCFS_FREE(pinfo,
+		    offsetof(lnet_ping_info_t,
+			     pi_ni[pinfo->pi_nnis]));
+}
+
+static void
+lnet_ping_info_destroy(void)
+{
+ struct lnet_ni *ni;
lnet_net_lock(LNET_LOCK_EX);
- the_lnet.ln_shutdown = 1; /* flag shutdown */
- /* Unlink NIs from the global table */
- while (!cfs_list_empty(&the_lnet.ln_nis)) {
- ni = cfs_list_entry(the_lnet.ln_nis.next,
- lnet_ni_t, ni_list);
- /* move it to zombie list and nobody can find it anymore */
- cfs_list_move(&ni->ni_list, &the_lnet.ln_nis_zombie);
- lnet_ni_decref_locked(ni, 0); /* drop ln_nis' ref */
+ list_for_each_entry(ni, &the_lnet.ln_nis, ni_list) {
+ lnet_ni_lock(ni);
+ ni->ni_status = NULL;
+ lnet_ni_unlock(ni);
+ }
- if (!cfs_list_empty(&ni->ni_cptlist)) {
- cfs_list_del_init(&ni->ni_cptlist);
- lnet_ni_decref_locked(ni, 0);
+ lnet_ping_info_free(the_lnet.ln_ping_info);
+ the_lnet.ln_ping_info = NULL;
+
+ lnet_net_unlock(LNET_LOCK_EX);
+}
+
+/* Event handler for the ping target EQ.  When the ping MD is finally
+ * unlinked, flag its ping info as invalid so lnet_ping_md_unlink()
+ * can stop polling.  NOTE(review): relies on md.user_ptr having been
+ * set to the lnet_ping_info_t when the MD was attached. */
+static void
+lnet_ping_event_handler(lnet_event_t *event)
+{
+	lnet_ping_info_t *pinfo = event->md.user_ptr;
+
+	if (event->unlinked)
+		pinfo->pi_features = LNET_PING_FEAT_INVAL;
+}
+
+static int
+lnet_ping_info_setup(lnet_ping_info_t **ppinfo, lnet_handle_md_t *md_handle,
+ int ni_count, bool set_eq)
+{
+ lnet_handle_me_t me_handle;
+ lnet_process_id_t id = {LNET_NID_ANY, LNET_PID_ANY};
+ lnet_md_t md = {0};
+ int rc, rc2;
+
+ if (set_eq) {
+ rc = LNetEQAlloc(0, lnet_ping_event_handler,
+ &the_lnet.ln_ping_target_eq);
+ if (rc != 0) {
+ CERROR("Can't allocate ping EQ: %d\n", rc);
+ return rc;
}
}
- /* Drop the cached eqwait NI. */
- if (the_lnet.ln_eq_waitni != NULL) {
- lnet_ni_decref_locked(the_lnet.ln_eq_waitni, 0);
- the_lnet.ln_eq_waitni = NULL;
+ *ppinfo = lnet_ping_info_create(ni_count);
+ if (*ppinfo == NULL) {
+ rc = -ENOMEM;
+ goto failed_0;
}
- /* Drop the cached loopback NI. */
- if (the_lnet.ln_loni != NULL) {
- lnet_ni_decref_locked(the_lnet.ln_loni, 0);
- the_lnet.ln_loni = NULL;
+ rc = LNetMEAttach(LNET_RESERVED_PORTAL, id,
+ LNET_PROTO_PING_MATCHBITS, 0,
+ LNET_UNLINK, LNET_INS_AFTER,
+ &me_handle);
+ if (rc != 0) {
+ CERROR("Can't create ping ME: %d\n", rc);
+ goto failed_1;
}
- lnet_net_unlock(LNET_LOCK_EX);
+	/* initialize md content; user_ptr lets lnet_ping_event_handler()
+	 * find the ping info when the MD is eventually unlinked */
+	md.start = *ppinfo;
+	md.length = offsetof(lnet_ping_info_t,
+			     pi_ni[(*ppinfo)->pi_nnis]);
+	md.threshold = LNET_MD_THRESH_INF;
+	md.max_size = 0;
+	md.options = LNET_MD_OP_GET | LNET_MD_TRUNCATE |
+		     LNET_MD_MANAGE_REMOTE;
+	md.eq_handle = the_lnet.ln_ping_target_eq;
+	md.user_ptr = *ppinfo;
+
+	rc = LNetMDAttach(me_handle, md, LNET_RETAIN, md_handle);
+	if (rc != 0) {
+		CERROR("Can't attach ping MD: %d\n", rc);
+		goto failed_2;
+	}
+
+ return 0;
+
+failed_2:
+ rc2 = LNetMEUnlink(me_handle);
+ LASSERT(rc2 == 0);
+failed_1:
+ lnet_ping_info_free(*ppinfo);
+ *ppinfo = NULL;
+failed_0:
+ if (set_eq)
+ LNetEQFree(the_lnet.ln_ping_target_eq);
+ return rc;
+}
- /* Clear lazy portals and drop delayed messages which hold refs
- * on their lnet_msg_t::msg_rxpeer */
- for (i = 0; i < the_lnet.ln_nportals; i++)
- LNetClearLazyPortal(i);
+/* Unlink the ping MD and wait (with signals blocked) until the unlink
+ * event has fired, i.e. until lnet_ping_event_handler() has flagged
+ * pi_features as invalid.  Busy-waits in 1-second steps. */
+static void
+lnet_ping_md_unlink(lnet_ping_info_t *pinfo, lnet_handle_md_t *md_handle)
+{
+	sigset_t blocked = cfs_block_allsigs();
+
+	LNetMDUnlink(*md_handle);
+	LNetInvalidateHandle(md_handle);
+
+	/* NB md could be busy; this just starts the unlink */
+	while (pinfo->pi_features != LNET_PING_FEAT_INVAL) {
+		CDEBUG(D_NET, "Still waiting for ping MD to unlink\n");
+		cfs_pause(cfs_time_seconds(1));
+	}
+
+	cfs_restore_sigs(blocked);
+}
- /* Clear the peer table and wait for all peers to go (they hold refs on
- * their NIs) */
- lnet_peer_tables_cleanup();
+/* Point each local NI's ni_status at its slot in the new ping info,
+ * carrying over the currently reported status (UP if none was set).
+ * Caller must hold lnet_net_lock(LNET_LOCK_EX); ping_info must have
+ * at least as many pi_ni slots as there are NIs on ln_nis. */
+static void
+lnet_ping_info_install_locked(lnet_ping_info_t *ping_info)
+{
+	int i;
+	lnet_ni_t *ni;
+	lnet_ni_status_t *ns;
+
+	i = 0;
+	list_for_each_entry(ni, &the_lnet.ln_nis, ni_list) {
+		LASSERT(i < ping_info->pi_nnis);
+		ns = &ping_info->pi_ni[i];
+
+		ns->ns_nid = ni->ni_nid;
+
+		lnet_ni_lock(ni);
+		ns->ns_status = (ni->ni_status != NULL) ?
+				ni->ni_status->ns_status : LNET_NI_STATUS_UP;
+		ni->ni_status = ns;
+		lnet_ni_unlock(ni);
+
+		i++;
+	}
+}
+
+/* Atomically install a freshly built ping info/MD pair as the ping
+ * target under the exclusive net lock, then unlink and free the old
+ * pair (if any) outside the lock.  Takes ownership of pinfo and
+ * md_handle. */
+static void
+lnet_ping_target_update(lnet_ping_info_t *pinfo, lnet_handle_md_t md_handle)
+{
+	lnet_ping_info_t *old_pinfo = NULL;
+	lnet_handle_md_t old_md;
+
+	/* switch the NIs to point to the new ping info created */
	lnet_net_lock(LNET_LOCK_EX);
+
+	if (!the_lnet.ln_routing)
+		pinfo->pi_features |= LNET_PING_FEAT_RTE_DISABLED;
+	lnet_ping_info_install_locked(pinfo);
+
+	if (the_lnet.ln_ping_info != NULL) {
+		old_pinfo = the_lnet.ln_ping_info;
+		old_md = the_lnet.ln_ping_target_md;
+	}
+	the_lnet.ln_ping_target_md = md_handle;
+	the_lnet.ln_ping_info = pinfo;
+
+	lnet_net_unlock(LNET_LOCK_EX);
+
+	if (old_pinfo != NULL) {
+		/* unlink the old ping info */
+		lnet_ping_md_unlink(old_pinfo, &old_md);
+		lnet_ping_info_free(old_pinfo);
+	}
+}
+
+/* Tear down the ping target: unlink its MD (blocking until done),
+ * free the ping EQ and destroy the ping info.  Counterpart of
+ * lnet_ping_info_setup(..., set_eq = true). */
+static void
+lnet_ping_target_fini(void)
+{
+	int rc;
+
+	lnet_ping_md_unlink(the_lnet.ln_ping_info,
+			    &the_lnet.ln_ping_target_md);
+
+	rc = LNetEQFree(the_lnet.ln_ping_target_eq);
+	LASSERT(rc == 0);
+
+	lnet_ping_info_destroy();
+}
+
+/* Per-CPT share of an NI's transmit credits: the NI total divided by
+ * its CPT count, clamped to at least 8x the per-peer tx credits and
+ * at most the NI total.  With a single CPT the full total is used. */
+static int
+lnet_ni_tq_credits(lnet_ni_t *ni)
+{
+	int credits;
+
+	LASSERT(ni->ni_ncpts >= 1);
+
+	if (ni->ni_ncpts == 1)
+		return ni->ni_maxtxcredits;
+
+	credits = ni->ni_maxtxcredits / ni->ni_ncpts;
+	credits = max(credits, 8 * ni->ni_peertxcredits);
+	credits = min(credits, ni->ni_maxtxcredits);
+
+	return credits;
+}
+
+static void
+lnet_clear_zombies_nis_locked(void)
+{
+ int i;
+ int islo;
+ lnet_ni_t *ni;
+
/* Now wait for the NI's I just nuked to show up on ln_zombie_nis
* and shut them down in guaranteed thread context */
i = 2;
- while (!cfs_list_empty(&the_lnet.ln_nis_zombie)) {
+ while (!list_empty(&the_lnet.ln_nis_zombie)) {
int *ref;
int j;
- ni = cfs_list_entry(the_lnet.ln_nis_zombie.next,
- lnet_ni_t, ni_list);
- cfs_list_del_init(&ni->ni_list);
+ ni = list_entry(the_lnet.ln_nis_zombie.next,
+ lnet_ni_t, ni_list);
+ list_del_init(&ni->ni_list);
cfs_percpt_for_each(ref, j, ni->ni_refs) {
if (*ref == 0)
continue;
/* still busy, add it back to zombie list */
- cfs_list_add(&ni->ni_list, &the_lnet.ln_nis_zombie);
+ list_add(&ni->ni_list, &the_lnet.ln_nis_zombie);
break;
}
- while (!cfs_list_empty(&ni->ni_list)) {
+ if (!list_empty(&ni->ni_list)) {
lnet_net_unlock(LNET_LOCK_EX);
++i;
if ((i & (-i)) == i) {
ni->ni_lnd->lnd_refcount--;
lnet_net_unlock(LNET_LOCK_EX);
- islo = ni->ni_lnd->lnd_type == LOLND;
+ islo = ni->ni_lnd->lnd_type == LOLND;
- LASSERT (!cfs_in_interrupt ());
- (ni->ni_lnd->lnd_shutdown)(ni);
+ LASSERT(!in_interrupt());
+ (ni->ni_lnd->lnd_shutdown)(ni);
- /* can't deref lnd anymore now; it might have unregistered
- * itself... */
+ /* can't deref lnd anymore now; it might have unregistered
+ * itself... */
- if (!islo)
- CDEBUG(D_LNI, "Removed LNI %s\n",
- libcfs_nid2str(ni->ni_nid));
+ if (!islo)
+ CDEBUG(D_LNI, "Removed LNI %s\n",
+ libcfs_nid2str(ni->ni_nid));
lnet_ni_free(ni);
+ i = 2;
lnet_net_lock(LNET_LOCK_EX);
}
+}
+/* Shut down all local NIs: move every NI to the zombie list, drop the
+ * cached eqwait/loopback references, flush lazy portals and the peer
+ * tables, then reap the zombies.  Called holding the API mutex with
+ * the LNet refcount at zero and shutdown not yet flagged. */
+static void
+lnet_shutdown_lndnis(void)
+{
+	int		i;
+	lnet_ni_t	*ni;
+
+	/* NB called holding the global mutex */
+
+	/* All quiet on the API front */
+	LASSERT(!the_lnet.ln_shutdown);
+	LASSERT(the_lnet.ln_refcount == 0);
+	LASSERT(list_empty(&the_lnet.ln_nis_zombie));
+
+	lnet_net_lock(LNET_LOCK_EX);
+	the_lnet.ln_shutdown = 1;	/* flag shutdown */
+
+	/* Unlink NIs from the global table */
+	while (!list_empty(&the_lnet.ln_nis)) {
+		ni = list_entry(the_lnet.ln_nis.next,
+				lnet_ni_t, ni_list);
+		/* move it to zombie list and nobody can find it anymore */
+		list_move(&ni->ni_list, &the_lnet.ln_nis_zombie);
+		lnet_ni_decref_locked(ni, 0);	/* drop ln_nis' ref */
+
+		if (!list_empty(&ni->ni_cptlist)) {
+			list_del_init(&ni->ni_cptlist);
+			lnet_ni_decref_locked(ni, 0);
+		}
+	}
+
+	/* Drop the cached eqwait NI. */
+	if (the_lnet.ln_eq_waitni != NULL) {
+		lnet_ni_decref_locked(the_lnet.ln_eq_waitni, 0);
+		the_lnet.ln_eq_waitni = NULL;
+	}
+
+	/* Drop the cached loopback NI. */
+	if (the_lnet.ln_loni != NULL) {
+		lnet_ni_decref_locked(the_lnet.ln_loni, 0);
+		the_lnet.ln_loni = NULL;
+	}
+
+	lnet_net_unlock(LNET_LOCK_EX);
+
+	/* Clear lazy portals and drop delayed messages which hold refs
+	 * on their lnet_msg_t::msg_rxpeer */
+	for (i = 0; i < the_lnet.ln_nportals; i++)
+		LNetClearLazyPortal(i);
+
+	/* Clear the peer table and wait for all peers to go (they hold refs on
+	 * their NIs) */
+	lnet_peer_tables_cleanup(NULL);
+
+	lnet_net_lock(LNET_LOCK_EX);
+
+	lnet_clear_zombies_nis_locked();
	the_lnet.ln_shutdown = 0;
	lnet_net_unlock(LNET_LOCK_EX);
+}
+
+int
+lnet_shutdown_lndni(__u32 net)
+{
+ lnet_ping_info_t *pinfo;
+ lnet_handle_md_t md_handle;
+ lnet_ni_t *found_ni = NULL;
+ int ni_count;
+ int rc;
+
+ if (LNET_NETTYP(net) == LOLND)
+ return -EINVAL;
+
+ ni_count = lnet_get_ni_count();
+
+ /* create and link a new ping info, before removing the old one */
+ rc = lnet_ping_info_setup(&pinfo, &md_handle, ni_count - 1, false);
+ if (rc != 0)
+ return rc;
+
+ /* proceed with shutting down the NI */
+ lnet_net_lock(LNET_LOCK_EX);
+
+ found_ni = lnet_net2ni_locked(net, 0);
+ if (found_ni == NULL) {
+ lnet_net_unlock(LNET_LOCK_EX);
+ lnet_ping_md_unlink(pinfo, &md_handle);
+ lnet_ping_info_free(pinfo);
+ return -EINVAL;
+ }
+
+ /* decrement the reference counter on found_ni which was
+ * incremented when we called lnet_net2ni_locked() */
+ lnet_ni_decref_locked(found_ni, 0);
+
+ /* Move ni to zombie list so nobody can find it anymore */
+ list_move(&found_ni->ni_list, &the_lnet.ln_nis_zombie);
- if (the_lnet.ln_network_tokens != NULL) {
- LIBCFS_FREE(the_lnet.ln_network_tokens,
- the_lnet.ln_network_tokens_nob);
- the_lnet.ln_network_tokens = NULL;
+ /* Drop the lock reference for the ln_nis ref. */
+ lnet_ni_decref_locked(found_ni, 0);
+
+ if (!list_empty(&found_ni->ni_cptlist)) {
+ list_del_init(&found_ni->ni_cptlist);
+ lnet_ni_decref_locked(found_ni, 0);
}
+
+ lnet_net_unlock(LNET_LOCK_EX);
+
+ /* Do peer table cleanup for this ni */
+ lnet_peer_tables_cleanup(found_ni);
+
+ lnet_net_lock(LNET_LOCK_EX);
+ lnet_clear_zombies_nis_locked();
+ lnet_net_unlock(LNET_LOCK_EX);
+
+ lnet_ping_target_update(pinfo, md_handle);
+
+ return 0;
}
-int
-lnet_startup_lndnis (void)
+static int
+lnet_startup_lndnis(struct list_head *nilist, __s32 peer_timeout,
+ __s32 peer_cr, __s32 peer_buf_cr, __s32 credits,
+ int *ni_count)
{
- lnd_t *lnd;
+ int rc = 0;
struct lnet_ni *ni;
+ int lnd_type;
+ lnd_t *lnd;
struct lnet_tx_queue *tq;
- cfs_list_t nilist;
int i;
- int rc = 0;
- int lnd_type;
- int nicount = 0;
- char *nets = lnet_get_networks();
- CFS_INIT_LIST_HEAD(&nilist);
+ while (!list_empty(nilist)) {
+ ni = list_entry(nilist->next, lnet_ni_t, ni_list);
+ lnd_type = LNET_NETTYP(LNET_NIDNET(ni->ni_nid));
- if (nets == NULL)
- goto failed;
+ if (!libcfs_isknown_lnd(lnd_type))
+ goto failed;
- rc = lnet_parse_networks(&nilist, nets);
- if (rc != 0)
- goto failed;
-
- while (!cfs_list_empty(&nilist)) {
- ni = cfs_list_entry(nilist.next, lnet_ni_t, ni_list);
- lnd_type = LNET_NETTYP(LNET_NIDNET(ni->ni_nid));
+ if (lnd_type == CIBLND ||
+ lnd_type == OPENIBLND ||
+ lnd_type == IIBLND ||
+ lnd_type == VIBLND) {
+ CERROR("LND %s obsoleted\n",
+ libcfs_lnd2str(lnd_type));
+ goto failed;
+ }
- LASSERT (libcfs_isknown_lnd(lnd_type));
+ /* Make sure this new NI is unique. */
+ lnet_net_lock(LNET_LOCK_EX);
+ if (!lnet_net_unique(LNET_NIDNET(ni->ni_nid),
+ &the_lnet.ln_nis)) {
+ if (lnd_type == LOLND) {
+ lnet_net_unlock(LNET_LOCK_EX);
+ list_del(&ni->ni_list);
+ lnet_ni_free(ni);
+ continue;
+ }
- if (lnd_type == CIBLND ||
- lnd_type == OPENIBLND ||
- lnd_type == IIBLND ||
- lnd_type == VIBLND) {
- CERROR("LND %s obsoleted\n",
- libcfs_lnd2str(lnd_type));
- goto failed;
- }
+ CERROR("Net %s is not unique\n",
+ libcfs_net2str(LNET_NIDNET(ni->ni_nid)));
+ lnet_net_unlock(LNET_LOCK_EX);
+ goto failed;
+ }
+ lnet_net_unlock(LNET_LOCK_EX);
- LNET_MUTEX_LOCK(&the_lnet.ln_lnd_mutex);
- lnd = lnet_find_lnd_by_type(lnd_type);
+ LNET_MUTEX_LOCK(&the_lnet.ln_lnd_mutex);
+ lnd = lnet_find_lnd_by_type(lnd_type);
#ifdef __KERNEL__
- if (lnd == NULL) {
- LNET_MUTEX_UNLOCK(&the_lnet.ln_lnd_mutex);
- rc = cfs_request_module("%s",
- libcfs_lnd2modname(lnd_type));
- LNET_MUTEX_LOCK(&the_lnet.ln_lnd_mutex);
-
- lnd = lnet_find_lnd_by_type(lnd_type);
- if (lnd == NULL) {
- LNET_MUTEX_UNLOCK(&the_lnet.ln_lnd_mutex);
- CERROR("Can't load LND %s, module %s, rc=%d\n",
- libcfs_lnd2str(lnd_type),
- libcfs_lnd2modname(lnd_type), rc);
+ if (lnd == NULL) {
+ LNET_MUTEX_UNLOCK(&the_lnet.ln_lnd_mutex);
+ rc = request_module("%s",
+ libcfs_lnd2modname(lnd_type));
+ LNET_MUTEX_LOCK(&the_lnet.ln_lnd_mutex);
+
+ lnd = lnet_find_lnd_by_type(lnd_type);
+ if (lnd == NULL) {
+ LNET_MUTEX_UNLOCK(&the_lnet.ln_lnd_mutex);
+ CERROR("Can't load LND %s, module %s, rc=%d\n",
+ libcfs_lnd2str(lnd_type),
+ libcfs_lnd2modname(lnd_type), rc);
#ifndef HAVE_MODULE_LOADING_SUPPORT
- LCONSOLE_ERROR_MSG(0x104, "Your kernel must be "
- "compiled with kernel module "
- "loading support.");
+ LCONSOLE_ERROR_MSG(0x104, "Your kernel must be "
+ "compiled with kernel module "
+ "loading support.");
#endif
- goto failed;
- }
- }
+ goto failed;
+ }
+ }
#else
- if (lnd == NULL) {
- LNET_MUTEX_UNLOCK(&the_lnet.ln_lnd_mutex);
- CERROR("LND %s not supported\n",
- libcfs_lnd2str(lnd_type));
- goto failed;
- }
+ if (lnd == NULL) {
+ LNET_MUTEX_UNLOCK(&the_lnet.ln_lnd_mutex);
+ CERROR("LND %s not supported\n",
+ libcfs_lnd2str(lnd_type));
+ goto failed;
+ }
#endif
lnet_net_lock(LNET_LOCK_EX);
lnd->lnd_refcount++;
lnet_net_unlock(LNET_LOCK_EX);
- ni->ni_lnd = lnd;
+ ni->ni_lnd = lnd;
- rc = (lnd->lnd_startup)(ni);
+ rc = (lnd->lnd_startup)(ni);
- LNET_MUTEX_UNLOCK(&the_lnet.ln_lnd_mutex);
+ LNET_MUTEX_UNLOCK(&the_lnet.ln_lnd_mutex);
- if (rc != 0) {
- LCONSOLE_ERROR_MSG(0x105, "Error %d starting up LNI %s"
- "\n",
- rc, libcfs_lnd2str(lnd->lnd_type));
+ if (rc != 0) {
+ LCONSOLE_ERROR_MSG(0x105, "Error %d starting up LNI %s"
+ "\n",
+ rc, libcfs_lnd2str(lnd->lnd_type));
lnet_net_lock(LNET_LOCK_EX);
lnd->lnd_refcount--;
lnet_net_unlock(LNET_LOCK_EX);
- goto failed;
- }
-
- LASSERT (ni->ni_peertimeout <= 0 || lnd->lnd_query != NULL);
+ goto failed;
+ }
- cfs_list_del(&ni->ni_list);
+ /* If given some LND tunable parameters, parse those now to
+ * override the values in the NI structure. */
+ if (peer_buf_cr >= 0)
+ ni->ni_peerrtrcredits = peer_buf_cr;
+ if (peer_timeout >= 0)
+ ni->ni_peertimeout = peer_timeout;
+ /*
+ * TODO
+ * Note: For now, don't allow the user to change
+ * peertxcredits as this number is used in the
+ * IB LND to control queue depth.
+ * if (peer_cr != -1)
+ * ni->ni_peertxcredits = peer_cr;
+ */
+ if (credits >= 0)
+ ni->ni_maxtxcredits = credits;
+
+ LASSERT(ni->ni_peertimeout <= 0 || lnd->lnd_query != NULL);
+
+ list_del(&ni->ni_list);
lnet_net_lock(LNET_LOCK_EX);
/* refcount for ln_nis */
lnet_ni_addref_locked(ni, 0);
- cfs_list_add_tail(&ni->ni_list, &the_lnet.ln_nis);
+ list_add_tail(&ni->ni_list, &the_lnet.ln_nis);
if (ni->ni_cpts != NULL) {
- cfs_list_add_tail(&ni->ni_cptlist,
- &the_lnet.ln_nis_cpt);
+ list_add_tail(&ni->ni_cptlist,
+ &the_lnet.ln_nis_cpt);
lnet_ni_addref_locked(ni, 0);
}
lnet_net_unlock(LNET_LOCK_EX);
- if (lnd->lnd_type == LOLND) {
- lnet_ni_addref(ni);
- LASSERT (the_lnet.ln_loni == NULL);
- the_lnet.ln_loni = ni;
- continue;
- }
+ /* increment the ni_count here to account for the LOLND as
+ * well. If we increment past this point then the number
+ * of count will be missing the LOLND, and then ping and
+ * will not report the LOLND
+ */
+ if (ni_count != NULL)
+ (*ni_count)++;
+
+ if (lnd->lnd_type == LOLND) {
+ lnet_ni_addref(ni);
+ LASSERT(the_lnet.ln_loni == NULL);
+ the_lnet.ln_loni = ni;
+ continue;
+ }
#ifndef __KERNEL__
- if (lnd->lnd_wait != NULL) {
+ if (lnd->lnd_wait != NULL) {
if (the_lnet.ln_eq_waitni == NULL) {
lnet_ni_addref(ni);
the_lnet.ln_eq_waitni = ni;
}
- } else {
+ } else {
# ifndef HAVE_LIBPTHREAD
- LCONSOLE_ERROR_MSG(0x106, "LND %s not supported in a "
- "single-threaded runtime\n",
- libcfs_lnd2str(lnd_type));
- goto failed;
+ LCONSOLE_ERROR_MSG(0x106, "LND %s not supported in a "
+ "single-threaded runtime\n",
+ libcfs_lnd2str(lnd_type));
+ goto failed;
# endif
- }
+ }
#endif
- if (ni->ni_peertxcredits == 0 ||
- ni->ni_maxtxcredits == 0) {
- LCONSOLE_ERROR_MSG(0x107, "LNI %s has no %scredits\n",
- libcfs_lnd2str(lnd->lnd_type),
- ni->ni_peertxcredits == 0 ?
- "" : "per-peer ");
- goto failed;
- }
+ if (ni->ni_peertxcredits == 0 ||
+ ni->ni_maxtxcredits == 0) {
+ LCONSOLE_ERROR_MSG(0x107, "LNI %s has no %scredits\n",
+ libcfs_lnd2str(lnd->lnd_type),
+ ni->ni_peertxcredits == 0 ?
+ "" : "per-peer ");
+ goto failed;
+ }
cfs_percpt_for_each(tq, i, ni->ni_tx_queues) {
tq->tq_credits_min =
libcfs_nid2str(ni->ni_nid), ni->ni_peertxcredits,
lnet_ni_tq_credits(ni) * LNET_CPT_NUMBER,
ni->ni_peerrtrcredits, ni->ni_peertimeout);
-
- nicount++;
}
- if (the_lnet.ln_eq_waitni != NULL && nicount > 1) {
- lnd_type = the_lnet.ln_eq_waitni->ni_lnd->lnd_type;
- LCONSOLE_ERROR_MSG(0x109, "LND %s can only run single-network"
- "\n",
- libcfs_lnd2str(lnd_type));
- goto failed;
- }
-
- return 0;
-
- failed:
- lnet_shutdown_lndnis();
-
- while (!cfs_list_empty(&nilist)) {
- ni = cfs_list_entry(nilist.next, lnet_ni_t, ni_list);
- cfs_list_del(&ni->ni_list);
+ return 0;
+failed:
+ while (!list_empty(nilist)) {
+ ni = list_entry(nilist->next, lnet_ni_t, ni_list);
+ list_del(&ni->ni_list);
lnet_ni_free(ni);
}
-
- return -ENETDOWN;
+ return -EINVAL;
}
/**
the_lnet.ln_refcount = 0;
the_lnet.ln_init = 1;
LNetInvalidateHandle(&the_lnet.ln_rc_eqh);
- CFS_INIT_LIST_HEAD(&the_lnet.ln_lnds);
- CFS_INIT_LIST_HEAD(&the_lnet.ln_rcd_zombie);
- CFS_INIT_LIST_HEAD(&the_lnet.ln_rcd_deathrow);
+ INIT_LIST_HEAD(&the_lnet.ln_lnds);
+ INIT_LIST_HEAD(&the_lnet.ln_rcd_zombie);
+ INIT_LIST_HEAD(&the_lnet.ln_rcd_deathrow);
#ifdef __KERNEL__
/* The hash table size is the number of bits it takes to express the set
the_lnet.ln_remote_nets_hbits = max_t(int, 1,
order_base_2(rnet_htable_size) - 1);
- /* All LNDs apart from the LOLND are in separate modules. They
- * register themselves when their module loads, and unregister
- * themselves when their module is unloaded. */
+ /* All LNDs apart from the LOLND are in separate modules. They
+ * register themselves when their module loads, and unregister
+ * themselves when their module is unloaded. */
#else
the_lnet.ln_remote_nets_hbits = 8;
- /* Register LNDs
- * NB the order here determines default 'networks=' order */
+ /* Register LNDs
+ * NB the order here determines default 'networks=' order */
# ifdef HAVE_LIBPTHREAD
- LNET_REGISTER_ULND(the_tcplnd);
+ LNET_REGISTER_ULND(the_tcplnd);
# endif
#endif
- lnet_register_lnd(&the_lolnd);
- return 0;
+ lnet_register_lnd(&the_lolnd);
+ return 0;
}
EXPORT_SYMBOL(LNetInit);
LASSERT(the_lnet.ln_init);
LASSERT(the_lnet.ln_refcount == 0);
- while (!cfs_list_empty(&the_lnet.ln_lnds))
- lnet_unregister_lnd(cfs_list_entry(the_lnet.ln_lnds.next,
- lnd_t, lnd_list));
+ while (!list_empty(&the_lnet.ln_lnds))
+ lnet_unregister_lnd(list_entry(the_lnet.ln_lnds.next,
+ lnd_t, lnd_list));
lnet_destroy_locks();
the_lnet.ln_init = 0;
int
LNetNIInit(lnet_pid_t requested_pid)
{
- int im_a_router = 0;
- int rc;
+ int im_a_router = 0;
+ int rc;
+ int ni_count = 0;
+ int lnd_type;
+ struct lnet_ni *ni;
+ lnet_ping_info_t *pinfo;
+ lnet_handle_md_t md_handle;
+ struct list_head net_head;
- LNET_MUTEX_LOCK(&the_lnet.ln_api_mutex);
+ INIT_LIST_HEAD(&net_head);
- LASSERT (the_lnet.ln_init);
- CDEBUG(D_OTHER, "refs %d\n", the_lnet.ln_refcount);
+ LNET_MUTEX_LOCK(&the_lnet.ln_api_mutex);
- if (the_lnet.ln_refcount > 0) {
- rc = the_lnet.ln_refcount++;
- goto out;
- }
+ LASSERT(the_lnet.ln_init);
+ CDEBUG(D_OTHER, "refs %d\n", the_lnet.ln_refcount);
- lnet_get_tunables();
+ if (the_lnet.ln_refcount > 0) {
+ rc = the_lnet.ln_refcount++;
+ LNET_MUTEX_UNLOCK(&the_lnet.ln_api_mutex);
+ return rc;
+ }
- if (requested_pid == LNET_PID_ANY) {
- /* Don't instantiate LNET just for me */
- rc = -ENETDOWN;
- goto failed0;
- }
+ rc = lnet_prepare(requested_pid);
+ if (rc != 0)
+ goto failed0;
+
+ rc = lnet_parse_networks(&net_head, lnet_get_networks());
+ if (rc < 0)
+ goto failed1;
- rc = lnet_prepare(requested_pid);
- if (rc != 0)
- goto failed0;
+ rc = lnet_startup_lndnis(&net_head, -1, -1, -1, -1, &ni_count);
+ if (rc != 0)
+ goto failed1;
- rc = lnet_startup_lndnis();
- if (rc != 0)
- goto failed1;
+	if (the_lnet.ln_eq_waitni != NULL && ni_count > 1) {
+		lnd_type = the_lnet.ln_eq_waitni->ni_lnd->lnd_type;
+		LCONSOLE_ERROR_MSG(0x109, "LND %s can only run single-network"
+				   "\n",
+				   libcfs_lnd2str(lnd_type));
+		/* rc is still 0 here (lnet_startup_lndnis() succeeded);
+		 * without an explicit error code the failed0 path would hit
+		 * LASSERT(rc < 0) -- and the function would otherwise return
+		 * "success" after tearing everything down. */
+		rc = -EINVAL;
+		goto failed2;
+	}
- rc = lnet_parse_routes(lnet_get_routes(), &im_a_router);
- if (rc != 0)
- goto failed2;
+ rc = lnet_parse_routes(lnet_get_routes(), &im_a_router);
+ if (rc != 0)
+ goto failed2;
- rc = lnet_check_routes();
- if (rc != 0)
- goto failed2;
+ rc = lnet_check_routes();
+ if (rc != 0)
+ goto failed2;
rc = lnet_rtrpools_alloc(im_a_router);
- if (rc != 0)
- goto failed2;
-
- rc = lnet_acceptor_start();
- if (rc != 0)
- goto failed2;
-
- the_lnet.ln_refcount = 1;
- /* Now I may use my own API functions... */
-
- /* NB router checker needs the_lnet.ln_ping_info in
- * lnet_router_checker -> lnet_update_ni_status_locked */
- rc = lnet_ping_target_init();
- if (rc != 0)
- goto failed3;
-
- rc = lnet_router_checker_start();
- if (rc != 0)
- goto failed4;
-
- lnet_proc_init();
- goto out;
-
- failed4:
- lnet_ping_target_fini();
- failed3:
- the_lnet.ln_refcount = 0;
- lnet_acceptor_stop();
- failed2:
- lnet_destroy_routes();
- lnet_shutdown_lndnis();
- failed1:
- lnet_unprepare();
- failed0:
- LASSERT (rc < 0);
- out:
- LNET_MUTEX_UNLOCK(&the_lnet.ln_api_mutex);
- return rc;
+ if (rc != 0)
+ goto failed2;
+
+ rc = lnet_acceptor_start();
+ if (rc != 0)
+ goto failed2;
+ the_lnet.ln_refcount = 1;
+ /* Now I may use my own API functions... */
+
+ rc = lnet_ping_info_setup(&pinfo, &md_handle, ni_count, true);
+ if (rc != 0)
+ goto failed3;
+
+ lnet_ping_target_update(pinfo, md_handle);
+
+ rc = lnet_router_checker_start();
+ if (rc != 0)
+ goto failed4;
+
+ lnet_proc_init();
+
+ LNET_MUTEX_UNLOCK(&the_lnet.ln_api_mutex);
+
+ return 0;
+
+failed4:
+	the_lnet.ln_refcount = 0;
+	lnet_ping_md_unlink(pinfo, &md_handle);
+	lnet_ping_info_free(pinfo);
+failed3:
+	lnet_acceptor_stop();
+	/* Do NOT assign LNetEQFree()'s result to 'rc' here: 'rc' must keep
+	 * the negative errno that brought us down this path -- it is both
+	 * the function's return value and checked by LASSERT(rc < 0) at
+	 * failed0.  Overwriting it with 0 would trip that assertion and
+	 * report success to the caller after a failed init.
+	 * NOTE(review): if lnet_ping_info_setup() can fail before it
+	 * allocates ln_ping_target_eq, freeing the EQ here is wrong for the
+	 * goto-failed3 path -- confirm against lnet_ping_info_setup(). */
+	if (LNetEQFree(the_lnet.ln_ping_target_eq) != 0)
+		CERROR("Failed to free ping target EQ\n");
+failed2:
+ lnet_destroy_routes();
+ lnet_shutdown_lndnis();
+failed1:
+ lnet_unprepare();
+failed0:
+ LASSERT(rc < 0);
+ LNET_MUTEX_UNLOCK(&the_lnet.ln_api_mutex);
+ while (!list_empty(&net_head)) {
+ ni = list_entry(net_head.next, struct lnet_ni, ni_list);
+ list_del_init(&ni->ni_list);
+ lnet_ni_free(ni);
+ }
+ return rc;
}
EXPORT_SYMBOL(LNetNIInit);
}
EXPORT_SYMBOL(LNetNIFini);
+/*
+ * Dynamically add a single network interface while LNet is running.
+ *
+ * \param requested_pid	requested LNet PID.  NOTE(review): this parameter is
+ *			never used in the body below -- confirm whether it
+ *			should be validated/applied or dropped.
+ * \param nets		network string describing exactly ONE network to add
+ * \param peer_timeout	per-NI peer timeout tunable (presumably -1 selects
+ *			the module default, as LNetNIInit() passes -1 --
+ *			verify in lnet_startup_lndnis())
+ * \param peer_cr	peer credits tunable
+ * \param peer_buf_cr	peer router buffer credits tunable
+ * \param credits	NI transmit credits tunable
+ *
+ * \retval 0		on success
+ * \retval -errno	on failure; any NIs parsed but not started are freed
+ */
+int
+lnet_dyn_add_ni(lnet_pid_t requested_pid, char *nets,
+		__s32 peer_timeout, __s32 peer_cr, __s32 peer_buf_cr,
+		__s32 credits)
+{
+	lnet_ping_info_t *pinfo;
+	lnet_handle_md_t md_handle;
+	struct lnet_ni *ni;
+	struct list_head net_head;
+	int rc;
+
+	INIT_LIST_HEAD(&net_head);
+
+	/* Create a ni structure for the network string */
+	rc = lnet_parse_networks(&net_head, nets);
+	if (rc < 0)
+		return rc;
+
+	LNET_MUTEX_LOCK(&the_lnet.ln_api_mutex);
+
+	/* on success rc is the number of networks parsed.
+	 * NOTE(review): rc == 0 (no network parsed) falls through and still
+	 * grows the ping info by one NI -- confirm lnet_parse_networks()
+	 * cannot return 0 for a non-empty string, or reject it here. */
+	if (rc > 1) {
+		rc = -EINVAL; /* only add one interface per call */
+		goto failed0;
+	}
+
+	/* rebuild the ping info with room for the NI being added */
+	rc = lnet_ping_info_setup(&pinfo, &md_handle, 1 + lnet_get_ni_count(),
+				  false);
+	if (rc != 0)
+		goto failed0;
+
+	rc = lnet_startup_lndnis(&net_head, peer_timeout, peer_cr,
+				 peer_buf_cr, credits, NULL);
+	if (rc != 0)
+		goto failed1;
+
+	/* publish the new ping info only after the NI is up */
+	lnet_ping_target_update(pinfo, md_handle);
+	LNET_MUTEX_UNLOCK(&the_lnet.ln_api_mutex);
+
+	return 0;
+
+failed1:
+	lnet_ping_md_unlink(pinfo, &md_handle);
+	lnet_ping_info_free(pinfo);
+failed0:
+	LNET_MUTEX_UNLOCK(&the_lnet.ln_api_mutex);
+	/* free any NIs parsed from 'nets' that were never started */
+	while (!list_empty(&net_head)) {
+		ni = list_entry(net_head.next, struct lnet_ni, ni_list);
+		list_del_init(&ni->ni_list);
+		lnet_ni_free(ni);
+	}
+	return rc;
+}
+
+/*
+ * Dynamically remove the NI attached to network \a net.
+ *
+ * \param net	packed network number (__u32) identifying the NI to shut down
+ *
+ * \return the result of lnet_shutdown_lndni(); serialized against all other
+ *	   configuration changes by ln_api_mutex.
+ */
+int
+lnet_dyn_del_ni(__u32 net)
+{
+	int rc;
+
+	LNET_MUTEX_LOCK(&the_lnet.ln_api_mutex);
+	rc = lnet_shutdown_lndni(net);
+	LNET_MUTEX_UNLOCK(&the_lnet.ln_api_mutex);
+
+	return rc;
+}
+
/**
* This is an ugly hack to export IOC_LIBCFS_DEBUG_PEER and
* IOC_LIBCFS_PORTALS_COMPATIBILITY commands to users, by tweaking the LNet
int
LNetCtl(unsigned int cmd, void *arg)
{
- struct libcfs_ioctl_data *data = arg;
- lnet_process_id_t id = {0};
- lnet_ni_t *ni;
- int rc;
+ struct libcfs_ioctl_data *data = arg;
+ struct lnet_ioctl_config_data *config;
+ lnet_process_id_t id = {0};
+ lnet_ni_t *ni;
+ int rc;
- LASSERT (the_lnet.ln_init);
- LASSERT (the_lnet.ln_refcount > 0);
+ LASSERT(the_lnet.ln_init);
+
+ switch (cmd) {
+ case IOC_LIBCFS_GET_NI:
+ rc = LNetGetId(data->ioc_count, &id);
+ data->ioc_nid = id.nid;
+ return rc;
+
+ case IOC_LIBCFS_FAIL_NID:
+ return lnet_fail_nid(data->ioc_nid, data->ioc_count);
+
+ case IOC_LIBCFS_ADD_ROUTE:
+ LNET_MUTEX_LOCK(&the_lnet.ln_api_mutex);
+ rc = lnet_add_route(data->ioc_net, data->ioc_count,
+ data->ioc_nid, data->ioc_priority);
+ LNET_MUTEX_UNLOCK(&the_lnet.ln_api_mutex);
+ return (rc != 0) ? rc : lnet_check_routes();
+
+ case IOC_LIBCFS_DEL_ROUTE:
+ config = arg;
+ LNET_MUTEX_LOCK(&the_lnet.ln_api_mutex);
+ rc = lnet_del_route(config->cfg_net, config->cfg_nid);
+ LNET_MUTEX_UNLOCK(&the_lnet.ln_api_mutex);
+ return rc;
+
+ case IOC_LIBCFS_GET_ROUTE:
+ config = arg;
+ return lnet_get_route(config->cfg_count,
+ &config->cfg_net,
+ &config->cfg_config_u.cfg_route.rtr_hop,
+ &config->cfg_nid,
+ &config->cfg_config_u.cfg_route.rtr_flags,
+ &config->cfg_config_u.cfg_route.
+ rtr_priority);
+
+ case IOC_LIBCFS_ADD_NET:
+ return 0;
+
+ case IOC_LIBCFS_DEL_NET:
+ return 0;
+
+ case IOC_LIBCFS_GET_NET:
+ return 0;
+
+ case IOC_LIBCFS_GET_LNET_STATS:
+ {
+ struct lnet_ioctl_lnet_stats *lnet_stats = arg;
+
+ lnet_counters_get(&lnet_stats->st_cntrs);
+ return 0;
+ }
+
+#if defined(__KERNEL__) && defined(LNET_ROUTER)
+ case IOC_LIBCFS_CONFIG_RTR:
+ return 0;
+
+ case IOC_LIBCFS_ADD_BUF:
+ return 0;
+#endif
- switch (cmd) {
- case IOC_LIBCFS_GET_NI:
- rc = LNetGetId(data->ioc_count, &id);
- data->ioc_nid = id.nid;
- return rc;
-
- case IOC_LIBCFS_FAIL_NID:
- return lnet_fail_nid(data->ioc_nid, data->ioc_count);
-
- case IOC_LIBCFS_ADD_ROUTE:
- rc = lnet_add_route(data->ioc_net, data->ioc_count,
- data->ioc_nid);
- return (rc != 0) ? rc : lnet_check_routes();
-
- case IOC_LIBCFS_DEL_ROUTE:
- return lnet_del_route(data->ioc_net, data->ioc_nid);
-
- case IOC_LIBCFS_GET_ROUTE:
- return lnet_get_route(data->ioc_count,
- &data->ioc_net, &data->ioc_count,
- &data->ioc_nid, &data->ioc_flags);
- case IOC_LIBCFS_NOTIFY_ROUTER:
- return lnet_notify(NULL, data->ioc_nid, data->ioc_flags,
- cfs_time_current() -
- cfs_time_seconds(cfs_time_current_sec() -
- (time_t)data->ioc_u64[0]));
-
- case IOC_LIBCFS_PORTALS_COMPATIBILITY:
- /* This can be removed once lustre stops calling it */
- return 0;
-
- case IOC_LIBCFS_LNET_DIST:
- rc = LNetDist(data->ioc_nid, &data->ioc_nid, &data->ioc_u32[1]);
- if (rc < 0 && rc != -EHOSTUNREACH)
- return rc;
-
- data->ioc_u32[0] = rc;
- return 0;
-
- case IOC_LIBCFS_TESTPROTOCOMPAT:
+ case IOC_LIBCFS_GET_BUF:
+ return 0;
+
+ case IOC_LIBCFS_GET_PEER_INFO:
+ return 0;
+
+ case IOC_LIBCFS_NOTIFY_ROUTER:
+ return lnet_notify(NULL, data->ioc_nid, data->ioc_flags,
+ cfs_time_current() -
+ cfs_time_seconds(cfs_time_current_sec() -
+ (time_t)data->ioc_u64[0]));
+
+ case IOC_LIBCFS_PORTALS_COMPATIBILITY:
+ /* This can be removed once lustre stops calling it */
+ return 0;
+
+ case IOC_LIBCFS_LNET_DIST:
+ rc = LNetDist(data->ioc_nid, &data->ioc_nid, &data->ioc_u32[1]);
+ if (rc < 0 && rc != -EHOSTUNREACH)
+ return rc;
+
+ data->ioc_u32[0] = rc;
+ return 0;
+
+ case IOC_LIBCFS_TESTPROTOCOMPAT:
lnet_net_lock(LNET_LOCK_EX);
the_lnet.ln_testprotocompat = data->ioc_flags;
lnet_net_unlock(LNET_LOCK_EX);
- return 0;
-
- case IOC_LIBCFS_PING:
- id.nid = data->ioc_nid;
- id.pid = data->ioc_u32[0];
- rc = lnet_ping(id, data->ioc_u32[1], /* timeout */
- (lnet_process_id_t *)data->ioc_pbuf1,
- data->ioc_plen1/sizeof(lnet_process_id_t));
- if (rc < 0)
- return rc;
- data->ioc_count = rc;
- return 0;
-
- case IOC_LIBCFS_DEBUG_PEER: {
- /* CAVEAT EMPTOR: this one designed for calling directly; not
- * via an ioctl */
- id = *((lnet_process_id_t *) arg);
-
- lnet_debug_peer(id.nid);
-
- ni = lnet_net2ni(LNET_NIDNET(id.nid));
- if (ni == NULL) {
- CDEBUG(D_WARNING, "No NI for %s\n", libcfs_id2str(id));
- } else {
- if (ni->ni_lnd->lnd_ctl == NULL) {
- CDEBUG(D_WARNING, "No ctl for %s\n",
- libcfs_id2str(id));
- } else {
- (void)ni->ni_lnd->lnd_ctl(ni, cmd, arg);
- }
-
- lnet_ni_decref(ni);
- }
- return 0;
- }
+ return 0;
- default:
- ni = lnet_net2ni(data->ioc_net);
- if (ni == NULL)
- return -EINVAL;
+ case IOC_LIBCFS_PING:
+ id.nid = data->ioc_nid;
+ id.pid = data->ioc_u32[0];
+ rc = lnet_ping(id, data->ioc_u32[1], /* timeout */
+ (lnet_process_id_t *)data->ioc_pbuf1,
+ data->ioc_plen1/sizeof(lnet_process_id_t));
+ if (rc < 0)
+ return rc;
+ data->ioc_count = rc;
+ return 0;
- if (ni->ni_lnd->lnd_ctl == NULL)
- rc = -EINVAL;
- else
- rc = ni->ni_lnd->lnd_ctl(ni, cmd, arg);
+ case IOC_LIBCFS_DEBUG_PEER: {
+ /* CAVEAT EMPTOR: this one designed for calling directly; not
+ * via an ioctl */
+ id = *((lnet_process_id_t *) arg);
+
+ lnet_debug_peer(id.nid);
+
+ ni = lnet_net2ni(LNET_NIDNET(id.nid));
+ if (ni == NULL) {
+ CDEBUG(D_WARNING, "No NI for %s\n", libcfs_id2str(id));
+ } else {
+ if (ni->ni_lnd->lnd_ctl == NULL) {
+ CDEBUG(D_WARNING, "No ctl for %s\n",
+ libcfs_id2str(id));
+ } else {
+ (void)ni->ni_lnd->lnd_ctl(ni, cmd, arg);
+ }
- lnet_ni_decref(ni);
- return rc;
- }
- /* not reached */
+ lnet_ni_decref(ni);
+ }
+ return 0;
+ }
+
+ default:
+ ni = lnet_net2ni(data->ioc_net);
+ if (ni == NULL)
+ return -EINVAL;
+
+ if (ni->ni_lnd->lnd_ctl == NULL)
+ rc = -EINVAL;
+ else
+ rc = ni->ni_lnd->lnd_ctl(ni, cmd, arg);
+
+ lnet_ni_decref(ni);
+ return rc;
+ }
+ /* not reached */
}
EXPORT_SYMBOL(LNetCtl);
int
LNetGetId(unsigned int index, lnet_process_id_t *id)
{
- struct lnet_ni *ni;
- cfs_list_t *tmp;
- int cpt;
- int rc = -ENOENT;
+ struct lnet_ni *ni;
+ struct list_head *tmp;
+ int cpt;
+ int rc = -ENOENT;
LASSERT(the_lnet.ln_init);
LASSERT(the_lnet.ln_refcount > 0);
cpt = lnet_net_lock_current();
- cfs_list_for_each(tmp, &the_lnet.ln_nis) {
- if (index-- != 0)
- continue;
+ list_for_each(tmp, &the_lnet.ln_nis) {
+ if (index-- != 0)
+ continue;
- ni = cfs_list_entry(tmp, lnet_ni_t, ni_list);
+ ni = list_entry(tmp, lnet_ni_t, ni_list);
- id->nid = ni->ni_nid;
- id->pid = the_lnet.ln_pid;
- rc = 0;
- break;
- }
+ id->nid = ni->ni_nid;
+ id->pid = the_lnet.ln_pid;
+ rc = 0;
+ break;
+ }
lnet_net_unlock(cpt);
return rc;
}
EXPORT_SYMBOL(LNetSnprintHandle);
-static int
-lnet_create_ping_info(void)
-{
- int i;
- int n;
- int rc;
- unsigned int infosz;
- lnet_ni_t *ni;
- lnet_process_id_t id;
- lnet_ping_info_t *pinfo;
-
- for (n = 0; ; n++) {
- rc = LNetGetId(n, &id);
- if (rc == -ENOENT)
- break;
-
- LASSERT (rc == 0);
- }
-
- infosz = offsetof(lnet_ping_info_t, pi_ni[n]);
- LIBCFS_ALLOC(pinfo, infosz);
- if (pinfo == NULL) {
- CERROR("Can't allocate ping info[%d]\n", n);
- return -ENOMEM;
- }
-
- pinfo->pi_nnis = n;
- pinfo->pi_pid = the_lnet.ln_pid;
- pinfo->pi_magic = LNET_PROTO_PING_MAGIC;
- pinfo->pi_features = LNET_PING_FEAT_NI_STATUS;
-
- for (i = 0; i < n; i++) {
- lnet_ni_status_t *ns = &pinfo->pi_ni[i];
-
- rc = LNetGetId(i, &id);
- LASSERT (rc == 0);
-
- ns->ns_nid = id.nid;
- ns->ns_status = LNET_NI_STATUS_UP;
-
- lnet_net_lock(0);
-
- ni = lnet_nid2ni_locked(id.nid, 0);
- LASSERT(ni != NULL);
-
- lnet_ni_lock(ni);
- LASSERT(ni->ni_status == NULL);
- ni->ni_status = ns;
- lnet_ni_unlock(ni);
-
- lnet_ni_decref_locked(ni, 0);
- lnet_net_unlock(0);
- }
-
- the_lnet.ln_ping_info = pinfo;
- return 0;
-}
-
-static void
-lnet_destroy_ping_info(void)
-{
- struct lnet_ni *ni;
-
- lnet_net_lock(0);
-
- cfs_list_for_each_entry(ni, &the_lnet.ln_nis, ni_list) {
- lnet_ni_lock(ni);
- ni->ni_status = NULL;
- lnet_ni_unlock(ni);
- }
-
- lnet_net_unlock(0);
-
- LIBCFS_FREE(the_lnet.ln_ping_info,
- offsetof(lnet_ping_info_t,
- pi_ni[the_lnet.ln_ping_info->pi_nnis]));
- the_lnet.ln_ping_info = NULL;
- return;
-}
-
int
-lnet_ping_target_init(void)
-{
- lnet_md_t md = {0};
- lnet_handle_me_t meh;
- lnet_process_id_t id;
- int rc;
- int rc2;
- int infosz;
-
- rc = lnet_create_ping_info();
- if (rc != 0)
- return rc;
-
- /* We can have a tiny EQ since we only need to see the unlink event on
- * teardown, which by definition is the last one! */
- rc = LNetEQAlloc(2, LNET_EQ_HANDLER_NONE, &the_lnet.ln_ping_target_eq);
- if (rc != 0) {
- CERROR("Can't allocate ping EQ: %d\n", rc);
- goto failed_0;
- }
-
- memset(&id, 0, sizeof(lnet_process_id_t));
- id.nid = LNET_NID_ANY;
- id.pid = LNET_PID_ANY;
-
- rc = LNetMEAttach(LNET_RESERVED_PORTAL, id,
- LNET_PROTO_PING_MATCHBITS, 0,
- LNET_UNLINK, LNET_INS_AFTER,
- &meh);
- if (rc != 0) {
- CERROR("Can't create ping ME: %d\n", rc);
- goto failed_1;
- }
-
- /* initialize md content */
- infosz = offsetof(lnet_ping_info_t,
- pi_ni[the_lnet.ln_ping_info->pi_nnis]);
- md.start = the_lnet.ln_ping_info;
- md.length = infosz;
- md.threshold = LNET_MD_THRESH_INF;
- md.max_size = 0;
- md.options = LNET_MD_OP_GET | LNET_MD_TRUNCATE |
- LNET_MD_MANAGE_REMOTE;
- md.user_ptr = NULL;
- md.eq_handle = the_lnet.ln_ping_target_eq;
-
- rc = LNetMDAttach(meh, md,
- LNET_RETAIN,
- &the_lnet.ln_ping_target_md);
- if (rc != 0) {
- CERROR("Can't attach ping MD: %d\n", rc);
- goto failed_2;
- }
-
- return 0;
-
- failed_2:
- rc2 = LNetMEUnlink(meh);
- LASSERT (rc2 == 0);
- failed_1:
- rc2 = LNetEQFree(the_lnet.ln_ping_target_eq);
- LASSERT (rc2 == 0);
- failed_0:
- lnet_destroy_ping_info();
- return rc;
-}
-
-void
-lnet_ping_target_fini(void)
+lnet_ping(lnet_process_id_t id, int timeout_ms, lnet_process_id_t *ids,
+ int n_ids)
{
- lnet_event_t event;
- int rc;
- int which;
- int timeout_ms = 1000;
- cfs_sigset_t blocked = cfs_block_allsigs();
-
- LNetMDUnlink(the_lnet.ln_ping_target_md);
- /* NB md could be busy; this just starts the unlink */
-
- for (;;) {
- rc = LNetEQPoll(&the_lnet.ln_ping_target_eq, 1,
- timeout_ms, &event, &which);
+ lnet_handle_eq_t eqh;
+ lnet_handle_md_t mdh;
+ lnet_event_t event;
+ lnet_md_t md = {0};
+ int which;
+ int unlinked = 0;
+ int replied = 0;
+ const int a_long_time = 60000; /* mS */
+ int infosz;
+ lnet_ping_info_t *info;
+ lnet_process_id_t tmpid;
+ int i;
+ int nob;
+ int rc;
+ int rc2;
+ sigset_t blocked;
+
+ infosz = offsetof(lnet_ping_info_t, pi_ni[n_ids]);
+
+ if (n_ids <= 0 ||
+ id.nid == LNET_NID_ANY ||
+ timeout_ms > 500000 || /* arbitrary limit! */
+ n_ids > 20) /* arbitrary limit! */
+ return -EINVAL;
+
+ if (id.pid == LNET_PID_ANY)
+ id.pid = LUSTRE_SRV_LNET_PID;
+
+ LIBCFS_ALLOC(info, infosz);
+ if (info == NULL)
+ return -ENOMEM;
- /* I expect overflow... */
- LASSERT (rc >= 0 || rc == -EOVERFLOW);
+ /* NB 2 events max (including any unlink event) */
+ rc = LNetEQAlloc(2, LNET_EQ_HANDLER_NONE, &eqh);
+ if (rc != 0) {
+ CERROR("Can't allocate EQ: %d\n", rc);
+ goto out_0;
+ }
- if (rc == 0) {
- /* timed out: provide a diagnostic */
- CWARN("Still waiting for ping MD to unlink\n");
- timeout_ms *= 2;
- continue;
- }
+ /* initialize md content */
+ md.start = info;
+ md.length = infosz;
+ md.threshold = 2; /*GET/REPLY*/
+ md.max_size = 0;
+ md.options = LNET_MD_TRUNCATE;
+ md.user_ptr = NULL;
+ md.eq_handle = eqh;
- /* Got a valid event */
- if (event.unlinked)
- break;
- }
-
- rc = LNetEQFree(the_lnet.ln_ping_target_eq);
- LASSERT (rc == 0);
- lnet_destroy_ping_info();
- cfs_restore_sigs(blocked);
-}
+ rc = LNetMDBind(md, LNET_UNLINK, &mdh);
+ if (rc != 0) {
+ CERROR("Can't bind MD: %d\n", rc);
+ goto out_1;
+ }
-int
-lnet_ping (lnet_process_id_t id, int timeout_ms, lnet_process_id_t *ids, int n_ids)
-{
- lnet_handle_eq_t eqh;
- lnet_handle_md_t mdh;
- lnet_event_t event;
- lnet_md_t md = {0};
- int which;
- int unlinked = 0;
- int replied = 0;
- const int a_long_time = 60000; /* mS */
- int infosz = offsetof(lnet_ping_info_t, pi_ni[n_ids]);
- lnet_ping_info_t *info;
- lnet_process_id_t tmpid;
- int i;
- int nob;
- int rc;
- int rc2;
- cfs_sigset_t blocked;
-
- if (n_ids <= 0 ||
- id.nid == LNET_NID_ANY ||
- timeout_ms > 500000 || /* arbitrary limit! */
- n_ids > 20) /* arbitrary limit! */
- return -EINVAL;
-
- if (id.pid == LNET_PID_ANY)
- id.pid = LUSTRE_SRV_LNET_PID;
-
- LIBCFS_ALLOC(info, infosz);
- if (info == NULL)
- return -ENOMEM;
-
- /* NB 2 events max (including any unlink event) */
- rc = LNetEQAlloc(2, LNET_EQ_HANDLER_NONE, &eqh);
- if (rc != 0) {
- CERROR("Can't allocate EQ: %d\n", rc);
- goto out_0;
- }
+ rc = LNetGet(LNET_NID_ANY, mdh, id,
+ LNET_RESERVED_PORTAL,
+ LNET_PROTO_PING_MATCHBITS, 0);
- /* initialize md content */
- md.start = info;
- md.length = infosz;
- md.threshold = 2; /*GET/REPLY*/
- md.max_size = 0;
- md.options = LNET_MD_TRUNCATE;
- md.user_ptr = NULL;
- md.eq_handle = eqh;
-
- rc = LNetMDBind(md, LNET_UNLINK, &mdh);
- if (rc != 0) {
- CERROR("Can't bind MD: %d\n", rc);
- goto out_1;
- }
+ if (rc != 0) {
+ /* Don't CERROR; this could be deliberate! */
- rc = LNetGet(LNET_NID_ANY, mdh, id,
- LNET_RESERVED_PORTAL,
- LNET_PROTO_PING_MATCHBITS, 0);
+ rc2 = LNetMDUnlink(mdh);
+ LASSERT(rc2 == 0);
- if (rc != 0) {
- /* Don't CERROR; this could be deliberate! */
+ /* NB must wait for the UNLINK event below... */
+ unlinked = 1;
+ timeout_ms = a_long_time;
+ }
- rc2 = LNetMDUnlink(mdh);
- LASSERT (rc2 == 0);
+ do {
+ /* MUST block for unlink to complete */
+ if (unlinked)
+ blocked = cfs_block_allsigs();
+
+ rc2 = LNetEQPoll(&eqh, 1, timeout_ms, &event, &which);
+
+ if (unlinked)
+ cfs_restore_sigs(blocked);
+
+ CDEBUG(D_NET, "poll %d(%d %d)%s\n", rc2,
+ (rc2 <= 0) ? -1 : event.type,
+ (rc2 <= 0) ? -1 : event.status,
+ (rc2 > 0 && event.unlinked) ? " unlinked" : "");
+
+ LASSERT(rc2 != -EOVERFLOW); /* can't miss anything */
+
+ if (rc2 <= 0 || event.status != 0) {
+ /* timeout or error */
+ if (!replied && rc == 0)
+ rc = (rc2 < 0) ? rc2 :
+ (rc2 == 0) ? -ETIMEDOUT :
+ event.status;
+
+ if (!unlinked) {
+ /* Ensure completion in finite time... */
+ LNetMDUnlink(mdh);
+ /* No assertion (racing with network) */
+ unlinked = 1;
+ timeout_ms = a_long_time;
+ } else if (rc2 == 0) {
+ /* timed out waiting for unlink */
+ CWARN("ping %s: late network completion\n",
+ libcfs_id2str(id));
+ }
+ } else if (event.type == LNET_EVENT_REPLY) {
+ replied = 1;
+ rc = event.mlength;
+ }
- /* NB must wait for the UNLINK event below... */
- unlinked = 1;
- timeout_ms = a_long_time;
- }
+ } while (rc2 <= 0 || !event.unlinked);
- do {
- /* MUST block for unlink to complete */
- if (unlinked)
- blocked = cfs_block_allsigs();
-
- rc2 = LNetEQPoll(&eqh, 1, timeout_ms, &event, &which);
-
- if (unlinked)
- cfs_restore_sigs(blocked);
-
- CDEBUG(D_NET, "poll %d(%d %d)%s\n", rc2,
- (rc2 <= 0) ? -1 : event.type,
- (rc2 <= 0) ? -1 : event.status,
- (rc2 > 0 && event.unlinked) ? " unlinked" : "");
-
- LASSERT (rc2 != -EOVERFLOW); /* can't miss anything */
-
- if (rc2 <= 0 || event.status != 0) {
- /* timeout or error */
- if (!replied && rc == 0)
- rc = (rc2 < 0) ? rc2 :
- (rc2 == 0) ? -ETIMEDOUT :
- event.status;
-
- if (!unlinked) {
- /* Ensure completion in finite time... */
- LNetMDUnlink(mdh);
- /* No assertion (racing with network) */
- unlinked = 1;
- timeout_ms = a_long_time;
- } else if (rc2 == 0) {
- /* timed out waiting for unlink */
- CWARN("ping %s: late network completion\n",
- libcfs_id2str(id));
- }
- } else if (event.type == LNET_EVENT_REPLY) {
- replied = 1;
- rc = event.mlength;
- }
-
- } while (rc2 <= 0 || !event.unlinked);
-
- if (!replied) {
- if (rc >= 0)
- CWARN("%s: Unexpected rc >= 0 but no reply!\n",
- libcfs_id2str(id));
- rc = -EIO;
- goto out_1;
- }
+ if (!replied) {
+ if (rc >= 0)
+ CWARN("%s: Unexpected rc >= 0 but no reply!\n",
+ libcfs_id2str(id));
+ rc = -EIO;
+ goto out_1;
+ }
- nob = rc;
- LASSERT (nob >= 0 && nob <= infosz);
+ nob = rc;
+ LASSERT(nob >= 0 && nob <= infosz);
- rc = -EPROTO; /* if I can't parse... */
+ rc = -EPROTO; /* if I can't parse... */
- if (nob < 8) {
- /* can't check magic/version */
- CERROR("%s: ping info too short %d\n",
- libcfs_id2str(id), nob);
- goto out_1;
- }
+ if (nob < 8) {
+ /* can't check magic/version */
+ CERROR("%s: ping info too short %d\n",
+ libcfs_id2str(id), nob);
+ goto out_1;
+ }
- if (info->pi_magic == __swab32(LNET_PROTO_PING_MAGIC)) {
- lnet_swap_pinginfo(info);
- } else if (info->pi_magic != LNET_PROTO_PING_MAGIC) {
- CERROR("%s: Unexpected magic %08x\n",
- libcfs_id2str(id), info->pi_magic);
- goto out_1;
- }
+ if (info->pi_magic == __swab32(LNET_PROTO_PING_MAGIC)) {
+ lnet_swap_pinginfo(info);
+ } else if (info->pi_magic != LNET_PROTO_PING_MAGIC) {
+ CERROR("%s: Unexpected magic %08x\n",
+ libcfs_id2str(id), info->pi_magic);
+ goto out_1;
+ }
if ((info->pi_features & LNET_PING_FEAT_NI_STATUS) == 0) {
CERROR("%s: ping w/o NI status: 0x%x\n",
goto out_1;
}
- if (nob < offsetof(lnet_ping_info_t, pi_ni[0])) {
- CERROR("%s: Short reply %d(%d min)\n", libcfs_id2str(id),
- nob, (int)offsetof(lnet_ping_info_t, pi_ni[0]));
- goto out_1;
- }
+ if (nob < offsetof(lnet_ping_info_t, pi_ni[0])) {
+ CERROR("%s: Short reply %d(%d min)\n", libcfs_id2str(id),
+ nob, (int)offsetof(lnet_ping_info_t, pi_ni[0]));
+ goto out_1;
+ }
- if (info->pi_nnis < n_ids)
- n_ids = info->pi_nnis;
+ if (info->pi_nnis < n_ids)
+ n_ids = info->pi_nnis;
- if (nob < offsetof(lnet_ping_info_t, pi_ni[n_ids])) {
- CERROR("%s: Short reply %d(%d expected)\n", libcfs_id2str(id),
- nob, (int)offsetof(lnet_ping_info_t, pi_ni[n_ids]));
- goto out_1;
- }
+ if (nob < offsetof(lnet_ping_info_t, pi_ni[n_ids])) {
+ CERROR("%s: Short reply %d(%d expected)\n", libcfs_id2str(id),
+ nob, (int)offsetof(lnet_ping_info_t, pi_ni[n_ids]));
+ goto out_1;
+ }
- rc = -EFAULT; /* If I SEGV... */
+ rc = -EFAULT; /* If I SEGV... */
- for (i = 0; i < n_ids; i++) {
- tmpid.pid = info->pi_pid;
- tmpid.nid = info->pi_ni[i].ns_nid;
+ for (i = 0; i < n_ids; i++) {
+ tmpid.pid = info->pi_pid;
+ tmpid.nid = info->pi_ni[i].ns_nid;
if (copy_to_user(&ids[i], &tmpid, sizeof(tmpid)))
- goto out_1;
- }
- rc = info->pi_nnis;
+ goto out_1;
+ }
+ rc = info->pi_nnis;
out_1:
- rc2 = LNetEQFree(eqh);
- if (rc2 != 0)
- CERROR("rc2 %d\n", rc2);
- LASSERT (rc2 == 0);
+ rc2 = LNetEQFree(eqh);
+ if (rc2 != 0)
+ CERROR("rc2 %d\n", rc2);
+ LASSERT(rc2 == 0);
out_0:
- LIBCFS_FREE(info, infosz);
- return rc;
+ LIBCFS_FREE(info, infosz);
+ return rc;
}