/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */
#define DEBUG_SUBSYSTEM S_LNET
int rc;
if (*networks != 0 && *ip2nets != 0) {
- LCONSOLE_ERROR(0x101, "Please specify EITHER 'networks' or "
- "'ip2nets' but not both at once\n");
+ LCONSOLE_ERROR_MSG(0x101, "Please specify EITHER 'networks' or "
+ "'ip2nets' but not both at once\n");
return NULL;
}
-
+
if (*ip2nets != 0) {
rc = lnet_parse_ip2nets(&nets, ip2nets);
return (rc == 0) ? nets : NULL;
}
if (!strcmp(portals_compatibility, "weak")) {
- return 1;
LCONSOLE_WARN("Starting in weak portals-compatible mode\n");
+ return 1;
}
if (!strcmp(portals_compatibility, "strong")) {
- return 2;
LCONSOLE_WARN("Starting in strong portals-compatible mode\n");
- }
+ return 2;
+ }
- LCONSOLE_ERROR(0x102, "portals_compatibility=\"%s\" not supported\n",
- portals_compatibility);
+ LCONSOLE_ERROR_MSG(0x102, "portals_compatibility=\"%s\" not supported\n",
+ portals_compatibility);
return -EINVAL;
}
char *
lnet_get_routes(void)
{
        /* Route configuration is taken from the LNET_ROUTES environment
         * variable; fall back to the empty string when it is unset so
         * callers never see NULL. */
        char *str = getenv("LNET_ROUTES");

        return (str == NULL) ? "" : str;
}
#ifdef NOT_YET
if (networks != NULL && ip2nets != NULL) {
- LCONSOLE_ERROR(0x103, "Please set EITHER 'LNET_NETWORKS' or "
- "'LNET_IP2NETS' but not both at once\n");
+ LCONSOLE_ERROR_MSG(0x103, "Please set EITHER 'LNET_NETWORKS' or"
+ " 'LNET_IP2NETS' but not both at once\n");
return NULL;
}
str = default_networks;
*str = 0;
sep = "";
-
+
list_for_each (tmp, &the_lnet.ln_lnds) {
- lnd_t *lnd = list_entry(tmp, lnd_t, lnd_list);
-
- nob = snprintf(str, len, "%s%s", sep,
- libcfs_lnd2str(lnd->lnd_type));
- len -= nob;
- if (len < 0) {
- /* overflowed the string; leave it where it was */
- *str = 0;
- break;
- }
-
- str += nob;
- sep = ",";
+ lnd_t *lnd = list_entry(tmp, lnd_t, lnd_list);
+
+ nob = snprintf(str, len, "%s%s", sep,
+ libcfs_lnd2str(lnd->lnd_type));
+ len -= nob;
+ if (len < 0) {
+ /* overflowed the string; leave it where it was */
+ *str = 0;
+ break;
+ }
+
+ str += nob;
+ sep = ",";
}
return default_networks;
}
lnd_t *
-lnet_find_lnd_by_type (int type)
+lnet_find_lnd_by_type (int type)
{
lnd_t *lnd;
struct list_head *tmp;
if (lnd->lnd_type == type)
return lnd;
}
-
+
return NULL;
}
LASSERT (the_lnet.ln_init);
LASSERT (libcfs_isknown_lnd(lnd->lnd_type));
LASSERT (lnet_find_lnd_by_type(lnd->lnd_type) == NULL);
-
+
list_add_tail (&lnd->lnd_list, &the_lnet.ln_lnds);
lnd->lnd_refcount = 0;
LASSERT (the_lnet.ln_init);
LASSERT (lnet_find_lnd_by_type(lnd->lnd_type) == lnd);
LASSERT (lnd->lnd_refcount == 0);
-
+
list_del (&lnd->lnd_list);
CDEBUG(D_NET, "%s LND unregistered\n", libcfs_lnd2str(lnd->lnd_type));
LASSERT (count == fl->fl_nobjs);
LIBCFS_FREE(fl->fl_objs, fl->fl_nobjs * fl->fl_objsize);
- memset (fl, 0, sizeof (fl));
+ memset (fl, 0, sizeof (*fl));
}
int
}
int
-lnet_setup_handle_hash (void)
+lnet_setup_handle_hash (void)
{
int i;
-
+
/* Arbitrary choice of hash table size */
#ifdef __KERNEL__
- the_lnet.ln_lh_hash_size = CFS_PAGE_SIZE / sizeof (struct list_head);
+ the_lnet.ln_lh_hash_size = (2 * CFS_PAGE_SIZE) / sizeof (struct list_head);
#else
the_lnet.ln_lh_hash_size = (MAX_MES + MAX_MDS + MAX_EQS)/4;
#endif
the_lnet.ln_lh_hash_size * sizeof (struct list_head));
if (the_lnet.ln_lh_hash_table == NULL)
return (-ENOMEM);
-
+
for (i = 0; i < the_lnet.ln_lh_hash_size; i++)
CFS_INIT_LIST_HEAD (&the_lnet.ln_lh_hash_table[i]);
the_lnet.ln_next_object_cookie = LNET_COOKIE_TYPES;
-
+
return (0);
}
{
if (the_lnet.ln_lh_hash_table == NULL)
return;
-
+
LIBCFS_FREE(the_lnet.ln_lh_hash_table,
the_lnet.ln_lh_hash_size * sizeof (struct list_head));
}
lnet_libhandle_t *
-lnet_lookup_cookie (__u64 cookie, int type)
+lnet_lookup_cookie (__u64 cookie, int type)
{
/* ALWAYS called with LNET_LOCK held */
struct list_head *list;
if ((cookie & (LNET_COOKIE_TYPES - 1)) != type)
return (NULL);
-
- hash = ((unsigned int)cookie) % the_lnet.ln_lh_hash_size;
+
+ hash = ((unsigned int)(cookie >> LNET_COOKIE_TYPE_BITS)) % the_lnet.ln_lh_hash_size;
list = &the_lnet.ln_lh_hash_table[hash];
-
+
list_for_each (el, list) {
lnet_libhandle_t *lh = list_entry (el, lnet_libhandle_t,
lh_hash_chain);
-
+
if (lh->lh_cookie == cookie)
return (lh);
}
-
+
return (NULL);
}
void
-lnet_initialise_handle (lnet_libhandle_t *lh, int type)
+lnet_initialise_handle (lnet_libhandle_t *lh, int type)
{
/* ALWAYS called with LNET_LOCK held */
unsigned int hash;
LASSERT (type >= 0 && type < LNET_COOKIE_TYPES);
lh->lh_cookie = the_lnet.ln_next_object_cookie | type;
the_lnet.ln_next_object_cookie += LNET_COOKIE_TYPES;
-
- hash = ((unsigned int)lh->lh_cookie) % the_lnet.ln_lh_hash_size;
+
+ hash = ((unsigned int)(lh->lh_cookie >> LNET_COOKIE_TYPE_BITS)) % the_lnet.ln_lh_hash_size;
list_add (&lh->lh_hash_chain, &the_lnet.ln_lh_hash_table[hash]);
}
list_del (&lh->lh_hash_chain);
}
+struct list_head *
+lnet_portal_mhash_alloc(void)
+{
+ struct list_head *mhash;
+ int i;
+
+ LIBCFS_ALLOC(mhash, sizeof(struct list_head) * LNET_PORTAL_HASH_SIZE);
+ if (mhash == NULL)
+ return NULL;
+
+ for (i = 0; i < LNET_PORTAL_HASH_SIZE; i++)
+ CFS_INIT_LIST_HEAD(&mhash[i]);
+
+ return mhash;
+}
+
+void
+lnet_portal_mhash_free(struct list_head *mhash)
+{
+ int i;
+
+ for (i = 0; i < LNET_PORTAL_HASH_SIZE; i++) {
+ while (!list_empty(&mhash[i])) {
+ lnet_me_t *me = list_entry (mhash[i].next,
+ lnet_me_t, me_list);
+ CERROR ("Active ME %p on exit portal mhash\n", me);
+ list_del (&me->me_list);
+ lnet_me_free (me);
+ }
+ }
+ LIBCFS_FREE(mhash, sizeof(struct list_head) * LNET_PORTAL_HASH_SIZE);
+}
+
int
lnet_init_finalizers(void)
{
the_lnet.ln_nfinalizers = num_online_cpus();
LIBCFS_ALLOC(the_lnet.ln_finalizers,
- the_lnet.ln_nfinalizers *
+ the_lnet.ln_nfinalizers *
sizeof(*the_lnet.ln_finalizers));
if (the_lnet.ln_finalizers == NULL) {
CERROR("Can't allocate ln_finalizers\n");
{
#ifdef __KERNEL__
int i;
-
+
for (i = 0; i < the_lnet.ln_nfinalizers; i++)
LASSERT (the_lnet.ln_finalizers[i] == NULL);
LASSERT (list_empty(&the_lnet.ln_finalizeq));
}
+#ifndef __KERNEL__
+/* Temporary workaround to allow uOSS and test programs force server
+ * mode in userspace. See comments near ln_server_mode_flag in
+ * lnet/lib-types.h */
+
+void
+lnet_server_mode() {
+ the_lnet.ln_server_mode_flag = 1;
+}
+#endif
+
int
lnet_prepare(lnet_pid_t requested_pid)
{
LASSERT ((requested_pid & LNET_PID_USERFLAG) == 0);
the_lnet.ln_pid = requested_pid;
#else
- /* My PID must be unique on this node and flag I'm userspace */
- the_lnet.ln_pid = getpid() | LNET_PID_USERFLAG;
+ if (the_lnet.ln_server_mode_flag) {/* server case (uOSS) */
+ LASSERT ((requested_pid & LNET_PID_USERFLAG) == 0);
+
+ if (cfs_curproc_uid())/* Only root can run user-space server */
+ return -EPERM;
+ the_lnet.ln_pid = requested_pid;
+
+ } else {/* client case (liblustre) */
+
+ /* My PID must be unique on this node and flag I'm userspace */
+ the_lnet.ln_pid = getpid() | LNET_PID_USERFLAG;
+ }
#endif
rc = lnet_descriptor_setup();
if (rc != 0)
goto failed0;
- memset(&the_lnet.ln_counters, 0,
+ memset(&the_lnet.ln_counters, 0,
sizeof(the_lnet.ln_counters));
CFS_INIT_LIST_HEAD (&the_lnet.ln_active_msgs);
goto failed2;
the_lnet.ln_nportals = MAX_PORTALS;
- LIBCFS_ALLOC(the_lnet.ln_portals,
- the_lnet.ln_nportals *
+ LIBCFS_ALLOC(the_lnet.ln_portals,
+ the_lnet.ln_nportals *
sizeof(*the_lnet.ln_portals));
if (the_lnet.ln_portals == NULL) {
rc = -ENOMEM;
}
for (i = 0; i < the_lnet.ln_nportals; i++) {
- CFS_INIT_LIST_HEAD(&(the_lnet.ln_portals[i].ptl_ml));
+ CFS_INIT_LIST_HEAD(&(the_lnet.ln_portals[i].ptl_mlist));
CFS_INIT_LIST_HEAD(&(the_lnet.ln_portals[i].ptl_msgq));
the_lnet.ln_portals[i].ptl_options = 0;
}
return 0;
-
+
failed3:
lnet_fini_finalizers();
failed2:
lnet_unprepare (void)
{
int idx;
-
+
/* NB no LNET_LOCK since this is the last reference. All LND instances
* have shut down already, so it is safe to unlink and free all
* descriptors, even those that appear committed to a network op (eg MD
LASSERT (list_empty(&the_lnet.ln_nis));
LASSERT (list_empty(&the_lnet.ln_zombie_nis));
LASSERT (the_lnet.ln_nzombie_nis == 0);
-
- for (idx = 0; idx < the_lnet.ln_nportals; idx++) {
- LNetClearLazyPortal(idx);
+ for (idx = 0; idx < the_lnet.ln_nportals; idx++) {
LASSERT (list_empty(&the_lnet.ln_portals[idx].ptl_msgq));
- while (!list_empty (&the_lnet.ln_portals[idx].ptl_ml)) {
- lnet_me_t *me = list_entry (the_lnet.ln_portals[idx].ptl_ml.next,
+ while (!list_empty (&the_lnet.ln_portals[idx].ptl_mlist)) {
+ lnet_me_t *me = list_entry (the_lnet.ln_portals[idx].ptl_mlist.next,
lnet_me_t, me_list);
- CERROR ("Active me %p on exit\n", me);
+ CERROR ("Active ME %p on exit\n", me);
list_del (&me->me_list);
lnet_me_free (me);
}
+
+ if (the_lnet.ln_portals[idx].ptl_mhash != NULL) {
+ LASSERT (lnet_portal_is_unique(&the_lnet.ln_portals[idx]));
+ lnet_portal_mhash_free(the_lnet.ln_portals[idx].ptl_mhash);
+ }
}
while (!list_empty (&the_lnet.ln_active_mds)) {
lnet_libmd_t *md = list_entry (the_lnet.ln_active_mds.next,
lnet_libmd_t, md_list);
- CERROR ("Active md %p on exit\n", md);
- list_del (&md->md_list);
+ CERROR ("Active MD %p on exit\n", md);
+ list_del_init (&md->md_list);
lnet_md_free (md);
}
lnet_eq_t *eq = list_entry (the_lnet.ln_active_eqs.next,
lnet_eq_t, eq_list);
- CERROR ("Active eq %p on exit\n", eq);
+ CERROR ("Active EQ %p on exit\n", eq);
list_del (&eq->eq_list);
lnet_eq_free (eq);
}
return ni;
}
}
-
+
return NULL;
}
lnet_islocalnet (__u32 net)
{
lnet_ni_t *ni;
-
+
LNET_LOCK();
ni = lnet_net2ni_locked(net);
if (ni != NULL)
return ni;
}
}
-
+
return NULL;
}
lnet_islocalnid (lnet_nid_t nid)
{
lnet_ni_t *ni;
-
+
LNET_LOCK();
ni = lnet_nid2ni_locked(nid);
if (ni != NULL)
* *first_ni so the acceptor can pass it connections "blind" to retain
* binary compatibility. */
int count = 0;
-#ifdef __KERNEL__
+#if defined(__KERNEL__) || defined(HAVE_LIBPTHREAD)
struct list_head *tmp;
lnet_ni_t *ni;
count++;
}
}
-
+
LNET_UNLOCK();
-#endif
+
+#endif /* defined(__KERNEL__) || defined(HAVE_LIBPTHREAD) */
return count;
}
}
LNET_UNLOCK();
+
+ /* Clear lazy portals and drop delayed messages which hold refs
+ * on their lnet_msg_t::msg_rxpeer */
+ for (i = 0; i < the_lnet.ln_nportals; i++)
+ LNetClearLazyPortal(i);
+
/* Clear the peer table and wait for all peers to go (they hold refs on
* their NIs) */
-
lnet_clear_peer_table();
LNET_LOCK();
int nicount = 0;
char *nets = lnet_get_networks();
- INIT_LIST_HEAD(&nilist);
+ CFS_INIT_LIST_HEAD(&nilist);
if (nets == NULL)
goto failed;
#ifdef __KERNEL__
if (lnd == NULL) {
LNET_MUTEX_UP(&the_lnet.ln_lnd_mutex);
- rc = request_module(libcfs_lnd2modname(lnd_type));
+ rc = request_module("%s", libcfs_lnd2modname(lnd_type));
LNET_MUTEX_DOWN(&the_lnet.ln_lnd_mutex);
lnd = lnet_find_lnd_by_type(lnd_type);
CERROR("Can't load LND %s, module %s, rc=%d\n",
libcfs_lnd2str(lnd_type),
libcfs_lnd2modname(lnd_type), rc);
-#ifndef CONFIG_KMOD
- LCONSOLE_ERROR(0x104, "Your kernel must be "
- "compiled with CONFIG_KMOD set for "
- "automatic module loading.");
+#ifndef HAVE_MODULE_LOADING_SUPPORT
+ LCONSOLE_ERROR_MSG(0x104, "Your kernel must be "
+ "compiled with kernel module "
+ "loading support.");
#endif
goto failed;
}
LNET_MUTEX_UP(&the_lnet.ln_lnd_mutex);
if (rc != 0) {
- LCONSOLE_ERROR(0x105, "Error %d starting up LNI %s\n",
- rc, libcfs_lnd2str(lnd->lnd_type));
+ LCONSOLE_ERROR_MSG(0x105, "Error %d starting up LNI %s"
+ "\n",
+ rc, libcfs_lnd2str(lnd->lnd_type));
LNET_LOCK();
lnd->lnd_refcount--;
LNET_UNLOCK();
goto failed;
}
+ LASSERT (ni->ni_peertimeout <= 0 || lnd->lnd_query != NULL);
+
list_del(&ni->ni_list);
LNET_LOCK();
}
} else {
# ifndef HAVE_LIBPTHREAD
- LCONSOLE_ERROR(0x106, "LND %s not supported in a "
- "single-threaded runtime\n",
- libcfs_lnd2str(lnd_type));
+ LCONSOLE_ERROR_MSG(0x106, "LND %s not supported in a "
+ "single-threaded runtime\n",
+ libcfs_lnd2str(lnd_type));
goto failed;
# endif
}
#endif
if (ni->ni_peertxcredits == 0 ||
ni->ni_maxtxcredits == 0) {
- LCONSOLE_ERROR(0x107, "LNI %s has no %scredits\n",
- libcfs_lnd2str(lnd->lnd_type),
- ni->ni_peertxcredits == 0 ?
- "" : "per-peer ");
+ LCONSOLE_ERROR_MSG(0x107, "LNI %s has no %scredits\n",
+ libcfs_lnd2str(lnd->lnd_type),
+ ni->ni_peertxcredits == 0 ?
+ "" : "per-peer ");
goto failed;
}
ni->ni_txcredits = ni->ni_mintxcredits = ni->ni_maxtxcredits;
- CDEBUG(D_LNI, "Added LNI %s [%d/%d]\n",
+ CDEBUG(D_LNI, "Added LNI %s [%d/%d/%d/%d]\n",
libcfs_nid2str(ni->ni_nid),
- ni->ni_peertxcredits, ni->ni_txcredits);
+ ni->ni_peertxcredits, ni->ni_txcredits,
+ ni->ni_peerrtrcredits, ni->ni_peertimeout);
/* Handle nidstrings for network 0 just like this one */
if (the_lnet.ln_ptlcompat > 0) {
if (nicount > 0) {
- LCONSOLE_ERROR(0x108, "Can't run > 1 network "
- "when portals_compatibility is set\n");
+ LCONSOLE_ERROR_MSG(0x108, "Can't run > 1 "
+ "network when portals_compatibility is "
+ "set\n");
goto failed;
}
libcfs_setnet0alias(lnd->lnd_type);
}
-
+
nicount++;
}
if (the_lnet.ln_eqwaitni != NULL && nicount > 1) {
lnd_type = the_lnet.ln_eqwaitni->ni_lnd->lnd_type;
- LCONSOLE_ERROR(0x109, "LND %s can only run single-network\n",
- libcfs_lnd2str(lnd_type));
+ LCONSOLE_ERROR_MSG(0x109, "LND %s can only run single-network"
+ "\n",
+ libcfs_lnd2str(lnd_type));
goto failed;
}
return rc;
lnet_init_locks();
- CFS_INIT_LIST_HEAD(&the_lnet.ln_lnds);
the_lnet.ln_ptlcompat = rc;
the_lnet.ln_refcount = 0;
the_lnet.ln_init = 1;
+ the_lnet.ln_rc_eqh = LNET_EQ_NONE;
+ CFS_INIT_LIST_HEAD(&the_lnet.ln_lnds);
+ CFS_INIT_LIST_HEAD(&the_lnet.ln_zombie_rcd);
#ifdef __KERNEL__
/* All LNDs apart from the LOLND are in separate modules. They
goto out;
}
+ lnet_get_tunables();
+
if (requested_pid == LNET_PID_ANY) {
/* Don't instantiate LNET just for me */
rc = -ENETDOWN;
the_lnet.ln_refcount = 1;
/* Now I may use my own API functions... */
- rc = lnet_router_checker_start();
+ /* NB router checker needs the_lnet.ln_ping_info in
+ * lnet_router_checker -> lnet_update_ni_status */
+ rc = lnet_ping_target_init();
if (rc != 0)
goto failed3;
- rc = lnet_ping_target_init();
+ rc = lnet_router_checker_start();
if (rc != 0)
goto failed4;
goto out;
failed4:
- lnet_router_checker_stop();
+ lnet_ping_target_fini();
failed3:
the_lnet.ln_refcount = 0;
lnet_acceptor_stop();
LASSERT (!the_lnet.ln_niinit_self);
lnet_proc_fini();
- lnet_ping_target_fini();
lnet_router_checker_stop();
+ lnet_ping_target_fini();
/* Teardown fns that use my own API functions BEFORE here */
the_lnet.ln_refcount = 0;
case IOC_LIBCFS_FAIL_NID:
return lnet_fail_nid(data->ioc_nid, data->ioc_count);
-
+
case IOC_LIBCFS_ADD_ROUTE:
- rc = lnet_add_route(data->ioc_net, data->ioc_count,
+ rc = lnet_add_route(data->ioc_net, data->ioc_count,
data->ioc_nid);
return (rc != 0) ? rc : lnet_check_routes();
-
+
case IOC_LIBCFS_DEL_ROUTE:
return lnet_del_route(data->ioc_net, data->ioc_nid);
case IOC_LIBCFS_GET_ROUTE:
- return lnet_get_route(data->ioc_count,
- &data->ioc_net, &data->ioc_count,
+ return lnet_get_route(data->ioc_count,
+ &data->ioc_net, &data->ioc_count,
&data->ioc_nid, &data->ioc_flags);
case IOC_LIBCFS_NOTIFY_ROUTER:
- return lnet_notify(NULL, data->ioc_nid, data->ioc_flags,
- (time_t)data->ioc_u64[0]);
+ return lnet_notify(NULL, data->ioc_nid, data->ioc_flags,
+ cfs_time_current() -
+ cfs_time_seconds(cfs_time_current_sec() -
+ (time_t)data->ioc_u64[0]));
case IOC_LIBCFS_PORTALS_COMPATIBILITY:
return the_lnet.ln_ptlcompat;
rc = LNetDist(data->ioc_nid, &data->ioc_nid, &data->ioc_u32[1]);
if (rc < 0 && rc != -EHOSTUNREACH)
return rc;
-
+
data->ioc_u32[0] = rc;
return 0;
} else {
(void)ni->ni_lnd->lnd_ctl(ni, cmd, arg);
}
-
+
lnet_ni_decref(ni);
}
return 0;
}
-
+
default:
ni = lnet_net2ni(data->ioc_net);
if (ni == NULL)
list_for_each(tmp, &the_lnet.ln_nis) {
if (index-- != 0)
continue;
-
+
ni = list_entry(tmp, lnet_ni_t, ni_list);
id->nid = ni->ni_nid;
snprintf(str, len, LPX64, h.cookie);
}
-
-int
-lnet_ping_target_init(void)
+static int
+lnet_create_ping_info(void)
{
- lnet_handle_me_t meh;
- lnet_process_id_t id;
- int rc;
- int rc2;
+ int i;
int n;
+ int rc;
int infosz;
- int i;
-
+ lnet_ni_t *ni;
+ lnet_process_id_t id;
+ lnet_ping_info_t *pinfo;
+
for (n = 0; ; n++) {
rc = LNetGetId(n, &id);
if (rc == -ENOENT)
LASSERT (rc == 0);
}
- infosz = offsetof(lnet_ping_info_t, pi_nid[n]);
- LIBCFS_ALLOC(the_lnet.ln_ping_info, infosz);
- if (the_lnet.ln_ping_info == NULL) {
+ infosz = offsetof(lnet_ping_info_t, pi_ni[n]);
+ LIBCFS_ALLOC(pinfo, infosz);
+ if (pinfo == NULL) {
CERROR("Can't allocate ping info[%d]\n", n);
return -ENOMEM;
}
- the_lnet.ln_ping_info->pi_magic = LNET_PROTO_PING_MAGIC;
- the_lnet.ln_ping_info->pi_version = LNET_PROTO_PING_VERSION;
- the_lnet.ln_ping_info->pi_pid = the_lnet.ln_pid;
- the_lnet.ln_ping_info->pi_nnids = n;
+ pinfo->pi_nnis = n;
+ pinfo->pi_pid = the_lnet.ln_pid;
+ pinfo->pi_magic = LNET_PROTO_PING_MAGIC;
+ pinfo->pi_version = LNET_PROTO_PING_VERSION;
for (i = 0; i < n; i++) {
+ lnet_ni_status_t *ns = &pinfo->pi_ni[i];
+
rc = LNetGetId(i, &id);
LASSERT (rc == 0);
- the_lnet.ln_ping_info->pi_nid[i] = id.nid;
+
+ ns->ns_nid = id.nid;
+ ns->ns_status = LNET_NI_STATUS_UP;
+
+ LNET_LOCK();
+
+ ni = lnet_nid2ni_locked(id.nid);
+ LASSERT (ni != NULL);
+ LASSERT (ni->ni_status == NULL);
+ ni->ni_status = ns;
+ lnet_ni_decref_locked(ni);
+
+ LNET_UNLOCK();
+ }
+
+ the_lnet.ln_ping_info = pinfo;
+ return 0;
+}
+
+static void
+lnet_destroy_ping_info(void)
+{
+ lnet_ni_t *ni;
+
+ LNET_LOCK();
+
+ list_for_each_entry (ni, &the_lnet.ln_nis, ni_list) {
+ ni->ni_status = NULL;
}
-
+
+ LNET_UNLOCK();
+
+ LIBCFS_FREE(the_lnet.ln_ping_info,
+ offsetof(lnet_ping_info_t,
+ pi_ni[the_lnet.ln_ping_info->pi_nnis]));
+ the_lnet.ln_ping_info = NULL;
+ return;
+}
+
+int
+lnet_ping_target_init(void)
+{
+ lnet_handle_me_t meh;
+ int rc;
+ int rc2;
+ int infosz;
+
+ rc = lnet_create_ping_info();
+ if (rc != 0)
+ return rc;
+
/* We can have a tiny EQ since we only need to see the unlink event on
* teardown, which by definition is the last one! */
rc = LNetEQAlloc(2, LNET_EQ_HANDLER_NONE, &the_lnet.ln_ping_target_eq);
goto failed_1;
}
+ infosz = offsetof(lnet_ping_info_t,
+ pi_ni[the_lnet.ln_ping_info->pi_nnis]);
rc = LNetMDAttach(meh,
(lnet_md_t){.start = the_lnet.ln_ping_info,
.length = infosz,
rc2 = LNetEQFree(the_lnet.ln_ping_target_eq);
LASSERT (rc2 == 0);
failed_0:
- LIBCFS_FREE(the_lnet.ln_ping_info, infosz);
-
+ lnet_destroy_ping_info();
return rc;
}
rc = LNetEQFree(the_lnet.ln_ping_target_eq);
LASSERT (rc == 0);
-
- LIBCFS_FREE(the_lnet.ln_ping_info,
- offsetof(lnet_ping_info_t,
- pi_nid[the_lnet.ln_ping_info->pi_nnids]));
-
+ lnet_destroy_ping_info();
cfs_restore_sigs(blocked);
}
int unlinked = 0;
int replied = 0;
const int a_long_time = 60000; /* mS */
- int infosz = offsetof(lnet_ping_info_t, pi_nid[n_ids]);
+ int infosz = offsetof(lnet_ping_info_t, pi_ni[n_ids]);
lnet_ping_info_t *info;
lnet_process_id_t tmpid;
int i;
CWARN("ping %s: late network completion\n",
libcfs_id2str(id));
}
-
} else if (event.type == LNET_EVENT_REPLY) {
replied = 1;
rc = event.mlength;
}
if (info->pi_magic == __swab32(LNET_PROTO_PING_MAGIC)) {
- /* NB I might be swabbing garbage until I check below, but it
- * doesn't matter */
- __swab32s(&info->pi_version);
- __swab32s(&info->pi_pid);
- __swab32s(&info->pi_nnids);
- for (i = 0; i < info->pi_nnids && i < n_ids; i++)
- __swab64s(&info->pi_nid[i]);
-
+ lnet_swap_pinginfo(info);
} else if (info->pi_magic != LNET_PROTO_PING_MAGIC) {
CERROR("%s: Unexpected magic %08x\n",
libcfs_id2str(id), info->pi_magic);
goto out_1;
}
- if (nob < offsetof(lnet_ping_info_t, pi_nid[0])) {
- CERROR("%s: Short reply %d(%d min)\n", libcfs_id2str(id),
- nob, (int)offsetof(lnet_ping_info_t, pi_nid[0]));
+ if (nob < offsetof(lnet_ping_info_t, pi_ni[0])) {
+ CERROR("%s: Short reply %d(%d min)\n", libcfs_id2str(id),
+ nob, (int)offsetof(lnet_ping_info_t, pi_ni[0]));
goto out_1;
}
- if (info->pi_nnids < n_ids)
- n_ids = info->pi_nnids;
+ if (info->pi_nnis < n_ids)
+ n_ids = info->pi_nnis;
- if (nob < offsetof(lnet_ping_info_t, pi_nid[n_ids])) {
- CERROR("%s: Short reply %d(%d expected)\n", libcfs_id2str(id),
- nob, (int)offsetof(lnet_ping_info_t, pi_nid[n_ids]));
+ if (nob < offsetof(lnet_ping_info_t, pi_ni[n_ids])) {
+ CERROR("%s: Short reply %d(%d expected)\n", libcfs_id2str(id),
+ nob, (int)offsetof(lnet_ping_info_t, pi_ni[n_ids]));
goto out_1;
}
for (i = 0; i < n_ids; i++) {
tmpid.pid = info->pi_pid;
- tmpid.nid = info->pi_nid[i];
+ tmpid.nid = info->pi_ni[i].ns_nid;
#ifdef __KERNEL__
if (copy_to_user(&ids[i], &tmpid, sizeof(tmpid)))
goto out_1;
ids[i] = tmpid;
#endif
}
- rc = info->pi_nnids;
+ rc = info->pi_nnis;
out_1:
rc2 = LNetEQFree(eqh);