* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, Whamcloud, Inc.
+ * Copyright (c) 2011, 2013, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
#define DEBUG_SUBSYSTEM S_LNET
#include <lnet/lib-lnet.h>
+#ifdef __KERNEL__
+#include <linux/log2.h>
+#endif
#ifdef __KERNEL__
#define D_LNI D_CONSOLE
#endif
lnet_t the_lnet; /* THE state of the network */
+EXPORT_SYMBOL(the_lnet);
#ifdef __KERNEL__
CFS_MODULE_PARM(routes, "s", charp, 0444,
"routes to non-local networks");
+static int rnet_htable_size = LNET_REMOTE_NETS_HASH_DEFAULT;
+CFS_MODULE_PARM(rnet_htable_size, "i", int, 0444,
+ "size of remote network hash table");
+
char *
lnet_get_routes(void)
{
void
lnet_init_locks(void)
{
- cfs_spin_lock_init(&the_lnet.ln_eq_wait_lock);
- cfs_waitq_init(&the_lnet.ln_eq_waitq);
- cfs_mutex_init(&the_lnet.ln_lnd_mutex);
- cfs_mutex_init(&the_lnet.ln_api_mutex);
+ spin_lock_init(&the_lnet.ln_eq_wait_lock);
+ init_waitqueue_head(&the_lnet.ln_eq_waitq);
+ mutex_init(&the_lnet.ln_lnd_mutex);
+ mutex_init(&the_lnet.ln_api_mutex);
}
void
char *
lnet_get_networks (void)
{
- static char default_networks[256];
- char *networks = getenv ("LNET_NETWORKS");
- char *ip2nets = getenv ("LNET_IP2NETS");
- char *str;
- char *sep;
- int len;
- int nob;
- int rc;
- cfs_list_t *tmp;
+ static char default_networks[256];
+ char *networks = getenv("LNET_NETWORKS");
+ char *str;
+ char *sep;
+ int len;
+ int nob;
+ struct list_head *tmp;
-#ifdef NOT_YET
- if (networks != NULL && ip2nets != NULL) {
- LCONSOLE_ERROR_MSG(0x103, "Please set EITHER 'LNET_NETWORKS' or"
- " 'LNET_IP2NETS' but not both at once\n");
- return NULL;
- }
-
- if (ip2nets != NULL) {
- rc = lnet_parse_ip2nets(&networks, ip2nets);
- return (rc == 0) ? networks : NULL;
- }
-#else
- SET_BUT_UNUSED(ip2nets);
- SET_BUT_UNUSED(rc);
-#endif
- if (networks != NULL)
- return networks;
+ if (networks != NULL)
+ return networks;
/* In userland, the default 'networks=' is the list of known net types */
-
len = sizeof(default_networks);
str = default_networks;
*str = 0;
sep = "";
- cfs_list_for_each (tmp, &the_lnet.ln_lnds) {
- lnd_t *lnd = cfs_list_entry(tmp, lnd_t, lnd_list);
+ list_for_each(tmp, &the_lnet.ln_lnds) {
+ lnd_t *lnd = list_entry(tmp, lnd_t, lnd_list);
- nob = snprintf(str, len, "%s%s", sep,
- libcfs_lnd2str(lnd->lnd_type));
- len -= nob;
- if (len < 0) {
- /* overflowed the string; leave it where it was */
- *str = 0;
- break;
- }
-
- str += nob;
- sep = ",";
- }
+ nob = snprintf(str, len, "%s%s", sep,
+ libcfs_lnd2str(lnd->lnd_type));
+ if (nob >= len) {
+ /* overflowed the string; leave it where it was */
+ *str = 0;
+ break;
+ }
+ len -= nob;
+ str += nob;
+ sep = ",";
+ }
- return default_networks;
+ return default_networks;
}
# ifndef HAVE_LIBPTHREAD
#endif
static int
-lnet_create_locks(void)
+lnet_create_remote_nets_table(void)
{
- lnet_init_locks();
+ int i;
+ struct list_head *hash;
+
+ LASSERT(the_lnet.ln_remote_nets_hash == NULL);
+ LASSERT(the_lnet.ln_remote_nets_hbits > 0);
+ LIBCFS_ALLOC(hash, LNET_REMOTE_NETS_HASH_SIZE * sizeof(*hash));
+ if (hash == NULL) {
+ CERROR("Failed to create remote nets hash table\n");
+ return -ENOMEM;
+ }
- the_lnet.ln_res_lock = cfs_percpt_lock_alloc(lnet_cpt_table());
- if (the_lnet.ln_res_lock == NULL)
- goto failed;
+ for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++)
+ INIT_LIST_HEAD(&hash[i]);
+ the_lnet.ln_remote_nets_hash = hash;
+ return 0;
+}
- the_lnet.ln_net_lock = cfs_percpt_lock_alloc(lnet_cpt_table());
- if (the_lnet.ln_net_lock == NULL)
- goto failed;
+static void
+lnet_destroy_remote_nets_table(void)
+{
+ int i;
- return 0;
+ if (the_lnet.ln_remote_nets_hash == NULL)
+ return;
- failed:
- lnet_fini_locks();
- return -ENOMEM;
+ for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++)
+ LASSERT(list_empty(&the_lnet.ln_remote_nets_hash[i]));
+
+ LIBCFS_FREE(the_lnet.ln_remote_nets_hash,
+ LNET_REMOTE_NETS_HASH_SIZE *
+ sizeof(the_lnet.ln_remote_nets_hash[0]));
+ the_lnet.ln_remote_nets_hash = NULL;
}
static void
lnet_fini_locks();
}
+static int
+lnet_create_locks(void)
+{
+ lnet_init_locks();
+
+ the_lnet.ln_res_lock = cfs_percpt_lock_alloc(lnet_cpt_table());
+ if (the_lnet.ln_res_lock == NULL)
+ goto failed;
+
+ the_lnet.ln_net_lock = cfs_percpt_lock_alloc(lnet_cpt_table());
+ if (the_lnet.ln_net_lock == NULL)
+ goto failed;
+
+ return 0;
+
+ failed:
+ lnet_destroy_locks();
+ return -ENOMEM;
+}
+
void lnet_assert_wire_constants (void)
{
/* Wire protocol assertions generated by 'wirecheck'
lnd_t *
lnet_find_lnd_by_type (int type)
{
- lnd_t *lnd;
- cfs_list_t *tmp;
+ lnd_t *lnd;
+ struct list_head *tmp;
- /* holding lnd mutex */
- cfs_list_for_each (tmp, &the_lnet.ln_lnds) {
- lnd = cfs_list_entry(tmp, lnd_t, lnd_list);
+ /* holding lnd mutex */
+ list_for_each(tmp, &the_lnet.ln_lnds) {
+ lnd = list_entry(tmp, lnd_t, lnd_list);
- if ((int)lnd->lnd_type == type)
- return lnd;
- }
-
- return NULL;
+ if ((int)lnd->lnd_type == type)
+ return lnd;
+ }
+ return NULL;
}
void
lnet_register_lnd (lnd_t *lnd)
{
- LNET_MUTEX_LOCK(&the_lnet.ln_lnd_mutex);
+ LNET_MUTEX_LOCK(&the_lnet.ln_lnd_mutex);
- LASSERT (the_lnet.ln_init);
- LASSERT (libcfs_isknown_lnd(lnd->lnd_type));
- LASSERT (lnet_find_lnd_by_type(lnd->lnd_type) == NULL);
+ LASSERT(the_lnet.ln_init);
+ LASSERT(libcfs_isknown_lnd(lnd->lnd_type));
+ LASSERT(lnet_find_lnd_by_type(lnd->lnd_type) == NULL);
- cfs_list_add_tail (&lnd->lnd_list, &the_lnet.ln_lnds);
- lnd->lnd_refcount = 0;
+ list_add_tail(&lnd->lnd_list, &the_lnet.ln_lnds);
+ lnd->lnd_refcount = 0;
- CDEBUG(D_NET, "%s LND registered\n", libcfs_lnd2str(lnd->lnd_type));
+ CDEBUG(D_NET, "%s LND registered\n", libcfs_lnd2str(lnd->lnd_type));
- LNET_MUTEX_UNLOCK(&the_lnet.ln_lnd_mutex);
+ LNET_MUTEX_UNLOCK(&the_lnet.ln_lnd_mutex);
}
+EXPORT_SYMBOL(lnet_register_lnd);
void
lnet_unregister_lnd (lnd_t *lnd)
{
- LNET_MUTEX_LOCK(&the_lnet.ln_lnd_mutex);
+ LNET_MUTEX_LOCK(&the_lnet.ln_lnd_mutex);
- LASSERT (the_lnet.ln_init);
- LASSERT (lnet_find_lnd_by_type(lnd->lnd_type) == lnd);
- LASSERT (lnd->lnd_refcount == 0);
+ LASSERT(the_lnet.ln_init);
+ LASSERT(lnet_find_lnd_by_type(lnd->lnd_type) == lnd);
+ LASSERT(lnd->lnd_refcount == 0);
- cfs_list_del (&lnd->lnd_list);
- CDEBUG(D_NET, "%s LND unregistered\n", libcfs_lnd2str(lnd->lnd_type));
+ list_del(&lnd->lnd_list);
+ CDEBUG(D_NET, "%s LND unregistered\n", libcfs_lnd2str(lnd->lnd_type));
- LNET_MUTEX_UNLOCK(&the_lnet.ln_lnd_mutex);
+ LNET_MUTEX_UNLOCK(&the_lnet.ln_lnd_mutex);
}
+EXPORT_SYMBOL(lnet_unregister_lnd);
void
lnet_counters_get(lnet_counters_t *counters)
counters->send_count += ctr->send_count;
counters->recv_count += ctr->recv_count;
counters->route_count += ctr->route_count;
- counters->drop_length += ctr->drop_length;
+ counters->drop_count += ctr->drop_count;
counters->send_length += ctr->send_length;
counters->recv_length += ctr->recv_length;
counters->route_length += ctr->route_length;
#ifdef LNET_USE_LIB_FREELIST
int
-lnet_freelist_init (lnet_freelist_t *fl, int n, int size)
+lnet_freelist_init(lnet_freelist_t *fl, int n, int size)
{
char *space;
if (space == NULL)
return (-ENOMEM);
- CFS_INIT_LIST_HEAD (&fl->fl_list);
- fl->fl_objs = space;
- fl->fl_nobjs = n;
- fl->fl_objsize = size;
+ INIT_LIST_HEAD(&fl->fl_list);
+ fl->fl_objs = space;
+ fl->fl_nobjs = n;
+ fl->fl_objsize = size;
- do
- {
- memset (space, 0, size);
- cfs_list_add ((cfs_list_t *)space, &fl->fl_list);
- space += size;
- } while (--n != 0);
+ do {
+ list_add((struct list_head *)space, &fl->fl_list);
+ space += size;
+ } while (--n != 0);
- return (0);
+ return 0;
}
void
-lnet_freelist_fini (lnet_freelist_t *fl)
+lnet_freelist_fini(lnet_freelist_t *fl)
{
- cfs_list_t *el;
- int count;
+ struct list_head *el;
+ int count;
if (fl->fl_nobjs == 0)
return;
#endif /* LNET_USE_LIB_FREELIST */
-__u64
-lnet_create_interface_cookie (void)
+__u64 lnet_create_interface_cookie (void)
{
- /* NB the interface cookie in wire handles guards against delayed
- * replies and ACKs appearing valid after reboot. Initialisation time,
- * even if it's only implemented to millisecond resolution is probably
- * easily good enough. */
- struct timeval tv;
- __u64 cookie;
+ /* NB the interface cookie in wire handles guards against delayed
+ * replies and ACKs appearing valid after reboot. Initialisation time,
+ * even if it's only implemented to millisecond resolution is probably
+ * easily good enough. */
+ struct timeval tv;
+ __u64 cookie;
#ifndef __KERNEL__
- int rc = gettimeofday (&tv, NULL);
- LASSERT (rc == 0);
+ int rc = gettimeofday (&tv, NULL);
+ LASSERT (rc == 0);
#else
- cfs_gettimeofday(&tv);
+ do_gettimeofday(&tv);
#endif
- cookie = tv.tv_sec;
- cookie *= 1000000;
- cookie += tv.tv_usec;
- return cookie;
+ cookie = tv.tv_sec;
+ cookie *= 1000000;
+ cookie += tv.tv_usec;
+ return cookie;
}
static char *
if (rec->rec_type == 0) /* not set yet, it's uninitialized */
return;
- while (!cfs_list_empty(&rec->rec_active)) {
- cfs_list_t *e = rec->rec_active.next;
+ while (!list_empty(&rec->rec_active)) {
+ struct list_head *e = rec->rec_active.next;
- cfs_list_del_init(e);
+ list_del_init(e);
if (rec->rec_type == LNET_COOKIE_TYPE_EQ) {
- lnet_eq_free(cfs_list_entry(e, lnet_eq_t, eq_list));
+ lnet_eq_free(list_entry(e, lnet_eq_t, eq_list));
} else if (rec->rec_type == LNET_COOKIE_TYPE_MD) {
- lnet_md_free(cfs_list_entry(e, lnet_libmd_t, md_list));
+ lnet_md_free(list_entry(e, lnet_libmd_t, md_list));
} else { /* NB: Active MEs should be attached on portals */
LBUG();
LASSERT(rec->rec_type == 0);
rec->rec_type = type;
- CFS_INIT_LIST_HEAD(&rec->rec_active);
+ INIT_LIST_HEAD(&rec->rec_active);
#ifdef LNET_USE_LIB_FREELIST
memset(&rec->rec_freelist, 0, sizeof(rec->rec_freelist));
}
for (i = 0; i < LNET_LH_HASH_SIZE; i++)
- CFS_INIT_LIST_HEAD(&rec->rec_lh_hash[i]);
+ INIT_LIST_HEAD(&rec->rec_lh_hash[i]);
return 0;
lnet_res_lh_lookup(struct lnet_res_container *rec, __u64 cookie)
{
/* ALWAYS called with lnet_res_lock held */
- cfs_list_t *head;
+ struct list_head *head;
lnet_libhandle_t *lh;
unsigned int hash;
hash = cookie >> (LNET_COOKIE_TYPE_BITS + LNET_CPT_BITS);
head = &rec->rec_lh_hash[hash & LNET_LH_HASH_MASK];
- cfs_list_for_each_entry(lh, head, lh_hash_chain) {
+ list_for_each_entry(lh, head, lh_hash_chain) {
if (lh->lh_cookie == cookie)
return lh;
}
hash = (lh->lh_cookie >> ibits) & LNET_LH_HASH_MASK;
- cfs_list_add(&lh->lh_hash_chain, &rec->rec_lh_hash[hash]);
+ list_add(&lh->lh_hash_chain, &rec->rec_lh_hash[hash]);
}
#ifndef __KERNEL__
the_lnet.ln_pid = requested_pid;
#else
if (the_lnet.ln_server_mode_flag) {/* server case (uOSS) */
- LASSERT ((requested_pid & LNET_PID_USERFLAG) == 0);
+ LASSERT ((requested_pid & LNET_PID_USERFLAG) == 0);
- if (cfs_curproc_uid())/* Only root can run user-space server */
- return -EPERM;
- the_lnet.ln_pid = requested_pid;
+ if (current_uid() != 0) /* Only root can run user-space server */
+ return -EPERM;
+ the_lnet.ln_pid = requested_pid;
} else {/* client case (liblustre) */
}
#endif
- CFS_INIT_LIST_HEAD(&the_lnet.ln_test_peers);
- CFS_INIT_LIST_HEAD(&the_lnet.ln_nis);
- CFS_INIT_LIST_HEAD(&the_lnet.ln_nis_cpt);
- CFS_INIT_LIST_HEAD(&the_lnet.ln_nis_zombie);
- CFS_INIT_LIST_HEAD(&the_lnet.ln_remote_nets);
- CFS_INIT_LIST_HEAD(&the_lnet.ln_routers);
+ INIT_LIST_HEAD(&the_lnet.ln_test_peers);
+ INIT_LIST_HEAD(&the_lnet.ln_nis);
+ INIT_LIST_HEAD(&the_lnet.ln_nis_cpt);
+ INIT_LIST_HEAD(&the_lnet.ln_nis_zombie);
+ INIT_LIST_HEAD(&the_lnet.ln_routers);
+
+ rc = lnet_create_remote_nets_table();
+ if (rc != 0)
+ goto failed;
the_lnet.ln_interface_cookie = lnet_create_interface_cookie();
int
lnet_unprepare (void)
{
- /* NB no LNET_LOCK since this is the last reference. All LND instances
- * have shut down already, so it is safe to unlink and free all
- * descriptors, even those that appear committed to a network op (eg MD
- * with non-zero pending count) */
+ /* NB no LNET_LOCK since this is the last reference. All LND instances
+ * have shut down already, so it is safe to unlink and free all
+ * descriptors, even those that appear committed to a network op (eg MD
+ * with non-zero pending count) */
lnet_fail_nid(LNET_NID_ANY, 0);
LASSERT(the_lnet.ln_refcount == 0);
- LASSERT(cfs_list_empty(&the_lnet.ln_test_peers));
- LASSERT(cfs_list_empty(&the_lnet.ln_nis));
- LASSERT(cfs_list_empty(&the_lnet.ln_nis_cpt));
- LASSERT(cfs_list_empty(&the_lnet.ln_nis_zombie));
+ LASSERT(list_empty(&the_lnet.ln_test_peers));
+ LASSERT(list_empty(&the_lnet.ln_nis));
+ LASSERT(list_empty(&the_lnet.ln_nis_cpt));
+ LASSERT(list_empty(&the_lnet.ln_nis_zombie));
lnet_portals_destroy();
cfs_percpt_free(the_lnet.ln_counters);
the_lnet.ln_counters = NULL;
}
+ lnet_destroy_remote_nets_table();
return 0;
}
lnet_ni_t *
lnet_net2ni_locked(__u32 net, int cpt)
{
- cfs_list_t *tmp;
- lnet_ni_t *ni;
+ struct list_head *tmp;
+ lnet_ni_t *ni;
LASSERT(cpt != LNET_LOCK_EX);
- cfs_list_for_each(tmp, &the_lnet.ln_nis) {
- ni = cfs_list_entry(tmp, lnet_ni_t, ni_list);
+ list_for_each(tmp, &the_lnet.ln_nis) {
+ ni = list_entry(tmp, lnet_ni_t, ni_list);
if (LNET_NIDNET(ni->ni_nid) == net) {
lnet_ni_addref_locked(ni, cpt);
if (number == 1)
return 0;
- val = cfs_hash_long(key, LNET_CPT_BITS);
+ val = hash_long(key, LNET_CPT_BITS);
/* NB: LNET_CP_NUMBER doesn't have to be PO2 */
if (val < number)
return val;
return 0; /* the only one */
/* take lnet_net_lock(any) would be OK */
- if (!cfs_list_empty(&the_lnet.ln_nis_cpt)) {
- cfs_list_for_each_entry(ni, &the_lnet.ln_nis_cpt, ni_cptlist) {
+ if (!list_empty(&the_lnet.ln_nis_cpt)) {
+ list_for_each_entry(ni, &the_lnet.ln_nis_cpt, ni_cptlist) {
if (LNET_NIDNET(ni->ni_nid) != LNET_NIDNET(nid))
continue;
if (LNET_CPT_NUMBER == 1)
return 0; /* the only one */
- if (cfs_list_empty(&the_lnet.ln_nis_cpt))
+ if (list_empty(&the_lnet.ln_nis_cpt))
return lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
cpt = lnet_net_lock_current();
lnet_ni_t *
lnet_nid2ni_locked(lnet_nid_t nid, int cpt)
{
- struct lnet_ni *ni;
- cfs_list_t *tmp;
+ struct lnet_ni *ni;
+ struct list_head *tmp;
LASSERT(cpt != LNET_LOCK_EX);
- cfs_list_for_each(tmp, &the_lnet.ln_nis) {
- ni = cfs_list_entry(tmp, lnet_ni_t, ni_list);
+ list_for_each(tmp, &the_lnet.ln_nis) {
+ ni = list_entry(tmp, lnet_ni_t, ni_list);
if (ni->ni_nid == nid) {
lnet_ni_addref_locked(ni, cpt);
lnet_count_acceptor_nis (void)
{
/* Return the # of NIs that need the acceptor. */
- int count = 0;
+ int count = 0;
#if defined(__KERNEL__) || defined(HAVE_LIBPTHREAD)
- cfs_list_t *tmp;
- struct lnet_ni *ni;
- int cpt;
+ struct list_head *tmp;
+ struct lnet_ni *ni;
+ int cpt;
cpt = lnet_net_lock_current();
- cfs_list_for_each(tmp, &the_lnet.ln_nis) {
- ni = cfs_list_entry(tmp, lnet_ni_t, ni_list);
+ list_for_each(tmp, &the_lnet.ln_nis) {
+ ni = list_entry(tmp, lnet_ni_t, ni_list);
if (ni->ni_lnd->lnd_accept != NULL)
count++;
int islo;
lnet_ni_t *ni;
- /* NB called holding the global mutex */
+ /* NB called holding the global mutex */
- /* All quiet on the API front */
+ /* All quiet on the API front */
LASSERT(!the_lnet.ln_shutdown);
LASSERT(the_lnet.ln_refcount == 0);
- LASSERT(cfs_list_empty(&the_lnet.ln_nis_zombie));
- LASSERT(cfs_list_empty(&the_lnet.ln_remote_nets));
+ LASSERT(list_empty(&the_lnet.ln_nis_zombie));
lnet_net_lock(LNET_LOCK_EX);
the_lnet.ln_shutdown = 1; /* flag shutdown */
/* Unlink NIs from the global table */
- while (!cfs_list_empty(&the_lnet.ln_nis)) {
- ni = cfs_list_entry(the_lnet.ln_nis.next,
- lnet_ni_t, ni_list);
+ while (!list_empty(&the_lnet.ln_nis)) {
+ ni = list_entry(the_lnet.ln_nis.next,
+ lnet_ni_t, ni_list);
/* move it to zombie list and nobody can find it anymore */
- cfs_list_move(&ni->ni_list, &the_lnet.ln_nis_zombie);
+ list_move(&ni->ni_list, &the_lnet.ln_nis_zombie);
lnet_ni_decref_locked(ni, 0); /* drop ln_nis' ref */
- if (!cfs_list_empty(&ni->ni_cptlist)) {
- cfs_list_del_init(&ni->ni_cptlist);
+ if (!list_empty(&ni->ni_cptlist)) {
+ list_del_init(&ni->ni_cptlist);
lnet_ni_decref_locked(ni, 0);
}
}
/* Clear the peer table and wait for all peers to go (they hold refs on
* their NIs) */
- lnet_peer_tables_cleanup();
+ lnet_peer_tables_cleanup(NULL);
lnet_net_lock(LNET_LOCK_EX);
/* Now wait for the NI's I just nuked to show up on ln_zombie_nis
* and shut them down in guaranteed thread context */
i = 2;
- while (!cfs_list_empty(&the_lnet.ln_nis_zombie)) {
+ while (!list_empty(&the_lnet.ln_nis_zombie)) {
int *ref;
int j;
- ni = cfs_list_entry(the_lnet.ln_nis_zombie.next,
- lnet_ni_t, ni_list);
- cfs_list_del_init(&ni->ni_list);
+ ni = list_entry(the_lnet.ln_nis_zombie.next,
+ lnet_ni_t, ni_list);
+ list_del_init(&ni->ni_list);
cfs_percpt_for_each(ref, j, ni->ni_refs) {
if (*ref == 0)
continue;
/* still busy, add it back to zombie list */
- cfs_list_add(&ni->ni_list, &the_lnet.ln_nis_zombie);
+ list_add(&ni->ni_list, &the_lnet.ln_nis_zombie);
break;
}
- while (!cfs_list_empty(&ni->ni_list)) {
+ if (!list_empty(&ni->ni_list)) {
lnet_net_unlock(LNET_LOCK_EX);
++i;
if ((i & (-i)) == i) {
- CDEBUG(D_WARNING,
- "Waiting for zombie LNI %s\n",
+ CDEBUG(D_WARNING, "Waiting for zombie LNI %s\n",
libcfs_nid2str(ni->ni_nid));
}
cfs_pause(cfs_time_seconds(1));
ni->ni_lnd->lnd_refcount--;
lnet_net_unlock(LNET_LOCK_EX);
- islo = ni->ni_lnd->lnd_type == LOLND;
+ islo = ni->ni_lnd->lnd_type == LOLND;
- LASSERT (!cfs_in_interrupt ());
- (ni->ni_lnd->lnd_shutdown)(ni);
+ LASSERT (!in_interrupt ());
+ (ni->ni_lnd->lnd_shutdown)(ni);
- /* can't deref lnd anymore now; it might have unregistered
- * itself... */
+ /* can't deref lnd anymore now; it might have unregistered
+ * itself... */
- if (!islo)
- CDEBUG(D_LNI, "Removed LNI %s\n",
- libcfs_nid2str(ni->ni_nid));
+ if (!islo)
+ CDEBUG(D_LNI, "Removed LNI %s\n",
+ libcfs_nid2str(ni->ni_nid));
lnet_ni_free(ni);
+ i = 2;
+
lnet_net_lock(LNET_LOCK_EX);
}
the_lnet.ln_shutdown = 0;
lnet_net_unlock(LNET_LOCK_EX);
-
- if (the_lnet.ln_network_tokens != NULL) {
- LIBCFS_FREE(the_lnet.ln_network_tokens,
- the_lnet.ln_network_tokens_nob);
- the_lnet.ln_network_tokens = NULL;
- }
}
int
lnd_t *lnd;
struct lnet_ni *ni;
struct lnet_tx_queue *tq;
- cfs_list_t nilist;
+ struct list_head nilist;
int i;
- int rc = 0;
- int lnd_type;
- int nicount = 0;
- char *nets = lnet_get_networks();
+ int rc = 0;
+ int lnd_type;
+ int nicount = 0;
+ char *nets = lnet_get_networks();
- CFS_INIT_LIST_HEAD(&nilist);
+ INIT_LIST_HEAD(&nilist);
- if (nets == NULL)
- goto failed;
+ if (nets == NULL)
+ goto failed;
- rc = lnet_parse_networks(&nilist, nets);
- if (rc != 0)
- goto failed;
+ rc = lnet_parse_networks(&nilist, nets);
+ if (rc != 0)
+ goto failed;
- while (!cfs_list_empty(&nilist)) {
- ni = cfs_list_entry(nilist.next, lnet_ni_t, ni_list);
- lnd_type = LNET_NETTYP(LNET_NIDNET(ni->ni_nid));
+ while (!list_empty(&nilist)) {
+ ni = list_entry(nilist.next, lnet_ni_t, ni_list);
+ lnd_type = LNET_NETTYP(LNET_NIDNET(ni->ni_nid));
- LASSERT (libcfs_isknown_lnd(lnd_type));
+ LASSERT(libcfs_isknown_lnd(lnd_type));
if (lnd_type == CIBLND ||
lnd_type == OPENIBLND ||
#ifdef __KERNEL__
if (lnd == NULL) {
- LNET_MUTEX_UNLOCK(&the_lnet.ln_lnd_mutex);
- rc = cfs_request_module("%s",
- libcfs_lnd2modname(lnd_type));
- LNET_MUTEX_LOCK(&the_lnet.ln_lnd_mutex);
+ LNET_MUTEX_UNLOCK(&the_lnet.ln_lnd_mutex);
+ rc = request_module("%s",
+ libcfs_lnd2modname(lnd_type));
+ LNET_MUTEX_LOCK(&the_lnet.ln_lnd_mutex);
lnd = lnet_find_lnd_by_type(lnd_type);
if (lnd == NULL) {
LASSERT (ni->ni_peertimeout <= 0 || lnd->lnd_query != NULL);
- cfs_list_del(&ni->ni_list);
+ list_del(&ni->ni_list);
lnet_net_lock(LNET_LOCK_EX);
/* refcount for ln_nis */
lnet_ni_addref_locked(ni, 0);
- cfs_list_add_tail(&ni->ni_list, &the_lnet.ln_nis);
+ list_add_tail(&ni->ni_list, &the_lnet.ln_nis);
if (ni->ni_cpts != NULL) {
- cfs_list_add_tail(&ni->ni_cptlist,
- &the_lnet.ln_nis_cpt);
+ list_add_tail(&ni->ni_cptlist,
+ &the_lnet.ln_nis_cpt);
lnet_ni_addref_locked(ni, 0);
}
failed:
lnet_shutdown_lndnis();
- while (!cfs_list_empty(&nilist)) {
- ni = cfs_list_entry(nilist.next, lnet_ni_t, ni_list);
- cfs_list_del(&ni->ni_list);
+ while (!list_empty(&nilist)) {
+ ni = list_entry(nilist.next, lnet_ni_t, ni_list);
+ list_del(&ni->ni_list);
lnet_ni_free(ni);
}
the_lnet.ln_refcount = 0;
the_lnet.ln_init = 1;
LNetInvalidateHandle(&the_lnet.ln_rc_eqh);
- CFS_INIT_LIST_HEAD(&the_lnet.ln_lnds);
- CFS_INIT_LIST_HEAD(&the_lnet.ln_rcd_zombie);
- CFS_INIT_LIST_HEAD(&the_lnet.ln_rcd_deathrow);
+ INIT_LIST_HEAD(&the_lnet.ln_lnds);
+ INIT_LIST_HEAD(&the_lnet.ln_rcd_zombie);
+ INIT_LIST_HEAD(&the_lnet.ln_rcd_deathrow);
#ifdef __KERNEL__
+ /* The hash table size is the number of bits it takes to express the set
+ * ln_num_routes, minus 1 (better to under-estimate than over-estimate so
+ * we don't waste memory). */
+ if (rnet_htable_size <= 0)
+ rnet_htable_size = LNET_REMOTE_NETS_HASH_DEFAULT;
+ else if (rnet_htable_size > LNET_REMOTE_NETS_HASH_MAX)
+ rnet_htable_size = LNET_REMOTE_NETS_HASH_MAX;
+ the_lnet.ln_remote_nets_hbits = max_t(int, 1,
+ order_base_2(rnet_htable_size) - 1);
+
/* All LNDs apart from the LOLND are in separate modules. They
* register themselves when their module loads, and unregister
* themselves when their module is unloaded. */
#else
+ the_lnet.ln_remote_nets_hbits = 8;
+
/* Register LNDs
* NB the order here determines default 'networks=' order */
-# ifdef CRAY_XT3
- LNET_REGISTER_ULND(the_ptllnd);
-# endif
# ifdef HAVE_LIBPTHREAD
LNET_REGISTER_ULND(the_tcplnd);
# endif
lnet_register_lnd(&the_lolnd);
return 0;
}
+EXPORT_SYMBOL(LNetInit);
/**
* Finalize LNet library.
LASSERT(the_lnet.ln_init);
LASSERT(the_lnet.ln_refcount == 0);
- while (!cfs_list_empty(&the_lnet.ln_lnds))
- lnet_unregister_lnd(cfs_list_entry(the_lnet.ln_lnds.next,
- lnd_t, lnd_list));
+ while (!list_empty(&the_lnet.ln_lnds))
+ lnet_unregister_lnd(list_entry(the_lnet.ln_lnds.next,
+ lnd_t, lnd_list));
lnet_destroy_locks();
the_lnet.ln_init = 0;
}
+EXPORT_SYMBOL(LNetFini);
/**
* Set LNet PID and start LNet interfaces, routing, and forwarding.
LNET_MUTEX_UNLOCK(&the_lnet.ln_api_mutex);
return rc;
}
+EXPORT_SYMBOL(LNetNIInit);
/**
* Stop LNet interfaces, routing, and forwarding.
LNET_MUTEX_UNLOCK(&the_lnet.ln_api_mutex);
return 0;
}
+EXPORT_SYMBOL(LNetNIFini);
/**
* This is an ugly hack to export IOC_LIBCFS_DEBUG_PEER and
return lnet_fail_nid(data->ioc_nid, data->ioc_count);
case IOC_LIBCFS_ADD_ROUTE:
- rc = lnet_add_route(data->ioc_net, data->ioc_count,
- data->ioc_nid);
+ rc = lnet_add_route(data->ioc_net, data->ioc_count,
+ data->ioc_nid, data->ioc_priority);
return (rc != 0) ? rc : lnet_check_routes();
case IOC_LIBCFS_DEL_ROUTE:
case IOC_LIBCFS_GET_ROUTE:
return lnet_get_route(data->ioc_count,
&data->ioc_net, &data->ioc_count,
- &data->ioc_nid, &data->ioc_flags);
+ &data->ioc_nid, &data->ioc_flags,
+ &data->ioc_priority);
case IOC_LIBCFS_NOTIFY_ROUTER:
return lnet_notify(NULL, data->ioc_nid, data->ioc_flags,
cfs_time_current() -
}
/* not reached */
}
+EXPORT_SYMBOL(LNetCtl);
/**
* Retrieve the lnet_process_id_t ID of LNet interface at \a index. Note that
int
LNetGetId(unsigned int index, lnet_process_id_t *id)
{
- struct lnet_ni *ni;
- cfs_list_t *tmp;
- int cpt;
- int rc = -ENOENT;
+ struct lnet_ni *ni;
+ struct list_head *tmp;
+ int cpt;
+ int rc = -ENOENT;
LASSERT(the_lnet.ln_init);
LASSERT(the_lnet.ln_refcount > 0);
cpt = lnet_net_lock_current();
- cfs_list_for_each(tmp, &the_lnet.ln_nis) {
- if (index-- != 0)
- continue;
+ list_for_each(tmp, &the_lnet.ln_nis) {
+ if (index-- != 0)
+ continue;
- ni = cfs_list_entry(tmp, lnet_ni_t, ni_list);
+ ni = list_entry(tmp, lnet_ni_t, ni_list);
- id->nid = ni->ni_nid;
- id->pid = the_lnet.ln_pid;
- rc = 0;
- break;
- }
+ id->nid = ni->ni_nid;
+ id->pid = the_lnet.ln_pid;
+ rc = 0;
+ break;
+ }
lnet_net_unlock(cpt);
return rc;
}
+EXPORT_SYMBOL(LNetGetId);
/**
* Print a string representation of handle \a h into buffer \a str of
{
snprintf(str, len, LPX64, h.cookie);
}
+EXPORT_SYMBOL(LNetSnprintHandle);
static int
lnet_create_ping_info(void)
lnet_net_lock(0);
- cfs_list_for_each_entry(ni, &the_lnet.ln_nis, ni_list) {
+ list_for_each_entry(ni, &the_lnet.ln_nis, ni_list) {
lnet_ni_lock(ni);
ni->ni_status = NULL;
lnet_ni_unlock(ni);
lnet_net_unlock(0);
- LIBCFS_FREE(the_lnet.ln_ping_info,
- offsetof(lnet_ping_info_t,
- pi_ni[the_lnet.ln_ping_info->pi_nnis]));
- the_lnet.ln_ping_info = NULL;
- return;
+ LIBCFS_FREE(the_lnet.ln_ping_info,
+ offsetof(lnet_ping_info_t,
+ pi_ni[the_lnet.ln_ping_info->pi_nnis]));
+ the_lnet.ln_ping_info = NULL;
+ return;
}
int
void
lnet_ping_target_fini(void)
{
- lnet_event_t event;
- int rc;
- int which;
- int timeout_ms = 1000;
- cfs_sigset_t blocked = cfs_block_allsigs();
+ lnet_event_t event;
+ int rc;
+ int which;
+ int timeout_ms = 1000;
+ sigset_t blocked = cfs_block_allsigs();
- LNetMDUnlink(the_lnet.ln_ping_target_md);
- /* NB md could be busy; this just starts the unlink */
+ LNetMDUnlink(the_lnet.ln_ping_target_md);
+ /* NB md could be busy; this just starts the unlink */
for (;;) {
rc = LNetEQPoll(&the_lnet.ln_ping_target_eq, 1,
int
lnet_ping (lnet_process_id_t id, int timeout_ms, lnet_process_id_t *ids, int n_ids)
{
- lnet_handle_eq_t eqh;
- lnet_handle_md_t mdh;
- lnet_event_t event;
- lnet_md_t md = {0};
- int which;
- int unlinked = 0;
- int replied = 0;
- const int a_long_time = 60000; /* mS */
- int infosz = offsetof(lnet_ping_info_t, pi_ni[n_ids]);
- lnet_ping_info_t *info;
- lnet_process_id_t tmpid;
- int i;
- int nob;
- int rc;
- int rc2;
- cfs_sigset_t blocked;
-
- if (n_ids <= 0 ||
- id.nid == LNET_NID_ANY ||
- timeout_ms > 500000 || /* arbitrary limit! */
- n_ids > 20) /* arbitrary limit! */
- return -EINVAL;
+ lnet_handle_eq_t eqh;
+ lnet_handle_md_t mdh;
+ lnet_event_t event;
+ lnet_md_t md = {0};
+ int which;
+ int unlinked = 0;
+ int replied = 0;
+ const int a_long_time = 60000; /* mS */
+ int infosz = offsetof(lnet_ping_info_t, pi_ni[n_ids]);
+ lnet_ping_info_t *info;
+ lnet_process_id_t tmpid;
+ int i;
+ int nob;
+ int rc;
+ int rc2;
+ sigset_t blocked;
+
+ if (n_ids <= 0 ||
+ id.nid == LNET_NID_ANY ||
+ timeout_ms > 500000 || /* arbitrary limit! */
+ n_ids > 20) /* arbitrary limit! */
+ return -EINVAL;
if (id.pid == LNET_PID_ANY)
id.pid = LUSTRE_SRV_LNET_PID;
for (i = 0; i < n_ids; i++) {
tmpid.pid = info->pi_pid;
tmpid.nid = info->pi_ni[i].ns_nid;
-#ifdef __KERNEL__
- if (cfs_copy_to_user(&ids[i], &tmpid, sizeof(tmpid)))
+ if (copy_to_user(&ids[i], &tmpid, sizeof(tmpid)))
goto out_1;
-#else
- ids[i] = tmpid;
-#endif
}
rc = info->pi_nnis;