*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2012, 2014, Intel Corporation.
+ * Copyright (c) 2012, 2016, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
#ifndef __LNET_LIB_LNET_H__
#define __LNET_LIB_LNET_H__
-#define LNET_ROUTER
+#ifndef __KERNEL__
+# error This include is only for kernel use.
+#endif
#include <libcfs/libcfs.h>
-#include <lnet/types.h>
+#include <lnet/api.h>
#include <lnet/lnet.h>
#include <lnet/lib-types.h>
-#include <lnet/lib-dlc.h>
-
-extern lnet_t the_lnet; /* THE network */
-#if !defined(__KERNEL__) || defined(LNET_USE_LIB_FREELIST)
-/* 1 CPT, simplify implementation... */
-# define LNET_CPT_MAX_BITS 0
+extern lnet_t the_lnet; /* THE network */
-#else /* KERNEL and no freelist */
-
-# if (BITS_PER_LONG == 32)
+#if (BITS_PER_LONG == 32)
/* 2 CPTs, allowing more CPTs might make us under memory pressure */
-# define LNET_CPT_MAX_BITS 1
+# define LNET_CPT_MAX_BITS 1
-# else /* 64-bit system */
+#else /* 64-bit system */
/*
* 256 CPTs for thousands of CPUs, allowing more CPTs might make us
* under risk of consuming all lh_cookie.
*/
-# define LNET_CPT_MAX_BITS 8
-# endif /* BITS_PER_LONG == 32 */
-#endif
+# define LNET_CPT_MAX_BITS 8
+#endif /* BITS_PER_LONG == 32 */
/* max allowed CPT number */
-#define LNET_CPT_MAX (1 << LNET_CPT_MAX_BITS)
+#define LNET_CPT_MAX (1 << LNET_CPT_MAX_BITS)
-#define LNET_CPT_NUMBER (the_lnet.ln_cpt_number)
-#define LNET_CPT_BITS (the_lnet.ln_cpt_bits)
-#define LNET_CPT_MASK ((1ULL << LNET_CPT_BITS) - 1)
+#define LNET_CPT_NUMBER (the_lnet.ln_cpt_number)
+#define LNET_CPT_BITS (the_lnet.ln_cpt_bits)
+#define LNET_CPT_MASK ((1ULL << LNET_CPT_BITS) - 1)
/** exclusive lock */
-#define LNET_LOCK_EX CFS_PERCPT_LOCK_EX
+#define LNET_LOCK_EX CFS_PERCPT_LOCK_EX
/* Return non-zero if the route's gateway can currently carry traffic. */
static inline int lnet_is_route_alive(lnet_route_t *route)
{
	struct lnet_peer_ni *gw = route->lr_gateway;

	if (!gw->lpni_alive)
		return 0;	/* gateway itself is down */

	if (!(gw->lpni_ping_feats & LNET_PING_FEAT_NI_STATUS))
		return 1;	/* no per-NI status, assume it's alive */

	/* per-NI status available: alive iff no interface is down */
	return route->lr_downis == 0;
}
-static inline int lnet_is_wire_handle_none (lnet_handle_wire_t *wh)
+static inline int lnet_is_wire_handle_none(struct lnet_handle_wire *wh)
{
- return (wh->wh_interface_cookie == LNET_WIRE_HANDLE_COOKIE_NONE &&
- wh->wh_object_cookie == LNET_WIRE_HANDLE_COOKIE_NONE);
+ return (wh->wh_interface_cookie == LNET_WIRE_HANDLE_COOKIE_NONE &&
+ wh->wh_object_cookie == LNET_WIRE_HANDLE_COOKIE_NONE);
}
/*
 * An MD is exhausted when its operation threshold has dropped to zero
 * or, for max-size MDs, when another max-size operation would overrun
 * the remaining space.
 */
static inline int lnet_md_exhausted(lnet_libmd_t *md)
{
	if (md->md_threshold == 0)
		return 1;

	return (md->md_options & LNET_MD_MAX_SIZE) != 0 &&
	       md->md_offset + md->md_max_size > md->md_length;
}
/*
 * Decide whether an MD should be unlinked now.  The refcount must be
 * zero, and either the MD was explicitly flagged for deletion (zombie:
 * auto unlink or LNetM[DE]Unlink, possibly before exhaustion) or auto
 * unlink is enabled and the MD is exhausted.
 */
static inline int lnet_md_unlinkable(lnet_libmd_t *md)
{
	if (md->md_refcount != 0)
		return 0;

	if ((md->md_flags & LNET_MD_FLAG_ZOMBIE) != 0)
		return 1;

	return (md->md_flags & LNET_MD_FLAG_AUTO_UNLINK) != 0 &&
	       lnet_md_exhausted(md);
}
#define lnet_cpt_table() (the_lnet.ln_cpt_table)
#define LNET_LOCK() lnet_net_lock(LNET_LOCK_EX)
#define LNET_UNLOCK() lnet_net_unlock(LNET_LOCK_EX)
-#ifdef __KERNEL__
-
#define lnet_ptl_lock(ptl) spin_lock(&(ptl)->ptl_lock)
#define lnet_ptl_unlock(ptl) spin_unlock(&(ptl)->ptl_lock)
#define lnet_eq_wait_lock() spin_lock(&the_lnet.ln_eq_wait_lock)
#define lnet_eq_wait_unlock() spin_unlock(&the_lnet.ln_eq_wait_lock)
#define lnet_ni_lock(ni) spin_lock(&(ni)->ni_lock)
#define lnet_ni_unlock(ni) spin_unlock(&(ni)->ni_lock)
-#define LNET_MUTEX_LOCK(m) mutex_lock(m)
-#define LNET_MUTEX_UNLOCK(m) mutex_unlock(m)
-
-#else /* !__KERNEL__ */
-
-# ifndef HAVE_LIBPTHREAD
-#define LNET_SINGLE_THREADED_LOCK(l) \
-do { \
- LASSERT ((l) == 0); \
- (l) = 1; \
-} while (0)
-
-#define LNET_SINGLE_THREADED_UNLOCK(l) \
-do { \
- LASSERT ((l) == 1); \
- (l) = 0; \
-} while (0)
-
-#define LNET_MUTEX_LOCK(m) LNET_SINGLE_THREADED_LOCK(*(m))
-#define LNET_MUTEX_UNLOCK(m) LNET_SINGLE_THREADED_UNLOCK(*(m))
-
-#define lnet_ptl_lock(ptl) \
- LNET_SINGLE_THREADED_LOCK((ptl)->ptl_lock)
-#define lnet_ptl_unlock(ptl) \
- LNET_SINGLE_THREADED_UNLOCK((ptl)->ptl_lock)
-
-#define lnet_eq_wait_lock() \
- LNET_SINGLE_THREADED_LOCK(the_lnet.ln_eq_wait_lock)
-#define lnet_eq_wait_unlock() \
- LNET_SINGLE_THREADED_UNLOCK(the_lnet.ln_eq_wait_lock)
-
-#define lnet_ni_lock(ni) \
- LNET_SINGLE_THREADED_LOCK((ni)->ni_lock)
-#define lnet_ni_unlock(ni) \
- LNET_SINGLE_THREADED_UNLOCK((ni)->ni_lock)
-
-# else /* HAVE_LIBPTHREAD */
-
-#define LNET_MUTEX_LOCK(m) pthread_mutex_lock(m)
-#define LNET_MUTEX_UNLOCK(m) pthread_mutex_unlock(m)
-
-#define lnet_ptl_lock(ptl) pthread_mutex_lock(&(ptl)->ptl_lock)
-#define lnet_ptl_unlock(ptl) pthread_mutex_unlock(&(ptl)->ptl_lock)
-
-#define lnet_eq_wait_lock() pthread_mutex_lock(&the_lnet.ln_eq_wait_lock)
-#define lnet_eq_wait_unlock() pthread_mutex_unlock(&the_lnet.ln_eq_wait_lock)
-
-#define lnet_ni_lock(ni) pthread_mutex_lock(&(ni)->ni_lock)
-#define lnet_ni_unlock(ni) pthread_mutex_unlock(&(ni)->ni_lock)
-
-# endif /* HAVE_LIBPTHREAD */
-#endif /* __KERNEL__ */
-
-#define MAX_PORTALS 64
-
-/* these are only used by code with LNET_USE_LIB_FREELIST, but we still
- * exported them to !LNET_USE_LIB_FREELIST for easy implemetation */
-#define LNET_FL_MAX_MES 2048
-#define LNET_FL_MAX_MDS 2048
-#define LNET_FL_MAX_EQS 512
-#define LNET_FL_MAX_MSGS 2048 /* Outstanding messages */
-
-#ifdef LNET_USE_LIB_FREELIST
-int lnet_freelist_init(lnet_freelist_t *fl, int n, int size);
-void lnet_freelist_fini(lnet_freelist_t *fl);
-
-static inline void *
-lnet_freelist_alloc (lnet_freelist_t *fl)
-{
- /* ALWAYS called with liblock held */
- lnet_freeobj_t *o;
-
- if (list_empty(&fl->fl_list))
- return NULL;
-
- o = list_entry(fl->fl_list.next, lnet_freeobj_t, fo_list);
- list_del(&o->fo_list);
- return (void *)&o->fo_contents;
-}
-
-static inline void
-lnet_freelist_free (lnet_freelist_t *fl, void *obj)
-{
- /* ALWAYS called with liblock held */
- lnet_freeobj_t *o = list_entry(obj, lnet_freeobj_t, fo_contents);
-
- list_add(&o->fo_list, &fl->fl_list);
-}
+#define MAX_PORTALS 64
+#define LNET_SMALL_MD_SIZE offsetof(lnet_libmd_t, md_iov.iov[1])
+extern struct kmem_cache *lnet_mes_cachep; /* MEs kmem_cache */
+extern struct kmem_cache *lnet_small_mds_cachep; /* <= LNET_SMALL_MD_SIZE bytes
+ * MDs kmem_cache */
/* Allocate a zeroed event-queue descriptor; NULL on allocation failure. */
static inline lnet_eq_t *
lnet_eq_alloc(void)
{
	lnet_eq_t *eq;

	LIBCFS_ALLOC(eq, sizeof(*eq));
	return eq;
}
/* Release an event-queue descriptor obtained from lnet_eq_alloc(). */
static inline void
lnet_eq_free(lnet_eq_t *eq)
{
	LIBCFS_FREE(eq, sizeof(*eq));
}
static inline lnet_libmd_t *
lnet_md_alloc (lnet_md_t *umd)
{
- /* NEVER called with resource lock held */
- struct lnet_res_container *rec = the_lnet.ln_md_containers[0];
- lnet_libmd_t *md;
-
- LASSERT(LNET_CPT_NUMBER == 1);
-
- lnet_res_lock(0);
- md = (lnet_libmd_t *)lnet_freelist_alloc(&rec->rec_freelist);
- lnet_res_unlock(0);
-
- if (md != NULL)
- INIT_LIST_HEAD(&md->md_list);
-
- return md;
-}
-
-static inline void
-lnet_md_free_locked(lnet_libmd_t *md)
-{
- /* ALWAYS called with resource lock held */
- struct lnet_res_container *rec = the_lnet.ln_md_containers[0];
-
- LASSERT(LNET_CPT_NUMBER == 1);
- lnet_freelist_free(&rec->rec_freelist, md);
-}
-
-static inline void
-lnet_md_free(lnet_libmd_t *md)
-{
- lnet_res_lock(0);
- lnet_md_free_locked(md);
- lnet_res_unlock(0);
-}
-
-static inline lnet_me_t *
-lnet_me_alloc(void)
-{
- /* NEVER called with resource lock held */
- struct lnet_res_container *rec = the_lnet.ln_me_containers[0];
- lnet_me_t *me;
-
- LASSERT(LNET_CPT_NUMBER == 1);
-
- lnet_res_lock(0);
- me = (lnet_me_t *)lnet_freelist_alloc(&rec->rec_freelist);
- lnet_res_unlock(0);
-
- return me;
-}
-
-static inline void
-lnet_me_free_locked(lnet_me_t *me)
-{
- /* ALWAYS called with resource lock held */
- struct lnet_res_container *rec = the_lnet.ln_me_containers[0];
-
- LASSERT(LNET_CPT_NUMBER == 1);
- lnet_freelist_free(&rec->rec_freelist, me);
-}
-
-static inline void
-lnet_me_free(lnet_me_t *me)
-{
- lnet_res_lock(0);
- lnet_me_free_locked(me);
- lnet_res_unlock(0);
-}
-
-static inline lnet_msg_t *
-lnet_msg_alloc (void)
-{
- /* NEVER called with network lock held */
- struct lnet_msg_container *msc = the_lnet.ln_msg_containers[0];
- lnet_msg_t *msg;
-
- LASSERT(LNET_CPT_NUMBER == 1);
-
- lnet_net_lock(0);
- msg = (lnet_msg_t *)lnet_freelist_alloc(&msc->msc_freelist);
- lnet_net_unlock(0);
-
- if (msg != NULL) {
- /* NULL pointers, clear flags etc */
- memset(msg, 0, sizeof(*msg));
+ lnet_libmd_t *md;
+ unsigned int size;
+ unsigned int niov;
+
+ if ((umd->options & LNET_MD_KIOV) != 0) {
+ niov = umd->length;
+ size = offsetof(lnet_libmd_t, md_iov.kiov[niov]);
+ } else {
+ niov = ((umd->options & LNET_MD_IOVEC) != 0) ?
+ umd->length : 1;
+ size = offsetof(lnet_libmd_t, md_iov.iov[niov]);
}
- return msg;
-}
-
-static inline void
-lnet_msg_free_locked(lnet_msg_t *msg)
-{
- /* ALWAYS called with network lock held */
- struct lnet_msg_container *msc = the_lnet.ln_msg_containers[0];
-
- LASSERT(LNET_CPT_NUMBER == 1);
- LASSERT(!msg->msg_onactivelist);
- lnet_freelist_free(&msc->msc_freelist, msg);
-}
-
-static inline void
-lnet_msg_free (lnet_msg_t *msg)
-{
- lnet_net_lock(0);
- lnet_msg_free_locked(msg);
- lnet_net_unlock(0);
-}
-
-#else /* !LNET_USE_LIB_FREELIST */
-
-static inline lnet_eq_t *
-lnet_eq_alloc (void)
-{
- /* NEVER called with liblock held */
- lnet_eq_t *eq;
-
- LIBCFS_ALLOC(eq, sizeof(*eq));
- return (eq);
-}
-
-static inline void
-lnet_eq_free(lnet_eq_t *eq)
-{
- /* ALWAYS called with resource lock held */
- LIBCFS_FREE(eq, sizeof(*eq));
-}
-static inline lnet_libmd_t *
-lnet_md_alloc (lnet_md_t *umd)
-{
- /* NEVER called with liblock held */
- lnet_libmd_t *md;
- unsigned int size;
- unsigned int niov;
-
- if ((umd->options & LNET_MD_KIOV) != 0) {
- niov = umd->length;
- size = offsetof(lnet_libmd_t, md_iov.kiov[niov]);
- } else {
- niov = ((umd->options & LNET_MD_IOVEC) != 0) ?
- umd->length : 1;
- size = offsetof(lnet_libmd_t, md_iov.iov[niov]);
- }
-
- LIBCFS_ALLOC(md, size);
+ if (size <= LNET_SMALL_MD_SIZE) {
+ md = kmem_cache_alloc(lnet_small_mds_cachep,
+ GFP_NOFS | __GFP_ZERO);
+ if (md) {
+ CDEBUG(D_MALLOC, "slab-alloced 'md' of size %u at "
+ "%p.\n", size, md);
+ } else {
+ CDEBUG(D_MALLOC, "failed to allocate 'md' of size %u\n",
+ size);
+ return NULL;
+ }
+ } else {
+ LIBCFS_ALLOC(md, size);
+ }
if (md != NULL) {
/* Set here in case of early free */
/*
 * Free an MD allocated by lnet_md_alloc(): small MDs come from the
 * dedicated slab cache, larger ones from LIBCFS_ALLOC.  The size is
 * recomputed from md_niov exactly as at allocation time so the
 * matching release path is taken.
 */
static inline void
lnet_md_free(lnet_libmd_t *md)
{
	unsigned int size;

	/* Restored: the kiov sizing was missing, leaving an empty 'if'
	 * branch; it must mirror the iov branch and lnet_md_alloc(). */
	if ((md->md_options & LNET_MD_KIOV) != 0)
		size = offsetof(lnet_libmd_t, md_iov.kiov[md->md_niov]);
	else
		size = offsetof(lnet_libmd_t, md_iov.iov[md->md_niov]);

	if (size <= LNET_SMALL_MD_SIZE) {
		CDEBUG(D_MALLOC, "slab-freed 'md' at %p.\n", md);
		kmem_cache_free(lnet_small_mds_cachep, md);
	} else {
		LIBCFS_FREE(md, size);
	}
}
static inline lnet_me_t *
lnet_me_alloc (void)
{
- /* NEVER called with liblock held */
- lnet_me_t *me;
-
- LIBCFS_ALLOC(me, sizeof(*me));
- return (me);
-}
-
-static inline void
-lnet_me_free(lnet_me_t *me)
-{
- /* ALWAYS called with resource lock held */
- LIBCFS_FREE(me, sizeof(*me));
-}
+ lnet_me_t *me;
-static inline lnet_msg_t *
-lnet_msg_alloc(void)
-{
- /* NEVER called with liblock held */
- lnet_msg_t *msg;
+ me = kmem_cache_alloc(lnet_mes_cachep, GFP_NOFS | __GFP_ZERO);
- LIBCFS_ALLOC(msg, sizeof(*msg));
+ if (me)
+ CDEBUG(D_MALLOC, "slab-alloced 'me' at %p.\n", me);
+ else
+ CDEBUG(D_MALLOC, "failed to allocate 'me'\n");
- /* no need to zero, LIBCFS_ALLOC does for us */
- return (msg);
+ return me;
}
static inline void
-lnet_msg_free(lnet_msg_t *msg)
+lnet_me_free(lnet_me_t *me)
{
- /* ALWAYS called with network lock held */
- LASSERT(!msg->msg_onactivelist);
- LIBCFS_FREE(msg, sizeof(*msg));
+ CDEBUG(D_MALLOC, "slab-freed 'me' at %p.\n", me);
+ kmem_cache_free(lnet_mes_cachep, me);
}
-#define lnet_eq_free_locked(eq) lnet_eq_free(eq)
-#define lnet_md_free_locked(md) lnet_md_free(md)
-#define lnet_me_free_locked(me) lnet_me_free(me)
-#define lnet_msg_free_locked(msg) lnet_msg_free(msg)
-
-#endif /* LNET_USE_LIB_FREELIST */
-
lnet_libhandle_t *lnet_res_lh_lookup(struct lnet_res_container *rec,
__u64 cookie);
void lnet_res_lh_initialize(struct lnet_res_container *rec,
/* Convert an EQ pointer to a user handle; NULL maps to the invalid handle. */
static inline void
lnet_eq2handle(lnet_handle_eq_t *handle, lnet_eq_t *eq)
{
	if (!eq) {
		LNetInvalidateHandle(handle);
		return;
	}

	handle->cookie = eq->eq_lh.lh_cookie;
}
static inline lnet_eq_t *
/* Convert an MD pointer to its user-visible handle. */
static inline void
lnet_md2handle(lnet_handle_md_t *handle, lnet_libmd_t *md)
{
	handle->cookie = md->md_lh.lh_cookie;
}
static inline lnet_libmd_t *
}
static inline lnet_libmd_t *
-lnet_wire_handle2md(lnet_handle_wire_t *wh)
+lnet_wire_handle2md(struct lnet_handle_wire *wh)
{
/* ALWAYS called with resource lock held */
lnet_libhandle_t *lh;
/* Convert an ME pointer to its user-visible handle. */
static inline void
lnet_me2handle(lnet_handle_me_t *handle, lnet_me_t *me)
{
	handle->cookie = me->me_lh.lh_cookie;
}
static inline lnet_me_t *
}
static inline void
-lnet_peer_addref_locked(lnet_peer_t *lp)
+lnet_peer_ni_addref_locked(struct lnet_peer_ni *lp)
{
- LASSERT (lp->lp_refcount > 0);
- lp->lp_refcount++;
+ LASSERT (atomic_read(&lp->lpni_refcount) > 0);
+ atomic_inc(&lp->lpni_refcount);
}
-extern void lnet_destroy_peer_locked(lnet_peer_t *lp);
+extern void lnet_destroy_peer_ni_locked(struct lnet_peer_ni *lp);
static inline void
-lnet_peer_decref_locked(lnet_peer_t *lp)
+lnet_peer_ni_decref_locked(struct lnet_peer_ni *lp)
{
- LASSERT (lp->lp_refcount > 0);
- lp->lp_refcount--;
- if (lp->lp_refcount == 0)
- lnet_destroy_peer_locked(lp);
+ LASSERT (atomic_read(&lp->lpni_refcount) > 0);
+ atomic_dec(&lp->lpni_refcount);
+ if (atomic_read(&lp->lpni_refcount) == 0)
+ lnet_destroy_peer_ni_locked(lp);
}
static inline int
-lnet_isrouter(lnet_peer_t *lp)
+lnet_isrouter(struct lnet_peer_ni *lp)
{
- return lp->lp_rtr_refcount != 0;
-}
-
-/* check if it's a router checker ping */
-static inline int
-lnet_msg_is_rc_ping(struct lnet_msg *msg)
-{
- lnet_hdr_t *hdr = &msg->msg_hdr;
-
- return msg->msg_type == LNET_MSG_GET &&
- hdr->msg.get.ptl_index == cpu_to_le32(LNET_RESERVED_PORTAL) &&
- hdr->msg.get.match_bits ==
- cpu_to_le64(LNET_PROTO_PING_MATCHBITS);
-}
-
-/* peer aliveness is enabled in a network where lnet_ni_t::ni_peertimeout has
- * been set to a positive value, it's only valid for router peers or peers on
- * routers.
- */
-static inline int
-lnet_peer_aliveness_enabled(struct lnet_peer *lp)
-{
- if (lp->lp_ni->ni_peertimeout <= 0)
- return 0;
-
- return the_lnet.ln_routing || lnet_isrouter(lp);
+ return lp->lpni_rtr_refcount != 0;
}
static inline void
lnet_net_unlock(0);
}
-void lnet_ni_free(lnet_ni_t *ni);
+static inline lnet_msg_t *
+lnet_msg_alloc(void)
+{
+ lnet_msg_t *msg;
+
+ LIBCFS_ALLOC(msg, sizeof(*msg));
+
+ /* no need to zero, LIBCFS_ALLOC does for us */
+ return (msg);
+}
+
+static inline void
+lnet_msg_free(lnet_msg_t *msg)
+{
+ LASSERT(!msg->msg_onactivelist);
+
+ /* Make sure we have no references to an NI. */
+ if (msg->msg_txni)
+ lnet_ni_decref_locked(msg->msg_txni, msg->msg_tx_cpt);
+ if (msg->msg_rxni)
+ lnet_ni_decref_locked(msg->msg_rxni, msg->msg_rx_cpt);
+
+ LIBCFS_FREE(msg, sizeof(*msg));
+}
+
+void lnet_ni_free(struct lnet_ni *ni);
+void lnet_net_free(struct lnet_net *net);
+
+struct lnet_net *
+lnet_net_alloc(__u32 net_type, struct list_head *netlist);
+
+struct lnet_ni *
+lnet_ni_alloc(struct lnet_net *net, struct cfs_expr_list *el,
+ char *iface);
static inline int
lnet_nid2peerhash(lnet_nid_t nid)
extern lnd_t the_lolnd;
extern int avoid_asym_router_failure;
-#ifndef __KERNEL__
-/* unconditional registration */
-#define LNET_REGISTER_ULND(lnd) \
-do { \
- extern lnd_t lnd; \
- \
- lnet_register_lnd(&(lnd)); \
-} while (0)
-
-/* conditional registration */
-#define LNET_REGISTER_ULND_IF_PRESENT(lnd) \
-do { \
- extern lnd_t lnd __attribute__ ((weak, alias("the_lolnd"))); \
- \
- if (&(lnd) != &the_lolnd) \
- lnet_register_lnd(&(lnd)); \
-} while (0)
-#endif
-
-extern int lnet_cpt_of_nid_locked(lnet_nid_t nid);
-extern int lnet_cpt_of_nid(lnet_nid_t nid);
+extern unsigned int lnet_nid_cpt_hash(lnet_nid_t nid, unsigned int number);
+extern int lnet_cpt_of_nid_locked(lnet_nid_t nid, struct lnet_ni *ni);
+extern int lnet_cpt_of_nid(lnet_nid_t nid, struct lnet_ni *ni);
extern lnet_ni_t *lnet_nid2ni_locked(lnet_nid_t nid, int cpt);
+extern lnet_ni_t *lnet_nid2ni_addref(lnet_nid_t nid);
extern lnet_ni_t *lnet_net2ni_locked(__u32 net, int cpt);
extern lnet_ni_t *lnet_net2ni(__u32 net);
+bool lnet_is_ni_healthy_locked(struct lnet_ni *ni);
+struct lnet_net *lnet_get_net_locked(__u32 net_id);
+
+int lnet_lib_init(void);
+void lnet_lib_exit(void);
+
+extern int portal_rotor;
int lnet_notify(lnet_ni_t *ni, lnet_nid_t peer, int alive, cfs_time_t when);
-void lnet_notify_locked(lnet_peer_t *lp, int notifylnd, int alive, cfs_time_t when);
-int lnet_add_route(__u32 net, unsigned int hops, lnet_nid_t gateway_nid,
+void lnet_notify_locked(struct lnet_peer_ni *lp, int notifylnd, int alive,
+ cfs_time_t when);
+int lnet_add_route(__u32 net, __u32 hops, lnet_nid_t gateway_nid,
unsigned int priority);
int lnet_check_routes(void);
int lnet_del_route(__u32 net, lnet_nid_t gw_nid);
void lnet_destroy_routes(void);
int lnet_get_route(int idx, __u32 *net, __u32 *hops,
lnet_nid_t *gateway, __u32 *alive, __u32 *priority);
-int lnet_get_net_config(int idx,
- __u32 *cpt_count,
- __u64 *nid,
- int *peer_timeout,
- int *peer_tx_credits,
- int *peer_rtr_cr,
- int *max_tx_credits,
- struct lnet_ioctl_net_config *net_config);
int lnet_get_rtr_pool_cfg(int idx, struct lnet_ioctl_pool_cfg *pool_cfg);
+struct lnet_ni *lnet_get_next_ni_locked(struct lnet_net *mynet,
+ struct lnet_ni *prev);
+struct lnet_ni *lnet_get_ni_idx_locked(int idx);
+
+struct libcfs_ioctl_handler {
+ struct list_head item;
+ int (*handle_ioctl)(unsigned int cmd, struct libcfs_ioctl_hdr *hdr);
+};
+
+#define DECLARE_IOCTL_HANDLER(ident, func) \
+ static struct libcfs_ioctl_handler ident = { \
+ /* .item = */ LIST_HEAD_INIT(ident.item), \
+ /* .handle_ioctl = */ func \
+ }
+
+extern int libcfs_register_ioctl(struct libcfs_ioctl_handler *hand);
+extern int libcfs_deregister_ioctl(struct libcfs_ioctl_handler *hand);
+extern int libcfs_ioctl_getdata(struct libcfs_ioctl_hdr **hdr_pp,
+ struct libcfs_ioctl_hdr __user *uparam);
void lnet_proc_init(void);
void lnet_proc_fini(void);
int lnet_rtrpools_enable(void);
void lnet_rtrpools_disable(void);
void lnet_rtrpools_free(int keep_pools);
-lnet_remotenet_t *lnet_find_net_locked (__u32 net);
-int lnet_dyn_add_ni(lnet_pid_t requested_pid, char *nets,
- __s32 peer_timeout, __s32 peer_cr, __s32 peer_buf_cr,
- __s32 credits);
+lnet_remotenet_t *lnet_find_rnet_locked(__u32 net);
+int lnet_dyn_add_ni(lnet_pid_t requested_pid,
+ struct lnet_ioctl_config_data *conf);
int lnet_dyn_del_ni(__u32 net);
+int lnet_clear_lazy_portal(struct lnet_ni *ni, int portal, char *reason);
+struct lnet_net *lnet_get_net_locked(__u32 net_id);
int lnet_islocalnid(lnet_nid_t nid);
int lnet_islocalnet(__u32 net);
void lnet_eq_enqueue_event(lnet_eq_t *eq, lnet_event_t *ev);
void lnet_prep_send(lnet_msg_t *msg, int type, lnet_process_id_t target,
- unsigned int offset, unsigned int len);
+ unsigned int offset, unsigned int len);
int lnet_send(lnet_nid_t nid, lnet_msg_t *msg, lnet_nid_t rtr_nid);
void lnet_return_tx_credits_locked(lnet_msg_t *msg);
void lnet_return_rx_credits_locked(lnet_msg_t *msg);
/* message functions */
int lnet_parse (lnet_ni_t *ni, lnet_hdr_t *hdr,
- lnet_nid_t fromnid, void *private, int rdma_req);
+ lnet_nid_t fromnid, void *private, int rdma_req);
int lnet_parse_local(lnet_ni_t *ni, lnet_msg_t *msg);
int lnet_parse_forward_locked(lnet_ni_t *ni, lnet_msg_t *msg);
void lnet_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
- unsigned int offset, unsigned int mlen, unsigned int rlen);
+ unsigned int offset, unsigned int mlen, unsigned int rlen);
void lnet_ni_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg,
int delayed, unsigned int offset,
unsigned int mlen, unsigned int rlen);
void lnet_counters_get(lnet_counters_t *counters);
void lnet_counters_reset(void);
-unsigned int lnet_iov_nob (unsigned int niov, struct iovec *iov);
-int lnet_extract_iov (int dst_niov, struct iovec *dst,
- int src_niov, struct iovec *src,
- unsigned int offset, unsigned int len);
+unsigned int lnet_iov_nob(unsigned int niov, struct kvec *iov);
+int lnet_extract_iov(int dst_niov, struct kvec *dst,
+ int src_niov, struct kvec *src,
+ unsigned int offset, unsigned int len);
unsigned int lnet_kiov_nob (unsigned int niov, lnet_kiov_t *iov);
-int lnet_extract_kiov (int dst_niov, lnet_kiov_t *dst,
- int src_niov, lnet_kiov_t *src,
- unsigned int offset, unsigned int len);
-
-void lnet_copy_iov2iov (unsigned int ndiov, struct iovec *diov,
- unsigned int doffset,
- unsigned int nsiov, struct iovec *siov,
- unsigned int soffset, unsigned int nob);
-void lnet_copy_kiov2iov (unsigned int niov, struct iovec *iov,
- unsigned int iovoffset,
- unsigned int nkiov, lnet_kiov_t *kiov,
- unsigned int kiovoffset, unsigned int nob);
-void lnet_copy_iov2kiov (unsigned int nkiov, lnet_kiov_t *kiov,
- unsigned int kiovoffset,
- unsigned int niov, struct iovec *iov,
- unsigned int iovoffset, unsigned int nob);
-void lnet_copy_kiov2kiov (unsigned int ndkiov, lnet_kiov_t *dkiov,
- unsigned int doffset,
- unsigned int nskiov, lnet_kiov_t *skiov,
- unsigned int soffset, unsigned int nob);
-
-static inline void
-lnet_copy_iov2flat(int dlen, __user void *dest, unsigned int doffset,
- unsigned int nsiov, struct iovec *siov, unsigned int soffset,
- unsigned int nob)
+int lnet_extract_kiov(int dst_niov, lnet_kiov_t *dst,
+ int src_niov, lnet_kiov_t *src,
+ unsigned int offset, unsigned int len);
+
+void lnet_copy_iov2iov(unsigned int ndiov, struct kvec *diov,
+ unsigned int doffset,
+ unsigned int nsiov, struct kvec *siov,
+ unsigned int soffset, unsigned int nob);
+void lnet_copy_kiov2iov(unsigned int niov, struct kvec *iov,
+ unsigned int iovoffset,
+ unsigned int nkiov, lnet_kiov_t *kiov,
+ unsigned int kiovoffset, unsigned int nob);
+void lnet_copy_iov2kiov(unsigned int nkiov, lnet_kiov_t *kiov,
+ unsigned int kiovoffset,
+ unsigned int niov, struct kvec *iov,
+ unsigned int iovoffset, unsigned int nob);
+void lnet_copy_kiov2kiov(unsigned int ndkiov, lnet_kiov_t *dkiov,
+ unsigned int doffset,
+ unsigned int nskiov, lnet_kiov_t *skiov,
+ unsigned int soffset, unsigned int nob);
+
+static inline void
+lnet_copy_iov2flat(int dlen, void *dest, unsigned int doffset,
+ unsigned int nsiov, struct kvec *siov, unsigned int soffset,
+ unsigned int nob)
{
- struct iovec diov = {/*.iov_base = */ dest, /*.iov_len = */ dlen};
+ struct kvec diov = {/*.iov_base = */ dest, /*.iov_len = */ dlen};
- lnet_copy_iov2iov(1, &diov, doffset,
- nsiov, siov, soffset, nob);
+ lnet_copy_iov2iov(1, &diov, doffset,
+ nsiov, siov, soffset, nob);
}
static inline void
-lnet_copy_kiov2flat(int dlen, void __user *dest, unsigned int doffset,
+lnet_copy_kiov2flat(int dlen, void *dest, unsigned int doffset,
unsigned int nsiov, lnet_kiov_t *skiov,
unsigned int soffset, unsigned int nob)
{
- struct iovec diov = {/* .iov_base = */ dest, /* .iov_len = */ dlen};
+ struct kvec diov = {/* .iov_base = */ dest, /* .iov_len = */ dlen};
- lnet_copy_kiov2iov(1, &diov, doffset,
- nsiov, skiov, soffset, nob);
+ lnet_copy_kiov2iov(1, &diov, doffset,
+ nsiov, skiov, soffset, nob);
}
static inline void
-lnet_copy_flat2iov(unsigned int ndiov, struct iovec *diov, unsigned int doffset,
- int slen, void __user *src, unsigned int soffset,
+lnet_copy_flat2iov(unsigned int ndiov, struct kvec *diov, unsigned int doffset,
+ int slen, void *src, unsigned int soffset,
unsigned int nob)
{
- struct iovec siov = {/*.iov_base = */ src, /*.iov_len = */slen};
- lnet_copy_iov2iov(ndiov, diov, doffset,
- 1, &siov, soffset, nob);
+ struct kvec siov = {/*.iov_base = */ src, /*.iov_len = */slen};
+ lnet_copy_iov2iov(ndiov, diov, doffset,
+ 1, &siov, soffset, nob);
}
/* Copy @nob bytes from a flat buffer of @slen bytes into a kiov (page) vector. */
static inline void
lnet_copy_flat2kiov(unsigned int ndiov, lnet_kiov_t *dkiov,
		    unsigned int doffset, int slen, void *src,
		    unsigned int soffset, unsigned int nob)
{
	struct kvec siov = { .iov_base = src, .iov_len = slen };

	lnet_copy_iov2kiov(ndiov, dkiov, doffset, 1, &siov, soffset, nob);
}
void lnet_me_unlink(lnet_me_t *me);
void lnet_register_lnd(lnd_t *lnd);
void lnet_unregister_lnd(lnd_t *lnd);
-int lnet_set_ip_niaddr (lnet_ni_t *ni);
-#ifdef __KERNEL__
-int lnet_connect(cfs_socket_t **sockp, lnet_nid_t peer_nid,
- __u32 local_ip, __u32 peer_ip, int peer_port);
+int lnet_connect(struct socket **sockp, lnet_nid_t peer_nid,
+ __u32 local_ip, __u32 peer_ip, int peer_port);
void lnet_connect_console_error(int rc, lnet_nid_t peer_nid,
__u32 peer_ip, int port);
-int lnet_count_acceptor_nis(void);
+int lnet_count_acceptor_nets(void);
int lnet_acceptor_timeout(void);
int lnet_acceptor_port(void);
-#else
-void lnet_router_checker(void);
-#endif
-
-#ifdef HAVE_LIBPTHREAD
-int lnet_count_acceptor_nis(void);
-int lnet_acceptor_port(void);
-#endif
-
int lnet_acceptor_start(void);
void lnet_acceptor_stop(void);
-void lnet_get_tunables(void);
+int lnet_ipif_query(char *name, int *up, __u32 *ip, __u32 *mask);
+int lnet_ipif_enumerate(char ***names);
+void lnet_ipif_free_enumeration(char **names, int n);
+int lnet_sock_setbuf(struct socket *socket, int txbufsize, int rxbufsize);
+int lnet_sock_getbuf(struct socket *socket, int *txbufsize, int *rxbufsize);
+int lnet_sock_getaddr(struct socket *socket, bool remote, __u32 *ip, int *port);
+int lnet_sock_write(struct socket *sock, void *buffer, int nob, int timeout);
+int lnet_sock_read(struct socket *sock, void *buffer, int nob, int timeout);
+
+int lnet_sock_listen(struct socket **sockp, __u32 ip, int port, int backlog);
+int lnet_sock_accept(struct socket **newsockp, struct socket *sock);
+int lnet_sock_connect(struct socket **sockp, int *fatal,
+ __u32 local_ip, int local_port,
+ __u32 peer_ip, int peer_port);
+
int lnet_peers_start_down(void);
-int lnet_peer_buffer_credits(lnet_ni_t *ni);
+int lnet_peer_buffer_credits(struct lnet_net *net);
int lnet_router_checker_start(void);
void lnet_router_checker_stop(void);
-void lnet_router_ni_update_locked(lnet_peer_t *gw, __u32 net);
-void lnet_swap_pinginfo(lnet_ping_info_t *info);
+void lnet_router_ni_update_locked(struct lnet_peer_ni *gw, __u32 net);
+void lnet_swap_pinginfo(struct lnet_ping_info *info);
int lnet_parse_ip2nets(char **networksp, char *ip2nets);
int lnet_parse_routes(char *route_str, int *im_a_router);
-int lnet_parse_networks(struct list_head *nilist, char *networks);
-int lnet_net_unique(__u32 net, struct list_head *nilist);
-
-int lnet_nid2peer_locked(lnet_peer_t **lpp, lnet_nid_t nid, int cpt);
-lnet_peer_t *lnet_find_peer_locked(struct lnet_peer_table *ptable,
- lnet_nid_t nid);
+int lnet_parse_networks(struct list_head *nilist, char *networks,
+ bool use_tcp_bonding);
+bool lnet_net_unique(__u32 net_id, struct list_head *nilist,
+ struct lnet_net **net);
+bool lnet_ni_unique_net(struct list_head *nilist, char *iface);
+void lnet_incr_dlc_seq(void);
+__u32 lnet_get_dlc_seq_locked(void);
+
+struct lnet_peer_ni *lnet_get_next_peer_ni_locked(struct lnet_peer *peer,
+ struct lnet_peer_net *peer_net,
+ struct lnet_peer_ni *prev);
+int lnet_find_or_create_peer_locked(lnet_nid_t dst_nid, int cpt,
+ struct lnet_peer **peer);
+int lnet_nid2peerni_locked(struct lnet_peer_ni **lpp, lnet_nid_t nid, int cpt);
+struct lnet_peer_ni *lnet_find_peer_ni_locked(lnet_nid_t nid, int cpt);
void lnet_peer_tables_cleanup(lnet_ni_t *ni);
void lnet_peer_tables_destroy(void);
int lnet_peer_tables_create(void);
void lnet_debug_peer(lnet_nid_t nid);
+struct lnet_peer_net *lnet_peer_get_net_locked(struct lnet_peer *peer,
+ __u32 net_id);
+bool lnet_peer_is_ni_pref_locked(struct lnet_peer_ni *lpni,
+ struct lnet_ni *ni);
int lnet_get_peer_info(__u32 peer_index, __u64 *nid,
char alivness[LNET_MAX_STR_LEN],
__u32 *cpt_iter, __u32 *refcount,
__u32 *peer_rtr_credits, __u32 *peer_min_rtr_credtis,
__u32 *peer_tx_qnob);
+static inline bool
+lnet_is_peer_ni_healthy_locked(struct lnet_peer_ni *lpni)
+{
+ return lpni->lpni_healthy;
+}
+
static inline void
-lnet_peer_set_alive(lnet_peer_t *lp)
+lnet_set_peer_ni_health_locked(struct lnet_peer_ni *lpni, bool health)
{
- lp->lp_last_alive = lp->lp_last_query = cfs_time_current();
- if (!lp->lp_alive)
- lnet_notify_locked(lp, 0, 1, lp->lp_last_alive);
+ lpni->lpni_healthy = health;
}
-#ifndef __KERNEL__
-static inline int
-lnet_parse_int_tunable(int *value, char *name)
+static inline bool
+lnet_is_peer_net_healthy_locked(struct lnet_peer_net *peer_net)
{
- char *env = getenv(name);
- char *end;
+ struct lnet_peer_ni *lpni;
- if (env == NULL)
- return 0;
+ list_for_each_entry(lpni, &peer_net->lpn_peer_nis,
+ lpni_on_peer_net_list) {
+ if (lnet_is_peer_ni_healthy_locked(lpni))
+ return true;
+ }
- *value = strtoull(env, &end, 0);
- if (*end == 0)
- return 0;
+ return false;
+}
- CERROR("Can't parse tunable %s=%s\n", name, env);
- return -EINVAL;
+static inline bool
+lnet_is_peer_healthy_locked(struct lnet_peer *peer)
+{
+ struct lnet_peer_net *peer_net;
+
+ list_for_each_entry(peer_net, &peer->lp_peer_nets, lpn_on_peer_list) {
+ if (lnet_is_peer_net_healthy_locked(peer_net))
+ return true;
+ }
+
+ return false;
+}
+
+static inline void
+lnet_peer_set_alive(struct lnet_peer_ni *lp)
+{
+ lp->lpni_last_alive = lp->lpni_last_query = cfs_time_current();
+ if (!lp->lpni_alive)
+ lnet_notify_locked(lp, 0, 1, lp->lpni_last_alive);
}
-#endif
#endif