LN_CONFIG_CDEBUG
LC_QUOTA
-LN_CONFIG_USERSPACE
-
LB_PATH_SNMP
LB_PATH_LUSTREIOKIT
]) # LN_CONFIG_GNILND
#
-# LN_CONFIG_USERSPACE
-#
-# This is defined but empty because it is called from
-# build/autconf/lustre-build.m4 which is shared by all branches.
-#
-AC_DEFUN([LN_CONFIG_USERSPACE], [
-]) # LN_CONFIG_USERSPACE
-
-#
# LN_CONFIG_TCP_SENDPAGE
#
# 2.6.36 tcp_sendpage() first parameter is 'struct sock' instead of 'struct socket'.
* LNet is an asynchronous message-passing API, which provides an unreliable
* connectionless service that can't guarantee any order. It supports OFA IB,
* TCP/IP, and Cray Portals, and routes between heterogeneous networks.
- *
- * LNet can run both in OS kernel space and in userspace as a library.
* @{
*/
/** \defgroup lnet_init_fini Initialization and cleanup
* The LNet must be properly initialized before any LNet calls can be made.
* @{ */
-int LNetInit(void);
-void LNetFini(void);
-
int LNetNIInit(lnet_pid_t requested_pid);
int LNetNIFini(void);
/** @} lnet_init_fini */
int LNetSetLazyPortal(int portal);
int LNetClearLazyPortal(int portal);
int LNetCtl(unsigned int cmd, void *arg);
-int LNetSetAsync(lnet_process_id_t id, int nasync);
-#ifndef __KERNEL__
-/* Temporary workaround to allow uOSS and test programs force server
- * mode in userspace. See comments near ln_server_mode_flag in
- * lnet/lib-types.h */
-
-void lnet_server_mode();
-#endif
/** @} lnet_misc */
/** @} lnet */
#ifndef __LNET_LIB_LNET_H__
#define __LNET_LIB_LNET_H__
-#define LNET_ROUTER
-
#include <libcfs/libcfs.h>
#include <lnet/types.h>
#include <lnet/lnet.h>
extern lnet_t the_lnet; /* THE network */
-#if !defined(__KERNEL__) || defined(LNET_USE_LIB_FREELIST)
-/* 1 CPT, simplify implementation... */
-# define LNET_CPT_MAX_BITS 0
-
-#else /* KERNEL and no freelist */
-
-# if (BITS_PER_LONG == 32)
+#if (BITS_PER_LONG == 32)
/* 2 CPTs, allowing more CPTs might make us under memory pressure */
-# define LNET_CPT_MAX_BITS 1
+# define LNET_CPT_MAX_BITS 1
-# else /* 64-bit system */
+#else /* 64-bit system */
/*
* 256 CPTs for thousands of CPUs, allowing more CPTs might make us
* under risk of consuming all lh_cookie.
*/
-# define LNET_CPT_MAX_BITS 8
-# endif /* BITS_PER_LONG == 32 */
-#endif
+# define LNET_CPT_MAX_BITS 8
+#endif /* BITS_PER_LONG == 32 */
/* max allowed CPT number */
#define LNET_CPT_MAX (1 << LNET_CPT_MAX_BITS)
#define LNET_LOCK() lnet_net_lock(LNET_LOCK_EX)
#define LNET_UNLOCK() lnet_net_unlock(LNET_LOCK_EX)
-#ifdef __KERNEL__
-
#define lnet_ptl_lock(ptl) spin_lock(&(ptl)->ptl_lock)
#define lnet_ptl_unlock(ptl) spin_unlock(&(ptl)->ptl_lock)
#define lnet_eq_wait_lock() spin_lock(&the_lnet.ln_eq_wait_lock)
#define LNET_MUTEX_LOCK(m) mutex_lock(m)
#define LNET_MUTEX_UNLOCK(m) mutex_unlock(m)
-#else /* !__KERNEL__ */
-
-# ifndef HAVE_LIBPTHREAD
-#define LNET_SINGLE_THREADED_LOCK(l) \
-do { \
- LASSERT ((l) == 0); \
- (l) = 1; \
-} while (0)
-
-#define LNET_SINGLE_THREADED_UNLOCK(l) \
-do { \
- LASSERT ((l) == 1); \
- (l) = 0; \
-} while (0)
-
-#define LNET_MUTEX_LOCK(m) LNET_SINGLE_THREADED_LOCK(*(m))
-#define LNET_MUTEX_UNLOCK(m) LNET_SINGLE_THREADED_UNLOCK(*(m))
-
-#define lnet_ptl_lock(ptl) \
- LNET_SINGLE_THREADED_LOCK((ptl)->ptl_lock)
-#define lnet_ptl_unlock(ptl) \
- LNET_SINGLE_THREADED_UNLOCK((ptl)->ptl_lock)
-
-#define lnet_eq_wait_lock() \
- LNET_SINGLE_THREADED_LOCK(the_lnet.ln_eq_wait_lock)
-#define lnet_eq_wait_unlock() \
- LNET_SINGLE_THREADED_UNLOCK(the_lnet.ln_eq_wait_lock)
-
-#define lnet_ni_lock(ni) \
- LNET_SINGLE_THREADED_LOCK((ni)->ni_lock)
-#define lnet_ni_unlock(ni) \
- LNET_SINGLE_THREADED_UNLOCK((ni)->ni_lock)
-
-# else /* HAVE_LIBPTHREAD */
-
-#define LNET_MUTEX_LOCK(m) pthread_mutex_lock(m)
-#define LNET_MUTEX_UNLOCK(m) pthread_mutex_unlock(m)
-
-#define lnet_ptl_lock(ptl) pthread_mutex_lock(&(ptl)->ptl_lock)
-#define lnet_ptl_unlock(ptl) pthread_mutex_unlock(&(ptl)->ptl_lock)
-
-#define lnet_eq_wait_lock() pthread_mutex_lock(&the_lnet.ln_eq_wait_lock)
-#define lnet_eq_wait_unlock() pthread_mutex_unlock(&the_lnet.ln_eq_wait_lock)
-
-#define lnet_ni_lock(ni) pthread_mutex_lock(&(ni)->ni_lock)
-#define lnet_ni_unlock(ni) pthread_mutex_unlock(&(ni)->ni_lock)
-
-# endif /* HAVE_LIBPTHREAD */
-#endif /* __KERNEL__ */
-
#define MAX_PORTALS 64
-/* these are only used by code with LNET_USE_LIB_FREELIST, but we still
- * exported them to !LNET_USE_LIB_FREELIST for easy implemetation */
-#define LNET_FL_MAX_MES 2048
-#define LNET_FL_MAX_MDS 2048
-#define LNET_FL_MAX_EQS 512
-#define LNET_FL_MAX_MSGS 2048 /* Outstanding messages */
-
-#ifdef LNET_USE_LIB_FREELIST
-
-int lnet_freelist_init(lnet_freelist_t *fl, int n, int size);
-void lnet_freelist_fini(lnet_freelist_t *fl);
-
-static inline void *
-lnet_freelist_alloc (lnet_freelist_t *fl)
-{
- /* ALWAYS called with liblock held */
- lnet_freeobj_t *o;
-
- if (list_empty(&fl->fl_list))
- return NULL;
-
- o = list_entry(fl->fl_list.next, lnet_freeobj_t, fo_list);
- list_del(&o->fo_list);
- return (void *)&o->fo_contents;
-}
-
-static inline void
-lnet_freelist_free (lnet_freelist_t *fl, void *obj)
-{
- /* ALWAYS called with liblock held */
- lnet_freeobj_t *o = list_entry(obj, lnet_freeobj_t, fo_contents);
-
- list_add(&o->fo_list, &fl->fl_list);
-}
-
-
static inline lnet_eq_t *
lnet_eq_alloc (void)
{
- /* NEVER called with resource lock held */
- struct lnet_res_container *rec = &the_lnet.ln_eq_container;
- lnet_eq_t *eq;
-
- LASSERT(LNET_CPT_NUMBER == 1);
-
- lnet_res_lock(0);
- eq = (lnet_eq_t *)lnet_freelist_alloc(&rec->rec_freelist);
- lnet_res_unlock(0);
-
- return eq;
-}
-
-static inline void
-lnet_eq_free_locked(lnet_eq_t *eq)
-{
- /* ALWAYS called with resource lock held */
- struct lnet_res_container *rec = &the_lnet.ln_eq_container;
-
- LASSERT(LNET_CPT_NUMBER == 1);
- lnet_freelist_free(&rec->rec_freelist, eq);
-}
-
-static inline void
-lnet_eq_free(lnet_eq_t *eq)
-{
- lnet_res_lock(0);
- lnet_eq_free_locked(eq);
- lnet_res_unlock(0);
-}
-
-static inline lnet_libmd_t *
-lnet_md_alloc (lnet_md_t *umd)
-{
- /* NEVER called with resource lock held */
- struct lnet_res_container *rec = the_lnet.ln_md_containers[0];
- lnet_libmd_t *md;
-
- LASSERT(LNET_CPT_NUMBER == 1);
-
- lnet_res_lock(0);
- md = (lnet_libmd_t *)lnet_freelist_alloc(&rec->rec_freelist);
- lnet_res_unlock(0);
-
- if (md != NULL)
- INIT_LIST_HEAD(&md->md_list);
-
- return md;
-}
-
-static inline void
-lnet_md_free_locked(lnet_libmd_t *md)
-{
- /* ALWAYS called with resource lock held */
- struct lnet_res_container *rec = the_lnet.ln_md_containers[0];
-
- LASSERT(LNET_CPT_NUMBER == 1);
- lnet_freelist_free(&rec->rec_freelist, md);
-}
-
-static inline void
-lnet_md_free(lnet_libmd_t *md)
-{
- lnet_res_lock(0);
- lnet_md_free_locked(md);
- lnet_res_unlock(0);
-}
-
-static inline lnet_me_t *
-lnet_me_alloc(void)
-{
- /* NEVER called with resource lock held */
- struct lnet_res_container *rec = the_lnet.ln_me_containers[0];
- lnet_me_t *me;
-
- LASSERT(LNET_CPT_NUMBER == 1);
-
- lnet_res_lock(0);
- me = (lnet_me_t *)lnet_freelist_alloc(&rec->rec_freelist);
- lnet_res_unlock(0);
-
- return me;
-}
-
-static inline void
-lnet_me_free_locked(lnet_me_t *me)
-{
- /* ALWAYS called with resource lock held */
- struct lnet_res_container *rec = the_lnet.ln_me_containers[0];
-
- LASSERT(LNET_CPT_NUMBER == 1);
- lnet_freelist_free(&rec->rec_freelist, me);
-}
-
-static inline void
-lnet_me_free(lnet_me_t *me)
-{
- lnet_res_lock(0);
- lnet_me_free_locked(me);
- lnet_res_unlock(0);
-}
-
-static inline lnet_msg_t *
-lnet_msg_alloc (void)
-{
- /* NEVER called with network lock held */
- struct lnet_msg_container *msc = the_lnet.ln_msg_containers[0];
- lnet_msg_t *msg;
-
- LASSERT(LNET_CPT_NUMBER == 1);
-
- lnet_net_lock(0);
- msg = (lnet_msg_t *)lnet_freelist_alloc(&msc->msc_freelist);
- lnet_net_unlock(0);
-
- if (msg != NULL) {
- /* NULL pointers, clear flags etc */
- memset(msg, 0, sizeof(*msg));
- }
- return msg;
-}
-
-static inline void
-lnet_msg_free_locked(lnet_msg_t *msg)
-{
- /* ALWAYS called with network lock held */
- struct lnet_msg_container *msc = the_lnet.ln_msg_containers[0];
-
- LASSERT(LNET_CPT_NUMBER == 1);
- LASSERT(!msg->msg_onactivelist);
- lnet_freelist_free(&msc->msc_freelist, msg);
-}
-
-static inline void
-lnet_msg_free (lnet_msg_t *msg)
-{
- lnet_net_lock(0);
- lnet_msg_free_locked(msg);
- lnet_net_unlock(0);
-}
-
-#else /* !LNET_USE_LIB_FREELIST */
-
-static inline lnet_eq_t *
-lnet_eq_alloc (void)
-{
- /* NEVER called with liblock held */
lnet_eq_t *eq;
LIBCFS_ALLOC(eq, sizeof(*eq));
static inline void
lnet_eq_free(lnet_eq_t *eq)
{
- /* ALWAYS called with resource lock held */
LIBCFS_FREE(eq, sizeof(*eq));
}
static inline lnet_libmd_t *
lnet_md_alloc (lnet_md_t *umd)
{
- /* NEVER called with liblock held */
lnet_libmd_t *md;
unsigned int size;
unsigned int niov;
static inline void
lnet_md_free(lnet_libmd_t *md)
{
- /* ALWAYS called with resource lock held */
unsigned int size;
if ((md->md_options & LNET_MD_KIOV) != 0)
static inline lnet_me_t *
lnet_me_alloc (void)
{
- /* NEVER called with liblock held */
lnet_me_t *me;
LIBCFS_ALLOC(me, sizeof(*me));
static inline void
lnet_me_free(lnet_me_t *me)
{
- /* ALWAYS called with resource lock held */
LIBCFS_FREE(me, sizeof(*me));
}
static inline lnet_msg_t *
lnet_msg_alloc(void)
{
- /* NEVER called with liblock held */
lnet_msg_t *msg;
LIBCFS_ALLOC(msg, sizeof(*msg));
static inline void
lnet_msg_free(lnet_msg_t *msg)
{
- /* ALWAYS called with network lock held */
LASSERT(!msg->msg_onactivelist);
LIBCFS_FREE(msg, sizeof(*msg));
}
-#define lnet_eq_free_locked(eq) lnet_eq_free(eq)
-#define lnet_md_free_locked(md) lnet_md_free(md)
-#define lnet_me_free_locked(me) lnet_me_free(me)
-#define lnet_msg_free_locked(msg) lnet_msg_free(msg)
-
-#endif /* LNET_USE_LIB_FREELIST */
-
lnet_libhandle_t *lnet_res_lh_lookup(struct lnet_res_container *rec,
__u64 cookie);
void lnet_res_lh_initialize(struct lnet_res_container *rec,
extern lnd_t the_lolnd;
extern int avoid_asym_router_failure;
-#ifndef __KERNEL__
-/* unconditional registration */
-#define LNET_REGISTER_ULND(lnd) \
-do { \
- extern lnd_t lnd; \
- \
- lnet_register_lnd(&(lnd)); \
-} while (0)
-
-/* conditional registration */
-#define LNET_REGISTER_ULND_IF_PRESENT(lnd) \
-do { \
- extern lnd_t lnd __attribute__ ((weak, alias("the_lolnd"))); \
- \
- if (&(lnd) != &the_lolnd) \
- lnet_register_lnd(&(lnd)); \
-} while (0)
-#endif
-
extern int lnet_cpt_of_nid_locked(lnet_nid_t nid);
extern int lnet_cpt_of_nid(lnet_nid_t nid);
extern lnet_ni_t *lnet_nid2ni_locked(lnet_nid_t nid, int cpt);
extern lnet_ni_t *lnet_net2ni_locked(__u32 net, int cpt);
extern lnet_ni_t *lnet_net2ni(__u32 net);
+int lnet_init(void);
+void lnet_fini(void);
+
extern int portal_rotor;
int lnet_notify(lnet_ni_t *ni, lnet_nid_t peer, int alive, cfs_time_t when);
void lnet_unregister_lnd(lnd_t *lnd);
int lnet_set_ip_niaddr (lnet_ni_t *ni);
-#ifdef __KERNEL__
int lnet_connect(cfs_socket_t **sockp, lnet_nid_t peer_nid,
__u32 local_ip, __u32 peer_ip, int peer_port);
void lnet_connect_console_error(int rc, lnet_nid_t peer_nid,
int lnet_count_acceptor_nis(void);
int lnet_acceptor_timeout(void);
int lnet_acceptor_port(void);
-#else
-void lnet_router_checker(void);
-#endif
-
-#ifdef HAVE_LIBPTHREAD
-int lnet_count_acceptor_nis(void);
-int lnet_acceptor_port(void);
-#endif
-
int lnet_acceptor_start(void);
void lnet_acceptor_stop(void);
-void lnet_get_tunables(void);
int lnet_peers_start_down(void);
int lnet_peer_buffer_credits(lnet_ni_t *ni);
lnet_notify_locked(lp, 0, 1, lp->lp_last_alive);
}
-#ifndef __KERNEL__
-static inline int
-lnet_parse_int_tunable(int *value, char *name)
-{
- char *env = getenv(name);
- char *end;
-
- if (env == NULL)
- return 0;
-
- *value = strtoull(env, &end, 0);
- if (*end == 0)
- return 0;
-
- CERROR("Can't parse tunable %s=%s\n", name, env);
- return -EINVAL;
-}
-#endif
-
#endif
# include <linux/uio.h>
# include <linux/types.h>
#else /* !__KERNEL__ */
-# define LNET_USE_LIB_FREELIST
# include <sys/types.h>
#endif /* __KERNEL__ */
#define LNET_MD_FLAG_AUTO_UNLINK (1 << 1)
#define LNET_MD_FLAG_ABORTED (1 << 2)
-#ifdef LNET_USE_LIB_FREELIST
-typedef struct
-{
- /* single contiguous array of objects */
- void *fl_objs;
- /* the number of them */
- int fl_nobjs;
- /* the size (including overhead) of each of them */
- int fl_objsize;
- /* where they are enqueued */
- struct list_head fl_list;
-} lnet_freelist_t;
-
-typedef struct
-{
- struct list_head fo_list; /* enqueue on fl_list */
- void *fo_contents; /* aligned contents */
-} lnet_freeobj_t;
-#endif
-
typedef struct {
/* info about peers we are trying to fail */
struct list_head tp_list; /* ln_test_peers */
/* query of peer aliveness */
void (*lnd_query)(struct lnet_ni *ni, lnet_nid_t peer, cfs_time_t *when);
-#if defined(__KERNEL__) || defined(HAVE_LIBPTHREAD)
/* accept a new connection */
int (*lnd_accept)(struct lnet_ni *ni, cfs_socket_t *sock);
-#endif
-
-#ifndef __KERNEL__
- /* wait for something to happen */
- void (*lnd_wait)(struct lnet_ni *ni, int milliseconds);
-
- /* ensure non-RDMA messages can be received outside liblustre */
- int (*lnd_setasync)(struct lnet_ni *ni, lnet_process_id_t id, int nasync);
-#endif
} lnd_t;
#define LNET_NI_STATUS_UP 0x15aac0de
#define LNET_MAX_INTERFACES 16
typedef struct lnet_ni {
-#ifdef __KERNEL__
spinlock_t ni_lock;
-#else
-# ifndef HAVE_LIBPTHREAD
- int ni_lock;
-# else
- pthread_mutex_t ni_lock;
-# endif
-#endif
struct list_head ni_list; /* chain on ln_nis */
struct list_head ni_cptlist; /* chain on ln_nis_cpt */
int ni_maxtxcredits; /* # tx credits */
#define LNET_PTL_ROTOR_HASH_RT 3
typedef struct lnet_portal {
-#ifdef __KERNEL__
spinlock_t ptl_lock;
-#else
-# ifndef HAVE_LIBPTHREAD
- int ptl_lock;
-# else
- pthread_mutex_t ptl_lock;
-# endif
-#endif
unsigned int ptl_index; /* portal ID, reserved */
/* flags on this portal: lazy, unique... */
unsigned int ptl_options;
__u64 rec_lh_cookie; /* cookie generator */
struct list_head rec_active; /* active resource list */
struct list_head *rec_lh_hash; /* handle hash */
-#ifdef LNET_USE_LIB_FREELIST
- lnet_freelist_t rec_freelist; /* freelist for resources */
-#endif
};
/* message container */
struct list_head msc_active; /* active message list */
/* threads doing finalization */
void **msc_finalizers;
-#ifdef LNET_USE_LIB_FREELIST
- lnet_freelist_t msc_freelist; /* freelist for messages */
-#endif
};
/* Router Checker states */
/* Event Queue container */
struct lnet_res_container ln_eq_container;
-#ifdef __KERNEL__
wait_queue_head_t ln_eq_waitq;
spinlock_t ln_eq_wait_lock;
-#else
-# ifndef HAVE_LIBPTHREAD
- int ln_eq_wait_lock;
-# else
- pthread_cond_t ln_eq_cond;
- pthread_mutex_t ln_eq_wait_lock;
-# endif
-#endif
+
unsigned int ln_remote_nets_hbits;
/* protect NI, peer table, credits, routers, rtrbuf... */
/* dying LND instances */
struct list_head ln_nis_zombie;
lnet_ni_t *ln_loni; /* the loopback NI */
- /* NI to wait for events in */
- lnet_ni_t *ln_eq_waitni;
/* remote networks with routes to them */
struct list_head *ln_remote_nets_hash;
struct list_head ln_rcd_deathrow;
/* rcd ready for free */
struct list_head ln_rcd_zombie;
-#ifdef __KERNEL__
/* serialise startup/shutdown */
struct semaphore ln_rc_signal;
struct mutex ln_api_mutex;
struct mutex ln_lnd_mutex;
- struct mutex ln_delay_mutex;
-#else
-# ifndef HAVE_LIBPTHREAD
- int ln_api_mutex;
- int ln_lnd_mutex;
-# else
- pthread_mutex_t ln_api_mutex;
- pthread_mutex_t ln_lnd_mutex;
-# endif
-#endif
- int ln_init; /* LNetInit() called? */
/* Have I called LNetNIInit myself? */
int ln_niinit_self;
/* LNetNIInit/LNetNIFini counter */
* the list, the router checker will sleep on this queue. when
* routes are added the thread will wake up */
wait_queue_head_t ln_rc_waitq;
-
-#ifndef __KERNEL__
- /* Temporary workaround to allow uOSS and test programs force
- * server mode in userspace. The only place where we use it is
- * lnet_prepare(). The only way to turn this flag on is to
- * call lnet_server_mode() */
- int ln_server_mode_flag;
-#endif
} lnet_t;
#endif
#define DEBUG_SUBSYSTEM S_LNET
#include <lnet/lib-lnet.h>
-#if defined(__KERNEL__) || defined(HAVE_LIBPTHREAD)
-
static int accept_port = 988;
static int accept_backlog = 127;
static int accept_timeout = 5;
magic == __swab32(constant));
}
-#ifdef __KERNEL__
-
EXPORT_SYMBOL(lnet_acceptor_port);
static char *accept = "secure";
}
EXPORT_SYMBOL(lnet_connect);
-#else /* below is multi-threaded user-space code */
-
-static char *accept_type = "secure";
-
-int
-lnet_acceptor_get_tunables()
-{
- int rc;
- char *env = getenv("LNET_ACCEPT");
-
- if (env != NULL)
- accept_type = env;
-
- rc = lnet_parse_int_tunable(&accept_port, "LNET_ACCEPT_PORT");
-
- if (rc != 0)
- return rc;
-
- rc = lnet_parse_int_tunable(&accept_backlog, "LNET_ACCEPT_BACKLOG");
-
- if (rc != 0)
- return rc;
-
- rc = lnet_parse_int_tunable(&accept_timeout, "LNET_ACCEPT_TIMEOUT");
-
- if (rc != 0)
- return rc;
-
- CDEBUG(D_NET, "accept_type = %s\n", accept_type);
- CDEBUG(D_NET, "accept_port = %d\n", accept_port);
- CDEBUG(D_NET, "accept_backlog = %d\n", accept_backlog);
- CDEBUG(D_NET, "accept_timeout = %d\n", accept_timeout);
- return 0;
-}
-
-#endif /* __KERNEL__ */
-
-/* Below is the code common for both kernel and MT user-space */
-
static int
lnet_accept(cfs_socket_t *sock, __u32 magic)
{
if (rc != 0)
return rc;
-#ifndef __KERNEL__
- /* Do nothing if we're liblustre clients */
- if ((the_lnet.ln_pid & LNET_PID_USERFLAG) != 0)
- return 0;
-#endif
-
init_completion(&lnet_acceptor_state.pta_signal);
rc = accept2secure(accept_type, &secure);
if (rc <= 0) {
fini_completion(&lnet_acceptor_state.pta_signal);
}
-
-#else /* single-threaded user-space */
-int
-lnet_acceptor_start(void)
-{
- return 0;
-}
-
-void
-lnet_acceptor_stop(void)
-{
-}
-#endif /* defined(__KERNEL__) || defined(HAVE_LIBPTHREAD) */
#define DEBUG_SUBSYSTEM S_LNET
#include <lnet/lib-lnet.h>
#include <lnet/lib-dlc.h>
-#ifdef __KERNEL__
#include <linux/log2.h>
-#endif
-#ifdef __KERNEL__
#define D_LNI D_CONSOLE
-#else
-#define D_LNI D_CONFIG
-#endif
lnet_t the_lnet; /* THE state of the network */
EXPORT_SYMBOL(the_lnet);
-#ifdef __KERNEL__
-
static char *ip2nets = "";
CFS_MODULE_PARM(ip2nets, "s", charp, 0444,
"LNET network <- IP table");
{
}
-#else
-
-static char *
-lnet_get_routes(void)
-{
- char *str = getenv("LNET_ROUTES");
-
- return (str == NULL) ? "" : str;
-}
-
-static char *
-lnet_get_networks (void)
-{
- static char default_networks[256];
- char *networks = getenv("LNET_NETWORKS");
- char *str;
- char *sep;
- int len;
- int nob;
- struct list_head *tmp;
-
- if (networks != NULL)
- return networks;
-
- /* In userland, the default 'networks=' is the list of known net types */
- len = sizeof(default_networks);
- str = default_networks;
- *str = 0;
- sep = "";
-
- list_for_each(tmp, &the_lnet.ln_lnds) {
- lnd_t *lnd = list_entry(tmp, lnd_t, lnd_list);
-
- nob = snprintf(str, len, "%s%s", sep,
- libcfs_lnd2str(lnd->lnd_type));
- if (nob >= len) {
- /* overflowed the string; leave it where it was */
- *str = 0;
- break;
- }
- len -= nob;
- str += nob;
- sep = ",";
- }
-
- return default_networks;
-}
-
-# ifndef HAVE_LIBPTHREAD
-
-static void lnet_init_locks(void)
-{
- the_lnet.ln_eq_wait_lock = 0;
- the_lnet.ln_lnd_mutex = 0;
- the_lnet.ln_api_mutex = 0;
-}
-
-static void lnet_fini_locks(void)
-{
- LASSERT(the_lnet.ln_api_mutex == 0);
- LASSERT(the_lnet.ln_lnd_mutex == 0);
- LASSERT(the_lnet.ln_eq_wait_lock == 0);
-}
-
-# else
-
-static void lnet_init_locks(void)
-{
- pthread_cond_init(&the_lnet.ln_eq_cond, NULL);
- pthread_mutex_init(&the_lnet.ln_eq_wait_lock, NULL);
- pthread_mutex_init(&the_lnet.ln_lnd_mutex, NULL);
- pthread_mutex_init(&the_lnet.ln_api_mutex, NULL);
-}
-
-static void lnet_fini_locks(void)
-{
- pthread_mutex_destroy(&the_lnet.ln_api_mutex);
- pthread_mutex_destroy(&the_lnet.ln_lnd_mutex);
- pthread_mutex_destroy(&the_lnet.ln_eq_wait_lock);
- pthread_cond_destroy(&the_lnet.ln_eq_cond);
-}
-
-# endif
-#endif
-
static int
lnet_create_remote_nets_table(void)
{
{
LNET_MUTEX_LOCK(&the_lnet.ln_lnd_mutex);
- LASSERT(the_lnet.ln_init);
LASSERT(libcfs_isknown_lnd(lnd->lnd_type));
LASSERT(lnet_find_lnd_by_type(lnd->lnd_type) == NULL);
{
LNET_MUTEX_LOCK(&the_lnet.ln_lnd_mutex);
- LASSERT(the_lnet.ln_init);
LASSERT(lnet_find_lnd_by_type(lnd->lnd_type) == lnd);
LASSERT(lnd->lnd_refcount == 0);
}
EXPORT_SYMBOL(lnet_counters_reset);
-#ifdef LNET_USE_LIB_FREELIST
-
-int
-lnet_freelist_init(lnet_freelist_t *fl, int n, int size)
-{
- char *space;
-
- LASSERT (n > 0);
-
- size += offsetof (lnet_freeobj_t, fo_contents);
-
- LIBCFS_ALLOC(space, n * size);
- if (space == NULL)
- return (-ENOMEM);
-
- INIT_LIST_HEAD(&fl->fl_list);
- fl->fl_objs = space;
- fl->fl_nobjs = n;
- fl->fl_objsize = size;
-
- do {
- list_add((struct list_head *)space, &fl->fl_list);
- space += size;
- } while (--n != 0);
-
- return 0;
-}
-
-void
-lnet_freelist_fini(lnet_freelist_t *fl)
-{
- struct list_head *el;
- int count;
-
- if (fl->fl_nobjs == 0)
- return;
-
- count = 0;
- for (el = fl->fl_list.next; el != &fl->fl_list; el = el->next)
- count++;
-
- LASSERT (count == fl->fl_nobjs);
-
- LIBCFS_FREE(fl->fl_objs, fl->fl_nobjs * fl->fl_objsize);
- memset (fl, 0, sizeof (*fl));
-}
-
-#endif /* LNET_USE_LIB_FREELIST */
-
static __u64 lnet_create_interface_cookie(void)
{
/* NB the interface cookie in wire handles guards against delayed
* easily good enough. */
struct timeval tv;
__u64 cookie;
-#ifndef __KERNEL__
- int rc = gettimeofday (&tv, NULL);
- LASSERT (rc == 0);
-#else
do_gettimeofday(&tv);
-#endif
cookie = tv.tv_sec;
cookie *= 1000000;
cookie += tv.tv_usec;
count, lnet_res_type2str(rec->rec_type));
}
-#ifdef LNET_USE_LIB_FREELIST
- lnet_freelist_fini(&rec->rec_freelist);
-#endif
if (rec->rec_lh_hash != NULL) {
LIBCFS_FREE(rec->rec_lh_hash,
LNET_LH_HASH_SIZE * sizeof(rec->rec_lh_hash[0]));
}
static int
-lnet_res_container_setup(struct lnet_res_container *rec,
- int cpt, int type, int objnum, int objsz)
+lnet_res_container_setup(struct lnet_res_container *rec, int cpt, int type)
{
int rc = 0;
int i;
rec->rec_type = type;
INIT_LIST_HEAD(&rec->rec_active);
-#ifdef LNET_USE_LIB_FREELIST
- memset(&rec->rec_freelist, 0, sizeof(rec->rec_freelist));
- rc = lnet_freelist_init(&rec->rec_freelist, objnum, objsz);
- if (rc != 0)
- goto out;
-#endif
rec->rec_lh_cookie = (cpt << LNET_COOKIE_TYPE_BITS) | type;
/* Arbitrary choice of hash table size */
}
static struct lnet_res_container **
-lnet_res_containers_create(int type, int objnum, int objsz)
+lnet_res_containers_create(int type)
{
struct lnet_res_container **recs;
struct lnet_res_container *rec;
}
cfs_percpt_for_each(rec, i, recs) {
- rc = lnet_res_container_setup(rec, i, type, objnum, objsz);
+ rc = lnet_res_container_setup(rec, i, type);
if (rc != 0) {
lnet_res_containers_destroy(recs);
return NULL;
list_add(&lh->lh_hash_chain, &rec->rec_lh_hash[hash]);
}
-#ifndef __KERNEL__
-/**
- * Reserved API - do not use.
- * Temporary workaround to allow uOSS and test programs force server
- * mode in userspace. See comments near ln_server_mode_flag in
- * lnet/lib-types.h */
-
-void
-lnet_server_mode() {
- the_lnet.ln_server_mode_flag = 1;
-}
-#endif
-
static int lnet_unprepare(void);
static int
the_lnet.ln_routing = 0;
-#ifdef __KERNEL__
LASSERT((requested_pid & LNET_PID_USERFLAG) == 0);
the_lnet.ln_pid = requested_pid;
-#else
- if (the_lnet.ln_server_mode_flag) {/* server case (uOSS) */
- LASSERT ((requested_pid & LNET_PID_USERFLAG) == 0);
- /* Only root can run user-space server */
- if (current_uid() != 0)
- return -EPERM;
- the_lnet.ln_pid = requested_pid;
-
- } else {/* client case (liblustre) */
- /* My PID must be unique on this node and flag I'm userspace */
- the_lnet.ln_pid = getpid() | LNET_PID_USERFLAG;
- }
-#endif
INIT_LIST_HEAD(&the_lnet.ln_test_peers);
INIT_LIST_HEAD(&the_lnet.ln_nis);
goto failed;
rc = lnet_res_container_setup(&the_lnet.ln_eq_container, 0,
- LNET_COOKIE_TYPE_EQ, LNET_FL_MAX_EQS,
- sizeof(lnet_eq_t));
+ LNET_COOKIE_TYPE_EQ);
if (rc != 0)
goto failed;
- recs = lnet_res_containers_create(LNET_COOKIE_TYPE_ME, LNET_FL_MAX_MES,
- sizeof(lnet_me_t));
+ recs = lnet_res_containers_create(LNET_COOKIE_TYPE_ME);
if (recs == NULL) {
rc = -ENOMEM;
goto failed;
the_lnet.ln_me_containers = recs;
- recs = lnet_res_containers_create(LNET_COOKIE_TYPE_MD, LNET_FL_MAX_MDS,
- sizeof(lnet_libmd_t));
+ recs = lnet_res_containers_create(LNET_COOKIE_TYPE_MD);
if (recs == NULL) {
rc = -ENOMEM;
goto failed;
{
/* Return the # of NIs that need the acceptor. */
int count = 0;
-#if defined(__KERNEL__) || defined(HAVE_LIBPTHREAD)
struct list_head *tmp;
struct lnet_ni *ni;
int cpt;
lnet_net_unlock(cpt);
-#endif /* defined(__KERNEL__) || defined(HAVE_LIBPTHREAD) */
return count;
}
lnet_ni_unlink_locked(ni);
}
- /* Drop the cached eqwait NI. */
- if (the_lnet.ln_eq_waitni != NULL) {
- lnet_ni_decref_locked(the_lnet.ln_eq_waitni, 0);
- the_lnet.ln_eq_waitni = NULL;
- }
-
/* Drop the cached loopback NI. */
if (the_lnet.ln_loni != NULL) {
lnet_ni_decref_locked(the_lnet.ln_loni, 0);
LNET_MUTEX_LOCK(&the_lnet.ln_lnd_mutex);
lnd = lnet_find_lnd_by_type(lnd_type);
-#ifdef __KERNEL__
if (lnd == NULL) {
LNET_MUTEX_UNLOCK(&the_lnet.ln_lnd_mutex);
rc = request_module("%s", libcfs_lnd2modname(lnd_type));
goto failed0;
}
}
-#else
- if (lnd == NULL) {
- LNET_MUTEX_UNLOCK(&the_lnet.ln_lnd_mutex);
- CERROR("LND %s not supported\n",
- libcfs_lnd2str(lnd_type));
- goto failed0;
- }
-#endif
lnet_net_lock(LNET_LOCK_EX);
lnd->lnd_refcount++;
return 0;
}
-#ifndef __KERNEL__
- if (lnd->lnd_wait != NULL) {
- if (the_lnet.ln_eq_waitni == NULL) {
- lnet_ni_addref(ni);
- the_lnet.ln_eq_waitni = ni;
- }
- } else {
-# ifndef HAVE_LIBPTHREAD
- LCONSOLE_ERROR_MSG(0x106, "LND %s not supported in a "
- "single-threaded runtime\n",
- libcfs_lnd2str(lnd_type));
- /* shutdown the NI since if we get here then it must've already
- * been started
- */
- lnet_shutdown_lndni(ni);
- return -EINVAL;
-# endif
- }
-#endif
if (ni->ni_peertxcredits == 0 || ni->ni_maxtxcredits == 0) {
LCONSOLE_ERROR_MSG(0x107, "LNI %s has no %scredits\n",
libcfs_lnd2str(lnd->lnd_type),
{
struct lnet_ni *ni;
int rc;
- int lnd_type;
int ni_count = 0;
while (!list_empty(nilist)) {
ni_count++;
}
- if (the_lnet.ln_eq_waitni != NULL && ni_count > 1) {
- lnd_type = the_lnet.ln_eq_waitni->ni_lnd->lnd_type;
- LCONSOLE_ERROR_MSG(0x109, "LND %s can only run single-network"
- "\n",
- libcfs_lnd2str(lnd_type));
- rc = -EINVAL;
- goto failed;
- }
-
return ni_count;
failed:
lnet_shutdown_lndnis();
/**
* Initialize LNet library.
*
- * Only userspace program needs to call this function - it's automatically
- * called in the kernel at module loading time. Caller has to call LNetFini()
- * after a call to LNetInit(), if and only if the latter returned 0. It must
- * be called exactly once.
+ * Automatically called at module loading time. The caller must call
+ * lnet_fini() after a successful call to lnet_init(); lnet_init()
+ * must be called exactly once.
*
* \return 0 on success, and -ve on failures.
*/
int
-LNetInit(void)
+lnet_init(void)
{
int rc;
lnet_assert_wire_constants();
- LASSERT(!the_lnet.ln_init);
memset(&the_lnet, 0, sizeof(the_lnet));
}
the_lnet.ln_refcount = 0;
- the_lnet.ln_init = 1;
LNetInvalidateHandle(&the_lnet.ln_rc_eqh);
INIT_LIST_HEAD(&the_lnet.ln_lnds);
INIT_LIST_HEAD(&the_lnet.ln_rcd_zombie);
INIT_LIST_HEAD(&the_lnet.ln_rcd_deathrow);
-#ifdef __KERNEL__
/* The hash table size is the number of bits it takes to express the set
* ln_num_routes, minus 1 (better to under estimate than over so we
* don't waste memory). */
/* All LNDs apart from the LOLND are in separate modules. They
* register themselves when their module loads, and unregister
* themselves when their module is unloaded. */
-#else
- the_lnet.ln_remote_nets_hbits = 8;
-
- /* Register LNDs
- * NB the order here determines default 'networks=' order */
-# ifdef HAVE_LIBPTHREAD
- LNET_REGISTER_ULND(the_tcplnd);
-# endif
-#endif
lnet_register_lnd(&the_lolnd);
return 0;
}
-EXPORT_SYMBOL(LNetInit);
/**
* Finalize LNet library.
*
- * Only userspace program needs to call this function. It can be called
- * at most once.
- *
- * \pre LNetInit() called with success.
+ * \pre lnet_init() has been called successfully.
* \pre All LNet users called LNetNIFini() for matching LNetNIInit() calls.
*/
void
-LNetFini(void)
+lnet_fini(void)
{
- LASSERT(the_lnet.ln_init);
LASSERT(the_lnet.ln_refcount == 0);
while (!list_empty(&the_lnet.ln_lnds))
lnet_unregister_lnd(list_entry(the_lnet.ln_lnds.next,
lnd_t, lnd_list));
lnet_destroy_locks();
-
- the_lnet.ln_init = 0;
}
-EXPORT_SYMBOL(LNetFini);
/**
* Set LNet PID and start LNet interfaces, routing, and forwarding.
*
- * Userspace program should call this after a successful call to LNetInit().
* Users must call this function at least once before any other functions.
* For each successful call there must be a corresponding call to
* LNetNIFini(). For subsequent calls to LNetNIInit(), \a requested_pid is
LNET_MUTEX_LOCK(&the_lnet.ln_api_mutex);
- LASSERT(the_lnet.ln_init);
CDEBUG(D_OTHER, "refs %d\n", the_lnet.ln_refcount);
if (the_lnet.ln_refcount > 0) {
{
LNET_MUTEX_LOCK(&the_lnet.ln_api_mutex);
- LASSERT (the_lnet.ln_init);
LASSERT (the_lnet.ln_refcount > 0);
if (the_lnet.ln_refcount != 1) {
CLASSERT(LIBCFS_IOC_DATA_MAX >= sizeof(struct lnet_ioctl_net_config) +
sizeof(struct lnet_ioctl_config_data));
- LASSERT(the_lnet.ln_init);
switch (cmd) {
case IOC_LIBCFS_GET_NI:
return 0;
}
-#if defined(__KERNEL__) && defined(LNET_ROUTER)
case IOC_LIBCFS_CONFIG_RTR:
config = arg;
buf_large);
LNET_MUTEX_UNLOCK(&the_lnet.ln_api_mutex);
return rc;
-#endif
case IOC_LIBCFS_GET_BUF: {
struct lnet_ioctl_pool_cfg *pool_cfg;
int cpt;
int rc = -ENOENT;
- LASSERT(the_lnet.ln_init);
LASSERT(the_lnet.ln_refcount > 0);
cpt = lnet_net_lock_current();
if (ni->ni_cpts != NULL)
cfs_expr_list_values_free(ni->ni_cpts, ni->ni_ncpts);
-#ifndef __KERNEL__
-# ifdef HAVE_LIBPTHREAD
- pthread_mutex_destroy(&ni->ni_lock);
-# endif
-#endif
for (i = 0; i < LNET_MAX_INTERFACES &&
ni->ni_interfaces[i] != NULL; i++) {
LIBCFS_FREE(ni->ni_interfaces[i],
return NULL;
}
-#ifdef __KERNEL__
spin_lock_init(&ni->ni_lock);
-#else
-# ifdef HAVE_LIBPTHREAD
- pthread_mutex_init(&ni->ni_lock, NULL);
-# endif
-#endif
INIT_LIST_HEAD(&ni->ni_cptlist);
ni->ni_refs = cfs_percpt_alloc(lnet_cpt_table(),
sizeof(*ni->ni_refs[0]));
return count;
}
-#ifdef __KERNEL__
static void
lnet_ipaddr_free_enumeration(__u32 *ipaddrs, int nip)
{
}
EXPORT_SYMBOL(lnet_set_ip_niaddr);
-#endif
{
lnet_eq_t *eq;
- LASSERT (the_lnet.ln_init);
LASSERT (the_lnet.ln_refcount > 0);
/* We need count to be a power of 2 so that when eq_{enq,deq}_seq
int size = 0;
int i;
- LASSERT(the_lnet.ln_init);
LASSERT(the_lnet.ln_refcount > 0);
lnet_res_lock(LNET_LOCK_EX);
lnet_res_lh_invalidate(&eq->eq_lh);
list_del(&eq->eq_list);
- lnet_eq_free_locked(eq);
+ lnet_eq_free(eq);
out:
lnet_eq_wait_unlock();
lnet_res_unlock(LNET_LOCK_EX);
if (eq->eq_callback != LNET_EQ_HANDLER_NONE)
eq->eq_callback(ev);
-#ifdef __KERNEL__
/* Wake anyone waiting in LNetEQPoll() */
if (waitqueue_active(&the_lnet.ln_eq_waitq))
wake_up_all(&the_lnet.ln_eq_waitq);
-#else
-# ifndef HAVE_LIBPTHREAD
- /* LNetEQPoll() calls into _the_ LND to wait for action */
-# else
- /* Wake anyone waiting in LNetEQPoll() */
- pthread_cond_broadcast(&the_lnet.ln_eq_cond);
-# endif
-#endif
lnet_eq_wait_unlock();
}
}
EXPORT_SYMBOL(LNetEQWait);
-#ifdef __KERNEL__
-
static int
lnet_eq_wait_locked(int *timeout_ms)
__must_hold(&the_lnet.ln_eq_wait_lock)
return wait;
}
-#else /* !__KERNEL__ */
-
-# ifdef HAVE_LIBPTHREAD
-static void
-lnet_eq_cond_wait(struct timespec *ts)
-{
- if (ts == NULL) {
- pthread_cond_wait(&the_lnet.ln_eq_cond,
- &the_lnet.ln_eq_wait_lock);
- } else {
- pthread_cond_timedwait(&the_lnet.ln_eq_cond,
- &the_lnet.ln_eq_wait_lock, ts);
- }
-}
-# endif
-
-static int
-lnet_eq_wait_locked(int *timeout_ms)
-{
- lnet_ni_t *eq_waitni = NULL;
- int tms = *timeout_ms;
- int wait;
- struct timeval then;
- struct timeval now;
-
- if (the_lnet.ln_eq_waitni != NULL) {
- /* I have a single NI that I have to call into, to get
- * events queued, or to block. */
- lnet_eq_wait_unlock();
-
- lnet_net_lock(0);
- eq_waitni = the_lnet.ln_eq_waitni;
- if (unlikely(eq_waitni == NULL)) {
- lnet_net_unlock(0);
-
- lnet_eq_wait_lock();
- return -1;
- }
-
- lnet_ni_addref_locked(eq_waitni, 0);
- lnet_net_unlock(0);
-
- if (tms <= 0) { /* even for tms == 0 */
- (eq_waitni->ni_lnd->lnd_wait)(eq_waitni, tms);
-
- } else {
- gettimeofday(&then, NULL);
-
- (eq_waitni->ni_lnd->lnd_wait)(eq_waitni, tms);
-
- gettimeofday(&now, NULL);
- tms -= (now.tv_sec - then.tv_sec) * 1000 +
- (now.tv_usec - then.tv_usec) / 1000;
- if (tms < 0)
- tms = 0;
- }
-
- lnet_ni_decref(eq_waitni);
- lnet_eq_wait_lock();
- } else { /* w/o eq_waitni */
-# ifndef HAVE_LIBPTHREAD
- /* If I'm single-threaded, LNET fails at startup if it can't
- * set the_lnet.ln_eqwaitni correctly. */
- LBUG();
-# else /* HAVE_LIBPTHREAD */
- struct timespec ts;
-
- if (tms == 0) /* don't want to wait and new event */
- return -1;
-
- if (tms < 0) {
- lnet_eq_cond_wait(NULL);
-
- } else {
-
- gettimeofday(&then, NULL);
-
- ts.tv_sec = then.tv_sec + tms / 1000;
- ts.tv_nsec = then.tv_usec * 1000 +
- (tms % 1000) * 1000000;
- if (ts.tv_nsec >= 1000000000) {
- ts.tv_sec++;
- ts.tv_nsec -= 1000000000;
- }
-
- lnet_eq_cond_wait(&ts);
-
- gettimeofday(&now, NULL);
- tms -= (now.tv_sec - then.tv_sec) * 1000 +
- (now.tv_usec - then.tv_usec) / 1000;
- if (tms < 0)
- tms = 0;
- }
-# endif /* HAVE_LIBPTHREAD */
- }
-
- wait = tms != 0;
- *timeout_ms = tms;
-
- return wait;
-}
-
-#endif /* __KERNEL__ */
-
-
/**
* Block the calling process until there's an event from a set of EQs or
* timeout happens.
int i;
ENTRY;
- LASSERT (the_lnet.ln_init);
LASSERT (the_lnet.ln_refcount > 0);
if (neq < 1)
lnet_eq_wait_lock();
for (;;) {
-#ifndef __KERNEL__
- lnet_eq_wait_unlock();
-
- /* Recursion breaker */
- if (the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING &&
- !LNetHandleIsEqual(eventqs[0], the_lnet.ln_rc_eqh))
- lnet_router_checker();
-
- lnet_eq_wait_lock();
-#endif
for (i = 0; i < neq; i++) {
lnet_eq_t *eq = lnet_handle2eq(&eventqs[i]);
LASSERT(!list_empty(&md->md_list));
list_del_init(&md->md_list);
- lnet_md_free_locked(md);
+ lnet_md_free(md);
}
static int
return -EINVAL;
} else if ((umd->options & LNET_MD_KIOV) != 0) {
-#ifndef __KERNEL__
- return -EINVAL;
-#else
lmd->md_niov = niov = umd->length;
memcpy(lmd->md_iov.kiov, umd->start,
niov * sizeof (lmd->md_iov.kiov[0]));
(umd->max_size < 0 ||
umd->max_size > total_length)) // illegal max_size
return -EINVAL;
-#endif
} else { /* contiguous */
lmd->md_length = umd->length;
lmd->md_niov = niov = 1;
int cpt;
int rc;
- LASSERT (the_lnet.ln_init);
LASSERT (the_lnet.ln_refcount > 0);
if (lnet_md_validate(&umd) != 0)
return 0;
failed:
- lnet_md_free_locked(md);
+ lnet_md_free(md);
lnet_res_unlock(cpt);
return rc;
int cpt;
int rc;
- LASSERT (the_lnet.ln_init);
LASSERT (the_lnet.ln_refcount > 0);
if (lnet_md_validate(&umd) != 0)
return 0;
failed:
- lnet_md_free_locked(md);
+ lnet_md_free(md);
lnet_res_unlock(cpt);
return rc;
lnet_libmd_t *md;
int cpt;
- LASSERT(the_lnet.ln_init);
LASSERT(the_lnet.ln_refcount > 0);
cpt = lnet_cpt_of_cookie(mdh.cookie);
struct lnet_me *me;
struct list_head *head;
- LASSERT(the_lnet.ln_init);
LASSERT(the_lnet.ln_refcount > 0);
if ((int)portal >= the_lnet.ln_nportals)
struct lnet_portal *ptl;
int cpt;
- LASSERT(the_lnet.ln_init);
LASSERT(the_lnet.ln_refcount > 0);
if (pos == LNET_INS_LOCAL)
current_me = lnet_handle2me(¤t_meh);
if (current_me == NULL) {
- lnet_me_free_locked(new_me);
+ lnet_me_free(new_me);
lnet_res_unlock(cpt);
return -ENOENT;
ptl = the_lnet.ln_portals[current_me->me_portal];
if (lnet_ptl_is_unique(ptl)) {
/* nosense to insertion on unique portal */
- lnet_me_free_locked(new_me);
+ lnet_me_free(new_me);
lnet_res_unlock(cpt);
return -EPERM;
}
lnet_event_t ev;
int cpt;
- LASSERT(the_lnet.ln_init);
LASSERT(the_lnet.ln_refcount > 0);
cpt = lnet_cpt_of_cookie(meh.cookie);
}
lnet_res_lh_invalidate(&me->me_lh);
- lnet_me_free_locked(me);
+ lnet_me_free(me);
}
#if 0
struct list_head *next;
struct list_head cull;
- LASSERT(the_lnet.ln_init);
-
/* NB: use lnet_net_lock(0) to serialize operations on test peers */
if (threshold != 0) {
/* Adding a new entry */
}
EXPORT_SYMBOL(lnet_extract_iov);
-#ifndef __KERNEL__
-unsigned int
-lnet_kiov_nob (unsigned int niov, lnet_kiov_t *kiov)
-{
- LASSERT (0);
- return (0);
-}
-
-void
-lnet_copy_kiov2kiov (unsigned int ndkiov, lnet_kiov_t *dkiov, unsigned int doffset,
- unsigned int nskiov, lnet_kiov_t *skiov, unsigned int soffset,
- unsigned int nob)
-{
- LASSERT (0);
-}
-
-void
-lnet_copy_kiov2iov (unsigned int niov, struct iovec *iov, unsigned int iovoffset,
- unsigned int nkiov, lnet_kiov_t *kiov, unsigned int kiovoffset,
- unsigned int nob)
-{
- LASSERT (0);
-}
-
-void
-lnet_copy_iov2kiov (unsigned int nkiov, lnet_kiov_t *kiov, unsigned int kiovoffset,
- unsigned int niov, struct iovec *iov, unsigned int iovoffset,
- unsigned int nob)
-{
- LASSERT (0);
-}
-
-int
-lnet_extract_kiov (int dst_niov, lnet_kiov_t *dst,
- int src_niov, lnet_kiov_t *src,
- unsigned int offset, unsigned int len)
-{
- LASSERT (0);
-}
-
-#else /* __KERNEL__ */
unsigned int
lnet_kiov_nob (unsigned int niov, lnet_kiov_t *kiov)
}
}
EXPORT_SYMBOL(lnet_extract_kiov);
-#endif
void
lnet_ni_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
return LNET_CREDIT_OK;
}
-#ifdef __KERNEL__
static lnet_rtrbufpool_t *
lnet_msg2bufpool(lnet_msg_t *msg)
}
return LNET_CREDIT_OK;
}
-#endif
void
lnet_return_tx_credits_locked(lnet_msg_t *msg)
}
}
-#ifdef __KERNEL__
void
lnet_schedule_blocked_locked(lnet_rtrbufpool_t *rbp)
{
(void)lnet_post_routed_recv_locked(msg, 1);
}
-#endif
void
lnet_drop_routed_msgs_locked(struct list_head *list, int cpt)
lnet_return_rx_credits_locked(lnet_msg_t *msg)
{
lnet_peer_t *rxpeer = msg->msg_rxpeer;
-#ifdef __KERNEL__
lnet_msg_t *msg2;
if (msg->msg_rtrcredit) {
(void) lnet_post_routed_recv_locked(msg2, 1);
}
}
-#else
- LASSERT(!msg->msg_rtrcredit);
- LASSERT(!msg->msg_peerrtrcredit);
-#endif
if (rxpeer != NULL) {
msg->msg_rxpeer = NULL;
lnet_peer_decref_locked(rxpeer);
}
LASSERT (lp->lp_ni == src_ni);
} else {
-#ifndef __KERNEL__
- lnet_net_unlock(cpt);
-
- /* NB
- * - once application finishes computation, check here to update
- * router states before it waits for pending IO in LNetEQPoll
- * - recursion breaker: router checker sends no message
- * to remote networks */
- if (the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING)
- lnet_router_checker();
-
- lnet_net_lock(cpt);
-#endif
/* sending to a remote network */
lp = lnet_find_route_locked(src_ni, dst_nid, rtr_nid);
if (lp == NULL) {
{
int rc = 0;
-#ifdef __KERNEL__
if (!the_lnet.ln_routing)
return -ECANCELED;
if (rc == 0)
rc = lnet_post_routed_recv_locked(msg, 0);
-#else
- LBUG();
-#endif
return rc;
}
int cpt;
int rc;
- LASSERT(the_lnet.ln_init);
LASSERT(the_lnet.ln_refcount > 0);
if (!list_empty(&the_lnet.ln_test_peers) && /* normally we don't */
int cpt;
int rc;
- LASSERT (the_lnet.ln_init);
LASSERT (the_lnet.ln_refcount > 0);
if (!list_empty(&the_lnet.ln_test_peers) && /* normally we don't */
* keep order 0 free for 0@lo and order 1 free for a local NID
* match */
- LASSERT (the_lnet.ln_init);
LASSERT (the_lnet.ln_refcount > 0);
cpt = lnet_net_lock_current();
return -EHOSTUNREACH;
}
EXPORT_SYMBOL(LNetDist);
-
-/**
- * Set the number of asynchronous messages expected from a target process.
- *
- * This function is only meaningful for userspace callers. It's a no-op when
- * called from kernel.
- *
- * Asynchronous messages are those that can come from a target when the
- * userspace process is not waiting for IO to complete; e.g., AST callbacks
- * from Lustre servers. Specifying the expected number of such messages
- * allows them to be eagerly received when user process is not running in
- * LNet; otherwise network errors may occur.
- *
- * \param id Process ID of the target process.
- * \param nasync Number of asynchronous messages expected from the target.
- *
- * \return 0 on success, and an error code otherwise.
- */
-int
-LNetSetAsync(lnet_process_id_t id, int nasync)
-{
-#ifdef __KERNEL__
- return 0;
-#else
- lnet_ni_t *ni;
- lnet_remotenet_t *rnet;
- struct list_head *tmp;
- lnet_route_t *route;
- lnet_nid_t *nids;
- int nnids;
- int maxnids = 256;
- int rc = 0;
- int rc2;
- int cpt;
-
- /* Target on a local network? */
- ni = lnet_net2ni(LNET_NIDNET(id.nid));
- if (ni != NULL) {
- if (ni->ni_lnd->lnd_setasync != NULL)
- rc = (ni->ni_lnd->lnd_setasync)(ni, id, nasync);
- lnet_ni_decref(ni);
- return rc;
- }
-
- /* Target on a remote network: apply to routers */
- again:
- LIBCFS_ALLOC(nids, maxnids * sizeof(*nids));
- if (nids == NULL)
- return -ENOMEM;
- nnids = 0;
-
- /* Snapshot all the router NIDs */
- cpt = lnet_net_lock_current();
- rnet = lnet_find_net_locked(LNET_NIDNET(id.nid));
- if (rnet != NULL) {
- list_for_each(tmp, &rnet->lrn_routes) {
- if (nnids == maxnids) {
- lnet_net_unlock(cpt);
- LIBCFS_FREE(nids, maxnids * sizeof(*nids));
- maxnids *= 2;
- goto again;
- }
-
- route = list_entry(tmp, lnet_route_t, lr_list);
- nids[nnids++] = route->lr_gateway->lp_nid;
- }
- }
- lnet_net_unlock(cpt);
-
- /* set async on all the routers */
- while (nnids-- > 0) {
- id.pid = LNET_PID_LUSTRE;
- id.nid = nids[nnids];
-
- ni = lnet_net2ni(LNET_NIDNET(id.nid));
- if (ni == NULL)
- continue;
-
- if (ni->ni_lnd->lnd_setasync != NULL) {
- rc2 = (ni->ni_lnd->lnd_setasync)(ni, id, nasync);
- if (rc2 != 0)
- rc = rc2;
- }
- lnet_ni_decref(ni);
- }
-
- LIBCFS_FREE(nids, maxnids * sizeof(*nids));
- return rc;
-#endif
-}
-EXPORT_SYMBOL(LNetSetAsync);
}
lnet_msg_decommit(msg, cpt, status);
- lnet_msg_free_locked(msg);
+ lnet_msg_free(msg);
return 0;
}
/* Recursion breaker. Don't complete the message here if I am (or
* enough other threads are) already completing messages */
-#ifdef __KERNEL__
my_slot = -1;
for (i = 0; i < container->msc_nfinalizers; i++) {
if (container->msc_finalizers[i] == current)
}
container->msc_finalizers[my_slot] = current;
-#else
- LASSERT(container->msc_nfinalizers == 1);
- if (container->msc_finalizers[0] != NULL) {
- lnet_net_unlock(cpt);
- return;
- }
-
- my_slot = i = 0;
- container->msc_finalizers[0] = (struct lnet_msg_container *)1;
-#endif
while (!list_empty(&container->msc_finalizing)) {
msg = list_entry(container->msc_finalizing.next,
sizeof(*container->msc_finalizers));
container->msc_finalizers = NULL;
}
-#ifdef LNET_USE_LIB_FREELIST
- lnet_freelist_fini(&container->msc_freelist);
-#endif
container->msc_init = 0;
}
INIT_LIST_HEAD(&container->msc_active);
INIT_LIST_HEAD(&container->msc_finalizing);
-#ifdef LNET_USE_LIB_FREELIST
- memset(&container->msc_freelist, 0, sizeof(lnet_freelist_t));
-
- rc = lnet_freelist_init(&container->msc_freelist,
- LNET_FL_MAX_MSGS, sizeof(lnet_msg_t));
- if (rc != 0) {
- CERROR("Failed to init freelist for message container\n");
- lnet_msg_container_cleanup(container);
- return rc;
- }
-#else
rc = 0;
-#endif
/* number of CPUs */
container->msc_nfinalizers = cfs_cpt_weight(lnet_cpt_table(), cpt);
LASSERT(list_empty(&ptl->ptl_msg_delayed));
LASSERT(list_empty(&ptl->ptl_msg_stealing));
-#ifndef __KERNEL__
-# ifdef HAVE_LIBPTHREAD
- pthread_mutex_destroy(&ptl->ptl_lock);
-# endif
-#endif
cfs_percpt_for_each(mtable, i, ptl->ptl_mtables) {
struct list_head *mhash;
lnet_me_t *me;
ptl->ptl_index = index;
INIT_LIST_HEAD(&ptl->ptl_msg_delayed);
INIT_LIST_HEAD(&ptl->ptl_msg_stealing);
-#ifdef __KERNEL__
spin_lock_init(&ptl->ptl_lock);
-#else
-# ifdef HAVE_LIBPTHREAD
- pthread_mutex_init(&ptl->ptl_lock, NULL);
-# endif
-#endif
cfs_percpt_for_each(mtable, i, ptl->ptl_mtables) {
/* the extra entry is for MEs with ignore bits */
LIBCFS_CPT_ALLOC(mhash, lnet_cpt_table(), i,
/* .lnd_recv = */ lolnd_recv,
/* .lnd_eager_recv = */ NULL,
/* .lnd_notify = */ NULL,
-#ifdef __KERNEL__
/* .lnd_accept = */ NULL
-#else
- /* .lnd_wait = */ NULL
-#endif
};
DECLARE_IOCTL_HANDLER(lnet_ioctl_handler, lnet_ioctl);
static int
-init_lnet(void)
+lnet_module_init(void)
{
- int rc;
- ENTRY;
+ int rc;
+ ENTRY;
mutex_init(&lnet_config_mutex);
- rc = LNetInit();
- if (rc != 0) {
- CERROR("LNetInit: error %d\n", rc);
- RETURN(rc);
- }
+ rc = lnet_init();
+ if (rc != 0) {
+ CERROR("lnet_init: error %d\n", rc);
+ RETURN(rc);
+ }
- rc = libcfs_register_ioctl(&lnet_ioctl_handler);
- LASSERT (rc == 0);
+ rc = libcfs_register_ioctl(&lnet_ioctl_handler);
+ LASSERT(rc == 0);
if (config_on_load) {
/* Have to schedule a separate thread to avoid deadlocking
(void) kthread_run(lnet_configure, NULL, "lnet_initd");
}
- RETURN(0);
+ RETURN(0);
}
static void
-fini_lnet(void)
+lnet_module_exit(void)
{
- int rc;
+ int rc;
- rc = libcfs_deregister_ioctl(&lnet_ioctl_handler);
- LASSERT (rc == 0);
+ rc = libcfs_deregister_ioctl(&lnet_ioctl_handler);
+ LASSERT(rc == 0);
- LNetFini();
+ lnet_fini();
}
MODULE_AUTHOR("Peter J. Braam <braam@clusterfs.com>");
MODULE_DESCRIPTION("Portals v3.1");
MODULE_LICENSE("GPL");
-cfs_module(lnet, "1.0.0", init_lnet, fini_lnet);
+cfs_module(lnet, "1.0.0", lnet_module_init, lnet_module_exit);
#define DEBUG_SUBSYSTEM S_LNET
#include <lnet/lib-lnet.h>
-#if defined(__KERNEL__) && defined(LNET_ROUTER)
-
#define LNET_NRB_TINY_MIN 512 /* min value for each CPT */
#define LNET_NRB_TINY (LNET_NRB_TINY_MIN * 4)
#define LNET_NRB_SMALL_MIN 4096 /* min value for each CPT */
/* forward ref's */
static int lnet_router_checker(void *);
-#else
-
-int
-lnet_peer_buffer_credits(lnet_ni_t *ni)
-{
- return 0;
-}
-
-#endif
static int check_routers_before_use = 0;
CFS_MODULE_PARM(check_routers_before_use, "i", int, 0444,
if (all_known)
return;
-#ifndef __KERNEL__
- lnet_router_checker();
-#endif
cfs_pause(cfs_time_seconds(1));
}
}
lnet_router_checker_start(void)
{
int rc;
- int eqsz;
-#ifdef __KERNEL__
+ int eqsz = 0;
struct task_struct *task;
-#else /* __KERNEL__ */
- lnet_peer_t *rtr;
- __u64 version;
- int nrtr = 0;
- int router_checker_max_eqsize = 10240;
-
- LASSERT (check_routers_before_use);
- LASSERT (dead_router_check_interval > 0);
-
- lnet_net_lock(0);
-
- /* As an approximation, allow each router the same number of
- * outstanding events as it is allowed outstanding sends */
- eqsz = 0;
- version = the_lnet.ln_routers_version;
- list_for_each_entry(rtr, &the_lnet.ln_routers, lp_rtr_list) {
- lnet_ni_t *ni = rtr->lp_ni;
- lnet_process_id_t id;
-
- nrtr++;
- eqsz += ni->ni_peertxcredits;
-
- /* one async ping reply per router */
- id.nid = rtr->lp_nid;
- id.pid = LNET_PID_LUSTRE;
-
- lnet_net_unlock(0);
-
- rc = LNetSetAsync(id, 1);
- if (rc != 0) {
- CWARN("LNetSetAsync %s failed: %d\n",
- libcfs_id2str(id), rc);
- return rc;
- }
-
- lnet_net_lock(0);
- /* NB router list doesn't change in userspace */
- LASSERT(version == the_lnet.ln_routers_version);
- }
-
- lnet_net_unlock(0);
-
- if (nrtr == 0) {
- CDEBUG(D_NET,
- "No router found, not starting router checker\n");
- return 0;
- }
-
- /* at least allow a SENT and a REPLY per router */
- if (router_checker_max_eqsize < 2 * nrtr)
- router_checker_max_eqsize = 2 * nrtr;
-
- LASSERT (eqsz > 0);
- if (eqsz > router_checker_max_eqsize)
- eqsz = router_checker_max_eqsize;
-#endif
- LASSERT (the_lnet.ln_rc_state == LNET_RC_STATE_SHUTDOWN);
+ LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_SHUTDOWN);
if (check_routers_before_use &&
dead_router_check_interval <= 0) {
return -EINVAL;
}
-#ifdef __KERNEL__
sema_init(&the_lnet.ln_rc_signal, 0);
- /* EQ size doesn't matter; the callback is guaranteed to get every
- * event */
- eqsz = 0;
- rc = LNetEQAlloc(eqsz, lnet_router_checker_event,
- &the_lnet.ln_rc_eqh);
-#else
- rc = LNetEQAlloc(eqsz, LNET_EQ_HANDLER_NONE,
- &the_lnet.ln_rc_eqh);
-#endif
- if (rc != 0) {
- CERROR("Can't allocate EQ(%d): %d\n", eqsz, rc);
- return -ENOMEM;
- }
- the_lnet.ln_rc_state = LNET_RC_STATE_RUNNING;
-#ifdef __KERNEL__
+ rc = LNetEQAlloc(0, lnet_router_checker_event, &the_lnet.ln_rc_eqh);
+ if (rc != 0) {
+ CERROR("Can't allocate EQ(%d): %d\n", eqsz, rc);
+ return -ENOMEM;
+ }
+
+ the_lnet.ln_rc_state = LNET_RC_STATE_RUNNING;
task = kthread_run(lnet_router_checker, NULL, "router_checker");
if (IS_ERR(task)) {
rc = PTR_ERR(task);
the_lnet.ln_rc_state = LNET_RC_STATE_SHUTDOWN;
return -ENOMEM;
}
-#endif
if (check_routers_before_use) {
/* Note that a helpful side-effect of pinging all known routers
/* wakeup the RC thread if it's sleeping */
wake_up(&the_lnet.ln_rc_waitq);
-#ifdef __KERNEL__
/* block until event callback signals exit */
down(&the_lnet.ln_rc_signal);
-#else
- lnet_router_checker();
-#endif
LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_SHUTDOWN);
rc = LNetEQFree(the_lnet.ln_rc_eqh);
lnet_net_unlock(LNET_LOCK_EX);
}
-
-#if defined(__KERNEL__) && defined(LNET_ROUTER)
-
/*
* This function is called to check if the RC should block indefinitely.
* It's called from lnet_router_checker() as well as being passed to
return 0;
}
EXPORT_SYMBOL(lnet_notify);
-
-void
-lnet_get_tunables (void)
-{
- return;
-}
-
-#else
-
-int
-lnet_notify (lnet_ni_t *ni, lnet_nid_t nid, int alive, cfs_time_t when)
-{
- return -EOPNOTSUPP;
-}
-
-void
-lnet_router_checker (void)
-{
- static time_t last = 0;
- static int running = 0;
-
- time_t now = cfs_time_current_sec();
- int interval = now - last;
- int rc;
- __u64 version;
- lnet_peer_t *rtr;
-
- /* It's no use to call me again within a sec - all intervals and
- * timeouts are measured in seconds */
- if (last != 0 && interval < 2)
- return;
-
- if (last != 0 &&
- interval > MAX(live_router_check_interval,
- dead_router_check_interval))
- CNETERR("Checker(%d/%d) not called for %d seconds\n",
- live_router_check_interval, dead_router_check_interval,
- interval);
-
- LASSERT(LNET_CPT_NUMBER == 1);
-
- lnet_net_lock(0);
- LASSERT(!running); /* recursion check */
- running = 1;
- lnet_net_unlock(0);
-
- last = now;
-
- if (the_lnet.ln_rc_state == LNET_RC_STATE_STOPPING)
- lnet_prune_rc_data(0); /* unlink all rcd and nowait */
-
- /* consume all pending events */
- while (1) {
- int i;
- lnet_event_t ev;
-
- /* NB ln_rc_eqh must be the 1st in 'eventqs' otherwise the
- * recursion breaker in LNetEQPoll would fail */
- rc = LNetEQPoll(&the_lnet.ln_rc_eqh, 1, 0, &ev, &i);
- if (rc == 0) /* no event pending */
- break;
-
- /* NB a lost SENT prevents me from pinging a router again */
- if (rc == -EOVERFLOW) {
- CERROR("Dropped an event!!!\n");
- abort();
- }
-
- LASSERT (rc == 1);
-
- lnet_router_checker_event(&ev);
- }
-
- if (the_lnet.ln_rc_state == LNET_RC_STATE_STOPPING) {
- lnet_prune_rc_data(1); /* release rcd */
- the_lnet.ln_rc_state = LNET_RC_STATE_SHUTDOWN;
- running = 0;
- return;
- }
-
- LASSERT (the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING);
-
- lnet_net_lock(0);
-
- version = the_lnet.ln_routers_version;
- list_for_each_entry(rtr, &the_lnet.ln_routers, lp_rtr_list) {
- lnet_ping_router_locked(rtr);
- LASSERT(version == the_lnet.ln_routers_version);
- }
-
- lnet_net_unlock(0);
-
- running = 0; /* lock only needed for the recursion check */
- return;
-}
-
-/* NB lnet_peers_start_down depends on me,
- * so must be called before any peer creation */
-void
-lnet_get_tunables (void)
-{
- char *s;
-
- s = getenv("LNET_ROUTER_PING_TIMEOUT");
- if (s != NULL) router_ping_timeout = atoi(s);
-
- s = getenv("LNET_LIVE_ROUTER_CHECK_INTERVAL");
- if (s != NULL) live_router_check_interval = atoi(s);
-
- s = getenv("LNET_DEAD_ROUTER_CHECK_INTERVAL");
- if (s != NULL) dead_router_check_interval = atoi(s);
-
- /* This replaces old lnd_notify mechanism */
- check_routers_before_use = 1;
- if (dead_router_check_interval <= 0)
- dead_router_check_interval = 30;
-}
-
-void
-lnet_rtrpools_free(int keep_pools)
-{
-}
-
-int
-lnet_rtrpools_alloc(int im_a_arouter)
-{
- return 0;
-}
-
-#endif
#include <libcfs/libcfs.h>
#include <lnet/lib-lnet.h>
-#if defined(__KERNEL__) && defined(LNET_ROUTER)
-
/* This is really lnet_proc.c. You might need to update sanity test 215
* if any file format is changed. */
lnet_table_header = NULL;
#endif
}
-
-#else
-
-void
-lnet_proc_init(void)
-{
-}
-
-void
-lnet_proc_fini(void)
-{
-}
-
-#endif
if (brw_inject_errors <= 0) return 0;
-#ifndef __KERNEL__
- gettimeofday(&tv, NULL);
-#else
do_gettimeofday(&tv);
-#endif
if ((tv.tv_usec & 1) == 0) return 0;
struct page *pg;
for (i = 0; i < bk->bk_niov; i++) {
-#ifdef __KERNEL__
pg = bk->bk_iovs[i].kiov_page;
-#else
- LASSERT (bk->bk_pages != NULL);
- pg = bk->bk_pages[i];
-#endif
brw_fill_page(pg, pattern, magic);
}
}
struct page *pg;
for (i = 0; i < bk->bk_niov; i++) {
-#ifdef __KERNEL__
pg = bk->bk_iovs[i].kiov_page;
-#else
- LASSERT (bk->bk_pages != NULL);
- pg = bk->bk_pages[i];
-#endif
if (brw_check_page(pg, pattern, magic) != 0) {
CERROR ("Bulk page %p (%d/%d) is corrupted!\n",
pg, i, bk->bk_niov);
}
out:
-#ifndef __KERNEL__
- rpc->crpc_bulk.bk_pages = NULL;
-#endif
return;
}
srpc_service_t brw_test_service;
void brw_init_test_service(void)
{
-#ifndef __KERNEL__
- char *s;
-
- s = getenv("BRW_INJECT_ERRORS");
- brw_inject_errors = s != NULL ? atoi(s) : brw_inject_errors;
-#endif
brw_test_service.sv_id = SRPC_SERVICE_BRW;
brw_test_service.sv_name = "brw_test";
*
* Author: Liang Zhen <liangzhen@clusterfs.com>
*/
-#ifdef __KERNEL__
#include <libcfs/libcfs.h>
#include <lnet/lib-lnet.h>
EXPORT_SYMBOL(lstcon_ioctl_entry);
-#endif
* Author: Liang Zhen <liang@whamcloud.com>
*/
-#ifdef __KERNEL__
#include <libcfs/libcfs.h>
#include <lnet/lib-lnet.h>
LASSERT(atomic_read(&console_session.ses_rpc_counter) == 0);
}
-#endif
#ifndef __LST_CONRPC_H__
#define __LST_CONRPC_H__
-#ifdef __KERNEL__
#include <libcfs/libcfs.h>
#include <lnet/lnet.h>
#include <lnet/lib-types.h>
int lstcon_rpc_module_init(void);
void lstcon_rpc_module_fini(void);
-#endif
#endif
* Author: Liang Zhen <liangzhen@clusterfs.com>
*/
-#ifdef __KERNEL__
#include <libcfs/libcfs.h>
#include <lnet/lib-lnet.h>
return 0;
}
-#endif
#ifndef __LST_CONSOLE_H__
#define __LST_CONSOLE_H__
-#ifdef __KERNEL__
#include <libcfs/libcfs.h>
#include <lnet/lnet.h>
char *src_name, char *dst_name,
void *param, int paramlen, int *retp,
struct list_head __user *result_up);
-#endif
int lstcon_console_init(void);
int lstcon_console_fini(void);
return 0;
}
-#ifndef __KERNEL__
- /* Racing is impossible in single-threaded userland selftest */
- LBUG();
-#endif
return EBUSY; /* racing with sfw_session_expired() */
}
spin_lock(&sfw_data.fw_lock);
}
-#ifndef __KERNEL__
-
-int
-sfw_session_removed (void)
-{
- return (sfw_data.fw_session == NULL) ? 1 : 0;
-}
-
-#endif
static void
sfw_session_expired (void *data)
LASSERT(rpc->crpc_bulk.bk_niov == 0);
LASSERT(list_empty(&rpc->crpc_list));
LASSERT(atomic_read(&rpc->crpc_refcount) == 0);
-#ifndef __KERNEL__
- LASSERT(rpc->crpc_bulk.bk_pages == NULL);
-#endif
CDEBUG(D_NET, "Outgoing framework RPC done: "
"service %d, peer %s, status %s:%d:%d\n",
}
LASSERT (bk != NULL);
-#ifndef __KERNEL__
- LASSERT (bk->bk_pages != NULL);
-#endif
LASSERT (bk->bk_niov * SFW_ID_PER_PAGE >= (unsigned int)ndest);
LASSERT((unsigned int)bk->bk_len >=
sizeof(lnet_process_id_packed_t) * ndest);
lnet_process_id_packed_t id;
int j;
-#ifdef __KERNEL__
dests = page_address(bk->bk_iovs[i / SFW_ID_PER_PAGE].kiov_page);
LASSERT (dests != NULL); /* my pages are within KVM always */
-#else
- dests = page_address(bk->bk_pages[i / SFW_ID_PER_PAGE]);
-#endif
id = dests[i % SFW_ID_PER_PAGE];
if (msg->msg_magic != SRPC_MSG_MAGIC)
sfw_unpack_id(id);
rpc->srpc_done = sfw_server_rpc_done;
spin_lock(&sfw_data.fw_lock);
-#ifdef __KERNEL__
if (!sfw_data.fw_shuttingdown)
sfw_add_session_timer();
-#else
- LASSERT(!sfw_data.fw_shuttingdown);
- sfw_add_session_timer();
-#endif
sfw_data.fw_active_srpc = NULL;
spin_unlock(&sfw_data.fw_lock);
spin_lock(&sfw_data.fw_lock);
-#ifdef __KERNEL__
if (!sfw_data.fw_shuttingdown)
sfw_add_session_timer();
-#else
- LASSERT(!sfw_data.fw_shuttingdown);
- sfw_add_session_timer();
-#endif
sfw_data.fw_active_srpc = NULL;
spin_unlock(&sfw_data.fw_lock);
srpc_service_t *sv;
sfw_test_case_t *tsc;
-#ifndef __KERNEL__
- char *s;
-
- s = getenv("SESSION_TIMEOUT");
- session_timeout = s != NULL ? atoi(s) : session_timeout;
-
- s = getenv("RPC_TIMEOUT");
- rpc_timeout = s != NULL ? atoi(s) : rpc_timeout;
-#endif
if (session_timeout < 0) {
CERROR ("Session timeout must be non-negative: %d\n",
spin_lock(&sfw_data.fw_lock);
sfw_data.fw_shuttingdown = 1;
-#ifdef __KERNEL__
lst_wait_until(sfw_data.fw_active_srpc == NULL, sfw_data.fw_lock,
"waiting for active RPC to finish.\n");
-#else
- LASSERT (sfw_data.fw_active_srpc == NULL);
-#endif
if (sfw_del_session_timer() != 0)
lst_wait_until(sfw_data.fw_session == NULL, sfw_data.fw_lock,
int i;
switch (lst_init_step) {
-#ifdef __KERNEL__
case LST_INIT_CONSOLE:
lstcon_console_fini();
-#endif
case LST_INIT_FW:
sfw_shutdown();
case LST_INIT_RPC:
}
lst_init_step = LST_INIT_FW;
-#ifdef __KERNEL__
rc = lstcon_console_init();
if (rc != 0) {
CERROR("LST can't startup console\n");
goto error;
}
lst_init_step = LST_INIT_CONSOLE;
-#endif
return 0;
error:
lnet_selftest_fini();
return rc;
}
-#ifdef __KERNEL__
MODULE_DESCRIPTION("LNet Selftest");
MODULE_LICENSE("GPL");
cfs_module(lnet, "0.9.0", lnet_selftest_init, lnet_selftest_fini);
-#else
-
-int
-selftest_wait_events (void)
-{
- int evts = 0;
-
- for (;;) {
- /* Consume all pending events */
- while (srpc_check_event(0))
- evts++;
- evts += stt_check_events();
- evts += swi_check_events();
- if (evts != 0) break;
-
- /* Nothing happened, block for events */
- evts += srpc_check_event(stt_poll_interval());
- /* We may have blocked, check for expired timers */
- evts += stt_check_events();
- if (evts == 0) /* timed out and still no event */
- break;
- }
-
- return evts;
-}
-
-#endif
LASSERT(nob > 0);
LASSERT(i >= 0 && i < bk->bk_niov);
-#ifdef __KERNEL__
bk->bk_iovs[i].kiov_offset = 0;
bk->bk_iovs[i].kiov_page = pg;
bk->bk_iovs[i].kiov_len = nob;
-#else
- LASSERT(bk->bk_pages != NULL);
-
- bk->bk_pages[i] = pg;
- bk->bk_iovs[i].iov_len = nob;
- bk->bk_iovs[i].iov_base = page_address(pg);
-#endif
return nob;
}
struct page *pg;
LASSERT (bk != NULL);
-#ifndef __KERNEL__
- LASSERT (bk->bk_pages != NULL);
-#endif
for (i = 0; i < bk->bk_niov; i++) {
-#ifdef __KERNEL__
pg = bk->bk_iovs[i].kiov_page;
-#else
- pg = bk->bk_pages[i];
-#endif
if (pg == NULL) break;
__free_page(pg);
}
-#ifndef __KERNEL__
- LIBCFS_FREE(bk->bk_pages, sizeof(struct page *) * bk->bk_niov);
-#endif
LIBCFS_FREE(bk, offsetof(srpc_bulk_t, bk_iovs[bk->bk_niov]));
return;
}
bk->bk_sink = sink;
bk->bk_len = bulk_len;
bk->bk_niov = bulk_npg;
-#ifndef __KERNEL__
- {
- struct page **pages;
-
- LIBCFS_CPT_ALLOC(pages, lnet_cpt_table(), cpt,
- sizeof(struct page *) * bulk_npg);
- if (pages == NULL) {
- LIBCFS_FREE(bk, offsetof(srpc_bulk_t,
- bk_iovs[bulk_npg]));
- CERROR("Can't allocate page array for %d pages\n",
- bulk_npg);
- return NULL;
- }
-
- memset(pages, 0, sizeof(struct page *) * bulk_npg);
- bk->bk_pages = pages;
- }
-#endif
for (i = 0; i < bulk_npg; i++) {
struct page *pg;
if (bk->bk_niov == 0) return 0; /* nothing to do */
opt = bk->bk_sink ? LNET_MD_OP_PUT : LNET_MD_OP_GET;
-#ifdef __KERNEL__
opt |= LNET_MD_KIOV;
-#else
- opt |= LNET_MD_IOVEC;
-#endif
ev->ev_fired = 0;
ev->ev_data = rpc;
LASSERT (bk != NULL);
opt = bk->bk_sink ? LNET_MD_OP_GET : LNET_MD_OP_PUT;
-#ifdef __KERNEL__
opt |= LNET_MD_KIOV;
-#else
- opt |= LNET_MD_IOVEC;
-#endif
ev->ev_fired = 0;
ev->ev_data = rpc;
if (stt_del_timer(&rpc->crpc_timer))
return;
-#ifdef __KERNEL__
/* timer detonated, wait for it to explode */
while (rpc->crpc_timeout != 0) {
spin_unlock(&rpc->crpc_lock);
spin_lock(&rpc->crpc_lock);
}
-#else
- LBUG(); /* impossible in single-threaded runtime */
-#endif
}
static void
}
}
-#ifndef __KERNEL__
-
-int
-srpc_check_event (int timeout)
-{
- lnet_event_t ev;
- int rc;
- int i;
-
- rc = LNetEQPoll(&srpc_data.rpc_lnet_eq, 1,
- timeout * 1000, &ev, &i);
- if (rc == 0) return 0;
-
- LASSERT (rc == -EOVERFLOW || rc == 1);
-
- /* We can't affort to miss any events... */
- if (rc == -EOVERFLOW) {
- CERROR ("Dropped an event!!!\n");
- abort();
- }
-
- srpc_lnet_ev_handler(&ev);
- return 1;
-}
-
-#endif
int
srpc_startup (void)
srpc_data.rpc_state = SRPC_STATE_NONE;
-#ifdef __KERNEL__
rc = LNetNIInit(LNET_PID_LUSTRE);
-#else
- if (the_lnet.ln_server_mode_flag)
- rc = LNetNIInit(LNET_PID_LUSTRE);
- else
- rc = LNetNIInit(getpid() | LNET_PID_USERFLAG);
-#endif
if (rc < 0) {
CERROR ("LNetNIInit() has failed: %d\n", rc);
return rc;
srpc_data.rpc_state = SRPC_STATE_NI_INIT;
LNetInvalidateHandle(&srpc_data.rpc_lnet_eq);
-#ifdef __KERNEL__
rc = LNetEQAlloc(0, srpc_lnet_ev_handler, &srpc_data.rpc_lnet_eq);
-#else
- rc = LNetEQAlloc(10240, LNET_EQ_HANDLER_NONE, &srpc_data.rpc_lnet_eq);
-#endif
if (rc != 0) {
CERROR("LNetEQAlloc() has failed: %d\n", rc);
goto bail;
#define LNET_ONLY
-#ifndef __KERNEL__
-
-/* XXX workaround XXX */
-#ifdef HAVE_SYS_TYPES_H
-#include <sys/types.h>
-#endif
-
-#endif
#include <libcfs/libcfs.h>
#include <lnet/lnet.h>
#include <lnet/lib-lnet.h>
lnet_handle_md_t bk_mdh;
int bk_sink; /* sink/source */
int bk_niov; /* # iov in bk_iovs */
-#ifdef __KERNEL__
lnet_kiov_t bk_iovs[0];
-#else
- struct page **bk_pages;
- lnet_md_iovec_t bk_iovs[0];
-#endif
} srpc_bulk_t; /* bulk descriptor */
/* message buffer descriptor */
return cfs_wi_deschedule(swi->swi_sched, &swi->swi_workitem);
}
-#ifndef __KERNEL__
-static inline int
-swi_check_events(void)
-{
- return cfs_wi_check_events();
-}
-#endif
-
int sfw_startup(void);
int srpc_startup(void);
void sfw_shutdown(void);
LASSERT (rpc != NULL);
LASSERT (!srpc_event_pending(rpc));
LASSERT (atomic_read(&rpc->crpc_refcount) == 0);
-#ifndef __KERNEL__
- LASSERT (rpc->crpc_bulk.bk_pages == NULL);
-#endif
if (rpc->crpc_fini == NULL) {
LIBCFS_FREE(rpc, srpc_client_rpc_size(rpc));
#undef STATE2STR
}
-#ifndef __KERNEL__
-
-int stt_poll_interval(void);
-int sfw_session_removed(void);
-
-int stt_check_events(void);
-int srpc_check_event(int timeout);
-
-int lnet_selftest_init(void);
-void lnet_selftest_fini(void);
-int selftest_wait_events(void);
-
-#else
-
#define selftest_wait_events() cfs_pause(cfs_time_seconds(1) / 10)
-#endif
-
#define lst_wait_until(cond, lock, fmt, ...) \
do { \
int __I = 2; \
cfs_time_t stt_prev_slot;
struct list_head stt_hash[STTIMER_NSLOTS];
int stt_shuttingdown;
-#ifdef __KERNEL__
wait_queue_head_t stt_waitq;
int stt_nthreads;
-#endif
} stt_data;
void
spin_lock(&stt_data.stt_lock);
-#ifdef __KERNEL__
LASSERT(stt_data.stt_nthreads > 0);
-#endif
LASSERT(!stt_data.stt_shuttingdown);
LASSERT(timer->stt_func != NULL);
LASSERT(list_empty(&timer->stt_list));
spin_lock(&stt_data.stt_lock);
-#ifdef __KERNEL__
LASSERT(stt_data.stt_nthreads > 0);
-#endif
LASSERT(!stt_data.stt_shuttingdown);
if (!list_empty(&timer->stt_list)) {
return expired;
}
-#ifdef __KERNEL__
static int
stt_timer_main (void *arg)
return 0;
}
-#else /* !__KERNEL__ */
-
-int
-stt_check_events (void)
-{
- return stt_check_timers(&stt_data.stt_prev_slot);
-}
-
-int
-stt_poll_interval (void)
-{
- return STTIMER_SLOTTIME;
-}
-
-#endif
int
stt_startup (void)
for (i = 0; i < STTIMER_NSLOTS; i++)
INIT_LIST_HEAD(&stt_data.stt_hash[i]);
-#ifdef __KERNEL__
stt_data.stt_nthreads = 0;
init_waitqueue_head(&stt_data.stt_waitq);
rc = stt_start_timer_thread();
if (rc != 0)
CERROR ("Can't spawn timer thread: %d\n", rc);
-#endif
return rc;
}
stt_data.stt_shuttingdown = 1;
-#ifdef __KERNEL__
wake_up(&stt_data.stt_waitq);
lst_wait_until(stt_data.stt_nthreads == 0, stt_data.stt_lock,
"waiting for %d threads to terminate\n",
stt_data.stt_nthreads);
-#endif
spin_unlock(&stt_data.stt_lock);
}