*/
/*
* This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
*
* lnet/include/lnet/lib-lnet.h
*
/** exclusive lock */
#define LNET_LOCK_EX CFS_PERCPT_LOCK_EX
-/* default timeout */
+/* default timeout and credits */
#define DEFAULT_PEER_TIMEOUT 180
-#define LNET_LND_DEFAULT_TIMEOUT 5
+#define DEFAULT_PEER_CREDITS 8
+#define DEFAULT_CREDITS 256
+
+/* default number of connections per peer */
+#define DEFAULT_CONNS_PER_PEER 0
#ifdef HAVE_KERN_SOCK_GETNAME_2ARGS
#define lnet_kernel_getpeername(sock, addr, addrlen) \
kernel_getsockname(sock, addr, addrlen)
#endif
+/*
+ * kernel 5.3: commit ef11db3310e272d3d8dbe8739e0770820dd20e52
+ * kernel 4.18.0-193.el8:
+ * added in_dev_for_each_ifa_rtnl and in_dev_for_each_ifa_rcu
+ * and removed for_ifa and endfor_ifa.
+ * Use the _rtnl variant as the current locking is rtnl.
+ */
+#ifdef HAVE_IN_DEV_FOR_EACH_IFA_RTNL
+#define DECLARE_CONST_IN_IFADDR(ifa) const struct in_ifaddr *ifa
+#define endfor_ifa(in_dev)
+#else
+#define DECLARE_CONST_IN_IFADDR(ifa)
+#define in_dev_for_each_ifa_rtnl(ifa, in_dev) for_ifa((in_dev))
+#define in_dev_for_each_ifa_rcu(ifa, in_dev) for_ifa((in_dev))
+#endif
+
+int choose_ipv4_src(__u32 *ret,
+ int interface, __u32 dst_ipaddr, struct net *ns);
+
bool lnet_is_route_alive(struct lnet_route *route);
bool lnet_is_gateway_alive(struct lnet_peer *gw);
#define lnet_ptl_lock(ptl) spin_lock(&(ptl)->ptl_lock)
#define lnet_ptl_unlock(ptl) spin_unlock(&(ptl)->ptl_lock)
-#define lnet_eq_wait_lock() spin_lock(&the_lnet.ln_eq_wait_lock)
-#define lnet_eq_wait_unlock() spin_unlock(&the_lnet.ln_eq_wait_lock)
#define lnet_ni_lock(ni) spin_lock(&(ni)->ni_lock)
#define lnet_ni_unlock(ni) spin_unlock(&(ni)->ni_lock)
#define MAX_PORTALS 64
-#define LNET_SMALL_MD_SIZE offsetof(struct lnet_libmd, md_iov.iov[1])
+#define LNET_SMALL_MD_SIZE offsetof(struct lnet_libmd, md_kiov[1])
extern struct kmem_cache *lnet_mes_cachep; /* MEs kmem_cache */
extern struct kmem_cache *lnet_small_mds_cachep; /* <= LNET_SMALL_MD_SIZE bytes
* MDs kmem_cache */
+extern struct kmem_cache *lnet_udsp_cachep;
extern struct kmem_cache *lnet_rspt_cachep;
extern struct kmem_cache *lnet_msg_cachep;
-static inline struct lnet_eq *
-lnet_eq_alloc (void)
+static inline bool
+lnet_ni_set_status_locked(struct lnet_ni *ni, __u32 status)
+__must_hold(&ni->ni_lock)
{
- struct lnet_eq *eq;
+ bool update = false;
+
+ if (ni->ni_status && ni->ni_status->ns_status != status) {
+ CDEBUG(D_NET, "ni %s status changed from %#x to %#x\n",
+ libcfs_nidstr(&ni->ni_nid),
+ ni->ni_status->ns_status, status);
+ ni->ni_status->ns_status = status;
+ update = true;
+ }
- LIBCFS_ALLOC(eq, sizeof(*eq));
- return (eq);
+ return update;
}
-static inline void
-lnet_eq_free(struct lnet_eq *eq)
+static inline unsigned int
+lnet_ni_get_status_locked(struct lnet_ni *ni)
+__must_hold(&ni->ni_lock)
{
- LIBCFS_FREE(eq, sizeof(*eq));
+ if (nid_is_lo0(&ni->ni_nid))
+ return LNET_NI_STATUS_UP;
+ else if (atomic_read(&ni->ni_fatal_error_on))
+ return LNET_NI_STATUS_DOWN;
+ else if (ni->ni_status)
+ return ni->ni_status->ns_status;
+ else
+ return LNET_NI_STATUS_UP;
}
-static inline struct lnet_libmd *
-lnet_md_alloc(struct lnet_md *umd)
+static inline bool
+lnet_ni_set_status(struct lnet_ni *ni, __u32 status)
{
- struct lnet_libmd *md;
- unsigned int size;
- unsigned int niov;
+ bool update;
- if ((umd->options & LNET_MD_KIOV) != 0) {
- niov = umd->length;
- size = offsetof(struct lnet_libmd, md_iov.kiov[niov]);
- } else {
- niov = ((umd->options & LNET_MD_IOVEC) != 0) ?
- umd->length : 1;
- size = offsetof(struct lnet_libmd, md_iov.iov[niov]);
- }
+ lnet_ni_lock(ni);
+ update = lnet_ni_set_status_locked(ni, status);
+ lnet_ni_unlock(ni);
- if (size <= LNET_SMALL_MD_SIZE) {
- md = kmem_cache_alloc(lnet_small_mds_cachep,
- GFP_NOFS | __GFP_ZERO);
- if (md) {
- CDEBUG(D_MALLOC, "slab-alloced 'md' of size %u at "
- "%p.\n", size, md);
- } else {
- CDEBUG(D_MALLOC, "failed to allocate 'md' of size %u\n",
- size);
- return NULL;
- }
- } else {
- LIBCFS_ALLOC(md, size);
- }
+ return update;
+}
- if (md != NULL) {
- /* Set here in case of early free */
- md->md_options = umd->options;
- md->md_niov = niov;
- INIT_LIST_HEAD(&md->md_list);
+static inline void lnet_md_wait_handling(struct lnet_libmd *md, int cpt)
+{
+ wait_queue_head_t *wq = __var_waitqueue(md);
+#if defined(HAVE_WAIT_BIT_QUEUE_ENTRY) || !defined(HAVE_WAIT_VAR_EVENT)
+ struct wait_bit_queue_entry entry;
+ wait_queue_entry_t *wqe = &entry.wq_entry;
+#else
+ struct wait_bit_queue entry;
+ wait_queue_entry_t *wqe = &entry.wait;
+#endif
+ init_wait_var_entry(&entry, md, 0);
+ prepare_to_wait_event(wq, wqe, TASK_IDLE);
+ if (md->md_flags & LNET_MD_FLAG_HANDLING) {
+ /* Race with unlocked call to ->md_handler.
+ * It is safe to drop the res_lock here as the
+ * caller has only just claimed it.
+ */
+ lnet_res_unlock(cpt);
+ schedule();
+ /* Cannot check md now, it might be freed. Caller
+ * must reclaim reference and check.
+ */
+ lnet_res_lock(cpt);
}
-
- return md;
+ finish_wait(wq, wqe);
}
static inline void
LASSERTF(md->md_rspt_ptr == NULL, "md %p rsp %p\n", md, md->md_rspt_ptr);
- if ((md->md_options & LNET_MD_KIOV) != 0)
- size = offsetof(struct lnet_libmd, md_iov.kiov[md->md_niov]);
- else
- size = offsetof(struct lnet_libmd, md_iov.iov[md->md_niov]);
+ size = offsetof(struct lnet_libmd, md_kiov[md->md_niov]);
if (size <= LNET_SMALL_MD_SIZE) {
CDEBUG(D_MALLOC, "slab-freed 'md' at %p.\n", md);
}
}
-static inline struct lnet_me *
-lnet_me_alloc (void)
-{
- struct lnet_me *me;
-
- me = kmem_cache_alloc(lnet_mes_cachep, GFP_NOFS | __GFP_ZERO);
-
- if (me)
- CDEBUG(D_MALLOC, "slab-alloced 'me' at %p.\n", me);
- else
- CDEBUG(D_MALLOC, "failed to allocate 'me'\n");
-
- return me;
-}
-
-static inline void
-lnet_me_free(struct lnet_me *me)
-{
- CDEBUG(D_MALLOC, "slab-freed 'me' at %p.\n", me);
- kmem_cache_free(lnet_mes_cachep, me);
-}
-
struct lnet_libhandle *lnet_res_lh_lookup(struct lnet_res_container *rec,
__u64 cookie);
void lnet_res_lh_initialize(struct lnet_res_container *rec,
}
static inline void
-lnet_eq2handle(struct lnet_handle_eq *handle, struct lnet_eq *eq)
-{
- if (eq == NULL) {
- LNetInvalidateEQHandle(handle);
- return;
- }
-
- handle->cookie = eq->eq_lh.lh_cookie;
-}
-
-static inline struct lnet_eq *
-lnet_handle2eq(struct lnet_handle_eq *handle)
-{
- /* ALWAYS called with resource lock held */
- struct lnet_libhandle *lh;
-
- lh = lnet_res_lh_lookup(&the_lnet.ln_eq_container, handle->cookie);
- if (lh == NULL)
- return NULL;
-
- return lh_entry(lh, struct lnet_eq, eq_lh);
-}
-
-static inline void
lnet_md2handle(struct lnet_handle_md *handle, struct lnet_libmd *md)
{
handle->cookie = md->md_lh.lh_cookie;
static inline void
lnet_peer_ni_addref_locked(struct lnet_peer_ni *lp)
{
- LASSERT(atomic_read(&lp->lpni_refcount) > 0);
- atomic_inc(&lp->lpni_refcount);
+ kref_get(&lp->lpni_kref);
}
-extern void lnet_destroy_peer_ni_locked(struct lnet_peer_ni *lp);
+extern void lnet_destroy_peer_ni_locked(struct kref *ref);
static inline void
lnet_peer_ni_decref_locked(struct lnet_peer_ni *lp)
{
- LASSERT(atomic_read(&lp->lpni_refcount) > 0);
- if (atomic_dec_and_test(&lp->lpni_refcount))
- lnet_destroy_peer_ni_locked(lp);
+ kref_put(&lp->lpni_kref, lnet_destroy_peer_ni_locked);
}
static inline int
{
struct lnet_msg *msg;
- msg = kmem_cache_alloc(lnet_msg_cachep, GFP_NOFS | __GFP_ZERO);
+ msg = kmem_cache_zalloc(lnet_msg_cachep, GFP_NOFS);
return (msg);
}
{
struct lnet_rsp_tracker *rspt;
- rspt = kmem_cache_alloc(lnet_rspt_cachep, GFP_NOFS | __GFP_ZERO);
+ rspt = kmem_cache_zalloc(lnet_rspt_cachep, GFP_NOFS);
if (rspt) {
lnet_net_lock(cpt);
the_lnet.ln_counters[cpt]->lct_health.lch_rst_alloc++;
char *iface);
+/* Hash a (possibly large-address) NID into a peer hash-table bucket:
+ * mix the four 32-bit address words together, then fold in the net
+ * number, reduced to LNET_PEER_HASH_BITS bits.
+ */
static inline int
-lnet_nid2peerhash(lnet_nid_t nid)
+lnet_nid2peerhash(struct lnet_nid *nid)
{
- return hash_long(nid, LNET_PEER_HASH_BITS);
+ u32 h = 0;
+ int i;
+
+ for (i = 0; i < 4; i++)
+ h = hash_32(nid->nid_addr[i]^h, 32);
+ return hash_32(LNET_NID_NET(nid) ^ h, LNET_PEER_HASH_BITS);
}
static inline struct list_head *
extern const struct lnet_lnd the_lolnd;
extern int avoid_asym_router_failure;
-extern unsigned int lnet_nid_cpt_hash(lnet_nid_t nid, unsigned int number);
-extern int lnet_cpt_of_nid_locked(lnet_nid_t nid, struct lnet_ni *ni);
+extern unsigned int lnet_nid_cpt_hash(struct lnet_nid *nid,
+ unsigned int number);
+extern int lnet_cpt_of_nid_locked(struct lnet_nid *nid, struct lnet_ni *ni);
extern int lnet_cpt_of_nid(lnet_nid_t nid, struct lnet_ni *ni);
+extern int lnet_nid2cpt(struct lnet_nid *nid, struct lnet_ni *ni);
extern struct lnet_ni *lnet_nid2ni_locked(lnet_nid_t nid, int cpt);
+extern struct lnet_ni *lnet_nid_to_ni_locked(struct lnet_nid *nid, int cpt);
extern struct lnet_ni *lnet_nid2ni_addref(lnet_nid_t nid);
extern struct lnet_ni *lnet_net2ni_locked(__u32 net, int cpt);
extern struct lnet_ni *lnet_net2ni_addref(__u32 net);
+extern struct lnet_ni *lnet_nid_to_ni_addref(struct lnet_nid *nid);
struct lnet_net *lnet_get_net_locked(__u32 net_id);
int lnet_lib_init(void);
void lnet_lib_exit(void);
+extern unsigned int lnet_response_tracking;
extern unsigned lnet_transaction_timeout;
extern unsigned lnet_retry_count;
+extern unsigned int lnet_lnd_timeout;
extern unsigned int lnet_numa_range;
extern unsigned int lnet_health_sensitivity;
extern unsigned int lnet_recovery_interval;
+extern unsigned int lnet_recovery_limit;
extern unsigned int lnet_peer_discovery_disabled;
extern unsigned int lnet_drop_asym_route;
extern unsigned int router_sensitivity_percentage;
time64_t when);
void lnet_notify_locked(struct lnet_peer_ni *lp, int notifylnd, int alive,
time64_t when);
-int lnet_add_route(__u32 net, __u32 hops, lnet_nid_t gateway_nid,
+int lnet_add_route(__u32 net, __u32 hops, struct lnet_nid *gateway,
__u32 priority, __u32 sensitivity);
int lnet_del_route(__u32 net, lnet_nid_t gw_nid);
+void lnet_move_route(struct lnet_route *route, struct lnet_peer *lp,
+ struct list_head *rt_list);
void lnet_destroy_routes(void);
int lnet_get_route(int idx, __u32 *net, __u32 *hops,
lnet_nid_t *gateway, __u32 *alive, __u32 *priority,
struct lnet_ni *lnet_get_next_ni_locked(struct lnet_net *mynet,
struct lnet_ni *prev);
struct lnet_ni *lnet_get_ni_idx_locked(int idx);
+int lnet_get_net_healthv_locked(struct lnet_net *net);
extern int libcfs_ioctl_getdata(struct libcfs_ioctl_hdr **hdr_pp,
struct libcfs_ioctl_hdr __user *uparam);
extern int lnet_get_peer_list(__u32 *countp, __u32 *sizep,
struct lnet_process_id __user *ids);
extern void lnet_peer_ni_set_healthv(lnet_nid_t nid, int value, bool all);
-extern void lnet_peer_ni_add_to_recoveryq_locked(struct lnet_peer_ni *lpni);
+extern void lnet_peer_ni_add_to_recoveryq_locked(struct lnet_peer_ni *lpni,
+ struct list_head *queue,
+ time64_t now);
+extern int lnet_peer_add_pref_nid(struct lnet_peer_ni *lpni,
+ struct lnet_nid *nid);
+extern void lnet_peer_clr_pref_nids(struct lnet_peer_ni *lpni);
+extern int lnet_peer_del_pref_nid(struct lnet_peer_ni *lpni,
+ struct lnet_nid *nid);
+void lnet_peer_ni_set_selection_priority(struct lnet_peer_ni *lpni,
+ __u32 priority);
+extern void lnet_ni_add_to_recoveryq_locked(struct lnet_ni *ni,
+ struct list_head *queue,
+ time64_t now);
void lnet_router_debugfs_init(void);
void lnet_router_debugfs_fini(void);
int lnet_dyn_del_ni(struct lnet_ioctl_config_ni *conf);
int lnet_clear_lazy_portal(struct lnet_ni *ni, int portal, char *reason);
struct lnet_net *lnet_get_net_locked(__u32 net_id);
+void lnet_net_clr_pref_rtrs(struct lnet_net *net);
+int lnet_net_add_pref_rtr(struct lnet_net *net, struct lnet_nid *gw_nid);
-int lnet_islocalnid(lnet_nid_t nid);
+int lnet_islocalnid4(lnet_nid_t nid);
+int lnet_islocalnid(struct lnet_nid *nid);
int lnet_islocalnet(__u32 net);
int lnet_islocalnet_locked(__u32 net);
void lnet_msg_commit(struct lnet_msg *msg, int cpt);
void lnet_msg_decommit(struct lnet_msg *msg, int cpt, int status);
-void lnet_eq_enqueue_event(struct lnet_eq *eq, struct lnet_event *ev);
void lnet_prep_send(struct lnet_msg *msg, int type,
struct lnet_process_id target, unsigned int offset,
unsigned int len);
int lnet_send(lnet_nid_t nid, struct lnet_msg *msg, lnet_nid_t rtr_nid);
int lnet_send_ping(lnet_nid_t dest_nid, struct lnet_handle_md *mdh, int nnis,
- void *user_ptr, struct lnet_handle_eq eqh, bool recovery);
+ void *user_ptr, lnet_handler_t handler, bool recovery);
void lnet_return_tx_credits_locked(struct lnet_msg *msg);
void lnet_return_rx_credits_locked(struct lnet_msg *msg);
void lnet_schedule_blocked_locked(struct lnet_rtrbufpool *rbp);
/* match-table functions */
struct list_head *lnet_mt_match_head(struct lnet_match_table *mtable,
- struct lnet_process_id id, __u64 mbits);
+ struct lnet_processid *id, __u64 mbits);
struct lnet_match_table *lnet_mt_of_attach(unsigned int index,
- struct lnet_process_id id,
+ struct lnet_processid *id,
__u64 mbits, __u64 ignore_bits,
enum lnet_ins_pos pos);
int lnet_mt_match_md(struct lnet_match_table *mtable,
void lnet_ni_recv(struct lnet_ni *ni, void *private, struct lnet_msg *msg,
int delayed, unsigned int offset,
unsigned int mlen, unsigned int rlen);
+void lnet_ni_send(struct lnet_ni *ni, struct lnet_msg *msg);
struct lnet_msg *lnet_create_reply_msg(struct lnet_ni *ni,
struct lnet_msg *get_msg);
void lnet_detach_rsp_tracker(struct lnet_libmd *md, int cpt);
void lnet_clean_zombie_rstqs(void);
+bool lnet_md_discarded(struct lnet_libmd *md);
void lnet_finalize(struct lnet_msg *msg, int rc);
bool lnet_send_error_simulation(struct lnet_msg *msg,
enum lnet_msg_hstatus *hstatus);
char *lnet_health_error2str(enum lnet_msg_hstatus hstatus);
char *lnet_msgtyp2str(int type);
-void lnet_print_hdr(struct lnet_hdr *hdr);
int lnet_fail_nid(lnet_nid_t nid, unsigned int threshold);
/** \addtogroup lnet_fault_simulation @{ */
/** @} lnet_fault_simulation */
void lnet_counters_get_common(struct lnet_counters_common *common);
-void lnet_counters_get(struct lnet_counters *counters);
+int lnet_counters_get(struct lnet_counters *counters);
void lnet_counters_reset(void);
+/* Set the selection priority of a local NI. Plain store; per the
+ * _locked suffix the caller is expected to hold the relevant lnet
+ * lock -- TODO(review): confirm which lock serializes ni_sel_priority.
+ */
+static inline void
+lnet_ni_set_sel_priority_locked(struct lnet_ni *ni, __u32 priority)
+{
+ ni->ni_sel_priority = priority;
+}
+
+/* Set the selection priority of a local net (same locking convention
+ * as the NI variant).
+ */
+static inline void
+lnet_net_set_sel_priority_locked(struct lnet_net *net, __u32 priority)
+{
+ net->net_sel_priority = priority;
+}
unsigned int lnet_iov_nob(unsigned int niov, struct kvec *iov);
-int lnet_extract_iov(int dst_niov, struct kvec *dst,
- int src_niov, struct kvec *src,
+unsigned int lnet_kiov_nob(unsigned int niov, struct bio_vec *iov);
+int lnet_extract_kiov(int dst_niov, struct bio_vec *dst,
+ int src_niov, struct bio_vec *src,
unsigned int offset, unsigned int len);
-unsigned int lnet_kiov_nob (unsigned int niov, lnet_kiov_t *iov);
-int lnet_extract_kiov(int dst_niov, lnet_kiov_t *dst,
- int src_niov, lnet_kiov_t *src,
- unsigned int offset, unsigned int len);
-
void lnet_copy_iov2iov(unsigned int ndiov, struct kvec *diov,
unsigned int doffset,
unsigned int nsiov, struct kvec *siov,
unsigned int soffset, unsigned int nob);
void lnet_copy_kiov2iov(unsigned int niov, struct kvec *iov,
unsigned int iovoffset,
- unsigned int nkiov, lnet_kiov_t *kiov,
+ unsigned int nkiov, struct bio_vec *kiov,
unsigned int kiovoffset, unsigned int nob);
-void lnet_copy_iov2kiov(unsigned int nkiov, lnet_kiov_t *kiov,
+void lnet_copy_iov2kiov(unsigned int nkiov, struct bio_vec *kiov,
unsigned int kiovoffset,
unsigned int niov, struct kvec *iov,
unsigned int iovoffset, unsigned int nob);
-void lnet_copy_kiov2kiov(unsigned int ndkiov, lnet_kiov_t *dkiov,
+void lnet_copy_kiov2kiov(unsigned int ndkiov, struct bio_vec *dkiov,
unsigned int doffset,
- unsigned int nskiov, lnet_kiov_t *skiov,
+ unsigned int nskiov, struct bio_vec *skiov,
unsigned int soffset, unsigned int nob);
static inline void
-lnet_copy_iov2flat(int dlen, void *dest, unsigned int doffset,
- unsigned int nsiov, struct kvec *siov, unsigned int soffset,
- unsigned int nob)
-{
- struct kvec diov = { .iov_base = dest, .iov_len = dlen };
-
- lnet_copy_iov2iov(1, &diov, doffset,
- nsiov, siov, soffset, nob);
-}
-
-static inline void
lnet_copy_kiov2flat(int dlen, void *dest, unsigned int doffset,
- unsigned int nsiov, lnet_kiov_t *skiov,
+ unsigned int nsiov, struct bio_vec *skiov,
unsigned int soffset, unsigned int nob)
{
struct kvec diov = { .iov_base = dest, .iov_len = dlen };
}
static inline void
-lnet_copy_flat2iov(unsigned int ndiov, struct kvec *diov, unsigned int doffset,
- int slen, void *src, unsigned int soffset,
- unsigned int nob)
-{
- struct kvec siov = { .iov_base = src, .iov_len = slen };
- lnet_copy_iov2iov(ndiov, diov, doffset,
- 1, &siov, soffset, nob);
-}
-
-static inline void
-lnet_copy_flat2kiov(unsigned int ndiov, lnet_kiov_t *dkiov,
+lnet_copy_flat2kiov(unsigned int ndiov, struct bio_vec *dkiov,
unsigned int doffset, int slen, void *src,
unsigned int soffset, unsigned int nob)
{
void lnet_me_unlink(struct lnet_me *me);
void lnet_md_unlink(struct lnet_libmd *md);
-void lnet_md_deconstruct(struct lnet_libmd *lmd, struct lnet_md *umd);
+void lnet_md_deconstruct(struct lnet_libmd *lmd, struct lnet_event *ev);
struct page *lnet_kvaddr_to_page(unsigned long vaddr);
+struct page *lnet_get_first_page(struct lnet_libmd *md, unsigned int offset);
int lnet_cpt_of_md(struct lnet_libmd *md, unsigned int offset);
unsigned int lnet_get_lnd_timeout(void);
void lnet_register_lnd(const struct lnet_lnd *lnd);
void lnet_unregister_lnd(const struct lnet_lnd *lnd);
-int lnet_connect(struct socket **sockp, lnet_nid_t peer_nid,
- __u32 local_ip, __u32 peer_ip, int peer_port, struct net *ns);
-void lnet_connect_console_error(int rc, lnet_nid_t peer_nid,
- __u32 peer_ip, int port);
+struct socket *lnet_connect(struct lnet_nid *peer_nid, int interface,
+ struct sockaddr *peeraddr, struct net *ns);
+void lnet_connect_console_error(int rc, struct lnet_nid *peer_nid,
+ struct sockaddr *sa);
int lnet_count_acceptor_nets(void);
int lnet_acceptor_timeout(void);
int lnet_acceptor_port(void);
};
int lnet_inet_enumerate(struct lnet_inetdev **dev_list, struct net *ns);
-int lnet_sock_setbuf(struct socket *socket, int txbufsize, int rxbufsize);
-int lnet_sock_getbuf(struct socket *socket, int *txbufsize, int *rxbufsize);
-int lnet_sock_getaddr(struct socket *socket, bool remote, __u32 *ip, int *port);
+void lnet_sock_setbuf(struct socket *socket, int txbufsize, int rxbufsize);
+void lnet_sock_getbuf(struct socket *socket, int *txbufsize, int *rxbufsize);
+int lnet_sock_getaddr(struct socket *socket, bool remote,
+ struct sockaddr_storage *peer);
int lnet_sock_write(struct socket *sock, void *buffer, int nob, int timeout);
int lnet_sock_read(struct socket *sock, void *buffer, int nob, int timeout);
-int lnet_sock_listen(struct socket **sockp, __u32 ip, int port, int backlog,
- struct net *ns);
-int lnet_sock_connect(struct socket **sockp, int *fatal,
- __u32 local_ip, int local_port,
- __u32 peer_ip, int peer_port, struct net *ns);
+struct socket *lnet_sock_listen(int port, int backlog,
+ struct net *ns);
+struct socket *lnet_sock_connect(int interface, int local_port,
+ struct sockaddr *peeraddr,
+ struct net *ns);
int lnet_peers_start_down(void);
int lnet_peer_buffer_credits(struct lnet_net *net);
static inline void lnet_ping_buffer_decref(struct lnet_ping_buffer *pbuf)
{
- if (atomic_dec_and_test(&pbuf->pb_refcnt))
+ if (atomic_dec_and_test(&pbuf->pb_refcnt)) {
+ wake_up_var(&pbuf->pb_refcnt);
lnet_ping_buffer_free(pbuf);
+ }
}
static inline int lnet_push_target_resize_needed(void)
}
int lnet_push_target_resize(void);
+int lnet_push_target_post(struct lnet_ping_buffer *pbuf,
+ struct lnet_handle_md *mdh);
void lnet_peer_push_event(struct lnet_event *ev);
-int lnet_parse_ip2nets(char **networksp, char *ip2nets);
-int lnet_parse_routes(char *route_str, int *im_a_router);
-int lnet_parse_networks(struct list_head *nilist, char *networks,
- bool use_tcp_bonding);
+int lnet_parse_ip2nets(const char **networksp, const char *ip2nets);
+int lnet_parse_routes(const char *route_str, int *im_a_router);
+int lnet_parse_networks(struct list_head *nilist, const char *networks);
bool lnet_net_unique(__u32 net_id, struct list_head *nilist,
struct lnet_net **net);
bool lnet_ni_unique_net(struct list_head *nilist, char *iface);
void lnet_incr_dlc_seq(void);
__u32 lnet_get_dlc_seq_locked(void);
-int lnet_get_net_count(void);
-extern unsigned int lnet_current_net_count;
struct lnet_peer_net *lnet_get_next_peer_net_locked(struct lnet_peer *lp,
__u32 prev_lpn_id);
struct lnet_peer_ni *prev);
struct lnet_peer_ni *lnet_nid2peerni_locked(lnet_nid_t nid, lnet_nid_t pref,
int cpt);
-struct lnet_peer_ni *lnet_nid2peerni_ex(lnet_nid_t nid, int cpt);
+struct lnet_peer_ni *lnet_peerni_by_nid_locked(struct lnet_nid *nid,
+ struct lnet_nid *pref,
+ int cpt);
+struct lnet_peer_ni *lnet_nid2peerni_ex(struct lnet_nid *nid, int cpt);
struct lnet_peer_ni *lnet_peer_get_ni_locked(struct lnet_peer *lp,
lnet_nid_t nid);
+struct lnet_peer_ni *lnet_peer_ni_get_locked(struct lnet_peer *lp,
+ struct lnet_nid *nid);
struct lnet_peer_ni *lnet_find_peer_ni_locked(lnet_nid_t nid);
+struct lnet_peer_ni *lnet_peer_ni_find_locked(struct lnet_nid *nid);
struct lnet_peer *lnet_find_peer(lnet_nid_t nid);
void lnet_peer_net_added(struct lnet_net *net);
lnet_nid_t lnet_peer_primary_nid_locked(lnet_nid_t nid);
int lnet_discover_peer_locked(struct lnet_peer_ni *lpni, int cpt, bool block);
+void lnet_peer_queue_message(struct lnet_peer *lp, struct lnet_msg *msg);
int lnet_peer_discovery_start(void);
void lnet_peer_discovery_stop(void);
void lnet_push_update_to_peers(int force);
void lnet_debug_peer(lnet_nid_t nid);
struct lnet_peer_net *lnet_peer_get_net_locked(struct lnet_peer *peer,
__u32 net_id);
-bool lnet_peer_is_pref_nid_locked(struct lnet_peer_ni *lpni, lnet_nid_t nid);
-int lnet_peer_ni_set_non_mr_pref_nid(struct lnet_peer_ni *lpni, lnet_nid_t nid);
-int lnet_add_peer_ni(lnet_nid_t key_nid, lnet_nid_t nid, bool mr);
+bool lnet_peer_is_pref_nid_locked(struct lnet_peer_ni *lpni,
+ struct lnet_nid *nid);
+int lnet_peer_add_pref_nid(struct lnet_peer_ni *lpni, struct lnet_nid *nid);
+void lnet_peer_clr_pref_nids(struct lnet_peer_ni *lpni);
+bool lnet_peer_is_pref_rtr_locked(struct lnet_peer_ni *lpni,
+ struct lnet_nid *gw_nid);
+void lnet_peer_clr_pref_rtrs(struct lnet_peer_ni *lpni);
+int lnet_peer_add_pref_rtr(struct lnet_peer_ni *lpni, struct lnet_nid *nid);
+int lnet_peer_ni_set_non_mr_pref_nid(struct lnet_peer_ni *lpni,
+ struct lnet_nid *nid);
+int lnet_add_peer_ni(lnet_nid_t key_nid, lnet_nid_t nid, bool mr, bool temp);
int lnet_del_peer_ni(lnet_nid_t key_nid, lnet_nid_t nid);
int lnet_get_peer_info(struct lnet_ioctl_peer_cfg *cfg, void __user *bulk);
int lnet_get_peer_ni_info(__u32 peer_index, __u64 *nid,
__u32 *peer_tx_qnob);
int lnet_get_peer_ni_hstats(struct lnet_ioctl_peer_ni_hstats *stats);
+/* Set the selection priority of a peer net. Plain store; per the
+ * _locked suffix the caller is expected to hold the relevant lnet
+ * lock -- TODO(review): confirm which lock serializes lpn_sel_priority.
+ */
+static inline void
+lnet_peer_net_set_sel_priority_locked(struct lnet_peer_net *lpn, __u32 priority)
+{
+ lpn->lpn_sel_priority = priority;
+}
+
+
static inline struct lnet_peer_net *
lnet_find_peer_net_locked(struct lnet_peer *peer, __u32 net_id)
{
static inline bool
lnet_peer_ni_is_primary(struct lnet_peer_ni *lpni)
{
- return lpni->lpni_nid == lpni->lpni_peer_net->lpn_peer->lp_primary_nid;
+ return nid_same(&lpni->lpni_nid,
+ &lpni->lpni_peer_net->lpn_peer->lp_primary_nid);
}
bool lnet_peer_is_uptodate(struct lnet_peer *lp);
{
if (!(lp->lp_state & LNET_PEER_MULTI_RAIL))
return false;
+ if (lp->lp_state & LNET_PEER_MARK_DELETED)
+ return false;
if (lp->lp_state & LNET_PEER_FORCE_PUSH)
return true;
if (lp->lp_state & LNET_PEER_NO_DISCOVERY)
return false;
}
+/* Upper bound, in seconds, on the recovery ping backoff interval. */
+#define LNET_RECOVERY_INTERVAL_MAX 900
+/* Compute the absolute time of the next recovery ping: "now" plus an
+ * exponential backoff of 1 << ping_count seconds, clamped at
+ * LNET_RECOVERY_INTERVAL_MAX.
+ * NOTE(review): the return type truncates the time64_t sum to
+ * unsigned int -- consider returning time64_t.
+ */
+static inline unsigned int
+lnet_get_next_recovery_ping(unsigned int ping_count, time64_t now)
+{
+ unsigned int interval;
+
+ /* 2^9 = 512 < 900 but 2^10 = 1024 > 900, so clamp once the
+ * doubling would exceed LNET_RECOVERY_INTERVAL_MAX
+ */
+ if (ping_count > 9)
+ interval = LNET_RECOVERY_INTERVAL_MAX;
+ else
+ interval = 1 << ping_count;
+
+ return now + interval;
+}
+
+/* Schedule the next recovery ping of a peer NI based on how many
+ * pings have already been sent to it.
+ */
+static inline void
+lnet_peer_ni_set_next_ping(struct lnet_peer_ni *lpni, time64_t now)
+{
+ lpni->lpni_next_ping =
+ lnet_get_next_recovery_ping(lpni->lpni_ping_count, now);
+}
+
+/* Schedule the next recovery ping of a local NI (same backoff as the
+ * peer NI variant).
+ */
+static inline void
+lnet_ni_set_next_ping(struct lnet_ni *ni, time64_t now)
+{
+ ni->ni_next_ping = lnet_get_next_recovery_ping(ni->ni_ping_count, now);
+}
+
/*
- * A peer is alive if it satisfies the following two conditions:
- * 1. peer health >= LNET_MAX_HEALTH_VALUE * router_sensitivity_percentage
+ * A peer NI is alive if it satisfies the following two conditions:
+ * 1. peer NI health >= LNET_MAX_HEALTH_VALUE * router_sensitivity_percentage
* 2. the cached NI status received when we discover the peer is UP
*/
static inline bool
}
static inline void
-lnet_set_healthv(atomic_t *healthv, int value)
+lnet_update_peer_net_healthv(struct lnet_peer_ni *lpni)
+{
+ struct lnet_peer_net *lpn;
+ int best_healthv = 0;
+
+ lpn = lpni->lpni_peer_net;
+
+ list_for_each_entry(lpni, &lpn->lpn_peer_nis, lpni_peer_nis) {
+ int lpni_healthv = atomic_read(&lpni->lpni_healthv);
+ if (best_healthv < lpni_healthv)
+ best_healthv = lpni_healthv;
+ }
+
+ lpn->lpn_healthv = best_healthv;
+}
+
+/* Set a peer NI's health to an exact value and, only if it actually
+ * changed, recompute the owning peer net's cached best health via
+ * lnet_update_peer_net_healthv(). Per the _locked suffix the caller
+ * is expected to hold the relevant lnet lock -- TODO confirm.
+ */
+static inline void
+lnet_set_lpni_healthv_locked(struct lnet_peer_ni *lpni, int value)
+{
+ if (atomic_read(&lpni->lpni_healthv) == value)
+ return;
+ atomic_set(&lpni->lpni_healthv, value);
+ lnet_update_peer_net_healthv(lpni);
+}
+
+/* Atomically add "a" to "*v", clamping the result at the maximum "u".
+ * Returns true if *v was modified, false if it was already at the
+ * maximum -- either on entry or because a racing updater reached it
+ * first.
+ * NOTE(review): "c + a" can overflow int if c is near INT_MAX;
+ * callers appear to pass small health increments -- confirm.
+ */
+static inline bool
+lnet_atomic_add_unless_max(atomic_t *v, int a, int u)
+{
+ int c = atomic_read(v);
+ bool mod = false;
+ int old;
+ int m;
+
+ if (c == u)
+ return mod;
+
+ for (;;) {
+ /* clamp the target value at the maximum */
+ if (c + a >= u)
+ m = u;
+ else
+ m = c + a;
+ old = atomic_cmpxchg(v, c, m);
+
+ /* a racing updater already reached the max: nothing to do */
+ if (old == u)
+ break;
+
+ if (old == c) {
+ mod = true;
+ break;
+ }
+ /* lost the race: retry from the value we observed */
+ c = old;
+ }
+
+ return mod;
+}
+
+static inline void
+lnet_inc_lpni_healthv_locked(struct lnet_peer_ni *lpni, int value)
{
- atomic_set(healthv, value);
+ /* only adjust the net health if the lpni health value changed */
+ if (lnet_atomic_add_unless_max(&lpni->lpni_healthv, value,
+ LNET_MAX_HEALTH_VALUE))
+ lnet_update_peer_net_healthv(lpni);
}
static inline void
-lnet_inc_healthv(atomic_t *healthv)
+lnet_inc_healthv(atomic_t *healthv, int value)
{
- atomic_add_unless(healthv, 1, LNET_MAX_HEALTH_VALUE);
+ lnet_atomic_add_unless_max(healthv, value, LNET_MAX_HEALTH_VALUE);
+}
+
+/* Count the entries on a list by walking it (O(n)). The list must be
+ * stable for the duration of the walk; presumably the caller holds
+ * the lock protecting it -- verify at call sites.
+ */
+static inline int
+lnet_get_list_len(struct list_head *list)
+{
+ struct list_head *l;
+ int count = 0;
+
+ list_for_each(l, list)
+ count++;
+
+ return count;
}
void lnet_incr_stats(struct lnet_element_stats *stats,
void lnet_usr_translate_stats(struct lnet_ioctl_element_msg_stats *msg_stats,
struct lnet_element_stats *stats);
+/* Record the new aliveness of a route and log the transition.
+ * atomic_xchg() returns the previous state, so only an actual
+ * up<->down change is reported; storing the same state again is
+ * silent.
+ */
+static inline void
+lnet_set_route_aliveness(struct lnet_route *route, bool alive)
+{
+ bool old = atomic_xchg(&route->lr_alive, alive);
+
+ if (old != alive)
+ CERROR("route to %s through %s has gone from %s to %s\n",
+ libcfs_net2str(route->lr_net),
+ libcfs_nidstr(&route->lr_gateway->lp_primary_nid),
+ old ? "up" : "down",
+ alive ? "up" : "down");
+}
#endif