X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lnet%2Finclude%2Flnet%2Flib-lnet.h;h=ecc97da3e7be07ee0013e12923ddad70c637f3cd;hp=c2fec376f8d3e79a43517c6addaf0c51a4fbb4bb;hb=0127d64b8cadd28f2306f416058557dd8622c160;hpb=97084f70a6c2956d49d8dc289aeb8ede84e5f7b4

diff --git a/lnet/include/lnet/lib-lnet.h b/lnet/include/lnet/lib-lnet.h
index c2fec37..ecc97da 100644
--- a/lnet/include/lnet/lib-lnet.h
+++ b/lnet/include/lnet/lib-lnet.h
@@ -76,7 +76,6 @@ extern struct lnet the_lnet;	/* THE network */
 
 /* default timeout */
 #define DEFAULT_PEER_TIMEOUT    180
-#define LNET_LND_DEFAULT_TIMEOUT 5
 
 #ifdef HAVE_KERN_SOCK_GETNAME_2ARGS
 #define lnet_kernel_getpeername(sock, addr, addrlen) \
@@ -90,6 +89,24 @@ extern struct lnet the_lnet;	/* THE network */
 		kernel_getsockname(sock, addr, addrlen)
 #endif
 
+/*
+ * kernel 5.3: commit ef11db3310e272d3d8dbe8739e0770820dd20e52
+ * added in_dev_for_each_ifa_rtnl and in_dev_for_each_ifa_rcu
+ * and removed for_ifa and endfor_ifa.
+ * Use the _rtnl variant as the current locking is rtnl.
+ */
+#ifdef in_dev_for_each_ifa_rtnl
+#define DECLARE_CONST_IN_IFADDR(ifa)		const struct in_ifaddr *ifa
+#define endfor_ifa(in_dev)
+#else
+#define DECLARE_CONST_IN_IFADDR(ifa)
+#define in_dev_for_each_ifa_rtnl(ifa, in_dev)	for_ifa((in_dev))
+#define in_dev_for_each_ifa_rcu(ifa, in_dev)	for_ifa((in_dev))
+#endif
+
+int choose_ipv4_src(__u32 *ret,
+		    int interface, __u32 dst_ipaddr, struct net *ns);
+
 bool lnet_is_route_alive(struct lnet_route *route);
 bool lnet_is_gateway_alive(struct lnet_peer *gw);
 
@@ -183,76 +200,18 @@ lnet_net_lock_current(void)
 #define lnet_ptl_lock(ptl)	spin_lock(&(ptl)->ptl_lock)
 #define lnet_ptl_unlock(ptl)	spin_unlock(&(ptl)->ptl_lock)
 
-#define lnet_eq_wait_lock()	spin_lock(&the_lnet.ln_eq_wait_lock)
-#define lnet_eq_wait_unlock()	spin_unlock(&the_lnet.ln_eq_wait_lock)
 #define lnet_ni_lock(ni)	spin_lock(&(ni)->ni_lock)
 #define lnet_ni_unlock(ni)	spin_unlock(&(ni)->ni_lock)
 
 #define MAX_PORTALS	64
 
-#define LNET_SMALL_MD_SIZE   offsetof(struct lnet_libmd, md_iov.iov[1])
+#define LNET_SMALL_MD_SIZE   offsetof(struct lnet_libmd, md_kiov[1])
 extern struct kmem_cache *lnet_mes_cachep;	 /* MEs kmem_cache */
 extern struct kmem_cache *lnet_small_mds_cachep; /* <= LNET_SMALL_MD_SIZE bytes
 						  * MDs kmem_cache */
 extern struct kmem_cache *lnet_rspt_cachep;
 extern struct kmem_cache *lnet_msg_cachep;
 
-static inline struct lnet_eq *
-lnet_eq_alloc (void)
-{
-	struct lnet_eq *eq;
-
-	LIBCFS_ALLOC(eq, sizeof(*eq));
-	return (eq);
-}
-
-static inline void
-lnet_eq_free(struct lnet_eq *eq)
-{
-	LIBCFS_FREE(eq, sizeof(*eq));
-}
-
-static inline struct lnet_libmd *
-lnet_md_alloc(struct lnet_md *umd)
-{
-	struct lnet_libmd *md;
-	unsigned int size;
-	unsigned int niov;
-
-	if ((umd->options & LNET_MD_KIOV) != 0) {
-		niov = umd->length;
-		size = offsetof(struct lnet_libmd, md_iov.kiov[niov]);
-	} else {
-		niov = ((umd->options & LNET_MD_IOVEC) != 0) ?
- umd->length : 1; - size = offsetof(struct lnet_libmd, md_iov.iov[niov]); - } - - if (size <= LNET_SMALL_MD_SIZE) { - md = kmem_cache_alloc(lnet_small_mds_cachep, - GFP_NOFS | __GFP_ZERO); - if (md) { - CDEBUG(D_MALLOC, "slab-alloced 'md' of size %u at " - "%p.\n", size, md); - } else { - CDEBUG(D_MALLOC, "failed to allocate 'md' of size %u\n", - size); - return NULL; - } - } else { - LIBCFS_ALLOC(md, size); - } - - if (md != NULL) { - /* Set here in case of early free */ - md->md_options = umd->options; - md->md_niov = niov; - INIT_LIST_HEAD(&md->md_list); - } - - return md; -} - static inline void lnet_md_free(struct lnet_libmd *md) { @@ -260,10 +219,7 @@ lnet_md_free(struct lnet_libmd *md) LASSERTF(md->md_rspt_ptr == NULL, "md %p rsp %p\n", md, md->md_rspt_ptr); - if ((md->md_options & LNET_MD_KIOV) != 0) - size = offsetof(struct lnet_libmd, md_iov.kiov[md->md_niov]); - else - size = offsetof(struct lnet_libmd, md_iov.iov[md->md_niov]); + size = offsetof(struct lnet_libmd, md_kiov[md->md_niov]); if (size <= LNET_SMALL_MD_SIZE) { CDEBUG(D_MALLOC, "slab-freed 'md' at %p.\n", md); @@ -418,7 +374,7 @@ lnet_msg_alloc(void) { struct lnet_msg *msg; - msg = kmem_cache_alloc(lnet_msg_cachep, GFP_NOFS | __GFP_ZERO); + msg = kmem_cache_zalloc(lnet_msg_cachep, GFP_NOFS); return (msg); } @@ -435,7 +391,7 @@ lnet_rspt_alloc(int cpt) { struct lnet_rsp_tracker *rspt; - rspt = kmem_cache_alloc(lnet_rspt_cachep, GFP_NOFS | __GFP_ZERO); + rspt = kmem_cache_zalloc(lnet_rspt_cachep, GFP_NOFS); if (rspt) { lnet_net_lock(cpt); the_lnet.ln_counters[cpt]->lct_health.lch_rst_alloc++; @@ -520,6 +476,8 @@ void lnet_notify_locked(struct lnet_peer_ni *lp, int notifylnd, int alive, int lnet_add_route(__u32 net, __u32 hops, lnet_nid_t gateway_nid, __u32 priority, __u32 sensitivity); int lnet_del_route(__u32 net, lnet_nid_t gw_nid); +void lnet_move_route(struct lnet_route *route, struct lnet_peer *lp, + struct list_head *rt_list); void lnet_destroy_routes(void); int lnet_get_route(int idx, __u32 *net, __u32 *hops, lnet_nid_t *gateway, __u32 *alive, __u32 *priority, @@ -565,13 +523,12 @@ void lnet_build_msg_event(struct lnet_msg *msg, enum lnet_event_kind ev_type); void lnet_msg_commit(struct lnet_msg *msg, int cpt); void lnet_msg_decommit(struct lnet_msg *msg, int cpt, int status); -void lnet_eq_enqueue_event(struct lnet_eq *eq, struct lnet_event *ev); void lnet_prep_send(struct lnet_msg *msg, int type, struct lnet_process_id target, unsigned int offset, unsigned int len); int lnet_send(lnet_nid_t nid, struct lnet_msg *msg, lnet_nid_t rtr_nid); int lnet_send_ping(lnet_nid_t dest_nid, struct lnet_handle_md *mdh, int nnis, - void *user_ptr, struct lnet_eq *eq, bool recovery); + void *user_ptr, lnet_handler_t handler, bool recovery); void lnet_return_tx_credits_locked(struct lnet_msg *msg); void lnet_return_rx_credits_locked(struct lnet_msg *msg); void lnet_schedule_blocked_locked(struct lnet_rtrbufpool *rbp); @@ -695,46 +652,31 @@ void lnet_counters_get(struct lnet_counters *counters); void lnet_counters_reset(void); unsigned int lnet_iov_nob(unsigned int niov, struct kvec *iov); -int lnet_extract_iov(int dst_niov, struct kvec *dst, - int src_niov, struct kvec *src, +unsigned int lnet_kiov_nob(unsigned int niov, struct bio_vec *iov); +int lnet_extract_kiov(int dst_niov, struct bio_vec *dst, + int src_niov, struct bio_vec *src, unsigned int offset, unsigned int len); -unsigned int lnet_kiov_nob (unsigned int niov, lnet_kiov_t *iov); -int lnet_extract_kiov(int dst_niov, lnet_kiov_t *dst, - int src_niov, lnet_kiov_t *src, 
- unsigned int offset, unsigned int len); - void lnet_copy_iov2iov(unsigned int ndiov, struct kvec *diov, unsigned int doffset, unsigned int nsiov, struct kvec *siov, unsigned int soffset, unsigned int nob); void lnet_copy_kiov2iov(unsigned int niov, struct kvec *iov, unsigned int iovoffset, - unsigned int nkiov, lnet_kiov_t *kiov, + unsigned int nkiov, struct bio_vec *kiov, unsigned int kiovoffset, unsigned int nob); -void lnet_copy_iov2kiov(unsigned int nkiov, lnet_kiov_t *kiov, +void lnet_copy_iov2kiov(unsigned int nkiov, struct bio_vec *kiov, unsigned int kiovoffset, unsigned int niov, struct kvec *iov, unsigned int iovoffset, unsigned int nob); -void lnet_copy_kiov2kiov(unsigned int ndkiov, lnet_kiov_t *dkiov, +void lnet_copy_kiov2kiov(unsigned int ndkiov, struct bio_vec *dkiov, unsigned int doffset, - unsigned int nskiov, lnet_kiov_t *skiov, + unsigned int nskiov, struct bio_vec *skiov, unsigned int soffset, unsigned int nob); static inline void -lnet_copy_iov2flat(int dlen, void *dest, unsigned int doffset, - unsigned int nsiov, struct kvec *siov, unsigned int soffset, - unsigned int nob) -{ - struct kvec diov = { .iov_base = dest, .iov_len = dlen }; - - lnet_copy_iov2iov(1, &diov, doffset, - nsiov, siov, soffset, nob); -} - -static inline void lnet_copy_kiov2flat(int dlen, void *dest, unsigned int doffset, - unsigned int nsiov, lnet_kiov_t *skiov, + unsigned int nsiov, struct bio_vec *skiov, unsigned int soffset, unsigned int nob) { struct kvec diov = { .iov_base = dest, .iov_len = dlen }; @@ -744,17 +686,7 @@ lnet_copy_kiov2flat(int dlen, void *dest, unsigned int doffset, } static inline void -lnet_copy_flat2iov(unsigned int ndiov, struct kvec *diov, unsigned int doffset, - int slen, void *src, unsigned int soffset, - unsigned int nob) -{ - struct kvec siov = { .iov_base = src, .iov_len = slen }; - lnet_copy_iov2iov(ndiov, diov, doffset, - 1, &siov, soffset, nob); -} - -static inline void -lnet_copy_flat2kiov(unsigned int ndiov, lnet_kiov_t *dkiov, +lnet_copy_flat2kiov(unsigned int ndiov, struct bio_vec *dkiov, unsigned int doffset, int slen, void *src, unsigned int soffset, unsigned int nob) { @@ -766,7 +698,7 @@ lnet_copy_flat2kiov(unsigned int ndiov, lnet_kiov_t *dkiov, void lnet_me_unlink(struct lnet_me *me); void lnet_md_unlink(struct lnet_libmd *md); -void lnet_md_deconstruct(struct lnet_libmd *lmd, struct lnet_md *umd); +void lnet_md_deconstruct(struct lnet_libmd *lmd, struct lnet_event *ev); struct page *lnet_kvaddr_to_page(unsigned long vaddr); int lnet_cpt_of_md(struct lnet_libmd *md, unsigned int offset); @@ -774,8 +706,8 @@ unsigned int lnet_get_lnd_timeout(void); void lnet_register_lnd(const struct lnet_lnd *lnd); void lnet_unregister_lnd(const struct lnet_lnd *lnd); -int lnet_connect(struct socket **sockp, lnet_nid_t peer_nid, - __u32 local_ip, __u32 peer_ip, int peer_port, struct net *ns); +struct socket *lnet_connect(lnet_nid_t peer_nid, int interface, __u32 peer_ip, + int peer_port, struct net *ns); void lnet_connect_console_error(int rc, lnet_nid_t peer_nid, __u32 peer_ip, int port); int lnet_count_acceptor_nets(void); @@ -799,11 +731,11 @@ int lnet_sock_getaddr(struct socket *socket, bool remote, __u32 *ip, int *port); int lnet_sock_write(struct socket *sock, void *buffer, int nob, int timeout); int lnet_sock_read(struct socket *sock, void *buffer, int nob, int timeout); -int lnet_sock_listen(struct socket **sockp, __u32 ip, int port, int backlog, - struct net *ns); -int lnet_sock_connect(struct socket **sockp, - __u32 local_ip, int local_port, - __u32 peer_ip, 
int peer_port, struct net *ns); +struct socket *lnet_sock_listen(int port, int backlog, + struct net *ns); +struct socket *lnet_sock_connect(int interface, int local_port, + struct sockaddr *peeraddr, + struct net *ns); int lnet_peers_start_down(void); int lnet_peer_buffer_credits(struct lnet_net *net); @@ -847,9 +779,9 @@ int lnet_push_target_post(struct lnet_ping_buffer *pbuf, struct lnet_handle_md *mdh); void lnet_peer_push_event(struct lnet_event *ev); -int lnet_parse_ip2nets(char **networksp, char *ip2nets); -int lnet_parse_routes(char *route_str, int *im_a_router); -int lnet_parse_networks(struct list_head *nilist, char *networks, +int lnet_parse_ip2nets(const char **networksp, const char *ip2nets); +int lnet_parse_routes(const char *route_str, int *im_a_router); +int lnet_parse_networks(struct list_head *nilist, const char *networks, bool use_tcp_bonding); bool lnet_net_unique(__u32 net_id, struct list_head *nilist, struct lnet_net **net); @@ -996,18 +928,50 @@ lnet_set_lpni_healthv_locked(struct lnet_peer_ni *lpni, int value) lnet_update_peer_net_healthv(lpni); } +static inline bool +lnet_atomic_add_unless_max(atomic_t *v, int a, int u) +{ + int c = atomic_read(v); + bool mod = false; + int old; + int m; + + if (c == u) + return mod; + + for (;;) { + if (c + a >= u) + m = u; + else + m = c + a; + old = atomic_cmpxchg(v, c, m); + + if (old == u) + break; + + if (old == c) { + mod = true; + break; + } + c = old; + } + + return mod; +} + static inline void -lnet_inc_lpni_healthv_locked(struct lnet_peer_ni *lpni) +lnet_inc_lpni_healthv_locked(struct lnet_peer_ni *lpni, int value) { /* only adjust the net health if the lpni health value changed */ - if (atomic_add_unless(&lpni->lpni_healthv, 1, LNET_MAX_HEALTH_VALUE)) + if (lnet_atomic_add_unless_max(&lpni->lpni_healthv, value, + LNET_MAX_HEALTH_VALUE)) lnet_update_peer_net_healthv(lpni); } static inline void -lnet_inc_healthv(atomic_t *healthv) +lnet_inc_healthv(atomic_t *healthv, int value) { - atomic_add_unless(healthv, 1, LNET_MAX_HEALTH_VALUE); + lnet_atomic_add_unless_max(healthv, value, LNET_MAX_HEALTH_VALUE); } void lnet_incr_stats(struct lnet_element_stats *stats,
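
Note (editor's sketch, not part of the patch): the compatibility block added in the
second hunk lets a single loop walk a device's IPv4 addresses on kernels both before
and after 5.3. A minimal usage sketch, assuming kernel context with rtnl_lock() held
by the caller; the helper name first_ipv4_addr() is illustrative and is not defined
by this patch:

#include <linux/netdevice.h>
#include <linux/inetdevice.h>

/* Return the first IPv4 address of @dev in host byte order, or 0 if none. */
static __u32 first_ipv4_addr(struct net_device *dev)
{
	struct in_device *in_dev = __in_dev_get_rtnl(dev);
	__u32 addr = 0;
	DECLARE_CONST_IN_IFADDR(ifa);

	if (!in_dev)
		return 0;

	in_dev_for_each_ifa_rtnl(ifa, in_dev) {
		addr = ntohl(ifa->ifa_local);
		break;
	}
	endfor_ifa(in_dev);

	return addr;
}

On kernels >= 5.3 the macros expand to the upstream in_dev_for_each_ifa_rtnl()
iterator and DECLARE_CONST_IN_IFADDR() supplies the loop variable; on older kernels
they fall back to the for_ifa()/endfor_ifa() block, so the loop is written only once.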
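
Note (editor's sketch, not part of the patch): lnet_atomic_add_unless_max(), added in
the final hunk, is a cmpxchg loop that adds 'a' to an atomic_t while clamping the
result at 'u', and it returns true only if the stored value actually changed. The old
code called atomic_add_unless(v, 1, LNET_MAX_HEALTH_VALUE), which is safe for a fixed
increment of 1 but would overshoot the ceiling now that the health increment is a
parameter. A hedged usage illustration; the increment 25 is a made-up value:

	/* v = 990,  a = 25, u = 1000  ->  v becomes 1000, returns true  */
	/* v = 1000, a = 25, u = 1000  ->  v is unchanged, returns false */
	if (lnet_atomic_add_unless_max(&lpni->lpni_healthv, 25,
				       LNET_MAX_HEALTH_VALUE))
		lnet_update_peer_net_healthv(lpni);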