extern struct kmem_cache *lnet_mes_cachep; /* MEs kmem_cache */
extern struct kmem_cache *lnet_small_mds_cachep; /* <= LNET_SMALL_MD_SIZE bytes
* MDs kmem_cache */
+extern struct kmem_cache *lnet_rspt_cachep;
+extern struct kmem_cache *lnet_msg_cachep;
/*
 * NOTE(review): this hunk is mangled.  The header says lnet_eq_alloc(void),
 * but the body below references a variable `md` (a struct lnet_libmd *) that
 * is never declared here -- these lines appear to belong to the MD allocation
 * path (size computation for lnet_libmd with KIOV fragments), with the real
 * lnet_eq_alloc() context lines lost from the patch.  The added LASSERTF
 * asserts that no response tracker is still attached to the MD at this point.
 * Confirm against the unpatched file before applying.
 */
static inline struct lnet_eq *
lnet_eq_alloc (void)
{
unsigned int size;
+ LASSERTF(md->md_rspt_ptr == NULL, "md %p rsp %p\n", md, md->md_rspt_ptr);
+
if ((md->md_options & LNET_MD_KIOV) != 0)
size = offsetof(struct lnet_libmd, md_iov.kiov[md->md_niov]);
else
}
}
/*
 * NOTE(review): all-deletion hunk.  The slab-backed ME helpers
 * lnet_me_alloc()/lnet_me_free() (built on lnet_mes_cachep, zeroing
 * allocation via GFP_NOFS | __GFP_ZERO, with D_MALLOC tracing) are removed
 * by this patch.  Presumably ME allocation is inlined at the call sites or
 * embedded in another structure -- verify that no remaining caller still
 * references these helpers and that lnet_mes_cachep itself is retired.
 */
-static inline struct lnet_me *
-lnet_me_alloc (void)
-{
- struct lnet_me *me;
-
- me = kmem_cache_alloc(lnet_mes_cachep, GFP_NOFS | __GFP_ZERO);
-
- if (me)
- CDEBUG(D_MALLOC, "slab-alloced 'me' at %p.\n", me);
- else
- CDEBUG(D_MALLOC, "failed to allocate 'me'\n");
-
- return me;
-}
-
-static inline void
-lnet_me_free(struct lnet_me *me)
-{
- CDEBUG(D_MALLOC, "slab-freed 'me' at %p.\n", me);
- kmem_cache_free(lnet_mes_cachep, me);
-}
-
-
struct lnet_libhandle *lnet_res_lh_lookup(struct lnet_res_container *rec,
__u64 cookie);
void lnet_res_lh_initialize(struct lnet_res_container *rec,
}
/*
 * NOTE(review): lnet_eq2handle()/lnet_handle2eq() -- the EQ <-> opaque-handle
 * translation through the ln_eq_container cookie table -- are deleted by this
 * patch; only lnet_md2handle() survives.  This matches the lnet_send_ping()
 * prototype change later in this file, which switches from passing a
 * struct lnet_handle_eq by value to passing a struct lnet_eq * directly.
 * Verify no remaining caller still uses EQ handles.
 */
static inline void
-lnet_eq2handle(struct lnet_handle_eq *handle, struct lnet_eq *eq)
-{
- if (eq == NULL) {
- LNetInvalidateEQHandle(handle);
- return;
- }
-
- handle->cookie = eq->eq_lh.lh_cookie;
-}
-
-static inline struct lnet_eq *
-lnet_handle2eq(struct lnet_handle_eq *handle)
-{
- /* ALWAYS called with resource lock held */
- struct lnet_libhandle *lh;
-
- lh = lnet_res_lh_lookup(&the_lnet.ln_eq_container, handle->cookie);
- if (lh == NULL)
- return NULL;
-
- return lh_entry(lh, struct lnet_eq, eq_lh);
-}
-
-static inline void
/* Store the MD's library-handle cookie into the caller-visible handle. */
lnet_md2handle(struct lnet_handle_md *handle, struct lnet_libmd *md)
{
handle->cookie = md->md_lh.lh_cookie;
}
/*
 * NOTE(review): lnet_me2handle()/lnet_handle2me() -- the per-CPT ME
 * handle-to-pointer translation (cookie -> ln_me_containers[cpt] lookup)
 * -- are deleted by this patch, consistent with the removal of the ME
 * slab helpers earlier in this file.
 */
static inline void
-lnet_me2handle(struct lnet_handle_me *handle, struct lnet_me *me)
-{
- handle->cookie = me->me_lh.lh_cookie;
-}
-
-static inline struct lnet_me *
-lnet_handle2me(struct lnet_handle_me *handle)
-{
- /* ALWAYS called with resource lock held */
- struct lnet_libhandle *lh;
- int cpt;
-
- cpt = lnet_cpt_of_cookie(handle->cookie);
- lh = lnet_res_lh_lookup(the_lnet.ln_me_containers[cpt],
- handle->cookie);
- if (lh == NULL)
- return NULL;
-
- return lh_entry(lh, struct lnet_me, me_lh);
-}
-
-static inline void
/*
 * Take a reference on a peer net (atomic increment of lpn_refcount).
 * NOTE(review): the closing brace of this function is outside this hunk.
 */
lnet_peer_net_addref_locked(struct lnet_peer_net *lpn)
{
atomic_inc(&lpn->lpn_refcount);
/*
 * lnet_msg allocation/free switched from LIBCFS_ALLOC/LIBCFS_FREE to a
 * dedicated slab cache (lnet_msg_cachep, declared at the top of this patch).
 * __GFP_ZERO preserves the old "no need to zero" semantics of LIBCFS_ALLOC.
 * NOTE(review): the alloc function's name/return-type lines are outside this
 * hunk; kmem_cache_alloc() can return NULL, so callers must still check.
 */
{
struct lnet_msg *msg;
- LIBCFS_ALLOC(msg, sizeof(*msg));
+ msg = kmem_cache_alloc(lnet_msg_cachep, GFP_NOFS | __GFP_ZERO);
- /* no need to zero, LIBCFS_ALLOC does for us */
return (msg);
}
/* Free a message back to the slab cache; must not be on an active list. */
lnet_msg_free(struct lnet_msg *msg)
{
LASSERT(!msg->msg_onactivelist);
- LIBCFS_FREE(msg, sizeof(*msg));
+ kmem_cache_free(lnet_msg_cachep, msg);
}
/*
 * Allocate a response tracker from the new lnet_rspt_cachep slab cache.
 *
 * The patched version also fixes a latent accounting bug: the old code
 * incremented the per-CPT lch_rst_alloc health counter unconditionally,
 * even when LIBCFS_ALLOC failed; the new code bumps it only on success.
 * The counter is protected by the per-CPT net lock.
 */
static inline struct lnet_rsp_tracker *
lnet_rspt_alloc(int cpt)
{
struct lnet_rsp_tracker *rspt;
- LIBCFS_ALLOC(rspt, sizeof(*rspt));
- lnet_net_lock(cpt);
- the_lnet.ln_counters[cpt]->lct_health.lch_rst_alloc++;
- lnet_net_unlock(cpt);
+
+ rspt = kmem_cache_alloc(lnet_rspt_cachep, GFP_NOFS | __GFP_ZERO);
+ if (rspt) {
+ lnet_net_lock(cpt);
+ the_lnet.ln_counters[cpt]->lct_health.lch_rst_alloc++;
+ lnet_net_unlock(cpt);
+ }
+ CDEBUG(D_MALLOC, "rspt alloc %p\n", rspt);
return rspt;
}
/*
 * Return a response tracker to the slab cache and decrement the per-CPT
 * lch_rst_alloc health counter under the net lock.
 *
 * NOTE(review): the counter is decremented *after* the free -- safe, since
 * the counter update never dereferences rspt, but worth confirming there is
 * no symmetry expectation with the alloc path.  The closing brace of this
 * function lies outside this hunk.  Unlike the alloc side, the free path is
 * unconditional, so the alloc/free counter stays balanced only if callers
 * never pass NULL (kmem_cache_free(NULL) would also be a bug).
 */
static inline void
lnet_rspt_free(struct lnet_rsp_tracker *rspt, int cpt)
{
- LIBCFS_FREE(rspt, sizeof(*rspt));
+ CDEBUG(D_MALLOC, "rspt free %p\n", rspt);
+
+ kmem_cache_free(lnet_rspt_cachep, rspt);
lnet_net_lock(cpt);
the_lnet.ln_counters[cpt]->lct_health.lch_rst_alloc--;
lnet_net_unlock(cpt);
((1U << the_lnet.ln_remote_nets_hbits) - 1)];
}
-extern struct lnet_lnd the_lolnd;
+extern const struct lnet_lnd the_lolnd;
extern int avoid_asym_router_failure;
extern unsigned int lnet_nid_cpt_hash(lnet_nid_t nid, unsigned int number);
unsigned int len);
int lnet_send(lnet_nid_t nid, struct lnet_msg *msg, lnet_nid_t rtr_nid);
int lnet_send_ping(lnet_nid_t dest_nid, struct lnet_handle_md *mdh, int nnis,
- void *user_ptr, struct lnet_handle_eq eqh, bool recovery);
+ void *user_ptr, struct lnet_eq *eq, bool recovery);
void lnet_return_tx_credits_locked(struct lnet_msg *msg);
void lnet_return_rx_credits_locked(struct lnet_msg *msg);
void lnet_schedule_blocked_locked(struct lnet_rtrbufpool *rbp);
int lnet_cpt_of_md(struct lnet_libmd *md, unsigned int offset);
unsigned int lnet_get_lnd_timeout(void);
-void lnet_register_lnd(struct lnet_lnd *lnd);
-void lnet_unregister_lnd(struct lnet_lnd *lnd);
+void lnet_register_lnd(const struct lnet_lnd *lnd);
+void lnet_unregister_lnd(const struct lnet_lnd *lnd);
int lnet_connect(struct socket **sockp, lnet_nid_t peer_nid,
__u32 local_ip, __u32 peer_ip, int peer_port, struct net *ns);
int lnet_sock_listen(struct socket **sockp, __u32 ip, int port, int backlog,
struct net *ns);
-int lnet_sock_accept(struct socket **newsockp, struct socket *sock);
int lnet_sock_connect(struct socket **sockp, int *fatal,
__u32 local_ip, int local_port,
__u32 peer_ip, int peer_port, struct net *ns);
void lnet_consolidate_routes_locked(struct lnet_peer *orig_lp,
struct lnet_peer *new_lp);
void lnet_router_discovery_complete(struct lnet_peer *lp);
+void lnet_router_discovery_ping_reply(struct lnet_peer *lp);
int lnet_monitor_thr_start(void);
void lnet_monitor_thr_stop(void);
lnet_ping_buffer_free(pbuf);
}
/*
 * NOTE(review): lnet_ping_buffer_numref() (refcount read accessor) is
 * deleted by this patch -- verify nothing still calls it.
 */
-static inline int lnet_ping_buffer_numref(struct lnet_ping_buffer *pbuf)
-{
- return atomic_read(&pbuf->pb_refcnt);
-}
-
/*
 * True when the push-target ping buffer holds fewer NIs than currently
 * required (ln_push_target_nnis), i.e. it must be reallocated larger.
 */
static inline int lnet_push_target_resize_needed(void)
{
return the_lnet.ln_push_target->pb_nnis < the_lnet.ln_push_target_nnis;
}
int lnet_push_target_resize(void);
+int lnet_push_target_post(struct lnet_ping_buffer *pbuf,
+ struct lnet_handle_md *mdh);
void lnet_peer_push_event(struct lnet_event *ev);
int lnet_parse_ip2nets(char **networksp, char *ip2nets);
void lnet_incr_dlc_seq(void);
__u32 lnet_get_dlc_seq_locked(void);
int lnet_get_net_count(void);
+extern unsigned int lnet_current_net_count;
struct lnet_peer_net *lnet_get_next_peer_net_locked(struct lnet_peer *lp,
__u32 prev_lpn_id);
}
bool lnet_peer_is_uptodate(struct lnet_peer *lp);
+bool lnet_peer_is_uptodate_locked(struct lnet_peer *lp);
bool lnet_is_discovery_disabled(struct lnet_peer *lp);
+bool lnet_is_discovery_disabled_locked(struct lnet_peer *lp);
bool lnet_peer_gw_discovery(struct lnet_peer *lp);
static inline bool
}
/*
 * This patch replaces the generic lnet_set_healthv(atomic_t *, int) with
 * peer-NI-aware helpers that also keep the enclosing peer net's cached
 * health (lpn_healthv) in sync: the net's health is defined as the best
 * (maximum) health value among its peer NIs.
 *
 * NOTE(review): lpn_healthv is written as a plain int here while the
 * per-NI values are atomics -- presumably the "_locked" suffix means the
 * caller holds the net lock, serializing these updates; confirm.
 * Also note list_for_each_entry() below reuses the `lpni` parameter as its
 * iteration cursor, clobbering it -- correct only because lpn was captured
 * first, but fragile if this function is later extended.
 */
static inline void
-lnet_set_healthv(atomic_t *healthv, int value)
+lnet_update_peer_net_healthv(struct lnet_peer_ni *lpni)
+{
+ struct lnet_peer_net *lpn;
+ int best_healthv = 0;
+
+ lpn = lpni->lpni_peer_net;
+
+ list_for_each_entry(lpni, &lpn->lpn_peer_nis, lpni_peer_nis) {
+ int lpni_healthv = atomic_read(&lpni->lpni_healthv);
+ if (best_healthv < lpni_healthv)
+ best_healthv = lpni_healthv;
+ }
+
+ lpn->lpn_healthv = best_healthv;
+}
+
+/*
+ * Set a peer NI's health to an absolute value; skips the (relatively
+ * expensive) peer-net recomputation when the value is unchanged.
+ */
+static inline void
+lnet_set_lpni_healthv_locked(struct lnet_peer_ni *lpni, int value)
+{
+ if (atomic_read(&lpni->lpni_healthv) == value)
+ return;
+ atomic_set(&lpni->lpni_healthv, value);
+ lnet_update_peer_net_healthv(lpni);
+}
+
+/*
+ * Increment a peer NI's health, saturating at LNET_MAX_HEALTH_VALUE.
+ * atomic_add_unless() returns non-zero only if it performed the add, so
+ * the peer-net health is recomputed only when the value actually changed.
+ */
+static inline void
+lnet_inc_lpni_healthv_locked(struct lnet_peer_ni *lpni)
{
- atomic_set(healthv, value);
+ /* only adjust the net health if the lpni health value changed */
+ if (atomic_add_unless(&lpni->lpni_healthv, 1, LNET_MAX_HEALTH_VALUE))
+ lnet_update_peer_net_healthv(lpni)
}
static inline void