extern struct kmem_cache *lnet_mes_cachep; /* MEs kmem_cache */
extern struct kmem_cache *lnet_small_mds_cachep; /* <= LNET_SMALL_MD_SIZE bytes
* MDs kmem_cache */
+extern struct kmem_cache *lnet_udsp_cachep;
extern struct kmem_cache *lnet_rspt_cachep;
extern struct kmem_cache *lnet_msg_cachep;
return update;
}
+/* Wait for a concurrent, unlocked ->md_handler invocation on @md to finish.
+ *
+ * Called with the res_lock for @cpt held.  If LNET_MD_FLAG_HANDLING is set,
+ * the lock is dropped while we sleep and re-taken before returning, so @md
+ * may have been freed in the meantime: the caller must re-lookup the MD and
+ * re-check its state after this returns.
+ */
+static inline void lnet_md_wait_handling(struct lnet_libmd *md, int cpt)
+{
+ wait_queue_head_t *wq = __var_waitqueue(md);
+/* compat: the embedded wait-entry field differs between kernel versions —
+ * presumably HAVE_WAIT_QUEUE_ENTRY comes from autoconf; confirm its origin.
+ */
+#ifdef HAVE_WAIT_QUEUE_ENTRY
+ struct wait_bit_queue_entry entry;
+ wait_queue_entry_t *wqe = &entry.wq_entry;
+#else
+ struct wait_bit_queue entry;
+ wait_queue_entry_t *wqe = &entry.wait;
+#endif
+ /* queue ourselves on the var-waitqueue keyed by the MD's address */
+ init_wait_var_entry(&entry, md, 0);
+ prepare_to_wait_event(wq, wqe, TASK_IDLE);
+ if (md->md_flags & LNET_MD_FLAG_HANDLING) {
+ /* Race with unlocked call to ->md_handler.
+ * It is safe to drop the res_lock here as the
+ * caller has only just claimed it.
+ */
+ lnet_res_unlock(cpt);
+ schedule();
+ /* Cannot check md now, it might be freed. Caller
+ * must reclaim reference and check.
+ */
+ lnet_res_lock(cpt);
+ }
+ finish_wait(wq, wqe);
+}
+
static inline void
lnet_md_free(struct lnet_libmd *md)
{
static inline void
lnet_peer_ni_addref_locked(struct lnet_peer_ni *lp)
{
- LASSERT(atomic_read(&lp->lpni_refcount) > 0);
- atomic_inc(&lp->lpni_refcount);
+ /* Converted from a hand-rolled atomic refcount to a kref; the kref
+ * machinery performs its own sanity checking, subsuming the old LASSERT.
+ */
+ kref_get(&lp->lpni_kref);
}
-extern void lnet_destroy_peer_ni_locked(struct lnet_peer_ni *lp);
+extern void lnet_destroy_peer_ni_locked(struct kref *ref);
static inline void
lnet_peer_ni_decref_locked(struct lnet_peer_ni *lp)
{
- LASSERT(atomic_read(&lp->lpni_refcount) > 0);
- if (atomic_dec_and_test(&lp->lpni_refcount))
- lnet_destroy_peer_ni_locked(lp);
+ /* Drop a reference; when the count hits zero the kref layer invokes
+ * lnet_destroy_peer_ni_locked(), whose signature now takes struct kref *
+ * (see the updated extern declaration above).
+ */
+ kref_put(&lp->lpni_kref, lnet_destroy_peer_ni_locked);
}
static inline int
extern unsigned int lnet_numa_range;
extern unsigned int lnet_health_sensitivity;
extern unsigned int lnet_recovery_interval;
+extern unsigned int lnet_recovery_limit;
extern unsigned int lnet_peer_discovery_disabled;
extern unsigned int lnet_drop_asym_route;
extern unsigned int router_sensitivity_percentage;
struct lnet_ni *lnet_get_next_ni_locked(struct lnet_net *mynet,
struct lnet_ni *prev);
struct lnet_ni *lnet_get_ni_idx_locked(int idx);
+int lnet_get_net_healthv_locked(struct lnet_net *net);
extern int libcfs_ioctl_getdata(struct libcfs_ioctl_hdr **hdr_pp,
struct libcfs_ioctl_hdr __user *uparam);
extern int lnet_get_peer_list(__u32 *countp, __u32 *sizep,
struct lnet_process_id __user *ids);
extern void lnet_peer_ni_set_healthv(lnet_nid_t nid, int value, bool all);
-extern void lnet_peer_ni_add_to_recoveryq_locked(struct lnet_peer_ni *lpni);
+extern void lnet_peer_ni_add_to_recoveryq_locked(struct lnet_peer_ni *lpni,
+ struct list_head *queue,
+ time64_t now);
+extern int lnet_peer_add_pref_nid(struct lnet_peer_ni *lpni, lnet_nid_t nid);
+extern void lnet_peer_clr_pref_nids(struct lnet_peer_ni *lpni);
+extern int lnet_peer_del_pref_nid(struct lnet_peer_ni *lpni, lnet_nid_t nid);
+void lnet_peer_ni_set_selection_priority(struct lnet_peer_ni *lpni,
+ __u32 priority);
void lnet_router_debugfs_init(void);
void lnet_router_debugfs_fini(void);
int lnet_dyn_del_ni(struct lnet_ioctl_config_ni *conf);
int lnet_clear_lazy_portal(struct lnet_ni *ni, int portal, char *reason);
struct lnet_net *lnet_get_net_locked(__u32 net_id);
+void lnet_net_clr_pref_rtrs(struct lnet_net *net);
+int lnet_net_add_pref_rtr(struct lnet_net *net, lnet_nid_t gw_nid);
int lnet_islocalnid(lnet_nid_t nid);
int lnet_islocalnet(__u32 net);
void lnet_detach_rsp_tracker(struct lnet_libmd *md, int cpt);
void lnet_clean_zombie_rstqs(void);
+bool lnet_md_discarded(struct lnet_libmd *md);
void lnet_finalize(struct lnet_msg *msg, int rc);
bool lnet_send_error_simulation(struct lnet_msg *msg,
enum lnet_msg_hstatus *hstatus);
/** @} lnet_fault_simulation */
void lnet_counters_get_common(struct lnet_counters_common *common);
-void lnet_counters_get(struct lnet_counters *counters);
+int lnet_counters_get(struct lnet_counters *counters);
void lnet_counters_reset(void);
+/* Set the selection priority of local NI @ni.  The _locked suffix indicates
+ * the caller already holds the protecting lock — presumably lnet_net_lock;
+ * confirm against callers.
+ */
+static inline void
+lnet_ni_set_sel_priority_locked(struct lnet_ni *ni, __u32 priority)
+{
+ ni->ni_sel_priority = priority;
+}
+
+/* Set the selection priority of local net @net.  The _locked suffix indicates
+ * the caller already holds the protecting lock — presumably lnet_net_lock;
+ * confirm against callers.
+ */
+static inline void
+lnet_net_set_sel_priority_locked(struct lnet_net *net, __u32 priority)
+{
+ net->net_sel_priority = priority;
+}
unsigned int lnet_iov_nob(unsigned int niov, struct kvec *iov);
unsigned int lnet_kiov_nob(unsigned int niov, struct bio_vec *iov);
void lnet_register_lnd(const struct lnet_lnd *lnd);
void lnet_unregister_lnd(const struct lnet_lnd *lnd);
-struct socket *lnet_connect(lnet_nid_t peer_nid, int interface, __u32 peer_ip,
- int peer_port, struct net *ns);
+struct socket *lnet_connect(lnet_nid_t peer_nid, int interface,
+ struct sockaddr *peeraddr, struct net *ns);
void lnet_connect_console_error(int rc, lnet_nid_t peer_nid,
- __u32 peer_ip, int port);
+ struct sockaddr *sa);
int lnet_count_acceptor_nets(void);
int lnet_acceptor_timeout(void);
int lnet_acceptor_port(void);
};
int lnet_inet_enumerate(struct lnet_inetdev **dev_list, struct net *ns);
-int lnet_sock_setbuf(struct socket *socket, int txbufsize, int rxbufsize);
-int lnet_sock_getbuf(struct socket *socket, int *txbufsize, int *rxbufsize);
-int lnet_sock_getaddr(struct socket *socket, bool remote, __u32 *ip, int *port);
+void lnet_sock_setbuf(struct socket *socket, int txbufsize, int rxbufsize);
+void lnet_sock_getbuf(struct socket *socket, int *txbufsize, int *rxbufsize);
+int lnet_sock_getaddr(struct socket *socket, bool remote,
+ struct sockaddr_storage *peer);
int lnet_sock_write(struct socket *sock, void *buffer, int nob, int timeout);
int lnet_sock_read(struct socket *sock, void *buffer, int nob, int timeout);
int lnet_parse_ip2nets(const char **networksp, const char *ip2nets);
int lnet_parse_routes(const char *route_str, int *im_a_router);
-int lnet_parse_networks(struct list_head *nilist, const char *networks,
- bool use_tcp_bonding);
+int lnet_parse_networks(struct list_head *nilist, const char *networks);
bool lnet_net_unique(__u32 net_id, struct list_head *nilist,
struct lnet_net **net);
bool lnet_ni_unique_net(struct list_head *nilist, char *iface);
void lnet_peer_net_added(struct lnet_net *net);
lnet_nid_t lnet_peer_primary_nid_locked(lnet_nid_t nid);
int lnet_discover_peer_locked(struct lnet_peer_ni *lpni, int cpt, bool block);
+void lnet_peer_queue_message(struct lnet_peer *lp, struct lnet_msg *msg);
int lnet_peer_discovery_start(void);
void lnet_peer_discovery_stop(void);
void lnet_push_update_to_peers(int force);
struct lnet_peer_net *lnet_peer_get_net_locked(struct lnet_peer *peer,
__u32 net_id);
bool lnet_peer_is_pref_nid_locked(struct lnet_peer_ni *lpni, lnet_nid_t nid);
+int lnet_peer_add_pref_nid(struct lnet_peer_ni *lpni, lnet_nid_t nid);
+void lnet_peer_clr_pref_nids(struct lnet_peer_ni *lpni);
+bool lnet_peer_is_pref_rtr_locked(struct lnet_peer_ni *lpni, lnet_nid_t gw_nid);
+void lnet_peer_clr_pref_rtrs(struct lnet_peer_ni *lpni);
+int lnet_peer_add_pref_rtr(struct lnet_peer_ni *lpni, lnet_nid_t nid);
int lnet_peer_ni_set_non_mr_pref_nid(struct lnet_peer_ni *lpni, lnet_nid_t nid);
int lnet_add_peer_ni(lnet_nid_t key_nid, lnet_nid_t nid, bool mr);
int lnet_del_peer_ni(lnet_nid_t key_nid, lnet_nid_t nid);
__u32 *peer_tx_qnob);
int lnet_get_peer_ni_hstats(struct lnet_ioctl_peer_ni_hstats *stats);
+/* Set the selection priority of peer net @lpn.  The _locked suffix indicates
+ * the caller already holds the protecting lock — presumably lnet_net_lock;
+ * confirm against callers.
+ */
+static inline void
+lnet_peer_net_set_sel_priority_locked(struct lnet_peer_net *lpn, __u32 priority)
+{
+ lpn->lpn_sel_priority = priority;
+}
+
+
static inline struct lnet_peer_net *
lnet_find_peer_net_locked(struct lnet_peer *peer, __u32 net_id)
{
{
if (!(lp->lp_state & LNET_PEER_MULTI_RAIL))
return false;
+ if (lp->lp_state & LNET_PEER_MARK_DELETED)
+ return false;
if (lp->lp_state & LNET_PEER_FORCE_PUSH)
return true;
if (lp->lp_state & LNET_PEER_NO_DISCOVERY)
return false;
}
+/* Maximum backoff (in seconds) between recovery pings */
+#define LNET_RECOVERY_INTERVAL_MAX 900
+/* Exponential backoff for recovery pings: 1, 2, 4, ... 512 seconds for the
+ * first ten attempts, then capped at LNET_RECOVERY_INTERVAL_MAX.
+ *
+ * Returns the absolute time (@now + interval) of the next ping attempt.
+ * Return time64_t rather than unsigned int so the 64-bit timestamp is not
+ * truncated on its way back to the caller.
+ */
+static inline time64_t
+lnet_get_next_recovery_ping(unsigned int ping_count, time64_t now)
+{
+ unsigned int interval;
+
+ /* 2^9 = 512, 2^10 = 1024 */
+ if (ping_count > 9)
+ interval = LNET_RECOVERY_INTERVAL_MAX;
+ else
+ interval = 1 << ping_count;
+
+ return now + interval;
+}
+
+/* Stamp @lpni with the absolute time of its next recovery ping, derived
+ * from how many recovery pings it has already been sent (exponential
+ * backoff via lnet_get_next_recovery_ping()).
+ */
+static inline void
+lnet_peer_ni_set_next_ping(struct lnet_peer_ni *lpni, time64_t now)
+{
+ lpni->lpni_next_ping =
+ lnet_get_next_recovery_ping(lpni->lpni_ping_count, now);
+}
+
/*
* A peer NI is alive if it satisfies the following two conditions:
* 1. peer NI health >= LNET_MAX_HEALTH_VALUE * router_sensitivity_percentage
lnet_atomic_add_unless_max(healthv, value, LNET_MAX_HEALTH_VALUE);
}
+/* Return the number of entries currently on @list.  O(n) walk; no locking
+ * is done here, so the caller is presumably responsible for serialising
+ * access to the list.
+ */
+static inline int
+lnet_get_list_len(struct list_head *list)
+{
+ int len = 0;
+ struct list_head *pos;
+
+ for (pos = list->next; pos != list; pos = pos->next)
+ len++;
+
+ return len;
+}
+
void lnet_incr_stats(struct lnet_element_stats *stats,
enum lnet_msg_type msg_type,
enum lnet_stats_type stats_type);