-int lnet_get_peer_info(__u32 peer_index, __u64 *nid,
- char alivness[LNET_MAX_STR_LEN],
- __u32 *cpt_iter, __u32 *refcount,
- __u32 *ni_peer_tx_credits, __u32 *peer_tx_credits,
- __u32 *peer_rtr_credits, __u32 *peer_min_rtr_credtis,
- __u32 *peer_tx_qnob);
+/* Look up the peer_net with the given net_id under peer; "_locked" suffix
+ * implies the caller holds the relevant lnet lock -- TODO confirm which. */
+struct lnet_peer_net *lnet_peer_get_net_locked(struct lnet_peer *peer,
+ __u32 net_id);
+/* Query/set membership of nid on lpni's preferred-NID list (see .c file). */
+bool lnet_peer_is_pref_nid_locked(struct lnet_peer_ni *lpni, lnet_nid_t nid);
+int lnet_peer_ni_set_non_mr_pref_nid(struct lnet_peer_ni *lpni, lnet_nid_t nid);
+/* Add/remove a peer NI under the peer identified by key_nid; mr selects
+ * Multi-Rail behavior on add. */
+int lnet_add_peer_ni(lnet_nid_t key_nid, lnet_nid_t nid, bool mr);
+int lnet_del_peer_ni(lnet_nid_t key_nid, lnet_nid_t nid);
+/* ioctl back-ends: copy peer (resp. per-NI) info out; bulk is a user-space
+ * buffer for the variable-sized portion. */
+int lnet_get_peer_info(struct lnet_ioctl_peer_cfg *cfg, void __user *bulk);
+/* NOTE(review): "alivness" and "credtis" are pre-existing parameter-name
+ * typos; renaming here only would desync the header from the definition. */
+int lnet_get_peer_ni_info(__u32 peer_index, __u64 *nid,
+ char alivness[LNET_MAX_STR_LEN],
+ __u32 *cpt_iter, __u32 *refcount,
+ __u32 *ni_peer_tx_credits, __u32 *peer_tx_credits,
+ __u32 *peer_rtr_credits, __u32 *peer_min_rtr_credtis,
+ __u32 *peer_tx_qnob);
+int lnet_get_peer_ni_hstats(struct lnet_ioctl_peer_ni_hstats *stats);
+
+/*
+ * Walk peer's lp_peer_nets list and return the peer_net whose lpn_net_id
+ * matches net_id, or NULL if the peer has no NI on that net.  The "_locked"
+ * suffix implies the caller holds the lnet lock protecting the list --
+ * TODO confirm which lock against the callers.
+ */
+static inline struct lnet_peer_net *
+lnet_find_peer_net_locked(struct lnet_peer *peer, __u32 net_id)
+{
+ struct lnet_peer_net *peer_net;
+
+ list_for_each_entry(peer_net, &peer->lp_peer_nets, lpn_peer_nets) {
+ if (peer_net->lpn_net_id == net_id)
+ return peer_net;
+ }
+
+ return NULL;
+}
+
+/* True when the LNET_PEER_MULTI_RAIL state flag is set on the peer. */
+static inline bool
+lnet_peer_is_multi_rail(struct lnet_peer *lp)
+{
+ return (lp->lp_state & LNET_PEER_MULTI_RAIL) != 0;
+}
+
+/* True when the peer owning this peer NI has LNET_PEER_CONFIGURED set. */
+static inline bool
+lnet_peer_ni_is_configured(struct lnet_peer_ni *lpni)
+{
+ struct lnet_peer *lp = lpni->lpni_peer_net->lpn_peer;
+
+ return (lp->lp_state & LNET_PEER_CONFIGURED) != 0;
+}
+
+/* True when this peer NI's NID is the owning peer's primary NID. */
+static inline bool
+lnet_peer_ni_is_primary(struct lnet_peer_ni *lpni)
+{
+ struct lnet_peer *lp = lpni->lpni_peer_net->lpn_peer;
+
+ return lp->lp_primary_nid == lpni->lpni_nid;
+}
+
+/* Peer-state predicates; contracts live at the definitions in the .c file
+ * (summaries here would be inferred from names only). */
+bool lnet_peer_is_uptodate(struct lnet_peer *lp);
+bool lnet_is_discovery_disabled(struct lnet_peer *lp);
+bool lnet_peer_gw_discovery(struct lnet_peer *lp);
+
+/*
+ * Decide whether peer lp is due a push -- presumably of the local ping
+ * buffer, given the ln_ping_target_seqno comparison (TODO confirm).
+ * Only Multi-Rail peers are eligible.  Note the check order matters:
+ * FORCE_PUSH wins even when NO_DISCOVERY is set or discovery is globally
+ * disabled.  Otherwise a push is due when the sequence number recorded
+ * for the peer lags the current ping target sequence number.
+ */
+static inline bool
+lnet_peer_needs_push(struct lnet_peer *lp)
+{
+ if (!(lp->lp_state & LNET_PEER_MULTI_RAIL))
+ return false;
+ /* a forced push overrides the discovery checks below */
+ if (lp->lp_state & LNET_PEER_FORCE_PUSH)
+ return true;
+ if (lp->lp_state & LNET_PEER_NO_DISCOVERY)
+ return false;
+ /* if discovery is not enabled then no need to push */
+ if (lnet_peer_discovery_disabled)
+ return false;
+ if (lp->lp_node_seqno < atomic_read(&the_lnet.ln_ping_target_seqno))
+ return true;
+ return false;
+}
+
+/*
+ * A peer NI is alive if it satisfies the following two conditions:
+ * 1. peer NI health >=
+ *    LNET_MAX_HEALTH_VALUE * router_sensitivity_percentage / 100
+ * 2. the cached NI status received when we discover the peer is UP
+ */
+static inline bool
+lnet_is_peer_ni_alive(struct lnet_peer_ni *lpni)
+{
+ /* health threshold scaled by the sensitivity percentage (0-100) */
+ bool halive = (atomic_read(&lpni->lpni_healthv) >=
+ (LNET_MAX_HEALTH_VALUE * router_sensitivity_percentage / 100));
+
+ return halive && lpni->lpni_ns_status == LNET_NI_STATUS_UP;
+}