/** exclusive lock */
#define LNET_LOCK_EX CFS_PERCPT_LOCK_EX
-/* default timeout */
+/* default timeout and credits */
#define DEFAULT_PEER_TIMEOUT 180
+#define DEFAULT_PEER_CREDITS 8
+#define DEFAULT_CREDITS 256
#ifdef HAVE_KERN_SOCK_GETNAME_2ARGS
#define lnet_kernel_getpeername(sock, addr, addrlen) \
/*
* kernel 5.3: commit ef11db3310e272d3d8dbe8739e0770820dd20e52
+ * kernel 4.18.0-193.el8:
* added in_dev_for_each_ifa_rtnl and in_dev_for_each_ifa_rcu
* and removed for_ifa and endfor_ifa.
 * Use the _rtnl variant as the current locking is rtnl.
*/
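/* Usage sketch for the compat macros below (illustrative; assumes the
 * caller holds rtnl_lock, e.g. the in_device came from __in_dev_get_rtnl()):
 *
 *	struct in_device *in_dev = __in_dev_get_rtnl(dev);
 *	DECLARE_CONST_IN_IFADDR(ifa);
 *
 *	in_dev_for_each_ifa_rtnl(ifa, in_dev) {
 *		... read ifa->ifa_local / ifa->ifa_address ...
 *	}
 *	endfor_ifa(in_dev);
 */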
-#ifdef in_dev_for_each_ifa_rtnl
+#ifdef HAVE_IN_DEV_FOR_EACH_IFA_RTNL
#define DECLARE_CONST_IN_IFADDR(ifa) const struct in_ifaddr *ifa
#define endfor_ifa(in_dev)
#else
extern struct kmem_cache *lnet_rspt_cachep;
extern struct kmem_cache *lnet_msg_cachep;
+static inline bool
+lnet_ni_set_status_locked(struct lnet_ni *ni, __u32 status)
+__must_hold(&ni->ni_lock)
+{
+ bool update = false;
+
+ if (ni->ni_status && ni->ni_status->ns_status != status) {
+ CDEBUG(D_NET, "ni %s status changed from %#x to %#x\n",
+ libcfs_nid2str(ni->ni_nid),
+ ni->ni_status->ns_status, status);
+ ni->ni_status->ns_status = status;
+ update = true;
+ }
+
+ return update;
+}
+
+static inline bool
+lnet_ni_set_status(struct lnet_ni *ni, __u32 status)
+{
+ bool update;
+
+ lnet_ni_lock(ni);
+ update = lnet_ni_set_status_locked(ni, status);
+ lnet_ni_unlock(ni);
+
+ return update;
+}
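+
+/* Usage sketch (illustrative; "changed" below is just a caller-local flag):
+ * the return value tells the caller whether the visible ns_status actually
+ * flipped, so follow-up work is only done on a real transition, e.g.:
+ *
+ *	changed = lnet_ni_set_status(ni, LNET_NI_STATUS_DOWN);
+ *	if (changed)
+ *		... notify/push the updated status ...
+ */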
+
+static inline void lnet_md_wait_handling(struct lnet_libmd *md, int cpt)
+{
+ wait_queue_head_t *wq = __var_waitqueue(md);
+#ifdef HAVE_WAIT_QUEUE_ENTRY
+ struct wait_bit_queue_entry entry;
+ wait_queue_entry_t *wqe = &entry.wq_entry;
+#else
+	/* Older kernels without wait_queue_entry_t: the wait entry embedded
+	 * in struct wait_bit_queue is still a wait_queue_t.
+	 */
+	struct wait_bit_queue entry;
+	wait_queue_t *wqe = &entry.wait;
+#endif
+ init_wait_var_entry(&entry, md, 0);
+ prepare_to_wait_event(wq, wqe, TASK_IDLE);
+ if (md->md_flags & LNET_MD_FLAG_HANDLING) {
+ /* Race with unlocked call to ->md_handler.
+ * It is safe to drop the res_lock here as the
+ * caller has only just claimed it.
+ */
+ lnet_res_unlock(cpt);
+ schedule();
+ /* Cannot check md now, it might be freed. Caller
+ * must reclaim reference and check.
+ */
+ lnet_res_lock(cpt);
+ }
+ finish_wait(wq, wqe);
+}
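+
+/* Caller-side sketch of the expected protocol (local names illustrative):
+ * because lnet_md_wait_handling() drops the res_lock while it sleeps, the
+ * md pointer is stale on return and must be looked up again from its
+ * handle before use:
+ *
+ *	lnet_res_lock(cpt);
+ * again:
+ *	md = lnet_handle2md(&mdh);
+ *	if (md && (md->md_flags & LNET_MD_FLAG_HANDLING)) {
+ *		lnet_md_wait_handling(md, cpt);
+ *		goto again;
+ *	}
+ *	...
+ *	lnet_res_unlock(cpt);
+ */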
+
static inline void
lnet_md_free(struct lnet_libmd *md)
{
int lnet_lib_init(void);
void lnet_lib_exit(void);
+extern unsigned int lnet_response_tracking;
extern unsigned lnet_transaction_timeout;
extern unsigned lnet_retry_count;
+extern unsigned int lnet_lnd_timeout;
extern unsigned int lnet_numa_range;
extern unsigned int lnet_health_sensitivity;
extern unsigned int lnet_recovery_interval;
+extern unsigned int lnet_recovery_limit;
extern unsigned int lnet_peer_discovery_disabled;
extern unsigned int lnet_drop_asym_route;
extern unsigned int router_sensitivity_percentage;
void lnet_detach_rsp_tracker(struct lnet_libmd *md, int cpt);
void lnet_clean_zombie_rstqs(void);
+bool lnet_md_discarded(struct lnet_libmd *md);
void lnet_finalize(struct lnet_msg *msg, int rc);
bool lnet_send_error_simulation(struct lnet_msg *msg,
enum lnet_msg_hstatus *hstatus);
char *lnet_health_error2str(enum lnet_msg_hstatus hstatus);
char *lnet_msgtyp2str(int type);
-void lnet_print_hdr(struct lnet_hdr *hdr);
int lnet_fail_nid(lnet_nid_t nid, unsigned int threshold);
/** \addtogroup lnet_fault_simulation @{ */
/** @} lnet_fault_simulation */
void lnet_counters_get_common(struct lnet_counters_common *common);
-void lnet_counters_get(struct lnet_counters *counters);
+int lnet_counters_get(struct lnet_counters *counters);
void lnet_counters_reset(void);
unsigned int lnet_iov_nob(unsigned int niov, struct kvec *iov);
}
/*
- * A peer is alive if it satisfies the following two conditions:
- * 1. peer health >= LNET_MAX_HEALTH_VALUE * router_sensitivity_percentage
+ * A peer NI is alive if it satisfies the following two conditions:
+ * 1. peer NI health >= LNET_MAX_HEALTH_VALUE * router_sensitivity_percentage / 100
* 2. the cached NI status received when we discover the peer is UP
*/
static inline bool