1 /* SPDX-License-Identifier: GPL-2.0 */
3 /* Copyright (C) 2004 Cluster File Systems, Inc.
5 * Copyright (C) 2009-2012 Cray, Inc.
7 * Copyright (c) 2014, 2016, Intel Corporation.
10 /* This file is part of Lustre, http://www.lustre.org.
12 * Derived from work by: Eric Barton <eric@bartonsoftware.com>
13 * Author: Nic Henke <nic@cray.com>
14 * Author: James Shimek <jshimek@cray.com>
17 #ifndef _GNILND_GNILND_H_
18 #define _GNILND_GNILND_H_
20 #define DEBUG_SUBSYSTEM S_LND
22 #include <linux/module.h>
23 #include <linux/kernel.h>
25 #include <linux/string.h>
26 #include <linux/stat.h>
27 #include <linux/errno.h>
28 #ifdef HAVE_LINUX_KERNEL_LOCK
29 #include <linux/smp_lock.h>
31 #include <linux/unistd.h>
32 #include <linux/uio.h>
33 #include <linux/time.h>
34 #include <asm/timex.h>
36 #include <asm/uaccess.h>
39 #include <linux/init.h>
41 #include <linux/file.h>
42 #include <linux/stat.h>
43 #include <linux/list.h>
44 #include <linux/kmod.h>
45 #include <linux/sysctl.h>
46 #include <linux/kthread.h>
47 #include <linux/nmi.h>
51 #include <linux/nmi.h>
53 #include <lnet/lib-lnet.h>
57 static inline time_t cfs_duration_sec(long duration_jiffies)
59 return jiffies_to_msecs(duration_jiffies) / MSEC_PER_SEC;
63 #define GNILND_MBOX_SIZE KMALLOC_MAX_SIZE
65 #define GNILND_SHIFT_HIGH ((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
66 (MAX_ORDER + PAGE_SHIFT - 1) : 25)
67 #define GNILND_SHIFT_MAX GNILND_SHIFT_HIGH
68 #define GNILND_MBOX_SIZE (1UL << GNILND_SHIFT_MAX)
72 /* tunables determined at compile time */
73 #define GNILND_MIN_TIMEOUT 5 /* minimum timeout interval (seconds) */
74 #define GNILND_TO2KA(t) (((t)-1)/2) /* timeout -> keepalive interval */
75 #define GNILND_MIN_RECONNECT_TO (GNILND_BASE_TIMEOUT/4)
76 #define GNILND_MAX_RECONNECT_TO GNILND_BASE_TIMEOUT
77 #define GNILND_HARDWARE_TIMEOUT 15 /* maximum time for data to travel between nodes */
78 #define GNILND_MDD_TIMEOUT 15 /* MDD hold timeout in minutes */
79 #define GNILND_SCHED_TIMEOUT 1
80 #define GNILND_DGRAM_TIMEOUT 2
81 #define GNILND_FAST_MAPPING_TRY \
82 *kgnilnd_tunables.kgn_max_retransmits /* maximum number of times to attempt mapping a tx */
83 #define GNILND_MAP_RETRY_RATE 1 /* interval between mapping attempts in jiffies */
85 /* map failure timeout */
86 #define GNILND_MAP_TIMEOUT \
87 (cfs_time_seconds(*kgnilnd_tunables.kgn_timeout * \
88 *kgnilnd_tunables.kgn_timeout))
90 /* Should we use the no_retry flag with vzalloc */
91 #define GNILND_VZALLOC_RETRY 0
94 /* reaper thread wakeup interval */
94 #define GNILND_REAPER_THREAD_WAKE 1
95 /* reaper thread checks each conn NCHECKS times every kgnilnd_data.kgn_new_min_timeout */
96 #define GNILND_REAPER_NCHECKS 4
99 #define GNILND_MAXDEVS 1 /* max # of GNI devices currently supported */
100 #define GNILND_MBOX_CREDITS 256 /* number of credits per mailbox */
101 #define GNILND_CONN_MAGIC 0xa100f /* magic value for verifying connection validity */
102 /* checksum values */
103 #define GNILND_CHECKSUM_OFF 0 /* checksum turned off */
104 #define GNILND_CHECKSUM_SMSG_HEADER 1 /* Only checksum SMSG header */
105 #define GNILND_CHECKSUM_SMSG 2 /* checksum entire SMSG packet */
106 #define GNILND_CHECKSUM_SMSG_BTE 3 /* Full checksum support */
108 /* tune down some options on COMPUTE nodes as they won't see the same number of connections
109 * and don't need the throughput of multiple threads by default */
110 #if defined(CONFIG_CRAY_COMPUTE)
112 #define GNILND_SCHED_THREADS 2 /* default # of kgnilnd_scheduler threads */
114 #define GNILND_SCHED_THREADS 1 /* default # of kgnilnd_scheduler threads */
116 #define GNILND_FMABLK 64 /* default number of mboxes per fmablk */
117 #define GNILND_SCHED_NICE 0 /* default nice value for scheduler threads */
118 #define GNILND_COMPUTE 1 /* compute image */
119 #define GNILND_FAST_RECONNECT 1 /* Fast Reconnect option */
120 #define GNILND_DEFAULT_CREDITS 64 /* Default number of simultaneous transmits */
122 #define GNILND_FMABLK 1024 /* default number of mboxes per fmablk */
123 #define GNILND_SCHED_NICE -20 /* default nice value for scheduler threads */
124 #define GNILND_COMPUTE 0 /* service image */
125 #define GNILND_FAST_RECONNECT 0 /* Fast Reconnect option */
126 #define GNILND_DEFAULT_CREDITS 256 /* Default number of simultaneous transmits */
129 /* EXTRA_BITS are there to allow us to hide NOOP/CLOSE and anything else out of band */
130 #define GNILND_EXTRA_BITS 1
131 /* maximum number of conns & bits for cqid in the SMSG event data */
132 #define GNILND_CQID_NBITS (21 - GNILND_EXTRA_BITS)
133 #define GNILND_MSGID_TX_NBITS (32 - GNILND_CQID_NBITS)
134 #define GNILND_MAX_CQID (1 << GNILND_CQID_NBITS)
135 #define GNILND_MAX_MSG_ID (1 << GNILND_MSGID_TX_NBITS)
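/* Illustrative compile-time check, not part of the original header: it documents
 * that the CQID and TX-index fields tile the full 32-bit SMSG event id. This
 * assumes static_assert() is usable at file scope (as in <linux/build_bug.h>). */
static_assert(GNILND_CQID_NBITS + GNILND_MSGID_TX_NBITS == 32,
	      "CQID bits plus TX-id bits must fill the 32-bit SMSG event id");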
136 #define GNILND_MAX_MSG_SIZE (*kgnilnd_tunables.kgn_max_immediate + sizeof(kgn_msg_t))
138 /* need sane upper bound to limit copy overhead */
139 #define GNILND_MAX_IMMEDIATE (64<<10)
140 /* allow for 4M transfers over gni. Note 2.5M used by DVS */
141 #define GNILND_MAX_IOV 1024
143 /* Max number of connections to keep in purgatory per peer */
144 #define GNILND_PURGATORY_MAX 5
145 /* Closing, don't put in purgatory */
146 #define GNILND_NOPURG 222
148 /* payload size to add to the base mailbox size.
149 * We subtract 2 from concurrent_sends because 4 messages are already included in the size
150 * that gni_smsg_buff_size_needed calculates; the MAX_PAYLOAD is added to
151 * the value returned from that function. */
152 #define GNILND_MBOX_PAYLOAD \
153 (GNILND_MAX_MSG_SIZE * \
154 ((*kgnilnd_tunables.kgn_concurrent_sends - 2) * 2));
156 /* timeout -> deadman timer for kgni mdd holds */
157 #define GNILND_TIMEOUT2DEADMAN ((*kgnilnd_tunables.kgn_mdd_timeout) * 1000 * 60)
159 /* timeout for failing sends; t is in jiffies */
160 #define GNILND_TIMEOUTRX(t) (t + cfs_time_seconds(*kgnilnd_tunables.kgn_hardware_timeout))
162 /* time when to release from purgatory in the reaper thread in jiffies */
163 #define GNILND_PURG_RELEASE(t) (GNILND_TIMEOUTRX(t) * 3)
165 /* Macro for finding last_rx: the two datapoints are compared
166 * and the most recent one (in jiffies) is returned.
168 #define GNILND_LASTRX(conn) (time_after(conn->gnc_last_rx, conn->gnc_last_rx_cq) \
169 ? conn->gnc_last_rx : conn->gnc_last_rx_cq)
171 /* fmablk registration failures timeout before failing node */
172 #define GNILND_REGFAILTO_DISABLE -1
174 /************************************************************************
175 * Enum, flag and tag data
177 #define GNILND_INIT_NOTHING 0
178 #define GNILND_INIT_DATA 1
179 #define GNILND_INIT_ALL 2
181 /* If you change the ordering away from MAPPED = UNMAPPED + 1, things break */
182 #define GNILND_BUF_NONE 0 /* buffer type not set */
183 #define GNILND_BUF_IMMEDIATE 1 /* immediate data */
184 #define GNILND_BUF_IMMEDIATE_KIOV 2 /* immediate data */
185 #define GNILND_BUF_PHYS_UNMAPPED 3 /* physical: not mapped yet */
186 #define GNILND_BUF_PHYS_MAPPED 4 /* physical: mapped already */
188 #define GNILND_TX_WAITING_REPLY (1<<1) /* expecting to receive reply */
189 #define GNILND_TX_WAITING_COMPLETION (1<<2) /* waiting for smsg_send to complete */
190 #define GNILND_TX_PENDING_RDMA (1<<3) /* RDMA transaction pending until we get prev. completion */
191 #define GNILND_TX_QUIET_ERROR (1<<4) /* don't print error on tx_done */
192 #define GNILND_TX_FAIL_SMSG (1<<5) /* pass down error injection for SMSG fail */
194 /* stash above max CQID to avoid any collision */
195 #define GNILND_MSGID_NOOP (GNILND_MAX_CQID + 128)
196 #define GNILND_MSGID_CLOSE (GNILND_MSGID_NOOP + 1)
198 /* kgn_msg_t::gnm_type */
199 #define GNILND_MSG_NONE 0x00 /* illegal message */
200 #define GNILND_MSG_NOOP 0x01 /* empty gnm_u (keepalive) */
201 #define GNILND_MSG_IMMEDIATE 0x02 /* gnm_u.immediate */
202 #define GNILND_MSG_PUT_REQ 0x03 /* gnm_u.putreq (src->sink) */
203 #define GNILND_MSG_PUT_NAK 0x04 /* gnm_u.completion (no PUT match: sink->src) */
204 #define GNILND_MSG_PUT_ACK 0x05 /* gnm_u.putack (PUT matched: sink->src) */
205 #define GNILND_MSG_PUT_DONE 0x06 /* gnm_u.completion (src->sink) */
206 #define GNILND_MSG_GET_REQ 0x07 /* gnm_u.get (sink->src) */
207 #define GNILND_MSG_GET_NAK 0x08 /* gnm_u.completion (no GET match: src->sink) */
208 #define GNILND_MSG_GET_DONE 0x09 /* gnm_u.completion (src->sink) */
209 #define GNILND_MSG_CLOSE 0x0a /* empty gnm_u */
210 #define GNILND_MSG_PUT_REQ_REV 0x0b /* gnm_u.get (src->sink) */
211 #define GNILND_MSG_PUT_DONE_REV 0x0c /* gnm_u.completion (sink->src) */
212 #define GNILND_MSG_PUT_NAK_REV 0x0d /* gnm_u.completion (no PUT match: sink->src) */
213 #define GNILND_MSG_GET_REQ_REV 0x0e /* gnm_u.get (sink->src ) */
214 #define GNILND_MSG_GET_ACK_REV 0x0f /* gnm_u.getack (GET matched: src->sink) */
215 #define GNILND_MSG_GET_DONE_REV 0x10 /* gnm_u.completion (sink -> src) */
216 #define GNILND_MSG_GET_NAK_REV 0x11 /* gnm_u.completion (no GET match: sink -> src) */
218 /* defines for gnc_*scheduled states */
219 #define GNILND_CONN_IDLE 0
220 #define GNILND_CONN_SCHED 1
221 #define GNILND_CONN_WANTS_SCHED 2
222 #define GNILND_CONN_PROCESS 3
224 #define GNILND_DEV_IDLE 0
225 #define GNILND_DEV_IRQ 1
226 #define GNILND_DEV_LOOP 2
228 #define GNILND_DGRAM_IDLE 0
229 #define GNILND_DGRAM_SCHED 1
230 #define GNILND_DGRAM_PROCESS 2
232 #define GNILND_PEER_IDLE 0
233 #define GNILND_PEER_CONNECT 1
234 #define GNILND_PEER_POSTING 2
235 #define GNILND_PEER_POSTED 3
236 #define GNILND_PEER_NEEDS_DEATH 4
237 #define GNILND_PEER_KILL 5
239 /* for gnc_close_recvd */
240 #define GNILND_CLOSE_RX 1
241 #define GNILND_CLOSE_INJECT1 2
242 #define GNILND_CLOSE_INJECT2 3
243 #define GNILND_CLOSE_EARLY 4
245 /* defines for why quiesce trigger set */
246 #define GNILND_QUIESCE_IDLE 0
247 #define GNILND_QUIESCE_ADMIN 1
248 #define GNILND_QUIESCE_RESET 2
249 #define GNILND_QUIESCE_HW_QUIESCE 3
251 #define GNILND_PEER_CLEAN 0
252 #define GNILND_PEER_PERSISTING 1
254 #define GNILND_DEL_CONN 0
255 #define GNILND_DEL_PEER 1
256 #define GNILND_CLEAR_PURGATORY 2
258 #define GNILND_PEER_UP 0
259 #define GNILND_PEER_DOWN 1
260 #define GNILND_PEER_TIMED_OUT 2
261 #define GNILND_PEER_UNKNOWN 3
263 /* defines for reverse RDMA states */
264 #define GNILND_REVERSE_NONE 0
265 #define GNILND_REVERSE_GET 1
266 #define GNILND_REVERSE_PUT 2
267 #define GNILND_REVERSE_BOTH (GNILND_REVERSE_GET | GNILND_REVERSE_PUT)
269 typedef enum kgn_fmablk_state {
270 GNILND_FMABLK_IDLE = 0, /* is allocated or ready to be freed */
271 GNILND_FMABLK_PHYS, /* allocated out of slab of physical memory */
272 GNILND_FMABLK_VIRT, /* 'standard' vmalloc hunk */
273 GNILND_FMABLK_FREED, /* after free */
274 } kgn_fmablk_state_t;
276 typedef enum kgn_tx_list_state {
277 GNILND_TX_IDLE = 0, /* TX is on the idle list, kgn_idle_txs */
278 GNILND_TX_ALLOCD, /* TX has been alloced (off of idle), could be in any state transition */
279 GNILND_TX_PEERQ, /* TX on peer->gnp_tx_queue (no live conn) */
280 GNILND_TX_MAPQ, /* TX on dev:gnd_map_tx for buffer mapping */
281 GNILND_TX_FMAQ, /* TX waiting to be sent on conn FMA */
282 GNILND_TX_LIVE_FMAQ, /* TX live on the FMA wire, waiting for completion or reply */
283 GNILND_TX_RDMAQ, /* TX waiting to send FMA confirmation to auth RDMA PUT */
284 GNILND_TX_LIVE_RDMAQ, /* TX live on the RDMA wire, waiting for completion */
285 GNILND_TX_DYING, /* TX got caught on MAPQ or RDMAQ while conn was closing, needs someone to call tx_done */
286 GNILND_TX_FREED /* TX is free! */
287 } kgn_tx_list_state_t;
289 typedef enum kgn_conn_state {
290 /* don't start @ 0 - prevent memset(0) badness */
291 GNILND_CONN_DUMMY = 0,
293 GNILND_CONN_CONNECTING,
294 GNILND_CONN_ESTABLISHED,
298 GNILND_CONN_DESTROY_EP
301 /* changing these requires a change to GNILND_CONNREQ_VERSION and
302 * will result in dropped packets instead of NAKs. Adding to this is
303 * acceptable without changing the CONNREQ_VERSION, but code should
304 * be ready to handle NAKs on version mismatch */
305 typedef enum kgn_connreq_type {
306 GNILND_CONNREQ_REQ = 1, /* how YOU doin' ? */
307 GNILND_CONNREQ_NAK, /* NO soup for you! */
308 GNILND_CONNREQ_CLOSE, /* we should see other people */
309 } kgn_connreq_type_t;
311 typedef enum kgn_dgram_state {
312 /* don't use 0 to avoid thinking a memset of zero is valid data */
313 GNILND_DGRAM_USED = 1,
314 GNILND_DGRAM_POSTING,
316 GNILND_DGRAM_PROCESSING,
317 GNILND_DGRAM_CANCELED,
321 typedef enum kgn_dgram_type {
322 GNILND_DGRAM_REQ = 1, /* how YOU doin' ? */
323 GNILND_DGRAM_WC_REQ, /* you talkin' to ME? */
324 GNILND_DGRAM_NAK, /* NO soup for you! */
325 GNILND_DGRAM_CLOSE, /* we should see other people */
328 /************************************************************************
329 * Wire message structs. These are sent in sender's byte order
330 * (i.e. receiver checks magic and flips if required).
333 #define GNILND_MSG_MAGIC LNET_PROTO_GNI_MAGIC /* unique magic */
334 #define GNILND_DGRAM_MAGIC 0x0DDBA11
336 /* kgn_msg_t - FMA/SMSG wire struct
338 * - added checksum to FMA
339 * moved seq before payload
340 * __packed added for alignment
342 * added gnm_payload_len for FMA payload size
344 * added gncm_retval to completion, allowing return code transmission
347 * changed how CQID and TX ids are assigned
349 * added retval on CLOSE
351 * added payload checksumming
353 * reworked checksumming a bit, changed payload checksums
355 #define GNILND_MSG_VERSION 8
356 /* kgn_connreq_t connection request datagram wire struct
361 #define GNILND_CONNREQ_VERSION 2
363 typedef struct kgn_gniparams {
364 __u32 gnpr_host_id; /* ph. host ID of the NIC */
365 __u32 gnpr_cqid; /* cqid I want peer to use when sending events to me */
366 gni_smsg_attr_t gnpr_smsg_attr; /* my short msg. attributes */
367 } __packed kgn_gniparams_t;
369 typedef struct kgn_nak_data {
370 __s32 gnnd_errno; /* errno reason for NAK */
372 } __packed kgn_nak_data_t;
374 /* the first bits of the connreq struct CANNOT CHANGE FORM EVER
375 * without breaking the ability for us to properly NAK someone */
376 typedef struct kgn_connreq { /* connection request/response */
377 __u32 gncr_magic; /* I'm a gnilnd connreq */
378 __u32 gncr_cksum; /* checksum (0 == disabled) */
379 __u16 gncr_type; /* REQ, NAK, etc */
380 __u16 gncr_version; /* this is my version number */
381 __u32 gncr_timeout; /* sender's timeout */
382 __u64 gncr_srcnid; /* sender's NID */
383 __u64 gncr_dstnid; /* who sender expects to listen */
384 __u64 gncr_peerstamp; /* sender's instance stamp */
385 __u64 gncr_connstamp; /* sender's connection stamp */
387 /* everything before this needs to stay static, adding after should
388 * result in a change to GNILND_CONNREQ_VERSION */
391 kgn_gniparams_t gncr_gnparams; /* sender's endpoint info */
392 kgn_nak_data_t gncr_nakdata; /* data (rc, etc) for NAK */
394 } __packed kgn_connreq_t;
397 gni_mem_handle_t gnrd_key;
400 } __packed kgn_rdma_desc_t;
403 struct lnet_hdr_nid4 gnim_hdr; /* LNet header */
404 /* LNet payload is in FMA "Message Data" */
405 } __packed kgn_immediate_msg_t;
408 struct lnet_hdr_nid4 gnprm_hdr; /* LNet header */
409 __u64 gnprm_cookie; /* opaque completion cookie */
410 } __packed kgn_putreq_msg_t;
413 __u64 gnpam_src_cookie; /* reflected completion cookie */
414 __u64 gnpam_dst_cookie; /* opaque completion cookie */
415 __u16 gnpam_payload_cksum; /* checksum for get msg */
416 kgn_rdma_desc_t gnpam_desc; /* sender's sink buffer */
417 } __packed kgn_putack_msg_t;
420 struct lnet_hdr_nid4 gngm_hdr; /* LNet header */
421 __u64 gngm_cookie; /* opaque completion cookie */
422 __u16 gngm_payload_cksum; /* checksum for put msg */
423 kgn_rdma_desc_t gngm_desc; /* sender's sink buffer */
424 } __packed kgn_get_msg_t;
427 int gncm_retval; /* error on NAK, size on REQ */
428 __u64 gncm_cookie; /* reflected completion cookie */
429 } __packed kgn_completion_msg_t;
431 typedef struct { /* NB must fit in FMA "Prefix" */
432 __u32 gnm_magic; /* I'm a gni message */
433 __u16 gnm_version; /* this is my version number */
434 __u16 gnm_type; /* msg type */
435 __u64 gnm_srcnid; /* sender's NID */
436 __u64 gnm_connstamp; /* sender's connection stamp */
437 __u32 gnm_seq; /* incrementing sequence number */
438 __u16 gnm_cksum; /* checksum (0 == no checksum ) */
439 __u16 gnm_payload_cksum; /* payload checksum (0 == no checksum ) */
440 __u32 gnm_payload_len; /* size of the FMA payload sent */
442 kgn_immediate_msg_t immediate;
443 kgn_putreq_msg_t putreq;
444 kgn_putack_msg_t putack;
446 kgn_completion_msg_t completion;
448 } __packed kgn_msg_t;
450 /************************************************************************
451 * runtime tunable data
454 typedef struct kgn_tunables {
455 int *kgn_min_reconnect_interval; /* connreq starting timeout & retransmit interval */
456 int *kgn_max_reconnect_interval; /* ...exponentially increasing to this */
457 int *kgn_credits; /* # concurrent sends */
458 int *kgn_fma_cq_size; /* # entries in receive CQ */
459 int *kgn_peer_credits; /* # LNet peer credits */
460 int *kgn_concurrent_sends; /* max # of max_immediate in mbox */
461 int *kgn_timeout; /* comms timeout (seconds) */
462 int *kgn_max_immediate; /* immediate payload breakpoint */
463 int *kgn_checksum; /* checksum data */
464 int *kgn_checksum_dump; /* dump raw data to D_INFO log when checksumming */
465 int *kgn_bte_put_dlvr_mode; /* BTE Put delivery mode */
466 int *kgn_bte_get_dlvr_mode; /* BTE Get delivery mode */
467 int *kgn_bte_relaxed_ordering; /* relaxed ordering (PASSPW) on BTE transfers */
468 int *kgn_ptag; /* PTAG for cdm_create */
469 int *kgn_pkey; /* PKEY for cdm_create */
470 int *kgn_max_retransmits; /* max number of FMA retransmits before entering delay list */
471 int *kgn_nwildcard; /* # wildcard per net to post */
472 int *kgn_nice; /* nice value for kgnilnd threads */
473 int *kgn_rdmaq_intervals; /* # intervals per second for rdmaq throttle */
474 int *kgn_loops; /* # of loops sched does before flush/heartbeat tickle */
475 int *kgn_peer_hash_size; /* size of kgn_peers */
476 int *kgn_peer_health; /* enable/disable peer health */
477 int *kgn_peer_timeout; /* Override of the default peer_timeout used by peer_health */
478 int *kgn_vmap_cksum; /* enable/disable vmap of kiov checksums */
479 int *kgn_mbox_per_block; /* mailboxes per fmablk */
480 int *kgn_nphys_mbox; /* # mailboxes to preallocate with physical memory */
481 int *kgn_mbox_credits; /* max credits per fma */
482 int *kgn_sched_threads; /* number of kgnilnd_scheduler threads */
483 int *kgn_net_hash_size; /* size of kgn_net_ht */
484 int *kgn_hardware_timeout; /* max time for a message to get across the network */
485 int *kgn_mdd_timeout; /* max time for ghal to hold an mdd in minutes */
486 int *kgn_sched_timeout; /* max time for scheduler to run before yielding */
487 int *kgn_dgram_timeout; /* max time for dgram mover to run before scheduling */
488 int *kgn_sched_nice; /* nice value for kgnilnd scheduler threads */
489 int *kgn_reverse_rdma; /* Reverse RDMA setting */
490 int *kgn_eager_credits; /* allocated eager buffers */
491 int *kgn_fast_reconn; /* fast reconnection on conn timeout */
492 int *kgn_efault_lbug; /* LBUG on receiving an EFAULT */
493 int *kgn_max_purgatory; /* # conns/peer to keep in purgatory */
494 int *kgn_reg_fail_timeout; /* registration failure timeout */
495 int *kgn_thread_affinity; /* bind scheduler threads to cpus */
496 int *kgn_to_reconn_disable;/* disable reconnect after timeout */
497 int *kgn_thread_safe; /* use thread safe kgni API */
498 int *kgn_vzalloc_noretry; /* Should we pass the noretry flag */
501 typedef struct kgn_mbox_info {
502 lnet_nid_t mbx_prev_nid;
503 lnet_nid_t mbx_prev_purg_nid;
504 unsigned long mbx_create_conn_memset;
505 unsigned long mbx_add_purgatory;
506 unsigned long mbx_detach_of_purgatory;
507 unsigned long mbx_release_from_purgatory;
508 unsigned long mbx_release_purg_active_dgram;
510 int mbx_nallocs_total;
513 typedef struct kgn_fma_memblock {
514 struct list_head gnm_bufflist; /* memblock is part of device's gnd_fma_buffs */
515 kgn_fmablk_state_t gnm_state; /* how this memory was allocated & its state */
516 int gnm_hold_timeout; /* hold_timeout if used at unmap time */
517 int gnm_num_mboxs; /* total mboxes allocated */
518 int gnm_avail_mboxs; /* number of available mailboxes in the block */
519 int gnm_held_mboxs; /* number of purgatory held mailboxes */
520 int gnm_mbox_size; /* size of the single mailbox */
521 int gnm_next_avail_mbox; /* next available mailbox */
522 long gnm_max_timeout; /* max timeout for possible purgatory hold */
523 unsigned int gnm_blk_size; /* how big is our hunk o memory ? */
524 void *gnm_block; /* pointer to mem. block */
525 gni_mem_handle_t gnm_hndl; /* mem. handle of the block */
526 unsigned long *gnm_bit_array; /* bit array tracking allocation of mailboxes */
527 kgn_mbox_info_t *gnm_mbox_info; /* array of mbox_information about each mbox */
528 } kgn_fma_memblock_t;
530 typedef struct kgn_device {
531 gni_nic_handle_t gnd_handle; /* device handle */
532 gni_cdm_handle_t gnd_domain; /* GNI communication domain */
533 gni_err_handle_t gnd_err_handle; /* device error handle */
534 unsigned long gnd_sched_alive; /* scheduler thread alive stamp */
535 gni_cq_handle_t gnd_rcv_fma_cqh; /* FMA rcv. completion queue handle */
536 gni_cq_handle_t gnd_snd_rdma_cqh; /* rdma send completion queue handle */
537 gni_cq_handle_t gnd_snd_fma_cqh; /* rdma send completion queue handle */
538 struct mutex gnd_cq_mutex; /* CQ access serialization */
539 __u32 gnd_host_id; /* ph. host ID of the NIC */
540 int gnd_id; /* device id, also index in kgn_devices */
541 __u32 gnd_nid; /* ph host ID translated to NID */
542 struct list_head gnd_fma_buffs; /* list of FMA memory blocks */
543 struct mutex gnd_fmablk_mutex; /* mutex for FMA block memory alloc/free */
544 spinlock_t gnd_fmablk_lock; /* lock for mbox alloc/release */
545 atomic_t gnd_nfmablk; /* # of fmablk live */
546 atomic_t gnd_fmablk_vers; /* gnd_fma_bufs stamp */
547 atomic_t gnd_neps; /* # EP allocated to conns */
548 short gnd_ready; /* stuff to do in scheduler thread */
549 struct list_head gnd_ready_conns; /* connections ready to tx/rx */
550 struct list_head gnd_delay_conns; /* connections in need of dla and/or smsg credits */
551 struct list_head gnd_map_tx; /* TX: needing buffer mapping */
552 wait_queue_head_t gnd_waitq; /* scheduler wakeup */
553 spinlock_t gnd_lock; /* serialise gnd_ready_conns */
554 struct list_head gnd_connd_peers; /* peers waiting for a connection */
555 spinlock_t gnd_connd_lock; /* serialise connd_peers */
556 wait_queue_head_t gnd_dgram_waitq; /* dgram_mover thread wakeup */
557 wait_queue_head_t gnd_dgping_waitq; /* dgram thread ping-pong */
558 int gnd_dgram_ready; /* dgrams need movin' */
559 struct list_head *gnd_dgrams; /* nid hash to dgrams */
560 atomic_t gnd_ndgrams; /* # dgrams extant */
561 atomic_t gnd_nwcdgrams; /* # wildcard dgrams to post*/
562 spinlock_t gnd_dgram_lock; /* serialize gnd_dgrams */
563 struct list_head gnd_map_list; /* list of all mapped regions */
564 int gnd_map_version; /* version flag for map list */
565 struct timer_list gnd_map_timer; /* wakey-wakey */
566 atomic_t gnd_n_mdd; /* number of total MDD - fma, tx, etc */
567 atomic_t gnd_n_mdd_held; /* number of total MDD held - fma, tx, etc */
568 atomic_t gnd_nq_map; /* # queued waiting for mapping (MDD/GART) */
569 atomic64_t gnd_nbytes_map; /* bytes of total GART maps - fma, tx, etc */
570 __u32 gnd_map_nphys; /* # TX phys mappings */
571 __u32 gnd_map_physnop; /* # TX phys pages mapped */
572 spinlock_t gnd_map_lock; /* serialize gnd_map_XXX */
573 unsigned long gnd_next_map; /* next mapping attempt in jiffies */
574 int gnd_map_attempt; /* last map attempt # */
575 unsigned long gnd_last_map; /* map timeout base */
576 struct list_head gnd_rdmaq; /* RDMA to be sent */
577 spinlock_t gnd_rdmaq_lock; /* play nice with others */
578 atomic64_t gnd_rdmaq_bytes_out; /* # bytes authorized */
579 atomic64_t gnd_rdmaq_bytes_ok; /* # bytes allowed until deadline */
580 atomic_t gnd_rdmaq_nstalls; /* # stalls due to throttle */
581 unsigned long gnd_rdmaq_deadline; /* when does bucket roll over ? */
582 struct timer_list gnd_rdmaq_timer; /* wakey-wakey */
583 atomic_t gnd_short_ntx; /* TX stats: short messages */
584 atomic64_t gnd_short_txbytes; /* TX stats: short message payload*/
585 atomic_t gnd_rdma_ntx; /* TX stats: rdma messages */
586 atomic64_t gnd_rdma_txbytes; /* TX stats: rdma message payload*/
587 atomic_t gnd_short_nrx; /* RX stats: short messages */
588 atomic64_t gnd_short_rxbytes; /* RX stats: short message payload*/
589 atomic_t gnd_rdma_nrx; /* RX stats: rdma messages */
590 atomic64_t gnd_rdma_rxbytes; /* RX stats: rdma message payload*/
591 atomic_t gnd_fast_try; /* # of times fast send tried */
592 atomic_t gnd_fast_ok; /* # of times fast send ok */
593 atomic_t gnd_fast_block; /* # of times fast send blocked */
594 unsigned long gnd_mutex_delay;
595 atomic_t gnd_n_yield;
596 atomic_t gnd_n_schedule;
597 atomic_t gnd_canceled_dgrams; /* # of outstanding cancels */
598 struct rw_semaphore gnd_conn_sem; /* serialize connection changes/data movement */
599 void *gnd_smdd_hold_buf; /* buffer to keep smdd */
600 gni_mem_handle_t gnd_smdd_hold_hndl; /* buffer mem handle */
603 typedef struct kgn_net {
604 struct list_head gnn_list; /* chain on kgni_data::kgn_nets */
605 kgn_device_t *gnn_dev; /* device for this net */
606 struct lnet_ni *gnn_ni; /* network interface instance */
607 atomic_t gnn_refcount; /* # current references */
608 int gnn_shutdown; /* lnd_shutdown set */
609 __u16 gnn_netnum; /* stash netnum for quicker lookup */
612 static inline lnet_nid_t
613 kgnilnd_lnd2lnetnid(lnet_nid_t ni_nid, lnet_nid_t kgnilnd_nid)
615 return LNET_MKNID(LNET_NIDNET(ni_nid), LNET_NIDADDR(kgnilnd_nid));
618 static inline lnet_nid_t
619 kgnilnd_lnet2lndnid(lnet_nid_t lnet_nid, lnet_nid_t kgnilnd_nid)
621 return LNET_MKNID(LNET_NIDNET(kgnilnd_nid), LNET_NIDADDR(lnet_nid));
624 /* The code for this is a bit ugly - but really this just boils down to a __u64
625 * that can have various parts accessed separately.
627 * The lower 32 bits are the ID
628 * we give to SMSG for our completion event - it needs to be globally unique across
629 * all TX currently in flight. We separate that out into the CQID so that we can
630 * reference the connection (kgnilnd_cqid2conn_locked) and then the msg_id to pull
631 * the actual TX out of the per-connection gnc_tx_ref_table.
633 * The upper 32 bits are just extra stuff we put into the cookie to ensure this TX
634 * has a unique value we can send with RDMA setup messages to ensure the completion for
635 * those is unique across the wire. The extra 32 bits are there to ensure that TX id
636 * reuse is separated.
639 typedef struct kgn_tx_ev_id {
641 __u64 txe_cookie; /* are you my mommy ? */
643 __u32 txe_chips; /* extra bits to ensure ID unique across reuse */
645 __u32 txe_smsg_id; /* ID for SMSG CQ event */
646 /* N.B: Never ever ever ever use the bit shifts directly,
647 * you are just asking for a world of pain and are at the
648 * mercy of the compiler layouts */
650 __u32 txe_cqid :GNILND_CQID_NBITS;
651 __u32 txe_idx :GNILND_MSGID_TX_NBITS;
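/* Illustrative sketch, not part of the original header: recovering the CQID and
 * TX index from a raw 32-bit SMSG event id. It assumes kgn_tx_ev_id_t uses
 * anonymous unions so that the bitfields above alias txe_smsg_id, and per the
 * note above it uses the bitfields rather than open-coded shifts. The function
 * name is hypothetical. */
static inline void
kgnilnd_sketch_decode_smsg_id(__u32 smsg_id, __u32 *cqid, __u32 *tx_idx)
{
	kgn_tx_ev_id_t ev_id;

	ev_id.txe_smsg_id = smsg_id;	/* overlay the raw event id */
	*cqid = ev_id.txe_cqid;		/* conn lookup via kgnilnd_cqid2conn_locked */
	*tx_idx = ev_id.txe_idx;	/* index into the per-conn gnc_tx_ref_table */
}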
658 typedef struct kgn_dgram {
659 struct list_head gndg_list; /* on hash dev::gnd_dgrams */
660 kgn_dgram_state_t gndg_state; /* state of this dgram */
661 kgn_dgram_type_t gndg_type; /* REQ, NAK, etc */
662 __u32 gndg_magic; /* safety word */
663 unsigned long gndg_post_time; /* time when we posted */
664 struct kgn_conn *gndg_conn; /* unbound conn with ep & smsg */
665 kgn_connreq_t gndg_conn_out; /* connreq from local node */
666 kgn_connreq_t gndg_conn_in; /* connreq from remote node */
669 typedef struct kgn_tx { /* message descriptor */
670 struct list_head tx_list; /* TX queues - peer, conn, rdma */
671 kgn_tx_list_state_t tx_list_state;/* where in state machine is this TX ? */
672 struct list_head *tx_list_p; /* pointer to current list */
673 struct kgn_conn *tx_conn; /* owning conn */
674 struct lnet_msg *tx_lntmsg[2]; /* ptl msgs to finalize on completion */
675 unsigned long tx_qtime; /* when tx started to wait for something (jiffies) */
676 unsigned long tx_cred_wait; /* time spent waiting for smsg creds */
677 struct list_head tx_map_list; /* list entry on device map list */
678 unsigned int tx_nob; /* # bytes of payload */
679 int tx_buftype; /* payload buffer type */
680 int tx_phys_npages; /* # physical pages */
681 gni_mem_handle_t tx_map_key; /* mapping key */
682 gni_mem_handle_t tx_buffer_copy_map_key; /* mapping key for page aligned copy */
683 gni_mem_segment_t *tx_phys; /* page descriptors */
684 kgn_msg_t tx_msg; /* FMA message buffer */
685 kgn_tx_ev_id_t tx_id; /* who are you, who ? who ? */
686 __u8 tx_state; /* state of the descriptor */
687 int tx_retrans; /* retrans count of RDMA */
688 int tx_rc; /* if we need to stash the ret code until we see completion */
689 void *tx_buffer; /* source/sink buffer */
690 void *tx_buffer_copy; /* pointer to page aligned buffer */
691 unsigned int tx_nob_rdma; /* nob actually rdma */
692 unsigned int tx_offset; /* offset of data into copied buffer */
694 gni_post_descriptor_t tx_rdma_desc; /* rdma descriptor */
695 struct page *tx_imm_pages[GNILND_MAX_IMMEDIATE/PAGE_SIZE]; /* page array to map kiov for immediate send */
698 /* we only use one or the other */
700 kgn_putack_msg_t tx_putinfo; /* data for deferred rdma & re-try */
701 kgn_get_msg_t tx_getinfo; /* data for rdma re-try*/
705 typedef struct kgn_conn {
706 kgn_device_t *gnc_device; /* which device */
707 struct kgn_peer *gnc_peer; /* owning peer */
708 int gnc_magic; /* magic value cleared before free */
709 struct list_head gnc_list; /* stash on peer's conn list - or pending purgatory lists as we clear them */
710 struct list_head gnc_hashlist; /* stash in connection hash table */
711 struct list_head gnc_schedlist; /* schedule (on gnd_?_conns) for attention */
712 struct list_head gnc_fmaq; /* txs queued for FMA */
713 struct list_head gnc_mdd_list; /* hold list for MDD on hard conn reset */
714 struct list_head gnc_delaylist; /* If on this list schedule anytime we get interrupted */
715 __u64 gnc_peerstamp; /* peer's unique stamp */
716 __u64 gnc_peer_connstamp; /* peer's unique connection stamp */
717 __u64 gnc_my_connstamp; /* my unique connection stamp */
718 unsigned long gnc_first_rx; /* when I first received an FMA message (jiffies) */
719 unsigned long gnc_last_tx; /* when I last sent an FMA message (jiffies) */
720 unsigned long gnc_last_rx; /* when I last received an FMA message (jiffies) */
721 unsigned long gnc_last_tx_cq; /* when I last received an FMA CQ (jiffies) */
722 unsigned long gnc_last_rx_cq; /* when I last received an FMA CQ (jiffies) */
723 unsigned long gnc_last_noop_want; /* time I wanted to send NOOP */
724 unsigned long gnc_last_noop_sent; /* time I did gni_smsg_send on NOOP */
725 unsigned long gnc_last_noop_cq; /* time when NOOP completed */
726 unsigned long gnc_last_sched_ask; /* time when conn added to ready_conns */
727 unsigned long gnc_last_sched_do; /* time when conn processed from ready_conns */
728 atomic_t gnc_reaper_noop; /* # reaper triggered NOOP */
729 atomic_t gnc_sched_noop; /* # sched triggered NOOP */
730 unsigned int gnc_timeout; /* infer peer death if no rx for this many seconds */
731 __u32 gnc_cqid; /* my completion callback id (non-unique) */
732 atomic_t gnc_tx_seq; /* tx msg sequence number */
733 atomic_t gnc_rx_seq; /* rx msg sequence number */
734 struct mutex gnc_smsg_mutex; /* tx smsg sequence serialization */
735 struct mutex gnc_rdma_mutex; /* tx rdma sequence serialization */
736 __u64 gnc_tx_retrans; /* # retrans on SMSG */
737 atomic_t gnc_nlive_fma; /* # live FMA */
738 atomic_t gnc_nq_rdma; /* # queued (on device) RDMA */
739 atomic_t gnc_nlive_rdma; /* # live RDMA */
740 short gnc_close_sent; /* I've sent CLOSE */
741 short gnc_close_recvd; /* I've received CLOSE */
742 short gnc_in_purgatory; /* in the sin bin */
743 int gnc_error; /* errno when conn being closed due to error */
744 int gnc_peer_error; /* errno peer sent us on CLOSE */
745 kgn_conn_state_t gnc_state; /* connection state */
746 int gnc_scheduled; /* being attended to */
747 char gnc_sched_caller[30]; /* what function last called schedule */
748 int gnc_sched_line; /* what line # last called schedule */
749 atomic_t gnc_refcount; /* # users */
750 spinlock_t gnc_list_lock; /* serialise tx lists, max_rx_age */
751 gni_ep_handle_t gnc_ephandle; /* GNI endpoint */
752 kgn_fma_memblock_t *gnc_fma_blk; /* pointer to fma block for our mailbox */
753 gni_smsg_attr_t gnpr_smsg_attr; /* my short msg. attributes */
754 spinlock_t gnc_tx_lock; /* protect tx alloc/free */
755 unsigned long gnc_tx_bits[(GNILND_MAX_MSG_ID/8)/sizeof(unsigned long)]; /* bit table for tx id */
756 int gnc_next_tx; /* next tx to use in tx_ref_table */
757 kgn_tx_t **gnc_tx_ref_table; /* table of TX descriptors for this conn */
758 int gnc_mbox_id; /* id of mbox in fma_blk */
759 short gnc_needs_detach; /* flag set in detach_purgatory_all_locked so reaper will clear out purgatory */
760 short gnc_needs_closing; /* flag set in del_conns when called from kgnilnd_del_peer_or_conn */
761 atomic_t gnc_tx_in_use; /* # of tx's currently in use by another thread; use kgnilnd_peer_conn_lock */
762 kgn_dgram_type_t gnc_dgram_type; /* save dgram type used to establish this conn */
763 void *remote_mbox_addr; /* save remote mbox address */
766 typedef struct kgn_mdd_purgatory {
767 gni_mem_handle_t gmp_map_key; /* mapping key */
768 struct list_head gmp_list; /* entry point for purgatory list */
769 } kgn_mdd_purgatory_t;
771 typedef struct kgn_peer {
772 struct list_head gnp_list; /* stash on global peer list */
773 struct list_head gnp_connd_list; /* schedule on kgn_connd_peers */
774 struct list_head gnp_conns; /* all active connections and all conns in purgatory for the peer */
775 struct list_head gnp_tx_queue; /* msgs waiting for a conn */
776 kgn_net_t *gnp_net; /* net instance for this peer */
777 lnet_nid_t gnp_nid; /* who's on the other end(s) */
778 atomic_t gnp_refcount; /* # users */
779 __u32 gnp_host_id; /* ph. host ID of the peer */
780 short gnp_connecting; /* connection forming */
781 short gnp_pending_unlink; /* need last conn close to trigger unlink */
782 int gnp_last_errno; /* last error conn saw */
783 time64_t gnp_last_alive; /* last time I had valid comms */
784 int gnp_last_dgram_errno; /* last error dgrams saw */
785 unsigned long gnp_last_dgram_time; /* last time I tried to connect */
786 unsigned long gnp_reconnect_time; /* get_seconds() when reconnect OK */
787 unsigned long gnp_reconnect_interval; /* exponential backoff */
788 atomic_t gnp_dirty_eps; /* # of old but yet to be destroyed EPs from conns */
789 int gnp_state; /* up/down/timedout */
790 unsigned long gnp_down_event_time; /* time peer down */
791 unsigned long gnp_up_event_time; /* time peer back up */
794 /* the kgn_rx_t is a struct for handing to LNET as the private pointer for things
795 * like lnet_parse. It allows a single pointer to let us get enough
796 * information in _recv and friends */
797 typedef struct kgn_rx {
798 kgn_conn_t *grx_conn; /* connection */
799 kgn_msg_t *grx_msg; /* message */
800 struct lnet_msg *grx_lntmsg; /* lnet msg for this rx (eager only) */
801 int grx_eager; /* if eager, we copied msg to somewhere */
802 struct timespec64 grx_received; /* time this msg received */
805 typedef struct kgn_data {
806 int kgn_init; /* initialisation state */
807 int kgn_shutdown; /* shut down? */
808 int kgn_wc_kill; /* Should I repost the WC */
809 atomic_t kgn_nthreads; /* # live threads */
810 int kgn_nresets; /* number of stack resets */
811 int kgn_in_reset; /* are we in stack reset ? */
813 __u64 kgn_nid_trans_private;/* private data for each of the HW nid2nic arenas */
815 kgn_device_t kgn_devices[GNILND_MAXDEVS]; /* device/ptag/cq etc */
816 int kgn_ndevs; /* # devices */
818 int kgn_ruhroh_running; /* ruhroh thread is running */
819 int kgn_ruhroh_shutdown; /* ruhroh thread should or is shut down */
820 wait_queue_head_t kgn_ruhroh_waitq; /* ruhroh thread wakeup */
821 int kgn_quiesce_trigger; /* should we quiesce ? */
822 atomic_t kgn_nquiesce; /* how many quiesced ? */
823 struct mutex kgn_quiesce_mutex; /* serialize ruhroh task, startup and shutdown */
824 int kgn_needs_reset; /* we need stack reset */
826 /* These next three members implement communication from gnilnd into
827 * the ruhroh task. To ensure correct operation of the task, code that
828 * writes into them must use memory barriers to ensure that the changes
829 * are visible to other cores in the order the members appear below (a usage sketch follows the extern declarations further down). */
830 __u32 kgn_quiesce_secs; /* seconds to bump timeouts */
831 int kgn_bump_info_rdy; /* we have info needed to bump */
832 int kgn_needs_pause; /* we need to pause for network quiesce */
834 struct list_head *kgn_nets; /* hashtable of kgn_net instances */
835 struct rw_semaphore kgn_net_rw_sem; /* serialise gnn_shutdown, kgn_nets */
837 rwlock_t kgn_peer_conn_lock; /* stabilize peer/conn ops */
838 struct list_head *kgn_peers; /* hash table of all my known peers */
839 atomic_t kgn_npeers; /* # peers extant */
840 int kgn_peer_version; /* version flag for peer tables */
842 struct list_head *kgn_conns; /* conns hashed by cqid */
843 atomic_t kgn_nconns; /* # connections extant */
844 atomic_t kgn_neager_allocs; /* # of eager allocations */
845 __u64 kgn_peerstamp; /* when I started up */
846 __u64 kgn_connstamp; /* conn stamp generator */
847 int kgn_conn_version; /* version flag for conn tables */
848 int kgn_next_cqid; /* cqid generator */
850 long kgn_new_min_timeout; /* minimum timeout on any new conn */
851 wait_queue_head_t kgn_reaper_waitq; /* reaper sleeps here */
852 spinlock_t kgn_reaper_lock; /* serialise */
854 struct kmem_cache *kgn_rx_cache; /* rx descriptor space */
855 struct kmem_cache *kgn_tx_cache; /* tx descriptor memory */
856 struct kmem_cache *kgn_tx_phys_cache; /* tx phys descriptor memory */
857 atomic_t kgn_ntx; /* # tx in use */
858 struct kmem_cache *kgn_dgram_cache; /* outgoing datagrams */
860 struct page ***kgn_cksum_map_pages; /* page arrays for mapping pages on checksum */
861 __u64 kgn_cksum_npages; /* # pages alloc'd for checksumming */
862 atomic_t kgn_nvmap_cksum; /* # times we vmapped for checksums */
863 atomic_t kgn_nvmap_short; /* # times we vmapped for short kiov */
865 atomic_t kgn_nkmap_short; /* # time we kmapped for a short kiov */
866 long kgn_rdmaq_override; /* bytes per second override */
868 struct kmem_cache *kgn_mbox_cache; /* mailboxes from not-GART */
870 atomic_t kgn_npending_unlink; /* # of peers pending unlink */
871 atomic_t kgn_npending_conns; /* # of conns with pending closes */
872 atomic_t kgn_npending_detach; /* # of conns with a pending detach */
873 unsigned long kgn_last_scheduled; /* last time schedule was called */
874 unsigned long kgn_last_condresched; /* last time cond_resched was called */
875 atomic_t kgn_rev_offset; /* # of REV rdma w/misaligned offsets */
876 atomic_t kgn_rev_length; /* # of REV rdma have misaligned len */
877 atomic_t kgn_rev_copy_buff; /* # of REV rdma buffer copies */
878 unsigned long free_pages_limit; /* # of free pages to reserve from fma block allocations */
879 int kgn_enable_gl_mutex; /* kgni api mtx enable */
882 extern kgn_data_t kgnilnd_data;
883 extern kgn_tunables_t kgnilnd_tunables;
885 extern void kgnilnd_destroy_peer(kgn_peer_t *peer);
886 extern void kgnilnd_destroy_conn(kgn_conn_t *conn);
887 extern int _kgnilnd_schedule_conn(kgn_conn_t *conn, const char *caller, int line, int refheld, int lock_held);
888 extern int _kgnilnd_schedule_delay_conn(kgn_conn_t *conn);
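/* Illustrative sketch, not in the original source: publishing quiesce info to
 * the ruhroh task following the ordering rule documented above kgn_quiesce_secs
 * in kgn_data_t. A write barrier separates each store so the members become
 * visible in the order they are declared. The function name and the wake_up
 * call are hypothetical. */
static inline void
kgnilnd_sketch_request_pause(__u32 quiesce_secs)
{
	kgnilnd_data.kgn_quiesce_secs = quiesce_secs;	/* info first */
	smp_wmb();
	kgnilnd_data.kgn_bump_info_rdy = 1;		/* then "info is ready" */
	smp_wmb();
	kgnilnd_data.kgn_needs_pause = 1;		/* finally request the pause */
	wake_up(&kgnilnd_data.kgn_ruhroh_waitq);	/* kick the ruhroh thread */
}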
890 static inline int kgnilnd_timeout(void)
892 return *kgnilnd_tunables.kgn_timeout ?: lnet_get_lnd_timeout();
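/* Illustrative sketch, not part of the original header: deriving a keepalive
 * interval from the effective timeout via GNILND_TO2KA defined above. The
 * function name is hypothetical. */
static inline int
kgnilnd_sketch_keepalive_interval(void)
{
	/* e.g. a 60 second timeout yields a (60 - 1) / 2 = 29 second keepalive */
	return GNILND_TO2KA(kgnilnd_timeout());
}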
895 /* Macro wrapper for _kgnilnd_schedule_conn. This will store the function
896 * and the line of the calling function to allow us to debug problematic
897 * schedule calls in the future without the programmer having to mark
898 * the location manually.
900 #define kgnilnd_schedule_conn(conn) \
901 _kgnilnd_schedule_conn(conn, __func__, __LINE__, 0, 0);
903 #define kgnilnd_schedule_conn_refheld(conn, refheld) \
904 _kgnilnd_schedule_conn(conn, __func__, __LINE__, refheld, 0);
906 #define kgnilnd_schedule_conn_nolock(conn) \
907 _kgnilnd_schedule_conn(conn, __func__, __LINE__, 0, 1);
910 /* Macro wrapper for _kgnilnd_schedule_delay_conn. This will allow us to store
911 * extra data if we need to.
913 #define kgnilnd_schedule_delay_conn(conn) \
914 _kgnilnd_schedule_delay_conn(conn);
917 kgnilnd_thread_fini(void)
919 atomic_dec(&kgnilnd_data.kgn_nthreads);
922 static inline void kgnilnd_gl_mutex_lock(struct mutex *lock)
924 if (kgnilnd_data.kgn_enable_gl_mutex)
928 static inline void kgnilnd_gl_mutex_unlock(struct mutex *lock)
930 if (kgnilnd_data.kgn_enable_gl_mutex)
934 static inline void kgnilnd_conn_mutex_lock(struct mutex *lock)
936 if (!kgnilnd_data.kgn_enable_gl_mutex)
940 static inline void kgnilnd_conn_mutex_unlock(struct mutex *lock)
942 if (!kgnilnd_data.kgn_enable_gl_mutex)
946 /* like mutex_trylock but with a jiffies spinner. This is to allow certain
947 * parts of the code to avoid a scheduler trip when the mutex is held
949 * Try to acquire the mutex atomically for 1 jiffie. Returns 1 if the mutex
950 * has been acquired successfully, and 0 on contention.
952 * NOTE: this function follows the spin_trylock() convention, so
953 * it is negated to the down_trylock() return values! Be careful
954 * about this when converting semaphore users to mutexes.
956 * This function must not be used in interrupt context. The
957 * mutex must be released by the same task that acquired it.
959 static inline int __kgnilnd_mutex_trylock(struct mutex *lock)
962 unsigned long timeout;
964 LASSERT(!in_interrupt());
966 for (timeout = jiffies + 1; time_before(jiffies, timeout);) {
968 ret = mutex_trylock(lock);
975 static inline int kgnilnd_mutex_trylock(struct mutex *lock)
977 if (!kgnilnd_data.kgn_enable_gl_mutex)
980 return __kgnilnd_mutex_trylock(lock);
983 static inline int kgnilnd_trylock(struct mutex *cq_lock,
984 struct mutex *c_lock)
986 if (kgnilnd_data.kgn_enable_gl_mutex)
987 return __kgnilnd_mutex_trylock(cq_lock);
989 return __kgnilnd_mutex_trylock(c_lock);
992 static inline void *kgnilnd_vzalloc(int size)
995 if (*kgnilnd_tunables.kgn_vzalloc_noretry)
996 ret = __ll_vmalloc(size, __GFP_HIGHMEM | GFP_NOIO | __GFP_ZERO |
999 ret = __ll_vmalloc(size, __GFP_HIGHMEM | GFP_NOIO | __GFP_ZERO);
1001 LIBCFS_ALLOC_POST(ret, size, "alloc");
1005 static inline void kgnilnd_vfree(void *ptr, int size)
1007 LIBCFS_FREE_PRE(ptr, size, "vfree");
1011 /* as of kernel version 4.2, set_mb is replaced with smp_store_mb */
1013 #define set_mb smp_store_mb
1016 /* Copied from DEBUG_REQ in Lustre - the dance is needed to save stack space */
1019 _kgnilnd_debug_msg(kgn_msg_t *msg,
1020 struct libcfs_debug_msg_data *data, const char *fmt, ... );
1022 #define kgnilnd_debug_msg(msgdata, mask, cdls, msg, fmt, a...) \
1024 if (((mask) & D_CANTMASK) != 0 || \
1025 ((libcfs_debug & (mask)) != 0 && \
1026 (libcfs_subsystem_debug & DEBUG_SUBSYSTEM) != 0)) \
1027 _kgnilnd_debug_msg((msg), msgdata, fmt, ##a); \
1030 /* for most callers (level is a constant) this is resolved at compile time */
1031 #define GNIDBG_MSG(level, msg, fmt, args...) \
1033 if ((level) & (D_ERROR | D_WARNING | D_NETERROR)) { \
1034 static struct cfs_debug_limit_state cdls; \
1035 LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, level, &cdls); \
1036 kgnilnd_debug_msg(&msgdata, level, &cdls, msg, \
1037 "$$ "fmt" from %s ", ## args, \
1038 libcfs_nid2str((msg)->gnm_srcnid)); \
1040 LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, level, NULL); \
1041 kgnilnd_debug_msg(&msgdata, level, NULL, msg, \
1042 "$$ "fmt" from %s ", ## args, \
1043 libcfs_nid2str((msg)->gnm_srcnid)); \
1047 /* user puts 'to nid' in msg for us */
1048 #define GNIDBG_TOMSG(level, msg, fmt, args...) \
1050 if ((level) & (D_ERROR | D_WARNING | D_NETERROR)) { \
1051 static struct cfs_debug_limit_state cdls; \
1052 LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, level, &cdls); \
1053 kgnilnd_debug_msg(&msgdata, level, &cdls, msg, \
1054 "$$ "fmt" ", ## args); \
1056 LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, level, NULL); \
1057 kgnilnd_debug_msg(&msgdata, level, NULL, msg, \
1058 "$$ "fmt" ", ## args); \
1063 _kgnilnd_debug_conn(kgn_conn_t *conn,
1064 struct libcfs_debug_msg_data *data, const char *fmt, ... );
1066 #define kgnilnd_debug_conn(msgdata, mask, cdls, conn, fmt, a...) \
1068 if (((mask) & D_CANTMASK) != 0 || \
1069 ((libcfs_debug & (mask)) != 0 && \
1070 (libcfs_subsystem_debug & DEBUG_SUBSYSTEM) != 0)) \
1071 _kgnilnd_debug_conn((conn), msgdata, fmt, ##a); \
1074 /* for most callers (level is a constant) this is resolved at compile time */
1075 #define GNIDBG_CONN(level, conn, fmt, args...) \
1077 if ((level) & (D_ERROR | D_WARNING | D_NETERROR)) { \
1078 static struct cfs_debug_limit_state cdls; \
1079 LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, level, &cdls); \
1080 kgnilnd_debug_conn(&msgdata, level, &cdls, conn, \
1081 "$$ "fmt" ", ## args); \
1083 LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, level, NULL); \
1084 kgnilnd_debug_conn(&msgdata, level, NULL, conn, \
1085 "$$ "fmt" ", ## args); \
1090 _kgnilnd_debug_tx(kgn_tx_t *tx,
1091 struct libcfs_debug_msg_data *data, const char *fmt, ... );
1093 #define kgnilnd_debug_tx(msgdata, mask, cdls, tx, fmt, a...) \
1095 if (((mask) & D_CANTMASK) != 0 || \
1096 ((libcfs_debug & (mask)) != 0 && \
1097 (libcfs_subsystem_debug & DEBUG_SUBSYSTEM) != 0)) \
1098 _kgnilnd_debug_tx((tx), msgdata, fmt, ##a); \
1101 /* for most callers (level is a constant) this is resolved at compile time */
1102 #define GNIDBG_TX(level, tx, fmt, args...) \
1104 if ((level) & (D_ERROR | D_WARNING | D_NETERROR)) { \
1105 static struct cfs_debug_limit_state cdls; \
1106 LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, level, &cdls); \
1107 kgnilnd_debug_tx(&msgdata, level, &cdls, tx, \
1108 "$$ "fmt" ", ## args); \
1110 LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, level, NULL); \
1111 kgnilnd_debug_tx(&msgdata, level, NULL, tx, \
1112 "$$ "fmt" ", ## args); \
1116 #define GNITX_ASSERTF(tx, cond, fmt, a...) \
1118 if (unlikely(!(cond))) { \
1119 GNIDBG_TX(D_EMERG, tx, "ASSERTION(" #cond ") failed:" fmt, a); \
1124 #define GNILND_IS_QUIESCED \
1125 (atomic_read(&kgnilnd_data.kgn_nquiesce) == \
1126 atomic_read(&kgnilnd_data.kgn_nthreads))
1128 #define KGNILND_SPIN_QUIESCE \
1130 /* E.T phone home */ \
1131 atomic_inc(&kgnilnd_data.kgn_nquiesce); \
1132 CDEBUG(D_NET, "Waiting for thread pause to be over...\n"); \
1133 while (kgnilnd_data.kgn_quiesce_trigger) { \
1134 msleep_interruptible(MSEC_PER_SEC); \
1136 /* Mom, my homework is done */ \
1137 CDEBUG(D_NET, "Waking up from thread pause\n"); \
1138 atomic_dec(&kgnilnd_data.kgn_nquiesce); \
1141 /* use macros for addref/decref to get the calling function name in the CDEBUG */
1142 #ifndef LIBCFS_DEBUG
1143 #error "this code uses actions inside LASSERT for ref counting"
1146 #define kgnilnd_admin_addref(atomic) \
1148 int val = atomic_inc_return(&atomic); \
1149 LASSERTF(val > 0, #atomic " refcount %d\n", val); \
1150 CDEBUG(D_NETTRACE, #atomic " refcount %d\n", val); \
1153 #define kgnilnd_admin_decref(atomic) \
1155 int val = atomic_dec_return(&atomic); \
1156 LASSERTF(val >= 0, #atomic " refcount %d\n", val); \
1157 CDEBUG(D_NETTRACE, #atomic " refcount %d\n", val); \
1159 wake_up_var(&kgnilnd_data); \
1162 #define kgnilnd_net_addref(net) \
1164 int val = atomic_inc_return(&net->gnn_refcount); \
1165 LASSERTF(val > 1, "net %px refcount %d\n", net, val); \
1166 CDEBUG(D_NETTRACE, "net %p->%s++ (%d)\n", net, \
1167 libcfs_nidstr(&net->gnn_ni->ni_nid), val); \
1170 #define kgnilnd_net_decref(net) \
1172 int val = atomic_dec_return(&net->gnn_refcount); \
1173 LASSERTF(val >= 0, "net %px refcount %d\n", net, val); \
1174 CDEBUG(D_NETTRACE, "net %p->%s-- (%d)\n", net, \
1175 libcfs_nidstr(&net->gnn_ni->ni_nid), val); \
1178 #define kgnilnd_peer_addref(peer) \
1180 int val = atomic_inc_return(&peer->gnp_refcount); \
1181 LASSERTF(val > 1, "peer %px refcount %d\n", peer, val); \
1182 CDEBUG(D_NETTRACE, "peer %p->%s++ (%d)\n", peer, \
1183 libcfs_nid2str(peer->gnp_nid), val); \
1186 #define kgnilnd_peer_decref(peer) \
1188 int val = atomic_dec_return(&peer->gnp_refcount); \
1189 LASSERTF(val >= 0, "peer %px refcount %d\n", peer, val); \
1190 CDEBUG(D_NETTRACE, "peer %p->%s--(%d)\n", peer, \
1191 libcfs_nid2str(peer->gnp_nid), val); \
1193 kgnilnd_destroy_peer(peer); \
1196 #define kgnilnd_conn_addref(conn) \
1201 val = atomic_inc_return(&conn->gnc_refcount); \
1202 LASSERTF(val > 1 && conn->gnc_magic == GNILND_CONN_MAGIC, \
1203 "conn %px refc %d to %s\n", \
1206 ? libcfs_nid2str(conn->gnc_peer->gnp_nid) \
1208 CDEBUG(D_NETTRACE, "conn %p->%s++ (%d)\n", conn, \
1210 ? libcfs_nid2str(conn->gnc_peer->gnp_nid) \
1215 /* we hijack conn_decref && gnc_refcount = 1 to allow us to push the conn
1216 * through the scheduler thread to get the EP destroyed. This avoids some
1217 * messy semaphore business and allows us to reuse the connd_list and existing
1218 * linkage and avoid creating extra lists just for destroying EPs */
1220 /* Safety Disclaimer:
1221 * Q: If we decrement the refcount and then check it again, is it possible that
1222 * another caller could have passed through this macro concurrently? If so,
1223 * then it is possible that both will attempt to call kgnilnd_destroy_conn().
1225 * A: Yes, entirely possible in most cases, but we can't get concurrent users
1226 * once we are refcount <= 2. It hinges around gnc_state and membership of
1227 * gnc_hashlist. There are two ways to find a connection - either ask for
1228 * it from the peer, kgnilnd_find_conn_locked(peer) or from the CQ id,
1229 * kgnilnd_cqid2conn_locked(id). While a conn is live, we'll have at least
1232 * - #1 from create (kgnilnd_create_conn)
1233 * - #2 for EP (kgnilnd_create_conn)
1234 * - #3 - living on peer (gnc_list, kgnilnd_finish_connect)
1235 * - #4 living in global hash (gnc_hashlist, kgnilnd_finish_connect).
1237 * Actually, only 3 live, as at the end of kgnilnd_finish_connect, we drop:
1238 * - #1 - the ref the dgram inherited from kgnilnd_create_conn.
1240 * There could be more from TX descriptors during the lifetime of a live
1243 * If we nuke the conn before finish_connect, we won't have parallel paths
1244 * because nobody besides the dgram handler for the single outstanding
1245 * dgram can find the connection as it isn't in any searchable tables yet.
1247 * This leaves connection close, we'll drop 2 refs (#4 and #3) but only
1248 * after calling kgnilnd_schedule_conn, which would add a new ref (#5). At
1249 * this point gnc_refcount=2 (#2, #5). We have a 'maybe' send of the CLOSE
1250 * now on the next scheduler loop, this could be #6 (schedule_conn again)
1251 * and #7 (TX on gnc_fmaq). Both would be cleared quickly as that TX is
1252 * sent. Now the gnc_state == CLOSED, so we hit
1253 * kgnilnd_complete_closed_conn. At this point, nobody can 'find' this conn
1254 * - we've nuked them from the peer and CQ id tables, so we own them and
1255 * are guaranteed serial access - hence the complete lack of conn list
1256 * locking in kgnilnd_complete_closed_conn. We are free then to mark the
1257 * conn DESTROY_EP (add #6 for schedule_conn), then lose #5 in
1258 * kgnilnd_process_conns. Then the next scheduler loop would call
1259 * kgnilnd_destroy_conn_ep (drop #2 for EP) and lose #6 (refcount=0) in
1260 * kgnilnd_process_conns.
1262 * Clearly, we are totally safe. Clearly.
1265 #define kgnilnd_conn_decref(conn) \
1270 val = atomic_dec_return(&conn->gnc_refcount); \
1271 LASSERTF(val >= 0, "conn %px refc %d to %s\n", \
1274 ? libcfs_nid2str(conn->gnc_peer->gnp_nid) \
1276 CDEBUG(D_NETTRACE, "conn %p->%s-- (%d)\n", conn, \
1278 ? libcfs_nid2str(conn->gnc_peer->gnp_nid) \
1283 (conn->gnc_ephandle != NULL) && \
1284 (conn->gnc_state != GNILND_CONN_DESTROY_EP)) { \
1285 set_mb(conn->gnc_state, GNILND_CONN_DESTROY_EP); \
1286 kgnilnd_schedule_conn(conn); \
1287 } else if (val == 0) { \
1288 kgnilnd_destroy_conn(conn); \
1292 static inline struct list_head *
1293 kgnilnd_nid2peerlist(lnet_nid_t nid)
1295 unsigned int hash = ((unsigned int)LNET_NIDADDR(nid)) % *kgnilnd_tunables.kgn_peer_hash_size;
1297 RETURN(&kgnilnd_data.kgn_peers[hash]);
1300 static inline struct list_head *
1301 kgnilnd_netnum2netlist(__u16 netnum)
1303 unsigned int hash = ((unsigned int) netnum) % *kgnilnd_tunables.kgn_net_hash_size;
1305 RETURN(&kgnilnd_data.kgn_nets[hash]);
1309 kgnilnd_peer_active(kgn_peer_t *peer)
1311 /* Am I in the peer hash table? */
1312 return (!list_empty(&peer->gnp_list));
1315 /* need write_lock on kgn_peer_conn_lock */
1317 kgnilnd_can_unlink_peer_locked(kgn_peer_t *peer)
1319 CDEBUG(D_NET, "peer 0x%p->%s conns? %d tx? %d\n",
1320 peer, libcfs_nid2str(peer->gnp_nid),
1321 !list_empty(&peer->gnp_conns),
1322 !list_empty(&peer->gnp_tx_queue));
1324 /* kgn_peer_conn_lock protects us from conflict with
1325 * kgnilnd_peer_notify and gnp_persistent */
1326 RETURN ((list_empty(&peer->gnp_conns)) &&
1327 (list_empty(&peer->gnp_tx_queue)));
1330 /* returns positive if error was for a clean shutdown of conn */
1332 kgnilnd_conn_clean_errno(int errno)
1334 /* - ESHUTDOWN - LND is unloading
1335 * - EUCLEAN - admin requested via "lctl del_peer"
1336 * - ENETRESET - admin requested via "lctl disconnect" or rca event
1337 * - ENOTRECOVERABLE - stack reset
1338 * - EISCONN - cleared via "lctl push"
1339 * not doing ESTALE - that isn't clean */
1340 RETURN ((errno == 0) ||
1341 (errno == -ESHUTDOWN) ||
1342 (errno == -EUCLEAN) ||
1343 (errno == -ENETRESET) ||
1344 (errno == -EISCONN) ||
1345 (errno == -ENOTRECOVERABLE));
1348 /* returns positive if error results in purgatory hold */
1350 kgnilnd_check_purgatory_errno(int errno)
1352 /* We don't want a purgatory hold in these cases:
1353 * - EUCLEAN - admin requested via "lctl del_peer"
1354 * - ESHUTDOWN - LND is unloading
1356 RETURN ((errno != -ESHUTDOWN) &&
1357 (errno != -EUCLEAN));
1361 /* returns positive if a purgatory hold is needed */
1363 kgnilnd_check_purgatory_conn(kgn_conn_t *conn)
1367 if (conn->gnc_peer) {
1368 loopback = conn->gnc_peer->gnp_nid ==
1369 lnet_nid_to_nid4(&conn->gnc_peer->gnp_net->gnn_ni->ni_nid);
1371 /* short circuit - a conn that didn't complete
1372 * setup never needs a purgatory hold */
1375 CDEBUG(D_NETTRACE, "conn 0x%p->%s loopback %d close_recvd %d\n",
1376 conn, conn->gnc_peer ?
1377 libcfs_nid2str(conn->gnc_peer->gnp_nid) :
1379 loopback, conn->gnc_close_recvd);
1381 /* we only use a purgatory hold if we've not received the CLOSE msg
1382 * from our peer - without that message, we can't know the state of
1383 * the other end of this connection and must put it into purgatory
1384 * to prevent reuse and corruption.
1385 * The theory is that a TX error can be communicated in all other cases
1387 RETURN(likely(!loopback) && !conn->gnc_close_recvd &&
1388 kgnilnd_check_purgatory_errno(conn->gnc_error));
1391 static inline const char *
1392 kgnilnd_tx_state2str(kgn_tx_list_state_t state);
1394 static inline struct list_head *
1395 kgnilnd_tx_state2list(kgn_peer_t *peer, kgn_conn_t *conn,
1396 kgn_tx_list_state_t to_state)
1399 case GNILND_TX_PEERQ:
1400 return &peer->gnp_tx_queue;
1401 case GNILND_TX_FMAQ:
1402 return &conn->gnc_fmaq;
1403 case GNILND_TX_LIVE_FMAQ:
1404 case GNILND_TX_LIVE_RDMAQ:
1405 case GNILND_TX_DYING:
1407 case GNILND_TX_MAPQ:
1408 return &conn->gnc_device->gnd_map_tx;
1409 case GNILND_TX_RDMAQ:
1410 return &conn->gnc_device->gnd_rdmaq;
1412 /* IDLE, FREED or ALLOCD is not valid "on list" state */
1413 CERROR("invalid state requested: %s\n",
1414 kgnilnd_tx_state2str(to_state));
/* should hold tx, conn or peer lock when calling */
static inline void
kgnilnd_tx_add_state_locked(kgn_tx_t *tx, kgn_peer_t *peer,
                        kgn_conn_t *conn, kgn_tx_list_state_t state,
                        int add_tail)
{
        struct list_head *list = NULL;

        /* make sure we have a sane TX state to start */
        GNITX_ASSERTF(tx, (tx->tx_list_p == NULL &&
                  tx->tx_list_state == GNILND_TX_ALLOCD) &&
                  list_empty(&tx->tx_list),
                  "bad state with tx_list %s",
                  list_empty(&tx->tx_list) ? "empty" : "not empty");

        /* the tx must not already be in the requested state */
        GNITX_ASSERTF(tx, state != tx->tx_list_state,
                      "already at %s", kgnilnd_tx_state2str(state));

        /* get proper list from the state requested */
        list = kgnilnd_tx_state2list(peer, conn, state);

        /* add refcount */
        switch (state) {
        case GNILND_TX_PEERQ:
                kgnilnd_peer_addref(peer);
                break;
        case GNILND_TX_ALLOCD:
                /* no refs needed */
                break;
        case GNILND_TX_FMAQ:
                kgnilnd_conn_addref(conn);
                break;
        case GNILND_TX_MAPQ:
                atomic_inc(&conn->gnc_device->gnd_nq_map);
                kgnilnd_conn_addref(conn);
                break;
        case GNILND_TX_LIVE_FMAQ:
                atomic_inc(&conn->gnc_nlive_fma);
                kgnilnd_conn_addref(conn);
                break;
        case GNILND_TX_LIVE_RDMAQ:
                atomic_inc(&conn->gnc_nlive_rdma);
                kgnilnd_conn_addref(conn);
                break;
        case GNILND_TX_RDMAQ:
                atomic_inc(&conn->gnc_nq_rdma);
                kgnilnd_conn_addref(conn);
                break;
        case GNILND_TX_DYING:
                kgnilnd_conn_addref(conn);
                break;
        default:
                CERROR("invalid state requested: %s\n",
                        kgnilnd_tx_state2str(state));
                LBUG();
                break;
        }

        /* if this changes, change kgnilnd_alloc_tx */
        tx->tx_list_state = state;

        /* some states don't have lists - we track them in the per conn
         * TX table instead. Waste not, want not! */
        if (list != NULL) {
                tx->tx_list_p = list;
                if (add_tail)
                        list_add_tail(&tx->tx_list, list);
                else
                        list_add(&tx->tx_list, list);
        } else {
                /* set dummy list_p to make bookkeeping happy and let
                 * debugging be a hair easier */
                tx->tx_list_p = (void *)state;
        }

        GNIDBG_TX(D_NET, tx, "onto %s->0x%p",
                  kgnilnd_tx_state2str(state), list);
}
static inline void
kgnilnd_tx_del_state_locked(kgn_tx_t *tx, kgn_peer_t *peer,
                        kgn_conn_t *conn, kgn_tx_list_state_t new_state)
{
        /* there is only one "off-list" state */
        GNITX_ASSERTF(tx, new_state == GNILND_TX_ALLOCD,
                      "invalid new_state %s", kgnilnd_tx_state2str(new_state));

        /* new_state == ALLOCD means we are deallocating this tx,
         * so make sure it was on a valid list to start with */
        GNITX_ASSERTF(tx, (tx->tx_list_p != NULL) &&
                      (((tx->tx_list_state == GNILND_TX_LIVE_FMAQ) ||
                        (tx->tx_list_state == GNILND_TX_LIVE_RDMAQ) ||
                        (tx->tx_list_state == GNILND_TX_DYING)) == list_empty(&tx->tx_list)),
                      "bad state", NULL);

        GNIDBG_TX(D_NET, tx, "off %p", tx->tx_list_p);

        /* drop refcount */
        switch (tx->tx_list_state) {
        case GNILND_TX_PEERQ:
                kgnilnd_peer_decref(peer);
                break;
        case GNILND_TX_FREED:
        case GNILND_TX_IDLE:
        case GNILND_TX_ALLOCD:
                /* no refs needed */
                break;
        case GNILND_TX_DYING:
                kgnilnd_conn_decref(conn);
                break;
        case GNILND_TX_FMAQ:
                kgnilnd_conn_decref(conn);
                break;
        case GNILND_TX_MAPQ:
                atomic_dec(&conn->gnc_device->gnd_nq_map);
                kgnilnd_conn_decref(conn);
                break;
        case GNILND_TX_LIVE_FMAQ:
                atomic_dec(&conn->gnc_nlive_fma);
                kgnilnd_conn_decref(conn);
                break;
        case GNILND_TX_LIVE_RDMAQ:
                atomic_dec(&conn->gnc_nlive_rdma);
                kgnilnd_conn_decref(conn);
                break;
        case GNILND_TX_RDMAQ:
                atomic_dec(&conn->gnc_nq_rdma);
                kgnilnd_conn_decref(conn);
        /* don't need to assert on default, already did in set */
        default:
                break;
        }

        /* for ALLOCD, this might already be true, but no harm doing it again */
        list_del_init(&tx->tx_list);
        tx->tx_list_p = NULL;
        tx->tx_list_state = new_state;
}
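/* Illustrative sketch, not taken from the driver sources: moving a TX from
 * one queue to another pairs a del with an add so the refcounts taken in
 * kgnilnd_tx_add_state_locked() stay balanced. Assuming the caller already
 * holds the lock covering both lists (the per-conn gnc_list_lock is used
 * here purely as an example):
 *
 *      spin_lock(&conn->gnc_list_lock);
 *      kgnilnd_tx_del_state_locked(tx, peer, conn, GNILND_TX_ALLOCD);
 *      kgnilnd_tx_add_state_locked(tx, NULL, conn, GNILND_TX_FMAQ, 1);
 *      spin_unlock(&conn->gnc_list_lock);
 */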
static inline int
kgnilnd_tx_mapped(kgn_tx_t *tx)
{
        return tx->tx_buftype == GNILND_BUF_PHYS_MAPPED;
}

static inline struct list_head *
kgnilnd_cqid2connlist(__u32 cqid)
{
        unsigned int hash = cqid % *kgnilnd_tunables.kgn_peer_hash_size;

        return (&kgnilnd_data.kgn_conns[hash]);
}

static inline kgn_conn_t *
kgnilnd_cqid2conn_locked(__u32 cqid)
{
        struct list_head *conns = kgnilnd_cqid2connlist(cqid);
        struct list_head *tmp;
        kgn_conn_t       *conn;

        list_for_each(tmp, conns) {
                conn = list_entry(tmp, kgn_conn_t, gnc_hashlist);

                if (conn->gnc_cqid == cqid)
                        return conn;
        }

        return NULL;
}

/* returns 1..GNILND_MAX_CQID on success, 0 on failure */
static inline __u32
kgnilnd_get_cqid_locked(void)
{
        int    looped = 0;
        __u32  cqid;

        do {
                cqid = kgnilnd_data.kgn_next_cqid++;
                if (kgnilnd_data.kgn_next_cqid >= GNILND_MAX_CQID) {
                        if (looped) {
                                return 0;
                        }
                        kgnilnd_data.kgn_next_cqid = 1;
                        looped = 1;
                }
        } while (kgnilnd_cqid2conn_locked(cqid) != NULL);

        return cqid;
}
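/* Illustrative sketch, an assumption rather than the actual connection setup
 * path: a new conn would claim its CQID while holding the peer/conn write
 * lock so the uniqueness scan in kgnilnd_get_cqid_locked() stays valid:
 *
 *      write_lock(&kgnilnd_data.kgn_peer_conn_lock);
 *      conn->gnc_cqid = kgnilnd_get_cqid_locked();
 *      write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
 *      if (conn->gnc_cqid == 0)
 *              rc = -E2BIG;    (hypothetical choice of error for "no free CQIDs")
 */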
static inline void
kgnilnd_validate_tx_ev_id(kgn_tx_ev_id_t *ev_id, kgn_tx_t **txp, kgn_conn_t **connp)
{
        kgn_tx_t    *tx = NULL;
        kgn_conn_t  *conn = NULL;

        /* set to NULL so any early return is an error */
        *txp = NULL;
        *connp = NULL;

        LASSERTF((ev_id->txe_idx > 0) &&
                 (ev_id->txe_idx < GNILND_MAX_MSG_ID),
                "bogus txe_idx %d >= %d\n",
                ev_id->txe_idx, GNILND_MAX_MSG_ID);

        LASSERTF((ev_id->txe_cqid > 0) &&
                 (ev_id->txe_cqid < GNILND_MAX_CQID),
                "bogus txe_cqid %d >= %d\n",
                ev_id->txe_cqid, GNILND_MAX_CQID);

        read_lock(&kgnilnd_data.kgn_peer_conn_lock);
        conn = kgnilnd_cqid2conn_locked(ev_id->txe_cqid);

        if (conn == NULL) {
                /* Conn was destroyed? */
                read_unlock(&kgnilnd_data.kgn_peer_conn_lock);
                CDEBUG(D_NET, "CQID %d lookup failed\n", ev_id->txe_cqid);
                return;
        }
        /* just insurance */
        kgnilnd_conn_addref(conn);
        kgnilnd_admin_addref(conn->gnc_tx_in_use);
        read_unlock(&kgnilnd_data.kgn_peer_conn_lock);

        /* we know this is safe - as the TX won't be reused until AFTER
         * the conn is unlinked from the cqid hash, so we can use the TX
         * (serializing to avoid any cache oddness) freely from the conn tx ref table */

        spin_lock(&conn->gnc_tx_lock);
        tx = conn->gnc_tx_ref_table[ev_id->txe_idx];
        spin_unlock(&conn->gnc_tx_lock);

        /* we could have a tx that was cleared out by other forces -
         * lctl disconnect or del_peer */
        if (tx == NULL) {
                CNETERR("txe_idx %d is gone, ignoring event\n", ev_id->txe_idx);
                kgnilnd_admin_decref(conn->gnc_tx_in_use);
                kgnilnd_conn_decref(conn);
                return;
        }

        /* check tx->tx_msg magic to make sure kgni didn't eat it */
        GNITX_ASSERTF(tx, tx->tx_msg.gnm_magic == GNILND_MSG_MAGIC,
                      "came back from kgni with bad magic %x", tx->tx_msg.gnm_magic);

        GNITX_ASSERTF(tx, tx->tx_id.txe_idx == ev_id->txe_idx,
                      "conn 0x%p->%s tx_ref_table hosed: wanted txe_idx %d "
                      "found tx %px txe_idx %d",
                      conn, libcfs_nid2str(conn->gnc_peer->gnp_nid),
                      ev_id->txe_idx, tx, tx->tx_id.txe_idx);

        GNITX_ASSERTF(tx, tx->tx_conn != NULL, "tx with NULL connection", NULL);

        GNITX_ASSERTF(tx, tx->tx_conn == conn, "tx conn does not equal conn", NULL);

        *txp = tx;
        *connp = conn;

        GNIDBG_TX(D_NET, tx, "validated to 0x%p", conn);
}
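/* Illustrative sketch, an assumption about how a completion handler would use
 * this helper: on success the caller owns the conn ref and the gnc_tx_in_use
 * admin ref taken above and must drop both once it is done with the tx:
 *
 *      kgnilnd_validate_tx_ev_id(&ev_id, &tx, &conn);
 *      if (tx == NULL)
 *              return;                 (refs already dropped on failure)
 *      ... process the completed tx ...
 *      kgnilnd_admin_decref(conn->gnc_tx_in_use);
 *      kgnilnd_conn_decref(conn);
 */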
/* set_normalized_timespec isn't exported from the kernel, so
 * we need to do the same thing inline */
static inline struct timespec
kgnilnd_ts_sub(struct timespec lhs, struct timespec rhs)
{
        struct timespec ts;
        time_t          sec;
        long            nsec;

        sec = lhs.tv_sec - rhs.tv_sec;
        nsec = lhs.tv_nsec - rhs.tv_nsec;

        while (nsec >= NSEC_PER_SEC) {
                nsec -= NSEC_PER_SEC;
                sec++;
        }
        while (nsec < 0) {
                nsec += NSEC_PER_SEC;
                sec--;
        }

        ts.tv_sec = sec;
        ts.tv_nsec = nsec;
        return ts;
}
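/* Illustrative sketch, an assumption about typical use: the helper normalises
 * the difference of two timespec samples, e.g.
 *
 *      struct timespec start, now, delta;
 *      ...
 *      delta = kgnilnd_ts_sub(now, start);
 *
 * leaving delta.tv_nsec in the range [0, NSEC_PER_SEC). */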
static inline int
kgnilnd_count_list(struct list_head *q)
{
        struct list_head *e;
        int               num = 0;

        list_for_each(e, q) {
                num++;
        }

        return num;
}
/* kgnilnd_find_net adds a reference to the net it finds.
 * This is so the net will not be removed before the calling function
 * has time to use the data returned. The reference must be released
 * by the calling function once it has finished using the returned net.
 */
static inline int
kgnilnd_find_net(lnet_nid_t nid, kgn_net_t **netp)
{
        kgn_net_t *net;
        int        rc;

        rc = down_read_trylock(&kgnilnd_data.kgn_net_rw_sem);

        if (!rc) {
                return -ESHUTDOWN;
        }

        list_for_each_entry(net,
                            kgnilnd_netnum2netlist(LNET_NETNUM(LNET_NIDNET(nid))),
                            gnn_list) {
                if (!net->gnn_shutdown &&
                    LNET_NID_NET(&net->gnn_ni->ni_nid) == LNET_NIDNET(nid)) {
                        kgnilnd_net_addref(net);
                        up_read(&kgnilnd_data.kgn_net_rw_sem);
                        *netp = net;
                        return 0;
                }
        }

        up_read(&kgnilnd_data.kgn_net_rw_sem);

        return -ENONET;
}
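/* Illustrative sketch, an assumption about the caller side of the contract
 * described above - the reference taken on success must be dropped once the
 * net is no longer needed:
 *
 *      kgn_net_t *net;
 *      int rc = kgnilnd_find_net(nid, &net);
 *      if (rc < 0)
 *              return rc;
 *      ... use net ...
 *      kgnilnd_net_decref(net);
 */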
#ifdef CONFIG_DEBUG_SLAB
#define KGNILND_POISON(ptr, c, s) do {} while (0)
#else
#define KGNILND_POISON(ptr, c, s) memset(ptr, c, s)
#endif
enum kgnilnd_ni_lnd_tunables_attr {
        LNET_NET_GNILND_TUNABLES_ATTR_UNSPEC = 0,

        LNET_NET_GNILND_TUNABLES_ATTR_LND_TIMEOUT,
        __LNET_NET_GNILND_TUNABLES_ATTR_MAX_PLUS_ONE,
};

#define LNET_NET_GNILND_TUNABLES_ATTR_MAX (__LNET_NET_GNILND_TUNABLES_ATTR_MAX_PLUS_ONE - 1)
int kgnilnd_dev_init(kgn_device_t *dev);
void kgnilnd_dev_fini(kgn_device_t *dev);
int kgnilnd_startup(struct lnet_ni *ni);
void kgnilnd_shutdown(struct lnet_ni *ni);
int kgnilnd_base_startup(void);
void kgnilnd_base_shutdown(void);

int kgnilnd_allocate_phys_fmablk(kgn_device_t *device);
int kgnilnd_map_phys_fmablk(kgn_device_t *device);
void kgnilnd_unmap_fma_blocks(kgn_device_t *device);
void kgnilnd_free_phys_fmablk(kgn_device_t *device);

int kgnilnd_ctl(struct lnet_ni *ni, unsigned int cmd, void *arg);
int kgnilnd_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg);
int kgnilnd_eager_recv(struct lnet_ni *ni, void *private,
                        struct lnet_msg *lntmsg, void **new_private);
int kgnilnd_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg,
                 int delayed, unsigned int niov,
                 struct bio_vec *kiov,
                 unsigned int offset, unsigned int mlen, unsigned int rlen);

__u16 kgnilnd_cksum_kiov(unsigned int nkiov, struct bio_vec *kiov,
                         unsigned int offset, unsigned int nob, int dump_blob);

/* purgatory functions */
void kgnilnd_add_purgatory_locked(kgn_conn_t *conn, kgn_peer_t *peer);
void kgnilnd_mark_for_detach_purgatory_all_locked(kgn_peer_t *peer);
void kgnilnd_detach_purgatory_locked(kgn_conn_t *conn, struct list_head *conn_list);
void kgnilnd_release_purgatory_list(struct list_head *conn_list);

void kgnilnd_update_reaper_timeout(long timeout);
void kgnilnd_unmap_buffer(kgn_tx_t *tx, int error);
kgn_tx_t *kgnilnd_new_tx_msg(int type, lnet_nid_t source);
void kgnilnd_tx_done(kgn_tx_t *tx, int completion);
void kgnilnd_txlist_done(struct list_head *txlist, int error);
void kgnilnd_unlink_peer_locked(kgn_peer_t *peer);
int _kgnilnd_schedule_conn(kgn_conn_t *conn, const char *caller, int line, int refheld, int lock_held);
int kgnilnd_schedule_process_conn(kgn_conn_t *conn, int sched_intent);

void kgnilnd_schedule_dgram(kgn_device_t *dev);
int kgnilnd_create_peer_safe(kgn_peer_t **peerp, lnet_nid_t nid, kgn_net_t *net, int node_state);
void kgnilnd_add_peer_locked(lnet_nid_t nid, kgn_peer_t *new_stub_peer, kgn_peer_t **peerp);
int kgnilnd_add_peer(kgn_net_t *net, lnet_nid_t nid, kgn_peer_t **peerp);

kgn_peer_t *kgnilnd_find_peer_locked(lnet_nid_t nid);
int kgnilnd_del_conn_or_peer(kgn_net_t *net, lnet_nid_t nid, int command, int error);
void kgnilnd_peer_increase_reconnect_locked(kgn_peer_t *peer);
void kgnilnd_queue_reply(kgn_conn_t *conn, kgn_tx_t *tx);
void kgnilnd_queue_tx(kgn_conn_t *conn, kgn_tx_t *tx);
void kgnilnd_launch_tx(kgn_tx_t *tx, kgn_net_t *net,
                       struct lnet_processid *target);
int kgnilnd_send_mapped_tx(kgn_tx_t *tx, int try_map_if_full);
void kgnilnd_consume_rx(kgn_rx_t *rx);

void kgnilnd_schedule_device(kgn_device_t *dev);
void kgnilnd_device_callback(__u32 devid, __u64 arg);
void kgnilnd_schedule_device_timer(cfs_timer_cb_arg_t data);
void kgnilnd_schedule_device_timer_rd(cfs_timer_cb_arg_t data);

int kgnilnd_reaper(void *arg);
int kgnilnd_scheduler(void *arg);
int kgnilnd_dgram_mover(void *arg);
int kgnilnd_rca(void *arg);
int kgnilnd_thread_start(int(*fn)(void *arg), void *arg, char *name, int id);

int kgnilnd_create_conn(kgn_conn_t **connp, kgn_device_t *dev);
int kgnilnd_conn_isdup_locked(kgn_peer_t *peer, kgn_conn_t *newconn);
kgn_conn_t *kgnilnd_find_conn_locked(kgn_peer_t *peer);
int kgnilnd_get_conn(kgn_conn_t **connp, kgn_peer_t);
kgn_conn_t *kgnilnd_find_or_create_conn_locked(kgn_peer_t *peer);
void kgnilnd_peer_cancel_tx_queue(kgn_peer_t *peer);
void kgnilnd_cancel_peer_connect_locked(kgn_peer_t *peer, struct list_head *zombies);
int kgnilnd_close_stale_conns_locked(kgn_peer_t *peer, kgn_conn_t *newconn);
void kgnilnd_peer_alive(kgn_peer_t *peer);
void kgnilnd_peer_notify(kgn_peer_t *peer, int error, int alive);
void kgnilnd_close_conn_locked(kgn_conn_t *conn, int error);
void kgnilnd_close_conn(kgn_conn_t *conn, int error);
void kgnilnd_complete_closed_conn(kgn_conn_t *conn);
void kgnilnd_destroy_conn_ep(kgn_conn_t *conn);

int kgnilnd_close_peer_conns_locked(kgn_peer_t *peer, int why);
int kgnilnd_report_node_state(lnet_nid_t nid, int down);
void kgnilnd_wakeup_rca_thread(void);
int kgnilnd_start_rca_thread(void);
int kgnilnd_get_node_state(__u32 nid);

int kgnilnd_tunables_setup(struct lnet_ni *ni);
int kgnilnd_tunables_init(void);

void kgnilnd_init_msg(kgn_msg_t *msg, int type, lnet_nid_t source);

void kgnilnd_bump_timeouts(__u32 nap_time, char *reason);
void kgnilnd_pause_threads(void);
int kgnilnd_hw_in_quiesce(void);
int kgnilnd_check_hw_quiesce(void);
void kgnilnd_quiesce_wait(char *reason);
void kgnilnd_quiesce_end_callback(gni_nic_handle_t nic_handle, uint64_t msecs);
int kgnilnd_ruhroh_thread(void *arg);
void kgnilnd_reset_stack(void);
void kgnilnd_critical_error(gni_err_handle_t err_handle);

void kgnilnd_insert_sysctl(void);
void kgnilnd_remove_sysctl(void);
void kgnilnd_proc_init(void);
void kgnilnd_proc_fini(void);

void kgnilnd_release_mbox(kgn_conn_t *conn, int purgatory_hold);

int kgnilnd_find_and_cancel_dgram(kgn_device_t *dev, lnet_nid_t dst_nid);
void kgnilnd_cancel_dgram_locked(kgn_dgram_t *dgram);
void kgnilnd_release_dgram(kgn_device_t *dev, kgn_dgram_t *dgram, int shutdown);

int kgnilnd_setup_wildcard_dgram(kgn_device_t *dev);
int kgnilnd_cancel_net_dgrams(kgn_net_t *net);
int kgnilnd_cancel_wc_dgrams(kgn_device_t *dev);
int kgnilnd_cancel_dgrams(kgn_device_t *dev);
void kgnilnd_wait_for_canceled_dgrams(kgn_device_t *dev);

int kgnilnd_dgram_waitq(void *arg);

int kgnilnd_set_conn_params(kgn_dgram_t *dgram);
/* struct2str functions - we deliberately omit a default: case so the compiler
 * will flag any missing case. This lets us hide these helpers down here out
 * of the way while still catching any updates to the enums/types.
 */

#define DO_TYPE(x) case x: return #x;
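/* For example, DO_TYPE(GNILND_MSG_NOOP) expands to
 *      case GNILND_MSG_NOOP: return "GNILND_MSG_NOOP";
 * so each *2str helper below is just a switch built from these one-liners. */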
static inline const char *
kgnilnd_fmablk_state2str(kgn_fmablk_state_t state)
{
        /* we only want a single-character string for this */
        switch (state) {
        case GNILND_FMABLK_IDLE:
                return "I";
        case GNILND_FMABLK_PHYS:
                return "P";
        case GNILND_FMABLK_VIRT:
                return "V";
        case GNILND_FMABLK_FREED:
                return "F";
        }
        return "<unknown state>";
}
static inline const char *
kgnilnd_msgtype2str(int type)
{
        switch (type) {
                DO_TYPE(GNILND_MSG_NONE);
                DO_TYPE(GNILND_MSG_NOOP);
                DO_TYPE(GNILND_MSG_IMMEDIATE);
                DO_TYPE(GNILND_MSG_PUT_REQ);
                DO_TYPE(GNILND_MSG_PUT_NAK);
                DO_TYPE(GNILND_MSG_PUT_ACK);
                DO_TYPE(GNILND_MSG_PUT_DONE);
                DO_TYPE(GNILND_MSG_GET_REQ);
                DO_TYPE(GNILND_MSG_GET_NAK);
                DO_TYPE(GNILND_MSG_GET_DONE);
                DO_TYPE(GNILND_MSG_CLOSE);
                DO_TYPE(GNILND_MSG_PUT_REQ_REV);
                DO_TYPE(GNILND_MSG_PUT_DONE_REV);
                DO_TYPE(GNILND_MSG_PUT_NAK_REV);
                DO_TYPE(GNILND_MSG_GET_REQ_REV);
                DO_TYPE(GNILND_MSG_GET_ACK_REV);
                DO_TYPE(GNILND_MSG_GET_DONE_REV);
                DO_TYPE(GNILND_MSG_GET_NAK_REV);
        }
        return "<unknown msg type>";
}
static inline const char *
kgnilnd_tx_state2str(kgn_tx_list_state_t state)
{
        switch (state) {
                DO_TYPE(GNILND_TX_IDLE);
                DO_TYPE(GNILND_TX_ALLOCD);
                DO_TYPE(GNILND_TX_PEERQ);
                DO_TYPE(GNILND_TX_MAPQ);
                DO_TYPE(GNILND_TX_FMAQ);
                DO_TYPE(GNILND_TX_LIVE_FMAQ);
                DO_TYPE(GNILND_TX_RDMAQ);
                DO_TYPE(GNILND_TX_LIVE_RDMAQ);
                DO_TYPE(GNILND_TX_DYING);
                DO_TYPE(GNILND_TX_FREED);
        }
        return "<unknown state>";
}
static inline const char *
kgnilnd_conn_state2str(kgn_conn_t *conn)
{
        kgn_conn_state_t state = conn->gnc_state;

        switch (state) {
                DO_TYPE(GNILND_CONN_DUMMY);
                DO_TYPE(GNILND_CONN_LISTEN);
                DO_TYPE(GNILND_CONN_CONNECTING);
                DO_TYPE(GNILND_CONN_ESTABLISHED);
                DO_TYPE(GNILND_CONN_CLOSING);
                DO_TYPE(GNILND_CONN_CLOSED);
                DO_TYPE(GNILND_CONN_DONE);
                DO_TYPE(GNILND_CONN_DESTROY_EP);
        }
        return "<?state?>";
}
static inline const char *
kgnilnd_connreq_type2str(kgn_connreq_t *connreq)
{
        kgn_connreq_type_t type = connreq->gncr_type;

        switch (type) {
                DO_TYPE(GNILND_CONNREQ_REQ);
                DO_TYPE(GNILND_CONNREQ_NAK);
                DO_TYPE(GNILND_CONNREQ_CLOSE);
        }
        return "<?type?>";
}
static inline const char *
kgnilnd_dgram_state2str(kgn_dgram_t *dgram)
{
        kgn_dgram_state_t state = dgram->gndg_state;

        switch (state) {
                DO_TYPE(GNILND_DGRAM_USED);
                DO_TYPE(GNILND_DGRAM_POSTING);
                DO_TYPE(GNILND_DGRAM_POSTED);
                DO_TYPE(GNILND_DGRAM_PROCESSING);
                DO_TYPE(GNILND_DGRAM_DONE);
                DO_TYPE(GNILND_DGRAM_CANCELED);
        }
        return "<?state?>";
}
static inline const char *
kgnilnd_dgram_type2str(kgn_dgram_t *dgram)
{
        kgn_dgram_type_t type = dgram->gndg_type;

        switch (type) {
                DO_TYPE(GNILND_DGRAM_REQ);
                DO_TYPE(GNILND_DGRAM_WC_REQ);
                DO_TYPE(GNILND_DGRAM_NAK);
                DO_TYPE(GNILND_DGRAM_CLOSE);
        }
        return "<?type?>";
}
static inline const char *
kgnilnd_conn_dgram_type2str(kgn_dgram_type_t type)
{
        switch (type) {
                DO_TYPE(GNILND_DGRAM_REQ);
                DO_TYPE(GNILND_DGRAM_WC_REQ);
                DO_TYPE(GNILND_DGRAM_NAK);
                DO_TYPE(GNILND_DGRAM_CLOSE);
        }
        return "<?type?>";
}
/* pulls in tunables per platform and adds in nid/nic conversion
 * if RCA wasn't available at build time */
#include "gnilnd_hss_ops.h"
/* API wrapper functions - include late to pick up all of the other defines */
#include "gnilnd_api_wrap.h"

#if defined(CONFIG_CRAY_GEMINI)
 #include "gnilnd_gemini.h"
#elif defined(CONFIG_CRAY_ARIES)
 #include "gnilnd_aries.h"
#else
 #error "Undefined Network Hardware Type"
#endif
extern uint32_t kgni_driver_version;

static inline void
kgnilnd_check_kgni_version(void)
{
        uint32_t *kdv;

        kgnilnd_data.kgn_enable_gl_mutex = 1;
        kdv = symbol_get(kgni_driver_version);
        if (!kdv) {
                LCONSOLE_INFO("Not using thread safe locking -"
                        " no symbol kgni_driver_version\n");
                return;
        }

        /* Thread-safe kgni implemented in minor ver 0x44/45, code rev 0xb9 */
        if (*kdv < GNI_VERSION_CHECK(0, GNILND_KGNI_TS_MINOR_VER, 0xb9)) {
                symbol_put(kgni_driver_version);
                LCONSOLE_INFO("Not using thread safe locking, gni version 0x%x,"
                        " need >= 0x%x\n", *kdv,
                        GNI_VERSION_CHECK(0, GNILND_KGNI_TS_MINOR_VER, 0xb9));
                return;
        }

        symbol_put(kgni_driver_version);

        if (!*kgnilnd_tunables.kgn_thread_safe) {
                return;
        }

        /* Use thread-safe locking */
        kgnilnd_data.kgn_enable_gl_mutex = 0;
}

#endif /* _GNILND_GNILND_H_ */