/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 *
 * lnet/include/lnet/lib-types.h
 *
 * Types used by the library side routines that do not need to be
 * exposed to the user application
 */
#ifndef __LNET_LIB_TYPES_H__
#define __LNET_LIB_TYPES_H__

#ifndef __KERNEL__
# error This include is only for kernel use.
#endif
#include <linux/kthread.h>
#include <linux/uio.h>
#include <linux/semaphore.h>
#include <linux/types.h>
#include <linux/kref.h>
#include <net/genetlink.h>

#include <uapi/linux/lnet/lnet-nl.h>
#include <uapi/linux/lnet/lnet-dlc.h>
#include <uapi/linux/lnet/lnetctl.h>
#include <uapi/linux/lnet/nidstr.h>
char *libcfs_nidstr_r(const struct lnet_nid *nid,
		      char *buf, size_t buf_size);

static inline char *libcfs_nidstr(const struct lnet_nid *nid)
{
	return libcfs_nidstr_r(nid, libcfs_next_nidstring(),
			       LNET_NIDSTR_SIZE);
}

int libcfs_strnid(struct lnet_nid *nid, const char *str);
char *libcfs_idstr(struct lnet_processid *id);

int cfs_match_nid_net(struct lnet_nid *nid, u32 net,
		      struct list_head *net_num_list,
		      struct list_head *addr);
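
/* Usage sketch (illustrative only): the _r variant fills a
 * caller-supplied buffer, while libcfs_nidstr() rotates through static
 * buffers, so its result must not be cached across calls:
 *
 *	char buf[LNET_NIDSTR_SIZE];
 *
 *	pr_info("peer %s\n", libcfs_nidstr_r(&nid, buf, sizeof(buf)));
 */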

/* Max payload size */
#define LNET_MAX_PAYLOAD	LNET_MTU

/** limit on the number of fragments in discontiguous MDs */
#define LNET_MAX_IOV		256
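
/* For reference (not a definition from this header): with the usual
 * 1 MiB LNET_MTU and 4 KiB pages, 256 fragments cover exactly one
 * maximally sized payload, since 1 MiB / 4 KiB == 256. */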

/*
 * This is the maximum health value.
 * All local and peer NIs created have their health default to this value.
 */
#define LNET_MAX_HEALTH_VALUE		1000
#define LNET_MAX_SELECTION_PRIORITY	UINT_MAX

enum lnet_msg_hstatus {
	LNET_MSG_STATUS_OK = 0,
	LNET_MSG_STATUS_LOCAL_INTERRUPT,
	LNET_MSG_STATUS_LOCAL_DROPPED,
	LNET_MSG_STATUS_LOCAL_ABORTED,
	LNET_MSG_STATUS_LOCAL_NO_ROUTE,
	LNET_MSG_STATUS_LOCAL_ERROR,
	LNET_MSG_STATUS_LOCAL_TIMEOUT,
	LNET_MSG_STATUS_REMOTE_ERROR,
	LNET_MSG_STATUS_REMOTE_DROPPED,
	LNET_MSG_STATUS_REMOTE_TIMEOUT,
	LNET_MSG_STATUS_NETWORK_TIMEOUT,
};
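
/* Illustrative note: the LOCAL_* statuses above feed the hlt_local_*
 * counters in struct lnet_health_local_stats below, e.g.
 *
 *	case LNET_MSG_STATUS_LOCAL_TIMEOUT:
 *		atomic_inc(&stats->hlt_local_timeout);
 *
 * while the REMOTE_* and NETWORK_TIMEOUT statuses feed
 * struct lnet_health_remote_stats.
 */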

struct lnet_rsp_tracker {
	/* chain on the waiting list */
	struct list_head	rspt_on_list;
	/* cpt to lock */
	int			rspt_cpt;
	/* nid of next hop */
	struct lnet_nid		rspt_next_hop_nid;
	/* deadline of the REPLY/ACK */
	ktime_t			rspt_deadline;
	/* parent MD */
	struct lnet_handle_md	rspt_mdh;
};

struct lnet_msg {
	struct list_head	msg_activelist;
	struct list_head	msg_list;	/* Q for credits/MD */

	struct lnet_processid	msg_target;
	/* Primary NID of the source. */
	struct lnet_nid		msg_initiator;
	/* where is it from, it's only for building event */
	struct lnet_nid		msg_from;
	__u32			msg_type;

	/*
	 * hold parameters in case message is withheld due
	 * to discovery
	 */
	struct lnet_nid		msg_src_nid_param;
	struct lnet_nid		msg_rtr_nid_param;

	/*
	 * Deadline for the message after which it will be finalized if it
	 * has not completed.
	 */
	ktime_t			msg_deadline;

	/* The message health status. */
	enum lnet_msg_hstatus	msg_health_status;
	/* This is a recovery message */
	bool			msg_recovery;
	/* the number of times a transmission has been retried */
	int			msg_retry_count;
	/* flag to indicate that we do not want to resend this message */
	bool			msg_no_resend;

	/* committed for sending */
	unsigned int		msg_tx_committed:1;
	/* CPT # this message committed for sending */
	unsigned int		msg_tx_cpt:15;
	/* committed for receiving */
	unsigned int		msg_rx_committed:1;
	/* CPT # this message committed for receiving */
	unsigned int		msg_rx_cpt:15;
	/* queued for tx credit */
	unsigned int		msg_tx_delayed:1;
	/* queued for RX buffer */
	unsigned int		msg_rx_delayed:1;
	/* ready for pending on RX delay list */
	unsigned int		msg_rx_ready_delay:1;

	unsigned int		msg_vmflush:1;		/* VM trying to free memory */
	unsigned int		msg_target_is_router:1;	/* sending to a router */
	unsigned int		msg_routing:1;		/* being forwarded */
	unsigned int		msg_ack:1;		/* ack on finalize (PUT) */
	unsigned int		msg_sending:1;		/* outgoing message */
	unsigned int		msg_receiving:1;	/* being received */
	unsigned int		msg_txcredit:1;		/* taken an NI send credit */
	unsigned int		msg_peertxcredit:1;	/* taken a peer send credit */
	unsigned int		msg_rtrcredit:1;	/* taken a global router credit */
	unsigned int		msg_peerrtrcredit:1;	/* taken a peer router credit */
	unsigned int		msg_onactivelist:1;	/* on the activelist */
	unsigned int		msg_rdma_get:1;

	struct lnet_peer_ni	*msg_txpeer;	/* peer I'm sending to */
	struct lnet_peer_ni	*msg_rxpeer;	/* peer I received from */

	void			*msg_private;
	struct lnet_libmd	*msg_md;
	/* the NI the message was sent or received over */
	struct lnet_ni		*msg_txni;
	struct lnet_ni		*msg_rxni;

	unsigned int		msg_len;
	unsigned int		msg_wanted;
	unsigned int		msg_offset;
	unsigned int		msg_niov;
	struct bio_vec		*msg_kiov;

	struct lnet_event	msg_ev;
	struct lnet_hdr		msg_hdr;
};

struct lnet_libhandle {
	struct list_head	lh_hash_chain;
	__u64			lh_cookie;
};

#define lh_entry(ptr, type, member) \
	((type *)((char *)(ptr) - (char *)(&((type *)0)->member)))
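
/* lh_entry() is the classic container_of() pattern: given a pointer to
 * an embedded struct lnet_libhandle, recover the enclosing object.
 * Illustrative sketch (md_lh is embedded in struct lnet_libmd below):
 *
 *	struct lnet_libmd *md = lh_entry(lh, struct lnet_libmd, md_lh);
 */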

struct lnet_me {
	struct list_head	me_list;
	int			me_cpt;
	struct lnet_processid	me_match_id;
	unsigned int		me_portal;
	unsigned int		me_pos;		/* hash offset in mt_hash */
	__u64			me_match_bits;
	__u64			me_ignore_bits;
	enum lnet_unlink	me_unlink;
	struct lnet_libmd	*me_md;
};

struct lnet_libmd {
	struct list_head	 md_list;
	struct lnet_libhandle	 md_lh;
	struct lnet_me		*md_me;
	char			*md_start;
	unsigned int		 md_offset;
	unsigned int		 md_length;
	unsigned int		 md_max_size;
	int			 md_threshold;
	int			 md_refcount;
	unsigned int		 md_options;
	unsigned int		 md_flags;
	unsigned int		 md_niov;	/* # frags at end of struct */
	void			*md_user_ptr;
	struct lnet_rsp_tracker	*md_rspt_ptr;
	lnet_handler_t		 md_handler;
	struct lnet_handle_md	 md_bulk_handle;
	struct bio_vec		 md_kiov[LNET_MAX_IOV];
};

#define LNET_MD_FLAG_ZOMBIE		BIT(0)
#define LNET_MD_FLAG_AUTO_UNLINK	BIT(1)
#define LNET_MD_FLAG_ABORTED		BIT(2)
/* LNET_MD_FLAG_HANDLING is set when a non-unlink event handler
 * is being called for an event relating to the md.
 * It ensures only one such handler runs at a time.
 * The final "unlink" event is only called once the
 * md_refcount has reached zero, and this flag has been cleared,
 * ensuring that it doesn't race with any other event handler
 * call.
 */
#define LNET_MD_FLAG_HANDLING		BIT(3)
#define LNET_MD_FLAG_DISCARD		BIT(4)
#define LNET_MD_FLAG_GPU		BIT(5)	/**< Special mapping needs */

struct lnet_test_peer {
	/* info about peers we are trying to fail */
	struct list_head	tp_list;	/* ln_test_peers */
	struct lnet_nid		tp_nid;		/* matching nid */
	unsigned int		tp_threshold;	/* # failures to simulate */
};

#define LNET_COOKIE_TYPE_MD	1
#define LNET_COOKIE_TYPE_ME	2
#define LNET_COOKIE_TYPE_EQ	3
#define LNET_COOKIE_TYPE_BITS	2
#define LNET_COOKIE_MASK	((1ULL << LNET_COOKIE_TYPE_BITS) - 1ULL)
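
/* Illustrative: the low LNET_COOKIE_TYPE_BITS bits of a cookie record
 * what kind of resource it names, so the type can be recovered with
 *
 *	int type = cookie & LNET_COOKIE_MASK;
 *
 * e.g. type == LNET_COOKIE_TYPE_MD for an MD handle.
 */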

struct netstrfns {
	u32	nf_type;
	char	*nf_name;
	char	*nf_modname;
	void	(*nf_addr2str)(u32 addr, char *str, size_t size);
	void	(*nf_addr2str_size)(const __be32 *addr, size_t asize,
				    char *str, size_t size);
	int	(*nf_str2addr)(const char *str, int nob, u32 *addr);
	int	(*nf_str2addr_size)(const char *str, int nob,
				    __be32 *addr, size_t *asize);
	int	(*nf_parse_addrlist)(char *str, int len,
				     struct list_head *list);
	int	(*nf_print_addrlist)(char *buffer, int count,
				     struct list_head *list);
	int	(*nf_match_addr)(u32 addr, struct list_head *list);
	int	(*nf_min_max)(struct list_head *nidlist, u32 *min_nid,
			      u32 *max_nid);
};

struct lnet_ni;		/* forward ref */
struct socket;		/* forward ref */

struct lnet_lnd {
	/* fields initialized by the LND */
	u32 lnd_type;

	int (*lnd_startup)(struct lnet_ni *ni);
	void (*lnd_shutdown)(struct lnet_ni *ni);
	int (*lnd_ctl)(struct lnet_ni *ni, unsigned int cmd, void *arg);

	/* In data movement APIs below, payload buffers are described as a set
	 * of 'niov' fragments which are in pages.
	 * The LND may NOT overwrite these fragment descriptors.
	 * An 'offset' may specify a byte offset within the set of
	 * fragments to start from.
	 */

	/* Start sending a preformatted message. 'private' is NULL for PUT and
	 * GET messages; otherwise this is a response to an incoming message
	 * and 'private' is the 'private' passed to lnet_parse(). Return
	 * non-zero for immediate failure, otherwise complete later with
	 * lnet_finalize(). */
	int (*lnd_send)(struct lnet_ni *ni, void *private,
			struct lnet_msg *msg);

	/* Start receiving 'mlen' bytes of payload data, skipping the following
	 * 'rlen' - 'mlen' bytes. 'private' is the 'private' passed to
	 * lnet_parse(). Return non-zero for immediate failure, otherwise
	 * complete later with lnet_finalize(). This also gives back a receive
	 * credit if the LND does flow control. */
	int (*lnd_recv)(struct lnet_ni *ni, void *private, struct lnet_msg *msg,
			int delayed, unsigned int niov,
			struct bio_vec *kiov,
			unsigned int offset, unsigned int mlen, unsigned int rlen);

	/* lnet_parse() has had to delay processing of this message
	 * (e.g. waiting for a forwarding buffer or send credits). Give the
	 * LND a chance to free urgently needed resources. If called, return 0
	 * for success and do NOT give back a receive credit; that has to wait
	 * until lnd_recv() gets called. On failure return < 0 and
	 * release resources; lnd_recv() will not be called. */
	int (*lnd_eager_recv)(struct lnet_ni *ni, void *private,
			      struct lnet_msg *msg, void **new_privatep);

	/* notification of peer down */
	void (*lnd_notify_peer_down)(struct lnet_nid *peer);

	/* accept a new connection */
	int (*lnd_accept)(struct lnet_ni *ni, struct socket *sock);

	/* get dma_dev priority */
	unsigned int (*lnd_get_dev_prio)(struct lnet_ni *ni,
					 unsigned int dev_idx);
};
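
/* A minimal LND wires these callbacks up roughly as follows
 * (illustrative sketch; the "mylnd_*" names are hypothetical, and
 * registration is assumed to go through the usual lnet_register_lnd()
 * entry point):
 *
 *	static const struct lnet_lnd the_mylnd = {
 *		.lnd_type	= LOLND,
 *		.lnd_startup	= mylnd_startup,
 *		.lnd_shutdown	= mylnd_shutdown,
 *		.lnd_send	= mylnd_send,
 *		.lnd_recv	= mylnd_recv,
 *	};
 *
 *	lnet_register_lnd(&the_mylnd);
 */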

struct lnet_tx_queue {
	int			tq_credits;	/* # tx credits free */
	int			tq_credits_min;	/* lowest it's been */
	int			tq_credits_max;	/* total # tx credits */
	struct list_head	tq_delayed;	/* delayed TXs */
};

enum lnet_net_state {
	/* set when net block is allocated */
	LNET_NET_STATE_INIT = 0,
	/* set when NIs in net are started successfully */
	LNET_NET_STATE_ACTIVE,
	/* set if all NIs in net are in FAILED state */
	LNET_NET_STATE_INACTIVE,
	/* set when shutting down a NET */
	LNET_NET_STATE_DELETING
};

enum lnet_ni_state {
	/* initial state when NI is created */
	LNET_NI_STATE_INIT = 0,
	/* set when NI is brought up */
	LNET_NI_STATE_ACTIVE,
	/* set when NI is being shutdown */
	LNET_NI_STATE_DELETING,
};

#define LNET_NI_RECOVERY_PENDING	BIT(0)
#define LNET_NI_RECOVERY_FAILED		BIT(1)

enum lnet_stats_type {
	LNET_STATS_TYPE_SEND = 0,
	LNET_STATS_TYPE_RECV,
	LNET_STATS_TYPE_DROP
};

struct lnet_comm_count {
	atomic_t	co_get_count;
	atomic_t	co_put_count;
	atomic_t	co_reply_count;
	atomic_t	co_ack_count;
	atomic_t	co_hello_count;
};

struct lnet_element_stats {
	struct lnet_comm_count	el_send_stats;
	struct lnet_comm_count	el_recv_stats;
	struct lnet_comm_count	el_drop_stats;
};

struct lnet_health_local_stats {
	atomic_t	hlt_local_interrupt;
	atomic_t	hlt_local_dropped;
	atomic_t	hlt_local_aborted;
	atomic_t	hlt_local_no_route;
	atomic_t	hlt_local_timeout;
	atomic_t	hlt_local_error;
};

struct lnet_health_remote_stats {
	atomic_t	hlt_remote_dropped;
	atomic_t	hlt_remote_timeout;
	atomic_t	hlt_remote_error;
	atomic_t	hlt_network_timeout;
};

struct lnet_net {
	/* chain on the ln_nets */
	struct list_head	net_list;

	/* net ID, which is composed of
	 * (net_type << 16) | net_num.
	 * net_type can be one of the enumerated types defined in
	 * lnet/include/lnet/nidstr.h */
	__u32			net_id;

	/* round robin selection */
	__u32			net_seq;

	/* total number of CPTs in the array */
	__u32			net_ncpts;

	/* cumulative CPTs of all NIs in this net */
	__u32			*net_cpts;

	/* relative net selection priority */
	__u32			net_sel_priority;

	/* network tunables */
	struct lnet_ioctl_config_lnd_cmn_tunables net_tunables;

	/*
	 * boolean to indicate that the tunables have been set and
	 * should not be reset
	 */
	bool			net_tunables_set;

	/* procedural interface */
	const struct lnet_lnd	*net_lnd;

	/* list of NIs on this net */
	struct list_head	net_ni_list;

	/* list of NIs being added, but not started yet */
	struct list_head	net_ni_added;

	/* dying LND instances */
	struct list_head	net_ni_zombie;

	/* when I was last alive */
	time64_t		net_last_alive;

	/* protects access to net_last_alive */
	spinlock_t		net_lock;

	/* list of router nids preferred for this network */
	struct list_head	net_rtr_pref_nids;
};

struct lnet_ni {
	/* chain on the lnet_net structure */
	struct list_head	ni_netlist;

	/* chain on the recovery queue */
	struct list_head	ni_recovery;

	/* MD handle for recovery ping */
	struct lnet_handle_md	ni_ping_mdh;

	spinlock_t		ni_lock;

	/* number of CPTs */
	int			ni_ncpts;

	/* bond NI on some CPTs */
	__u32			*ni_cpts;

	/* interface's NID */
	struct lnet_nid		ni_nid;

	/* instance-specific data */
	void			*ni_data;

	/* per ni credits */
	atomic_t		ni_tx_credits;

	/* percpt TX queues */
	struct lnet_tx_queue	**ni_tx_queues;

	/* percpt reference count */
	int			**ni_refs;

	/* pointer to parent network */
	struct lnet_net		*ni_net;

	/* my health status */
	struct lnet_ni_status	*ni_status;

	/* NI FSM. Protected by lnet_ni_lock() */
	enum lnet_ni_state	ni_state;

	/* Recovery state. Protected by lnet_ni_lock() */
	__u32			ni_recovery_state;

	/* When to send the next recovery ping */
	time64_t		ni_next_ping;
	/* How many pings sent during current recovery period did not receive
	 * a reply. NB: reset whenever _any_ message arrives on this NI
	 */
	unsigned int		ni_ping_count;

	/* per NI LND tunables */
	struct lnet_lnd_tunables ni_lnd_tunables;

	/* lnd tunables set explicitly */
	bool			ni_lnd_tunables_set;

	struct lnet_element_stats ni_stats;
	struct lnet_health_local_stats ni_hstats;

	/* physical device CPT */
	int			ni_dev_cpt;

	/* sequence number used to round robin over nis within a net */
	__u32			ni_seq;

	/*
	 * health value
	 *	initialized to LNET_MAX_HEALTH_VALUE
	 * Value is decremented every time we fail to send a message over
	 * this NI because of a NI specific failure.
	 * Value is incremented if we successfully send a message.
	 */
	atomic_t		ni_healthv;

	/*
	 * Set to 1 by the LND when it receives an event telling it the device
	 * has gone into a fatal state. Set to 0 when the LND receives an
	 * event telling it the device is back online.
	 */
	atomic_t		ni_fatal_error_on;

	/* the relative selection priority of this NI */
	__u32			ni_sel_priority;

	/*
	 * equivalent interface to use
	 */
	char			*ni_interface;

	struct net		*ni_net_ns;	/* original net namespace */
};

#define LNET_PROTO_PING_MATCHBITS	0x8000000000000000LL

/*
 * Descriptor of a ping info buffer: keep a separate indicator of the
 * size and a reference count. The type is used both as a source and
 * sink of data, so we need to keep some information outside of the
 * area that may be overwritten by network data.
 */
struct lnet_ping_buffer {
	int			pb_nnis;
	atomic_t		pb_refcnt;
	bool			pb_needs_post;
	struct lnet_ping_info	pb_info;
};

#define LNET_PING_BUFFER_SIZE(NNIDS) \
	offsetof(struct lnet_ping_buffer, pb_info.pi_ni[NNIDS])
#define LNET_PING_BUFFER_LONI(PBUF)	((PBUF)->pb_info.pi_ni[0].ns_nid)
#define LNET_PING_BUFFER_SEQNO(PBUF)	((PBUF)->pb_info.pi_ni[0].ns_status)

#define LNET_PING_INFO_TO_BUFFER(PINFO) \
	container_of((PINFO), struct lnet_ping_buffer, pb_info)
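
/* Worked example (illustrative): a buffer holding the status of N NIs
 * occupies the struct lnet_ping_buffer header plus N pi_ni[] slots, so
 * an allocation might look like
 *
 *	pbuf = kmalloc(LNET_PING_BUFFER_SIZE(nnis), GFP_KERNEL);
 *
 * and LNET_PING_INFO_TO_BUFFER(&pbuf->pb_info) recovers pbuf.
 */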

struct lnet_nid_list {
	struct list_head	nl_list;
	struct lnet_nid		nl_nid;
};

struct lnet_peer_ni {
	/* chain on lpn_peer_nis */
	struct list_head	lpni_peer_nis;
	/* chain on remote peer list */
	struct list_head	lpni_on_remote_peer_ni_list;
	/* chain on recovery queue */
	struct list_head	lpni_recovery;
	/* chain on peer hash */
	struct list_head	lpni_hashlist;
	/* messages blocking for tx credits */
	struct list_head	lpni_txq;
	/* pointer to peer net I'm part of */
	struct lnet_peer_net	*lpni_peer_net;
	/* statistics kept on each peer NI */
	struct lnet_element_stats lpni_stats;
	struct lnet_health_remote_stats lpni_hstats;
	/* spin lock protecting credits and lpni_txq */
	spinlock_t		lpni_lock;
	/* # tx credits available */
	int			lpni_txcredits;
	/* low water mark */
	int			lpni_mintxcredits;
	/*
	 * Each peer_ni in a gateway maintains its own credits. This
	 * allows more traffic to gateways that have multiple interfaces.
	 */
	/* # router credits */
	int			lpni_rtrcredits;
	/* low water mark */
	int			lpni_minrtrcredits;
	/* bytes queued for sending */
	long			lpni_txqnob;
	/* network peer is on */
	struct lnet_net		*lpni_net;
	/* peer's NID */
	struct lnet_nid		lpni_nid;
	/* # refs */
	struct kref		lpni_kref;
	/* health value for the peer */
	atomic_t		lpni_healthv;
	/* recovery ping mdh */
	struct lnet_handle_md	lpni_recovery_ping_mdh;
	/* When to send the next recovery ping */
	time64_t		lpni_next_ping;
	/* How many pings sent during current recovery period did not receive
	 * a reply. NB: reset whenever _any_ message arrives from this peer NI
	 */
	unsigned int		lpni_ping_count;
	/* CPT this peer attached on */
	int			lpni_cpt;
	/* state flags -- protected by lpni_lock */
	unsigned		lpni_state;
	/* status of the peer NI as reported by the peer */
	__u32			lpni_ns_status;
	/* sequence number used to round robin over peer nis within a net */
	__u32			lpni_seq;
	/* sequence number used to round robin over gateways */
	__u32			lpni_gw_seq;
	/* returned RC ping features. Protected with lpni_lock */
	unsigned int		lpni_ping_feats;
	/* time last message was received from the peer */
	time64_t		lpni_last_alive;
	/* preferred local nids: if only one, use lpni_pref.nid */
	union lpni_pref {
		struct lnet_nid	nid;
		struct list_head nids;
	} lpni_pref;
	/* list of router nids preferred for this peer NI */
	struct list_head	lpni_rtr_pref_nids;
	/* The relative selection priority of this peer NI */
	__u32			lpni_sel_priority;
	/* number of preferred NIDs in lpni_pref_nids */
	__u32			lpni_pref_nnids;
};

/* Preferred path added due to traffic on non-MR peer_ni */
#define LNET_PEER_NI_NON_MR_PREF	BIT(0)
/* peer is being recovered. */
#define LNET_PEER_NI_RECOVERY_PENDING	BIT(1)
/* recovery ping failed */
#define LNET_PEER_NI_RECOVERY_FAILED	BIT(2)
/* peer is being deleted */
#define LNET_PEER_NI_DELETING		BIT(3)

struct lnet_peer {
	/* chain on pt_peer_list */
	struct list_head	lp_peer_list;

	/* list of peer nets */
	struct list_head	lp_peer_nets;

	/* list of messages pending discovery */
	struct list_head	lp_dc_pendq;

	/* chain on router list */
	struct list_head	lp_rtr_list;

	/* primary NID of the peer */
	struct lnet_nid		lp_primary_nid;

	/* source NID to use during discovery */
	struct lnet_nid		lp_disc_src_nid;
	/* destination NID to use during discovery */
	struct lnet_nid		lp_disc_dst_nid;

	/* net to perform discovery on */
	__u32			lp_disc_net_id;

	/* CPT of peer_table */
	int			lp_cpt;

	/* number of NIDs on this peer */
	int			lp_nnis;

	/* # refs from lnet_route::lr_gateway */
	int			lp_rtr_refcount;

	/*
	 * peer specific health sensitivity value used to decrement the
	 * peer NIs in this peer when set to something other than 0
	 */
	__u32			lp_health_sensitivity;

	/* messages blocking for router credits */
	struct list_head	lp_rtrq;

	/* routes on this peer */
	struct list_head	lp_routes;

	/* reference count */
	atomic_t		lp_refcount;

	/* lock protecting peer state flags and lpni_rtrq */
	spinlock_t		lp_lock;

	/* peer state flags */
	unsigned		lp_state;

	/* buffer for data pushed by peer */
	struct lnet_ping_buffer	*lp_data;

	/* MD handle for ping in progress */
	struct lnet_handle_md	lp_ping_mdh;

	/* MD handle for push in progress */
	struct lnet_handle_md	lp_push_mdh;

	/* number of NIDs for sizing push data */
	int			lp_data_nnis;

	/* NI config sequence number of peer */
	__u32			lp_peer_seqno;

	/* Local NI config sequence number acked by peer */
	__u32			lp_node_seqno;

	/* Local NI config sequence number sent to peer */
	__u32			lp_node_seqno_sent;

	/* Ping error encountered during discovery. */
	int			lp_ping_error;

	/* Push error encountered during discovery. */
	int			lp_push_error;

	/* Error encountered during discovery. */
	int			lp_dc_error;

	/* time it was put on the ln_dc_working queue */
	time64_t		lp_last_queued;

	/* link on discovery-related lists */
	struct list_head	lp_dc_list;

	/* tasks waiting on discovery of this peer */
	wait_queue_head_t	lp_dc_waitq;

	/* cached peer aliveness */
	bool			lp_alive;
};

/*
 * The status flags in lp_state. Their semantics have been chosen so that
 * lp_state can be zero-initialized.
 *
 * A peer is marked MULTI_RAIL in two cases: it was configured using DLC
 * as multi-rail aware, or the LNET_PING_FEAT_MULTI_RAIL bit was set.
 *
 * A peer is marked NO_DISCOVERY if the LNET_PING_FEAT_DISCOVERY bit was
 * NOT set when the peer was pinged by discovery.
 *
 * A peer is marked ROUTER if it indicates so in the feature bit.
 */
#define LNET_PEER_MULTI_RAIL		BIT(0)	/* Multi-rail aware */
#define LNET_PEER_NO_DISCOVERY		BIT(1)	/* Peer disabled discovery */
#define LNET_PEER_ROUTER_ENABLED	BIT(2)	/* router feature enabled */

/*
 * A peer is marked CONFIGURED if it was configured by DLC.
 *
 * In addition, a peer is marked DISCOVERED if it has fully passed
 * through Peer Discovery.
 *
 * When Peer Discovery is disabled, the discovery thread will mark
 * peers REDISCOVER to indicate that they should be re-examined if
 * discovery is (re)enabled on the node.
 *
 * A peer that was created as the result of inbound traffic will not
 * be marked CONFIGURED.
 */
#define LNET_PEER_CONFIGURED		BIT(3)	/* Configured via DLC */
#define LNET_PEER_DISCOVERED		BIT(4)	/* Peer was discovered */
#define LNET_PEER_REDISCOVER		BIT(5)	/* Discovery was disabled */
/*
 * A peer is marked DISCOVERING when discovery is in progress.
 * The other flags below correspond to stages of discovery.
 */
#define LNET_PEER_DISCOVERING		BIT(6)	/* Discovering */
#define LNET_PEER_DATA_PRESENT		BIT(7)	/* Remote peer data present */
#define LNET_PEER_NIDS_UPTODATE		BIT(8)	/* Remote peer info uptodate */
#define LNET_PEER_PING_SENT		BIT(9)	/* Waiting for REPLY to Ping */
#define LNET_PEER_PUSH_SENT		BIT(10)	/* Waiting for ACK of Push */
#define LNET_PEER_PING_FAILED		BIT(11)	/* Ping send failure */
#define LNET_PEER_PUSH_FAILED		BIT(12)	/* Push send failure */
/*
 * A ping can be forced as a way to fix up state, or as a manual
 * intervention by an admin.
 * A push can be forced in circumstances that would normally not
 * allow for one to happen.
 */
#define LNET_PEER_FORCE_PING		BIT(13)	/* Forced Ping */
#define LNET_PEER_FORCE_PUSH		BIT(14)	/* Forced Push */

/* force delete even if router */
#define LNET_PEER_RTR_NI_FORCE_DEL	BIT(15)

/* gw undergoing alive discovery */
#define LNET_PEER_RTR_DISCOVERY		BIT(16)
/* gw has undergone discovery (does not indicate success or failure) */
#define LNET_PEER_RTR_DISCOVERED	BIT(17)

/* peer is marked for deletion */
#define LNET_PEER_MARK_DELETION		BIT(18)
/* lnet_peer_del()/lnet_peer_del_locked() has been called on the peer */
#define LNET_PEER_MARK_DELETED		BIT(19)
/* lock primary NID to what's requested by ULP */
#define LNET_PEER_LOCK_PRIMARY		BIT(20)
/* this is for informational purposes only. It is set if a peer gets
 * configured from Lustre with a primary NID which belongs to another peer
 * which is also configured by Lustre as the primary NID.
 */
#define LNET_PEER_BAD_CONFIG		BIT(21)

struct lnet_peer_net {
	/* chain on lp_peer_nets */
	struct list_head	lpn_peer_nets;

	/* list of peer_nis on this network */
	struct list_head	lpn_peer_nis;

	/* pointer to the peer I'm part of */
	struct lnet_peer	*lpn_peer;

	/* Net ID */
	__u32			lpn_net_id;

	/* peer net health */
	int			lpn_healthv;

	/* time of next router ping on this net */
	time64_t		lpn_next_ping;

	/* selection sequence number */
	__u32			lpn_seq;

	/* relative peer net selection priority */
	__u32			lpn_sel_priority;

	/* reference count */
	atomic_t		lpn_refcount;
};

#define LNET_PEER_HASH_BITS	9
#define LNET_PEER_HASH_SIZE	(1 << LNET_PEER_HASH_BITS)

/*
 * peer hash table - one per CPT
 *
 * protected by lnet_net_lock/EX for update
 *	pt_version
 *	pt_hash[...]
 *	pt_peer_list
 *	pt_peers
 * protected by pt_zombie_lock:
 *	pt_zombie_list
 *	pt_zombies
 *
 * pt_zombie lock nests inside lnet_net_lock
 */
struct lnet_peer_table {
	int			pt_version;	/* /proc validity stamp */
	struct list_head	*pt_hash;	/* NID->peer hash */
	struct list_head	pt_peer_list;	/* peers */
	int			pt_peers;	/* # peers */
	struct list_head	pt_zombie_list;	/* zombie peer_ni */
	int			pt_zombies;	/* # zombie peers_ni */
	spinlock_t		pt_zombie_lock;	/* protect list and count */
};

/* peer aliveness is enabled only on routers for peers in a network where the
 * struct lnet_ni::ni_peertimeout has been set to a positive value
 */
#define lnet_peer_aliveness_enabled(lp) (the_lnet.ln_routing != 0 && \
					 ((lp)->lpni_net) && \
					 (lp)->lpni_net->net_tunables.lct_peer_timeout > 0)
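
/* Illustrative sketch only: router code tracking aliveness might
 * combine the macro above with the last-alive timestamp, e.g.
 *
 *	if (lnet_peer_aliveness_enabled(lpni) &&
 *	    ktime_get_seconds() - lpni->lpni_last_alive >
 *	    lpni->lpni_net->net_tunables.lct_peer_timeout)
 *		treat the peer NI as dead for routing purposes;
 */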

struct lnet_route {
	struct list_head	lr_list;	/* chain on net */
	struct list_head	lr_gwlist;	/* chain on gateway */
	struct lnet_peer	*lr_gateway;	/* router node */
	struct lnet_nid		lr_nid;		/* NID used to add route */
	__u32			lr_net;		/* remote network number */
	__u32			lr_lnet;	/* local network number */
	int			lr_seq;		/* sequence for round-robin */
	__u32			lr_hops;	/* how far I am */
	unsigned int		lr_priority;	/* route priority */
	atomic_t		lr_alive;	/* cached route aliveness */
	bool			lr_single_hop;	/* this route is single-hop */
};

#define LNET_REMOTE_NETS_HASH_DEFAULT	(1U << 7)
#define LNET_REMOTE_NETS_HASH_MAX	(1U << 16)
#define LNET_REMOTE_NETS_HASH_SIZE	(1 << the_lnet.ln_remote_nets_hbits)

struct lnet_remotenet {
	/* chain on ln_remote_nets_hash */
	struct list_head	lrn_list;
	/* routes to me */
	struct list_head	lrn_routes;
	/* my net number */
	__u32			lrn_net;
};

/** lnet message has credit and can be submitted to lnd for send/receive */
#define LNET_CREDIT_OK		0
/** lnet message is waiting for credit */
#define LNET_CREDIT_WAIT	1
/** lnet message is waiting for discovery */
#define LNET_DC_WAIT		2

struct lnet_rtrbufpool {
	/* my free buffer pool */
	struct list_head	rbp_bufs;
	/* messages blocking for a buffer */
	struct list_head	rbp_msgs;
	/* # pages in each buffer */
	int			rbp_npages;
	/* requested number of buffers */
	int			rbp_req_nbuffers;
	/* # buffers actually allocated */
	int			rbp_nbuffers;
	/* # free buffers / blocked messages */
	int			rbp_credits;
	/* low water mark */
	int			rbp_mincredits;
};

struct lnet_rtrbuf {
	struct list_head	 rb_list;	/* chain on rbp_bufs */
	struct lnet_rtrbufpool	*rb_pool;	/* owning pool */
	struct bio_vec		 rb_kiov[0];	/* the buffer space */
};

#define LNET_PEER_HASHSIZE	503	/* prime! */

enum lnet_match_flags {
	/* Didn't match anything */
	LNET_MATCHMD_NONE	= BIT(0),
	/* Matched OK */
	LNET_MATCHMD_OK		= BIT(1),
	/* Must be discarded */
	LNET_MATCHMD_DROP	= BIT(2),
	/* match and buffer is exhausted */
	LNET_MATCHMD_EXHAUSTED	= BIT(3),
	/* match or drop */
	LNET_MATCHMD_FINISH	= (LNET_MATCHMD_OK | LNET_MATCHMD_DROP),
};

/* Options for struct lnet_portal::ptl_options */
#define LNET_PTL_LAZY		BIT(0)
#define LNET_PTL_MATCH_UNIQUE	BIT(1)	/* unique match, for RDMA */
#define LNET_PTL_MATCH_WILDCARD	BIT(2)	/* wildcard match, request portal */

/* parameter for matching operations (GET, PUT) */
struct lnet_match_info {
	__u64			mi_mbits;
	struct lnet_processid	mi_id;
	unsigned int		mi_cpt;
	unsigned int		mi_opc;
	unsigned int		mi_portal;
	unsigned int		mi_rlength;
	unsigned int		mi_roffset;
};

/* ME hash of RDMA portal */
#define LNET_MT_HASH_BITS	8
#define LNET_MT_HASH_SIZE	(1 << LNET_MT_HASH_BITS)
#define LNET_MT_HASH_MASK	(LNET_MT_HASH_SIZE - 1)
/* we allocate (LNET_MT_HASH_SIZE + 1) entries for lnet_match_table::mt_hash;
 * the last entry is reserved for MEs with ignore-bits */
#define LNET_MT_HASH_IGNORE	LNET_MT_HASH_SIZE
/* __u64 has 2^6 bits, so we need 2^(LNET_MT_HASH_BITS - LNET_MT_BITS_U64),
 * i.e. 4 __u64s, as the bit-map, plus an extra __u64 (of which only one bit
 * is used) for the ME-list with ignore-bits, which is
 * mtable::mt_hash[LNET_MT_HASH_IGNORE] */
#define LNET_MT_BITS_U64	6	/* 2^6 bits */
#define LNET_MT_EXHAUSTED_BITS	(LNET_MT_HASH_BITS - LNET_MT_BITS_U64)
#define LNET_MT_EXHAUSTED_BMAP	((1 << LNET_MT_EXHAUSTED_BITS) + 1)
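
/* Worked example of the bitmap sizing above: LNET_MT_HASH_BITS is 8, so
 * there are 2^8 == 256 hash lists; a __u64 covers 2^6 == 64 of them, so
 * 2^(8 - 6) == 4 words map the regular lists, and one extra word (using
 * a single bit) covers the ignore-bits list, giving
 * LNET_MT_EXHAUSTED_BMAP == 5. */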

/* portal match table */
struct lnet_match_table {
	/* reserved for upcoming patches, CPU partition ID */
	unsigned int		mt_cpt;
	unsigned int		mt_portal;	/* portal index */
	/* match table is set as "enabled" if there's a non-exhausted MD
	 * attached on mt_mhash; it's only valid for wildcard portals */
	unsigned int		mt_enabled;
	/* bitmap to flag whether MEs on mt_hash are exhausted or not */
	__u64			mt_exhausted[LNET_MT_EXHAUSTED_BMAP];
	struct list_head	*mt_mhash;	/* matching hash */
};

/* these are only useful for wildcard portals */
/* Turn off message rotor for wildcard portals */
#define LNET_PTL_ROTOR_OFF	0
/* round-robin dispatch all PUT messages for wildcard portals */
#define LNET_PTL_ROTOR_ON	1
/* round-robin dispatch routed PUT messages for wildcard portals */
#define LNET_PTL_ROTOR_RR_RT	2
/* dispatch routed PUT messages by hashing source NID for wildcard portals */
#define LNET_PTL_ROTOR_HASH_RT	3

struct lnet_portal {
	spinlock_t		ptl_lock;
	unsigned int		ptl_index;	/* portal ID, reserved */
	/* flags on this portal: lazy, unique... */
	unsigned int		ptl_options;
	/* list of messages which are stealing buffer */
	struct list_head	ptl_msg_stealing;
	/* messages blocking for MD */
	struct list_head	ptl_msg_delayed;
	/* Match table for each CPT */
	struct lnet_match_table	**ptl_mtables;
	/* spread rotor of incoming "PUT" */
	unsigned int		ptl_rotor;
	/* # active entries for this portal */
	int			ptl_mt_nmaps;
	/* array of active entries' cpu-partition-id */
	int			ptl_mt_maps[0];
};

#define LNET_LH_HASH_BITS	12
#define LNET_LH_HASH_SIZE	(1ULL << LNET_LH_HASH_BITS)
#define LNET_LH_HASH_MASK	(LNET_LH_HASH_SIZE - 1)
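
/* Illustrative sketch of how a handle lookup might mask a cookie into
 * the hash (the actual lookup lives in the LNet library code):
 *
 *	bucket = &rec->rec_lh_hash[cookie & LNET_LH_HASH_MASK];
 *	list_for_each_entry(lh, bucket, lh_hash_chain)
 *		if (lh->lh_cookie == cookie)
 *			return lh;
 */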

/* resource container (ME, MD, EQ) */
struct lnet_res_container {
	unsigned int		rec_type;	/* container type */
	__u64			rec_lh_cookie;	/* cookie generator */
	struct list_head	rec_active;	/* active resource list */
	struct list_head	*rec_lh_hash;	/* handle hash */
};

/* message container */
struct lnet_msg_container {
	int			msc_init;	/* initialized or not */
	/* max # threads finalizing */
	int			msc_nfinalizers;
	/* msgs waiting to complete finalizing */
	struct list_head	msc_finalizing;
	/* msgs waiting to be resent */
	struct list_head	msc_resending;
	struct list_head	msc_active;	/* active message list */
	/* threads doing finalization */
	void			**msc_finalizers;
	/* threads doing resends */
	void			**msc_resenders;
};

/* These UDSP structures need to match the user space liblnetconfig structures
 * in order for the marshall and unmarshall functions to be common.
 */

/* Net is described as a
 *  1. net type
 *  2. num range
 */
struct lnet_ud_net_descr {
	__u32			udn_net_type;
	struct list_head	udn_net_num_range;
};

/* each NID range is defined as
 *  1. net descriptor
 *  2. address range descriptor
 */
struct lnet_ud_nid_descr {
	struct lnet_ud_net_descr ud_net_id;
	struct list_head	ud_addr_range;
	__u32			ud_mem_size;
};

/* a UDSP rule can have up to three user defined NID descriptors
 * - src: defines the local NID range for the rule
 * - dst: defines the peer NID range for the rule
 * - rte: defines the router NID range for the rule
 *
 * An action union defines the action to take when the rule
 * is matched
 */
struct lnet_udsp {
	struct list_head	udsp_on_list;
	int			udsp_idx;
	struct lnet_ud_nid_descr udsp_src;
	struct lnet_ud_nid_descr udsp_dst;
	struct lnet_ud_nid_descr udsp_rte;
	enum lnet_udsp_action_type udsp_action_type;
	union {
		__u32		udsp_priority;
	} udsp_action;
};

/* Peer Discovery states */
#define LNET_DC_STATE_SHUTDOWN	0	/* not started */
#define LNET_DC_STATE_RUNNING	1	/* started up OK */
#define LNET_DC_STATE_STOPPING	2	/* telling thread to stop */

/* Router Checker states */
#define LNET_MT_STATE_SHUTDOWN	0	/* not started */
#define LNET_MT_STATE_RUNNING	1	/* started up OK */
#define LNET_MT_STATE_STOPPING	2	/* telling thread to stop */

/* LNet states */
#define LNET_STATE_SHUTDOWN	0	/* not started */
#define LNET_STATE_RUNNING	1	/* started up OK */
#define LNET_STATE_STOPPING	2	/* telling thread to stop */

struct lnet {
	/* CPU partition table of LNet */
	struct cfs_cpt_table		*ln_cpt_table;
	/* number of CPTs in ln_cpt_table */
	unsigned int			ln_cpt_number;
	unsigned int			ln_cpt_bits;

	/* protect LNet resources (ME/MD/EQ) */
	struct cfs_percpt_lock		*ln_res_lock;
	/* # portals */
	int				ln_nportals;
	/* the vector of portals */
	struct lnet_portal		**ln_portals;
	/* percpt MD container */
	struct lnet_res_container	**ln_md_containers;

	/* Event Queue container */
	struct lnet_res_container	ln_eq_container;
	spinlock_t			ln_eq_wait_lock;

	unsigned int			ln_remote_nets_hbits;

	/* protect NI, peer table, credits, routers, rtrbuf... */
	struct cfs_percpt_lock		*ln_net_lock;
	/* percpt message containers for active/finalizing/freed message */
	struct lnet_msg_container	**ln_msg_containers;
	struct lnet_counters		**ln_counters;
	struct lnet_peer_table		**ln_peer_tables;
	/* list of peer nis not on a local network */
	struct list_head		ln_remote_peer_ni_list;
	/* failure simulation */
	struct list_head		ln_test_peers;
	struct list_head		ln_drop_rules;
	struct list_head		ln_delay_rules;
	/* LND instances */
	struct list_head		ln_nets;
	/* the loopback NI */
	struct lnet_ni			*ln_loni;
	/* network zombie list */
	struct list_head		ln_net_zombie;
	/* resend messages list */
	struct list_head		ln_msg_resend;
	/* spin lock to protect the msg resend list */
	spinlock_t			ln_msg_resend_lock;

	/* remote networks with routes to them */
	struct list_head		*ln_remote_nets_hash;
	/* validity stamp */
	__u64				ln_remote_nets_version;
	/* list of all known routers */
	struct list_head		ln_routers;
	/* validity stamp */
	__u64				ln_routers_version;
	/* percpt router buffer pools */
	struct lnet_rtrbufpool		**ln_rtrpools;

	/*
	 * Ping target / Push source
	 *
	 * The ping target and push source share a single buffer. The
	 * ln_ping_target is protected against concurrent updates by
	 * ln_api_mutex.
	 */
	struct lnet_handle_md		ln_ping_target_md;
	lnet_handler_t			ln_ping_target_handler;
	struct lnet_ping_buffer		*ln_ping_target;
	atomic_t			ln_ping_target_seqno;

	/*
	 * Push Target
	 *
	 * ln_push_nnis contains the desired size of the push target.
	 * The lnet_net_lock is used to handle update races. The old
	 * buffer may linger a while after it has been unlinked, in
	 * which case the event handler cleans up.
	 */
	lnet_handler_t			ln_push_target_handler;
	struct lnet_handle_md		ln_push_target_md;
	struct lnet_ping_buffer		*ln_push_target;
	int				ln_push_target_nnis;

	/* discovery event queue handle */
	lnet_handler_t			ln_dc_handler;
	/* discovery requests */
	struct list_head		ln_dc_request;
	/* discovery working list */
	struct list_head		ln_dc_working;
	/* discovery expired list */
	struct list_head		ln_dc_expired;
	/* discovery thread wait queue */
	wait_queue_head_t		ln_dc_waitq;
	/* discovery startup/shutdown state */
	int				ln_dc_state;

	/* monitor thread startup/shutdown state */
	int				ln_mt_state;
	/* serialise startup/shutdown */
	struct semaphore		ln_mt_signal;

	struct mutex			ln_api_mutex;
	struct mutex			ln_lnd_mutex;
	/* Have I called LNetNIInit myself? */
	int				ln_niinit_self;
	/* LNetNIInit/LNetNIFini counter */
	int				ln_refcount;
	/* SHUTDOWN/RUNNING/STOPPING */
	int				ln_state;

	int				ln_routing;	/* am I a router? */
	lnet_pid_t			ln_pid;		/* requested pid */
	/* uniquely identifies this ni in this epoch */
	__u64				ln_interface_cookie;
	/* registered LNDs */
	const struct lnet_lnd		*ln_lnds[NUM_LNDS];

	/* test protocol compatibility flags */
	unsigned long			ln_testprotocompat;

	/* 0 - load the NIs from the mod params
	 * 1 - do not load the NIs from the mod params
	 * Reverse logic to ensure that other calls to LNetNIInit
	 * need no change
	 */
	bool				ln_nis_from_mod_params;

	/*
	 * completion for the monitor thread. The monitor thread takes care of
	 * checking routes, timed-out messages and resending messages.
	 */
	struct completion		ln_mt_wait_complete;

	/* per-cpt resend queues */
	struct list_head		**ln_mt_resendqs;
	/* local NIs to recover */
	struct list_head		ln_mt_localNIRecovq;
	/* peer NIs to recover */
	struct list_head		ln_mt_peerNIRecovq;
	/*
	 * An array of queues for GET/PUT waiting for REPLY/ACK respectively.
	 * There are CPT number of queues. Since response trackers will be
	 * added on the fast path we can't afford to grab the exclusive
	 * net lock to protect these queues. The CPT will be calculated
	 * based on the mdh cookie.
	 */
	struct list_head		**ln_mt_rstq;
	/*
	 * A response tracker becomes a zombie when the associated MD is queued
	 * for unlink before the response tracker is detached from the MD. An
	 * entry on a zombie list can be freed when either the remaining
	 * operations on the MD complete or when LNet has shut down.
	 */
	struct list_head		**ln_mt_zombie_rstqs;
	/* recovery handler */
	lnet_handler_t			ln_mt_handler;

	/*
	 * Completed when the discovery and monitor threads can enter their
	 * work loops
	 */
	struct completion		ln_started;

	struct list_head		ln_udsp_list;
};

struct genl_filter_list {
	struct list_head	 lp_list;
	void			*lp_cursor;
	bool			 lp_first;
};

static const struct nla_policy scalar_attr_policy[LN_SCALAR_MAX + 1] = {
	[LN_SCALAR_ATTR_LIST]		= { .type = NLA_NESTED },
	[LN_SCALAR_ATTR_LIST_SIZE]	= { .type = NLA_U16 },
	[LN_SCALAR_ATTR_INDEX]		= { .type = NLA_U16 },
	[LN_SCALAR_ATTR_NLA_TYPE]	= { .type = NLA_U16 },
	[LN_SCALAR_ATTR_VALUE]		= { .type = NLA_STRING },
	[LN_SCALAR_ATTR_KEY_FORMAT]	= { .type = NLA_U16 },
};

int lnet_genl_send_scalar_list(struct sk_buff *msg, u32 portid, u32 seq,
			       const struct genl_family *family, int flags,
			       u8 cmd, const struct ln_key_list *data[]);

/* Special workaround for pre-4.19 kernels to send error messages
 * from dumpit routines. Newer kernels will send a message with the
 * NL_SET_ERR_MSG information by default if NETLINK_EXT_ACK is set.
 */
static inline int lnet_nl_send_error(struct sk_buff *msg, int portid, int seq,
				     int error)
{
#ifndef HAVE_NL_DUMP_WITH_EXT_ACK
	struct nlmsghdr *nlh;

	if (!error)
		return 0;

	nlh = nlmsg_put(msg, portid, seq, NLMSG_ERROR, sizeof(error), 0);
	if (!nlh)
		return -ENOMEM;

#ifdef HAVE_NL_PARSE_WITH_EXT_ACK
	netlink_ack(msg, nlh, error, NULL);
#else
	netlink_ack(msg, nlh, error);
#endif
	return nlmsg_len(nlh);
#else
	return error;
#endif
}

#endif /* __LNET_LIB_TYPES_H__ */