/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lnet/include/lnet/lib-types.h
 *
 * Types used by the library side routines that do not need to be
 * exposed to the user application
 */
#ifndef __LNET_LIB_TYPES_H__
#define __LNET_LIB_TYPES_H__

#ifndef __KERNEL__
# error This include is only for kernel use.
#endif
#include <linux/kthread.h>
#include <linux/uio.h>
#include <linux/semaphore.h>
#include <linux/types.h>

#include <uapi/linux/lnet/lnet-dlc.h>
#include <uapi/linux/lnet/lnetctl.h>
#include <uapi/linux/lnet/nidstr.h>
/* Max payload size */
#define LNET_MAX_PAYLOAD	LNET_MTU

#define LNET_MAX_IOV		(LNET_MAX_PAYLOAD >> PAGE_SHIFT)
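
/*
 * Worked example (an illustration, not from the original header): with the
 * usual LNET_MTU of 1 MiB and 4 KiB pages (PAGE_SHIFT == 12), this gives
 * LNET_MAX_IOV = (1 << 20) >> 12 = 256 page fragments per message.
 */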
/*
 * This is the maximum health value.
 * All local and peer NIs created have their health default to this value.
 */
#define LNET_MAX_HEALTH_VALUE	1000
/* forward refs */
struct lnet_libmd;

enum lnet_msg_hstatus {
	LNET_MSG_STATUS_OK = 0,
	LNET_MSG_STATUS_LOCAL_INTERRUPT,
	LNET_MSG_STATUS_LOCAL_DROPPED,
	LNET_MSG_STATUS_LOCAL_ABORTED,
	LNET_MSG_STATUS_LOCAL_NO_ROUTE,
	LNET_MSG_STATUS_LOCAL_ERROR,
	LNET_MSG_STATUS_LOCAL_TIMEOUT,
	LNET_MSG_STATUS_REMOTE_ERROR,
	LNET_MSG_STATUS_REMOTE_DROPPED,
	LNET_MSG_STATUS_REMOTE_TIMEOUT,
	LNET_MSG_STATUS_NETWORK_TIMEOUT,
};
struct lnet_rsp_tracker {
	/* chain on the waiting list */
	struct list_head rspt_on_list;
	/* cpt to lock */
	int rspt_cpt;
	/* nid of next hop */
	lnet_nid_t rspt_next_hop_nid;
	/* deadline of the REPLY/ACK */
	ktime_t rspt_deadline;
	/* parent MD */
	struct lnet_handle_md rspt_mdh;
};
struct lnet_msg {
	struct list_head msg_activelist;
	struct list_head msg_list;	/* Q for credits/MD */

	struct lnet_process_id msg_target;
	/* Primary NID of the source. */
	lnet_nid_t msg_initiator;
	/* where is it from, it's only for building event */
	lnet_nid_t msg_from;
	__u32 msg_type;

	/*
	 * hold parameters in case message is withheld due
	 * to discovery
	 */
	lnet_nid_t msg_src_nid_param;
	lnet_nid_t msg_rtr_nid_param;

	/*
	 * Deadline for the message after which it will be finalized if it
	 * has not completed.
	 */
	ktime_t msg_deadline;

	/* The message health status. */
	enum lnet_msg_hstatus msg_health_status;
	/* This is a recovery message */
	bool msg_recovery;
	/* the number of times a transmission has been retried */
	int msg_retry_count;
	/* flag to indicate that we do not want to resend this message */
	bool msg_no_resend;

	/* committed for sending */
	unsigned int msg_tx_committed:1;
	/* CPT # this message committed for sending */
	unsigned int msg_tx_cpt:15;
	/* committed for receiving */
	unsigned int msg_rx_committed:1;
	/* CPT # this message committed for receiving */
	unsigned int msg_rx_cpt:15;
	/* queued for tx credit */
	unsigned int msg_tx_delayed:1;
	/* queued for RX buffer */
	unsigned int msg_rx_delayed:1;
	/* ready for pending on RX delay list */
	unsigned int msg_rx_ready_delay:1;

	unsigned int msg_vmflush:1;		/* VM trying to free memory */
	unsigned int msg_target_is_router:1;	/* sending to a router */
	unsigned int msg_routing:1;		/* being forwarded */
	unsigned int msg_ack:1;			/* ack on finalize (PUT) */
	unsigned int msg_sending:1;		/* outgoing message */
	unsigned int msg_receiving:1;		/* being received */
	unsigned int msg_txcredit:1;		/* taken an NI send credit */
	unsigned int msg_peertxcredit:1;	/* taken a peer send credit */
	unsigned int msg_rtrcredit:1;		/* taken a global router credit */
	unsigned int msg_peerrtrcredit:1;	/* taken a peer router credit */
	unsigned int msg_onactivelist:1;	/* on the activelist */
	unsigned int msg_rdma_get:1;

	struct lnet_peer_ni *msg_txpeer;	/* peer I'm sending to */
	struct lnet_peer_ni *msg_rxpeer;	/* peer I received from */

	struct lnet_libmd *msg_md;
	/* the NI the message was sent or received over */
	struct lnet_ni *msg_txni;
	struct lnet_ni *msg_rxni;

	unsigned int msg_len;
	unsigned int msg_wanted;
	unsigned int msg_offset;
	unsigned int msg_niov;
	struct kvec *msg_iov;
	struct bio_vec *msg_kiov;

	struct lnet_event msg_ev;
	struct lnet_hdr msg_hdr;
};
struct lnet_libhandle {
	struct list_head lh_hash_chain;
	__u64 lh_cookie;
};

#define lh_entry(ptr, type, member) \
	((type *)((char *)(ptr) - (char *)(&((type *)0)->member)))
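
/*
 * Illustrative use (a sketch, not part of the original header): lh_entry()
 * behaves like container_of(), mapping an embedded lnet_libhandle back to
 * the structure that holds it, e.g.
 *
 *	struct lnet_libmd *md = lh_entry(lh, struct lnet_libmd, md_lh);
 */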
struct lnet_eq {
	lnet_eq_handler_t eq_callback;
	int **eq_refs;			/* percpt refcount for EQ */
};

struct lnet_me {
	struct list_head me_list;
	struct lnet_libhandle me_lh;
	struct lnet_process_id me_match_id;
	unsigned int me_portal;
	unsigned int me_pos;		/* hash offset in mt_hash */
	__u64 me_match_bits;
	__u64 me_ignore_bits;
	enum lnet_unlink me_unlink;
	struct lnet_libmd *me_md;
};
struct lnet_libmd {
	struct list_head md_list;
	struct lnet_libhandle md_lh;
	struct lnet_me *md_me;
	char *md_start;
	unsigned int md_offset;
	unsigned int md_length;
	unsigned int md_max_size;
	int md_threshold;
	int md_refcount;
	unsigned int md_options;
	unsigned int md_flags;
	unsigned int md_niov;		/* # frags at end of struct */
	void *md_user_ptr;
	struct lnet_rsp_tracker *md_rspt_ptr;
	struct lnet_eq *md_eq;
	struct lnet_handle_md md_bulk_handle;
	union {
		struct kvec iov[LNET_MAX_IOV];
		struct bio_vec kiov[LNET_MAX_IOV];
	} md_iov;
};
#define LNET_MD_FLAG_ZOMBIE		(1 << 0)
#define LNET_MD_FLAG_AUTO_UNLINK	(1 << 1)
#define LNET_MD_FLAG_ABORTED		(1 << 2)
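
/*
 * Illustrative use (a sketch, not from the original header): these bits are
 * tested on lnet_libmd::md_flags, e.g.
 *
 *	if (md->md_flags & LNET_MD_FLAG_ZOMBIE)
 *		return;		(MD is already being torn down)
 */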
struct lnet_test_peer {
	/* info about peers we are trying to fail */
	struct list_head tp_list;	/* ln_test_peers */
	lnet_nid_t tp_nid;		/* matching nid */
	unsigned int tp_threshold;	/* # failures to simulate */
};
#define LNET_COOKIE_TYPE_MD	1
#define LNET_COOKIE_TYPE_ME	2
#define LNET_COOKIE_TYPE_EQ	3
#define LNET_COOKIE_TYPE_BITS	2
#define LNET_COOKIE_MASK	((1ULL << LNET_COOKIE_TYPE_BITS) - 1ULL)
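
/*
 * Worked example (an illustration, not from the original header): the low
 * LNET_COOKIE_TYPE_BITS of a handle cookie encode what it refers to, e.g.
 *
 *	if ((cookie & LNET_COOKIE_MASK) == LNET_COOKIE_TYPE_MD)
 *		... the handle refers to an MD ...
 */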
struct lnet_ni;			/* forward ref */
struct socket;
struct lnet_lnd {
	/* fields initialized by the LND */
	__u32 lnd_type;

	int (*lnd_startup)(struct lnet_ni *ni);
	void (*lnd_shutdown)(struct lnet_ni *ni);
	int (*lnd_ctl)(struct lnet_ni *ni, unsigned int cmd, void *arg);
	/* In data movement APIs below, payload buffers are described as a set
	 * of 'niov' fragments which are...
	 * EITHER
	 *	in virtual memory (struct kvec *iov != NULL)
	 * OR
	 *	in pages (kernel only: struct bio_vec *kiov != NULL).
	 * The LND may NOT overwrite these fragment descriptors.
	 * An 'offset' may specify a byte offset within the set of
	 * fragments to start from.
	 */
	/* Start sending a preformatted message. 'private' is NULL for PUT and
	 * GET messages; otherwise this is a response to an incoming message
	 * and 'private' is the 'private' passed to lnet_parse(). Return
	 * non-zero for immediate failure, otherwise complete later with
	 * lnet_finalize() */
	int (*lnd_send)(struct lnet_ni *ni, void *private,
			struct lnet_msg *msg);
	/* Start receiving 'mlen' bytes of payload data, skipping the following
	 * 'rlen' - 'mlen' bytes. 'private' is the 'private' passed to
	 * lnet_parse(). Return non-zero for immediate failure, otherwise
	 * complete later with lnet_finalize(). This also gives back a receive
	 * credit if the LND does flow control. */
	int (*lnd_recv)(struct lnet_ni *ni, void *private, struct lnet_msg *msg,
			int delayed, unsigned int niov,
			struct kvec *iov, struct bio_vec *kiov,
			unsigned int offset, unsigned int mlen, unsigned int rlen);
	/* lnet_parse() has had to delay processing of this message
	 * (e.g. waiting for a forwarding buffer or send credits). Give the
	 * LND a chance to free urgently needed resources. If called, return 0
	 * for success and do NOT give back a receive credit; that has to wait
	 * until lnd_recv() gets called. On failure return < 0 and
	 * release resources; lnd_recv() will not be called. */
	int (*lnd_eager_recv)(struct lnet_ni *ni, void *private,
			      struct lnet_msg *msg, void **new_privatep);

	/* notification of peer down */
	void (*lnd_notify_peer_down)(lnet_nid_t peer);

	/* accept a new connection */
	int (*lnd_accept)(struct lnet_ni *ni, struct socket *sock);
};
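
/*
 * Illustrative sketch (not part of the original header): an LND fills in a
 * static ops table implementing the callbacks above. The "foolnd_*" handler
 * names here are hypothetical; the lnd_type value is only an example.
 *
 *	static const struct lnet_lnd the_foolnd = {
 *		.lnd_type	= SOCKLND,
 *		.lnd_startup	= foolnd_startup,
 *		.lnd_shutdown	= foolnd_shutdown,
 *		.lnd_send	= foolnd_send,
 *		.lnd_recv	= foolnd_recv,
 *	};
 */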
struct lnet_tx_queue {
	int tq_credits;			/* # tx credits free */
	int tq_credits_min;		/* lowest it's been */
	int tq_credits_max;		/* total # tx credits */
	struct list_head tq_delayed;	/* delayed TXs */
};
enum lnet_net_state {
	/* set when net block is allocated */
	LNET_NET_STATE_INIT = 0,
	/* set when NIs in net are started successfully */
	LNET_NET_STATE_ACTIVE,
	/* set if all NIs in net are in FAILED state */
	LNET_NET_STATE_INACTIVE,
	/* set when shutting down a NET */
	LNET_NET_STATE_DELETING
};

enum lnet_ni_state {
	/* initial state when NI is created */
	LNET_NI_STATE_INIT = 0,
	/* set when NI is brought up */
	LNET_NI_STATE_ACTIVE,
	/* set when NI is being shutdown */
	LNET_NI_STATE_DELETING,
};
#define LNET_NI_RECOVERY_PENDING	BIT(0)
#define LNET_NI_RECOVERY_FAILED		BIT(1)

enum lnet_stats_type {
	LNET_STATS_TYPE_SEND = 0,
	LNET_STATS_TYPE_RECV,
	LNET_STATS_TYPE_DROP
};
struct lnet_comm_count {
	atomic_t co_get_count;
	atomic_t co_put_count;
	atomic_t co_reply_count;
	atomic_t co_ack_count;
	atomic_t co_hello_count;
};

struct lnet_element_stats {
	struct lnet_comm_count el_send_stats;
	struct lnet_comm_count el_recv_stats;
	struct lnet_comm_count el_drop_stats;
};

struct lnet_health_local_stats {
	atomic_t hlt_local_interrupt;
	atomic_t hlt_local_dropped;
	atomic_t hlt_local_aborted;
	atomic_t hlt_local_no_route;
	atomic_t hlt_local_timeout;
	atomic_t hlt_local_error;
};

struct lnet_health_remote_stats {
	atomic_t hlt_remote_dropped;
	atomic_t hlt_remote_timeout;
	atomic_t hlt_remote_error;
	atomic_t hlt_network_timeout;
};
struct lnet_net {
	/* chain on the ln_nets */
	struct list_head net_list;

	/* net ID, which is composed of
	 * (net_type << 16) | net_num.
	 * net_type can be one of the enumerated types defined in
	 * lnet/include/lnet/nidstr.h */
	__u32 net_id;

	/* priority of the network */
	__u32 net_prio;

	/* total number of CPTs in the array */
	__u32 net_ncpts;

	/* cumulative CPTs of all NIs in this net */
	__u32 *net_cpts;

	/* network tunables */
	struct lnet_ioctl_config_lnd_cmn_tunables net_tunables;

	/*
	 * boolean to indicate that the tunables have been set and
	 * should not be reset
	 */
	bool net_tunables_set;

	/* procedural interface */
	const struct lnet_lnd *net_lnd;

	/* list of NIs on this net */
	struct list_head net_ni_list;

	/* list of NIs being added, but not started yet */
	struct list_head net_ni_added;

	/* dying LND instances */
	struct list_head net_ni_zombie;

	/* when I was last alive */
	time64_t net_last_alive;

	/* protects access to net_last_alive */
	spinlock_t net_lock;
};
struct lnet_ni {
	/* chain on the lnet_net structure */
	struct list_head ni_netlist;

	/* chain on the recovery queue */
	struct list_head ni_recovery;

	/* MD handle for recovery ping */
	struct lnet_handle_md ni_ping_mdh;

	spinlock_t ni_lock;

	/* number of CPTs */
	int ni_ncpts;

	/* bond NI on some CPTs */
	__u32 *ni_cpts;

	/* interface's NID */
	lnet_nid_t ni_nid;

	/* instance-specific data */
	void *ni_data;

	/* per NI credits */
	atomic_t ni_tx_credits;

	/* percpt TX queues */
	struct lnet_tx_queue **ni_tx_queues;

	/* percpt reference count */
	int **ni_refs;

	/* pointer to parent network */
	struct lnet_net *ni_net;

	/* my health status */
	struct lnet_ni_status *ni_status;

	/* NI FSM. Protected by lnet_ni_lock() */
	enum lnet_ni_state ni_state;

	/* Recovery state. Protected by lnet_ni_lock() */
	__u32 ni_recovery_state;

	/* per NI LND tunables */
	struct lnet_lnd_tunables ni_lnd_tunables;

	/* lnd tunables set explicitly */
	bool ni_lnd_tunables_set;

	/* NI statistics and counters */
	struct lnet_element_stats ni_stats;
	struct lnet_health_local_stats ni_hstats;

	/* physical device CPT */
	int ni_dev_cpt;

	/* sequence number used to round robin over nis within a net */
	__u32 ni_seq;

	/*
	 * health value
	 *	initialized to LNET_MAX_HEALTH_VALUE
	 * Value is decremented every time we fail to send a message over
	 * this NI because of a NI specific failure.
	 * Value is incremented if we successfully send a message.
	 */
	atomic_t ni_healthv;

	/*
	 * Set to 1 by the LND when it receives an event telling it the device
	 * has gone into a fatal state. Set to 0 when the LND receives an
	 * event telling it the device is back online.
	 */
	atomic_t ni_fatal_error_on;

	/*
	 * equivalent interfaces to use
	 * This is an array because socklnd bonding can still be configured
	 */
	char *ni_interfaces[LNET_INTERFACES_NUM];
	struct net *ni_net_ns;		/* original net namespace */
};
#define LNET_PROTO_PING_MATCHBITS	0x8000000000000000LL

/*
 * Descriptor of a ping info buffer: keep a separate indicator of the
 * size and a reference count. The type is used both as a source and
 * sink of data, so we need to keep some information outside of the
 * area that may be overwritten by network data.
 */
struct lnet_ping_buffer {
	int pb_nnis;
	atomic_t pb_refcnt;
	int pb_needs_post;
	struct lnet_ping_info pb_info;
};

#define LNET_PING_BUFFER_SIZE(NNIDS) \
	offsetof(struct lnet_ping_buffer, pb_info.pi_ni[NNIDS])
#define LNET_PING_BUFFER_LONI(PBUF)	((PBUF)->pb_info.pi_ni[0].ns_nid)
#define LNET_PING_BUFFER_SEQNO(PBUF)	((PBUF)->pb_info.pi_ni[0].ns_status)

#define LNET_PING_INFO_TO_BUFFER(PINFO) \
	container_of((PINFO), struct lnet_ping_buffer, pb_info)
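
/*
 * Illustrative sizing (a sketch, not from the original header): a buffer
 * with room for 'nnis' NI status entries occupies LNET_PING_BUFFER_SIZE(nnis)
 * bytes, i.e. the header plus nnis lnet_ni_status slots, e.g.
 *
 *	pbuf = kzalloc(LNET_PING_BUFFER_SIZE(nnis), GFP_KERNEL);
 *	pbuf->pb_nnis = nnis;
 *	atomic_set(&pbuf->pb_refcnt, 1);
 */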
struct lnet_peer_ni {
	/* chain on lpn_peer_nis */
	struct list_head lpni_peer_nis;
	/* chain on remote peer list */
	struct list_head lpni_on_remote_peer_ni_list;
	/* chain on recovery queue */
	struct list_head lpni_recovery;
	/* chain on peer hash */
	struct list_head lpni_hashlist;
	/* messages blocking for tx credits */
	struct list_head lpni_txq;
	/* pointer to peer net I'm part of */
	struct lnet_peer_net *lpni_peer_net;
	/* statistics kept on each peer NI */
	struct lnet_element_stats lpni_stats;
	struct lnet_health_remote_stats lpni_hstats;
	/* spin lock protecting credits and lpni_txq */
	spinlock_t lpni_lock;
	/* # tx credits available */
	int lpni_txcredits;
	/* low water mark */
	int lpni_mintxcredits;
	/*
	 * Each peer_ni in a gateway maintains its own credits. This
	 * allows more traffic to gateways that have multiple interfaces.
	 */
	/* # router credits */
	int lpni_rtrcredits;
	/* low water mark */
	int lpni_minrtrcredits;
	/* bytes queued for sending */
	long lpni_txqnob;
	/* network peer is on */
	struct lnet_net *lpni_net;
	/* peer's NID */
	lnet_nid_t lpni_nid;
	/* # refs */
	atomic_t lpni_refcount;
	/* health value for the peer */
	atomic_t lpni_healthv;
	/* recovery ping mdh */
	struct lnet_handle_md lpni_recovery_ping_mdh;
	/* CPT this peer attached on */
	int lpni_cpt;
	/* state flags -- protected by lpni_lock */
	unsigned lpni_state;
	/* status of the peer NI as reported by the peer */
	__u32 lpni_ns_status;
	/* sequence number used to round robin over peer nis within a net */
	__u32 lpni_seq;
	/* sequence number used to round robin over gateways */
	__u32 lpni_gw_seq;
	/* returned RC ping features. Protected with lpni_lock */
	unsigned int lpni_ping_feats;
	/* time last message was received from the peer */
	time64_t lpni_last_alive;
	/* preferred local nids: if only one, use lpni_pref.nid */
	union lpni_pref {
		lnet_nid_t nid;
		lnet_nid_t *nids;
	} lpni_pref;
	/* number of preferred NIDs in lpni_pref_nids */
	__u32 lpni_pref_nnids;
};
/* Preferred path added due to traffic on non-MR peer_ni */
#define LNET_PEER_NI_NON_MR_PREF	(1 << 0)
/* peer is being recovered. */
#define LNET_PEER_NI_RECOVERY_PENDING	(1 << 1)
/* recovery ping failed */
#define LNET_PEER_NI_RECOVERY_FAILED	(1 << 2)
/* peer is being deleted */
#define LNET_PEER_NI_DELETING		(1 << 3)
struct lnet_peer {
	/* chain on pt_peer_list */
	struct list_head lp_peer_list;

	/* list of peer nets */
	struct list_head lp_peer_nets;

	/* list of messages pending discovery */
	struct list_head lp_dc_pendq;

	/* chain on router list */
	struct list_head lp_rtr_list;

	/* primary NID of the peer */
	lnet_nid_t lp_primary_nid;

	/* net to perform discovery on */
	__u32 lp_disc_net_id;

	/* CPT of peer_table */
	int lp_cpt;

	/* number of NIDs on this peer */
	int lp_nnis;

	/* # refs from lnet_route::lr_gateway */
	int lp_rtr_refcount;

	/*
	 * peer specific health sensitivity value to decrement peer nis in
	 * this peer with if set to something other than 0
	 */
	__u32 lp_health_sensitivity;

	/* messages blocking for router credits */
	struct list_head lp_rtrq;

	/* routes on this peer */
	struct list_head lp_routes;

	/* reference count */
	atomic_t lp_refcount;

	/* lock protecting peer state flags and lpni_rtrq */
	spinlock_t lp_lock;

	/* peer state flags */
	unsigned lp_state;

	/* buffer for data pushed by peer */
	struct lnet_ping_buffer *lp_data;

	/* MD handle for ping in progress */
	struct lnet_handle_md lp_ping_mdh;

	/* MD handle for push in progress */
	struct lnet_handle_md lp_push_mdh;

	/* number of NIDs for sizing push data */
	int lp_data_nnis;

	/* NI config sequence number of peer */
	__u32 lp_peer_seqno;

	/* Local NI config sequence number acked by peer */
	__u32 lp_node_seqno;

	/* Local NI config sequence number sent to peer */
	__u32 lp_node_seqno_sent;

	/* Ping error encountered during discovery. */
	int lp_ping_error;

	/* Push error encountered during discovery. */
	int lp_push_error;

	/* Error encountered during discovery. */
	int lp_dc_error;

	/* time it was put on the ln_dc_working queue */
	time64_t lp_last_queued;

	/* link on discovery-related lists */
	struct list_head lp_dc_list;

	/* tasks waiting on discovery of this peer */
	wait_queue_head_t lp_dc_waitq;

	/* cached peer aliveness */
	bool lp_alive;
};
/*
 * The status flags in lp_state. Their semantics have been chosen so that
 * lp_state can be zero-initialized.
 *
 * A peer is marked MULTI_RAIL in two cases: it was configured using DLC
 * as multi-rail aware, or the LNET_PING_FEAT_MULTI_RAIL bit was set.
 *
 * A peer is marked NO_DISCOVERY if the LNET_PING_FEAT_DISCOVERY bit was
 * NOT set when the peer was pinged by discovery.
 *
 * A peer is marked ROUTER if it indicates so in the feature bit.
 */
#define LNET_PEER_MULTI_RAIL		(1 << 0)	/* Multi-rail aware */
#define LNET_PEER_NO_DISCOVERY		(1 << 1)	/* Peer disabled discovery */
#define LNET_PEER_ROUTER_ENABLED	(1 << 2)	/* router feature enabled */

/*
 * A peer is marked CONFIGURED if it was configured by DLC.
 *
 * In addition, a peer is marked DISCOVERED if it has fully passed
 * through Peer Discovery.
 *
 * When Peer Discovery is disabled, the discovery thread will mark
 * peers REDISCOVER to indicate that they should be re-examined if
 * discovery is (re)enabled on the node.
 *
 * A peer that was created as the result of inbound traffic will not
 * be marked at all.
 */
#define LNET_PEER_CONFIGURED		(1 << 3)	/* Configured via DLC */
#define LNET_PEER_DISCOVERED		(1 << 4)	/* Peer was discovered */
#define LNET_PEER_REDISCOVER		(1 << 5)	/* Discovery was disabled */

/*
 * A peer is marked DISCOVERING when discovery is in progress.
 * The other flags below correspond to stages of discovery.
 */
#define LNET_PEER_DISCOVERING		(1 << 6)	/* Discovering */
#define LNET_PEER_DATA_PRESENT		(1 << 7)	/* Remote peer data present */
#define LNET_PEER_NIDS_UPTODATE		(1 << 8)	/* Remote peer info uptodate */
#define LNET_PEER_PING_SENT		(1 << 9)	/* Waiting for REPLY to Ping */
#define LNET_PEER_PUSH_SENT		(1 << 10)	/* Waiting for ACK of Push */
#define LNET_PEER_PING_FAILED		(1 << 11)	/* Ping send failure */
#define LNET_PEER_PUSH_FAILED		(1 << 12)	/* Push send failure */

/*
 * A ping can be forced as a way to fix up state, or as a manual
 * intervention by an admin.
 * A push can be forced in circumstances that would normally not
 * allow for one to happen.
 */
#define LNET_PEER_FORCE_PING		(1 << 13)	/* Forced Ping */
#define LNET_PEER_FORCE_PUSH		(1 << 14)	/* Forced Push */

/* force delete even if router */
#define LNET_PEER_RTR_NI_FORCE_DEL	(1 << 15)

/* gw undergoing alive discovery */
#define LNET_PEER_RTR_DISCOVERY		(1 << 16)
/* gw has undergone discovery (does not indicate success or failure) */
#define LNET_PEER_RTR_DISCOVERED	(1 << 17)

/* peer is marked for deletion */
#define LNET_PEER_MARK_DELETION		(1 << 18)
struct lnet_peer_net {
	/* chain on lp_peer_nets */
	struct list_head lpn_peer_nets;

	/* list of peer_nis on this network */
	struct list_head lpn_peer_nis;

	/* pointer to the peer I'm part of */
	struct lnet_peer *lpn_peer;

	/* Net ID */
	__u32 lpn_net_id;

	/* peer net health */
	int lpn_healthv;

	/* time of last router net check attempt */
	time64_t lpn_rtrcheck_timestamp;

	/* selection sequence number */
	__u32 lpn_seq;

	/* reference count */
	atomic_t lpn_refcount;
};
#define LNET_PEER_HASH_BITS	9
#define LNET_PEER_HASH_SIZE	(1 << LNET_PEER_HASH_BITS)

/*
 * peer hash table - one per CPT
 *
 * protected by lnet_net_lock/EX for update
 *	pt_version
 *	pt_hash[...]
 *	pt_peer_list
 *	pt_peers
 * protected by pt_zombie_lock:
 *	pt_zombie_list
 *	pt_zombies
 *
 * pt_zombie lock nests inside lnet_net_lock
 */
struct lnet_peer_table {
	int pt_version;				/* /proc validity stamp */
	struct list_head *pt_hash;		/* NID->peer hash */
	struct list_head pt_peer_list;		/* peers */
	int pt_peers;				/* # peers */
	struct list_head pt_zombie_list;	/* zombie peer_ni */
	int pt_zombies;				/* # zombie peer_nis */
	spinlock_t pt_zombie_lock;		/* protect list and count */
};

/* peer aliveness is enabled only on routers for peers in a network where the
 * struct lnet_ni::ni_peertimeout has been set to a positive value
 */
#define lnet_peer_aliveness_enabled(lp) (the_lnet.ln_routing != 0 && \
					 ((lp)->lpni_net) && \
					 (lp)->lpni_net->net_tunables.lct_peer_timeout > 0)
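
/*
 * Illustrative use (a sketch, not from the original header): router code
 * would consult this before tracking a peer's liveness, e.g.
 *
 *	if (lnet_peer_aliveness_enabled(lpni))
 *		lpni->lpni_last_alive = ktime_get_seconds();
 */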
struct lnet_route {
	struct list_head lr_list;	/* chain on net */
	struct list_head lr_gwlist;	/* chain on gateway */
	struct lnet_peer *lr_gateway;	/* router node */
	lnet_nid_t lr_nid;		/* NID used to add route */
	__u32 lr_net;			/* remote network number */
	__u32 lr_lnet;			/* local network number */
	int lr_seq;			/* sequence for round-robin */
	__u32 lr_hops;			/* how far I am */
	unsigned int lr_priority;	/* route priority */
	bool lr_alive;			/* cached route aliveness */
	bool lr_single_hop;		/* this route is single-hop */
};
#define LNET_REMOTE_NETS_HASH_DEFAULT	(1U << 7)
#define LNET_REMOTE_NETS_HASH_MAX	(1U << 16)
#define LNET_REMOTE_NETS_HASH_SIZE	(1 << the_lnet.ln_remote_nets_hbits)

struct lnet_remotenet {
	/* chain on ln_remote_nets_hash */
	struct list_head lrn_list;
	/* routes to me */
	struct list_head lrn_routes;
	/* my net number */
	__u32 lrn_net;
};
/** lnet message has credit and can be submitted to lnd for send/receive */
#define LNET_CREDIT_OK		0
/** lnet message is waiting for credit */
#define LNET_CREDIT_WAIT	1
/** lnet message is waiting for discovery */
#define LNET_DC_WAIT		2
struct lnet_rtrbufpool {
	/* my free buffer pool */
	struct list_head rbp_bufs;
	/* messages blocking for a buffer */
	struct list_head rbp_msgs;
	/* # pages in each buffer */
	int rbp_npages;
	/* requested number of buffers */
	int rbp_req_nbuffers;
	/* # buffers actually allocated */
	int rbp_nbuffers;
	/* # free buffers / blocked messages */
	int rbp_credits;
	/* low water mark */
	int rbp_mincredits;
};

struct lnet_rtrbuf {
	struct list_head rb_list;	/* chain on rbp_bufs */
	struct lnet_rtrbufpool *rb_pool; /* owning pool */
	struct bio_vec rb_kiov[0];	/* the buffer space */
};
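
/*
 * Illustrative sizing (a sketch, not from the original header): rb_kiov[]
 * is a flexible array, so a buffer for a pool with rbp_npages pages would
 * be allocated roughly as
 *
 *	rb = kzalloc(offsetof(struct lnet_rtrbuf, rb_kiov[rbp->rbp_npages]),
 *		     GFP_KERNEL);
 *	rb->rb_pool = rbp;
 */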
#define LNET_PEER_HASHSIZE	503	/* prime! */

enum lnet_match_flags {
	/* Didn't match anything */
	LNET_MATCHMD_NONE	= (1 << 0),
	/* Matched OK */
	LNET_MATCHMD_OK		= (1 << 1),
	/* Must be discarded */
	LNET_MATCHMD_DROP	= (1 << 2),
	/* match and buffer is exhausted */
	LNET_MATCHMD_EXHAUSTED	= (1 << 3),
	/* match or drop */
	LNET_MATCHMD_FINISH	= (LNET_MATCHMD_OK | LNET_MATCHMD_DROP),
};
/* Options for struct lnet_portal::ptl_options */
#define LNET_PTL_LAZY		(1 << 0)
#define LNET_PTL_MATCH_UNIQUE	(1 << 1)	/* unique match, for RDMA */
#define LNET_PTL_MATCH_WILDCARD	(1 << 2)	/* wildcard match, request portal */

/* parameter for matching operations (GET, PUT) */
struct lnet_match_info {
	__u64 mi_mbits;
	struct lnet_process_id mi_id;
	unsigned int mi_cpt;
	unsigned int mi_opc;
	unsigned int mi_portal;
	unsigned int mi_rlength;
	unsigned int mi_roffset;
};
/* ME hash of RDMA portal */
#define LNET_MT_HASH_BITS	8
#define LNET_MT_HASH_SIZE	(1 << LNET_MT_HASH_BITS)
#define LNET_MT_HASH_MASK	(LNET_MT_HASH_SIZE - 1)
/* we allocate (LNET_MT_HASH_SIZE + 1) entries for lnet_match_table::mt_hash,
 * the last entry is reserved for MEs with ignore-bits */
#define LNET_MT_HASH_IGNORE	LNET_MT_HASH_SIZE
/* __u64 has 2^6 bits, so need 2^(LNET_MT_HASH_BITS - LNET_MT_BITS_U64) which
 * is 4 __u64s as bit-map, and add an extra __u64 (only use one bit) for the
 * ME-list with ignore-bits, which is mtable::mt_hash[LNET_MT_HASH_IGNORE] */
#define LNET_MT_BITS_U64	6	/* 2^6 bits */
#define LNET_MT_EXHAUSTED_BITS	(LNET_MT_HASH_BITS - LNET_MT_BITS_U64)
#define LNET_MT_EXHAUSTED_BMAP	((1 << LNET_MT_EXHAUSTED_BITS) + 1)
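
/*
 * Worked arithmetic (an illustration, not from the original header): with
 * LNET_MT_HASH_BITS == 8 there are 256 hash heads; dividing by 64 bits per
 * __u64 gives LNET_MT_EXHAUSTED_BITS == 2, i.e. 1 << 2 = 4 bitmap words,
 * plus one extra word for the ignore-bits list: LNET_MT_EXHAUSTED_BMAP == 5.
 */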
/* portal match table */
struct lnet_match_table {
	/* reserved for upcoming patches, CPU partition ID */
	unsigned int mt_cpt;
	unsigned int mt_portal;	/* portal index */
	/* match table is set as "enabled" if there's non-exhausted MD
	 * attached on mt_mhash, it's only valid for wildcard portal */
	unsigned int mt_enabled;
	/* bitmap to flag whether MEs on mt_hash are exhausted or not */
	__u64 mt_exhausted[LNET_MT_EXHAUSTED_BMAP];
	struct list_head *mt_mhash;	/* matching hash */
};
/* these are only useful for wildcard portal */
/* Turn off message rotor for wildcard portals */
#define LNET_PTL_ROTOR_OFF	0
/* round-robin dispatch all PUT messages for wildcard portals */
#define LNET_PTL_ROTOR_ON	1
/* round-robin dispatch routed PUT message for wildcard portals */
#define LNET_PTL_ROTOR_RR_RT	2
/* dispatch routed PUT message by hashing source NID for wildcard portals */
#define LNET_PTL_ROTOR_HASH_RT	3

struct lnet_portal {
	spinlock_t ptl_lock;
	unsigned int ptl_index;		/* portal ID, reserved */
	/* flags on this portal: lazy, unique... */
	unsigned int ptl_options;
	/* list of messages which are stealing buffer */
	struct list_head ptl_msg_stealing;
	/* messages blocking for MD */
	struct list_head ptl_msg_delayed;
	/* Match table for each CPT */
	struct lnet_match_table **ptl_mtables;
	/* spread rotor of incoming "PUT" */
	unsigned int ptl_rotor;
	/* # active entries for this portal */
	int ptl_mt_nmaps;
	/* array of active entries' cpu-partition-id */
	int ptl_mt_maps[0];
};
#define LNET_LH_HASH_BITS	12
#define LNET_LH_HASH_SIZE	(1ULL << LNET_LH_HASH_BITS)
#define LNET_LH_HASH_MASK	(LNET_LH_HASH_SIZE - 1)
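
/*
 * Illustrative lookup (a sketch, not the verbatim library code): a handle
 * cookie can be resolved by hashing its low bits into rec_lh_hash and then
 * matching the full cookie along the chain, e.g.
 *
 *	head = &rec->rec_lh_hash[cookie & LNET_LH_HASH_MASK];
 *	list_for_each_entry(lh, head, lh_hash_chain)
 *		if (lh->lh_cookie == cookie)
 *			return lh;
 */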
/* resource container (ME, MD, EQ) */
struct lnet_res_container {
	unsigned int rec_type;		/* container type */
	__u64 rec_lh_cookie;		/* cookie generator */
	struct list_head rec_active;	/* active resource list */
	struct list_head *rec_lh_hash;	/* handle hash */
};
/* message container */
struct lnet_msg_container {
	int msc_init;			/* initialized or not */
	/* max # threads finalizing */
	int msc_nfinalizers;
	/* msgs waiting to complete finalizing */
	struct list_head msc_finalizing;
	/* msgs waiting to be resent */
	struct list_head msc_resending;
	struct list_head msc_active;	/* active message list */
	/* threads doing finalization */
	void **msc_finalizers;
	/* threads doing resends */
	void **msc_resenders;
};
/* Peer Discovery states */
#define LNET_DC_STATE_SHUTDOWN	0	/* not started */
#define LNET_DC_STATE_RUNNING	1	/* started up OK */
#define LNET_DC_STATE_STOPPING	2	/* telling thread to stop */

/* Router Checker states */
#define LNET_MT_STATE_SHUTDOWN	0	/* not started */
#define LNET_MT_STATE_RUNNING	1	/* started up OK */
#define LNET_MT_STATE_STOPPING	2	/* telling thread to stop */

/* LNet states */
#define LNET_STATE_SHUTDOWN	0	/* not started */
#define LNET_STATE_RUNNING	1	/* started up OK */
#define LNET_STATE_STOPPING	2	/* telling thread to stop */
struct lnet {
	/* CPU partition table of LNet */
	struct cfs_cpt_table *ln_cpt_table;
	/* number of CPTs in ln_cpt_table */
	unsigned int ln_cpt_number;
	unsigned int ln_cpt_bits;

	/* protect LNet resources (ME/MD/EQ) */
	struct cfs_percpt_lock *ln_res_lock;
	/* # portals */
	int ln_nportals;
	/* the vector of portals */
	struct lnet_portal **ln_portals;
	/* percpt MD container */
	struct lnet_res_container **ln_md_containers;

	/* Event Queue container */
	struct lnet_res_container ln_eq_container;
	spinlock_t ln_eq_wait_lock;

	unsigned int ln_remote_nets_hbits;

	/* protect NI, peer table, credits, routers, rtrbuf... */
	struct cfs_percpt_lock *ln_net_lock;
	/* percpt message containers for active/finalizing/freed message */
	struct lnet_msg_container **ln_msg_containers;
	struct lnet_counters **ln_counters;
	struct lnet_peer_table **ln_peer_tables;
	/* list of peer nis not on a local network */
	struct list_head ln_remote_peer_ni_list;
	/* failure simulation */
	struct list_head ln_test_peers;
	struct list_head ln_drop_rules;
	struct list_head ln_delay_rules;
	/* LND instances */
	struct list_head ln_nets;
	/* the loopback NI */
	struct lnet_ni *ln_loni;
	/* network zombie list */
	struct list_head ln_net_zombie;
	/* resend messages list */
	struct list_head ln_msg_resend;
	/* spin lock to protect the msg resend list */
	spinlock_t ln_msg_resend_lock;

	/* remote networks with routes to them */
	struct list_head *ln_remote_nets_hash;
	/* validity stamp */
	__u64 ln_remote_nets_version;
	/* list of all known routers */
	struct list_head ln_routers;
	/* validity stamp */
	__u64 ln_routers_version;
	/* percpt router buffer pools */
	struct lnet_rtrbufpool **ln_rtrpools;

	/*
	 * Ping target / Push source
	 *
	 * The ping target and push source share a single buffer. The
	 * ln_ping_target is protected against concurrent updates by
	 * ln_api_mutex.
	 */
	struct lnet_handle_md ln_ping_target_md;
	struct lnet_eq *ln_ping_target_eq;
	struct lnet_ping_buffer *ln_ping_target;
	atomic_t ln_ping_target_seqno;

	/*
	 * Push Target
	 *
	 * ln_push_nnis contains the desired size of the push target.
	 * The lnet_net_lock is used to handle update races. The old
	 * buffer may linger a while after it has been unlinked, in
	 * which case the event handler cleans up.
	 */
	struct lnet_eq *ln_push_target_eq;
	struct lnet_handle_md ln_push_target_md;
	struct lnet_ping_buffer *ln_push_target;
	int ln_push_target_nnis;

	/* discovery event queue handle */
	struct lnet_eq *ln_dc_eq;
	/* discovery requests */
	struct list_head ln_dc_request;
	/* discovery working list */
	struct list_head ln_dc_working;
	/* discovery expired list */
	struct list_head ln_dc_expired;
	/* discovery thread wait queue */
	wait_queue_head_t ln_dc_waitq;
	/* discovery startup/shutdown state */
	int ln_dc_state;

	/* monitor thread startup/shutdown state */
	int ln_mt_state;
	/* serialise startup/shutdown */
	struct semaphore ln_mt_signal;

	struct mutex ln_api_mutex;
	struct mutex ln_lnd_mutex;
	/* Have I called LNetNIInit myself? */
	int ln_niinit_self;
	/* LNetNIInit/LNetNIFini counter */
	int ln_refcount;
	/* SHUTDOWN/RUNNING/STOPPING */
	int ln_state;

	int ln_routing;			/* am I a router? */
	lnet_pid_t ln_pid;		/* requested pid */
	/* uniquely identifies this ni in this epoch */
	__u64 ln_interface_cookie;
	/* registered LNDs */
	const struct lnet_lnd *ln_lnds[NUM_LNDS];

	/* test protocol compatibility flags */
	unsigned long ln_testprotocompat;

	/* 0 - load the NIs from the mod params
	 * 1 - do not load the NIs from the mod params
	 * Reverse logic to ensure that other calls to LNetNIInit
	 * need no change
	 */
	bool ln_nis_from_mod_params;

	/*
	 * completion for the monitor thread. The monitor thread takes care of
	 * checking routes, timed-out messages and resending messages.
	 */
	struct completion ln_mt_wait_complete;

	/* per-cpt resend queues */
	struct list_head **ln_mt_resendqs;
	/* local NIs to recover */
	struct list_head ln_mt_localNIRecovq;
	/* peer NIs to recover */
	struct list_head ln_mt_peerNIRecovq;
	/*
	 * An array of queues for GET/PUT waiting for REPLY/ACK respectively.
	 * There are CPT number of queues. Since response trackers will be
	 * added on the fast path we can't afford to grab the exclusive
	 * net lock to protect these queues. The CPT will be calculated
	 * based on the mdh cookie.
	 */
	struct list_head **ln_mt_rstq;
	/*
	 * A response tracker becomes a zombie when the associated MD is queued
	 * for unlink before the response tracker is detached from the MD. An
	 * entry on a zombie list can be freed when either the remaining
	 * operations on the MD complete or when LNet has shut down.
	 */
	struct list_head **ln_mt_zombie_rstqs;
	/* recovery eq handler */
	struct lnet_eq *ln_mt_eq;

	/*
	 * Completed when the discovery and monitor threads can enter their
	 * work loops
	 */
	struct completion ln_started;
};

#endif