/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 *
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lnet/include/lnet/lib-types.h
 *
 * Types used by the library side routines that do not need to be
 * exposed to the user application
 */

#ifndef __LNET_LIB_TYPES_H__
#define __LNET_LIB_TYPES_H__

#if !defined(__KERNEL__)
# error This include is only for kernel use.
#endif

#include <linux/kthread.h>
#include <linux/uio.h>
#include <linux/semaphore.h>
#include <linux/types.h>

#include <uapi/linux/lnet/lnet-dlc.h>
#include <uapi/linux/lnet/lnetctl.h>

/* Max payload size */
#define LNET_MAX_PAYLOAD	LNET_MTU

#define LNET_MAX_IOV		(LNET_MAX_PAYLOAD >> PAGE_SHIFT)

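/*
 * Worked example (illustrative, not part of the original header): with the
 * usual LNET_MTU of 1 MiB and 4 KiB pages (PAGE_SHIFT == 12), LNET_MAX_IOV
 * works out to 1048576 >> 12 == 256 fragments per message.  Larger page
 * sizes shrink the fragment count accordingly.
 */
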
/*
 * This is the maximum health value.
 * All local and peer NIs created have their health default to this value.
 */
#define LNET_MAX_HEALTH_VALUE	1000

enum lnet_msg_hstatus {
	LNET_MSG_STATUS_OK = 0,
	LNET_MSG_STATUS_LOCAL_INTERRUPT,
	LNET_MSG_STATUS_LOCAL_DROPPED,
	LNET_MSG_STATUS_LOCAL_ABORTED,
	LNET_MSG_STATUS_LOCAL_NO_ROUTE,
	LNET_MSG_STATUS_LOCAL_ERROR,
	LNET_MSG_STATUS_LOCAL_TIMEOUT,
	LNET_MSG_STATUS_REMOTE_ERROR,
	LNET_MSG_STATUS_REMOTE_DROPPED,
	LNET_MSG_STATUS_REMOTE_TIMEOUT,
	LNET_MSG_STATUS_NETWORK_TIMEOUT
};

struct lnet_msg {
	struct list_head	msg_activelist;
	struct list_head	msg_list;	/* Q for credits/MD */

	struct lnet_process_id	msg_target;
	/* Primary NID of the source. */
	lnet_nid_t		msg_initiator;
	/* where is it from, it's only for building event */
	lnet_nid_t		msg_from;
	/*
	 * hold parameters in case message is withheld due
	 * to discovery
	 */
	lnet_nid_t		msg_src_nid_param;
	lnet_nid_t		msg_rtr_nid_param;

	/*
	 * Deadline for the message after which it will be finalized if it
	 * has not completed.
	 */
	ktime_t			msg_deadline;

	/* The message health status. */
	enum lnet_msg_hstatus	msg_health_status;
	/* This is a recovery message */
	unsigned int		msg_recovery:1;
	/* flag to indicate that we do not want to resend this message */
	unsigned int		msg_no_resend:1;

	/* committed for sending */
	unsigned int		msg_tx_committed:1;
	/* CPT # this message committed for sending */
	unsigned int		msg_tx_cpt:15;
	/* committed for receiving */
	unsigned int		msg_rx_committed:1;
	/* CPT # this message committed for receiving */
	unsigned int		msg_rx_cpt:15;
	/* queued for tx credit */
	unsigned int		msg_tx_delayed:1;
	/* queued for RX buffer */
	unsigned int		msg_rx_delayed:1;
	/* ready for pending on RX delay list */
	unsigned int		msg_rx_ready_delay:1;

	unsigned int	msg_vmflush:1;		/* VM trying to free memory */
	unsigned int	msg_target_is_router:1;	/* sending to a router */
	unsigned int	msg_routing:1;		/* being forwarded */
	unsigned int	msg_ack:1;		/* ack on finalize (PUT) */
	unsigned int	msg_sending:1;		/* outgoing message */
	unsigned int	msg_receiving:1;	/* being received */
	unsigned int	msg_txcredit:1;		/* taken an NI send credit */
	unsigned int	msg_peertxcredit:1;	/* taken a peer send credit */
	unsigned int	msg_rtrcredit:1;	/* taken a global router credit */
	unsigned int	msg_peerrtrcredit:1;	/* taken a peer router credit */
	unsigned int	msg_onactivelist:1;	/* on the activelist */
	unsigned int	msg_rdma_get:1;

	struct lnet_peer_ni	*msg_txpeer;	/* peer I'm sending to */
	struct lnet_peer_ni	*msg_rxpeer;	/* peer I received from */

	struct lnet_libmd	*msg_md;
	/* the NI the message was sent or received over */
	struct lnet_ni		*msg_txni;
	struct lnet_ni		*msg_rxni;

	unsigned int		msg_len;
	unsigned int		msg_wanted;
	unsigned int		msg_offset;
	unsigned int		msg_niov;
	struct kvec		*msg_iov;
	lnet_kiov_t		*msg_kiov;

	struct lnet_event	msg_ev;
	struct lnet_hdr		msg_hdr;
};

struct lnet_libhandle {
	struct list_head	lh_hash_chain;
	__u64			lh_cookie;
};

#define lh_entry(ptr, type, member) \
	((type *)((char *)(ptr) - (char *)(&((type *)0)->member)))

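/*
 * Illustrative sketch (not part of the original header): lh_entry() behaves
 * like container_of(), recovering the enclosing object from a pointer to its
 * embedded struct lnet_libhandle.  A handle found on a hash chain could be
 * mapped back to its MD roughly as follows:
 *
 *	struct lnet_libhandle *lh = ...;   // looked up via lh_hash_chain
 *	struct lnet_libmd *md = lh_entry(lh, struct lnet_libmd, md_lh);
 */
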
struct lnet_eq {
	struct list_head	eq_list;
	struct lnet_libhandle	eq_lh;
	unsigned long		eq_enq_seq;
	unsigned long		eq_deq_seq;
	unsigned int		eq_size;
	lnet_eq_handler_t	eq_callback;
	struct lnet_event	*eq_events;
	int			**eq_refs;	/* percpt refcount for EQ */
};

struct lnet_me {
	struct list_head	me_list;
	struct lnet_libhandle	me_lh;
	struct lnet_process_id	me_match_id;
	unsigned int		me_portal;
	unsigned int		me_pos;		/* hash offset in mt_hash */
	__u64			me_match_bits;
	__u64			me_ignore_bits;
	enum lnet_unlink	me_unlink;
	struct lnet_libmd	*me_md;
};

struct lnet_libmd {
	struct list_head	md_list;
	struct lnet_libhandle	md_lh;
	struct lnet_me		*md_me;
	unsigned int		md_offset;
	unsigned int		md_length;
	unsigned int		md_max_size;
	unsigned int		md_options;
	unsigned int		md_flags;
	unsigned int		md_niov;	/* # frags at end of struct */
	struct lnet_eq		*md_eq;
	struct lnet_handle_md	md_bulk_handle;
	union {
		struct kvec	iov[LNET_MAX_IOV];
		lnet_kiov_t	kiov[LNET_MAX_IOV];
	} md_iov;
};

#define LNET_MD_FLAG_ZOMBIE		(1 << 0)
#define LNET_MD_FLAG_AUTO_UNLINK	(1 << 1)
#define LNET_MD_FLAG_ABORTED		(1 << 2)

struct lnet_test_peer {
	/* info about peers we are trying to fail */
	struct list_head	tp_list;	/* ln_test_peers */
	lnet_nid_t		tp_nid;		/* matching nid */
	unsigned int		tp_threshold;	/* # failures to simulate */
};

#define LNET_COOKIE_TYPE_MD	1
#define LNET_COOKIE_TYPE_ME	2
#define LNET_COOKIE_TYPE_EQ	3
#define LNET_COOKIE_TYPE_BITS	2
#define LNET_COOKIE_MASK	((1ULL << LNET_COOKIE_TYPE_BITS) - 1ULL)

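/*
 * Illustrative sketch (not part of the original header): the low
 * LNET_COOKIE_TYPE_BITS of a handle cookie encode the resource type, so the
 * type of an arbitrary cookie can be recovered with the mask, e.g.:
 *
 *	__u64 cookie = lh->lh_cookie;
 *	if ((cookie & LNET_COOKIE_MASK) == LNET_COOKIE_TYPE_MD)
 *		...	// handle refers to an MD
 */
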
struct lnet_ni;				/* forward ref */

struct lnet_lnd {
	/* fields managed by portals */
	struct list_head	lnd_list;	/* stash in the LND table */
	int			lnd_refcount;	/* # active instances */

	/* fields initialized by the LND */

	int  (*lnd_startup)(struct lnet_ni *ni);
	void (*lnd_shutdown)(struct lnet_ni *ni);
	int  (*lnd_ctl)(struct lnet_ni *ni, unsigned int cmd, void *arg);

	/* In data movement APIs below, payload buffers are described as a set
	 * of 'niov' fragments which are...
	 * EXACTLY ONE of
	 * in virtual memory (struct kvec *iov != NULL)
	 * or
	 * in pages (kernel only: lnet_kiov_t *kiov != NULL).
	 * The LND may NOT overwrite these fragment descriptors.
	 * An 'offset' may specify a byte offset within the set of
	 * fragments to start from.
	 */

	/* Start sending a preformatted message.  'private' is NULL for PUT and
	 * GET messages; otherwise this is a response to an incoming message
	 * and 'private' is the 'private' passed to lnet_parse().  Return
	 * non-zero for immediate failure, otherwise complete later with
	 * lnet_finalize(). */
	int (*lnd_send)(struct lnet_ni *ni, void *private,
			struct lnet_msg *msg);

	/* Start receiving 'mlen' bytes of payload data, skipping the following
	 * 'rlen' - 'mlen' bytes.  'private' is the 'private' passed to
	 * lnet_parse().  Return non-zero for immediate failure, otherwise
	 * complete later with lnet_finalize().  This also gives back a receive
	 * credit if the LND does flow control. */
	int (*lnd_recv)(struct lnet_ni *ni, void *private, struct lnet_msg *msg,
			int delayed, unsigned int niov,
			struct kvec *iov, lnet_kiov_t *kiov,
			unsigned int offset, unsigned int mlen,
			unsigned int rlen);

	/* lnet_parse() has had to delay processing of this message
	 * (e.g. waiting for a forwarding buffer or send credits).  Give the
	 * LND a chance to free urgently needed resources.  If called, return 0
	 * for success and do NOT give back a receive credit; that has to wait
	 * until lnd_recv() gets called.  On failure return < 0 and
	 * release resources; lnd_recv() will not be called. */
	int (*lnd_eager_recv)(struct lnet_ni *ni, void *private,
			      struct lnet_msg *msg, void **new_privatep);

	/* notification of peer health */
	void (*lnd_notify)(struct lnet_ni *ni, lnet_nid_t peer, int alive);

	/* query of peer aliveness */
	void (*lnd_query)(struct lnet_ni *ni, lnet_nid_t peer, time64_t *when);

	/* accept a new connection */
	int (*lnd_accept)(struct lnet_ni *ni, struct socket *sock);
};

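/*
 * Illustrative sketch (not from the original header): an LND module
 * typically fills in one of these tables and registers it with LNet,
 * roughly as below.  The names mylnd_* and MYLND are placeholders; real
 * LNDs (e.g. socklnd, o2iblnd) follow this pattern.
 *
 *	static struct lnet_lnd the_mylnd = {
 *		.lnd_startup	= mylnd_startup,
 *		.lnd_shutdown	= mylnd_shutdown,
 *		.lnd_send	= mylnd_send,
 *		.lnd_recv	= mylnd_recv,
 *	};
 *
 *	lnet_register_lnd(&the_mylnd);		// at module init
 *	lnet_unregister_lnd(&the_mylnd);	// at module exit
 */
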
struct lnet_tx_queue {
	int			tq_credits;	/* # tx credits free */
	int			tq_credits_min;	/* lowest it's been */
	int			tq_credits_max;	/* total # tx credits */
	struct list_head	tq_delayed;	/* delayed TXs */
};

enum lnet_net_state {
	/* set when net block is allocated */
	LNET_NET_STATE_INIT = 0,
	/* set when NIs in net are started successfully */
	LNET_NET_STATE_ACTIVE,
	/* set if all NIs in net are in FAILED state */
	LNET_NET_STATE_INACTIVE,
	/* set when shutting down a NET */
	LNET_NET_STATE_DELETING
};

#define LNET_NI_STATE_INIT		(1 << 0)
#define LNET_NI_STATE_ACTIVE		(1 << 1)
#define LNET_NI_STATE_FAILED		(1 << 2)
#define LNET_NI_STATE_RECOVERY_PENDING	(1 << 3)
#define LNET_NI_STATE_DELETING		(1 << 4)

enum lnet_stats_type {
	LNET_STATS_TYPE_SEND = 0,
	LNET_STATS_TYPE_RECV,
	LNET_STATS_TYPE_DROP
};

struct lnet_comm_count {
	atomic_t	co_get_count;
	atomic_t	co_put_count;
	atomic_t	co_reply_count;
	atomic_t	co_ack_count;
	atomic_t	co_hello_count;
};

struct lnet_element_stats {
	struct lnet_comm_count	el_send_stats;
	struct lnet_comm_count	el_recv_stats;
	struct lnet_comm_count	el_drop_stats;
};

struct lnet_net {
	/* chain on the ln_nets */
	struct list_head	net_list;

	/* net ID, which is composed of
	 * (net_type << 16) | net_num.
	 * net_type can be one of the enumerated types defined in
	 * lnet/include/lnet/nidstr.h */
	__u32			net_id;

	/* priority of the network */
	__u32			net_prio;

	/* total number of CPTs in the array */
	__u32			net_ncpts;

	/* cumulative CPTs of all NIs in this net */
	__u32			*net_cpts;

	/* network tunables */
	struct lnet_ioctl_config_lnd_cmn_tunables net_tunables;

	/*
	 * boolean to indicate that the tunables have been set and
	 * should not be reset
	 */
	bool			net_tunables_set;

	/* procedural interface */
	struct lnet_lnd		*net_lnd;

	/* list of NIs on this net */
	struct list_head	net_ni_list;

	/* list of NIs being added, but not started yet */
	struct list_head	net_ni_added;

	/* dying LND instances */
	struct list_head	net_ni_zombie;

	enum lnet_net_state	net_state;
};

struct lnet_ni {
	/* chain on the lnet_net structure */
	struct list_head	ni_netlist;

	/* chain on net_ni_cpt */
	struct list_head	ni_cptlist;

	/* chain on the recovery queue */
	struct list_head	ni_recovery;

	/* MD handle for recovery ping */
	struct lnet_handle_md	ni_ping_mdh;

	/* bond NI on some CPTs */
	__u32			*ni_cpts;

	/* interface's NID */
	lnet_nid_t		ni_nid;

	/* instance-specific data */
	void			*ni_data;

	atomic_t		ni_tx_credits;

	/* percpt TX queues */
	struct lnet_tx_queue	**ni_tx_queues;

	/* percpt reference count */
	int			**ni_refs;

	/* when I was last alive */
	time64_t		ni_last_alive;

	/* pointer to parent network */
	struct lnet_net		*ni_net;

	/* my health status */
	struct lnet_ni_status	*ni_status;

	/* per NI LND tunables */
	struct lnet_lnd_tunables ni_lnd_tunables;

	/* lnd tunables set explicitly */
	bool			ni_lnd_tunables_set;

	struct lnet_element_stats ni_stats;

	/* physical device CPT */
	int			ni_dev_cpt;

	/* sequence number used to round robin over nis within a net */
	__u32			ni_seq;

	/*
	 * health value
	 *	initialized to LNET_MAX_HEALTH_VALUE
	 * Value is decremented every time we fail to send a message over
	 * this NI because of a NI specific failure.
	 * Value is incremented if we successfully send a message.
	 */
	atomic_t		ni_healthv;

	/*
	 * equivalent interfaces to use
	 * This is an array because socklnd bonding can still be configured
	 */
	char			*ni_interfaces[LNET_INTERFACES_NUM];
	struct net		*ni_net_ns;	/* original net namespace */
};

#define LNET_PROTO_PING_MATCHBITS	0x8000000000000000LL

/*
 * Descriptor of a ping info buffer: keep a separate indicator of the
 * size and a reference count. The type is used both as a source and
 * sink of data, so we need to keep some information outside of the
 * area that may be overwritten by network data.
 */
struct lnet_ping_buffer {
	int			pb_nnis;
	atomic_t		pb_refcnt;
	struct lnet_ping_info	pb_info;
};

#define LNET_PING_BUFFER_SIZE(NNIDS) \
	offsetof(struct lnet_ping_buffer, pb_info.pi_ni[NNIDS])
#define LNET_PING_BUFFER_LONI(PBUF)	((PBUF)->pb_info.pi_ni[0].ns_nid)
#define LNET_PING_BUFFER_SEQNO(PBUF)	((PBUF)->pb_info.pi_ni[0].ns_status)

#define LNET_PING_INFO_TO_BUFFER(PINFO) \
	container_of((PINFO), struct lnet_ping_buffer, pb_info)

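/*
 * Illustrative sketch (not from the original header): because pb_info ends
 * in a variable-length array of per-NI status entries, a buffer for 'nnis'
 * NIs is sized with the macro above rather than sizeof(), roughly:
 *
 *	struct lnet_ping_buffer *pbuf;
 *
 *	pbuf = kzalloc(LNET_PING_BUFFER_SIZE(nnis), GFP_KERNEL);
 *	if (pbuf) {
 *		pbuf->pb_nnis = nnis;
 *		atomic_set(&pbuf->pb_refcnt, 1);
 *	}
 *
 * LNet has its own allocation helpers; this only shows how the size macro
 * relates to the pi_ni[] array.
 */
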
/* router checker data, per router */
struct lnet_rc_data {
	/* chain on the_lnet.ln_zombie_rcd or ln_deathrow_rcd */
	struct list_head	rcd_list;
	struct lnet_handle_md	rcd_mdh;	/* ping buffer MD */
	struct lnet_peer_ni	*rcd_gateway;	/* reference to gateway */
	struct lnet_ping_buffer	*rcd_pingbuffer;/* ping buffer */
	int			rcd_nnis;	/* desired size of buffer */
};

struct lnet_peer_ni {
	/* chain on lpn_peer_nis */
	struct list_head	lpni_peer_nis;
	/* chain on remote peer list */
	struct list_head	lpni_on_remote_peer_ni_list;
	/* chain on peer hash */
	struct list_head	lpni_hashlist;
	/* messages blocking for tx credits */
	struct list_head	lpni_txq;
	/* messages blocking for router credits */
	struct list_head	lpni_rtrq;
	/* chain on router list */
	struct list_head	lpni_rtr_list;
	/* pointer to peer net I'm part of */
	struct lnet_peer_net	*lpni_peer_net;
	/* statistics kept on each peer NI */
	struct lnet_element_stats lpni_stats;
	/* spin lock protecting credits and lpni_txq / lpni_rtrq */
	spinlock_t		lpni_lock;
	/* # tx credits available */
	int			lpni_txcredits;
	/* low water mark */
	int			lpni_mintxcredits;
	/* # router credits */
	int			lpni_rtrcredits;
	/* low water mark */
	int			lpni_minrtrcredits;
	/* bytes queued for sending */
	long			lpni_txqnob;
	/* notification outstanding? */
	bool			lpni_notify;
	/* outstanding notification for LND? */
	bool			lpni_notifylnd;
	/* some thread is handling notification */
	bool			lpni_notifying;
	/* SEND event outstanding from ping */
	bool			lpni_ping_notsent;
	/* # times router went dead<->alive. Protected with lpni_lock */
	int			lpni_alive_count;
	/* time of last aliveness news */
	time64_t		lpni_timestamp;
	/* time of last ping attempt */
	time64_t		lpni_ping_timestamp;
	/* != 0 if ping reply expected */
	time64_t		lpni_ping_deadline;
	/* when I was last alive */
	time64_t		lpni_last_alive;
	/* when lpni_ni was queried last time */
	time64_t		lpni_last_query;
	/* network peer is on */
	struct lnet_net		*lpni_net;
	/* reference count */
	atomic_t		lpni_refcount;
	/* CPT this peer attached on */
	int			lpni_cpt;
	/* state flags -- protected by lpni_lock */
	unsigned		lpni_state;
	/* # refs from lnet_route_t::lr_gateway */
	int			lpni_rtr_refcount;
	/* sequence number used to round robin over peer nis within a net */
	__u32			lpni_seq;
	/* sequence number used to round robin over gateways */
	__u32			lpni_gw_seq;
	/* returned RC ping features. Protected with lpni_lock */
	unsigned int		lpni_ping_feats;
	/* routes on this peer */
	struct list_head	lpni_routes;
	/* preferred local nids: if only one, use lpni_pref.nid */
	union lpni_pref {
		lnet_nid_t	nid;
		lnet_nid_t	*nids;
	} lpni_pref;
	/* number of preferred NIDs in lpni_pref_nids */
	__u32			lpni_pref_nnids;
	/* router checker state */
	struct lnet_rc_data	*lpni_rcd;
};

/* Preferred path added due to traffic on non-MR peer_ni */
#define LNET_PEER_NI_NON_MR_PREF	(1 << 0)

struct lnet_peer {
	/* chain on pt_peer_list */
	struct list_head	lp_peer_list;

	/* list of peer nets */
	struct list_head	lp_peer_nets;

	/* list of messages pending discovery */
	struct list_head	lp_dc_pendq;

	/* primary NID of the peer */
	lnet_nid_t		lp_primary_nid;

	/* CPT of peer_table */
	int			lp_cpt;

	/* number of NIDs on this peer */
	int			lp_nnis;

	/* reference count */
	atomic_t		lp_refcount;

	/* lock protecting peer state flags */
	spinlock_t		lp_lock;

	/* peer state flags */
	unsigned		lp_state;

	/* buffer for data pushed by peer */
	struct lnet_ping_buffer	*lp_data;

	/* MD handle for ping in progress */
	struct lnet_handle_md	lp_ping_mdh;

	/* MD handle for push in progress */
	struct lnet_handle_md	lp_push_mdh;

	/* number of NIDs for sizing push data */
	int			lp_data_nnis;

	/* NI config sequence number of peer */
	__u32			lp_peer_seqno;

	/* Local NI config sequence number acked by peer */
	__u32			lp_node_seqno;

	/* Local NI config sequence number sent to peer */
	__u32			lp_node_seqno_sent;

	/* Ping error encountered during discovery. */
	int			lp_ping_error;

	/* Push error encountered during discovery. */
	int			lp_push_error;

	/* Error encountered during discovery. */
	int			lp_dc_error;

	/* time it was put on the ln_dc_working queue */
	time64_t		lp_last_queued;

	/* link on discovery-related lists */
	struct list_head	lp_dc_list;

	/* tasks waiting on discovery of this peer */
	wait_queue_head_t	lp_dc_waitq;
};

/*
 * The status flags in lp_state. Their semantics have been chosen so that
 * lp_state can be zero-initialized.
 *
 * A peer is marked MULTI_RAIL in two cases: it was configured using DLC
 * as multi-rail aware, or the LNET_PING_FEAT_MULTI_RAIL bit was set.
 *
 * A peer is marked NO_DISCOVERY if the LNET_PING_FEAT_DISCOVERY bit was
 * NOT set when the peer was pinged by discovery.
 */
#define LNET_PEER_MULTI_RAIL	(1 << 0)	/* Multi-rail aware */
#define LNET_PEER_NO_DISCOVERY	(1 << 1)	/* Peer disabled discovery */

/*
 * A peer is marked CONFIGURED if it was configured by DLC.
 *
 * In addition, a peer is marked DISCOVERED if it has fully passed
 * through Peer Discovery.
 *
 * When Peer Discovery is disabled, the discovery thread will mark
 * peers REDISCOVER to indicate that they should be re-examined if
 * discovery is (re)enabled on the node.
 *
 * A peer that was created as the result of inbound traffic will not
 * be marked CONFIGURED.
 */
#define LNET_PEER_CONFIGURED	(1 << 2)	/* Configured via DLC */
#define LNET_PEER_DISCOVERED	(1 << 3)	/* Peer was discovered */
#define LNET_PEER_REDISCOVER	(1 << 4)	/* Discovery was disabled */

/*
 * A peer is marked DISCOVERING when discovery is in progress.
 * The other flags below correspond to stages of discovery.
 */
#define LNET_PEER_DISCOVERING	(1 << 5)	/* Discovering */
#define LNET_PEER_DATA_PRESENT	(1 << 6)	/* Remote peer data present */
#define LNET_PEER_NIDS_UPTODATE	(1 << 7)	/* Remote peer info uptodate */
#define LNET_PEER_PING_SENT	(1 << 8)	/* Waiting for REPLY to Ping */
#define LNET_PEER_PUSH_SENT	(1 << 9)	/* Waiting for ACK of Push */
#define LNET_PEER_PING_FAILED	(1 << 10)	/* Ping send failure */
#define LNET_PEER_PUSH_FAILED	(1 << 11)	/* Push send failure */

/*
 * A ping can be forced as a way to fix up state, or as a manual
 * intervention by an admin.
 * A push can be forced in circumstances that would normally not
 * allow for one to happen.
 */
#define LNET_PEER_FORCE_PING	(1 << 12)	/* Forced Ping */
#define LNET_PEER_FORCE_PUSH	(1 << 13)	/* Forced Push */

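/*
 * Illustrative sketch (not from the original header): lp_state is a plain
 * bitmask, so the flags above are tested and combined the usual way under
 * lp_lock, e.g. to check whether a peer is multi-rail aware and has
 * completed discovery:
 *
 *	spin_lock(&lp->lp_lock);
 *	if ((lp->lp_state & (LNET_PEER_MULTI_RAIL | LNET_PEER_DISCOVERED)) ==
 *	    (LNET_PEER_MULTI_RAIL | LNET_PEER_DISCOVERED))
 *		...	// treat peer as a discovered multi-rail peer
 *	spin_unlock(&lp->lp_lock);
 */
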
struct lnet_peer_net {
	/* chain on lp_peer_nets */
	struct list_head	lpn_peer_nets;

	/* list of peer_nis on this network */
	struct list_head	lpn_peer_nis;

	/* pointer to the peer I'm part of */
	struct lnet_peer	*lpn_peer;

	/* reference count */
	atomic_t		lpn_refcount;
};

#define LNET_PEER_HASH_BITS	9
#define LNET_PEER_HASH_SIZE	(1 << LNET_PEER_HASH_BITS)

/*
 * peer hash table - one per CPT
 *
 * protected by lnet_net_lock/EX for update
 *	pt_version
 *	pt_number
 *	pt_hash
 *	pt_peer_list
 *	pt_peers
 * protected by pt_zombie_lock:
 *	pt_zombie_list
 *	pt_zombies
 *
 * pt_zombie lock nests inside lnet_net_lock
 */
struct lnet_peer_table {
	int			pt_version;	/* /proc validity stamp */
	int			pt_number;	/* # peers_ni extant */
	struct list_head	*pt_hash;	/* NID->peer hash */
	struct list_head	pt_peer_list;	/* peers */
	int			pt_peers;	/* # peers */
	struct list_head	pt_zombie_list;	/* zombie peer_ni */
	int			pt_zombies;	/* # zombie peers_ni */
	spinlock_t		pt_zombie_lock;	/* protect list and count */
};

/* peer aliveness is enabled only on routers for peers in a network where the
 * struct lnet_ni::ni_peertimeout has been set to a positive value
 */
#define lnet_peer_aliveness_enabled(lp) (the_lnet.ln_routing != 0 && \
					 ((lp)->lpni_net) && \
					 (lp)->lpni_net->net_tunables.lct_peer_timeout > 0)

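/*
 * Illustrative sketch (not from the original header): callers are expected
 * to test this macro before doing any per-peer aliveness bookkeeping, e.g.:
 *
 *	if (lnet_peer_aliveness_enabled(lpni))
 *		...	// track/query aliveness; otherwise skip it
 *
 * It only evaluates true on a routing node, for a peer NI whose network has
 * a positive peer timeout configured.
 */
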
struct lnet_route {
	struct list_head	lr_list;	/* chain on net */
	struct list_head	lr_gwlist;	/* chain on gateway */
	struct lnet_peer_ni	*lr_gateway;	/* router node */
	__u32			lr_net;		/* remote network number */
	int			lr_seq;		/* sequence for round-robin */
	unsigned int		lr_downis;	/* number of down NIs */
	__u32			lr_hops;	/* how far I am */
	unsigned int		lr_priority;	/* route priority */
};

#define LNET_REMOTE_NETS_HASH_DEFAULT	(1U << 7)
#define LNET_REMOTE_NETS_HASH_MAX	(1U << 16)
#define LNET_REMOTE_NETS_HASH_SIZE	(1 << the_lnet.ln_remote_nets_hbits)

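/*
 * Worked example (illustrative, not in the original header): with the
 * default of 7 hash bits the remote-nets table has 1U << 7 == 128 buckets;
 * ln_remote_nets_hbits may be raised up to 16 bits, i.e. 65536 buckets.
 */
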
struct lnet_remotenet {
	/* chain on ln_remote_nets_hash */
	struct list_head	lrn_list;
	struct list_head	lrn_routes;
};

/** lnet message has credit and can be submitted to lnd for send/receive */
#define LNET_CREDIT_OK		0
/** lnet message is waiting for credit */
#define LNET_CREDIT_WAIT	1
/** lnet message is waiting for discovery */
#define LNET_DC_WAIT		2

struct lnet_rtrbufpool {
	/* my free buffer pool */
	struct list_head	rbp_bufs;
	/* messages blocking for a buffer */
	struct list_head	rbp_msgs;
	/* # pages in each buffer */
	int			rbp_npages;
	/* requested number of buffers */
	int			rbp_req_nbuffers;
	/* # buffers actually allocated */
	int			rbp_nbuffers;
	/* # free buffers / blocked messages */
	int			rbp_credits;
};

struct lnet_rtrbuf {
	struct list_head	rb_list;	/* chain on rbp_bufs */
	struct lnet_rtrbufpool	*rb_pool;	/* owning pool */
	lnet_kiov_t		rb_kiov[0];	/* the buffer space */
};

#define LNET_PEER_HASHSIZE	503	/* prime! */

enum lnet_match_flags {
	/* Didn't match anything */
	LNET_MATCHMD_NONE	= (1 << 0),
	/* Matched OK */
	LNET_MATCHMD_OK		= (1 << 1),
	/* Must be discarded */
	LNET_MATCHMD_DROP	= (1 << 2),
	/* match and buffer is exhausted */
	LNET_MATCHMD_EXHAUSTED	= (1 << 3),
	/* match or drop */
	LNET_MATCHMD_FINISH	= (LNET_MATCHMD_OK | LNET_MATCHMD_DROP),
};

/* Options for struct lnet_portal::ptl_options */
#define LNET_PTL_LAZY		(1 << 0)
#define LNET_PTL_MATCH_UNIQUE	(1 << 1)	/* unique match, for RDMA */
#define LNET_PTL_MATCH_WILDCARD	(1 << 2)	/* wildcard match, request portal */

/* parameter for matching operations (GET, PUT) */
struct lnet_match_info {
	struct lnet_process_id	mi_id;
	unsigned int		mi_portal;
	unsigned int		mi_rlength;
	unsigned int		mi_roffset;
};

/* ME hash of RDMA portal */
#define LNET_MT_HASH_BITS	8
#define LNET_MT_HASH_SIZE	(1 << LNET_MT_HASH_BITS)
#define LNET_MT_HASH_MASK	(LNET_MT_HASH_SIZE - 1)
/* we allocate (LNET_MT_HASH_SIZE + 1) entries for lnet_match_table::mt_hash,
 * the last entry is reserved for MEs with ignore-bits */
#define LNET_MT_HASH_IGNORE	LNET_MT_HASH_SIZE
/* __u64 has 2^6 bits, so need 2^(LNET_MT_HASH_BITS - LNET_MT_BITS_U64) which
 * is 4 __u64s as bit-map, and add an extra __u64 (only use one bit) for the
 * ME-list with ignore-bits, which is mtable::mt_hash[LNET_MT_HASH_IGNORE] */
#define LNET_MT_BITS_U64	6	/* 2^6 bits */
#define LNET_MT_EXHAUSTED_BITS	(LNET_MT_HASH_BITS - LNET_MT_BITS_U64)
#define LNET_MT_EXHAUSTED_BMAP	((1 << LNET_MT_EXHAUSTED_BITS) + 1)

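/*
 * Worked example (illustrative, not in the original header): with
 * LNET_MT_HASH_BITS == 8 and LNET_MT_BITS_U64 == 6 there are 256 hash
 * buckets and each __u64 covers 64 of them, so LNET_MT_EXHAUSTED_BITS is 2
 * and LNET_MT_EXHAUSTED_BMAP is (1 << 2) + 1 == 5: four __u64 words for the
 * regular buckets plus one extra word (one bit used) for the ignore-bits
 * list at mt_hash[LNET_MT_HASH_IGNORE].
 */
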
/* portal match table */
struct lnet_match_table {
	/* reserved for upcoming patches, CPU partition ID */
	unsigned int		mt_cpt;
	unsigned int		mt_portal;	/* portal index */
	/* match table is set as "enabled" if there's non-exhausted MD
	 * attached on mt_mhash, it's only valid for wildcard portal */
	unsigned int		mt_enabled;
	/* bitmap to flag whether MEs on mt_hash are exhausted or not */
	__u64			mt_exhausted[LNET_MT_EXHAUSTED_BMAP];
	struct list_head	*mt_mhash;	/* matching hash */
};

/* these are only useful for wildcard portal */
/* Turn off message rotor for wildcard portals */
#define LNET_PTL_ROTOR_OFF	0
/* round-robin dispatch all PUT messages for wildcard portals */
#define LNET_PTL_ROTOR_ON	1
/* round-robin dispatch routed PUT message for wildcard portals */
#define LNET_PTL_ROTOR_RR_RT	2
/* dispatch routed PUT message by hashing source NID for wildcard portals */
#define LNET_PTL_ROTOR_HASH_RT	3

struct lnet_portal {
	unsigned int		ptl_index;	/* portal ID, reserved */
	/* flags on this portal: lazy, unique... */
	unsigned int		ptl_options;
	/* list of messages which are stealing buffer */
	struct list_head	ptl_msg_stealing;
	/* messages blocking for MD */
	struct list_head	ptl_msg_delayed;
	/* Match table for each CPT */
	struct lnet_match_table	**ptl_mtables;
	/* spread rotor of incoming "PUT" */
	unsigned int		ptl_rotor;
	/* # active entries for this portal */
	int			ptl_mt_nmaps;
	/* array of active entries' cpu-partition-id */
	int			ptl_mt_maps[0];
};

#define LNET_LH_HASH_BITS	12
#define LNET_LH_HASH_SIZE	(1ULL << LNET_LH_HASH_BITS)
#define LNET_LH_HASH_MASK	(LNET_LH_HASH_SIZE - 1)

/* resource container (ME, MD, EQ) */
struct lnet_res_container {
	unsigned int		rec_type;	/* container type */
	__u64			rec_lh_cookie;	/* cookie generator */
	struct list_head	rec_active;	/* active resource list */
	struct list_head	*rec_lh_hash;	/* handle hash */
};

/* message container */
struct lnet_msg_container {
	int			msc_init;	/* initialized or not */
	/* max # threads finalizing */
	int			msc_nfinalizers;
	/* msgs waiting to complete finalizing */
	struct list_head	msc_finalizing;
	struct list_head	msc_active;	/* active message list */
	/* threads doing finalization */
	void			**msc_finalizers;
};

/* Peer Discovery states */
#define LNET_DC_STATE_SHUTDOWN	0	/* not started */
#define LNET_DC_STATE_RUNNING	1	/* started up OK */
#define LNET_DC_STATE_STOPPING	2	/* telling thread to stop */

/* Router Checker states */
#define LNET_MT_STATE_SHUTDOWN	0	/* not started */
#define LNET_MT_STATE_RUNNING	1	/* started up OK */
#define LNET_MT_STATE_STOPPING	2	/* telling thread to stop */

/* LNet states */
#define LNET_STATE_SHUTDOWN	0	/* not started */
#define LNET_STATE_RUNNING	1	/* started up OK */
#define LNET_STATE_STOPPING	2	/* telling thread to stop */

struct lnet {
	/* CPU partition table of LNet */
	struct cfs_cpt_table		*ln_cpt_table;
	/* number of CPTs in ln_cpt_table */
	unsigned int			ln_cpt_number;
	unsigned int			ln_cpt_bits;

	/* protect LNet resources (ME/MD/EQ) */
	struct cfs_percpt_lock		*ln_res_lock;

	/* the vector of portals */
	struct lnet_portal		**ln_portals;
	/* percpt ME containers */
	struct lnet_res_container	**ln_me_containers;
	/* percpt MD container */
	struct lnet_res_container	**ln_md_containers;

	/* Event Queue container */
	struct lnet_res_container	ln_eq_container;
	wait_queue_head_t		ln_eq_waitq;
	spinlock_t			ln_eq_wait_lock;

	unsigned int			ln_remote_nets_hbits;

	/* protect NI, peer table, credits, routers, rtrbuf... */
	struct cfs_percpt_lock		*ln_net_lock;
	/* percpt message containers for active/finalizing/freed message */
	struct lnet_msg_container	**ln_msg_containers;
	struct lnet_counters		**ln_counters;
	struct lnet_peer_table		**ln_peer_tables;
	/* list of peer nis not on a local network */
	struct list_head		ln_remote_peer_ni_list;
	/* failure simulation */
	struct list_head		ln_test_peers;
	struct list_head		ln_drop_rules;
	struct list_head		ln_delay_rules;

	struct list_head		ln_nets;
	/* the loopback NI */
	struct lnet_ni			*ln_loni;
	/* network zombie list */
	struct list_head		ln_net_zombie;
	/* resend messages list */
	struct list_head		ln_msg_resend;
	/* spin lock to protect the msg resend list */
	spinlock_t			ln_msg_resend_lock;

	/* remote networks with routes to them */
	struct list_head		*ln_remote_nets_hash;
	__u64				ln_remote_nets_version;
	/* list of all known routers */
	struct list_head		ln_routers;
	__u64				ln_routers_version;
	/* percpt router buffer pools */
	struct lnet_rtrbufpool		**ln_rtrpools;

	/*
	 * Ping target / Push source
	 *
	 * The ping target and push source share a single buffer. The
	 * ln_ping_target is protected against concurrent updates by
	 * ln_api_mutex.
	 */
	struct lnet_handle_md		ln_ping_target_md;
	struct lnet_handle_eq		ln_ping_target_eq;
	struct lnet_ping_buffer		*ln_ping_target;
	atomic_t			ln_ping_target_seqno;

	/*
	 * ln_push_nnis contains the desired size of the push target.
	 * The lnet_net_lock is used to handle update races. The old
	 * buffer may linger a while after it has been unlinked, in
	 * which case the event handler cleans up.
	 */
	struct lnet_handle_eq		ln_push_target_eq;
	struct lnet_handle_md		ln_push_target_md;
	struct lnet_ping_buffer		*ln_push_target;
	int				ln_push_target_nnis;

	/* discovery event queue handle */
	struct lnet_handle_eq		ln_dc_eqh;
	/* discovery requests */
	struct list_head		ln_dc_request;
	/* discovery working list */
	struct list_head		ln_dc_working;
	/* discovery expired list */
	struct list_head		ln_dc_expired;
	/* discovery thread wait queue */
	wait_queue_head_t		ln_dc_waitq;
	/* discovery startup/shutdown state */
	int				ln_dc_state;

	/* monitor thread startup/shutdown state */
	int				ln_mt_state;
	/* router checker's event queue */
	struct lnet_handle_eq		ln_rc_eqh;
	/* rcd still pending on net */
	struct list_head		ln_rcd_deathrow;
	/* rcd ready for free */
	struct list_head		ln_rcd_zombie;
	/* serialise startup/shutdown */
	struct semaphore		ln_mt_signal;

	struct mutex			ln_api_mutex;
	struct mutex			ln_lnd_mutex;
	/* Have I called LNetNIInit myself? */
	int				ln_niinit_self;
	/* LNetNIInit/LNetNIFini counter */
	int				ln_refcount;
	/* SHUTDOWN/RUNNING/STOPPING */
	int				ln_state;

	int				ln_routing;	/* am I a router? */
	lnet_pid_t			ln_pid;		/* requested pid */
	/* uniquely identifies this ni in this epoch */
	__u64				ln_interface_cookie;
	/* registered LNDs */
	struct list_head		ln_lnds;

	/* test protocol compatibility flags */
	int				ln_testprotocompat;

	/* 0 - load the NIs from the mod params
	 * 1 - do not load the NIs from the mod params
	 * Reverse logic to ensure that other calls to LNetNIInit
	 * need no change
	 */
	bool				ln_nis_from_mod_params;

	/*
	 * waitq for the monitor thread. The monitor thread takes care of
	 * checking routes, timed-out messages and resending messages.
	 */
	wait_queue_head_t		ln_mt_waitq;

	/* per-cpt resend queues */
	struct list_head		**ln_mt_resendqs;
	/* local NIs to recover */
	struct list_head		ln_mt_localNIRecovq;
	/* recovery eq handler */
	struct lnet_handle_eq		ln_mt_eqh;