// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 */

/* This file is part of Lustre, http://www.lustre.org/ */

#ifndef __UAPI_LNET_TYPES_H__
#define __UAPI_LNET_TYPES_H__

#include <linux/types.h>
#include <linux/string.h>
#include <asm/byteorder.h>
#include <linux/lnet/lnet-idl.h>

/** \addtogroup lnet_addr
 * @{ */

#define LNET_VERSION "0.7.0"

/** Portal reserved for LNet's own use.
 * \see lustre/include/lustre/lustre_idl.h for Lustre portal assignments.
 */
#define LNET_RESERVED_PORTAL 0

/** wildcard NID that matches any end-point address */
#define LNET_NID_ANY (~(lnet_nid_t) 0)
/** wildcard PID that matches any lnet_pid_t */
#define LNET_PID_ANY (~(lnet_pid_t) 0)

static inline int LNET_NID_IS_ANY(const struct lnet_nid *nid)
{
	/* A NULL pointer can be used to mean "ANY" */
	return !nid || nid->nid_type == 0xFF;
}

#define LNET_ANY_NID ((struct lnet_nid) \
	{0xFF, 0xFF, ~0, {~0, ~0, ~0, ~0} })

#define LNET_PID_RESERVED 0xf0000000 /* reserved bits in PID */
#define LNET_PID_USERFLAG 0x80000000 /* set in userspace peers */
#define LNET_PID_LUSTRE 12345

/* how an LNET NID encodes net:address */
/** extract the address part of an lnet_nid_t */
static inline __u32 LNET_NIDADDR(lnet_nid_t nid)
{
	return nid & 0xffffffff;
}

/** extract the network part of an lnet_nid_t */
static inline __u32 LNET_NIDNET(lnet_nid_t nid)
{
	return (nid >> 32) & 0xffffffff;
}

/** extract the network number from a net */
static inline __u32 LNET_NETNUM(__u32 net)
{
	return net & 0xffff;
}

/** extract the network type from a net */
static inline __u32 LNET_NETTYP(__u32 net)
{
	return (net >> 16) & 0xff;
}

/** compose a net from a network type and number */
static inline __u32 LNET_MKNET(__u32 type, __u32 num)
{
	return (type << 16) | num;
}

/** compose an lnet_nid_t from a net and an address */
static inline lnet_nid_t LNET_MKNID(__u32 net, __u32 addr)
{
	return (((__u64)net) << 32) | addr;
}

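/* Illustrative sketch, not part of the LNet UAPI: shows how the helpers
 * above compose and decompose the net:address encoding of an lnet_nid_t.
 * The function name is hypothetical, and the network type is a bare
 * literal because LND type constants (e.g. SOCKLND) may not be defined
 * when this header is included (see the nid_is_lo0() note below).
 */
static inline int lnet_nid_codec_example(void)
{
	__u32 net = LNET_MKNET(2, 3);			/* type 2, number 3 */
	lnet_nid_t nid = LNET_MKNID(net, 0x0a000001);	/* address 10.0.0.1 */

	/* decomposition recovers exactly what was encoded */
	return LNET_NIDNET(nid) == net &&
	       LNET_NETTYP(net) == 2 &&
	       LNET_NETNUM(net) == 3 &&
	       LNET_NIDADDR(nid) == 0x0a000001;
}
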
/** The lolnd NID (i.e. myself) */
#define LNET_NID_LO_0 LNET_MKNID(LNET_MKNET(LOLND, 0), 0)

#define LNET_NET_ANY LNET_NIDNET(LNET_NID_ANY)

static inline bool nid_is_nid4(const struct lnet_nid *nid)
{
	return NID_ADDR_BYTES(nid) == 4;
}

/* check for address set */
static inline bool nid_addr_is_set(const struct lnet_nid *nid)
{
	__u8 *addr = (__u8 *)(&nid->nid_addr[0]);
	int i;

	for (i = 0; i < NID_ADDR_BYTES(nid); i++)
		if (addr[i])
			return true;

	return false;
}

/* LOLND may not be defined yet, so we cannot use an inline */
#define nid_is_lo0(__nid) \
	((__nid)->nid_type == LOLND && \
	 nid_is_nid4(__nid) && \
	 (__nid)->nid_num == 0 && \
	 (__nid)->nid_addr[0] == 0)

static inline __u32 LNET_NID_NET(const struct lnet_nid *nid)
{
	if (LNET_NID_IS_ANY(nid))
		return LNET_NET_ANY;

	return LNET_MKNET(nid->nid_type, __be16_to_cpu(nid->nid_num));
}

static inline void lnet_nid4_to_nid(lnet_nid_t nid4, struct lnet_nid *nid)
{
	if (nid4 == LNET_NID_ANY) {
		*nid = LNET_ANY_NID;
		return;
	}

	nid->nid_size = 0;
	nid->nid_type = LNET_NETTYP(LNET_NIDNET(nid4));
	nid->nid_num = __cpu_to_be16(LNET_NETNUM(LNET_NIDNET(nid4)));
	nid->nid_addr[0] = __cpu_to_be32(LNET_NIDADDR(nid4));
	nid->nid_addr[1] = nid->nid_addr[2] = nid->nid_addr[3] = 0;
}

static inline lnet_nid_t lnet_nid_to_nid4(const struct lnet_nid *nid)
{
	if (LNET_NID_IS_ANY(nid))
		return LNET_NID_ANY;

	return LNET_MKNID(LNET_NID_NET(nid), __be32_to_cpu(nid->nid_addr[0]));
}

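/* Illustrative sketch, not part of the LNet UAPI: a nid4 survives a round
 * trip through the large-address form. The function name is hypothetical.
 */
static inline int lnet_nid4_roundtrip_example(lnet_nid_t nid4)
{
	struct lnet_nid nid;

	lnet_nid4_to_nid(nid4, &nid);
	/* the wildcard maps to LNET_ANY_NID rather than a 4-byte address */
	if (nid4 == LNET_NID_ANY)
		return lnet_nid_to_nid4(&nid) == LNET_NID_ANY;
	/* any other nid4 becomes a 4-byte large NID and converts back intact */
	return nid_is_nid4(&nid) && lnet_nid_to_nid4(&nid) == nid4;
}
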
static inline int nid_same(const struct lnet_nid *n1,
			   const struct lnet_nid *n2)
{
	return n1->nid_size == n2->nid_size &&
	       n1->nid_type == n2->nid_type &&
	       n1->nid_num == n2->nid_num &&
	       n1->nid_addr[0] == n2->nid_addr[0] &&
	       n1->nid_addr[1] == n2->nid_addr[1] &&
	       n1->nid_addr[2] == n2->nid_addr[2] &&
	       n1->nid_addr[3] == n2->nid_addr[3];
}

/* This can be used when we need to hash a nid */
static inline unsigned long nidhash(const struct lnet_nid *nid)
{
	int i;
	unsigned long hash = 0;

	hash ^= LNET_NID_NET(nid);
	for (i = 0; i < 4; i++)
		hash ^= nid->nid_addr[i];

	return hash;
}

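/* Illustrative sketch, not part of the LNet UAPI: nidhash() is typically
 * used to pick a bucket in a power-of-two hash table. The macro and
 * function names, and the table size, are assumptions for the example.
 */
#define NID_EXAMPLE_HASH_BITS	7	/* hypothetical: 128 buckets */
static inline unsigned int nid_example_bucket(const struct lnet_nid *nid)
{
	return nidhash(nid) & ((1U << NID_EXAMPLE_HASH_BITS) - 1);
}
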
struct lnet_counters_health {
	__u32	lch_rst_alloc;
	__u32	lch_resend_count;
	__u32	lch_response_timeout_count;
	__u32	lch_local_interrupt_count;
	__u32	lch_local_dropped_count;
	__u32	lch_local_aborted_count;
	__u32	lch_local_no_route_count;
	__u32	lch_local_timeout_count;
	__u32	lch_local_error_count;
	__u32	lch_remote_dropped_count;
	__u32	lch_remote_error_count;
	__u32	lch_remote_timeout_count;
	__u32	lch_network_timeout_count;
};

struct lnet_counters {
	struct lnet_counters_common lct_common;
	struct lnet_counters_health lct_health;
};

/*
 * This is a hard-coded limit on the number of interfaces supported by
 * the interface bonding implemented by the ksocknal LND. It must be
 * defined here because it is used in LNet data structures that are
 * common to all LNDs.
 */
#define LNET_INTERFACES_NUM 16

/* The minimum number of interfaces per node supported by LNet. */
#define LNET_INTERFACES_MIN 16
/* The default - arbitrary - value of the lnet_max_interfaces tunable. */
#define LNET_INTERFACES_MAX_DEFAULT 200

/**
 * Objects maintained by the LNet are accessed through handles. Handle types
 * have names of the form lnet_handle_xx, where xx is one of the two letter
 * object type codes ('md' for memory descriptor, and
 * 'me' for match entry). Each type of object is given a unique handle type
 * to enhance type checking.
 */
#define LNET_WIRE_HANDLE_COOKIE_NONE (~0ULL)

struct lnet_handle_md {
	__u64	cookie;
};

/**
 * Invalidate md handle \a h.
 */
static inline void LNetInvalidateMDHandle(struct lnet_handle_md *h)
{
	h->cookie = LNET_WIRE_HANDLE_COOKIE_NONE;
}

/**
 * Check whether handle \a h is invalid.
 *
 * \return 1 if handle is invalid, 0 if valid.
 */
static inline int LNetMDHandleIsInvalid(struct lnet_handle_md h)
{
	return (LNET_WIRE_HANDLE_COOKIE_NONE == h.cookie);
}

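/* Illustrative sketch, not part of the LNet UAPI: the invalidate/check
 * pair above is typically used to mark an MD handle as "not yet attached".
 * The function name is hypothetical.
 */
static inline int lnet_md_handle_example(void)
{
	struct lnet_handle_md mdh;

	LNetInvalidateMDHandle(&mdh);		/* mark the handle unused */
	return LNetMDHandleIsInvalid(mdh);	/* 1 until a real MD is bound */
}
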
/**
 * Global process ID.
 */
struct lnet_process_id {
	/** node id */
	lnet_nid_t nid;
	/** process id */
	lnet_pid_t pid;
};

/**
 * Global process ID - with large addresses
 */
struct lnet_processid {
	/** node id */
	struct lnet_nid nid;
	/** process id */
	lnet_pid_t pid;
};

static inline void
lnet_pid4_to_pid(struct lnet_process_id pid4, struct lnet_processid *pid)
{
	pid->pid = pid4.pid;
	lnet_nid4_to_nid(pid4.nid, &pid->nid);
}

static inline struct lnet_process_id
lnet_pid_to_pid4(struct lnet_processid *pid)
{
	struct lnet_process_id ret;

	ret.pid = pid->pid;
	ret.nid = lnet_nid_to_nid4(&pid->nid);
	return ret;
}

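/* Illustrative sketch, not part of the LNet UAPI: converting between the
 * legacy and large-address process ID forms is lossless for nid4-based
 * IDs. The function name is hypothetical.
 */
static inline int lnet_pid_roundtrip_example(struct lnet_process_id pid4)
{
	struct lnet_processid pid;
	struct lnet_process_id back;

	lnet_pid4_to_pid(pid4, &pid);
	back = lnet_pid_to_pid4(&pid);
	return back.nid == pid4.nid && back.pid == pid4.pid;
}
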
/** @} lnet_addr */

/** \addtogroup lnet_me
 * @{ */

/**
 * Specifies whether the match entry or memory descriptor should be unlinked
 * automatically (LNET_UNLINK) or not (LNET_RETAIN).
 */
enum lnet_unlink {
	LNET_RETAIN = 0,
	LNET_UNLINK
};

/**
 * Values of the type enum lnet_ins_pos are used to control where a new match
 * entry is inserted. The value LNET_INS_BEFORE is used to insert the new
 * entry before the current entry or before the head of the list. The value
 * LNET_INS_AFTER is used to insert the new entry after the current entry
 * or after the last item in the list.
 */
enum lnet_ins_pos {
	/** insert ME before current position or head of the list */
	LNET_INS_BEFORE,
	/** insert ME after current position or tail of the list */
	LNET_INS_AFTER,
	/** attach ME at tail of local CPU partition ME list */
	LNET_INS_LOCAL
};

/** @} lnet_me */

/** \addtogroup lnet_md
 * @{ */

struct lnet_hdr_nid16 {
	char	_bytes[sizeof(struct lnet_hdr)];
} __attribute__((packed));

struct lnet_event;

/**
 * Event queue handler function type.
 *
 * The EQ handler runs for each event that is deposited into the EQ. The
 * handler is supplied with a pointer to the event that triggered the
 * handler invocation.
 *
 * The handler must not block, must be reentrant, and must not call any LNet
 * API functions. It should return as quickly as possible.
 * (An illustrative handler sketch follows struct lnet_event below.)
 */
typedef void (*lnet_handler_t)(struct lnet_event *event);

/**
 * Defines the visible parts of a memory descriptor. Values of this type
 * are used to initialize memory descriptors. An illustrative initialization
 * sketch follows the LNET_MD_* option flags below.
 */
struct lnet_md {
	/**
	 * Specify the memory region associated with the memory descriptor.
	 * If the options field has:
	 * - LNET_MD_KIOV bit set: The start field points to the starting
	 * address of an array of struct bio_vec and the length field specifies
	 * the number of entries in the array. The length can't be bigger
	 * than LNET_MAX_IOV. The struct bio_vec is used to describe page-based
	 * fragments that are not necessarily mapped in virtual memory.
	 * - Otherwise: The memory region is contiguous. The start field
	 * specifies the starting address for the memory region and the
	 * length field specifies its length.
	 *
	 * When the memory region is fragmented, all fragments but the first
	 * one must start on page boundary, and all but the last must end on
	 * page boundary.
	 */
	void		*start;
	unsigned int	 length;
	/**
	 * Specifies the maximum number of operations that can be performed
	 * on the memory descriptor. An operation is any action that could
	 * possibly generate an event. In the usual case, the threshold value
	 * is decremented for each operation on the MD. When the threshold
	 * drops to zero, the MD becomes inactive and does not respond to
	 * operations. A threshold value of LNET_MD_THRESH_INF indicates that
	 * there is no bound on the number of operations that may be applied
	 * to a MD.
	 */
	int		 threshold;
	/**
	 * Specifies the largest incoming request that the memory descriptor
	 * should respond to. When the unused portion of a MD (length -
	 * local offset) falls below this value, the MD becomes inactive and
	 * does not respond to further operations. This value is only used
	 * if the LNET_MD_MAX_SIZE option is set.
	 */
	int		 max_size;
	/**
	 * Specifies the behavior of the memory descriptor. A bitwise OR
	 * of the following values can be used:
	 * - LNET_MD_OP_PUT: The LNet PUT operation is allowed on this MD.
	 * - LNET_MD_OP_GET: The LNet GET operation is allowed on this MD.
	 * - LNET_MD_MANAGE_REMOTE: The offset used in accessing the memory
	 * region is provided by the incoming request. By default, the
	 * offset is maintained locally. When maintained locally, the
	 * offset is incremented by the length of the request so that
	 * the next operation (PUT or GET) will access the next part of
	 * the memory region. Note that only one offset variable exists
	 * per memory descriptor. If both PUT and GET operations are
	 * performed on a memory descriptor, the offset is updated each time.
	 * - LNET_MD_TRUNCATE: The length provided in the incoming request can
	 * be reduced to match the memory available in the region (determined
	 * by subtracting the offset from the length of the memory region).
	 * By default, if the length in the incoming operation is greater
	 * than the amount of memory available, the operation is rejected.
	 * - LNET_MD_ACK_DISABLE: An acknowledgment should not be sent for
	 * incoming PUT operations, even if requested. By default,
	 * acknowledgments are sent for PUT operations that request an
	 * acknowledgment. Acknowledgments are never sent for GET operations.
	 * The data sent in the REPLY serves as an implicit acknowledgment.
	 * - LNET_MD_KIOV: The start and length fields specify an array of
	 * struct bio_vec.
	 * - LNET_MD_MAX_SIZE: The max_size field is valid.
	 * - LNET_MD_BULK_HANDLE: The bulk_handle field is valid.
	 * - LNET_MD_TRACK_RESPONSE: Enable response tracking on this MD
	 * regardless of the value of the lnet_response_tracking param.
	 * - LNET_MD_NO_TRACK_RESPONSE: Disable response tracking on this MD
	 * regardless of the value of the lnet_response_tracking param.
	 * - LNET_MD_GNILND: Disable warning about exceeding LNET_MAX_IOV.
	 *
	 * Note:
	 * - LNET_MD_KIOV allows for a scatter/gather capability for memory
	 * descriptors.
	 * - When LNET_MD_MAX_SIZE is set, the total length of the memory
	 * region (i.e. sum of all fragment lengths) must not be less than
	 * \a max_size.
	 */
	unsigned int	 options;
	/**
	 * A user-specified value that is associated with the memory
	 * descriptor. The value does not need to be a pointer, but must fit
	 * in the space used by a pointer. This value is recorded in events
	 * associated with operations on this MD.
	 */
	void		*user_ptr;
	/**
	 * The event handler used to log the operations performed on
	 * the memory region. If this argument is NULL operations
	 * performed on this memory descriptor are not logged.
	 */
	lnet_handler_t	handler;
	/**
	 * The bulk MD handle which was registered to describe the buffers
	 * either to be used to transfer data to the peer or receive data
	 * from the peer. This allows LNet to properly determine the NUMA
	 * node on which the memory was allocated and use that to select the
	 * nearest local network interface. This value is only used
	 * if the LNET_MD_BULK_HANDLE option is set.
	 */
	struct lnet_handle_md bulk_handle;
};

/* Max Transfer Unit (minimum supported everywhere).
 * CAVEAT EMPTOR, with multinet (i.e. routers forwarding between networks)
 * these limits are system wide and not interface-local. */
#define LNET_MTU_BITS 20
#define LNET_MTU (1 << LNET_MTU_BITS)

/**
 * Options for the MD structure. See struct lnet_md::options.
 */
#define LNET_MD_OP_PUT (1 << 0)
/** See struct lnet_md::options. */
#define LNET_MD_OP_GET (1 << 1)
/** See struct lnet_md::options. */
#define LNET_MD_MANAGE_REMOTE (1 << 2)
/* unused (1 << 3) */
/** See struct lnet_md::options. */
#define LNET_MD_TRUNCATE (1 << 4)
/** See struct lnet_md::options. */
#define LNET_MD_ACK_DISABLE (1 << 5)
/** See struct lnet_md::options. */
/* deprecated #define LNET_MD_IOVEC (1 << 6) */
/** See struct lnet_md::options. */
#define LNET_MD_MAX_SIZE (1 << 7)
/** See struct lnet_md::options. */
#define LNET_MD_KIOV (1 << 8)
/** See struct lnet_md::options. */
#define LNET_MD_BULK_HANDLE (1 << 9)
/** See struct lnet_md::options. */
#define LNET_MD_TRACK_RESPONSE (1 << 10)
/** See struct lnet_md::options. */
#define LNET_MD_NO_TRACK_RESPONSE (1 << 11)
/** See struct lnet_md::options. */
#define LNET_MD_GNILND (1 << 12)
/** Special page mapping handling */
#define LNET_MD_GPU_ADDR (1 << 13)

/** Infinite threshold on MD operations. See struct lnet_md::threshold */
#define LNET_MD_THRESH_INF (-1)

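/* Illustrative sketch, not part of the LNet UAPI: filling in a struct
 * lnet_md for one contiguous buffer that accepts PUTs until explicitly
 * unlinked. The function and parameter names are hypothetical; attaching
 * the MD (e.g. with LNetMDAttach()) happens outside this header.
 */
static inline struct lnet_md lnet_md_example(void *buf, unsigned int len,
					     lnet_handler_t handler)
{
	struct lnet_md md = {
		.start		= buf,			/* contiguous region */
		.length		= len,
		.threshold	= LNET_MD_THRESH_INF,	/* never auto-deactivate */
		.options	= LNET_MD_OP_PUT,	/* only PUTs are allowed */
		.user_ptr	= NULL,			/* recorded in events */
		.handler	= handler,		/* NULL = no events logged */
	};

	return md;
}
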
/** @} lnet_md */

/** \addtogroup lnet_eq
 * @{ */

/**
 * Six types of events can be logged in an event queue.
 */
enum lnet_event_kind {
	/** An incoming GET operation has completed on the MD. */
	LNET_EVENT_GET = 1,
	/**
	 * An incoming PUT operation has completed on the MD. The
	 * underlying layers will not alter the memory (on behalf of this
	 * operation) once this event has been logged.
	 */
	LNET_EVENT_PUT,
	/**
	 * A REPLY operation has completed. This event is logged after the
	 * data (if any) from the REPLY has been written into the MD.
	 */
	LNET_EVENT_REPLY,
	/** An acknowledgment has been received. */
	LNET_EVENT_ACK,
	/**
	 * An outgoing send (PUT or GET) operation has completed. This event
	 * is logged after the entire buffer has been sent and it is safe for
	 * the caller to reuse the buffer.
	 *
	 * Note:
	 * - The LNET_EVENT_SEND doesn't guarantee message delivery. It can
	 *   happen even when the message has not yet been put out on wire.
	 * - It's unsafe to assume that in an outgoing GET operation
	 *   the LNET_EVENT_SEND event would happen before the
	 *   LNET_EVENT_REPLY event. The same holds for LNET_EVENT_SEND and
	 *   LNET_EVENT_ACK events in an outgoing PUT operation.
	 */
	LNET_EVENT_SEND,
	/**
	 * A MD has been unlinked. Note that LNetMDUnlink() does not
	 * necessarily trigger an LNET_EVENT_UNLINK event.
	 */
	LNET_EVENT_UNLINK,
};

#define LNET_SEQ_GT(a, b) (((signed long)((a) - (b))) > 0)

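/* Illustrative note, not an LNet definition: LNET_SEQ_GT() compares event
 * sequence numbers modulo the word size, so it remains correct across
 * counter wraparound; e.g. LNET_SEQ_GT(1, ~0UL) is true because
 * 1 - ~0UL == 2 when reinterpreted as a signed long.
 */
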
/**
 * Information about an event on a MD.
 */
struct lnet_event {
	/** The identifier (nid, pid) of the target. */
	struct lnet_processid	target;
	/** The identifier (nid, pid) of the initiator. */
	struct lnet_processid	initiator;
	/** The source NID on the initiator. */
	struct lnet_processid	source;
	/**
	 * The NID of the immediate sender. If the request has been forwarded
	 * by routers, this is the NID of the last hop; otherwise it's the
	 * same as the source.
	 */
	struct lnet_nid		sender;
	/** Indicates the type of the event. */
	enum lnet_event_kind	type;
	/** The portal table index specified in the request */
	unsigned int		pt_index;
	/** A copy of the match bits specified in the request. */
	__u64			match_bits;
	/** The length (in bytes) specified in the request. */
	unsigned int		rlength;
	/**
	 * The length (in bytes) of the data that was manipulated by the
	 * operation. For truncated operations, the manipulated length will be
	 * the number of bytes specified by the MD (possibly with an offset,
	 * see struct lnet_md). For all other operations, the manipulated
	 * length will be the length of the requested operation, i.e. rlength.
	 */
	unsigned int		mlength;
	/**
	 * The handle to the MD associated with the event. The handle may be
	 * invalid if the MD has been unlinked.
	 */
	struct lnet_handle_md	md_handle;
	/**
	 * A snapshot of relevant state of the MD immediately after the event
	 * has been processed.
	 */
	void			*md_start;
	void			*md_user_ptr;
	unsigned int		md_options;
	/**
	 * 64 bits of out-of-band user data. Only valid for LNET_EVENT_PUT.
	 * \see LNetPut()
	 */
	__u64			hdr_data;
	/**
	 * The message type, to ensure a handler for LNET_EVENT_SEND can
	 * distinguish between LNET_MSG_GET and LNET_MSG_PUT.
	 */
	__u32			msg_type;
	/**
	 * Indicates the completion status of the operation. It's 0 for
	 * successful operations, otherwise it's an error code.
	 */
	int			status;
	/**
	 * Indicates whether the MD has been unlinked. Note that:
	 * - An event with unlinked set is the last event on the MD.
	 * - This field is also set for an explicit LNET_EVENT_UNLINK event.
	 * \see LNetMDUnlink()
	 */
	int			unlinked;
	/**
	 * The displacement (in bytes) into the memory region that the
	 * operation used. The offset can be determined by the operation for
	 * a remote managed MD or by the local MD.
	 * \see struct lnet_md::options
	 */
	unsigned int		offset;
	/**
	 * The sequence number for this event. Sequence numbers are unique
	 * to each event.
	 */
	volatile unsigned long	sequence;
};

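/* Illustrative sketch, not part of the LNet UAPI: the shape of an
 * lnet_handler_t callback (referenced from the typedef above). Per the
 * rules for EQ handlers it must not block and must not call LNet API
 * functions; here it only inspects the event. The function name is
 * hypothetical.
 */
static inline void lnet_example_handler(struct lnet_event *ev)
{
	if (ev->status != 0)
		return;			/* operation failed */

	switch (ev->type) {
	case LNET_EVENT_PUT:
		/* ev->mlength bytes landed at offset ev->offset of the MD */
		break;
	case LNET_EVENT_SEND:
		/* the send buffer may be reused; delivery is not guaranteed */
		break;
	default:
		break;
	}
	/* if ev->unlinked is set, this is the last event for this MD */
}
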
/** @} lnet_eq */

/** \addtogroup lnet_data
 * @{ */

/**
 * Specify whether an acknowledgment should be sent by target when the PUT
 * operation completes (i.e., when the data has been written to a MD of the
 * target process).
 *
 * \see struct lnet_md::options for the discussion on LNET_MD_ACK_DISABLE
 * by which acknowledgments can be disabled for a MD.
 */
enum lnet_ack_req {
	/** Request an acknowledgment */
	LNET_ACK_REQ,
	/** Request that no acknowledgment should be generated. */
	LNET_NOACK_REQ
};

/**
 * UDSP action types. There are two available actions:
 *	1. PRIORITY - set priority of matching LNet constructs
 *	2. PREFERRED LIST - set preferred list of matching LNet constructs
 */
enum lnet_udsp_action_type {
	EN_LNET_UDSP_ACTION_NONE = 0,
	/** assign a priority to matching constructs */
	EN_LNET_UDSP_ACTION_PRIORITY = 1,
	/** assign a preferred list of NIDs to matching constructs */
	EN_LNET_UDSP_ACTION_PREFERRED_LIST = 2,
};