X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lnet%2Finclude%2Flnet%2Ftypes.h;h=6b9a9495fc722c0d6784be7878fbf50e6dc36d0a;hp=9f9cd8f8879c843d773ea5810d1c6c55b11789fd;hb=c14ef7b790e1de58ed8757e0406b9dc9fad8029b;hpb=75a8f4b4aa9ad6bf697aedece539e62111e9029a diff --git a/lnet/include/lnet/types.h b/lnet/include/lnet/types.h index 9f9cd8f..6b9a949 100644 --- a/lnet/include/lnet/types.h +++ b/lnet/include/lnet/types.h @@ -15,17 +15,15 @@ * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. + * http://www.gnu.org/licenses/gpl-2.0.html * * GPL HEADER END */ /* * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. * Use is subject to license terms. + * + * Copyright (c) 2012, 2015, Intel Corporation. */ /* * This file is part of Lustre, http://www.lustre.org/ @@ -38,15 +36,16 @@ /** \addtogroup lnet * @{ */ -#include - +#include /** \addtogroup lnet_addr * @{ */ +#define LNET_VERSION "0.7.0" + /** Portal reserved for LNet's own use. * \see lustre/include/lustre/lustre_idl.h for Lustre portal assignments. */ -#define LNET_RESERVED_PORTAL 0 +#define LNET_RESERVED_PORTAL 0 /** * Address of an end-point in an LNet network. @@ -66,19 +65,182 @@ typedef __u64 lnet_nid_t; typedef __u32 lnet_pid_t; /** wildcard NID that matches any end-point address */ -#define LNET_NID_ANY ((lnet_nid_t) -1) +#define LNET_NID_ANY ((lnet_nid_t) -1) /** wildcard PID that matches any lnet_pid_t */ -#define LNET_PID_ANY ((lnet_pid_t) -1) - -#ifdef CRAY_XT3 -typedef __u32 lnet_uid_t; -#define LNET_UID_ANY ((lnet_uid_t) -1) -#endif +#define LNET_PID_ANY ((lnet_pid_t) -1) #define LNET_PID_RESERVED 0xf0000000 /* reserved bits in PID */ #define LNET_PID_USERFLAG 0x80000000 /* set in userspace peers */ +#define LNET_PID_LUSTRE 12345 + +/* how an LNET NID encodes net:address */ +/** extract the address part of an lnet_nid_t */ + +static inline __u32 LNET_NIDADDR(lnet_nid_t nid) +{ + return nid & 0xffffffff; +} + +static inline __u32 LNET_NIDNET(lnet_nid_t nid) +{ + return (nid >> 32) & 0xffffffff; +} + +static inline lnet_nid_t LNET_MKNID(__u32 net, __u32 addr) +{ + return (((__u64)net) << 32) | addr; +} + +static inline __u32 LNET_NETNUM(__u32 net) +{ + return net & 0xffff; +} -#define LNET_TIME_FOREVER (-1) +static inline __u32 LNET_NETTYP(__u32 net) +{ + return (net >> 16) & 0xffff; +} + +static inline __u32 LNET_MKNET(__u32 type, __u32 num) +{ + return (type << 16) | num; +} + +#define WIRE_ATTR __attribute__((packed)) + +/* Packed version of lnet_process_id_t to transfer via network */ +typedef struct lnet_process_id_packed { + lnet_nid_t nid; + lnet_pid_t pid; /* node id / process id */ +} WIRE_ATTR lnet_process_id_packed_t; + +/* The wire handle's interface cookie only matches one network interface in + * one epoch (i.e. new cookie when the interface restarts or the node + * reboots). The object cookie only matches one object on that interface + * during that object's lifetime (i.e. no cookie re-use). 
*/ +typedef struct lnet_handle_wire { + __u64 wh_interface_cookie; + __u64 wh_object_cookie; +} WIRE_ATTR lnet_handle_wire_t; + +typedef enum { + LNET_MSG_ACK = 0, + LNET_MSG_PUT, + LNET_MSG_GET, + LNET_MSG_REPLY, + LNET_MSG_HELLO, +} lnet_msg_type_t; + +/* The variant fields of the portals message header are aligned on an 8 + * byte boundary in the message header. Note that all types used in these + * wire structs MUST be fixed size and the smaller types are placed at the + * end. */ +struct lnet_ack { + struct lnet_handle_wire dst_wmd; + __u64 match_bits; + __u32 mlength; +} WIRE_ATTR; + +struct lnet_put { + struct lnet_handle_wire ack_wmd; + __u64 match_bits; + __u64 hdr_data; + __u32 ptl_index; + __u32 offset; +} WIRE_ATTR; + +struct lnet_get { + struct lnet_handle_wire return_wmd; + __u64 match_bits; + __u32 ptl_index; + __u32 src_offset; + __u32 sink_length; +} WIRE_ATTR; + +struct lnet_reply { + struct lnet_handle_wire dst_wmd; +} WIRE_ATTR; + +struct lnet_hello { + __u64 incarnation; + __u32 type; +} WIRE_ATTR; + +typedef struct lnet_hdr { + lnet_nid_t dest_nid; + lnet_nid_t src_nid; + lnet_pid_t dest_pid; + lnet_pid_t src_pid; + __u32 type; /* lnet_msg_type_t */ + __u32 payload_length; /* payload data to follow */ + /*<------__u64 aligned------->*/ + union { + struct lnet_ack ack; + struct lnet_put put; + struct lnet_get get; + struct lnet_reply reply; + struct lnet_hello hello; + } msg; +} WIRE_ATTR lnet_hdr_t; + +/* A HELLO message contains a magic number and protocol version + * code in the header's dest_nid, the peer's NID in the src_nid, and + * LNET_MSG_HELLO in the type field. All other common fields are zero + * (including payload_size; i.e. no payload). + * This is for use by byte-stream LNDs (e.g. TCP/IP) to check the peer is + * running the same protocol and to find out its NID. These LNDs should + * exchange HELLO messages when a connection is first established. Individual + * LNDs can put whatever else they fancy in lnet_hdr_t::msg. + */ +typedef struct lnet_magicversion { + __u32 magic; /* LNET_PROTO_TCP_MAGIC */ + __u16 version_major; /* increment on incompatible change */ + __u16 version_minor; /* increment on compatible change */ +} WIRE_ATTR lnet_magicversion_t; + +/* PROTO MAGIC for LNDs */ +#define LNET_PROTO_IB_MAGIC 0x0be91b91 +#define LNET_PROTO_GNI_MAGIC 0xb00fbabe /* ask Kim */ +#define LNET_PROTO_TCP_MAGIC 0xeebc0ded +#define LNET_PROTO_ACCEPTOR_MAGIC 0xacce7100 +#define LNET_PROTO_PING_MAGIC 0x70696E67 /* 'ping' */ + +/* Placeholder for a future "unified" protocol across all LNDs */ +/* Current LNDs that receive a request with this magic will respond + * with a "stub" reply using their current protocol */ +#define LNET_PROTO_MAGIC 0x45726963 /* ! 
*/ + +#define LNET_PROTO_TCP_VERSION_MAJOR 1 +#define LNET_PROTO_TCP_VERSION_MINOR 0 + +/* Acceptor connection request */ +typedef struct lnet_acceptor_connreq { + __u32 acr_magic; /* PTL_ACCEPTOR_PROTO_MAGIC */ + __u32 acr_version; /* protocol version */ + __u64 acr_nid; /* target NID */ +} WIRE_ATTR lnet_acceptor_connreq_t; + +#define LNET_PROTO_ACCEPTOR_VERSION 1 + +typedef struct lnet_counters { + __u32 msgs_alloc; + __u32 msgs_max; + __u32 errors; + __u32 send_count; + __u32 recv_count; + __u32 route_count; + __u32 drop_count; + __u64 send_length; + __u64 recv_length; + __u64 route_length; + __u64 drop_length; +} WIRE_ATTR lnet_counters_t; + +#define LNET_NI_STATUS_UP 0x15aac0de +#define LNET_NI_STATUS_DOWN 0xdeadface +#define LNET_NI_STATUS_INVALID 0x00000000 + +#define LNET_MAX_INTERFACES 16 /** * Objects maintained by the LNet are accessed through handles. Handle types @@ -91,7 +253,7 @@ typedef __u32 lnet_uid_t; * without loss of information. */ typedef struct { - __u64 cookie; + __u64 cookie; } lnet_handle_any_t; typedef lnet_handle_any_t lnet_handle_eq_t; @@ -105,7 +267,7 @@ typedef lnet_handle_any_t lnet_handle_me_t; */ static inline void LNetInvalidateHandle(lnet_handle_any_t *h) { - h->cookie = LNET_WIRE_HANDLE_COOKIE_NONE; + h->cookie = LNET_WIRE_HANDLE_COOKIE_NONE; } /** @@ -115,7 +277,7 @@ static inline void LNetInvalidateHandle(lnet_handle_any_t *h) */ static inline int LNetHandleIsEqual (lnet_handle_any_t h1, lnet_handle_any_t h2) { - return (h1.cookie == h2.cookie); + return (h1.cookie == h2.cookie); } /** @@ -125,17 +287,17 @@ static inline int LNetHandleIsEqual (lnet_handle_any_t h1, lnet_handle_any_t h2) */ static inline int LNetHandleIsInvalid(lnet_handle_any_t h) { - return (LNET_WIRE_HANDLE_COOKIE_NONE == h.cookie); + return (LNET_WIRE_HANDLE_COOKIE_NONE == h.cookie); } /** * Global process ID. */ typedef struct { - /** node id */ - lnet_nid_t nid; - /** process id */ - lnet_pid_t pid; + /** node id */ + lnet_nid_t nid; + /** process id */ + lnet_pid_t pid; } lnet_process_id_t; /** @} lnet_addr */ @@ -147,8 +309,8 @@ typedef struct { * automatically (LNET_UNLINK) or not (LNET_RETAIN). */ typedef enum { - LNET_RETAIN = 0, - LNET_UNLINK + LNET_RETAIN = 0, + LNET_UNLINK } lnet_unlink_t; /** @@ -159,8 +321,12 @@ typedef enum { * or after the last item in the list. */ typedef enum { - LNET_INS_BEFORE, - LNET_INS_AFTER + /** insert ME before current position or head of the list */ + LNET_INS_BEFORE, + /** insert ME after current position or tail of the list */ + LNET_INS_AFTER, + /** attach ME at tail of local CPU partition ME list */ + LNET_INS_LOCAL } lnet_ins_pos_t; /** @} lnet_me */ @@ -173,147 +339,147 @@ typedef enum { * are used to initialize memory descriptors. */ typedef struct { - /** - * Specify the memory region associated with the memory descriptor. - * If the options field has: - * - LNET_MD_KIOV bit set: The start field points to the starting - * address of an array of lnet_kiov_t and the length field specifies - * the number of entries in the array. The length can't be bigger - * than LNET_MAX_IOV. The lnet_kiov_t is used to describe page-based - * fragments that are not necessarily mapped in virtal memory. - * - LNET_MD_IOVEC bit set: The start field points to the starting - * address of an array of struct iovec and the length field specifies - * the number of entries in the array. The length can't be bigger - * than LNET_MAX_IOV. The struct iovec is used to describe fragments - * that have virtual addresses. 
- * - Otherwise: The memory region is contiguous. The start field - * specifies the starting address for the memory region and the - * length field specifies its length. - * - * When the memory region is fragmented, all fragments but the first - * one must start on page boundary, and all but the last must end on - * page boundary. - */ - void *start; - unsigned int length; - /** - * Specifies the maximum number of operations that can be performed - * on the memory descriptor. An operation is any action that could - * possibly generate an event. In the usual case, the threshold value - * is decremented for each operation on the MD. When the threshold - * drops to zero, the MD becomes inactive and does not respond to - * operations. A threshold value of LNET_MD_THRESH_INF indicates that - * there is no bound on the number of operations that may be applied - * to a MD. - */ - int threshold; - /** - * Specifies the largest incoming request that the memory descriptor - * should respond to. When the unused portion of a MD (length - - * local offset) falls below this value, the MD becomes inactive and - * does not respond to further operations. This value is only used - * if the LNET_MD_MAX_SIZE option is set. - */ - int max_size; - /** - * Specifies the behavior of the memory descriptor. A bitwise OR - * of the following values can be used: - * - LNET_MD_OP_PUT: The LNet PUT operation is allowed on this MD. - * - LNET_MD_OP_GET: The LNet GET operation is allowed on this MD. - * - LNET_MD_MANAGE_REMOTE: The offset used in accessing the memory - * region is provided by the incoming request. By default, the - * offset is maintained locally. When maintained locally, the - * offset is incremented by the length of the request so that - * the next operation (PUT or GET) will access the next part of - * the memory region. Note that only one offset variable exists - * per memory descriptor. If both PUT and GET operations are - * performed on a memory descriptor, the offset is updated each time. - * - LNET_MD_TRUNCATE: The length provided in the incoming request can - * be reduced to match the memory available in the region (determined - * by subtracting the offset from the length of the memory region). - * By default, if the length in the incoming operation is greater - * than the amount of memory available, the operation is rejected. - * - LNET_MD_ACK_DISABLE: An acknowledgment should not be sent for - * incoming PUT operations, even if requested. By default, - * acknowledgments are sent for PUT operations that request an - * acknowledgment. Acknowledgments are never sent for GET operations. - * The data sent in the REPLY serves as an implicit acknowledgment. - * - LNET_MD_KIOV: The start and length fields specify an array of - * lnet_kiov_t. - * - LNET_MD_IOVEC: The start and length fields specify an array of - * struct iovec. - * - LNET_MD_MAX_SIZE: The max_size field is valid. - * - * Note: - * - LNET_MD_KIOV or LNET_MD_IOVEC allows for a scatter/gather - * capability for memory descriptors. They can't be both set. - * - When LNET_MD_MAX_SIZE is set, the total length of the memory - * region (i.e. sum of all fragment lengths) must not be less than - * \a max_size. - */ - unsigned int options; - /** - * A user-specified value that is associated with the memory - * descriptor. The value does not need to be a pointer, but must fit - * in the space used by a pointer. This value is recorded in events - * associated with operations on this MD. 
- */ - void *user_ptr; - /** - * A handle for the event queue used to log the operations performed on - * the memory region. If this argument is a NULL handle (i.e. nullified - * by LNetInvalidateHandle()), operations performed on this memory - * descriptor are not logged. - */ - lnet_handle_eq_t eq_handle; + /** + * Specify the memory region associated with the memory descriptor. + * If the options field has: + * - LNET_MD_KIOV bit set: The start field points to the starting + * address of an array of lnet_kiov_t and the length field specifies + * the number of entries in the array. The length can't be bigger + * than LNET_MAX_IOV. The lnet_kiov_t is used to describe page-based + * fragments that are not necessarily mapped in virtal memory. + * - LNET_MD_IOVEC bit set: The start field points to the starting + * address of an array of struct kvec and the length field specifies + * the number of entries in the array. The length can't be bigger + * than LNET_MAX_IOV. The struct kvec is used to describe fragments + * that have virtual addresses. + * - Otherwise: The memory region is contiguous. The start field + * specifies the starting address for the memory region and the + * length field specifies its length. + * + * When the memory region is fragmented, all fragments but the first + * one must start on page boundary, and all but the last must end on + * page boundary. + */ + void *start; + unsigned int length; + /** + * Specifies the maximum number of operations that can be performed + * on the memory descriptor. An operation is any action that could + * possibly generate an event. In the usual case, the threshold value + * is decremented for each operation on the MD. When the threshold + * drops to zero, the MD becomes inactive and does not respond to + * operations. A threshold value of LNET_MD_THRESH_INF indicates that + * there is no bound on the number of operations that may be applied + * to a MD. + */ + int threshold; + /** + * Specifies the largest incoming request that the memory descriptor + * should respond to. When the unused portion of a MD (length - + * local offset) falls below this value, the MD becomes inactive and + * does not respond to further operations. This value is only used + * if the LNET_MD_MAX_SIZE option is set. + */ + int max_size; + /** + * Specifies the behavior of the memory descriptor. A bitwise OR + * of the following values can be used: + * - LNET_MD_OP_PUT: The LNet PUT operation is allowed on this MD. + * - LNET_MD_OP_GET: The LNet GET operation is allowed on this MD. + * - LNET_MD_MANAGE_REMOTE: The offset used in accessing the memory + * region is provided by the incoming request. By default, the + * offset is maintained locally. When maintained locally, the + * offset is incremented by the length of the request so that + * the next operation (PUT or GET) will access the next part of + * the memory region. Note that only one offset variable exists + * per memory descriptor. If both PUT and GET operations are + * performed on a memory descriptor, the offset is updated each time. + * - LNET_MD_TRUNCATE: The length provided in the incoming request can + * be reduced to match the memory available in the region (determined + * by subtracting the offset from the length of the memory region). + * By default, if the length in the incoming operation is greater + * than the amount of memory available, the operation is rejected. + * - LNET_MD_ACK_DISABLE: An acknowledgment should not be sent for + * incoming PUT operations, even if requested. 
By default, + * acknowledgments are sent for PUT operations that request an + * acknowledgment. Acknowledgments are never sent for GET operations. + * The data sent in the REPLY serves as an implicit acknowledgment. + * - LNET_MD_KIOV: The start and length fields specify an array of + * lnet_kiov_t. + * - LNET_MD_IOVEC: The start and length fields specify an array of + * struct iovec. + * - LNET_MD_MAX_SIZE: The max_size field is valid. + * - LNET_MD_BULK_HANDLE: The bulk_handle field is valid. + * + * Note: + * - LNET_MD_KIOV or LNET_MD_IOVEC allows for a scatter/gather + * capability for memory descriptors. They can't be both set. + * - When LNET_MD_MAX_SIZE is set, the total length of the memory + * region (i.e. sum of all fragment lengths) must not be less than + * \a max_size. + */ + unsigned int options; + /** + * A user-specified value that is associated with the memory + * descriptor. The value does not need to be a pointer, but must fit + * in the space used by a pointer. This value is recorded in events + * associated with operations on this MD. + */ + void *user_ptr; + /** + * A handle for the event queue used to log the operations performed on + * the memory region. If this argument is a NULL handle (i.e. nullified + * by LNetInvalidateHandle()), operations performed on this memory + * descriptor are not logged. + */ + lnet_handle_eq_t eq_handle; + /** + * The bulk MD handle which was registered to describe the buffers + * either to be used to transfer data to the peer or receive data + * from the peer. This allows LNet to properly determine the NUMA + * node on which the memory was allocated and use that to select the + * nearest local network interface. This value is only used + * if the LNET_MD_BULK_HANDLE option is set. + */ + lnet_handle_md_t bulk_handle; } lnet_md_t; -/* Max Transfer Unit (minimum supported everywhere) */ -#define LNET_MTU_BITS 20 -#define LNET_MTU (1< (PAGE_SIZE * LNET_MAX_IOV)) -/* PAGE_SIZE is a constant: check with cpp! */ -# error "LNET_MAX_PAYLOAD too large - error in configure --with-max-payload-mb" -# endif -# endif -#endif +#define LNET_MAX_IOV 256 /** * Options for the MD structure. See lnet_md_t::options. */ -#define LNET_MD_OP_PUT (1 << 0) +#define LNET_MD_OP_PUT (1 << 0) +/** See lnet_md_t::options. */ +#define LNET_MD_OP_GET (1 << 1) /** See lnet_md_t::options. */ -#define LNET_MD_OP_GET (1 << 1) +#define LNET_MD_MANAGE_REMOTE (1 << 2) +/* unused (1 << 3) */ /** See lnet_md_t::options. */ -#define LNET_MD_MANAGE_REMOTE (1 << 2) -/* unused (1 << 3) */ +#define LNET_MD_TRUNCATE (1 << 4) /** See lnet_md_t::options. */ -#define LNET_MD_TRUNCATE (1 << 4) +#define LNET_MD_ACK_DISABLE (1 << 5) /** See lnet_md_t::options. */ -#define LNET_MD_ACK_DISABLE (1 << 5) +#define LNET_MD_IOVEC (1 << 6) /** See lnet_md_t::options. */ -#define LNET_MD_IOVEC (1 << 6) +#define LNET_MD_MAX_SIZE (1 << 7) /** See lnet_md_t::options. */ -#define LNET_MD_MAX_SIZE (1 << 7) +#define LNET_MD_KIOV (1 << 8) /** See lnet_md_t::options. */ -#define LNET_MD_KIOV (1 << 8) +#define LNET_MD_BULK_HANDLE (1 << 9) /* For compatibility with Cray Portals */ -#define LNET_MD_PHYS 0 +#define LNET_MD_PHYS 0 /** Infinite threshold on MD operations. See lnet_md_t::threshold */ -#define LNET_MD_THRESH_INF (-1) +#define LNET_MD_THRESH_INF (-1) /* NB lustre portals uses struct iovec internally! */ typedef struct iovec lnet_md_iovec_t; @@ -322,16 +488,16 @@ typedef struct iovec lnet_md_iovec_t; * A page-based fragment of a MD. 
*/ typedef struct { - /** Pointer to the page where the fragment resides */ - cfs_page_t *kiov_page; - /** Length in bytes of the fragment */ - unsigned int kiov_len; - /** - * Starting offset of the fragment within the page. Note that the - * end of the fragment must not pass the end of the page; i.e., - * kiov_len + kiov_offset <= CFS_PAGE_SIZE. - */ - unsigned int kiov_offset; + /** Pointer to the page where the fragment resides */ + struct page *kiov_page; + /** Length in bytes of the fragment */ + unsigned int kiov_len; + /** + * Starting offset of the fragment within the page. Note that the + * end of the fragment must not pass the end of the page; i.e., + * kiov_len + kiov_offset <= PAGE_SIZE. + */ + unsigned int kiov_offset; } lnet_kiov_t; /** @} lnet_md */ @@ -344,131 +510,118 @@ typedef struct { typedef enum { /** An incoming GET operation has completed on the MD. */ LNET_EVENT_GET = 1, - /** - * An incoming PUT operation has completed on the MD. The - * underlying layers will not alter the memory (on behalf of this - * operation) once this event has been logged. - */ - LNET_EVENT_PUT, - /** - * A REPLY operation has completed. This event is logged after the - * data (if any) from the REPLY has been written into the MD. - */ - LNET_EVENT_REPLY, - /** An acknowledgment has been received. */ - LNET_EVENT_ACK, - /** - * An outgoing send (PUT or GET) operation has completed. This event - * is logged after the entire buffer has been sent and it is safe for - * the caller to reuse the buffer. - * - * Note: - * - The LNET_EVENT_SEND doesn't guarantee message delivery. It can - * happen even when the message has not yet been put out on wire. - * - It's unsafe to assume that in an outgoing GET operation - * the LNET_EVENT_SEND event would happen before the - * LNET_EVENT_REPLY event. The same holds for LNET_EVENT_SEND and - * LNET_EVENT_ACK events in an outgoing PUT operation. - */ - LNET_EVENT_SEND, - /** - * A MD has been unlinked. Note that LNetMDUnlink() does not - * necessarily trigger an LNET_EVENT_UNLINK event. - * \see LNetMDUnlink - */ - LNET_EVENT_UNLINK, + /** + * An incoming PUT operation has completed on the MD. The + * underlying layers will not alter the memory (on behalf of this + * operation) once this event has been logged. + */ + LNET_EVENT_PUT, + /** + * A REPLY operation has completed. This event is logged after the + * data (if any) from the REPLY has been written into the MD. + */ + LNET_EVENT_REPLY, + /** An acknowledgment has been received. */ + LNET_EVENT_ACK, + /** + * An outgoing send (PUT or GET) operation has completed. This event + * is logged after the entire buffer has been sent and it is safe for + * the caller to reuse the buffer. + * + * Note: + * - The LNET_EVENT_SEND doesn't guarantee message delivery. It can + * happen even when the message has not yet been put out on wire. + * - It's unsafe to assume that in an outgoing GET operation + * the LNET_EVENT_SEND event would happen before the + * LNET_EVENT_REPLY event. The same holds for LNET_EVENT_SEND and + * LNET_EVENT_ACK events in an outgoing PUT operation. + */ + LNET_EVENT_SEND, + /** + * A MD has been unlinked. Note that LNetMDUnlink() does not + * necessarily trigger an LNET_EVENT_UNLINK event. 
+ * \see LNetMDUnlink + */ + LNET_EVENT_UNLINK, } lnet_event_kind_t; -#define LNET_SEQ_BASETYPE long +#define LNET_SEQ_BASETYPE long typedef unsigned LNET_SEQ_BASETYPE lnet_seq_t; -#define LNET_SEQ_GT(a,b) (((signed LNET_SEQ_BASETYPE)((a) - (b))) > 0) - -/* XXX - * cygwin need the pragma line, not clear if it's needed in other places. - * checking!!! - */ -#ifdef __CYGWIN__ -#pragma pack(push, 4) -#endif +#define LNET_SEQ_GT(a, b) (((signed LNET_SEQ_BASETYPE)((a) - (b))) > 0) /** * Information about an event on a MD. */ typedef struct { - /** The identifier (nid, pid) of the target. */ - lnet_process_id_t target; - /** The identifier (nid, pid) of the initiator. */ - lnet_process_id_t initiator; - /** - * The NID of the immediate sender. If the request has been forwarded - * by routers, this is the NID of the last hop; otherwise it's the - * same as the initiator. - */ - lnet_nid_t sender; - /** Indicates the type of the event. */ - lnet_event_kind_t type; - /** The portal table index specified in the request */ - unsigned int pt_index; - /** A copy of the match bits specified in the request. */ - __u64 match_bits; - /** The length (in bytes) specified in the request. */ - unsigned int rlength; - /** - * The length (in bytes) of the data that was manipulated by the - * operation. For truncated operations, the manipulated length will be - * the number of bytes specified by the MD (possibly with an offset, - * see lnet_md_t). For all other operations, the manipulated length - * will be the length of the requested operation, i.e. rlength. - */ - unsigned int mlength; - /** - * The handle to the MD associated with the event. The handle may be - * invalid if the MD has been unlinked. - */ - lnet_handle_md_t md_handle; - /** - * A snapshot of the state of the MD immediately after the event has - * been processed. In particular, the threshold field in md will - * reflect the value of the threshold after the operation occurred. - */ - lnet_md_t md; - /** - * 64 bits of out-of-band user data. Only valid for LNET_EVENT_PUT. - * \see LNetPut - */ - __u64 hdr_data; - /** - * Indicates the completion status of the operation. It's 0 for - * successful operations, otherwise it's an error code. - */ - int status; - /** - * Indicates whether the MD has been unlinked. Note that: - * - An event with unlinked set is the last event on the MD. - * - This field is also set for an explicit LNET_EVENT_UNLINK event. - * \see LNetMDUnlink - */ - int unlinked; - /** - * The displacement (in bytes) into the memory region that the - * operation used. The offset can be determined by the operation for - * a remote managed MD or by the local MD. - * \see lnet_md_t::options - */ - unsigned int offset; -#ifdef CRAY_XT3 - lnet_uid_t uid; -#endif - - /** - * The sequence number for this event. Sequence numbers are unique - * to each event. - */ - volatile lnet_seq_t sequence; + /** The identifier (nid, pid) of the target. */ + lnet_process_id_t target; + /** The identifier (nid, pid) of the initiator. */ + lnet_process_id_t initiator; + /** The source NID on the initiator. */ + lnet_process_id_t source; + /** + * The NID of the immediate sender. If the request has been forwarded + * by routers, this is the NID of the last hop; otherwise it's the + * same as the source. + */ + lnet_nid_t sender; + /** Indicates the type of the event. */ + lnet_event_kind_t type; + /** The portal table index specified in the request */ + unsigned int pt_index; + /** A copy of the match bits specified in the request. 
*/ + __u64 match_bits; + /** The length (in bytes) specified in the request. */ + unsigned int rlength; + /** + * The length (in bytes) of the data that was manipulated by the + * operation. For truncated operations, the manipulated length will be + * the number of bytes specified by the MD (possibly with an offset, + * see lnet_md_t). For all other operations, the manipulated length + * will be the length of the requested operation, i.e. rlength. + */ + unsigned int mlength; + /** + * The handle to the MD associated with the event. The handle may be + * invalid if the MD has been unlinked. + */ + lnet_handle_md_t md_handle; + /** + * A snapshot of the state of the MD immediately after the event has + * been processed. In particular, the threshold field in md will + * reflect the value of the threshold after the operation occurred. + */ + lnet_md_t md; + /** + * 64 bits of out-of-band user data. Only valid for LNET_EVENT_PUT. + * \see LNetPut + */ + __u64 hdr_data; + /** + * Indicates the completion status of the operation. It's 0 for + * successful operations, otherwise it's an error code. + */ + int status; + /** + * Indicates whether the MD has been unlinked. Note that: + * - An event with unlinked set is the last event on the MD. + * - This field is also set for an explicit LNET_EVENT_UNLINK event. + * \see LNetMDUnlink + */ + int unlinked; + /** + * The displacement (in bytes) into the memory region that the + * operation used. The offset can be determined by the operation for + * a remote managed MD or by the local MD. + * \see lnet_md_t::options + */ + unsigned int offset; + /** + * The sequence number for this event. Sequence numbers are unique + * to each event. + */ + volatile lnet_seq_t sequence; } lnet_event_t; -#ifdef __CYGWIN__ -#pragma pop -#endif /** * Event queue handler function type. @@ -496,10 +649,10 @@ typedef void (*lnet_eq_handler_t)(lnet_event_t *event); * acknowledgments can be disabled for a MD. */ typedef enum { - /** Request an acknowledgment */ - LNET_ACK_REQ, - /** Request that no acknowledgment should be generated. */ - LNET_NOACK_REQ + /** Request an acknowledgment */ + LNET_ACK_REQ, + /** Request that no acknowledgment should be generated. */ + LNET_NOACK_REQ } lnet_ack_req_t; /** @} lnet_data */
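
The NID helpers introduced near the top of this patch are pure bit manipulation, so their behaviour can be checked in isolation. A minimal sketch, assuming the header is reachable as <lnet/types.h> and using an arbitrary network type value of 2 and network number 0 purely for illustration:

#include <assert.h>
#include <lnet/types.h>

static void nid_roundtrip_example(void)
{
	__u32 addr = (192 << 24) | (168 << 16) | (0 << 8) | 1;	/* 192.168.0.1 */
	__u32 net  = LNET_MKNET(2, 0);		/* illustrative type 2, network number 0 */
	lnet_nid_t nid = LNET_MKNID(net, addr);

	/* the upper 32 bits of the NID carry the network, the lower 32 the address */
	assert(LNET_NIDNET(nid) == net);
	assert(LNET_NIDADDR(nid) == addr);

	/* the network word itself splits into type (high 16) and number (low 16) */
	assert(LNET_NETTYP(net) == 2);
	assert(LNET_NETNUM(net) == 0);
}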
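
The HELLO description in the patch implies a simple compatibility test on the lnet_magicversion_t a byte-stream LND reads at connection setup. A sketch of that test, ignoring byte-order conversion of the received fields; the function name is illustrative:

#include <lnet/types.h>

/* returns 0 if the peer speaks a compatible TCP LND protocol */
static int check_proto_version(const lnet_magicversion_t *hmv)
{
	if (hmv->magic != LNET_PROTO_TCP_MAGIC)
		return -1;	/* not an LNet TCP peer */
	if (hmv->version_major != LNET_PROTO_TCP_VERSION_MAJOR)
		return -1;	/* incompatible protocol change */
	/* a differing version_minor only signals compatible changes */
	return 0;
}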
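
LNetInvalidateHandle(), LNetHandleIsInvalid() and LNetHandleIsEqual() are the helpers this header provides for marking and testing "no object attached". A short sketch with illustrative variable names:

#include <lnet/types.h>

static void handle_example(lnet_handle_md_t mdh1, lnet_handle_md_t mdh2)
{
	lnet_handle_eq_t eqh;

	/* mark the EQ handle as "none" before it is ever allocated */
	LNetInvalidateHandle(&eqh);

	if (LNetHandleIsInvalid(eqh)) {
		/* e.g. pass this as lnet_md_t::eq_handle to disable event logging */
	}

	if (LNetHandleIsEqual(mdh1, mdh2)) {
		/* both handles refer to the same MD */
	}
}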
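
Taken together, the lnet_md_t fields described above specify a complete receive buffer. A sketch of filling one in for a contiguous buffer that accepts PUTs without acknowledgments; the buffer, the event queue handle and the final attach/bind step (not part of this header) are assumptions of the example:

#include <lnet/types.h>

static char recv_buf[4096];			/* illustrative receive buffer */

static void md_setup_example(lnet_handle_eq_t recv_eq)
{
	lnet_md_t md;

	md.start     = recv_buf;
	md.length    = sizeof(recv_buf);
	md.threshold = LNET_MD_THRESH_INF;	/* never retire on operation count */
	md.max_size  = 0;			/* ignored: LNET_MD_MAX_SIZE not set */
	md.options   = LNET_MD_OP_PUT | LNET_MD_ACK_DISABLE;
	md.user_ptr  = NULL;
	md.eq_handle = recv_eq;			/* events for this MD are logged here */
	LNetInvalidateHandle(&md.bulk_handle);	/* ignored: LNET_MD_BULK_HANDLE not set */

	/* md would now be handed to the MD attach/bind call of the LNet API */
	(void)md;
}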
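
The fragmentation rule quoted in the lnet_md_t comment ("all fragments but the first one must start on page boundary, and all but the last must end on page boundary") maps directly onto how an lnet_kiov_t array is filled. A sketch for a buffer spanning two pages, with the pages assumed to be allocated by the caller and the function name illustrative:

#include <lnet/types.h>

static void kiov_two_page_example(struct page *first, struct page *second,
				  unsigned int head_offset,
				  unsigned int tail_len,
				  lnet_kiov_t kiov[2])
{
	/* first fragment: may start mid-page, must run to the end of the page */
	kiov[0].kiov_page   = first;
	kiov[0].kiov_offset = head_offset;
	kiov[0].kiov_len    = PAGE_SIZE - head_offset;

	/* last fragment: must start on a page boundary, may end mid-page */
	kiov[1].kiov_page   = second;
	kiov[1].kiov_offset = 0;
	kiov[1].kiov_len    = tail_len;
}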
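
The event kinds and lnet_event_t fields are consumed through an lnet_eq_handler_t callback. A sketch of such a handler; the handler name and the reactions in the comments are illustrative, not prescriptive:

#include <lnet/types.h>

static void example_eq_handler(lnet_event_t *event)
{
	if (event->status != 0) {
		/* the operation failed; event->status holds the error code */
	}

	switch (event->type) {
	case LNET_EVENT_PUT:
		/* event->mlength bytes were written at event->offset in the MD */
		break;
	case LNET_EVENT_SEND:
		/* the send buffer may be reused; delivery is NOT guaranteed */
		break;
	case LNET_EVENT_REPLY:
	case LNET_EVENT_ACK:
	case LNET_EVENT_GET:
	case LNET_EVENT_UNLINK:
	default:
		break;
	}

	if (event->unlinked) {
		/* last event for this MD; event->md_handle is no longer usable */
	}
}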