X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lnet%2Finclude%2Flnet%2Flib-types.h;h=c4b62223b9ee7ba779f1a2224759f172b80270bf;hb=4072d863c240fa5466f0f616f7e9b1cfcdf0aa0e;hp=cc02d45c3f7fd288abeb8a80851951e7d9f99690;hpb=df3904c3486c71da586d43fbfca8f126b5764838;p=fs%2Flustre-release.git

diff --git a/lnet/include/lnet/lib-types.h b/lnet/include/lnet/lib-types.h
index cc02d45..c4b6222 100644
--- a/lnet/include/lnet/lib-types.h
+++ b/lnet/include/lnet/lib-types.h
@@ -49,11 +49,13 @@
 #include 
 #include 
+#include 
 
 /* Max payload size */
 #define LNET_MAX_PAYLOAD LNET_MTU
 
-#define LNET_MAX_IOV (LNET_MAX_PAYLOAD >> PAGE_SHIFT)
+/** limit on the number of fragments in discontiguous MDs */
+#define LNET_MAX_IOV 256
 
 /*
  * This is the maximum health value.
@@ -166,8 +168,7 @@ struct lnet_msg {
 	unsigned int msg_wanted;
 	unsigned int msg_offset;
 	unsigned int msg_niov;
-	struct kvec *msg_iov;
-	lnet_kiov_t *msg_kiov;
+	struct bio_vec *msg_kiov;
 
 	struct lnet_event msg_ev;
 	struct lnet_hdr msg_hdr;
@@ -181,20 +182,9 @@ struct lnet_libhandle {
 #define lh_entry(ptr, type, member) \
 	((type *)((char *)(ptr)-(char *)(&((type *)0)->member)))
 
-struct lnet_eq {
-	struct list_head eq_list;
-	struct lnet_libhandle eq_lh;
-	unsigned long eq_enq_seq;
-	unsigned long eq_deq_seq;
-	unsigned int eq_size;
-	lnet_eq_handler_t eq_callback;
-	struct lnet_event *eq_events;
-	int **eq_refs; /* percpt refcount for EQ */
-};
-
 struct lnet_me {
 	struct list_head me_list;
-	struct lnet_libhandle me_lh;
+	int me_cpt;
 	struct lnet_process_id me_match_id;
 	unsigned int me_portal;
 	unsigned int me_pos; /* hash offset in mt_hash */
@@ -219,17 +209,14 @@ struct lnet_libmd {
 	unsigned int md_niov; /* # frags at end of struct */
 	void *md_user_ptr;
 	struct lnet_rsp_tracker *md_rspt_ptr;
-	struct lnet_eq *md_eq;
+	lnet_handler_t md_handler;
 	struct lnet_handle_md md_bulk_handle;
-	union {
-		struct kvec iov[LNET_MAX_IOV];
-		lnet_kiov_t kiov[LNET_MAX_IOV];
-	} md_iov;
+	struct bio_vec md_kiov[LNET_MAX_IOV];
 };
 
-#define LNET_MD_FLAG_ZOMBIE (1 << 0)
-#define LNET_MD_FLAG_AUTO_UNLINK (1 << 1)
-#define LNET_MD_FLAG_ABORTED (1 << 2)
+#define LNET_MD_FLAG_ZOMBIE BIT(0)
+#define LNET_MD_FLAG_AUTO_UNLINK BIT(1)
+#define LNET_MD_FLAG_ABORTED BIT(2)
 
 struct lnet_test_peer {
 	/* info about peers we are trying to fail */
@@ -248,10 +235,6 @@ struct lnet_ni; /* forward ref */
 struct socket;
 
 struct lnet_lnd {
-	/* fields managed by portals */
-	struct list_head lnd_list; /* stash in the LND table */
-	int lnd_refcount; /* # active instances */
-
 	/* fields initialized by the LND */
 	__u32 lnd_type;
 
@@ -260,11 +243,7 @@ struct lnet_lnd {
 	int (*lnd_ctl)(struct lnet_ni *ni, unsigned int cmd, void *arg);
 
 	/* In data movement APIs below, payload buffers are described as a set
-	 * of 'niov' fragments which are...
-	 * EITHER
-	 *    in virtual memory (struct kvec *iov != NULL)
-	 * OR
-	 *    in pages (kernel only: plt_kiov_t *kiov != NULL).
+	 * of 'niov' fragments which are in pages.
	 * The LND may NOT overwrite these fragment descriptors.
	 * An 'offset' may specify a byte offset within the set of
	 * fragments to start from.
@@ -285,7 +264,7 @@ struct lnet_lnd {
	 * credit if the LND does flow control.
	 */
	int (*lnd_recv)(struct lnet_ni *ni, void *private, struct lnet_msg *msg,
			int delayed, unsigned int niov,
-			struct kvec *iov, lnet_kiov_t *kiov,
+			struct bio_vec *kiov,
			unsigned int offset, unsigned int mlen, unsigned int rlen);
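
With the kvec/kiov split removed, lnd_recv() now sees exactly one fragment format. Below is a minimal sketch of how a hypothetical LND might consume the unified bio_vec list under the new signature; mylnd_recv and the elided wire-I/O step are invented for illustration, and lnet_finalize() is used in the two-argument form of this era of the tree.

/* Hypothetical receive handler for the unified bio_vec API.  Error
 * handling and the actual network I/O are elided; 'mlen' bytes are
 * consumed starting 'offset' bytes into the fragment list.
 */
static int
mylnd_recv(struct lnet_ni *ni, void *private, struct lnet_msg *msg,
	   int delayed, unsigned int niov, struct bio_vec *kiov,
	   unsigned int offset, unsigned int mlen, unsigned int rlen)
{
	unsigned int nob = mlen;

	/* skip fragments wholly covered by the starting offset */
	while (niov > 0 && offset >= kiov->bv_len) {
		offset -= kiov->bv_len;
		kiov++;
		niov--;
	}

	while (nob > 0 && niov > 0) {
		unsigned int frag = min(kiov->bv_len - offset, nob);
		void *dst = page_address(kiov->bv_page) +
			    kiov->bv_offset + offset;

		/* ...receive 'frag' bytes of payload into 'dst'... */

		nob -= frag;
		offset = 0;
		kiov++;
		niov--;
	}

	lnet_finalize(msg, 0);	/* report completion to LNet */
	return 0;
}

A real LND would kmap() highmem pages or set up DMA rather than assume page_address() works; the point is only that there is now a single fragment descriptor type to walk.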
 	/* lnet_parse() has had to delay processing of this message
@@ -300,9 +279,6 @@
 	/* notification of peer down */
 	void (*lnd_notify_peer_down)(lnet_nid_t peer);
 
-	/* query of peer aliveness */
-	void (*lnd_query)(struct lnet_ni *ni, lnet_nid_t peer, time64_t *when);
-
 	/* accept a new connection */
 	int (*lnd_accept)(struct lnet_ni *ni, struct socket *sock);
 };
 
@@ -402,7 +378,7 @@ struct lnet_net {
 	bool net_tunables_set;
 
 	/* procedural interface */
-	struct lnet_lnd *net_lnd;
+	const struct lnet_lnd *net_lnd;
 
 	/* list of NIs on this net */
 	struct list_head net_ni_list;
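
The removal of lnd_list and lnd_refcount earlier in the diff, together with the const qualifier on net_lnd here (and the fixed ln_lnds[NUM_LNDS] table further down), makes an LND descriptor read-only to LNet. A sketch of what that looks like on the LND side, assuming registration helpers of roughly this shape; all mylnd_* symbols are invented, and only the field names come from struct lnet_lnd above:

/* Sketch: a read-only LND descriptor handed to LNet once at load time.
 * The lnet_register_lnd()/lnet_unregister_lnd() pair is an assumption
 * about the companion API of this patch series.
 */
static const struct lnet_lnd the_mylnd = {
	.lnd_type   = LOLND,		/* placeholder type constant */
	.lnd_recv   = mylnd_recv,
	.lnd_accept = mylnd_accept,
};

static int __init mylnd_module_init(void)
{
	/* LNet indexes its table slot by lnd_type; no refcount to manage */
	lnet_register_lnd(&the_mylnd);
	return 0;
}

static void __exit mylnd_module_exit(void)
{
	lnet_unregister_lnd(&the_mylnd);
}

With lnd_list and lnd_refcount gone, instance lifetime is presumably tied to module references rather than an LNet-maintained count.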
@@ -516,6 +492,7 @@ struct lnet_ni {
 struct lnet_ping_buffer {
 	int pb_nnis;
 	atomic_t pb_refcnt;
+	bool pb_needs_post;
 	struct lnet_ping_info pb_info;
 };
 
@@ -593,13 +570,13 @@ struct lnet_peer_ni {
 };
 
 /* Preferred path added due to traffic on non-MR peer_ni */
-#define LNET_PEER_NI_NON_MR_PREF (1 << 0)
+#define LNET_PEER_NI_NON_MR_PREF BIT(0)
 /* peer is being recovered. */
-#define LNET_PEER_NI_RECOVERY_PENDING (1 << 1)
+#define LNET_PEER_NI_RECOVERY_PENDING BIT(1)
 /* recovery ping failed */
-#define LNET_PEER_NI_RECOVERY_FAILED (1 << 2)
+#define LNET_PEER_NI_RECOVERY_FAILED BIT(2)
 /* peer is being deleted */
-#define LNET_PEER_NI_DELETING (1 << 3)
+#define LNET_PEER_NI_DELETING BIT(3)
 
 struct lnet_peer {
 	/* chain on pt_peer_list */
 	struct list_head lp_peer_list;
@@ -617,6 +594,9 @@ struct lnet_peer {
 	/* primary NID of the peer */
 	lnet_nid_t lp_primary_nid;
 
+	/* source NID to use during discovery */
+	lnet_nid_t lp_disc_src_nid;
+
 	/* net to perform discovery on */
 	__u32 lp_disc_net_id;
 
@@ -688,6 +668,9 @@ struct lnet_peer {
 	/* tasks waiting on discovery of this peer */
 	wait_queue_head_t lp_dc_waitq;
+
+	/* cached peer aliveness */
+	bool lp_alive;
 };
 
 /*
@@ -702,9 +685,9 @@ struct lnet_peer {
  *
  * A peer is marked ROUTER if it indicates so in the feature bit.
  */
-#define LNET_PEER_MULTI_RAIL (1 << 0) /* Multi-rail aware */
-#define LNET_PEER_NO_DISCOVERY (1 << 1) /* Peer disabled discovery */
-#define LNET_PEER_ROUTER_ENABLED (1 << 2) /* router feature enabled */
+#define LNET_PEER_MULTI_RAIL BIT(0) /* Multi-rail aware */
+#define LNET_PEER_NO_DISCOVERY BIT(1) /* Peer disabled discovery */
+#define LNET_PEER_ROUTER_ENABLED BIT(2) /* router feature enabled */
 
 /*
  * A peer is marked CONFIGURED if it was configured by DLC.
@@ -719,34 +702,39 @@ struct lnet_peer {
  * A peer that was created as the result of inbound traffic will not
 * be marked at all.
 */
-#define LNET_PEER_CONFIGURED (1 << 3) /* Configured via DLC */
-#define LNET_PEER_DISCOVERED (1 << 4) /* Peer was discovered */
-#define LNET_PEER_REDISCOVER (1 << 5) /* Discovery was disabled */
+#define LNET_PEER_CONFIGURED BIT(3) /* Configured via DLC */
+#define LNET_PEER_DISCOVERED BIT(4) /* Peer was discovered */
+#define LNET_PEER_REDISCOVER BIT(5) /* Discovery was disabled */
 
 /*
 * A peer is marked DISCOVERING when discovery is in progress.
 * The other flags below correspond to stages of discovery.
 */
-#define LNET_PEER_DISCOVERING (1 << 6) /* Discovering */
-#define LNET_PEER_DATA_PRESENT (1 << 7) /* Remote peer data present */
-#define LNET_PEER_NIDS_UPTODATE (1 << 8) /* Remote peer info uptodate */
-#define LNET_PEER_PING_SENT (1 << 9) /* Waiting for REPLY to Ping */
-#define LNET_PEER_PUSH_SENT (1 << 10) /* Waiting for ACK of Push */
-#define LNET_PEER_PING_FAILED (1 << 11) /* Ping send failure */
-#define LNET_PEER_PUSH_FAILED (1 << 12) /* Push send failure */
+#define LNET_PEER_DISCOVERING BIT(6) /* Discovering */
+#define LNET_PEER_DATA_PRESENT BIT(7) /* Remote peer data present */
+#define LNET_PEER_NIDS_UPTODATE BIT(8) /* Remote peer info uptodate */
+#define LNET_PEER_PING_SENT BIT(9) /* Waiting for REPLY to Ping */
+#define LNET_PEER_PUSH_SENT BIT(10) /* Waiting for ACK of Push */
+#define LNET_PEER_PING_FAILED BIT(11) /* Ping send failure */
+#define LNET_PEER_PUSH_FAILED BIT(12) /* Push send failure */
 
 /*
 * A ping can be forced as a way to fix up state, or as a manual
 * intervention by an admin.
 * A push can be forced in circumstances that would normally not
 * allow for one to happen.
 */
-#define LNET_PEER_FORCE_PING (1 << 13) /* Forced Ping */
-#define LNET_PEER_FORCE_PUSH (1 << 14) /* Forced Push */
+#define LNET_PEER_FORCE_PING BIT(13) /* Forced Ping */
+#define LNET_PEER_FORCE_PUSH BIT(14) /* Forced Push */
 
 /* force delete even if router */
-#define LNET_PEER_RTR_NI_FORCE_DEL (1 << 15)
+#define LNET_PEER_RTR_NI_FORCE_DEL BIT(15)
 
 /* gw undergoing alive discovery */
-#define LNET_PEER_RTR_DISCOVERY (1 << 16)
+#define LNET_PEER_RTR_DISCOVERY BIT(16)
+/* gw has undergone discovery (does not indicate success or failure) */
+#define LNET_PEER_RTR_DISCOVERED BIT(17)
+
+/* peer is marked for deletion */
+#define LNET_PEER_MARK_DELETION BIT(18)
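
The (1 << n) to BIT(n) conversions in this block are purely stylistic: BIT(n) is the kernel's (1UL << (n)) helper from linux/bits.h, so the flag values are unchanged. A hedged sketch of how such lp_state bits are typically manipulated, assuming the lp_lock spinlock that guards peer state elsewhere in this header; peer_start_discovery() is an invented name:

/* Invented helper: atomically move a peer into the DISCOVERING state.
 * lp_state and lp_lock are assumed to be the peer state word and its
 * guard lock; everything else is illustrative.
 */
static bool peer_start_discovery(struct lnet_peer *lp)
{
	bool started = false;

	spin_lock(&lp->lp_lock);
	if (!(lp->lp_state & (LNET_PEER_DISCOVERING |
			      LNET_PEER_NO_DISCOVERY))) {
		lp->lp_state |= LNET_PEER_DISCOVERING;
		started = true;
	}
	spin_unlock(&lp->lp_lock);

	return started;
}

The only semantic nuance of the conversion is that BIT() yields an unsigned long, which is harmless for flags at these bit positions.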
 struct lnet_peer_net {
 	/* chain on lp_peer_nets */
@@ -761,6 +749,9 @@ struct lnet_peer_net {
 	/* Net ID */
 	__u32 lpn_net_id;
 
+	/* peer net health */
+	int lpn_healthv;
+
 	/* time of last router net check attempt */
 	time64_t lpn_rtrcheck_timestamp;
 
@@ -817,6 +808,7 @@ struct lnet_route {
 	__u32 lr_hops; /* how far I am */
 	unsigned int lr_priority; /* route priority */
 	bool lr_alive; /* cached route aliveness */
+	bool lr_single_hop; /* this route is single-hop */
 };
 
 #define LNET_REMOTE_NETS_HASH_DEFAULT (1U << 7)
@@ -859,28 +851,28 @@ struct lnet_rtrbufpool {
 struct lnet_rtrbuf {
 	struct list_head rb_list; /* chain on rbp_bufs */
 	struct lnet_rtrbufpool *rb_pool; /* owning pool */
-	lnet_kiov_t rb_kiov[0]; /* the buffer space */
+	struct bio_vec rb_kiov[0]; /* the buffer space */
 };
 
 #define LNET_PEER_HASHSIZE 503 /* prime! */
 
 enum lnet_match_flags {
 	/* Didn't match anything */
-	LNET_MATCHMD_NONE = (1 << 0),
+	LNET_MATCHMD_NONE = BIT(0),
 	/* Matched OK */
-	LNET_MATCHMD_OK = (1 << 1),
+	LNET_MATCHMD_OK = BIT(1),
 	/* Must be discarded */
-	LNET_MATCHMD_DROP = (1 << 2),
+	LNET_MATCHMD_DROP = BIT(2),
 	/* match and buffer is exhausted */
-	LNET_MATCHMD_EXHAUSTED = (1 << 3),
+	LNET_MATCHMD_EXHAUSTED = BIT(3),
 	/* match or drop */
 	LNET_MATCHMD_FINISH = (LNET_MATCHMD_OK | LNET_MATCHMD_DROP),
 };
 
 /* Options for struct lnet_portal::ptl_options */
-#define LNET_PTL_LAZY (1 << 0)
-#define LNET_PTL_MATCH_UNIQUE (1 << 1) /* unique match, for RDMA */
-#define LNET_PTL_MATCH_WILDCARD (1 << 2) /* wildcard match, request portal */
+#define LNET_PTL_LAZY BIT(0)
+#define LNET_PTL_MATCH_UNIQUE BIT(1) /* unique match, for RDMA */
+#define LNET_PTL_MATCH_WILDCARD BIT(2) /* wildcard match, request portal */
 
 /* parameter for matching operations (GET, PUT) */
 struct lnet_match_info {
@@ -1005,14 +997,11 @@ struct lnet {
 	int ln_nportals;
 	/* the vector of portals */
 	struct lnet_portal **ln_portals;
-	/* percpt ME containers */
-	struct lnet_res_container **ln_me_containers;
 	/* percpt MD container */
 	struct lnet_res_container **ln_md_containers;
 
 	/* Event Queue container */
 	struct lnet_res_container ln_eq_container;
-	wait_queue_head_t ln_eq_waitq;
 	spinlock_t ln_eq_wait_lock;
 
 	unsigned int ln_remote_nets_hbits;
@@ -1059,7 +1048,7 @@ struct lnet {
 	 * ln_api_mutex.
 	 */
 	struct lnet_handle_md ln_ping_target_md;
-	struct lnet_handle_eq ln_ping_target_eq;
+	lnet_handler_t ln_ping_target_handler;
 	struct lnet_ping_buffer *ln_ping_target;
 	atomic_t ln_ping_target_seqno;
 
@@ -1071,13 +1060,13 @@ struct lnet {
 	 * buffer may linger a while after it has been unlinked, in
 	 * which case the event handler cleans up.
 	 */
-	struct lnet_handle_eq ln_push_target_eq;
+	lnet_handler_t ln_push_target_handler;
 	struct lnet_handle_md ln_push_target_md;
 	struct lnet_ping_buffer *ln_push_target;
 	int ln_push_target_nnis;
 
 	/* discovery event queue handle */
-	struct lnet_handle_eq ln_dc_eqh;
+	lnet_handler_t ln_dc_handler;
 	/* discovery requests */
 	struct list_head ln_dc_request;
 	/* discovery working list */
@@ -1108,10 +1097,10 @@ struct lnet {
 	/* uniquely identifies this ni in this epoch */
 	__u64 ln_interface_cookie;
 	/* registered LNDs */
-	struct list_head ln_lnds;
+	const struct lnet_lnd *ln_lnds[NUM_LNDS];
 
 	/* test protocol compatibility flags */
-	int ln_testprotocompat;
+	unsigned long ln_testprotocompat;
 
 	/* 0 - load the NIs from the mod params
 	 * 1 - do not load the NIs from the mod params
@@ -1147,8 +1136,8 @@ struct lnet {
 	 * operations on the MD complete or when LNet has shut down.
 	 */
 	struct list_head **ln_mt_zombie_rstqs;
-	/* recovery eq handler */
-	struct lnet_handle_eq ln_mt_eqh;
+	/* recovery handler */
+	lnet_handler_t ln_mt_handler;
 
 	/*
 	 * Completed when the discovery and monitor threads can enter their
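
The ln_*_handler fields above replace struct lnet_handle_eq throughout: completions are now delivered straight to a callback rather than queued on an event queue. A minimal sketch of the new pattern, assuming the lnet_handler_t typedef (void (*)(struct lnet_event *)) and the .handler member of struct lnet_md from the same patch series, plus the by-value LNetMDBind() of this vintage; ping_setup(), ping_handler() and complete_ping() are invented names:

/* Invented example of the EQ-to-handler conversion: attach a bare
 * callback to an MD instead of allocating an event queue for it.
 */
static void ping_handler(struct lnet_event *ev)
{
	/* events that previously had to be polled off a struct lnet_eq */
	if (ev->type == LNET_EVENT_SEND || ev->type == LNET_EVENT_REPLY)
		complete_ping(ev);	/* hypothetical completion step */
}

static int ping_setup(struct lnet_ping_buffer *pbuf,
		      struct lnet_handle_md *mdh)
{
	struct lnet_md md = {
		.start     = &pbuf->pb_info,
		.length    = LNET_PING_INFO_SIZE(pbuf->pb_nnis),
		.threshold = 2,			/* one SEND + one REPLY */
		.options   = LNET_MD_TRUNCATE,
		.handler   = ping_handler,	/* was: an EQ handle */
	};

	return LNetMDBind(md, LNET_UNLINK, mdh);
}

The payoff is visible in the struct above: no ln_eq_waitq to wake, no percpt eq_refs to balance, and shutdown no longer has to drain a queue of pending events.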