 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lnet/ulnds/socklnd/usocklnd.h
 *
 * Author: Maxim Patlasov <maxim@clusterfs.com>
#include <lnet/lib-lnet.h>
#include <lnet/socklnd.h>
        cfs_list_t       tx_list;      /* necessary to form tx list */
        lnet_msg_t      *tx_lnetmsg;   /* lnet message for lnet_finalize() */
        ksock_msg_t      tx_msg;       /* buffer for wire header of ksock msg */
        int              tx_resid;     /* # of residual bytes */
        int              tx_nob;       /* # of packet bytes */
        int              tx_size;      /* size of this descriptor */
        struct iovec    *tx_iov;       /* points to tx_iova[i] */
        int              tx_niov;      /* # of packet iovec frags */
        struct iovec     tx_iova[1];   /* iov for header */
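        /*
         * Illustrative note, not taken from the original source: the
         * one-element tx_iova[] together with tx_size suggests the usual
         * trailing-array idiom, where a descriptor is allocated with room
         * for extra payload iovecs after the header iov, roughly:
         *
         *     size        = offsetof(usock_tx_t, tx_iova[1 + payload_niov]);
         *     tx          = malloc(size);
         *     tx->tx_size = size;
         */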
        cfs_socket_t        *uc_sock;        /* socket */
        int                  uc_type;        /* conn type */
        int                  uc_activeflag;  /* active side of connection? */
        int                  uc_flip;        /* is peer other endian? */
        int                  uc_state;       /* connection state */
        struct usock_peer_s *uc_peer;        /* owning peer */
        lnet_process_id_t    uc_peerid;      /* id of remote peer */
        int                  uc_pt_idx;      /* index in ud_pollthreads[] of
                                              * owning poll thread */
        lnet_ni_t           *uc_ni;          /* parent NI while accepting */
        struct usock_preq_s *uc_preq;        /* preallocated request */
        __u32                uc_peer_ip;     /* IP address of the peer */
        __u16                uc_peer_port;   /* port of the peer */
        cfs_list_t           uc_stale_list;  /* orphaned connections */
        int                  uc_rx_state;      /* message or hello state */
        ksock_hello_msg_t   *uc_rx_hello;      /* hello buffer */
        struct iovec        *uc_rx_iov;        /* points to uc_rx_iova[i] */
        struct iovec         uc_rx_iova[LNET_MAX_IOV]; /* message frags */
        int                  uc_rx_niov;       /* # frags */
        int                  uc_rx_nob_left;   /* # bytes to next hdr/body */
        int                  uc_rx_nob_wanted; /* # of bytes actually wanted */
        void                *uc_rx_lnetmsg;    /* LNET message being received */
        cfs_time_t           uc_rx_deadline;   /* when to time out */
        int                  uc_rx_flag;       /* deadline valid? */
        ksock_msg_t          uc_rx_msg;        /* message buffer */
        cfs_list_t           uc_tx_list;     /* pending txs */
        cfs_list_t           uc_zcack_list;  /* pending zc_acks */
        cfs_time_t           uc_tx_deadline; /* when to time out */
        int                  uc_tx_flag;     /* deadline valid? */
        int                  uc_sending;     /* send op is in progress */
        usock_tx_t          *uc_tx_hello;    /* fake tx with hello */
        cfs_mt_atomic_t      uc_refcount;    /* # of users */
        pthread_mutex_t      uc_lock;        /* serialize */
        int                  uc_errored;     /* a flag for lnet_notify() */
/* Allowable conn states are: */
#define UC_CONNECTING      1
#define UC_SENDING_HELLO   2
#define UC_RECEIVING_HELLO 3

/* Allowable RX states are: */
#define UC_RX_HELLO_MAGIC   1
#define UC_RX_HELLO_VERSION 2
#define UC_RX_HELLO_BODY    3
#define UC_RX_HELLO_IPS     4
#define UC_RX_KSM_HEADER    5
#define UC_RX_LNET_HEADER   6
#define UC_RX_PARSE         7
#define UC_RX_PARSE_WAIT    8
#define UC_RX_LNET_PAYLOAD  9
#define UC_RX_SKIPPING      10

#define N_CONN_TYPES 3 /* CONTROL, BULK_IN and BULK_OUT */
typedef struct usock_peer_s {
        cfs_list_t         up_list;          /* necessary to form peer list */
        lnet_process_id_t  up_peerid;        /* id of remote peer */
        usock_conn_t      *up_conns[N_CONN_TYPES]; /* conns that connect us
                                                    * with the peer */
        lnet_ni_t         *up_ni;            /* pointer to parent NI */
        __u64              up_incarnation;   /* peer's incarnation */
        int                up_incrn_is_set;  /* 0 if peer's incarnation
                                              * hasn't been set so far */
        cfs_mt_atomic_t    up_refcount;      /* # of users */
        pthread_mutex_t    up_lock;          /* serialize */
        int                up_errored;       /* a flag for lnet_notify() */
        cfs_time_t         up_last_alive;    /* when the peer was last alive */
        cfs_socket_t       *upt_notifier[2];  /* notifier sockets: 1st for
                                               * writing, 2nd for reading */
        struct pollfd      *upt_pollfd;       /* poll fds */
        int                 upt_nfds;         /* active poll fds */
        int                 upt_npollfd;      /* allocated poll fds */
        usock_conn_t      **upt_idx2conn;     /* conns corresponding to
                                               * the entries of upt_pollfd[] */
        int                *upt_skip;         /* skip chain */
        int                *upt_fd2idx;       /* index into upt_pollfd[]
                                               * for each fd */
        int                 upt_nfd2idx;      /* # of allocated elements
                                               * of upt_fd2idx[] */
        cfs_list_t          upt_stale_list;   /* list of orphaned conns */
        cfs_list_t          upt_pollrequests; /* list of poll requests */
        pthread_mutex_t     upt_pollrequests_lock; /* serialize */
        int                 upt_errno;        /* non-zero if errored */
        cfs_mt_completion_t upt_completion;   /* wait/signal facility for
                                               * synchronizing shutdown */
} usock_pollthread_t;
/* Number of elements in upt_pollfd[], upt_idx2conn[] and upt_fd2idx[]
 * at initialization time. Will be resized on demand */
#define UPT_START_SIZ 32

#define UD_PEER_HASH_SIZE 101
        int                 ud_state;        /* initialization state */
        int                 ud_npollthreads; /* # of poll threads */
        usock_pollthread_t *ud_pollthreads;  /* their state */
        int                 ud_shutdown;     /* shutdown flag */
        int                 ud_nets_count;   /* # of instances */
        cfs_list_t          ud_peers[UD_PEER_HASH_SIZE]; /* peer hash table */
        pthread_rwlock_t    ud_peers_lock;   /* serialize */

extern usock_data_t usock_data;
/* ud_state allowed values */
#define UD_STATE_INIT_NOTHING 0
#define UD_STATE_INITIALIZED  1
        int             un_peercount;   /* # of peers */
        int             un_shutdown;    /* shutdown flag */
        __u64           un_incarnation; /* my epoch */
        pthread_cond_t  un_cond;        /* condvar to wait for notifications */
        pthread_mutex_t un_lock;        /* a lock to protect un_cond */
        int ut_poll_timeout;  /* the third arg for poll(2) (seconds) */
        int ut_timeout;       /* "stuck" socket timeout (seconds) */
        int ut_npollthreads;  /* number of poll threads to spawn */
        int ut_fair_limit;    /* how many packets can we receive or transmit
                               * without calling poll(2) */
        int ut_min_bulk;      /* smallest "large" message */
        int ut_txcredits;     /* # concurrent sends */
        int ut_peertxcredits; /* # concurrent sends to 1 peer */
        int ut_socknagle;     /* is the Nagle algorithm on? */
        int ut_sockbufsiz;    /* size of socket buffers */

extern usock_tunables_t usock_tuns;
typedef struct usock_preq_s {
        int           upr_type;  /* type of requested action */
        short         upr_value; /* bitmask of POLLIN and POLLOUT bits */
        usock_conn_t *upr_conn;  /* a conn for the sake of which
                                  * the action will be performed */
        cfs_list_t    upr_list;  /* necessary to form list */
} usock_pollrequest_t;
/* Allowable poll request types are: */
#define POLL_ADD_REQUEST    1
#define POLL_DEL_REQUEST    2
#define POLL_RX_SET_REQUEST 3
#define POLL_TX_SET_REQUEST 4
#define POLL_SET_REQUEST    5
        cfs_list_t zc_list;   /* necessary to form zc_ack list */
        __u64      zc_cookie; /* zero-copy cookie */
static inline void
usocklnd_conn_addref(usock_conn_t *conn)
{
        LASSERT (cfs_mt_atomic_read(&conn->uc_refcount) > 0);
        cfs_mt_atomic_inc(&conn->uc_refcount);
}

void usocklnd_destroy_conn(usock_conn_t *conn);

static inline void
usocklnd_conn_decref(usock_conn_t *conn)
{
        LASSERT (cfs_mt_atomic_read(&conn->uc_refcount) > 0);
        if (cfs_mt_atomic_dec_and_test(&conn->uc_refcount))
                usocklnd_destroy_conn(conn);
}
static inline void
usocklnd_peer_addref(usock_peer_t *peer)
{
        LASSERT (cfs_mt_atomic_read(&peer->up_refcount) > 0);
        cfs_mt_atomic_inc(&peer->up_refcount);
}

void usocklnd_destroy_peer(usock_peer_t *peer);

static inline void
usocklnd_peer_decref(usock_peer_t *peer)
{
        LASSERT (cfs_mt_atomic_read(&peer->up_refcount) > 0);
        if (cfs_mt_atomic_dec_and_test(&peer->up_refcount))
                usocklnd_destroy_peer(peer);
}
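/*
 * Illustrative sketch only: usocklnd_example_conn_errored() below is a
 * hypothetical helper, not part of the original interface.  It shows the
 * intended usage pattern of the addref/decref helpers above (the same
 * pattern applies to peers): take a reference before touching an object so
 * it cannot be destroyed underneath the caller, serialize access with its
 * lock, then drop the reference when done.
 */
static inline int
usocklnd_example_conn_errored(usock_conn_t *conn)
{
        int errored;

        usocklnd_conn_addref(conn);          /* pin conn while we look at it */
        pthread_mutex_lock(&conn->uc_lock);  /* uc_lock serializes conn state */
        errored = conn->uc_errored;
        pthread_mutex_unlock(&conn->uc_lock);
        usocklnd_conn_decref(conn);          /* may free conn on last reference */

        return errored;
}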
static inline int
usocklnd_ip2pt_idx(__u32 ip) {
        return ip % usock_data.ud_npollthreads;
}
static inline cfs_list_t *
usocklnd_nid2peerlist(lnet_nid_t nid)
{
        unsigned int hash = ((unsigned int)nid) % UD_PEER_HASH_SIZE;

        return &usock_data.ud_peers[hash];
}
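/*
 * Illustrative sketch only: usocklnd_example_lookup_peer() is a hypothetical
 * helper, not part of the original interface (the real code uses
 * usocklnd_find_peer_locked()).  It shows how the peer hash is meant to be
 * consulted: pick the bucket with usocklnd_nid2peerlist() and walk it under
 * ud_peers_lock.  It assumes the cfs_list_for_each()/cfs_list_entry()
 * iterators from libcfs.
 */
static inline usock_peer_t *
usocklnd_example_lookup_peer(lnet_process_id_t id)
{
        cfs_list_t   *bucket = usocklnd_nid2peerlist(id.nid);
        cfs_list_t   *tmp;
        usock_peer_t *found = NULL;

        pthread_rwlock_rdlock(&usock_data.ud_peers_lock);
        cfs_list_for_each(tmp, bucket) {
                usock_peer_t *peer = cfs_list_entry(tmp, usock_peer_t, up_list);

                if (peer->up_peerid.nid == id.nid &&
                    peer->up_peerid.pid == id.pid) {
                        usocklnd_peer_addref(peer); /* caller must decref */
                        found = peer;
                        break;
                }
        }
        pthread_rwlock_unlock(&usock_data.ud_peers_lock);

        return found;
}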
int usocklnd_startup(lnet_ni_t *ni);
void usocklnd_shutdown(lnet_ni_t *ni);
int usocklnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg);
int usocklnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
                  unsigned int niov, struct iovec *iov, lnet_kiov_t *kiov,
                  unsigned int offset, unsigned int mlen, unsigned int rlen);
int usocklnd_accept(lnet_ni_t *ni, cfs_socket_t *sock);

int usocklnd_poll_thread(void *arg);
int usocklnd_add_pollrequest(usock_conn_t *conn, int type, short value);
void usocklnd_add_killrequest(usock_conn_t *conn);
int usocklnd_process_pollrequest(usock_pollrequest_t *pr,
                                 usock_pollthread_t *pt_data);
void usocklnd_execute_handlers(usock_pollthread_t *pt_data);
int usocklnd_calculate_chunk_size(int num);
void usocklnd_wakeup_pollthread(int i);

int usocklnd_notifier_handler(int fd);
void usocklnd_exception_handler(usock_conn_t *conn);
int usocklnd_read_handler(usock_conn_t *conn);
int usocklnd_read_msg(usock_conn_t *conn, int *cont_flag);
int usocklnd_handle_zc_req(usock_peer_t *peer, __u64 cookie);
int usocklnd_read_hello(usock_conn_t *conn, int *cont_flag);
int usocklnd_activeconn_hellorecv(usock_conn_t *conn);
int usocklnd_passiveconn_hellorecv(usock_conn_t *conn);
int usocklnd_write_handler(usock_conn_t *conn);
usock_tx_t *usocklnd_try_piggyback(cfs_list_t *tx_list_p,
                                   cfs_list_t *zcack_list_p);
int usocklnd_activeconn_hellosent(usock_conn_t *conn);
int usocklnd_passiveconn_hellosent(usock_conn_t *conn);
int usocklnd_send_tx(usock_conn_t *conn, usock_tx_t *tx);
int usocklnd_read_data(usock_conn_t *conn);

void usocklnd_release_poll_states(int n);
int usocklnd_base_startup();
void usocklnd_base_shutdown(int n);
__u64 usocklnd_new_incarnation();
void usocklnd_del_all_peers(lnet_ni_t *ni);
void usocklnd_del_peer_and_conns(usock_peer_t *peer);
void usocklnd_del_conns_locked(usock_peer_t *peer);

int usocklnd_conn_timed_out(usock_conn_t *conn, cfs_time_t current_time);
void usocklnd_conn_kill(usock_conn_t *conn);
void usocklnd_conn_kill_locked(usock_conn_t *conn);
usock_conn_t *usocklnd_conn_allocate();
void usocklnd_conn_free(usock_conn_t *conn);
void usocklnd_tear_peer_conn(usock_conn_t *conn);
void usocklnd_check_peer_stale(lnet_ni_t *ni, lnet_process_id_t id);
int usocklnd_create_passive_conn(lnet_ni_t *ni,
                                 cfs_socket_t *sock, usock_conn_t **connp);
int usocklnd_create_active_conn(usock_peer_t *peer, int type,
                                usock_conn_t **connp);
int usocklnd_connect_srv_mode(cfs_socket_t **sockp,
                              __u32 dst_ip, __u16 dst_port);
int usocklnd_connect_cli_mode(cfs_socket_t **sockp,
                              __u32 dst_ip, __u16 dst_port);
int usocklnd_set_sock_options(cfs_socket_t *sock);
usock_tx_t *usocklnd_create_noop_tx(__u64 cookie);
usock_tx_t *usocklnd_create_tx(lnet_msg_t *lntmsg);
void usocklnd_init_hello_msg(ksock_hello_msg_t *hello,
                             lnet_ni_t *ni, int type, lnet_nid_t peer_nid);
usock_tx_t *usocklnd_create_hello_tx(lnet_ni_t *ni,
                                     int type, lnet_nid_t peer_nid);
usock_tx_t *usocklnd_create_cr_hello_tx(lnet_ni_t *ni,
                                        int type, lnet_nid_t peer_nid);
void usocklnd_destroy_tx(lnet_ni_t *ni, usock_tx_t *tx);
void usocklnd_destroy_txlist(lnet_ni_t *ni, cfs_list_t *txlist);
void usocklnd_destroy_zcack_list(cfs_list_t *zcack_list);
void usocklnd_destroy_peer(usock_peer_t *peer);
int usocklnd_get_conn_type(lnet_msg_t *lntmsg);
int usocklnd_type2idx(int type);
usock_peer_t *usocklnd_find_peer_locked(lnet_ni_t *ni, lnet_process_id_t id);
int usocklnd_create_peer(lnet_ni_t *ni, lnet_process_id_t id,
                         usock_peer_t **peerp);
int usocklnd_find_or_create_peer(lnet_ni_t *ni, lnet_process_id_t id,
                                 usock_peer_t **peerp);
int usocklnd_find_or_create_conn(usock_peer_t *peer, int type,
                                 usock_conn_t **connp,
                                 usock_tx_t *tx, usock_zc_ack_t *zc_ack,
                                 int *send_immediately_flag);
void usocklnd_link_conn_to_peer(usock_conn_t *conn, usock_peer_t *peer, int idx);
int usocklnd_invert_type(int type);
void usocklnd_conn_new_state(usock_conn_t *conn, int new_state);
void usocklnd_cleanup_stale_conns(usock_peer_t *peer, __u64 incrn,
                                  usock_conn_t *skip_conn);

void usocklnd_rx_hellomagic_state_transition(usock_conn_t *conn);
void usocklnd_rx_helloversion_state_transition(usock_conn_t *conn);
void usocklnd_rx_hellobody_state_transition(usock_conn_t *conn);
void usocklnd_rx_helloIPs_state_transition(usock_conn_t *conn);
void usocklnd_rx_lnethdr_state_transition(usock_conn_t *conn);
void usocklnd_rx_ksmhdr_state_transition(usock_conn_t *conn);
void usocklnd_rx_skipping_state_transition(usock_conn_t *conn);