/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lnet/ulnds/socklnd/conn.c
 *
 * Author: Maxim Patlasov <maxim@clusterfs.com>
 */

#include "usocklnd.h"
/* Return 1 if the conn is timed out, 0 else */
int
usocklnd_conn_timed_out(usock_conn_t *conn, cfs_time_t current_time)
{
        if (conn->uc_tx_flag && /* sending is in progress */
            cfs_time_aftereq(current_time, conn->uc_tx_deadline))
                return 1;

        if (conn->uc_rx_flag && /* receiving is in progress */
            cfs_time_aftereq(current_time, conn->uc_rx_deadline))
                return 1;

        return 0;
}
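
/* Usage note (an illustrative sketch, not a call site from this file):
 * a checker running off the poll loop would pair the test above with the
 * kill helpers below, roughly:
 *
 *      if (usocklnd_conn_timed_out(conn, cfs_time_current()))
 *              usocklnd_conn_kill(conn);
 */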
void
usocklnd_conn_kill(usock_conn_t *conn)
{
        pthread_mutex_lock(&conn->uc_lock);
        if (conn->uc_state != UC_DEAD)
                usocklnd_conn_kill_locked(conn);
        pthread_mutex_unlock(&conn->uc_lock);
}
/* Mark the conn as DEAD and schedule its deletion */
void
usocklnd_conn_kill_locked(usock_conn_t *conn)
{
        conn->uc_rx_flag = conn->uc_tx_flag = 0;
        conn->uc_state = UC_DEAD;
        usocklnd_add_killrequest(conn);
}
usock_conn_t *
usocklnd_conn_allocate()
{
        usock_conn_t        *conn;
        usock_pollrequest_t *pr;

        LIBCFS_ALLOC (pr, sizeof(*pr));
        if (pr == NULL)
                return NULL;

        LIBCFS_ALLOC (conn, sizeof(*conn));
        if (conn == NULL) {
                LIBCFS_FREE (pr, sizeof(*pr));
                return NULL;
        }
        memset(conn, 0, sizeof(*conn));
        conn->uc_preq = pr;

        LIBCFS_ALLOC (conn->uc_rx_hello,
                      offsetof(ksock_hello_msg_t,
                               kshm_ips[LNET_MAX_INTERFACES]));
        if (conn->uc_rx_hello == NULL) {
                LIBCFS_FREE (pr, sizeof(*pr));
                LIBCFS_FREE (conn, sizeof(*conn));
                return NULL;
        }
        return conn;
}
void
usocklnd_conn_free(usock_conn_t *conn)
{
        usock_pollrequest_t *pr = conn->uc_preq;

        if (pr != NULL)
                LIBCFS_FREE (pr, sizeof(*pr));

        if (conn->uc_rx_hello != NULL)
                LIBCFS_FREE (conn->uc_rx_hello,
                             offsetof(ksock_hello_msg_t,
                                      kshm_ips[LNET_MAX_INTERFACES]));

        LIBCFS_FREE (conn, sizeof(*conn));
}
void
usocklnd_tear_peer_conn(usock_conn_t *conn)
{
        usock_peer_t     *peer = conn->uc_peer;
        int               idx = usocklnd_type2idx(conn->uc_type);
        lnet_ni_t        *ni;
        lnet_process_id_t id;
        int               decref_flag  = 0;
        int               killall_flag = 0;
        void             *rx_lnetmsg   = NULL;
        CFS_LIST_HEAD    (zombie_txs);

        if (peer == NULL) /* nothing to tear */
                return;

        pthread_mutex_lock(&peer->up_lock);
        pthread_mutex_lock(&conn->uc_lock);

        ni = peer->up_ni;
        id = peer->up_peerid;

        if (peer->up_conns[idx] == conn) {
                if (conn->uc_rx_state == UC_RX_LNET_PAYLOAD) {
                        /* change state not to finalize twice */
                        conn->uc_rx_state = UC_RX_KSM_HEADER;
                        /* stash lnetmsg while holding locks */
                        rx_lnetmsg = conn->uc_rx_lnetmsg;
                }

                /* we cannot finalize txs right now (bug #18844) */
                cfs_list_splice_init(&conn->uc_tx_list, &zombie_txs);

                peer->up_conns[idx] = NULL;
                conn->uc_peer = NULL;
                decref_flag = 1;

                if (conn->uc_errored && !peer->up_errored)
                        peer->up_errored = killall_flag = 1;

                /* prevent queueing new txs to this conn */
                conn->uc_errored = 1;
        }

        pthread_mutex_unlock(&conn->uc_lock);

        if (killall_flag)
                usocklnd_del_conns_locked(peer);

        pthread_mutex_unlock(&peer->up_lock);

        if (!decref_flag)
                return;

        if (rx_lnetmsg != NULL)
                lnet_finalize(ni, rx_lnetmsg, -EIO);

        usocklnd_destroy_txlist(ni, &zombie_txs);

        usocklnd_conn_decref(conn);
        usocklnd_peer_decref(peer);

        usocklnd_check_peer_stale(ni, id);
}
/* Remove peer from hash list if all up_conns[i] are NULL &&
 * hash table is the only consumer of the peer */
void
usocklnd_check_peer_stale(lnet_ni_t *ni, lnet_process_id_t id)
{
        usock_peer_t *peer;

        pthread_rwlock_wrlock(&usock_data.ud_peers_lock);
        peer = usocklnd_find_peer_locked(ni, id);

        if (peer == NULL) {
                pthread_rwlock_unlock(&usock_data.ud_peers_lock);
                return;
        }

        if (mt_atomic_read(&peer->up_refcount) == 2) {
                int i;

                for (i = 0; i < N_CONN_TYPES; i++)
                        LASSERT (peer->up_conns[i] == NULL);

                cfs_list_del(&peer->up_list);

                if (peer->up_errored &&
                    (peer->up_peerid.pid & LNET_PID_USERFLAG) == 0)
                        lnet_notify (peer->up_ni, peer->up_peerid.nid, 0,
                                     cfs_time_seconds(peer->up_last_alive));

                usocklnd_peer_decref(peer);
        }

        usocklnd_peer_decref(peer);
        pthread_rwlock_unlock(&usock_data.ud_peers_lock);
}
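
/* Worked refcount arithmetic behind the "== 2" test above: the hash
 * table holds one ref and usocklnd_find_peer_locked() just took a second
 * one for us, so a count of exactly 2 means no conn or other caller
 * still uses the peer. The decref inside the if-block drops the hash
 * table's ref once the peer is unhashed; the final decref drops ours. */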
/* Returns 0 on success, <0 else */
int
usocklnd_create_passive_conn(lnet_ni_t *ni,
                             cfs_socket_t *sock, usock_conn_t **connp)
{
        int           rc;
        int           peer_port;
        __u32         peer_ip;
        usock_conn_t *conn;

        rc = libcfs_sock_getaddr(sock, 1, &peer_ip, &peer_port);
        if (rc)
                return rc;

        LASSERT (peer_port >= 0); /* uc_peer_port is u16 */

        rc = usocklnd_set_sock_options(sock);
        if (rc)
                return rc;

        conn = usocklnd_conn_allocate();
        if (conn == NULL)
                return -ENOMEM;

        usocklnd_rx_hellomagic_state_transition(conn);

        conn->uc_sock = sock;
        conn->uc_peer_ip = peer_ip;
        conn->uc_peer_port = peer_port;
        conn->uc_state = UC_RECEIVING_HELLO;
        conn->uc_pt_idx = usocklnd_ip2pt_idx(peer_ip);
        conn->uc_ni = ni;
        CFS_INIT_LIST_HEAD (&conn->uc_tx_list);
        CFS_INIT_LIST_HEAD (&conn->uc_zcack_list);
        pthread_mutex_init(&conn->uc_lock, NULL);
        mt_atomic_set(&conn->uc_refcount, 1); /* 1 ref for me */

        *connp = conn;
        return 0;
}
/* Returns 0 on success, <0 else */
int
usocklnd_create_active_conn(usock_peer_t *peer, int type,
                            usock_conn_t **connp)
{
        int           rc;
        cfs_socket_t *sock;
        usock_conn_t *conn;
        __u32         dst_ip   = LNET_NIDADDR(peer->up_peerid.nid);
        __u16         dst_port = lnet_acceptor_port();

        conn = usocklnd_conn_allocate();
        if (conn == NULL)
                return -ENOMEM;

        conn->uc_tx_hello = usocklnd_create_cr_hello_tx(peer->up_ni, type,
                                                        peer->up_peerid.nid);
        if (conn->uc_tx_hello == NULL) {
                usocklnd_conn_free(conn);
                return -ENOMEM;
        }

        if (the_lnet.ln_pid & LNET_PID_USERFLAG)
                rc = usocklnd_connect_cli_mode(&sock, dst_ip, dst_port);
        else
                rc = usocklnd_connect_srv_mode(&sock, dst_ip, dst_port);

        if (rc) {
                usocklnd_destroy_tx(NULL, conn->uc_tx_hello);
                usocklnd_conn_free(conn);
                return rc;
        }

        conn->uc_tx_deadline = cfs_time_shift(usock_tuns.ut_timeout);
        conn->uc_tx_flag = 1;

        conn->uc_sock = sock;
        conn->uc_peer_ip = dst_ip;
        conn->uc_peer_port = dst_port;
        conn->uc_type = type;
        conn->uc_activeflag = 1;
        conn->uc_state = UC_CONNECTING;
        conn->uc_pt_idx = usocklnd_ip2pt_idx(dst_ip);

        conn->uc_peerid = peer->up_peerid;
        conn->uc_peer = peer;

        usocklnd_peer_addref(peer);
        CFS_INIT_LIST_HEAD (&conn->uc_tx_list);
        CFS_INIT_LIST_HEAD (&conn->uc_zcack_list);
        pthread_mutex_init(&conn->uc_lock, NULL);
        mt_atomic_set(&conn->uc_refcount, 1); /* 1 ref for me */

        *connp = conn;
        return 0;
}
/* Returns 0 on success, <0 else */
int
usocklnd_connect_srv_mode(cfs_socket_t **sockp, __u32 dst_ip, __u16 dst_port)
{
        int           port;
        cfs_socket_t *sock;
        int           rc;
        int           fatal;

        for (port = LNET_ACCEPTOR_MAX_RESERVED_PORT;
             port >= LNET_ACCEPTOR_MIN_RESERVED_PORT;
             port--) {
                /* Iterate through reserved ports. */
                rc = libcfs_sock_create(&sock, &fatal, 0, port);
                if (rc) {
                        if (fatal)
                                return rc;
                        continue;
                }

                rc = usocklnd_set_sock_options(sock);
                if (rc) {
                        libcfs_sock_release(sock);
                        return rc;
                }

                rc = libcfs_sock_connect(sock, dst_ip, dst_port);
                if (rc == 0) {
                        *sockp = sock;
                        return 0;
                }

                if (rc != -EADDRINUSE && rc != -EADDRNOTAVAIL) {
                        libcfs_sock_release(sock);
                        return rc;
                }

                /* this port is in use; try the next one */
                libcfs_sock_release(sock);
        }

        CERROR("Can't bind to any reserved port\n");
        return rc;
}
/* Returns 0 on success, <0 else */
int
usocklnd_connect_cli_mode(cfs_socket_t **sockp, __u32 dst_ip, __u16 dst_port)
{
        cfs_socket_t *sock;
        int           rc;
        int           fatal;

        rc = libcfs_sock_create(&sock, &fatal, 0, 0);
        if (rc)
                return rc;

        rc = usocklnd_set_sock_options(sock);
        if (rc) {
                libcfs_sock_release(sock);
                return rc;
        }

        rc = libcfs_sock_connect(sock, dst_ip, dst_port);
        if (rc) {
                libcfs_sock_release(sock);
                return rc;
        }

        *sockp = sock;
        return 0;
}
int
usocklnd_set_sock_options(cfs_socket_t *sock)
{
        int rc;

        rc = libcfs_sock_set_nagle(sock, usock_tuns.ut_socknagle);
        if (rc)
                return rc;

        if (usock_tuns.ut_sockbufsiz) {
                rc = libcfs_sock_set_bufsiz(sock, usock_tuns.ut_sockbufsiz);
                if (rc)
                        return rc;
        }

        return libcfs_fcntl_nonblock(sock);
}
usock_tx_t *
usocklnd_create_noop_tx(__u64 cookie)
{
        usock_tx_t *tx;

        LIBCFS_ALLOC (tx, sizeof(usock_tx_t));
        if (tx == NULL)
                return NULL;

        tx->tx_size = sizeof(usock_tx_t);
        tx->tx_lnetmsg = NULL;

        socklnd_init_msg(&tx->tx_msg, KSOCK_MSG_NOOP);
        tx->tx_msg.ksm_zc_cookies[1] = cookie;

        tx->tx_iova[0].iov_base = (void *)&tx->tx_msg;
        tx->tx_iova[0].iov_len = tx->tx_resid = tx->tx_nob =
                offsetof(ksock_msg_t, ksm_u.lnetmsg.ksnm_hdr);
        tx->tx_iov = tx->tx_iova;
        tx->tx_niov = 1;

        return tx;
}
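
/* Illustrative use (a sketch, not a call site from this file): a
 * zero-copy ACK for a remembered cookie travels as such a NOOP tx,
 * roughly:
 *
 *      usock_tx_t *tx = usocklnd_create_noop_tx(cookie);
 *      if (tx == NULL)
 *              return -ENOMEM;
 *      ... queue tx on conn->uc_tx_list under conn->uc_lock ...
 */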
usock_tx_t *
usocklnd_create_tx(lnet_msg_t *lntmsg)
{
        usock_tx_t   *tx;
        unsigned int  payload_niov = lntmsg->msg_niov;
        struct iovec *payload_iov = lntmsg->msg_iov;
        unsigned int  payload_offset = lntmsg->msg_offset;
        unsigned int  payload_nob = lntmsg->msg_len;
        int           size = offsetof(usock_tx_t,
                                      tx_iova[1 + payload_niov]);

        LIBCFS_ALLOC (tx, size);
        if (tx == NULL)
                return NULL;

        tx->tx_size = size;
        tx->tx_lnetmsg = lntmsg;

        tx->tx_resid = tx->tx_nob = sizeof(ksock_msg_t) + payload_nob;

        socklnd_init_msg(&tx->tx_msg, KSOCK_MSG_LNET);
        tx->tx_msg.ksm_u.lnetmsg.ksnm_hdr = lntmsg->msg_hdr;
        tx->tx_iova[0].iov_base = (void *)&tx->tx_msg;
        tx->tx_iova[0].iov_len = sizeof(ksock_msg_t);
        tx->tx_iov = tx->tx_iova;

        tx->tx_niov = 1 +
                lnet_extract_iov(payload_niov, &tx->tx_iov[1],
                                 payload_niov, payload_iov,
                                 payload_offset, payload_nob);

        return tx;
}
void
usocklnd_init_hello_msg(ksock_hello_msg_t *hello,
                        lnet_ni_t *ni, int type, lnet_nid_t peer_nid)
{
        usock_net_t *net = (usock_net_t *)ni->ni_data;

        hello->kshm_magic   = LNET_PROTO_MAGIC;
        hello->kshm_version = KSOCK_PROTO_V2;
        hello->kshm_nips    = 0;
        hello->kshm_ctype   = type;

        hello->kshm_dst_incarnation = 0; /* not used */
        hello->kshm_src_incarnation = net->un_incarnation;

        hello->kshm_src_pid = the_lnet.ln_pid;
        hello->kshm_src_nid = ni->ni_nid;
        hello->kshm_dst_nid = peer_nid;
        hello->kshm_dst_pid = 0; /* not used */
}
usock_tx_t *
usocklnd_create_hello_tx(lnet_ni_t *ni,
                         int type, lnet_nid_t peer_nid)
{
        usock_tx_t        *tx;
        int                size;
        ksock_hello_msg_t *hello;

        size = sizeof(usock_tx_t) + offsetof(ksock_hello_msg_t, kshm_ips);
        LIBCFS_ALLOC (tx, size);
        if (tx == NULL)
                return NULL;

        tx->tx_size = size;
        tx->tx_lnetmsg = NULL;

        hello = (ksock_hello_msg_t *)&tx->tx_iova[1];
        usocklnd_init_hello_msg(hello, ni, type, peer_nid);

        tx->tx_iova[0].iov_base = (void *)hello;
        tx->tx_iova[0].iov_len = tx->tx_resid = tx->tx_nob =
                offsetof(ksock_hello_msg_t, kshm_ips);
        tx->tx_iov = tx->tx_iova;
        tx->tx_niov = 1;

        return tx;
}
usock_tx_t *
usocklnd_create_cr_hello_tx(lnet_ni_t *ni,
                            int type, lnet_nid_t peer_nid)
{
        usock_tx_t              *tx;
        int                      size;
        lnet_acceptor_connreq_t *cr;
        ksock_hello_msg_t       *hello;

        size = sizeof(usock_tx_t) +
                sizeof(lnet_acceptor_connreq_t) +
                offsetof(ksock_hello_msg_t, kshm_ips);
        LIBCFS_ALLOC (tx, size);
        if (tx == NULL)
                return NULL;

        tx->tx_size = size;
        tx->tx_lnetmsg = NULL;

        cr = (lnet_acceptor_connreq_t *)&tx->tx_iova[1];
        memset(cr, 0, sizeof(*cr));
        cr->acr_magic   = LNET_PROTO_ACCEPTOR_MAGIC;
        cr->acr_version = LNET_PROTO_ACCEPTOR_VERSION;
        cr->acr_nid     = peer_nid;

        hello = (ksock_hello_msg_t *)((char *)cr + sizeof(*cr));
        usocklnd_init_hello_msg(hello, ni, type, peer_nid);

        tx->tx_iova[0].iov_base = (void *)cr;
        tx->tx_iova[0].iov_len = tx->tx_resid = tx->tx_nob =
                sizeof(lnet_acceptor_connreq_t) +
                offsetof(ksock_hello_msg_t, kshm_ips);
        tx->tx_iov = tx->tx_iova;
        tx->tx_niov = 1;

        return tx;
}
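
/* Wire image produced above (sketch): the acceptor connreq immediately
 * followed by the V2 hello, sent from a single iovec:
 *
 *      +-------------------------+------------------------------------+
 *      | lnet_acceptor_connreq_t | ksock_hello_msg_t (up to kshm_ips) |
 *      +-------------------------+------------------------------------+
 */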
void
usocklnd_destroy_tx(lnet_ni_t *ni, usock_tx_t *tx)
{
        lnet_msg_t *lnetmsg = tx->tx_lnetmsg;
        int         rc = (tx->tx_resid == 0) ? 0 : -EIO;

        LASSERT (ni != NULL || lnetmsg == NULL);

        LIBCFS_FREE (tx, tx->tx_size);

        if (lnetmsg != NULL) /* NOOP and hello go without lnetmsg */
                lnet_finalize(ni, lnetmsg, rc);
}
void
usocklnd_destroy_txlist(lnet_ni_t *ni, cfs_list_t *txlist)
{
        usock_tx_t *tx;

        while (!cfs_list_empty(txlist)) {
                tx = cfs_list_entry(txlist->next, usock_tx_t, tx_list);
                cfs_list_del(&tx->tx_list);

                usocklnd_destroy_tx(ni, tx);
        }
}
void
usocklnd_destroy_zcack_list(cfs_list_t *zcack_list)
{
        usock_zc_ack_t *zcack;

        while (!cfs_list_empty(zcack_list)) {
                zcack = cfs_list_entry(zcack_list->next, usock_zc_ack_t,
                                       zc_list);
                cfs_list_del(&zcack->zc_list);

                LIBCFS_FREE (zcack, sizeof(*zcack));
        }
}
void
usocklnd_destroy_peer(usock_peer_t *peer)
{
        usock_net_t *net = peer->up_ni->ni_data;
        int          i;

        for (i = 0; i < N_CONN_TYPES; i++)
                LASSERT (peer->up_conns[i] == NULL);

        LIBCFS_FREE (peer, sizeof (*peer));

        pthread_mutex_lock(&net->un_lock);
        if (--net->un_peercount == 0)
                pthread_cond_signal(&net->un_cond);
        pthread_mutex_unlock(&net->un_lock);
}
void
usocklnd_destroy_conn(usock_conn_t *conn)
{
        LASSERT (conn->uc_peer == NULL || conn->uc_ni == NULL);

        if (conn->uc_rx_state == UC_RX_LNET_PAYLOAD) {
                LASSERT (conn->uc_peer != NULL);
                lnet_finalize(conn->uc_peer->up_ni, conn->uc_rx_lnetmsg, -EIO);
        }

        if (!cfs_list_empty(&conn->uc_tx_list)) {
                LASSERT (conn->uc_peer != NULL);
                usocklnd_destroy_txlist(conn->uc_peer->up_ni,
                                        &conn->uc_tx_list);
        }

        usocklnd_destroy_zcack_list(&conn->uc_zcack_list);

        if (conn->uc_peer != NULL)
                usocklnd_peer_decref(conn->uc_peer);

        if (conn->uc_ni != NULL)
                lnet_ni_decref(conn->uc_ni);

        if (conn->uc_tx_hello)
                usocklnd_destroy_tx(NULL, conn->uc_tx_hello);

        usocklnd_conn_free(conn);
}
int
usocklnd_get_conn_type(lnet_msg_t *lntmsg)
{
        int nob;

        if (the_lnet.ln_pid & LNET_PID_USERFLAG)
                return SOCKLND_CONN_ANY;

        nob = sizeof(ksock_msg_t) + lntmsg->msg_len;

        if (nob >= usock_tuns.ut_min_bulk)
                return SOCKLND_CONN_BULK_OUT;
        else
                return SOCKLND_CONN_CONTROL;
}
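
/* Worked example (assuming ut_min_bulk is left at a small default such
 * as 1024 bytes): a 0-byte payload gives nob == sizeof(ksock_msg_t) and
 * selects SOCKLND_CONN_CONTROL, while a 1 MB payload easily clears
 * ut_min_bulk and selects SOCKLND_CONN_BULK_OUT. */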
int usocklnd_type2idx(int type)
{
        switch (type) {
        case SOCKLND_CONN_ANY:
        case SOCKLND_CONN_CONTROL:
                return 0;
        case SOCKLND_CONN_BULK_IN:
                return 1;
        case SOCKLND_CONN_BULK_OUT:
                return 2;
        default:
                LBUG();
        }
}
usock_peer_t *
usocklnd_find_peer_locked(lnet_ni_t *ni, lnet_process_id_t id)
{
        cfs_list_t   *peer_list = usocklnd_nid2peerlist(id.nid);
        cfs_list_t   *tmp;
        usock_peer_t *peer;

        cfs_list_for_each (tmp, peer_list) {
                peer = cfs_list_entry (tmp, usock_peer_t, up_list);

                if (peer->up_ni != ni)
                        continue;

                if (peer->up_peerid.nid != id.nid ||
                    peer->up_peerid.pid != id.pid)
                        continue;

                usocklnd_peer_addref(peer);
                return peer;
        }
        return NULL;
}
int
usocklnd_create_peer(lnet_ni_t *ni, lnet_process_id_t id,
                     usock_peer_t **peerp)
{
        usock_net_t  *net = ni->ni_data;
        usock_peer_t *peer;
        int           i;

        LIBCFS_ALLOC (peer, sizeof (*peer));
        if (peer == NULL)
                return -ENOMEM;

        for (i = 0; i < N_CONN_TYPES; i++)
                peer->up_conns[i] = NULL;

        peer->up_peerid       = id;
        peer->up_ni           = ni;
        peer->up_incrn_is_set = 0;
        peer->up_errored      = 0;
        peer->up_last_alive   = 0;
        mt_atomic_set(&peer->up_refcount, 1); /* 1 ref for caller */
        pthread_mutex_init(&peer->up_lock, NULL);

        pthread_mutex_lock(&net->un_lock);
        net->un_peercount++;
        pthread_mutex_unlock(&net->un_lock);

        *peerp = peer;
        return 0;
}
/* Safely create new peer if needed. Save result in *peerp.
 * Returns 0 on success, <0 else */
int
usocklnd_find_or_create_peer(lnet_ni_t *ni, lnet_process_id_t id,
                             usock_peer_t **peerp)
{
        int           rc;
        usock_peer_t *peer;
        usock_peer_t *peer2;
        usock_net_t  *net = ni->ni_data;

        pthread_rwlock_rdlock(&usock_data.ud_peers_lock);
        peer = usocklnd_find_peer_locked(ni, id);
        pthread_rwlock_unlock(&usock_data.ud_peers_lock);

        if (peer != NULL)
                goto find_or_create_peer_done;

        rc = usocklnd_create_peer(ni, id, &peer);
        if (rc)
                return rc;

        pthread_rwlock_wrlock(&usock_data.ud_peers_lock);
        peer2 = usocklnd_find_peer_locked(ni, id);
        if (peer2 == NULL) {
                if (net->un_shutdown) {
                        pthread_rwlock_unlock(&usock_data.ud_peers_lock);
                        usocklnd_peer_decref(peer); /* should destroy peer */
                        CERROR("Can't create peer: network shutdown\n");
                        return -ESHUTDOWN;
                }

                /* peer table will take 1 of my refs on peer */
                usocklnd_peer_addref(peer);
                cfs_list_add_tail (&peer->up_list,
                                   usocklnd_nid2peerlist(id.nid));
        } else {
                usocklnd_peer_decref(peer); /* should destroy peer */
                peer = peer2;
        }
        pthread_rwlock_unlock(&usock_data.ud_peers_lock);

  find_or_create_peer_done:
        *peerp = peer;
        return 0;
}
/* NB: both peer and conn locks are held */
static int
usocklnd_enqueue_zcack(usock_conn_t *conn, usock_zc_ack_t *zc_ack)
{
        if (conn->uc_state == UC_READY &&
            cfs_list_empty(&conn->uc_tx_list) &&
            cfs_list_empty(&conn->uc_zcack_list) &&
            !conn->uc_sending) {
                int rc = usocklnd_add_pollrequest(conn, POLL_TX_SET_REQUEST,
                                                  POLLOUT);
                if (rc != 0)
                        return rc;
        }

        cfs_list_add_tail(&zc_ack->zc_list, &conn->uc_zcack_list);
        return 0;
}
/* NB: both peer and conn locks are held
 * NB: if sending isn't in progress, the caller *MUST* send tx
 * immediately after we return */
static void
usocklnd_enqueue_tx(usock_conn_t *conn, usock_tx_t *tx,
                    int *send_immediately)
{
        if (conn->uc_state == UC_READY &&
            cfs_list_empty(&conn->uc_tx_list) &&
            cfs_list_empty(&conn->uc_zcack_list) &&
            !conn->uc_sending) {
                conn->uc_sending = 1;
                *send_immediately = 1;
                return;
        }

        *send_immediately = 0;
        cfs_list_add_tail(&tx->tx_list, &conn->uc_tx_list);
}
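
/* The *MUST* above, from the caller's side (illustrative sketch; locking
 * and error handling abbreviated):
 *
 *      int send_now;
 *
 *      usocklnd_enqueue_tx(conn, tx, &send_now);
 *      pthread_mutex_unlock(&conn->uc_lock);
 *      pthread_mutex_unlock(&peer->up_lock);
 *      if (send_now)
 *              ... write tx to conn->uc_sock right away ...
 */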
/* Safely create new conn if needed. Save result in *connp.
 * Returns 0 on success, <0 else */
int
usocklnd_find_or_create_conn(usock_peer_t *peer, int type,
                             usock_conn_t **connp,
                             usock_tx_t *tx, usock_zc_ack_t *zc_ack,
                             int *send_immediately)
{
        usock_conn_t *conn;
        int           idx;
        int           rc;
        lnet_pid_t    userflag = peer->up_peerid.pid & LNET_PID_USERFLAG;

        if (userflag)
                type = SOCKLND_CONN_ANY;

        idx = usocklnd_type2idx(type);

        pthread_mutex_lock(&peer->up_lock);
        if (peer->up_conns[idx] != NULL) {
                conn = peer->up_conns[idx];
                LASSERT(conn->uc_type == type);
        } else {
                if (userflag) {
                        CERROR("Refusing to create a connection to "
                               "userspace process %s\n",
                               libcfs_id2str(peer->up_peerid));
                        rc = -EHOSTUNREACH;
                        goto find_or_create_conn_failed;
                }

                rc = usocklnd_create_active_conn(peer, type, &conn);
                if (rc) {
                        peer->up_errored = 1;
                        usocklnd_del_conns_locked(peer);
                        goto find_or_create_conn_failed;
                }

                /* peer takes 1 of conn refcount */
                usocklnd_link_conn_to_peer(conn, peer, idx);

                rc = usocklnd_add_pollrequest(conn, POLL_ADD_REQUEST, POLLOUT);
                if (rc) {
                        peer->up_conns[idx] = NULL;
                        usocklnd_conn_decref(conn); /* should destroy conn */
                        goto find_or_create_conn_failed;
                }
                usocklnd_wakeup_pollthread(conn->uc_pt_idx);
        }

        pthread_mutex_lock(&conn->uc_lock);
        LASSERT(conn->uc_peer == peer);

        LASSERT(tx == NULL || zc_ack == NULL);
        if (tx != NULL) {
                /* usocklnd_tear_peer_conn() could signal us stop queueing */
                if (conn->uc_errored) {
                        rc = -EIO;
                        pthread_mutex_unlock(&conn->uc_lock);
                        goto find_or_create_conn_failed;
                }
                usocklnd_enqueue_tx(conn, tx, send_immediately);
        } else {
                rc = usocklnd_enqueue_zcack(conn, zc_ack);
                if (rc != 0) {
                        usocklnd_conn_kill_locked(conn);
                        pthread_mutex_unlock(&conn->uc_lock);
                        goto find_or_create_conn_failed;
                }
        }
        pthread_mutex_unlock(&conn->uc_lock);

        usocklnd_conn_addref(conn);
        pthread_mutex_unlock(&peer->up_lock);

        *connp = conn;
        return 0;

  find_or_create_conn_failed:
        pthread_mutex_unlock(&peer->up_lock);
        return rc;
}
void
usocklnd_link_conn_to_peer(usock_conn_t *conn, usock_peer_t *peer, int idx)
{
        peer->up_conns[idx] = conn;
        peer->up_errored    = 0; /* this new fresh conn will try to
                                  * revitalize even a stale errored peer */
}
int
usocklnd_invert_type(int type)
{
        switch (type) {
        case SOCKLND_CONN_ANY:
        case SOCKLND_CONN_CONTROL:
                return type;
        case SOCKLND_CONN_BULK_IN:
                return SOCKLND_CONN_BULK_OUT;
        case SOCKLND_CONN_BULK_OUT:
                return SOCKLND_CONN_BULK_IN;
        default:
                return SOCKLND_CONN_NONE;
        }
}
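
/* Example of the symmetry this encodes: if the active side opens a
 * SOCKLND_CONN_BULK_OUT conn, the passive side must file the same socket
 * under usocklnd_invert_type(SOCKLND_CONN_BULK_OUT), i.e. in its
 * SOCKLND_CONN_BULK_IN slot; ANY and CONTROL map to themselves. */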
void
usocklnd_conn_new_state(usock_conn_t *conn, int new_state)
{
        pthread_mutex_lock(&conn->uc_lock);
        if (conn->uc_state != UC_DEAD)
                conn->uc_state = new_state;
        pthread_mutex_unlock(&conn->uc_lock);
}
/* NB: peer is locked by caller */
void
usocklnd_cleanup_stale_conns(usock_peer_t *peer, __u64 incrn,
                             usock_conn_t *skip_conn)
{
        int i;

        if (!peer->up_incrn_is_set) {
                peer->up_incarnation = incrn;
                peer->up_incrn_is_set = 1;
                return;
        }

        if (peer->up_incarnation == incrn)
                return;

        peer->up_incarnation = incrn;

        for (i = 0; i < N_CONN_TYPES; i++) {
                usock_conn_t *conn = peer->up_conns[i];

                if (conn == NULL || conn == skip_conn)
                        continue;

                pthread_mutex_lock(&conn->uc_lock);
                LASSERT (conn->uc_peer == peer);
                conn->uc_peer = NULL;
                peer->up_conns[i] = NULL;
                if (conn->uc_state != UC_DEAD)
                        usocklnd_conn_kill_locked(conn);
                pthread_mutex_unlock(&conn->uc_lock);

                usocklnd_conn_decref(conn);
                usocklnd_peer_decref(peer);
        }
}
/* RX state transition to UC_RX_HELLO_MAGIC: update RX part to receive
 * MAGIC part of hello and set uc_rx_state
 */
void
usocklnd_rx_hellomagic_state_transition(usock_conn_t *conn)
{
        LASSERT(conn->uc_rx_hello != NULL);

        conn->uc_rx_niov = 1;
        conn->uc_rx_iov = conn->uc_rx_iova;
        conn->uc_rx_iov[0].iov_base = &conn->uc_rx_hello->kshm_magic;
        conn->uc_rx_iov[0].iov_len =
                conn->uc_rx_nob_wanted =
                conn->uc_rx_nob_left =
                sizeof(conn->uc_rx_hello->kshm_magic);

        conn->uc_rx_state = UC_RX_HELLO_MAGIC;

        conn->uc_rx_flag = 1; /* waiting for incoming hello */
        conn->uc_rx_deadline = cfs_time_shift(usock_tuns.ut_timeout);
}
/* RX state transition to UC_RX_HELLO_VERSION: update RX part to receive
 * VERSION part of hello and set uc_rx_state
 */
void
usocklnd_rx_helloversion_state_transition(usock_conn_t *conn)
{
        LASSERT(conn->uc_rx_hello != NULL);

        conn->uc_rx_niov = 1;
        conn->uc_rx_iov = conn->uc_rx_iova;
        conn->uc_rx_iov[0].iov_base = &conn->uc_rx_hello->kshm_version;
        conn->uc_rx_iov[0].iov_len =
                conn->uc_rx_nob_wanted =
                conn->uc_rx_nob_left =
                sizeof(conn->uc_rx_hello->kshm_version);

        conn->uc_rx_state = UC_RX_HELLO_VERSION;
}
/* RX state transition to UC_RX_HELLO_BODY: update RX part to receive
 * the rest of hello and set uc_rx_state
 */
void
usocklnd_rx_hellobody_state_transition(usock_conn_t *conn)
{
        LASSERT(conn->uc_rx_hello != NULL);

        conn->uc_rx_niov = 1;
        conn->uc_rx_iov = conn->uc_rx_iova;
        conn->uc_rx_iov[0].iov_base = &conn->uc_rx_hello->kshm_src_nid;
        conn->uc_rx_iov[0].iov_len =
                conn->uc_rx_nob_wanted =
                conn->uc_rx_nob_left =
                offsetof(ksock_hello_msg_t, kshm_ips) -
                offsetof(ksock_hello_msg_t, kshm_src_nid);

        conn->uc_rx_state = UC_RX_HELLO_BODY;
}
/* RX state transition to UC_RX_HELLO_IPS: update RX part to receive
 * array of IPs and set uc_rx_state
 */
void
usocklnd_rx_helloIPs_state_transition(usock_conn_t *conn)
{
        LASSERT(conn->uc_rx_hello != NULL);

        conn->uc_rx_niov = 1;
        conn->uc_rx_iov = conn->uc_rx_iova;
        conn->uc_rx_iov[0].iov_base = &conn->uc_rx_hello->kshm_ips;
        conn->uc_rx_iov[0].iov_len =
                conn->uc_rx_nob_wanted =
                conn->uc_rx_nob_left =
                conn->uc_rx_hello->kshm_nips *
                sizeof(conn->uc_rx_hello->kshm_ips[0]);

        conn->uc_rx_state = UC_RX_HELLO_IPS;
}
/* RX state transition to UC_RX_LNET_HEADER: update RX part to receive
 * LNET header and set uc_rx_state
 */
void
usocklnd_rx_lnethdr_state_transition(usock_conn_t *conn)
{
        conn->uc_rx_niov = 1;
        conn->uc_rx_iov = conn->uc_rx_iova;
        conn->uc_rx_iov[0].iov_base = &conn->uc_rx_msg.ksm_u.lnetmsg;
        conn->uc_rx_iov[0].iov_len =
                conn->uc_rx_nob_wanted =
                conn->uc_rx_nob_left =
                sizeof(ksock_lnet_msg_t);

        conn->uc_rx_state = UC_RX_LNET_HEADER;
        conn->uc_rx_flag = 1;
}
/* RX state transition to UC_RX_KSM_HEADER: update RX part to receive
 * KSM header and set uc_rx_state
 */
void
usocklnd_rx_ksmhdr_state_transition(usock_conn_t *conn)
{
        conn->uc_rx_niov = 1;
        conn->uc_rx_iov = conn->uc_rx_iova;
        conn->uc_rx_iov[0].iov_base = &conn->uc_rx_msg;
        conn->uc_rx_iov[0].iov_len =
                conn->uc_rx_nob_wanted =
                conn->uc_rx_nob_left =
                offsetof(ksock_msg_t, ksm_u);

        conn->uc_rx_state = UC_RX_KSM_HEADER;
        conn->uc_rx_flag = 0;
}
/* RX state transition to UC_RX_SKIPPING: update RX part for
 * skipping and set uc_rx_state
 */
void
usocklnd_rx_skipping_state_transition(usock_conn_t *conn)
{
        static char skip_buffer[4096];

        int          nob;
        unsigned int niov = 0;
        int          skipped = 0;
        int          nob_to_skip = conn->uc_rx_nob_left;

        LASSERT(nob_to_skip != 0);

        conn->uc_rx_iov = conn->uc_rx_iova;

        /* Set up to skip as much as possible now. If there's more left
         * (ran out of iov entries) we'll get called again */
        do {
                nob = MIN (nob_to_skip, sizeof(skip_buffer));

                conn->uc_rx_iov[niov].iov_base = skip_buffer;
                conn->uc_rx_iov[niov].iov_len  = nob;
                niov++;
                skipped += nob;
                nob_to_skip -= nob;

        } while (nob_to_skip != 0 && /* mustn't overflow conn's rx iov */
                 niov < sizeof(conn->uc_rx_iova) / sizeof (struct iovec));

        conn->uc_rx_niov = niov;
        conn->uc_rx_nob_wanted = skipped;

        conn->uc_rx_state = UC_RX_SKIPPING;
}
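
/* Worked example for the loop above: skipping 10000 bytes builds iovs of
 * 4096 + 4096 + 1808 bytes (niov == 3), all aimed at the same static
 * skip_buffer, so the incoming bytes are read and discarded. */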