/*
 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 *
 * Author: Zach Brown <zab@zabbo.net>
 * Author: Peter J. Braam <braam@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 * Author: Eric Barton <eric@bartonsoftware.com>
 *
 * This file is part of Lustre, https://wiki.whamcloud.com/
 *
 * Portals is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Portals is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Portals; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include "socklnd.h"
/*
 * Protocol entries:
 *   pro_send_hello       : send hello message
 *   pro_recv_hello       : receive hello message
 *   pro_pack             : pack message header
 *   pro_unpack           : unpack message header
 *   pro_queue_tx_zcack() : called holding BH lock: kss_lock;
 *                          return 1 if the ACK is piggybacked, otherwise 0
 *   pro_queue_tx_msg()   : called holding BH lock: kss_lock;
 *                          return the NOOP ZC-ACK tx replaced by this message
 *                          (its cookie is now piggybacked), or NULL
 *   pro_handle_zcreq()   : handler of incoming ZC-REQ
 *   pro_handle_zcack()   : handler of incoming ZC-ACK
 *   pro_match_tx()       : called holding glock
 */
static struct ksock_tx *
ksocknal_queue_tx_msg_v1(struct ksock_conn *conn, struct ksock_tx *tx_msg)
{
        /* V1.x, just enqueue it */
        list_add_tail(&tx_msg->tx_list, &conn->ksnc_tx_queue);
        return NULL;
}
void
ksocknal_next_tx_carrier(struct ksock_conn *conn)
{
        struct ksock_tx *tx = conn->ksnc_tx_carrier;

        /* Called holding BH lock: conn->ksnc_scheduler->kss_lock */
        LASSERT(!list_empty(&conn->ksnc_tx_queue));
        LASSERT(tx != NULL);

        /* Next TX that can carry ZC-ACK or LNet message */
        if (tx->tx_list.next == &conn->ksnc_tx_queue) {
                /* no more packets queued */
                conn->ksnc_tx_carrier = NULL;
        } else {
                conn->ksnc_tx_carrier = list_entry(tx->tx_list.next,
                                                   struct ksock_tx, tx_list);
                LASSERT(conn->ksnc_tx_carrier->tx_msg.ksm_type ==
                        tx->tx_msg.ksm_type);
        }
}
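
/*
 * Note on the "tx carrier" (added summary, derived from the code below):
 * ksnc_tx_carrier points at the queued tx that can still absorb work -
 * either a NOOP that can collect ZC-ACK cookies, or the next LNet message
 * a cookie can be piggybacked on.  The pro_queue_tx_* handlers below try
 * the carrier first and only queue tx_ack as a separate NOOP when
 * piggybacking is impossible.
 */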
static int
ksocknal_queue_tx_zcack_v2(struct ksock_conn *conn,
                           struct ksock_tx *tx_ack, __u64 cookie)
{
        struct ksock_tx *tx = conn->ksnc_tx_carrier;

        LASSERT(tx_ack == NULL ||
                tx_ack->tx_msg.ksm_type == KSOCK_MSG_NOOP);

        /*
         * Enqueue or piggyback tx_ack / cookie:
         * . if no tx can piggyback the cookie of tx_ack (or the cookie),
         *   just enqueue tx_ack (if tx_ack != NULL) and return 0.
         * . if a tx can piggyback the cookie of tx_ack (or the cookie),
         *   piggyback the cookie and return 1.
         */
        if (tx == NULL) {
                if (tx_ack != NULL) {
                        list_add_tail(&tx_ack->tx_list,
                                      &conn->ksnc_tx_queue);
                        conn->ksnc_tx_carrier = tx_ack;
                }
                return 0;
        }

        if (tx->tx_msg.ksm_type == KSOCK_MSG_NOOP) {
                /* tx is noop zc-ack, can't piggyback zc-ack cookie */
                if (tx_ack != NULL)
                        list_add_tail(&tx_ack->tx_list,
                                      &conn->ksnc_tx_queue);
                return 0;
        }

        LASSERT(tx->tx_msg.ksm_type == KSOCK_MSG_LNET);
        LASSERT(tx->tx_msg.ksm_zc_cookies[1] == 0);

        if (tx_ack != NULL)
                cookie = tx_ack->tx_msg.ksm_zc_cookies[1];

        /* piggyback the zc-ack cookie */
        tx->tx_msg.ksm_zc_cookies[1] = cookie;
        /* move on to the next TX which can carry cookie */
        ksocknal_next_tx_carrier(conn);

        return 1;
}
static struct ksock_tx *
ksocknal_queue_tx_msg_v2(struct ksock_conn *conn, struct ksock_tx *tx_msg)
{
        struct ksock_tx *tx = conn->ksnc_tx_carrier;

        /*
         * Enqueue tx_msg:
         * . If there is no NOOP on the connection, just enqueue
         *   tx_msg and return NULL.
         * . If there is a NOOP on the connection, piggyback its cookie
         *   on tx_msg, replace the NOOP tx, and return the NOOP tx.
         */
        if (tx == NULL) { /* nothing on queue */
                list_add_tail(&tx_msg->tx_list, &conn->ksnc_tx_queue);
                conn->ksnc_tx_carrier = tx_msg;
                return NULL;
        }

        if (tx->tx_msg.ksm_type == KSOCK_MSG_LNET) { /* nothing to carry */
                list_add_tail(&tx_msg->tx_list, &conn->ksnc_tx_queue);
                return NULL;
        }

        LASSERT(tx->tx_msg.ksm_type == KSOCK_MSG_NOOP);

        /* There is a noop zc-ack that can be piggybacked */
        tx_msg->tx_msg.ksm_zc_cookies[1] = tx->tx_msg.ksm_zc_cookies[1];
        ksocknal_next_tx_carrier(conn);

        /* use tx_msg to replace the noop zc-ack packet */
        list_splice(&tx->tx_list, &tx_msg->tx_list);

        return tx;
}
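
/*
 * Note (added): list_splice(&tx->tx_list, &tx_msg->tx_list) above treats the
 * queued NOOP as a list "head" and moves every other entry of the circular
 * queue onto tx_msg, which effectively substitutes tx_msg for the NOOP in
 * place while the NOOP drops out of the queue.  The NOOP's ZC-ACK cookie has
 * already been copied onto tx_msg, so nothing is lost.
 */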
static int
ksocknal_queue_tx_zcack_v3(struct ksock_conn *conn,
                           struct ksock_tx *tx_ack, __u64 cookie)
{
        struct ksock_tx *tx;

        if (conn->ksnc_type != SOCKLND_CONN_ACK)
                return ksocknal_queue_tx_zcack_v2(conn, tx_ack, cookie);

        /* non-blocking ZC-ACK (to router) */
        LASSERT(tx_ack == NULL ||
                tx_ack->tx_msg.ksm_type == KSOCK_MSG_NOOP);

        if ((tx = conn->ksnc_tx_carrier) == NULL) {
                if (tx_ack != NULL) {
                        list_add_tail(&tx_ack->tx_list,
                                      &conn->ksnc_tx_queue);
                        conn->ksnc_tx_carrier = tx_ack;
                }
                return 0;
        }

        /* conn->ksnc_tx_carrier != NULL */

        if (tx_ack != NULL)
                cookie = tx_ack->tx_msg.ksm_zc_cookies[1];

        if (cookie == SOCKNAL_KEEPALIVE_PING) /* ignore keepalive PING */
                return 1;

        if (tx->tx_msg.ksm_zc_cookies[1] == SOCKNAL_KEEPALIVE_PING) {
                /* replace the keepalive PING with a real ACK */
                LASSERT(tx->tx_msg.ksm_zc_cookies[0] == 0);
                tx->tx_msg.ksm_zc_cookies[1] = cookie;
                return 1;
        }

        if (cookie == tx->tx_msg.ksm_zc_cookies[0] ||
            cookie == tx->tx_msg.ksm_zc_cookies[1]) {
                CWARN("%s: duplicated ZC cookie: %llu\n",
                      libcfs_id2str(conn->ksnc_peer->ksnp_id), cookie);
                return 1; /* XXX return error in the future */
        }

        if (tx->tx_msg.ksm_zc_cookies[0] == 0) {
                /* NOOP tx has only one ZC-ACK cookie, can carry at least one more */
                if (tx->tx_msg.ksm_zc_cookies[1] > cookie) {
                        tx->tx_msg.ksm_zc_cookies[0] = tx->tx_msg.ksm_zc_cookies[1];
                        tx->tx_msg.ksm_zc_cookies[1] = cookie;
                } else {
                        tx->tx_msg.ksm_zc_cookies[0] = cookie;
                }

                if (tx->tx_msg.ksm_zc_cookies[0] - tx->tx_msg.ksm_zc_cookies[1] > 2) {
                        /* not likely to carry more ACKs, skip it to simplify logic */
                        ksocknal_next_tx_carrier(conn);
                }
                return 1;
        }

        /* takes two or more cookies already */

        if (tx->tx_msg.ksm_zc_cookies[0] > tx->tx_msg.ksm_zc_cookies[1]) {
                __u64 tmp = 0;

                /* two separated cookies: (a+2, a) or (a+1, a) */
                LASSERT(tx->tx_msg.ksm_zc_cookies[0] -
                        tx->tx_msg.ksm_zc_cookies[1] <= 2);

                if (tx->tx_msg.ksm_zc_cookies[0] -
                    tx->tx_msg.ksm_zc_cookies[1] == 2) {
                        if (cookie == tx->tx_msg.ksm_zc_cookies[1] + 1)
                                tmp = cookie;
                } else if (cookie == tx->tx_msg.ksm_zc_cookies[1] - 1) {
                        tmp = tx->tx_msg.ksm_zc_cookies[1];
                } else if (cookie == tx->tx_msg.ksm_zc_cookies[0] + 1) {
                        tmp = tx->tx_msg.ksm_zc_cookies[0];
                }

                if (tmp != 0) {
                        /* range of cookies */
                        tx->tx_msg.ksm_zc_cookies[0] = tmp - 1;
                        tx->tx_msg.ksm_zc_cookies[1] = tmp + 1;
                        return 1;
                }
        } else {
                /* ksm_zc_cookies[0] < ksm_zc_cookies[1], it is a range of cookies */
                if (cookie >= tx->tx_msg.ksm_zc_cookies[0] &&
                    cookie <= tx->tx_msg.ksm_zc_cookies[1]) {
                        CWARN("%s: duplicated ZC cookie: %llu\n",
                              libcfs_id2str(conn->ksnc_peer->ksnp_id), cookie);
                        return 1; /* XXX: return error in the future */
                }

                if (cookie == tx->tx_msg.ksm_zc_cookies[1] + 1) {
                        tx->tx_msg.ksm_zc_cookies[1] = cookie;
                        return 1;
                }

                if (cookie == tx->tx_msg.ksm_zc_cookies[0] - 1) {
                        tx->tx_msg.ksm_zc_cookies[0] = cookie;
                        return 1;
                }
        }

        /* failed to piggyback ZC-ACK */
        if (tx_ack != NULL) {
                list_add_tail(&tx_ack->tx_list, &conn->ksnc_tx_queue);
                /* the next tx can piggyback at least 1 ACK */
                ksocknal_next_tx_carrier(conn);
        }
        return 0;
}
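
/*
 * Worked example (added for clarity, derived from the logic above): suppose
 * the carrier NOOP holds the single cookie 7, i.e. ksm_zc_cookies[] == {0, 7}.
 * An ACK for cookie 6 arrives: since 7 > 6 the pair becomes {7, 6}, the
 * "two separated cookies" form.  An ACK for cookie 8 then matches
 * cookie == ksm_zc_cookies[0] + 1, so the pair is rewritten as the range
 * {6, 8} (ksm_zc_cookies[0] < ksm_zc_cookies[1] means "every cookie from [0]
 * to [1] inclusive").  A later ACK for 9 simply extends the range to {6, 9}.
 * Cookies that cannot be merged fall through to the standalone-NOOP path at
 * the end of the function.
 */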
static int
ksocknal_match_tx(struct ksock_conn *conn, struct ksock_tx *tx, int nonblk)
{
        int nob;

#if SOCKNAL_VERSION_DEBUG
        if (!*ksocknal_tunables.ksnd_typed_conns)
                return SOCKNAL_MATCH_YES;
#endif

        if (tx == NULL || tx->tx_lnetmsg == NULL) {
                /* noop packet */
                nob = offsetof(struct ksock_msg, ksm_u);
        } else {
                nob = tx->tx_lnetmsg->msg_len +
                      ((conn->ksnc_proto == &ksocknal_protocol_v1x) ?
                       sizeof(struct lnet_hdr) : sizeof(struct ksock_msg));
        }

        /* default checking for typed connection */
        switch (conn->ksnc_type) {
        default:
                CERROR("ksnc_type bad: %u\n", conn->ksnc_type);
                LBUG();
        case SOCKLND_CONN_ANY:
                return SOCKNAL_MATCH_YES;

        case SOCKLND_CONN_BULK_IN:
                return SOCKNAL_MATCH_MAY;

        case SOCKLND_CONN_BULK_OUT:
                if (nob < *ksocknal_tunables.ksnd_min_bulk)
                        return SOCKNAL_MATCH_MAY;
                else
                        return SOCKNAL_MATCH_YES;

        case SOCKLND_CONN_CONTROL:
                if (nob >= *ksocknal_tunables.ksnd_min_bulk)
                        return SOCKNAL_MATCH_MAY;
                else
                        return SOCKNAL_MATCH_YES;
        }
}
static int
ksocknal_match_tx_v3(struct ksock_conn *conn, struct ksock_tx *tx, int nonblk)
{
        int nob;

        if (tx == NULL || tx->tx_lnetmsg == NULL)
                nob = offsetof(struct ksock_msg, ksm_u);
        else
                nob = tx->tx_lnetmsg->msg_len + sizeof(struct ksock_msg);

        switch (conn->ksnc_type) {
        default:
                CERROR("ksnc_type bad: %u\n", conn->ksnc_type);
                LBUG();
        case SOCKLND_CONN_ANY:
                return SOCKNAL_MATCH_NO;

        case SOCKLND_CONN_ACK:
                if (nonblk)
                        return SOCKNAL_MATCH_YES;
                else if (tx == NULL || tx->tx_lnetmsg == NULL)
                        return SOCKNAL_MATCH_MAY;
                else
                        return SOCKNAL_MATCH_NO;

        case SOCKLND_CONN_BULK_OUT:
                if (nonblk)
                        return SOCKNAL_MATCH_NO;
                else if (nob < *ksocknal_tunables.ksnd_min_bulk)
                        return SOCKNAL_MATCH_MAY;
                else
                        return SOCKNAL_MATCH_YES;

        case SOCKLND_CONN_CONTROL:
                if (nonblk)
                        return SOCKNAL_MATCH_NO;
                else if (nob >= *ksocknal_tunables.ksnd_min_bulk)
                        return SOCKNAL_MATCH_MAY;
                else
                        return SOCKNAL_MATCH_YES;
        }
}
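
/*
 * Note (added, interpretation of the return values): SOCKNAL_MATCH_YES
 * appears to mean "this tx belongs on this connection", SOCKNAL_MATCH_MAY
 * "usable, but a better-typed connection is preferred", and
 * SOCKNAL_MATCH_NO "do not use".  The V3 variant steers small/NOOP traffic
 * toward CONTROL connections, payloads of at least ksnd_min_bulk bytes
 * toward BULK_OUT, and non-blocking ZC-ACKs (nonblk != 0) onto the
 * dedicated ACK connection.
 */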
/* (Sink) handle incoming ZC request from sender */
static int
ksocknal_handle_zcreq(struct ksock_conn *c, __u64 cookie, int remote)
{
        struct ksock_peer_ni *peer_ni = c->ksnc_peer;
        struct ksock_conn *conn;
        struct ksock_tx *tx;
        int rc;

        read_lock(&ksocknal_data.ksnd_global_lock);

        conn = ksocknal_find_conn_locked(peer_ni, NULL, !!remote);
        if (conn != NULL) {
                struct ksock_sched *sched = conn->ksnc_scheduler;

                LASSERT(conn->ksnc_proto->pro_queue_tx_zcack != NULL);

                spin_lock_bh(&sched->kss_lock);
                rc = conn->ksnc_proto->pro_queue_tx_zcack(conn, NULL, cookie);
                spin_unlock_bh(&sched->kss_lock);

                if (rc) { /* piggybacked */
                        read_unlock(&ksocknal_data.ksnd_global_lock);
                        return 0;
                }
        }

        read_unlock(&ksocknal_data.ksnd_global_lock);

        /* ACK connection is not ready, or can't piggyback the ACK */
        tx = ksocknal_alloc_tx_noop(cookie, !!remote);
        if (tx == NULL)
                return -ENOMEM;

        if ((rc = ksocknal_launch_packet(peer_ni->ksnp_ni, tx, peer_ni->ksnp_id)) == 0)
                return 0;

        ksocknal_free_tx(tx);
        return rc;
}
/* (Sender) handle ZC_ACK from sink */
static int
ksocknal_handle_zcack(struct ksock_conn *conn, __u64 cookie1, __u64 cookie2)
{
        struct ksock_peer_ni *peer_ni = conn->ksnc_peer;
        struct ksock_tx *tx;
        struct ksock_tx *tmp;
        LIST_HEAD(zlist);
        int count;

        if (cookie1 == 0)
                cookie1 = cookie2;

        count = (cookie1 > cookie2) ? 2 : (cookie2 - cookie1 + 1);

        if (cookie2 == SOCKNAL_KEEPALIVE_PING &&
            conn->ksnc_proto == &ksocknal_protocol_v3x) {
                /* keepalive PING for V3.x, just ignore it */
                return count == 1 ? 0 : -EPROTO;
        }

        spin_lock(&peer_ni->ksnp_lock);
        list_for_each_entry_safe(tx, tmp,
                                 &peer_ni->ksnp_zc_req_list, tx_zc_list) {
                __u64 c = tx->tx_msg.ksm_zc_cookies[0];

                if (c == cookie1 || c == cookie2 ||
                    (cookie1 < c && c < cookie2)) {
                        tx->tx_msg.ksm_zc_cookies[0] = 0;
                        list_move(&tx->tx_zc_list, &zlist);

                        if (--count == 0)
                                break;
                }
        }
        spin_unlock(&peer_ni->ksnp_lock);

        while (!list_empty(&zlist)) {
                tx = list_entry(zlist.next, struct ksock_tx, tx_zc_list);
                list_del(&tx->tx_zc_list);
                ksocknal_tx_decref(tx);
        }

        return count == 0 ? 0 : -EPROTO;
}
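
/*
 * Note (added): the (cookie1, cookie2) pair mirrors the encoding built up in
 * ksocknal_queue_tx_zcack_v3(): cookie1 <= cookie2 acknowledges every ZC
 * request cookie in that inclusive range (count = cookie2 - cookie1 + 1),
 * while cookie1 > cookie2 acknowledges exactly the two listed cookies.
 */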
static int
ksocknal_send_hello_v1(struct ksock_conn *conn, struct ksock_hello_msg *hello)
{
        struct socket *sock = conn->ksnc_sock;
        struct lnet_hdr *hdr;
        struct lnet_magicversion *hmv;
        int rc;
        int i;

        BUILD_BUG_ON(sizeof(struct lnet_magicversion) !=
                     offsetof(struct lnet_hdr, src_nid));

        LIBCFS_ALLOC(hdr, sizeof(*hdr));
        if (hdr == NULL) {
                CERROR("Can't allocate struct lnet_hdr\n");
                return -ENOMEM;
        }

        hmv = (struct lnet_magicversion *)&hdr->dest_nid;

        /* Re-organize V2.x message header to V1.x (struct lnet_hdr)
         * header and send out */
        hmv->magic         = cpu_to_le32(LNET_PROTO_TCP_MAGIC);
        hmv->version_major = cpu_to_le16(KSOCK_PROTO_V1_MAJOR);
        hmv->version_minor = cpu_to_le16(KSOCK_PROTO_V1_MINOR);

        if (the_lnet.ln_testprotocompat) {
                /* single-shot proto check */
                if (test_and_clear_bit(0, &the_lnet.ln_testprotocompat))
                        hmv->version_major++;   /* just different! */

                if (test_and_clear_bit(1, &the_lnet.ln_testprotocompat))
                        hmv->magic = LNET_PROTO_MAGIC;
        }

        hdr->src_nid        = cpu_to_le64(hello->kshm_src_nid);
        hdr->src_pid        = cpu_to_le32(hello->kshm_src_pid);
        hdr->type           = cpu_to_le32(LNET_MSG_HELLO);
        hdr->payload_length = cpu_to_le32(hello->kshm_nips * sizeof(__u32));
        hdr->msg.hello.type = cpu_to_le32(hello->kshm_ctype);
        hdr->msg.hello.incarnation = cpu_to_le64(hello->kshm_src_incarnation);

        rc = lnet_sock_write(sock, hdr, sizeof(*hdr), lnet_acceptor_timeout());
        if (rc != 0) {
                CNETERR("Error %d sending HELLO hdr to %pISp\n",
                        rc, &conn->ksnc_peeraddr);
                goto out;
        }

        if (hello->kshm_nips == 0)
                goto out;

        for (i = 0; i < (int)hello->kshm_nips; i++)
                hello->kshm_ips[i] = __cpu_to_le32(hello->kshm_ips[i]);

        rc = lnet_sock_write(sock, hello->kshm_ips,
                             hello->kshm_nips * sizeof(__u32),
                             lnet_acceptor_timeout());
        if (rc != 0) {
                CNETERR("Error %d sending HELLO payload (%d) to %pISp\n",
                        rc, hello->kshm_nips,
                        &conn->ksnc_peeraddr);
        }
out:
        LIBCFS_FREE(hdr, sizeof(*hdr));

        return rc;
}
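
/*
 * Note (added): for V1.x peers the hello is sent as a plain LNET_MSG_HELLO
 * wrapped in struct lnet_hdr, with the magic and version overlaid on the
 * dest_nid field (struct lnet_magicversion) so that V1.x peers can recognise
 * it; V2/V3 peers (below) exchange struct ksock_hello_msg directly.
 */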
static int
ksocknal_send_hello_v2(struct ksock_conn *conn, struct ksock_hello_msg *hello)
{
        struct socket *sock = conn->ksnc_sock;
        int rc;

        hello->kshm_magic   = LNET_PROTO_MAGIC;
        hello->kshm_version = conn->ksnc_proto->pro_version;

        if (the_lnet.ln_testprotocompat) {
                /* single-shot proto check */
                if (test_and_clear_bit(0, &the_lnet.ln_testprotocompat))
                        hello->kshm_version++;   /* just different! */
        }

        rc = lnet_sock_write(sock, hello,
                             offsetof(struct ksock_hello_msg, kshm_ips),
                             lnet_acceptor_timeout());
        if (rc != 0) {
                CNETERR("Error %d sending HELLO hdr to %pISp\n",
                        rc, &conn->ksnc_peeraddr);
                return rc;
        }

        if (hello->kshm_nips == 0)
                return 0;

        rc = lnet_sock_write(sock, hello->kshm_ips,
                             hello->kshm_nips * sizeof(__u32),
                             lnet_acceptor_timeout());
        if (rc != 0) {
                CNETERR("Error %d sending HELLO payload (%d) to %pISp\n", rc,
                        hello->kshm_nips, &conn->ksnc_peeraddr);
        }

        return rc;
}
static int
ksocknal_recv_hello_v1(struct ksock_conn *conn, struct ksock_hello_msg *hello,
                       int timeout)
{
        struct socket *sock = conn->ksnc_sock;
        struct lnet_hdr *hdr;
        int rc;
        int i;

        LIBCFS_ALLOC(hdr, sizeof(*hdr));
        if (hdr == NULL) {
                CERROR("Can't allocate struct lnet_hdr\n");
                return -ENOMEM;
        }

        rc = lnet_sock_read(sock, &hdr->src_nid,
                            sizeof(*hdr) - offsetof(struct lnet_hdr, src_nid),
                            timeout);
        if (rc != 0) {
                CERROR("Error %d reading rest of HELLO hdr from %pIS\n",
                       rc, &conn->ksnc_peeraddr);
                LASSERT(rc < 0 && rc != -EALREADY);
                goto out;
        }

        /* ...and check we got what we expected */
        if (hdr->type != cpu_to_le32(LNET_MSG_HELLO)) {
                CERROR("Expecting a HELLO hdr, but got type %d from %pIS\n",
                       le32_to_cpu(hdr->type),
                       &conn->ksnc_peeraddr);
                rc = -EPROTO;
                goto out;
        }

        hello->kshm_src_nid         = le64_to_cpu(hdr->src_nid);
        hello->kshm_src_pid         = le32_to_cpu(hdr->src_pid);
        hello->kshm_src_incarnation = le64_to_cpu(hdr->msg.hello.incarnation);
        hello->kshm_ctype           = le32_to_cpu(hdr->msg.hello.type);
        hello->kshm_nips            = le32_to_cpu(hdr->payload_length) /
                                      sizeof(__u32);

        if (hello->kshm_nips > LNET_INTERFACES_NUM) {
                CERROR("Bad nips %d from ip %pIS\n",
                       hello->kshm_nips, &conn->ksnc_peeraddr);
                rc = -EPROTO;
                goto out;
        }

        if (hello->kshm_nips == 0)
                goto out;

        rc = lnet_sock_read(sock, hello->kshm_ips,
                            hello->kshm_nips * sizeof(__u32), timeout);
        if (rc != 0) {
                CERROR("Error %d reading IPs from ip %pIS\n",
                       rc, &conn->ksnc_peeraddr);
                LASSERT(rc < 0 && rc != -EALREADY);
                goto out;
        }

        for (i = 0; i < (int)hello->kshm_ips; i++)
                ;
        for (i = 0; i < (int)hello->kshm_nips; i++) {
                hello->kshm_ips[i] = __le32_to_cpu(hello->kshm_ips[i]);

                if (hello->kshm_ips[i] == 0) {
                        CERROR("Zero IP[%d] from ip %pIS\n",
                               i, &conn->ksnc_peeraddr);
                        rc = -EPROTO;
                        break;
                }
        }
out:
        LIBCFS_FREE(hdr, sizeof(*hdr));

        return rc;
}
static int
ksocknal_recv_hello_v2(struct ksock_conn *conn, struct ksock_hello_msg *hello,
                       int timeout)
{
        struct socket *sock = conn->ksnc_sock;
        int rc;
        int i;

        if (hello->kshm_magic == LNET_PROTO_MAGIC)
                conn->ksnc_flip = 0;
        else
                conn->ksnc_flip = 1;

        rc = lnet_sock_read(sock, &hello->kshm_src_nid,
                            offsetof(struct ksock_hello_msg, kshm_ips) -
                            offsetof(struct ksock_hello_msg, kshm_src_nid),
                            timeout);
        if (rc != 0) {
                CERROR("Error %d reading HELLO from %pIS\n",
                       rc, &conn->ksnc_peeraddr);
                LASSERT(rc < 0 && rc != -EALREADY);
                return rc;
        }

        if (conn->ksnc_flip) {
                __swab32s(&hello->kshm_src_pid);
                __swab64s(&hello->kshm_src_nid);
                __swab32s(&hello->kshm_dst_pid);
                __swab64s(&hello->kshm_dst_nid);
                __swab64s(&hello->kshm_src_incarnation);
                __swab64s(&hello->kshm_dst_incarnation);
                __swab32s(&hello->kshm_ctype);
                __swab32s(&hello->kshm_nips);
        }

        if (hello->kshm_nips > LNET_INTERFACES_NUM) {
                CERROR("Bad nips %d from ip %pIS\n",
                       hello->kshm_nips, &conn->ksnc_peeraddr);
                return -EPROTO;
        }

        if (hello->kshm_nips == 0)
                return 0;

        rc = lnet_sock_read(sock, hello->kshm_ips,
                            hello->kshm_nips * sizeof(__u32), timeout);
        if (rc != 0) {
                CERROR("Error %d reading IPs from ip %pIS\n",
                       rc, &conn->ksnc_peeraddr);
                LASSERT(rc < 0 && rc != -EALREADY);
                return rc;
        }

        for (i = 0; i < (int)hello->kshm_nips; i++) {
                if (conn->ksnc_flip)
                        __swab32s(&hello->kshm_ips[i]);

                if (hello->kshm_ips[i] == 0) {
                        CERROR("Zero IP[%d] from ip %pIS\n",
                               i, &conn->ksnc_peeraddr);
                        return -EPROTO;
                }
        }

        return 0;
}
static void
ksocknal_pack_msg_v1(struct ksock_tx *tx)
{
        /* V1.x has no KSOCK_MSG_NOOP */
        LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
        LASSERT(tx->tx_lnetmsg != NULL);

        tx->tx_hdr.iov_base = (void *)&tx->tx_lnetmsg->msg_hdr;
        tx->tx_hdr.iov_len  = sizeof(struct lnet_hdr);

        tx->tx_nob = tx->tx_lnetmsg->msg_len + sizeof(struct lnet_hdr);
        tx->tx_resid = tx->tx_nob;
}
static void
ksocknal_pack_msg_v2(struct ksock_tx *tx)
{
        tx->tx_hdr.iov_base = (void *)&tx->tx_msg;

        if (tx->tx_lnetmsg != NULL) {
                LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);

                tx->tx_msg.ksm_u.lnetmsg.ksnm_hdr = tx->tx_lnetmsg->msg_hdr;
                tx->tx_hdr.iov_len = sizeof(struct ksock_msg);
                tx->tx_resid = tx->tx_nob = sizeof(struct ksock_msg) +
                                            tx->tx_lnetmsg->msg_len;
        } else {
                LASSERT(tx->tx_msg.ksm_type == KSOCK_MSG_NOOP);

                tx->tx_hdr.iov_len = offsetof(struct ksock_msg,
                                              ksm_u.lnetmsg.ksnm_hdr);
                tx->tx_resid = tx->tx_nob = offsetof(struct ksock_msg,
                                                     ksm_u.lnetmsg.ksnm_hdr);
        }
        /* Don't checksum before start sending, because packet can be
         * piggybacked with ACK */
}
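
/*
 * Note (added): on the wire a V2/V3 KSOCK_MSG_NOOP is therefore just the
 * ksock_msg header up to (but not including) ksm_u, while a KSOCK_MSG_LNET
 * carries the full ksock_msg (with the embedded lnet_hdr) followed by
 * msg_len bytes of payload.
 */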
static void
ksocknal_unpack_msg_v1(struct ksock_msg *msg)
{
        msg->ksm_csum = 0;
        msg->ksm_type = KSOCK_MSG_LNET;
        msg->ksm_zc_cookies[0] = msg->ksm_zc_cookies[1] = 0;
}

static void
ksocknal_unpack_msg_v2(struct ksock_msg *msg)
{
        return;  /* Do nothing */
}
const struct ksock_proto ksocknal_protocol_v1x =
{
        .pro_version        = KSOCK_PROTO_V1,
        .pro_send_hello     = ksocknal_send_hello_v1,
        .pro_recv_hello     = ksocknal_recv_hello_v1,
        .pro_pack           = ksocknal_pack_msg_v1,
        .pro_unpack         = ksocknal_unpack_msg_v1,
        .pro_queue_tx_msg   = ksocknal_queue_tx_msg_v1,
        .pro_handle_zcreq   = NULL,
        .pro_handle_zcack   = NULL,
        .pro_queue_tx_zcack = NULL,
        .pro_match_tx       = ksocknal_match_tx
};
const struct ksock_proto ksocknal_protocol_v2x =
{
        .pro_version        = KSOCK_PROTO_V2,
        .pro_send_hello     = ksocknal_send_hello_v2,
        .pro_recv_hello     = ksocknal_recv_hello_v2,
        .pro_pack           = ksocknal_pack_msg_v2,
        .pro_unpack         = ksocknal_unpack_msg_v2,
        .pro_queue_tx_msg   = ksocknal_queue_tx_msg_v2,
        .pro_queue_tx_zcack = ksocknal_queue_tx_zcack_v2,
        .pro_handle_zcreq   = ksocknal_handle_zcreq,
        .pro_handle_zcack   = ksocknal_handle_zcack,
        .pro_match_tx       = ksocknal_match_tx
};
const struct ksock_proto ksocknal_protocol_v3x =
{
        .pro_version        = KSOCK_PROTO_V3,
        .pro_send_hello     = ksocknal_send_hello_v2,
        .pro_recv_hello     = ksocknal_recv_hello_v2,
        .pro_pack           = ksocknal_pack_msg_v2,
        .pro_unpack         = ksocknal_unpack_msg_v2,
        .pro_queue_tx_msg   = ksocknal_queue_tx_msg_v2,
        .pro_queue_tx_zcack = ksocknal_queue_tx_zcack_v3,
        .pro_handle_zcreq   = ksocknal_handle_zcreq,
        .pro_handle_zcack   = ksocknal_handle_zcack,
        .pro_match_tx       = ksocknal_match_tx_v3
};
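
/*
 * Usage sketch (added): callers dispatch through the descriptor attached to
 * the connection rather than naming a version-specific handler, e.g.
 *
 *        conn->ksnc_proto->pro_pack(tx);
 *        rc = conn->ksnc_proto->pro_queue_tx_zcack(conn, NULL, cookie);
 *
 * as ksocknal_handle_zcreq() does above; V1.x connections must check for the
 * NULL zero-copy hooks before calling them.
 */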