1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright 2008 Sun Microsystems, Inc. All rights reserved
6 * Author: Zach Brown <zab@zabbo.net>
7 * Author: Peter J. Braam <braam@clusterfs.com>
8 * Author: Phil Schwan <phil@clusterfs.com>
9 * Author: Eric Barton <eric@bartonsoftware.com>
11 * This file is part of Portals, http://www.sf.net/projects/sandiaportals/
13 * Portals is free software; you can redistribute it and/or
14 * modify it under the terms of version 2 of the GNU General Public
15 * License as published by the Free Software Foundation.
17 * Portals is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
22 * You should have received a copy of the GNU General Public License
23 * along with Portals; if not, write to the Free Software
24 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
31 * pro_send_hello : send hello message
32 * pro_recv_hello : receive hello message
33 * pro_pack : pack message header
34 * pro_unpack : unpack message header
35 * pro_queue_tx_zcack() : Called holding BH lock: kss_lock
36 * return 1 if ACK is piggybacked, otherwise return 0
37 * pro_queue_tx_msg() : Called holding BH lock: kss_lock
38 * return the ACK that piggybacked by my message, or NULL
39 * pro_handle_zcreq() : handler of incoming ZC-REQ
40 * pro_handle_zcack() : handler of incoming ZC-ACK
41 * pro_match_tx() : Called holding glock
/* V1.x pro_queue_tx_msg: protocol V1.x has no KSOCK_MSG_NOOP, so there is
 * never a ZC-ACK to piggyback — the tx is simply appended to the send queue.
 * Per the contract notes at the top of the file, returns the piggybacked
 * ACK tx or NULL; for V1.x that is always NULL.
 * Called holding BH lock: kss_lock. */
45 ksocknal_queue_tx_msg_v1(ksock_conn_t *conn, ksock_tx_t *tx_msg)
47 /* V1.x, just enqueue it */
48 list_add_tail(&tx_msg->tx_list, &conn->ksnc_tx_queue);
/* Advance conn->ksnc_tx_carrier to the next tx on ksnc_tx_queue that can
 * carry a ZC-ACK cookie or LNet message, or set it to NULL when the current
 * carrier is the last queued packet.  Caller must hold the scheduler's
 * BH lock (kss_lock). */
53 ksocknal_next_tx_carrier(ksock_conn_t *conn)
55 ksock_tx_t *tx = conn->ksnc_tx_carrier;
57 /* Called holding BH lock: conn->ksnc_scheduler->kss_lock */
58 LASSERT (!list_empty(&conn->ksnc_tx_queue));
61 /* Next TX that can carry ZC-ACK or LNet message */
62 if (tx->tx_list.next == &conn->ksnc_tx_queue) {
63 /* no more packets queued */
64 conn->ksnc_tx_carrier = NULL;
/* otherwise the next list entry becomes the new carrier */
66 conn->ksnc_tx_carrier = list_entry(tx->tx_list.next, ksock_tx_t, tx_list);
/* queued txs of different ksm_type are never interleaved on this queue */
67 LASSERT (conn->ksnc_tx_carrier->tx_msg.ksm_type == tx->tx_msg.ksm_type);
/* V2.x pro_queue_tx_zcack: piggyback a ZC-ACK cookie on the current tx
 * carrier when possible, otherwise enqueue tx_ack as a standalone NOOP.
 * Called holding BH lock: kss_lock.  Per the contract notes at the top of
 * the file, returns 1 when the ACK was piggybacked, 0 otherwise. */
72 ksocknal_queue_tx_zcack_v2(ksock_conn_t *conn,
73 ksock_tx_t *tx_ack, __u64 cookie)
75 ksock_tx_t *tx = conn->ksnc_tx_carrier;
/* only a NOOP tx may be passed in as an explicit ZC-ACK carrier */
77 LASSERT (tx_ack == NULL ||
78 tx_ack->tx_msg.ksm_type == KSOCK_MSG_NOOP);
81 * Enqueue or piggyback tx_ack / cookie
82 * . no tx can piggyback cookie of tx_ack (or cookie), just
83 * enqueue the tx_ack (if tx_ack != NULL) and return NULL.
84 * . There is tx can piggyback cookie of tx_ack (or cookie),
85 * piggyback the cookie and return the tx.
/* empty queue: the enqueued tx_ack becomes the new carrier */
89 list_add_tail(&tx_ack->tx_list, &conn->ksnc_tx_queue);
90 conn->ksnc_tx_carrier = tx_ack;
95 if (tx->tx_msg.ksm_type == KSOCK_MSG_NOOP) {
96 /* tx is noop zc-ack, can't piggyback zc-ack cookie */
98 list_add_tail(&tx_ack->tx_list, &conn->ksnc_tx_queue);
/* carrier must be an LNet message with a free second cookie slot */
102 LASSERT(tx->tx_msg.ksm_type == KSOCK_MSG_LNET);
103 LASSERT(tx->tx_msg.ksm_zc_cookies[1] == 0);
/* when tx_ack was supplied, take its cookie rather than the argument */
106 cookie = tx_ack->tx_msg.ksm_zc_cookies[1];
108 /* piggyback the zc-ack cookie */
109 tx->tx_msg.ksm_zc_cookies[1] = cookie;
110 /* move on to the next TX which can carry cookie */
111 ksocknal_next_tx_carrier(conn);
/* V2.x pro_queue_tx_msg: enqueue an LNet message; if a NOOP ZC-ACK is the
 * current carrier, steal its cookie into tx_msg and replace the NOOP on
 * the queue (the replaced NOOP tx is handed back to the caller per the
 * contract notes at the top of the file).
 * Called holding BH lock: kss_lock. */
117 ksocknal_queue_tx_msg_v2(ksock_conn_t *conn, ksock_tx_t *tx_msg)
119 ksock_tx_t *tx = conn->ksnc_tx_carrier;
123 * . If there is no NOOP on the connection, just enqueue
124 * tx_msg and return NULL
125 * . If there is NOOP on the connection, piggyback the cookie
126 * and replace the NOOP tx, and return the NOOP tx.
128 if (tx == NULL) { /* nothing on queue */
129 list_add_tail(&tx_msg->tx_list, &conn->ksnc_tx_queue);
130 conn->ksnc_tx_carrier = tx_msg;
134 if (tx->tx_msg.ksm_type == KSOCK_MSG_LNET) { /* nothing to carry */
135 list_add_tail(&tx_msg->tx_list, &conn->ksnc_tx_queue);
139 LASSERT (tx->tx_msg.ksm_type == KSOCK_MSG_NOOP);
141 /* There is a noop zc-ack can be piggybacked */
142 tx_msg->tx_msg.ksm_zc_cookies[1] = tx->tx_msg.ksm_zc_cookies[1];
/* advance the carrier before unlinking the NOOP it points at */
143 ksocknal_next_tx_carrier(conn);
145 /* use new_tx to replace the noop zc-ack packet */
146 list_add(&tx_msg->tx_list, &tx->tx_list);
147 list_del(&tx->tx_list);
/* V3.x pro_queue_tx_zcack: like V2.x, but on a dedicated ACK connection a
 * single NOOP can accumulate multiple cookies — either two discrete cookies
 * in ksm_zc_cookies[0]/[1] (with [0] > [1]) or an inclusive range (with
 * [0] < [1]).  Keepalive PINGs (SOCKNAL_KEEPALIVE_PING) are replaced or
 * ignored.  Called holding BH lock: kss_lock; returns 1 if the ACK was
 * absorbed/piggybacked, 0 if a standalone tx_ack had to be enqueued. */
153 ksocknal_queue_tx_zcack_v3(ksock_conn_t *conn,
154 ksock_tx_t *tx_ack, __u64 cookie)
/* non-ACK connections keep the simpler V2.x single-cookie behaviour */
158 if (conn->ksnc_type != SOCKLND_CONN_ACK)
159 return ksocknal_queue_tx_zcack_v2(conn, tx_ack, cookie);
161 /* non-blocking ZC-ACK (to router) */
162 LASSERT (tx_ack == NULL ||
163 tx_ack->tx_msg.ksm_type == KSOCK_MSG_NOOP);
165 if ((tx = conn->ksnc_tx_carrier) == NULL) {
166 if (tx_ack != NULL) {
167 list_add_tail(&tx_ack->tx_list, &conn->ksnc_tx_queue);
168 conn->ksnc_tx_carrier = tx_ack;
173 /* conn->ksnc_tx_carrier != NULL */
176 cookie = tx_ack->tx_msg.ksm_zc_cookies[1];
178 if (cookie == SOCKNAL_KEEPALIVE_PING) /* ignore keepalive PING */
181 if (tx->tx_msg.ksm_zc_cookies[1] == SOCKNAL_KEEPALIVE_PING) {
182 /* replace the keepalive PING with a real ACK */
183 LASSERT (tx->tx_msg.ksm_zc_cookies[0] == 0);
184 tx->tx_msg.ksm_zc_cookies[1] = cookie;
/* duplicate of a cookie already carried: warn but report success */
188 if (cookie == tx->tx_msg.ksm_zc_cookies[0] ||
189 cookie == tx->tx_msg.ksm_zc_cookies[1]) {
190 CWARN("%s: duplicated ZC cookie: "LPU64"\n",
191 libcfs_id2str(conn->ksnc_peer->ksnp_id), cookie);
192 return 1; /* XXX return error in the future */
195 if (tx->tx_msg.ksm_zc_cookies[0] == 0) {
196 /* NOOP tx has only one ZC-ACK cookie, can carry at least one more */
/* keep the larger cookie in slot [0] so [0] > [1] marks "discrete pair" */
197 if (tx->tx_msg.ksm_zc_cookies[1] > cookie) {
198 tx->tx_msg.ksm_zc_cookies[0] = tx->tx_msg.ksm_zc_cookies[1];
199 tx->tx_msg.ksm_zc_cookies[1] = cookie;
201 tx->tx_msg.ksm_zc_cookies[0] = cookie;
204 if (tx->tx_msg.ksm_zc_cookies[0] - tx->tx_msg.ksm_zc_cookies[1] > 2) {
205 /* not likely to carry more ACKs, skip it to simplify logic */
206 ksocknal_next_tx_carrier(conn);
212 /* takes two or more cookies already */
214 if (tx->tx_msg.ksm_zc_cookies[0] > tx->tx_msg.ksm_zc_cookies[1]) {
217 /* two separated cookies: (a+2, a) or (a+1, a) */
218 LASSERT (tx->tx_msg.ksm_zc_cookies[0] -
219 tx->tx_msg.ksm_zc_cookies[1] <= 2);
/* the new cookie can close a gap or extend the pair into a range */
221 if (tx->tx_msg.ksm_zc_cookies[0] -
222 tx->tx_msg.ksm_zc_cookies[1] == 2) {
223 if (cookie == tx->tx_msg.ksm_zc_cookies[1] + 1)
225 } else if (cookie == tx->tx_msg.ksm_zc_cookies[1] - 1) {
226 tmp = tx->tx_msg.ksm_zc_cookies[1];
227 } else if (cookie == tx->tx_msg.ksm_zc_cookies[0] + 1) {
228 tmp = tx->tx_msg.ksm_zc_cookies[0];
232 /* range of cookies */
233 tx->tx_msg.ksm_zc_cookies[0] = tmp - 1;
234 tx->tx_msg.ksm_zc_cookies[1] = tmp + 1;
239 /* ksm_zc_cookies[0] < ksm_zc_cookies[1], it is range of cookies */
240 if (cookie >= tx->tx_msg.ksm_zc_cookies[0] &&
241 cookie <= tx->tx_msg.ksm_zc_cookies[1]) {
242 CWARN("%s: duplicated ZC cookie: "LPU64"\n",
243 libcfs_id2str(conn->ksnc_peer->ksnp_id), cookie);
244 return 1; /* XXX: return error in the future */
/* grow the range by one at either end if the cookie is adjacent */
247 if (cookie == tx->tx_msg.ksm_zc_cookies[1] + 1) {
248 tx->tx_msg.ksm_zc_cookies[1] = cookie;
252 if (cookie == tx->tx_msg.ksm_zc_cookies[0] - 1) {
253 tx->tx_msg.ksm_zc_cookies[0] = cookie;
258 /* failed to piggyback ZC-ACK */
259 if (tx_ack != NULL) {
260 list_add_tail(&tx_ack->tx_list, &conn->ksnc_tx_queue);
261 /* the next tx can piggyback at least 1 ACK */
262 ksocknal_next_tx_carrier(conn);
/* pro_match_tx (V1.x/V2.x): decide whether tx may be sent on conn.
 * Computes the on-wire size (nob) of tx, then matches it against the
 * connection type: bulk connections prefer large payloads, control
 * connections prefer small ones.  Returns SOCKNAL_MATCH_YES / _MAY
 * (the _NO and default-case paths fall in lines not visible here).
 * Called holding glock per the contract notes at the top of the file. */
269 ksocknal_match_tx(ksock_conn_t *conn, ksock_tx_t *tx, int nonblk)
/* debug tunable: untyped connections accept anything */
273 #if SOCKNAL_VERSION_DEBUG
274 if (!*ksocknal_tunables.ksnd_typed_conns)
275 return SOCKNAL_MATCH_YES;
/* NOOP/ZC-ACK txs have no LNet payload: wire size is just the msg header */
278 if (tx == NULL || tx->tx_lnetmsg == NULL) {
280 nob = offsetof(ksock_msg_t, ksm_u);
/* V1.x frames with lnet_hdr_t, V2.x+ with the full ksock_msg_t */
282 nob = tx->tx_lnetmsg->msg_len +
283 ((conn->ksnc_proto == &ksocknal_protocol_v1x) ?
284 sizeof(lnet_hdr_t) : sizeof(ksock_msg_t));
287 /* default checking for typed connection */
288 switch (conn->ksnc_type) {
290 CERROR("ksnc_type bad: %u\n", conn->ksnc_type);
292 case SOCKLND_CONN_ANY:
293 return SOCKNAL_MATCH_YES;
295 case SOCKLND_CONN_BULK_IN:
296 return SOCKNAL_MATCH_MAY;
298 case SOCKLND_CONN_BULK_OUT:
299 if (nob < *ksocknal_tunables.ksnd_min_bulk)
300 return SOCKNAL_MATCH_MAY;
302 return SOCKNAL_MATCH_YES;
304 case SOCKLND_CONN_CONTROL:
305 if (nob >= *ksocknal_tunables.ksnd_min_bulk)
306 return SOCKNAL_MATCH_MAY;
308 return SOCKNAL_MATCH_YES;
/* pro_match_tx for V3.x: like ksocknal_match_tx but aware of the dedicated
 * ACK connection type and the nonblk flag (the conditions guarding several
 * branches below sit on lines not visible here — presumably tests on
 * nonblk; TODO confirm against full source).  Called holding glock. */
313 ksocknal_match_tx_v3(ksock_conn_t *conn, ksock_tx_t *tx, int nonblk)
/* wire size: bare msg header for NOOPs, full ksock_msg_t + payload otherwise */
317 if (tx == NULL || tx->tx_lnetmsg == NULL)
318 nob = offsetof(ksock_msg_t, ksm_u);
320 nob = tx->tx_lnetmsg->msg_len + sizeof(ksock_msg_t);
322 switch (conn->ksnc_type) {
324 CERROR("ksnc_type bad: %u\n", conn->ksnc_type);
326 case SOCKLND_CONN_ANY:
327 return SOCKNAL_MATCH_NO;
329 case SOCKLND_CONN_ACK:
331 return SOCKNAL_MATCH_YES;
332 else if (tx == NULL || tx->tx_lnetmsg == NULL)
333 return SOCKNAL_MATCH_MAY;
335 return SOCKNAL_MATCH_NO;
337 case SOCKLND_CONN_BULK_OUT:
339 return SOCKNAL_MATCH_NO;
340 else if (nob < *ksocknal_tunables.ksnd_min_bulk)
341 return SOCKNAL_MATCH_MAY;
343 return SOCKNAL_MATCH_YES;
345 case SOCKLND_CONN_CONTROL:
347 return SOCKNAL_MATCH_NO;
348 else if (nob >= *ksocknal_tunables.ksnd_min_bulk)
349 return SOCKNAL_MATCH_MAY;
351 return SOCKNAL_MATCH_YES;
355 /* (Sink) handle incoming ZC request from sender */
/* pro_handle_zcreq: acknowledge cookie back to the peer.  First tries to
 * piggyback the ACK on an existing connection's tx carrier (under the
 * scheduler BH lock); only if that fails does it allocate and launch a
 * standalone NOOP tx carrying the cookie. */
357 ksocknal_handle_zcreq(ksock_conn_t *c, __u64 cookie, int remote)
359 ksock_peer_t *peer = c->ksnc_peer;
364 cfs_read_lock (&ksocknal_data.ksnd_global_lock);
366 conn = ksocknal_find_conn_locked(peer, NULL, !!remote);
368 ksock_sched_t *sched = conn->ksnc_scheduler;
370 LASSERT (conn->ksnc_proto->pro_queue_tx_zcack != NULL);
372 cfs_spin_lock_bh (&sched->kss_lock);
/* tx_ack == NULL: ask the protocol to piggyback just the cookie */
374 rc = conn->ksnc_proto->pro_queue_tx_zcack(conn, NULL, cookie);
376 cfs_spin_unlock_bh (&sched->kss_lock);
378 if (rc) { /* piggybacked */
/* NOTE(review): bare read_unlock here vs cfs_read_unlock below — verify
 * both unlock the same lock taken by cfs_read_lock above */
379 read_unlock (&ksocknal_data.ksnd_global_lock);
384 cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
386 /* ACK connection is not ready, or can't piggyback the ACK */
387 tx = ksocknal_alloc_tx_noop(cookie, !!remote);
391 if ((rc = ksocknal_launch_packet(peer->ksnp_ni, tx, peer->ksnp_id)) == 0)
/* launch failed: drop our tx reference */
394 ksocknal_free_tx(tx);
398 /* (Sender) handle ZC_ACK from sink */
/* pro_handle_zcack: the peer has acknowledged zero-copy sends identified by
 * cookie1/cookie2 (two discrete cookies when cookie1 > cookie2, else an
 * inclusive range).  Matching txs are unlinked from ksnp_zc_req_list under
 * ksnp_lock, then decref'd outside the lock.  Returns 0 on success,
 * -EPROTO when the cookies don't account for the expected count. */
400 ksocknal_handle_zcack(ksock_conn_t *conn, __u64 cookie1, __u64 cookie2)
402 ksock_peer_t *peer = conn->ksnc_peer;
405 CFS_LIST_HEAD (zlist);
/* discrete pair acknowledges exactly 2 txs; a range acknowledges its width */
411 count = (cookie1 > cookie2) ? 2 : (cookie2 - cookie1 + 1);
413 if (cookie2 == SOCKNAL_KEEPALIVE_PING &&
414 conn->ksnc_proto == &ksocknal_protocol_v3x) {
415 /* keepalive PING for V3.x, just ignore it */
416 return count == 1 ? 0 : -EPROTO;
419 cfs_spin_lock(&peer->ksnp_lock);
421 list_for_each_entry_safe(tx, tmp,
422 &peer->ksnp_zc_req_list, tx_zc_list) {
423 __u64 c = tx->tx_msg.ksm_zc_cookies[0];
/* match either discrete cookie, or anything strictly inside the range */
425 if (c == cookie1 || c == cookie2 || (cookie1 < c && c < cookie2)) {
426 tx->tx_msg.ksm_zc_cookies[0] = 0;
427 list_del(&tx->tx_zc_list);
428 list_add(&tx->tx_zc_list, &zlist);
435 cfs_spin_unlock(&peer->ksnp_lock);
/* release matched txs after dropping ksnp_lock */
437 while (!list_empty(&zlist)) {
438 tx = list_entry(zlist.next, ksock_tx_t, tx_zc_list);
439 list_del(&tx->tx_zc_list);
440 ksocknal_tx_decref(tx);
443 return count == 0 ? 0 : -EPROTO;
/* pro_send_hello for V1.x: repackage the hello into a little-endian V1.x
 * lnet_hdr_t (magic/version overlaid on dest_nid via lnet_magicversion_t)
 * and write it to the socket, followed by the IP list as the payload.
 * The hdr is heap-allocated and freed at the end; hello->kshm_ips is
 * byte-swapped in place before sending. */
447 ksocknal_send_hello_v1 (ksock_conn_t *conn, ksock_hello_msg_t *hello)
449 cfs_socket_t *sock = conn->ksnc_sock;
451 lnet_magicversion_t *hmv;
/* the magic/version overlay must fit exactly before src_nid */
455 CLASSERT(sizeof(lnet_magicversion_t) == offsetof(lnet_hdr_t, src_nid));
457 LIBCFS_ALLOC(hdr, sizeof(*hdr));
459 CERROR("Can't allocate lnet_hdr_t\n");
463 hmv = (lnet_magicversion_t *)&hdr->dest_nid;
465 /* Re-organize V2.x message header to V1.x (lnet_hdr_t)
466 * header and send out */
467 hmv->magic = cpu_to_le32 (LNET_PROTO_TCP_MAGIC);
468 hmv->version_major = cpu_to_le16 (KSOCK_PROTO_V1_MAJOR);
469 hmv->version_minor = cpu_to_le16 (KSOCK_PROTO_V1_MINOR);
/* test hooks: deliberately corrupt the hello once to exercise peers'
 * protocol-compatibility handling */
471 if (the_lnet.ln_testprotocompat != 0) {
472 /* single-shot proto check */
474 if ((the_lnet.ln_testprotocompat & 1) != 0) {
475 hmv->version_major++; /* just different! */
476 the_lnet.ln_testprotocompat &= ~1;
478 if ((the_lnet.ln_testprotocompat & 2) != 0) {
479 hmv->magic = LNET_PROTO_MAGIC;
480 the_lnet.ln_testprotocompat &= ~2;
/* everything on the wire is little-endian for V1.x */
485 hdr->src_nid = cpu_to_le64 (hello->kshm_src_nid);
486 hdr->src_pid = cpu_to_le32 (hello->kshm_src_pid);
487 hdr->type = cpu_to_le32 (LNET_MSG_HELLO);
488 hdr->payload_length = cpu_to_le32 (hello->kshm_nips * sizeof(__u32));
489 hdr->msg.hello.type = cpu_to_le32 (hello->kshm_ctype);
490 hdr->msg.hello.incarnation = cpu_to_le64 (hello->kshm_src_incarnation);
492 rc = libcfs_sock_write(sock, hdr, sizeof(*hdr), lnet_acceptor_timeout());
495 CDEBUG (D_NETERROR, "Error %d sending HELLO hdr to %u.%u.%u.%u/%d\n",
496 rc, HIPQUAD(conn->ksnc_ipaddr), conn->ksnc_port);
/* no IPs: header alone is the complete hello */
500 if (hello->kshm_nips == 0)
503 for (i = 0; i < (int) hello->kshm_nips; i++) {
504 hello->kshm_ips[i] = __cpu_to_le32 (hello->kshm_ips[i]);
507 rc = libcfs_sock_write(sock, hello->kshm_ips,
508 hello->kshm_nips * sizeof(__u32),
509 lnet_acceptor_timeout());
511 CDEBUG (D_NETERROR, "Error %d sending HELLO payload (%d)"
512 " to %u.%u.%u.%u/%d\n", rc, hello->kshm_nips,
513 HIPQUAD(conn->ksnc_ipaddr), conn->ksnc_port);
516 LIBCFS_FREE(hdr, sizeof(*hdr));
/* pro_send_hello for V2.x+: send the ksock_hello_msg_t natively — the fixed
 * part up to kshm_ips first, then the IP list.  Unlike V1.x, fields are
 * sent in host order (the receiver byte-swaps via ksnc_flip). */
522 ksocknal_send_hello_v2 (ksock_conn_t *conn, ksock_hello_msg_t *hello)
524 cfs_socket_t *sock = conn->ksnc_sock;
527 hello->kshm_magic = LNET_PROTO_MAGIC;
528 hello->kshm_version = conn->ksnc_proto->pro_version;
/* test hook: corrupt the version once to exercise compatibility paths */
530 if (the_lnet.ln_testprotocompat != 0) {
531 /* single-shot proto check */
533 if ((the_lnet.ln_testprotocompat & 1) != 0) {
534 hello->kshm_version++; /* just different! */
535 the_lnet.ln_testprotocompat &= ~1;
540 rc = libcfs_sock_write(sock, hello, offsetof(ksock_hello_msg_t, kshm_ips),
541 lnet_acceptor_timeout());
544 CDEBUG (D_NETERROR, "Error %d sending HELLO hdr to %u.%u.%u.%u/%d\n",
545 rc, HIPQUAD(conn->ksnc_ipaddr), conn->ksnc_port);
/* no IPs: the fixed part alone is the complete hello */
549 if (hello->kshm_nips == 0)
552 rc = libcfs_sock_write(sock, hello->kshm_ips,
553 hello->kshm_nips * sizeof(__u32),
554 lnet_acceptor_timeout());
556 CDEBUG (D_NETERROR, "Error %d sending HELLO payload (%d)"
557 " to %u.%u.%u.%u/%d\n", rc, hello->kshm_nips,
558 HIPQUAD(conn->ksnc_ipaddr), conn->ksnc_port);
/* pro_recv_hello for V1.x: read the remainder of an lnet_hdr_t-framed
 * HELLO (the magic/version prefix was already consumed by the caller),
 * validate it, and decode the little-endian fields into *hello.
 * The scratch hdr is heap-allocated and freed at the end. */
565 ksocknal_recv_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello,int timeout)
567 cfs_socket_t *sock = conn->ksnc_sock;
572 LIBCFS_ALLOC(hdr, sizeof(*hdr));
574 CERROR("Can't allocate lnet_hdr_t\n");
/* read everything after the already-consumed magic/version prefix */
578 rc = libcfs_sock_read(sock, &hdr->src_nid,
579 sizeof (*hdr) - offsetof (lnet_hdr_t, src_nid),
582 CERROR ("Error %d reading rest of HELLO hdr from %u.%u.%u.%u\n",
583 rc, HIPQUAD(conn->ksnc_ipaddr));
584 LASSERT (rc < 0 && rc != -EALREADY);
588 /* ...and check we got what we expected */
589 if (hdr->type != cpu_to_le32 (LNET_MSG_HELLO)) {
590 CERROR ("Expecting a HELLO hdr,"
591 " but got type %d from %u.%u.%u.%u\n",
592 le32_to_cpu (hdr->type),
593 HIPQUAD(conn->ksnc_ipaddr));
/* decode little-endian wire fields into the hello struct */
598 hello->kshm_src_nid = le64_to_cpu (hdr->src_nid);
599 hello->kshm_src_pid = le32_to_cpu (hdr->src_pid);
600 hello->kshm_src_incarnation = le64_to_cpu (hdr->msg.hello.incarnation);
601 hello->kshm_ctype = le32_to_cpu (hdr->msg.hello.type);
602 hello->kshm_nips = le32_to_cpu (hdr->payload_length) /
/* reject a peer advertising more interfaces than we can hold */
605 if (hello->kshm_nips > LNET_MAX_INTERFACES) {
606 CERROR("Bad nips %d from ip %u.%u.%u.%u\n",
607 hello->kshm_nips, HIPQUAD(conn->ksnc_ipaddr));
612 if (hello->kshm_nips == 0)
615 rc = libcfs_sock_read(sock, hello->kshm_ips,
616 hello->kshm_nips * sizeof(__u32), timeout);
618 CERROR ("Error %d reading IPs from ip %u.%u.%u.%u\n",
619 rc, HIPQUAD(conn->ksnc_ipaddr));
620 LASSERT (rc < 0 && rc != -EALREADY);
/* decode IPs in place and reject zero addresses */
624 for (i = 0; i < (int) hello->kshm_nips; i++) {
625 hello->kshm_ips[i] = __le32_to_cpu(hello->kshm_ips[i]);
627 if (hello->kshm_ips[i] == 0) {
628 CERROR("Zero IP[%d] from ip %u.%u.%u.%u\n",
629 i, HIPQUAD(conn->ksnc_ipaddr));
635 LIBCFS_FREE(hdr, sizeof(*hdr));
/* pro_recv_hello for V2.x+: read the rest of a native ksock_hello_msg_t
 * (magic/version/dst already consumed by the caller), byte-swapping every
 * field when the peer's endianness differs (conn->ksnc_flip), then read
 * and validate the IP list. */
641 ksocknal_recv_hello_v2 (ksock_conn_t *conn, ksock_hello_msg_t *hello, int timeout)
643 cfs_socket_t *sock = conn->ksnc_sock;
/* LNET_PROTO_MAGIC in native order means no swapping is needed */
647 if (hello->kshm_magic == LNET_PROTO_MAGIC)
652 rc = libcfs_sock_read(sock, &hello->kshm_src_nid,
653 offsetof(ksock_hello_msg_t, kshm_ips) -
654 offsetof(ksock_hello_msg_t, kshm_src_nid),
657 CERROR ("Error %d reading HELLO from %u.%u.%u.%u\n",
658 rc, HIPQUAD(conn->ksnc_ipaddr));
659 LASSERT (rc < 0 && rc != -EALREADY);
663 if (conn->ksnc_flip) {
664 __swab32s(&hello->kshm_src_pid);
665 __swab64s(&hello->kshm_src_nid);
666 __swab32s(&hello->kshm_dst_pid);
667 __swab64s(&hello->kshm_dst_nid);
668 __swab64s(&hello->kshm_src_incarnation);
669 __swab64s(&hello->kshm_dst_incarnation);
670 __swab32s(&hello->kshm_ctype);
671 __swab32s(&hello->kshm_nips);
/* reject a peer advertising more interfaces than we can hold */
674 if (hello->kshm_nips > LNET_MAX_INTERFACES) {
675 CERROR("Bad nips %d from ip %u.%u.%u.%u\n",
676 hello->kshm_nips, HIPQUAD(conn->ksnc_ipaddr));
680 if (hello->kshm_nips == 0)
683 rc = libcfs_sock_read(sock, hello->kshm_ips,
684 hello->kshm_nips * sizeof(__u32), timeout);
686 CERROR ("Error %d reading IPs from ip %u.%u.%u.%u\n",
687 rc, HIPQUAD(conn->ksnc_ipaddr));
688 LASSERT (rc < 0 && rc != -EALREADY);
/* swap each IP if needed and reject zero addresses */
692 for (i = 0; i < (int) hello->kshm_nips; i++) {
694 __swab32s(&hello->kshm_ips[i]);
696 if (hello->kshm_ips[i] == 0) {
697 CERROR("Zero IP[%d] from ip %u.%u.%u.%u\n",
698 i, HIPQUAD(conn->ksnc_ipaddr));
/* pro_pack for V1.x: point the first iov directly at the LNet message
 * header (no ksock_msg_t framing in V1.x) and set the total wire size. */
707 ksocknal_pack_msg_v1(ksock_tx_t *tx)
709 /* V1.x has no KSOCK_MSG_NOOP */
710 LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
711 LASSERT(tx->tx_lnetmsg != NULL);
713 tx->tx_iov[0].iov_base = (void *)&tx->tx_lnetmsg->msg_hdr;
714 tx->tx_iov[0].iov_len = sizeof(lnet_hdr_t);
716 tx->tx_resid = tx->tx_nob = tx->tx_lnetmsg->msg_len + sizeof(lnet_hdr_t);
/* pro_pack for V2.x+: frame the tx with a ksock_msg_t.  LNet messages copy
 * the LNet header into the frame and send the whole ksock_msg_t; NOOPs
 * send only the part of the frame before the (unused) LNet header. */
720 ksocknal_pack_msg_v2(ksock_tx_t *tx)
722 tx->tx_iov[0].iov_base = (void *)&tx->tx_msg;
724 if (tx->tx_lnetmsg != NULL) {
725 LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
727 tx->tx_msg.ksm_u.lnetmsg.ksnm_hdr = tx->tx_lnetmsg->msg_hdr;
728 tx->tx_iov[0].iov_len = sizeof(ksock_msg_t);
729 tx->tx_resid = tx->tx_nob = sizeof(ksock_msg_t) + tx->tx_lnetmsg->msg_len;
731 LASSERT(tx->tx_msg.ksm_type == KSOCK_MSG_NOOP);
/* NOOP wire size stops before the embedded LNet header */
733 tx->tx_iov[0].iov_len = offsetof(ksock_msg_t, ksm_u.lnetmsg.ksnm_hdr);
734 tx->tx_resid = tx->tx_nob = offsetof(ksock_msg_t, ksm_u.lnetmsg.ksnm_hdr);
736 /* Don't checksum before start sending, because packet can be piggybacked with ACK */
/* pro_unpack for V1.x: V1.x headers carry no message type or ZC cookies,
 * so normalize the in-memory msg to an LNet message with no cookies. */
740 ksocknal_unpack_msg_v1(ksock_msg_t *msg)
743 msg->ksm_type = KSOCK_MSG_LNET;
744 msg->ksm_zc_cookies[0] = msg->ksm_zc_cookies[1] = 0;
/* pro_unpack for V2.x+: the wire format already matches ksock_msg_t, so
 * nothing needs to be rewritten. */
748 ksocknal_unpack_msg_v2(ksock_msg_t *msg)
750 return; /* Do nothing */
/* Protocol dispatch table for V1.x: no NOOP messages exist at this
 * version, so all ZC-ACK related handlers are NULL. */
753 ksock_proto_t ksocknal_protocol_v1x =
755 .pro_version = KSOCK_PROTO_V1,
756 .pro_send_hello = ksocknal_send_hello_v1,
757 .pro_recv_hello = ksocknal_recv_hello_v1,
758 .pro_pack = ksocknal_pack_msg_v1,
759 .pro_unpack = ksocknal_unpack_msg_v1,
760 .pro_queue_tx_msg = ksocknal_queue_tx_msg_v1,
761 .pro_handle_zcreq = NULL,
762 .pro_handle_zcack = NULL,
763 .pro_queue_tx_zcack = NULL,
764 .pro_match_tx = ksocknal_match_tx
/* Protocol dispatch table for V2.x: ksock_msg_t framing with single-cookie
 * ZC-ACK piggybacking (queue_tx_zcack_v2) and the default tx matcher. */
767 ksock_proto_t ksocknal_protocol_v2x =
769 .pro_version = KSOCK_PROTO_V2,
770 .pro_send_hello = ksocknal_send_hello_v2,
771 .pro_recv_hello = ksocknal_recv_hello_v2,
772 .pro_pack = ksocknal_pack_msg_v2,
773 .pro_unpack = ksocknal_unpack_msg_v2,
774 .pro_queue_tx_msg = ksocknal_queue_tx_msg_v2,
775 .pro_queue_tx_zcack = ksocknal_queue_tx_zcack_v2,
776 .pro_handle_zcreq = ksocknal_handle_zcreq,
777 .pro_handle_zcack = ksocknal_handle_zcack,
778 .pro_match_tx = ksocknal_match_tx
781 ksock_proto_t ksocknal_protocol_v3x =
783 .pro_version = KSOCK_PROTO_V3,
784 .pro_send_hello = ksocknal_send_hello_v2,
785 .pro_recv_hello = ksocknal_recv_hello_v2,
786 .pro_pack = ksocknal_pack_msg_v2,
787 .pro_unpack = ksocknal_unpack_msg_v2,
788 .pro_queue_tx_msg = ksocknal_queue_tx_msg_v2,
789 .pro_queue_tx_zcack = ksocknal_queue_tx_zcack_v3,
790 .pro_handle_zcreq = ksocknal_handle_zcreq,
791 .pro_handle_zcack = ksocknal_handle_zcack,
792 .pro_match_tx = ksocknal_match_tx_v3