/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 */

#include "socklnd.h"
int
ksocknal_lib_get_conn_addrs(struct ksock_conn *conn)
{
	int rc = lnet_sock_getaddr(conn->ksnc_sock, true,
				   &conn->ksnc_peeraddr);

	/* Didn't need the {get,put}connsock dance to deref ksnc_sock... */
	LASSERT(!conn->ksnc_closing);

	if (rc != 0) {
		CERROR("Error %d getting sock peer_ni IP\n", rc);
		return rc;
	}

	rc = lnet_sock_getaddr(conn->ksnc_sock, false,
			       &conn->ksnc_myaddr);
	if (rc != 0) {
		CERROR("Error %d getting sock local IP\n", rc);
		return rc;
	}

	return 0;
}
int
ksocknal_lib_zc_capable(struct ksock_conn *conn)
{
	int caps = conn->ksnc_sock->sk->sk_route_caps;

	if (conn->ksnc_proto == &ksocknal_protocol_v1x)
		return 0;

	/* ZC if the socket supports scatter/gather and doesn't need software
	 * checksums */
	return ((caps & NETIF_F_SG) != 0 && (caps & NETIF_F_CSUM_MASK) != 0);
}
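
/* Note: zero-copy is refused outright for V1.x peers because that protocol
 * has no ZC-ACK handshake to tell the sender when its pages may be
 * released.  For everyone else it requires NETIF_F_SG (the NIC can gather
 * scattered fragments) plus hardware checksum offload (NETIF_F_CSUM_MASK):
 * a software checksum would touch every byte anyway, negating the point of
 * avoiding the copy. */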
int
ksocknal_lib_send_hdr(struct ksock_conn *conn, struct ksock_tx *tx,
		      struct kvec *scratchiov)
{
	struct socket *sock = conn->ksnc_sock;
	int nob = 0;
	int rc;

	if (*ksocknal_tunables.ksnd_enable_csum &&	  /* checksum enabled */
	    conn->ksnc_proto == &ksocknal_protocol_v2x && /* V2.x connection */
	    tx->tx_nob == tx->tx_resid &&		  /* first sending */
	    tx->tx_msg.ksm_csum == 0)			  /* not checksummed */
		ksocknal_lib_csum_tx(tx);

	/* NB we can't trust socket ops to either consume our iovs
	 * or leave them alone. */
	{
#if SOCKNAL_SINGLE_FRAG_TX
		struct kvec scratch;
		struct kvec *scratchiov = &scratch;
		unsigned int niov = 1;
#else
		unsigned int niov = tx->tx_niov;
#endif
		struct msghdr msg = { .msg_flags = MSG_DONTWAIT };

		scratchiov[0] = tx->tx_hdr;
		nob += scratchiov[0].iov_len;

		if (!list_empty(&conn->ksnc_tx_queue) ||
		    nob < tx->tx_resid)
			msg.msg_flags |= MSG_MORE;

		rc = kernel_sendmsg(sock, &msg, scratchiov, niov, nob);
	}
	return rc;
}
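
/* Note: MSG_MORE tells TCP to hold back a small segment because more data
 * follows immediately, either further tx's already queued on this
 * connection or the remainder of this one (nob < tx_resid).  This lets the
 * LNet header coalesce with its payload into fewer, fuller segments. */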
int
ksocknal_lib_send_kiov(struct ksock_conn *conn, struct ksock_tx *tx,
		       struct kvec *scratchiov)
{
	struct socket *sock = conn->ksnc_sock;
	struct bio_vec *kiov = tx->tx_kiov;
	int rc;
	int nob;

	/* Not NOOP message */
	LASSERT(tx->tx_lnetmsg != NULL);

	/* NB we can't trust socket ops to either consume our iovs
	 * or leave them alone. */
	if (tx->tx_msg.ksm_zc_cookies[0] != 0) {
		/* Zero copy is enabled */
		struct sock *sk = sock->sk;
		struct page *page = kiov->bv_page;
		int offset = kiov->bv_offset;
		int fragsize = kiov->bv_len;
		int msgflg = MSG_DONTWAIT;

		CDEBUG(D_NET, "page %p + offset %x for %d\n",
		       page, offset, kiov->bv_len);

		if (!list_empty(&conn->ksnc_tx_queue) ||
		    fragsize < tx->tx_resid)
			msgflg |= MSG_MORE;

		rc = sk->sk_prot->sendpage(sk, page,
					   offset, fragsize, msgflg);
	} else {
#if SOCKNAL_SINGLE_FRAG_TX || !SOCKNAL_RISK_KMAP_DEADLOCK
		struct kvec scratch;
		struct kvec *scratchiov = &scratch;
		unsigned int niov = 1;
#else
#ifdef CONFIG_HIGHMEM
#warning "XXX risk of kmap deadlock on multiple frags..."
#endif
		unsigned int niov = tx->tx_nkiov;
#endif
		struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
		int i;

		for (nob = i = 0; i < niov; i++) {
			scratchiov[i].iov_base = kmap(kiov[i].bv_page) +
						 kiov[i].bv_offset;
			nob += scratchiov[i].iov_len = kiov[i].bv_len;
		}

		if (!list_empty(&conn->ksnc_tx_queue) ||
		    nob < tx->tx_resid)
			msg.msg_flags |= MSG_MORE;

		rc = kernel_sendmsg(sock, &msg, scratchiov, niov, nob);

		for (i = 0; i < niov; i++)
			kunmap(kiov[i].bv_page);
	}
	return rc;
}
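
/* Note: the ZC branch hands the page straight to the protocol's sendpage
 * method, so TCP references the page in its transmit queue instead of
 * copying it; the page must stay pinned until the peer's ZC-ACK arrives,
 * which is what the ksm_zc_cookies bookkeeping tracks.  The fallback
 * branch kmap()s each fragment and sends ordinary copied iovs via
 * kernel_sendmsg(). */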
void
ksocknal_lib_eager_ack(struct ksock_conn *conn)
{
	struct socket *sock = conn->ksnc_sock;

	/* Remind the socket to ACK eagerly.  If I don't, the socket might
	 * think I'm about to send something it could piggy-back the ACK on,
	 * introducing delay in completing zero-copy sends in my peer_ni. */
	tcp_sock_set_quickack(sock->sk, 1);
}
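
/* Note: tcp_sock_set_quickack(sk, 1) is the in-kernel equivalent of the
 * TCP_QUICKACK sockopt: it temporarily disables delayed ACKs, so the
 * peer's zero-copy send completes (and its pinned pages are released) as
 * soon as the data arrives rather than up to a delayed-ACK timeout
 * later. */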
int
ksocknal_lib_recv_iov(struct ksock_conn *conn, struct kvec *scratchiov)
{
#if SOCKNAL_SINGLE_FRAG_RX
	struct kvec scratch;
	struct kvec *scratchiov = &scratch;
	unsigned int niov = 1;
#else
	unsigned int niov = conn->ksnc_rx_niov;
#endif
	struct kvec *iov = conn->ksnc_rx_iov;
	struct msghdr msg = {
		.msg_flags = 0
	};
	int nob;
	int i;
	int rc;
	int fragnob;
	int sum;
	__u32 saved_csum;

	/* NB we can't trust socket ops to either consume our iovs
	 * or leave them alone. */
	for (nob = i = 0; i < niov; i++) {
		scratchiov[i] = iov[i];
		nob += scratchiov[i].iov_len;
	}
	LASSERT(nob <= conn->ksnc_rx_nob_wanted);

	rc = kernel_recvmsg(conn->ksnc_sock, &msg, scratchiov, niov, nob,
			    MSG_DONTWAIT);

	saved_csum = 0;
	if (conn->ksnc_proto == &ksocknal_protocol_v2x) {
		saved_csum = conn->ksnc_msg.ksm_csum;
		conn->ksnc_msg.ksm_csum = 0;
	}

	if (saved_csum != 0) {
		/* accumulate checksum */
		for (i = 0, sum = rc; sum > 0; i++, sum -= fragnob) {
			LASSERT(i < niov);

			fragnob = iov[i].iov_len;
			if (fragnob > sum)
				fragnob = sum;

			conn->ksnc_rx_csum = ksocknal_csum(conn->ksnc_rx_csum,
							   iov[i].iov_base,
							   fragnob);
		}
		conn->ksnc_msg.ksm_csum = saved_csum;
	}

	return rc;
}
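
/* Note: the accumulation loop walks the original iovs again, clipping the
 * final fragment to the number of bytes actually received (sum starts at
 * rc and drops by fragnob each pass), so a partial read checksums exactly
 * the bytes that arrived and no more. */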
static void
ksocknal_lib_kiov_vunmap(void *addr)
{
	if (addr == NULL)
		return;

	vunmap(addr);
}

static void *
ksocknal_lib_kiov_vmap(struct bio_vec *kiov, int niov,
		       struct kvec *iov, struct page **pages)
{
	void *addr;
	int nob;
	int i;

	if (!*ksocknal_tunables.ksnd_zc_recv || pages == NULL)
		return NULL;

	LASSERT(niov <= LNET_MAX_IOV);

	if (niov < 2 ||
	    niov < *ksocknal_tunables.ksnd_zc_recv_min_nfrags)
		return NULL;

	for (nob = i = 0; i < niov; i++) {
		if ((kiov[i].bv_offset != 0 && i > 0) ||
		    (kiov[i].bv_offset + kiov[i].bv_len !=
		     PAGE_SIZE && i < niov - 1))
			return NULL;

		pages[i] = kiov[i].bv_page;
		nob += kiov[i].bv_len;
	}

	addr = vmap(pages, niov, VM_MAP, PAGE_KERNEL);
	if (addr == NULL)
		return NULL;

	iov->iov_base = addr + kiov[0].bv_offset;
	iov->iov_len = nob;

	return addr;
}
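
/* Note: vmap() only pays off when every interior fragment is exactly one
 * whole page, i.e. only the first fragment may start mid-page (bv_offset
 * != 0) and only the last may end before PAGE_SIZE.  Under that constraint
 * the pages become virtually contiguous once mapped, so the whole payload
 * can be received into a single kvec instead of one kvec per page. */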
int
ksocknal_lib_recv_kiov(struct ksock_conn *conn, struct page **pages,
		       struct kvec *scratchiov)
{
#if SOCKNAL_SINGLE_FRAG_RX || !SOCKNAL_RISK_KMAP_DEADLOCK
	struct kvec scratch;
	struct kvec *scratchiov = &scratch;
	struct page **pages = NULL;
	unsigned int niov = 1;
#else
#ifdef CONFIG_HIGHMEM
#warning "XXX risk of kmap deadlock on multiple frags..."
#endif
	unsigned int niov = conn->ksnc_rx_nkiov;
#endif
	struct bio_vec *kiov = conn->ksnc_rx_kiov;
	struct msghdr msg = {
		.msg_flags = 0
	};
	int nob;
	int i;
	int rc;
	void *base;
	void *addr;
	int sum;
	int fragnob;
	int n;

	/* NB we can't trust socket ops to either consume our iovs
	 * or leave them alone. */
	if ((addr = ksocknal_lib_kiov_vmap(kiov, niov, scratchiov, pages)) != NULL) {
		nob = scratchiov[0].iov_len;
		n = 1;
	} else {
		for (nob = i = 0; i < niov; i++) {
			nob += scratchiov[i].iov_len = kiov[i].bv_len;
			scratchiov[i].iov_base = kmap(kiov[i].bv_page) +
						 kiov[i].bv_offset;
		}
		n = niov;
	}

	LASSERT(nob <= conn->ksnc_rx_nob_wanted);

	rc = kernel_recvmsg(conn->ksnc_sock, &msg, scratchiov, n, nob,
			    MSG_DONTWAIT);

	if (conn->ksnc_msg.ksm_csum != 0) {
		for (i = 0, sum = rc; sum > 0; i++, sum -= fragnob) {
			LASSERT(i < niov);

			/* Dang! have to kmap again because I have nowhere to
			 * stash the mapped address.  But by doing it while the
			 * page is still mapped, the kernel just bumps the map
			 * count and returns me the address it stashed. */
			base = kmap(kiov[i].bv_page) + kiov[i].bv_offset;
			fragnob = kiov[i].bv_len;
			if (fragnob > sum)
				fragnob = sum;

			conn->ksnc_rx_csum = ksocknal_csum(conn->ksnc_rx_csum,
							   base, fragnob);

			kunmap(kiov[i].bv_page);
		}
	}

	if (addr != NULL) {
		ksocknal_lib_kiov_vunmap(addr);
	} else {
		for (i = 0; i < niov; i++)
			kunmap(kiov[i].bv_page);
	}

	return rc;
}
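
/* Note: the "kernel just bumps the map count" remark holds because kmap()
 * of a lowmem page simply returns its direct-mapped address, and kmap() of
 * a highmem page that is still mapped reuses the existing pkmap slot; that
 * is what makes the re-kmap inside the checksum loop cheap. */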
void
ksocknal_lib_csum_tx(struct ksock_tx *tx)
{
	int i;
	__u32 csum;
	void *base;

	LASSERT(tx->tx_hdr.iov_base == (void *)&tx->tx_msg);
	LASSERT(tx->tx_conn != NULL);
	LASSERT(tx->tx_conn->ksnc_proto == &ksocknal_protocol_v2x);

	tx->tx_msg.ksm_csum = 0;

	csum = ksocknal_csum(~0, (void *)tx->tx_hdr.iov_base,
			     tx->tx_hdr.iov_len);

	for (i = 0; i < tx->tx_nkiov; i++) {
		base = kmap(tx->tx_kiov[i].bv_page) +
		       tx->tx_kiov[i].bv_offset;

		csum = ksocknal_csum(csum, base, tx->tx_kiov[i].bv_len);

		kunmap(tx->tx_kiov[i].bv_page);
	}

	if (*ksocknal_tunables.ksnd_inject_csum_error) {
		csum++;
		*ksocknal_tunables.ksnd_inject_csum_error = 0;
	}

	tx->tx_msg.ksm_csum = csum;
}
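
/* Note: ksm_csum is zeroed before accumulating so the checksum field
 * itself is covered as zero, mirroring the receive path, which also zeroes
 * ksm_csum before accumulating and then compares against the saved value.
 * The inject_csum_error tunable deliberately corrupts one checksum and
 * then self-clears, to exercise the receiver's error handling. */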
int
ksocknal_lib_get_conn_tunables(struct ksock_conn *conn, int *txmem,
			       int *rxmem, int *nagle)
{
	struct socket *sock = conn->ksnc_sock;
	struct tcp_sock *tp = tcp_sk(sock->sk);

	if (ksocknal_connsock_addref(conn) < 0) {
		LASSERT(conn->ksnc_closing);
		*txmem = *rxmem = *nagle = 0;
		return -ESHUTDOWN;
	}

	lnet_sock_getbuf(sock, txmem, rxmem);

	*nagle = !(tp->nonagle & TCP_NAGLE_OFF);

	ksocknal_connsock_decref(conn);

	return 0;
}
int
ksocknal_lib_setup_sock(struct socket *sock)
{
	int rc;
	int keep_idle;
	int keep_intvl;
	int keep_count;
	int do_keepalive;
	struct tcp_sock *tp = tcp_sk(sock->sk);

	sock->sk->sk_allocation = GFP_NOFS;

	/* Ensure this socket aborts active sends immediately when closed. */
	sock_reset_flag(sock->sk, SOCK_LINGER);

	tp->linger2 = -1;

	if (!*ksocknal_tunables.ksnd_nagle)
		tcp_sock_set_nodelay(sock->sk);

	lnet_sock_setbuf(sock,
			 *ksocknal_tunables.ksnd_tx_buffer_size,
			 *ksocknal_tunables.ksnd_rx_buffer_size);

/* TCP_BACKOFF_* sockopt tunables unsupported in stock kernels */
#ifdef SOCKNAL_BACKOFF
	if (*ksocknal_tunables.ksnd_backoff_init > 0) {
		int option = *ksocknal_tunables.ksnd_backoff_init;
#ifdef SOCKNAL_BACKOFF_MS
		option *= 1000;
#endif
		rc = kernel_setsockopt(sock, SOL_TCP, TCP_BACKOFF_INIT,
				       (char *)&option, sizeof(option));
		if (rc != 0) {
			CERROR("Can't set initial tcp backoff %d: %d\n",
			       option, rc);
			return rc;
		}
	}

	if (*ksocknal_tunables.ksnd_backoff_max > 0) {
		int option = *ksocknal_tunables.ksnd_backoff_max;
#ifdef SOCKNAL_BACKOFF_MS
		option *= 1000;
#endif
		rc = kernel_setsockopt(sock, SOL_TCP, TCP_BACKOFF_MAX,
				       (char *)&option, sizeof(option));
		if (rc != 0) {
			CERROR("Can't set maximum tcp backoff %d: %d\n",
			       option, rc);
			return rc;
		}
	}
#endif /* SOCKNAL_BACKOFF */

	/* snapshot tunables */
	keep_idle  = *ksocknal_tunables.ksnd_keepalive_idle;
	keep_count = *ksocknal_tunables.ksnd_keepalive_count;
	keep_intvl = *ksocknal_tunables.ksnd_keepalive_intvl;

	do_keepalive = (keep_idle > 0 && keep_count > 0 && keep_intvl > 0);

#ifdef HAVE_KERNEL_SETSOCKOPT
	/* open-coded version doesn't work in all kernels, and
	 * there is no helper function, so call kernel_setsockopt()
	 * directly. */
	{
		int option = (do_keepalive ? 1 : 0);

		kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE,
				  (char *)&option, sizeof(option));
	}
#else
	if (sock->sk->sk_prot->keepalive)
		sock->sk->sk_prot->keepalive(sock->sk, do_keepalive);
	if (do_keepalive)
		sock_set_flag(sock->sk, SOCK_KEEPOPEN);
	else
		sock_reset_flag(sock->sk, SOCK_KEEPOPEN);
#endif /* HAVE_KERNEL_SETSOCKOPT */

	if (!do_keepalive)
		return 0;

	rc = tcp_sock_set_keepidle(sock->sk, keep_idle);
	if (rc != 0) {
		CERROR("Can't set TCP_KEEPIDLE: %d\n", rc);
		return rc;
	}

	rc = tcp_sock_set_keepintvl(sock->sk, keep_intvl);
	if (rc != 0) {
		CERROR("Can't set TCP_KEEPINTVL: %d\n", rc);
		return rc;
	}

	rc = tcp_sock_set_keepcnt(sock->sk, keep_count);
	if (rc != 0) {
		CERROR("Can't set TCP_KEEPCNT: %d\n", rc);
		return rc;
	}

	return 0;
}
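
/* Note: keepalive is armed only when all three tunables are positive:
 * keep_idle is the seconds of silence before the first probe, keep_intvl
 * the seconds between probes, and keep_count how many unanswered probes
 * close the connection.  E.g. idle=30, intvl=5, count=4 detects a dead
 * peer in roughly 30 + 4*5 = 50 seconds. */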
void
ksocknal_lib_push_conn(struct ksock_conn *conn)
{
	struct sock *sk;
	struct tcp_sock *tp;
	int nonagle;
	int rc;

	rc = ksocknal_connsock_addref(conn);
	if (rc != 0)			/* being shut down */
		return;

	sk = conn->ksnc_sock->sk;
	tp = tcp_sk(sk);

	lock_sock(sk);
	nonagle = tp->nonagle;
	tp->nonagle = TCP_NAGLE_OFF;
	release_sock(sk);

	tcp_sock_set_nodelay(conn->ksnc_sock->sk);

	lock_sock(sk);
	tp->nonagle = nonagle;
	release_sock(sk);

	ksocknal_connsock_decref(conn);
}
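
/* Note: this "push" momentarily forces Nagle off and calls
 * tcp_sock_set_nodelay(), whose side effect is to flush any partially
 * filled segment onto the wire immediately; the saved nonagle value is
 * then restored, so the connection's configured Nagle behaviour is left
 * unchanged. */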
void ksocknal_read_callback(struct ksock_conn *conn);
void ksocknal_write_callback(struct ksock_conn *conn);
/*
 * socket call back in Linux
 */
static void
#ifdef HAVE_SK_DATA_READY_ONE_ARG
ksocknal_data_ready(struct sock *sk)
#else
ksocknal_data_ready(struct sock *sk, int n)
#endif
{
	struct ksock_conn *conn;

	/* interleave correctly with closing sockets... */
	read_lock(&ksocknal_data.ksnd_global_lock);

	conn = sk->sk_user_data;
	if (conn == NULL) {	/* raced with ksocknal_terminate_conn */
		LASSERT(sk->sk_data_ready != &ksocknal_data_ready);
#ifdef HAVE_SK_DATA_READY_ONE_ARG
		sk->sk_data_ready(sk);
#else
		sk->sk_data_ready(sk, n);
#endif
	} else {
		ksocknal_read_callback(conn);
	}

	read_unlock(&ksocknal_data.ksnd_global_lock);
}
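
/* Note: if sk_user_data is NULL, teardown has already restored the
 * socket's original sk_data_ready (see ksocknal_lib_reset_callback below),
 * so the pointer this function forwards through is the network stack's own
 * callback, not ksocknal_data_ready itself; the LASSERT verifies exactly
 * that. */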
static void
ksocknal_write_space(struct sock *sk)
{
	struct ksock_conn *conn;
	int wspace;
	int min_wspace;

	/* interleave correctly with closing sockets... */
	read_lock(&ksocknal_data.ksnd_global_lock);

	conn = sk->sk_user_data;
	wspace = sk_stream_wspace(sk);
	min_wspace = sk_stream_min_wspace(sk);

	CDEBUG(D_NET, "sk %p wspace %d low water %d conn %p%s%s%s\n",
	       sk, wspace, min_wspace, conn,
	       (conn == NULL) ? "" : (conn->ksnc_tx_ready ?
				      " ready" : " blocked"),
	       (conn == NULL) ? "" : (conn->ksnc_tx_scheduled ?
				      " scheduled" : " idle"),
	       (conn == NULL) ? "" : (list_empty(&conn->ksnc_tx_queue) ?
				      " empty" : " queued"));

	if (conn == NULL) {	/* raced with ksocknal_terminate_conn */
		LASSERT(sk->sk_write_space != &ksocknal_write_space);
		sk->sk_write_space(sk);

		read_unlock(&ksocknal_data.ksnd_global_lock);
		return;
	}

	if (wspace >= min_wspace) {	/* got enough space */
		ksocknal_write_callback(conn);

		/* Clear SOCK_NOSPACE _after_ ksocknal_write_callback so the
		 * ENOMEM check in ksocknal_transmit is race-free (think about
		 * it). */
		clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
	}

	read_unlock(&ksocknal_data.ksnd_global_lock);
}
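
/* Note: the ordering here pairs with ksocknal_lib_memory_pressure() below:
 * a sender that got ENOMEM inspects SOCK_NOSPACE and ksnc_tx_ready under
 * the scheduler lock to decide whether this callback can still be relied
 * on to reschedule it.  Clearing SOCK_NOSPACE only after
 * ksocknal_write_callback() (which sets ksnc_tx_ready) keeps that check
 * race-free. */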
void
ksocknal_lib_save_callback(struct socket *sock, struct ksock_conn *conn)
{
	conn->ksnc_saved_data_ready = sock->sk->sk_data_ready;
	conn->ksnc_saved_write_space = sock->sk->sk_write_space;
}

void
ksocknal_lib_set_callback(struct socket *sock, struct ksock_conn *conn)
{
	sock->sk->sk_user_data = conn;
	sock->sk->sk_data_ready = ksocknal_data_ready;
	sock->sk->sk_write_space = ksocknal_write_space;
}

void
ksocknal_lib_reset_callback(struct socket *sock, struct ksock_conn *conn)
{
	/* Remove conn's network callbacks.
	 * NB I _have_ to restore the callback, rather than storing a noop,
	 * since the socket could survive past this module being unloaded!! */
	sock->sk->sk_data_ready = conn->ksnc_saved_data_ready;
	sock->sk->sk_write_space = conn->ksnc_saved_write_space;

	/* A callback could be in progress already; they hold a read lock
	 * on ksnd_global_lock (to serialise with me) and NOOP if
	 * sk_user_data is NULL. */
	sock->sk->sk_user_data = NULL;
}
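
/* Note: the callbacks serialise against this reset via ksnd_global_lock:
 * they run under the read lock, and any callback that begins after the
 * reset sees sk_user_data == NULL and simply forwards to the restored
 * stack callback, so no socket notification is lost during connection
 * teardown. */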
int
ksocknal_lib_memory_pressure(struct ksock_conn *conn)
{
	int rc = 0;
	struct ksock_sched *sched;

	sched = conn->ksnc_scheduler;
	spin_lock_bh(&sched->kss_lock);

	if (!test_bit(SOCK_NOSPACE, &conn->ksnc_sock->flags) &&
	    !conn->ksnc_tx_ready) {
		/* SOCK_NOSPACE is set when the socket fills
		 * and cleared in the write_space callback
		 * (which also sets ksnc_tx_ready).  If
		 * SOCK_NOSPACE and ksnc_tx_ready are BOTH
		 * zero, I didn't fill the socket and
		 * write_space won't reschedule me, so I
		 * return -ENOMEM to get my caller to retry
		 * after a timeout */
		rc = -ENOMEM;
	}

	spin_unlock_bh(&sched->kss_lock);

	return rc;
}
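
/* Note: returning 0 here means "a write_space wake-up is pending, park the
 * tx and wait for it"; returning -ENOMEM means the failure came from
 * genuine memory pressure rather than a full socket, so the caller must
 * retry on a timer because no socket event will ever re-arm it. */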