/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */

#include "socklnd.h"
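
/* Cache the peer and local addresses of an established connection's
 * socket in the conn. Returns 0 on success or the lnet_sock_getaddr()
 * error. */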
int
ksocknal_lib_get_conn_addrs(struct ksock_conn *conn)
{
        int rc = lnet_sock_getaddr(conn->ksnc_sock, true,
                                   &conn->ksnc_peeraddr);

        /* Didn't need the {get,put}connsock dance to deref ksnc_sock... */
        LASSERT(!conn->ksnc_closing);

        if (rc != 0) {
                CERROR("Error %d getting sock peer_ni IP\n", rc);
                return rc;
        }

        rc = lnet_sock_getaddr(conn->ksnc_sock, false,
                               &conn->ksnc_myaddr);
        if (rc != 0) {
                CERROR("Error %d getting sock local IP\n", rc);
                return rc;
        }

        return 0;
}
int
ksocknal_lib_zc_capable(struct ksock_conn *conn)
{
        int caps = conn->ksnc_sock->sk->sk_route_caps;

        if (conn->ksnc_proto == &ksocknal_protocol_v1x)
                return 0;

        /* ZC if the socket supports scatter/gather and doesn't need software
         * checksums */
        return ((caps & NETIF_F_SG) != 0 && (caps & NETIF_F_CSUM_MASK) != 0);
}
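
/* Transmit the message header in tx_hdr, checksumming the whole message
 * first if checksums are enabled and this is its first send attempt.
 * MSG_MORE is set when more data is queued behind the header. */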
int
ksocknal_lib_send_hdr(struct ksock_conn *conn, struct ksock_tx *tx,
                      struct kvec *scratchiov)
{
        struct socket *sock = conn->ksnc_sock;
        int nob = 0;
        int rc;

        if (*ksocknal_tunables.ksnd_enable_csum &&        /* checksum enabled */
            conn->ksnc_proto == &ksocknal_protocol_v2x && /* V2.x connection  */
            tx->tx_nob == tx->tx_resid &&                 /* first sending    */
            tx->tx_msg.ksm_csum == 0)                     /* not checksummed  */
                ksocknal_lib_csum_tx(tx);

        /* NB we can't trust socket ops to either consume our iovs
         * or leave them alone. */
        {
#if SOCKNAL_SINGLE_FRAG_TX
                struct kvec scratch;
                struct kvec *scratchiov = &scratch;
                unsigned int niov = 1;
#else
                unsigned int niov = tx->tx_niov;
#endif
                struct msghdr msg = { .msg_flags = MSG_DONTWAIT };

                scratchiov[0] = tx->tx_hdr;
                nob += scratchiov[0].iov_len;

                if (!list_empty(&conn->ksnc_tx_queue) ||
                    nob < tx->tx_resid)
                        msg.msg_flags |= MSG_MORE;

                rc = kernel_sendmsg(sock, &msg, scratchiov, niov, nob);
        }
        return rc;
}
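
/* Transmit payload fragments from tx_kiov. With zero-copy in play the
 * first fragment is handed to sendpage(); otherwise the pages are
 * kmapped into a scratch iovec and sent with kernel_sendmsg(). */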
int
ksocknal_lib_send_kiov(struct ksock_conn *conn, struct ksock_tx *tx,
                       struct kvec *scratchiov)
{
        struct socket *sock = conn->ksnc_sock;
        struct bio_vec *kiov = tx->tx_kiov;
        int rc;
        int nob;

        /* Not NOOP message */
        LASSERT(tx->tx_lnetmsg != NULL);

        /* NB we can't trust socket ops to either consume our iovs
         * or leave them alone. */
        if (tx->tx_msg.ksm_zc_cookies[0] != 0) {
                /* Zero copy is enabled */
                struct sock *sk = sock->sk;
                struct page *page = kiov->bv_page;
                int offset = kiov->bv_offset;
                int fragsize = kiov->bv_len;
                int msgflg = MSG_DONTWAIT;

                CDEBUG(D_NET, "page %p + offset %x for %d\n",
                       page, offset, kiov->bv_len);

                if (!list_empty(&conn->ksnc_tx_queue) ||
                    fragsize < tx->tx_resid)
                        msgflg |= MSG_MORE;

                rc = sk->sk_prot->sendpage(sk, page,
                                           offset, fragsize, msgflg);
        } else {
#if SOCKNAL_SINGLE_FRAG_TX || !SOCKNAL_RISK_KMAP_DEADLOCK
                struct kvec scratch;
                struct kvec *scratchiov = &scratch;
                unsigned int niov = 1;
#else
#ifdef CONFIG_HIGHMEM
#warning "XXX risk of kmap deadlock on multiple frags..."
#endif
                unsigned int niov = tx->tx_nkiov;
#endif
                struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
                int i;

                for (nob = i = 0; i < niov; i++) {
                        scratchiov[i].iov_base = kmap(kiov[i].bv_page) +
                                                 kiov[i].bv_offset;
                        nob += scratchiov[i].iov_len = kiov[i].bv_len;
                }

                if (!list_empty(&conn->ksnc_tx_queue) ||
                    nob < tx->tx_resid)
                        msg.msg_flags |= MSG_MORE;

                rc = kernel_sendmsg(sock, &msg, scratchiov, niov, nob);

                for (i = 0; i < niov; i++)
                        kunmap(kiov[i].bv_page);
        }
        return rc;
}

void
ksocknal_lib_eager_ack(struct ksock_conn *conn)
{
        struct socket *sock = conn->ksnc_sock;

        /* Remind the socket to ACK eagerly. If I don't, the socket might
         * think I'm about to send something it could piggy-back the ACK on,
         * introducing delay in completing zero-copy sends in my peer_ni. */
        tcp_sock_set_quickack(sock->sk, 1);
}
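
/* Receive into the kvec fragments the connection currently wants,
 * folding whatever arrived into the running receive checksum on V2.x
 * connections. Returns the byte count from kernel_recvmsg(). */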
int
ksocknal_lib_recv_iov(struct ksock_conn *conn, struct kvec *scratchiov)
{
#if SOCKNAL_SINGLE_FRAG_RX
        struct kvec scratch;
        struct kvec *scratchiov = &scratch;
        unsigned int niov = 1;
#else
        unsigned int niov = conn->ksnc_rx_niov;
#endif
        struct kvec *iov = conn->ksnc_rx_iov;
        struct msghdr msg = {
                .msg_flags = 0
        };
        int nob;
        int i;
        int rc;
        int fragnob;
        int sum;
        __u32 saved_csum;

        /* NB we can't trust socket ops to either consume our iovs
         * or leave them alone. */
        for (nob = i = 0; i < niov; i++) {
                scratchiov[i] = iov[i];
                nob += scratchiov[i].iov_len;
        }
        LASSERT(nob <= conn->ksnc_rx_nob_wanted);

        rc = kernel_recvmsg(conn->ksnc_sock, &msg, scratchiov, niov, nob,
                            MSG_DONTWAIT);

        saved_csum = 0;
        if (conn->ksnc_proto == &ksocknal_protocol_v2x) {
                saved_csum = conn->ksnc_msg.ksm_csum;
                conn->ksnc_msg.ksm_csum = 0;
        }

        if (saved_csum != 0) {
                /* accumulate checksum */
                for (i = 0, sum = rc; sum > 0; i++, sum -= fragnob) {
                        LASSERT(i < niov);

                        fragnob = iov[i].iov_len;
                        if (fragnob > sum)
                                fragnob = sum;

                        conn->ksnc_rx_csum = ksocknal_csum(conn->ksnc_rx_csum,
                                                           iov[i].iov_base,
                                                           fragnob);
                }
                conn->ksnc_msg.ksm_csum = saved_csum;
        }

        return rc;
}
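
/* Helpers for zero-copy receive: map the receive pages into a single
 * contiguous virtual range so they can be filled as one fragment, and
 * unmap that range when the receive completes. */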
static void
ksocknal_lib_kiov_vunmap(void *addr)
{
        if (addr == NULL)
                return;

        vunmap(addr);
}

static void *
ksocknal_lib_kiov_vmap(struct bio_vec *kiov, int niov,
                       struct kvec *iov, struct page **pages)
{
        void *addr;
        int nob;
        int i;

        if (!*ksocknal_tunables.ksnd_zc_recv || pages == NULL)
                return NULL;

        LASSERT(niov <= LNET_MAX_IOV);

        if (niov < 2 ||
            niov < *ksocknal_tunables.ksnd_zc_recv_min_nfrags)
                return NULL;

        /* Only a fully contiguous page run can be mapped: every fragment
         * but the first must start at offset 0, and every fragment but
         * the last must end on a page boundary. */
        for (nob = i = 0; i < niov; i++) {
                if ((kiov[i].bv_offset != 0 && i > 0) ||
                    (kiov[i].bv_offset + kiov[i].bv_len !=
                     PAGE_SIZE && i < niov - 1))
                        return NULL;

                pages[i] = kiov[i].bv_page;
                nob += kiov[i].bv_len;
        }

        addr = vmap(pages, niov, VM_MAP, PAGE_KERNEL);
        if (addr == NULL)
                return NULL;

        iov->iov_base = addr + kiov[0].bv_offset;
        iov->iov_len = nob;

        return addr;
}
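
/* Receive into the bio_vec fragments, using a single vmapped region when
 * ksocknal_lib_kiov_vmap() permits and falling back to per-page kmaps
 * otherwise. */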
int
ksocknal_lib_recv_kiov(struct ksock_conn *conn, struct page **pages,
                       struct kvec *scratchiov)
{
#if SOCKNAL_SINGLE_FRAG_RX || !SOCKNAL_RISK_KMAP_DEADLOCK
        struct kvec scratch;
        struct kvec *scratchiov = &scratch;
        struct page **pages = NULL;
        unsigned int niov = 1;
#else
#ifdef CONFIG_HIGHMEM
#warning "XXX risk of kmap deadlock on multiple frags..."
#endif
        unsigned int niov = conn->ksnc_rx_nkiov;
#endif
        struct bio_vec *kiov = conn->ksnc_rx_kiov;
        struct msghdr msg = {
                .msg_flags = 0
        };
        int nob;
        int i;
        int rc;
        void *base;
        void *addr;
        int sum;
        int fragnob;
        int n;

        /* NB we can't trust socket ops to either consume our iovs
         * or leave them alone. */
        addr = ksocknal_lib_kiov_vmap(kiov, niov, scratchiov, pages);
        if (addr != NULL) {
                nob = scratchiov[0].iov_len;
                n = 1;
        } else {
                for (nob = i = 0; i < niov; i++) {
                        nob += scratchiov[i].iov_len = kiov[i].bv_len;
                        scratchiov[i].iov_base = kmap(kiov[i].bv_page) +
                                                 kiov[i].bv_offset;
                }
                n = niov;
        }

        LASSERT(nob <= conn->ksnc_rx_nob_wanted);

        rc = kernel_recvmsg(conn->ksnc_sock, &msg, scratchiov, n, nob,
                            MSG_DONTWAIT);

        if (conn->ksnc_msg.ksm_csum != 0) {
                for (i = 0, sum = rc; sum > 0; i++, sum -= fragnob) {
                        LASSERT(i < niov);

                        /* Dang! have to kmap again because I have nowhere to
                         * stash the mapped address. But by doing it while the
                         * page is still mapped, the kernel just bumps the map
                         * count and returns me the address it stashed. */
                        base = kmap(kiov[i].bv_page) + kiov[i].bv_offset;
                        fragnob = kiov[i].bv_len;
                        if (fragnob > sum)
                                fragnob = sum;

                        conn->ksnc_rx_csum = ksocknal_csum(conn->ksnc_rx_csum,
                                                           base, fragnob);

                        kunmap(kiov[i].bv_page);
                }
        }

        if (addr != NULL) {
                ksocknal_lib_kiov_vunmap(addr);
        } else {
                for (i = 0; i < niov; i++)
                        kunmap(kiov[i].bv_page);
        }

        return rc;
}
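
/* Checksum an entire V2.x message (header plus payload pages) and store
 * the result in the message header before transmission. */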
void
ksocknal_lib_csum_tx(struct ksock_tx *tx)
{
        int i;
        __u32 csum;
        void *base;

        LASSERT(tx->tx_hdr.iov_base == (void *)&tx->tx_msg);
        LASSERT(tx->tx_conn != NULL);
        LASSERT(tx->tx_conn->ksnc_proto == &ksocknal_protocol_v2x);

        tx->tx_msg.ksm_csum = 0;

        csum = ksocknal_csum(~0, (void *)tx->tx_hdr.iov_base,
                             tx->tx_hdr.iov_len);

        for (i = 0; i < tx->tx_nkiov; i++) {
                base = kmap(tx->tx_kiov[i].bv_page) +
                       tx->tx_kiov[i].bv_offset;

                csum = ksocknal_csum(csum, base, tx->tx_kiov[i].bv_len);

                kunmap(tx->tx_kiov[i].bv_page);
        }

        if (*ksocknal_tunables.ksnd_inject_csum_error) {
                csum++;
                *ksocknal_tunables.ksnd_inject_csum_error = 0;
        }

        tx->tx_msg.ksm_csum = csum;
}
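
/* Report the socket's current buffer sizes and Nagle state, holding a
 * temporary ref on the socket so a concurrent close can't free it. */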
int
ksocknal_lib_get_conn_tunables(struct ksock_conn *conn, int *txmem,
                               int *rxmem, int *nagle)
{
        struct socket *sock = conn->ksnc_sock;
        struct tcp_sock *tp = tcp_sk(sock->sk);

        if (ksocknal_connsock_addref(conn) < 0) {
                LASSERT(conn->ksnc_closing);
                *txmem = *rxmem = *nagle = 0;
                return -ESHUTDOWN;
        }

        lnet_sock_getbuf(sock, txmem, rxmem);

        *nagle = !(tp->nonagle & TCP_NAGLE_OFF);

        ksocknal_connsock_decref(conn);

        return 0;
}
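
/* Configure a freshly created socket with the module's tunables: linger,
 * Nagle, buffer sizes, optional TCP backoff, and keepalives. */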
int
ksocknal_lib_setup_sock(struct socket *sock)
{
        int rc;
        int keep_idle, keep_intvl, keep_count, do_keepalive;
        struct tcp_sock *tp = tcp_sk(sock->sk);

        sock->sk->sk_allocation = GFP_NOFS;

        /* Ensure this socket aborts active sends immediately when closed. */
        sock_reset_flag(sock->sk, SOCK_LINGER);

        tp->linger2 = -1;       /* don't allow the socket to pass TIME_WAIT */

        if (!*ksocknal_tunables.ksnd_nagle)
                tcp_sock_set_nodelay(sock->sk);

        lnet_sock_setbuf(sock,
                         *ksocknal_tunables.ksnd_tx_buffer_size,
                         *ksocknal_tunables.ksnd_rx_buffer_size);

/* TCP_BACKOFF_* sockopt tunables unsupported in stock kernels */
#ifdef SOCKNAL_BACKOFF
        if (*ksocknal_tunables.ksnd_backoff_init > 0) {
                int option = *ksocknal_tunables.ksnd_backoff_init;
#ifdef SOCKNAL_BACKOFF_MS
                option *= 1000;
#endif
                rc = kernel_setsockopt(sock, SOL_TCP, TCP_BACKOFF_INIT,
                                       (char *)&option, sizeof(option));
                if (rc != 0) {
                        CERROR("Can't set initial tcp backoff %d: %d\n",
                               option, rc);
                        return rc;
                }
        }

        if (*ksocknal_tunables.ksnd_backoff_max > 0) {
                int option = *ksocknal_tunables.ksnd_backoff_max;
#ifdef SOCKNAL_BACKOFF_MS
                option *= 1000;
#endif
                rc = kernel_setsockopt(sock, SOL_TCP, TCP_BACKOFF_MAX,
                                       (char *)&option, sizeof(option));
                if (rc != 0) {
                        CERROR("Can't set maximum tcp backoff %d: %d\n",
                               option, rc);
                        return rc;
                }
        }
#endif

        /* snapshot tunables */
        keep_idle  = *ksocknal_tunables.ksnd_keepalive_idle;
        keep_count = *ksocknal_tunables.ksnd_keepalive_count;
        keep_intvl = *ksocknal_tunables.ksnd_keepalive_intvl;

        do_keepalive = (keep_idle > 0 && keep_count > 0 && keep_intvl > 0);

#ifdef HAVE_KERNEL_SETSOCKOPT
        /* open-coded version doesn't work in all kernels, and
         * there is no helper function, so call kernel_setsockopt()
         * directly. */
        {
                int option = (do_keepalive ? 1 : 0);

                kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE,
                                  (char *)&option, sizeof(option));
        }
#else
        if (sock->sk->sk_prot->keepalive)
                sock->sk->sk_prot->keepalive(sock->sk, do_keepalive);
        if (do_keepalive)
                sock_set_flag(sock->sk, SOCK_KEEPOPEN);
        else
                sock_reset_flag(sock->sk, SOCK_KEEPOPEN);
#endif /* HAVE_KERNEL_SETSOCKOPT */

        if (!do_keepalive)
                return 0;

        rc = tcp_sock_set_keepidle(sock->sk, keep_idle);
        if (rc != 0) {
                CERROR("Can't set TCP_KEEPIDLE: %d\n", rc);
                return rc;
        }

        rc = tcp_sock_set_keepintvl(sock->sk, keep_intvl);
        if (rc != 0) {
                CERROR("Can't set TCP_KEEPINTVL: %d\n", rc);
                return rc;
        }

        rc = tcp_sock_set_keepcnt(sock->sk, keep_count);
        if (rc != 0) {
                CERROR("Can't set TCP_KEEPCNT: %d\n", rc);
                return rc;
        }

        return 0;
}
void
ksocknal_lib_push_conn(struct ksock_conn *conn)
{
        struct sock *sk;
        struct tcp_sock *tp;
        int nonagle;
        int rc;

        rc = ksocknal_connsock_addref(conn);
        if (rc != 0)                    /* being shut down */
                return;

        sk = conn->ksnc_sock->sk;
        tp = tcp_sk(sk);

        lock_sock(sk);
        nonagle = tp->nonagle;
        tp->nonagle = TCP_NAGLE_OFF;
        release_sock(sk);

        tcp_sock_set_nodelay(conn->ksnc_sock->sk);

        lock_sock(sk);
        tp->nonagle = nonagle;
        release_sock(sk);

        ksocknal_connsock_decref(conn);
}

void ksocknal_read_callback(struct ksock_conn *conn);
void ksocknal_write_callback(struct ksock_conn *conn);

/*
 * socket callbacks in Linux
 */
static void
#ifdef HAVE_SK_DATA_READY_ONE_ARG
ksocknal_data_ready(struct sock *sk)
#else
ksocknal_data_ready(struct sock *sk, int n)
#endif
{
        struct ksock_conn *conn;

        /* interleave correctly with closing sockets... */
        LASSERT(!in_irq());
        read_lock(&ksocknal_data.ksnd_global_lock);

        conn = sk->sk_user_data;
        if (conn == NULL) {     /* raced with ksocknal_terminate_conn */
                LASSERT(sk->sk_data_ready != &ksocknal_data_ready);
#ifdef HAVE_SK_DATA_READY_ONE_ARG
                sk->sk_data_ready(sk);
#else
                sk->sk_data_ready(sk, n);
#endif
        } else
                ksocknal_read_callback(conn);

        read_unlock(&ksocknal_data.ksnd_global_lock);
}
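
/* sk->sk_write_space() upcall: when enough space has drained from the
 * send buffer, schedule the conn for writing and clear SOCK_NOSPACE. */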
static void
ksocknal_write_space(struct sock *sk)
{
        struct ksock_conn *conn;
        int wspace;
        int min_wpace;

        /* interleave correctly with closing sockets... */
        LASSERT(!in_irq());
        read_lock(&ksocknal_data.ksnd_global_lock);

        conn = sk->sk_user_data;
        wspace = sk_stream_wspace(sk);
        min_wpace = sk_stream_min_wspace(sk);

        CDEBUG(D_NET, "sk %p wspace %d low water %d conn %p%s%s%s\n",
               sk, wspace, min_wpace, conn,
               (conn == NULL) ? "" : (conn->ksnc_tx_ready ?
                                      " ready" : " blocked"),
               (conn == NULL) ? "" : (conn->ksnc_tx_scheduled ?
                                      " scheduled" : " idle"),
               (conn == NULL) ? "" : (list_empty(&conn->ksnc_tx_queue) ?
                                      " empty" : " queued"));

        if (conn == NULL) {     /* raced with ksocknal_terminate_conn */
                LASSERT(sk->sk_write_space != &ksocknal_write_space);
                sk->sk_write_space(sk);

                read_unlock(&ksocknal_data.ksnd_global_lock);
                return;
        }

        if (wspace >= min_wpace) {      /* got enough space */
                ksocknal_write_callback(conn);

                /* Clear SOCK_NOSPACE _after_ ksocknal_write_callback so the
                 * ENOMEM check in ksocknal_transmit is race-free (think about
                 * it). */
                clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
        }

        read_unlock(&ksocknal_data.ksnd_global_lock);
}

void
ksocknal_lib_save_callback(struct socket *sock, struct ksock_conn *conn)
{
        conn->ksnc_saved_data_ready = sock->sk->sk_data_ready;
        conn->ksnc_saved_write_space = sock->sk->sk_write_space;
}

void
ksocknal_lib_set_callback(struct socket *sock, struct ksock_conn *conn)
{
        sock->sk->sk_user_data = conn;
        sock->sk->sk_data_ready = ksocknal_data_ready;
        sock->sk->sk_write_space = ksocknal_write_space;
}

void
ksocknal_lib_reset_callback(struct socket *sock, struct ksock_conn *conn)
{
        /* Remove conn's network callbacks.
         * NB I _have_ to restore the callback, rather than storing a noop,
         * since the socket could survive past this module being unloaded!! */
        sock->sk->sk_data_ready = conn->ksnc_saved_data_ready;
        sock->sk->sk_write_space = conn->ksnc_saved_write_space;

        /* A callback could be in progress already; they hold a read lock
         * on ksnd_global_lock (to serialise with me) and NOOP if
         * sk_user_data is NULL. */
        sock->sk->sk_user_data = NULL;
}
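
/* Decide whether a failed send reflects genuine memory pressure: return
 * -ENOMEM (caller retries after a timeout) only when write_space won't
 * reschedule the conn for us. */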
int
ksocknal_lib_memory_pressure(struct ksock_conn *conn)
{
        int rc = 0;
        struct ksock_sched *sched;

        sched = conn->ksnc_scheduler;
        spin_lock_bh(&sched->kss_lock);

        if (!test_bit(SOCK_NOSPACE, &conn->ksnc_sock->flags) &&
            !conn->ksnc_tx_ready) {
                /* SOCK_NOSPACE is set when the socket fills
                 * and cleared in the write_space callback
                 * (which also sets ksnc_tx_ready). If
                 * SOCK_NOSPACE and ksnc_tx_ready are BOTH
                 * zero, I didn't fill the socket and
                 * write_space won't reschedule me, so I
                 * return -ENOMEM to get my caller to retry
                 * after a timeout */
                rc = -ENOMEM;
        }

        spin_unlock_bh(&sched->kss_lock);

        return rc;
}