X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lnet%2Fulnds%2Fsocklnd%2Fconn.c;h=4d6e01d63e7ef807919f4339571a697bd37f1c0f;hp=a386bb10dac673e6951c7daa712ef8ce75148f37;hb=b74555726d0e8ba1f11ebe959029c59ce9cdc842;hpb=76a63f95f8516edb2a7d7af797299078f37efd50

diff --git a/lnet/ulnds/socklnd/conn.c b/lnet/ulnds/socklnd/conn.c
index a386bb1..4d6e01d 100644
--- a/lnet/ulnds/socklnd/conn.c
+++ b/lnet/ulnds/socklnd/conn.c
@@ -51,7 +51,7 @@ usocklnd_conn_timed_out(usock_conn_t *conn, cfs_time_t current_time)
         if (conn->uc_rx_flag && /* receiving is in progress */
             cfs_time_aftereq(current_time, conn->uc_rx_deadline))
                 return 1;
-
+
         return 0;
 }
 
@@ -61,7 +61,7 @@ usocklnd_conn_kill(usock_conn_t *conn)
         pthread_mutex_lock(&conn->uc_lock);
         if (conn->uc_state != UC_DEAD)
                 usocklnd_conn_kill_locked(conn);
-        pthread_mutex_unlock(&conn->uc_lock);
+        pthread_mutex_unlock(&conn->uc_lock);
 }
 
 /* Mark the conn as DEAD and schedule its deletion */
@@ -82,7 +82,7 @@ usocklnd_conn_allocate()
         LIBCFS_ALLOC (pr, sizeof(*pr));
         if (pr == NULL)
                 return NULL;
-
+
         LIBCFS_ALLOC (conn, sizeof(*conn));
         if (conn == NULL) {
                 LIBCFS_FREE (pr, sizeof(*pr));
@@ -115,7 +115,7 @@ usocklnd_conn_free(usock_conn_t *conn)
                 LIBCFS_FREE (conn->uc_rx_hello,
                              offsetof(ksock_hello_msg_t,
                                       kshm_ips[LNET_MAX_INTERFACES]));
-
+
         LIBCFS_FREE (conn, sizeof(*conn));
 }
 
@@ -128,12 +128,12 @@ usocklnd_tear_peer_conn(usock_conn_t *conn)
         lnet_process_id_t id;
         int               decref_flag  = 0;
         int               killall_flag = 0;
-
+
         if (peer == NULL) /* nothing to tear */
                 return;
-
+
         pthread_mutex_lock(&peer->up_lock);
-        pthread_mutex_lock(&conn->uc_lock);
+        pthread_mutex_lock(&conn->uc_lock);
 
         ni = peer->up_ni;
         id = peer->up_peerid;
@@ -142,9 +142,9 @@ usocklnd_tear_peer_conn(usock_conn_t *conn)
                 if (conn->uc_rx_state == UC_RX_LNET_PAYLOAD) {
                         /* change state not to finalize twice */
                         conn->uc_rx_state = UC_RX_KSM_HEADER;
-                        lnet_finalize(peer->up_ni, conn->uc_rx_lnetmsg, -EIO);
+                        lnet_finalize(peer->up_ni, conn->uc_rx_lnetmsg, -EIO);
                 }
-
+
                 usocklnd_destroy_txlist(peer->up_ni,
                                         &conn->uc_tx_list);
 
@@ -155,14 +155,14 @@ usocklnd_tear_peer_conn(usock_conn_t *conn)
                 if(conn->uc_errored && !peer->up_errored)
                         peer->up_errored = killall_flag = 1;
         }
-
+
         pthread_mutex_unlock(&conn->uc_lock);
 
         if (killall_flag)
                 usocklnd_del_conns_locked(peer);
 
         pthread_mutex_unlock(&peer->up_lock);
-
+
         if (!decref_flag)
                 return;
 
@@ -178,7 +178,7 @@ void
 usocklnd_check_peer_stale(lnet_ni_t *ni, lnet_process_id_t id)
 {
         usock_peer_t *peer;
-
+
         pthread_rwlock_wrlock(&usock_data.ud_peers_lock);
         peer = usocklnd_find_peer_locked(ni, id);
 
@@ -192,13 +192,13 @@ usocklnd_check_peer_stale(lnet_ni_t *ni, lnet_process_id_t id)
                 for (i = 0; i < N_CONN_TYPES; i++)
                         LASSERT (peer->up_conns[i] == NULL);
 
-                list_del(&peer->up_list);
-
+                list_del(&peer->up_list);
+
                 if (peer->up_errored &&
                     (peer->up_peerid.pid & LNET_PID_USERFLAG) == 0)
                         lnet_notify (peer->up_ni, peer->up_peerid.nid, 0,
                                      cfs_time_seconds(peer->up_last_alive));
-
+
                 usocklnd_peer_decref(peer);
         }
 
@@ -208,18 +208,21 @@ usocklnd_check_peer_stale(lnet_ni_t *ni, lnet_process_id_t id)
 
 /* Returns 0 on success, <0 else */
 int
-usocklnd_create_passive_conn(lnet_ni_t *ni, int fd, usock_conn_t **connp)
+usocklnd_create_passive_conn(lnet_ni_t *ni,
+                             cfs_socket_t *sock, usock_conn_t **connp)
 {
         int           rc;
         __u32         peer_ip;
-        __u16         peer_port;
+        int           peer_port;
         usock_conn_t *conn;
 
-        rc = libcfs_getpeername(fd, &peer_ip, &peer_port);
+        rc = libcfs_sock_getaddr(sock, 1, &peer_ip, &peer_port);
         if (rc)
                 return rc;
 
-        rc = usocklnd_set_sock_options(fd);
+        LASSERT (peer_port >= 0); /* uc_peer_port is u16 */
+
+        rc = usocklnd_set_sock_options(sock);
         if (rc)
                 return rc;
 
@@ -228,8 +231,8 @@ usocklnd_create_passive_conn(lnet_ni_t *ni, int fd, usock_conn_t **connp)
                 return -ENOMEM;
 
         usocklnd_rx_hellomagic_state_transition(conn);
-
-        conn->uc_fd = fd;
+
+        conn->uc_sock = sock;
         conn->uc_peer_ip = peer_ip;
         conn->uc_peer_port = peer_port;
         conn->uc_state = UC_RECEIVING_HELLO;
@@ -250,11 +253,11 @@ usocklnd_create_active_conn(usock_peer_t *peer, int type,
                             usock_conn_t **connp)
 {
         int           rc;
-        int           fd;
+        cfs_socket_t *sock;
         usock_conn_t *conn;
         __u32         dst_ip   = LNET_NIDADDR(peer->up_peerid.nid);
         __u16         dst_port = lnet_acceptor_port();
-
+
         conn = usocklnd_conn_allocate();
         if (conn == NULL)
                 return -ENOMEM;
@@ -264,32 +267,33 @@ usocklnd_create_active_conn(usock_peer_t *peer, int type, usock_conn_t **connp)
         if (conn->uc_tx_hello == NULL) {
                 usocklnd_conn_free(conn);
                 return -ENOMEM;
-        }
-
+        }
+
         if (the_lnet.ln_pid & LNET_PID_USERFLAG)
-                rc = usocklnd_connect_cli_mode(&fd, dst_ip, dst_port);
+                rc = usocklnd_connect_cli_mode(&sock, dst_ip, dst_port);
         else
-                rc = usocklnd_connect_srv_mode(&fd, dst_ip, dst_port);
-
+                rc = usocklnd_connect_srv_mode(&sock, dst_ip, dst_port);
+
         if (rc) {
                 usocklnd_destroy_tx(NULL, conn->uc_tx_hello);
                 usocklnd_conn_free(conn);
                 return rc;
         }
-
+
         conn->uc_tx_deadline = cfs_time_shift(usock_tuns.ut_timeout);
-        conn->uc_tx_flag = 1;
-
-        conn->uc_fd = fd;
-        conn->uc_peer_ip = dst_ip;
-        conn->uc_peer_port = dst_port;
-        conn->uc_type = type;
+        conn->uc_tx_flag = 1;
+
+        conn->uc_sock = sock;
+        conn->uc_peer_ip = dst_ip;
+        conn->uc_peer_port = dst_port;
+        conn->uc_type = type;
         conn->uc_activeflag = 1;
-        conn->uc_state = UC_CONNECTING;
-        conn->uc_pt_idx = usocklnd_ip2pt_idx(dst_ip);
-        conn->uc_ni = NULL;
-        conn->uc_peerid = peer->up_peerid;
-        conn->uc_peer = peer;
+        conn->uc_state = UC_CONNECTING;
+        conn->uc_pt_idx = usocklnd_ip2pt_idx(dst_ip);
+        conn->uc_ni = NULL;
+        conn->uc_peerid = peer->up_peerid;
+        conn->uc_peer = peer;
+        usocklnd_peer_addref(peer);
 
         CFS_INIT_LIST_HEAD (&conn->uc_tx_list);
         CFS_INIT_LIST_HEAD (&conn->uc_zcack_list);
@@ -302,45 +306,42 @@ usocklnd_create_active_conn(usock_peer_t *peer, int type,
 
 /* Returns 0 on success, <0 else */
 int
-usocklnd_connect_srv_mode(int *fdp, __u32 dst_ip, __u16 dst_port)
+usocklnd_connect_srv_mode(cfs_socket_t **sockp, __u32 dst_ip, __u16 dst_port)
 {
-        __u16 port;
-        int   fd;
-        int   rc;
+        __u16         port;
+        cfs_socket_t *sock;
+        int           rc;
+        int           fatal;
 
-        for (port = LNET_ACCEPTOR_MAX_RESERVED_PORT;
-             port >= LNET_ACCEPTOR_MIN_RESERVED_PORT;
+        for (port = LNET_ACCEPTOR_MAX_RESERVED_PORT;
+             port >= LNET_ACCEPTOR_MIN_RESERVED_PORT;
              port--) {
                 /* Iterate through reserved ports. */
-
-                rc = libcfs_sock_create(&fd);
-                if (rc)
-                        return rc;
-
-                rc = libcfs_sock_bind_to_port(fd, port);
+                rc = libcfs_sock_create(&sock, &fatal, 0, port);
                 if (rc) {
-                        close(fd);
+                        if (fatal)
+                                return rc;
                         continue;
                 }
 
-                rc = usocklnd_set_sock_options(fd);
+                rc = usocklnd_set_sock_options(sock);
                 if (rc) {
-                        close(fd);
+                        libcfs_sock_release(sock);
                         return rc;
                 }
 
-                rc = libcfs_sock_connect(fd, dst_ip, dst_port);
+                rc = libcfs_sock_connect(sock, dst_ip, dst_port);
                 if (rc == 0) {
-                        *fdp = fd;
+                        *sockp = sock;
                         return 0;
                 }
-
+
                 if (rc != -EADDRINUSE && rc != -EADDRNOTAVAIL) {
-                        close(fd);
+                        libcfs_sock_release(sock);
                         return rc;
                 }
 
-                close(fd);
+                libcfs_sock_release(sock);
         }
 
         CERROR("Can't bind to any reserved port\n");
@@ -349,54 +350,55 @@ usocklnd_connect_srv_mode(int *fdp, __u32 dst_ip, __u16 dst_port)
 
 /* Returns 0 on success, <0 else */
 int
-usocklnd_connect_cli_mode(int *fdp, __u32 dst_ip, __u16 dst_port)
+usocklnd_connect_cli_mode(cfs_socket_t **sockp, __u32 dst_ip, __u16 dst_port)
 {
-        int fd;
-        int rc;
+        cfs_socket_t *sock;
+        int           rc;
+        int           fatal;
 
-        rc = libcfs_sock_create(&fd);
+        rc = libcfs_sock_create(&sock, &fatal, 0, 0);
         if (rc)
                 return rc;
-
-        rc = usocklnd_set_sock_options(fd);
+
+        rc = usocklnd_set_sock_options(sock);
         if (rc) {
-                close(fd);
+                libcfs_sock_release(sock);
                 return rc;
         }
 
-        rc = libcfs_sock_connect(fd, dst_ip, dst_port);
+        rc = libcfs_sock_connect(sock, dst_ip, dst_port);
         if (rc) {
-                close(fd);
+                libcfs_sock_release(sock);
                 return rc;
         }
 
-        *fdp = fd;
+        *sockp = sock;
         return 0;
 }
 
 int
-usocklnd_set_sock_options(int fd)
+usocklnd_set_sock_options(cfs_socket_t *sock)
 {
         int rc;
 
-        rc = libcfs_sock_set_nagle(fd, usock_tuns.ut_socknagle);
+        rc = libcfs_sock_set_nagle(sock, usock_tuns.ut_socknagle);
         if (rc)
                 return rc;
 
         if (usock_tuns.ut_sockbufsiz) {
-                rc = libcfs_sock_set_bufsiz(fd, usock_tuns.ut_sockbufsiz);
+                rc = libcfs_sock_set_bufsiz(sock, usock_tuns.ut_sockbufsiz);
                 if (rc)
-                        return rc;
+                        return rc;
         }
-
-        return libcfs_fcntl_nonblock(fd);
+
+        return libcfs_fcntl_nonblock(sock);
 }
 
 usock_tx_t *
 usocklnd_create_noop_tx(__u64 cookie)
 {
         usock_tx_t *tx;
-
+
         LIBCFS_ALLOC (tx, sizeof(usock_tx_t));
         if (tx == NULL)
                 return NULL;
@@ -406,22 +408,22 @@ usocklnd_create_noop_tx(__u64 cookie)
         socklnd_init_msg(&tx->tx_msg, KSOCK_MSG_NOOP);
         tx->tx_msg.ksm_zc_cookies[1] = cookie;
-
+
         tx->tx_iova[0].iov_base = (void *)&tx->tx_msg;
         tx->tx_iova[0].iov_len = tx->tx_resid = tx->tx_nob =
                 offsetof(ksock_msg_t, ksm_u.lnetmsg.ksnm_hdr);
         tx->tx_iov = tx->tx_iova;
         tx->tx_niov = 1;
-
+
         return tx;
 }
-
+
 usock_tx_t *
 usocklnd_create_tx(lnet_msg_t *lntmsg)
 {
         usock_tx_t   *tx;
-        unsigned int  payload_niov = lntmsg->msg_niov;
-        struct iovec *payload_iov = lntmsg->msg_iov;
+        unsigned int  payload_niov = lntmsg->msg_niov;
+        struct iovec *payload_iov = lntmsg->msg_iov;
         unsigned int  payload_offset = lntmsg->msg_offset;
         unsigned int  payload_nob = lntmsg->msg_len;
         int           size = offsetof(usock_tx_t,
@@ -435,14 +437,14 @@ usocklnd_create_tx(lnet_msg_t *lntmsg)
 
         tx->tx_lnetmsg = lntmsg;
         tx->tx_resid = tx->tx_nob = sizeof(ksock_msg_t) + payload_nob;
-
+
         socklnd_init_msg(&tx->tx_msg, KSOCK_MSG_LNET);
         tx->tx_msg.ksm_u.lnetmsg.ksnm_hdr = lntmsg->msg_hdr;
 
         tx->tx_iova[0].iov_base = (void *)&tx->tx_msg;
         tx->tx_iova[0].iov_len = sizeof(ksock_msg_t);
         tx->tx_iov = tx->tx_iova;
-        tx->tx_niov = 1 +
+        tx->tx_niov = 1 +
                 lnet_extract_iov(payload_niov, &tx->tx_iov[1],
                                  payload_niov, payload_iov,
                                  payload_offset, payload_nob);
@@ -460,7 +462,7 @@ usocklnd_init_hello_msg(ksock_hello_msg_t *hello,
         hello->kshm_version = KSOCK_PROTO_V2;
         hello->kshm_nips = 0;
         hello->kshm_ctype = type;
-
+
         hello->kshm_dst_incarnation = 0; /* not used */
         hello->kshm_src_incarnation = net->un_incarnation;
 
@@ -488,7 +490,7 @@ usocklnd_create_hello_tx(lnet_ni_t *ni,
 
         hello = (ksock_hello_msg_t *)&tx->tx_iova[1];
         usocklnd_init_hello_msg(hello, ni, type, peer_nid);
-
+
         tx->tx_iova[0].iov_base = (void *)hello;
         tx->tx_iova[0].iov_len = tx->tx_resid = tx->tx_nob =
                 offsetof(ksock_hello_msg_t, kshm_ips);
@@ -522,10 +524,10 @@ usocklnd_create_cr_hello_tx(lnet_ni_t *ni,
         cr->acr_magic   = LNET_PROTO_ACCEPTOR_MAGIC;
         cr->acr_version = LNET_PROTO_ACCEPTOR_VERSION;
         cr->acr_nid     = peer_nid;
-
+
         hello = (ksock_hello_msg_t *)((char *)cr + sizeof(*cr));
         usocklnd_init_hello_msg(hello, ni, type, peer_nid);
-
+
         tx->tx_iova[0].iov_base = (void *)cr;
         tx->tx_iova[0].iov_len = tx->tx_resid = tx->tx_nob =
                 sizeof(lnet_acceptor_connreq_t) +
@@ -545,7 +547,7 @@ usocklnd_destroy_tx(lnet_ni_t *ni, usock_tx_t *tx)
         LASSERT (ni != NULL || lnetmsg == NULL);
 
         LIBCFS_FREE (tx, tx->tx_size);
-
+
         if (lnetmsg != NULL) /* NOOP and hello go without lnetmsg */
                 lnet_finalize(ni, lnetmsg, rc);
 }
@@ -558,7 +560,7 @@ usocklnd_destroy_txlist(lnet_ni_t *ni, struct list_head *txlist)
         while (!list_empty(txlist)) {
                 tx = list_entry(txlist->next, usock_tx_t, tx_list);
                 list_del(&tx->tx_list);
-
+
                 usocklnd_destroy_tx(ni, tx);
         }
 }
@@ -571,7 +573,7 @@ usocklnd_destroy_zcack_list(struct list_head *zcack_list)
         while (!list_empty(zcack_list)) {
                 zcack = list_entry(zcack_list->next, usock_zc_ack_t, zc_list);
                 list_del(&zcack->zc_list);
-
+
                 LIBCFS_FREE (zcack, sizeof(*zcack));
         }
 }
@@ -588,7 +590,7 @@ usocklnd_destroy_peer(usock_peer_t *peer)
         LIBCFS_FREE (peer, sizeof (*peer));
 
         pthread_mutex_lock(&net->un_lock);
-        if(--net->un_peercount == 0)
+        if(--net->un_peercount == 0)
                 pthread_cond_signal(&net->un_cond);
         pthread_mutex_unlock(&net->un_lock);
 }
@@ -604,12 +606,12 @@ usocklnd_destroy_conn(usock_conn_t *conn)
         }
 
         if (!list_empty(&conn->uc_tx_list)) {
-                LASSERT (conn->uc_peer != NULL);
+                LASSERT (conn->uc_peer != NULL);
                 usocklnd_destroy_txlist(conn->uc_peer->up_ni, &conn->uc_tx_list);
         }
 
         usocklnd_destroy_zcack_list(&conn->uc_zcack_list);
-
+
         if (conn->uc_peer != NULL)
                 usocklnd_peer_decref(conn->uc_peer);
 
@@ -631,7 +633,7 @@ usocklnd_get_conn_type(lnet_msg_t *lntmsg)
                 return SOCKLND_CONN_ANY;
 
         nob = sizeof(ksock_msg_t) + lntmsg->msg_len;
-
+
         if (nob >= usock_tuns.ut_min_bulk)
                 return SOCKLND_CONN_BULK_OUT;
         else
@@ -698,10 +700,10 @@ usocklnd_create_peer(lnet_ni_t *ni, lnet_process_id_t id,
         peer->up_errored    = 0;
         peer->up_last_alive = 0;
         cfs_atomic_set (&peer->up_refcount, 1); /* 1 ref for caller */
-        pthread_mutex_init(&peer->up_lock, NULL);
+        pthread_mutex_init(&peer->up_lock, NULL);
 
         pthread_mutex_lock(&net->un_lock);
-        net->un_peercount++;
+        net->un_peercount++;
         pthread_mutex_unlock(&net->un_lock);
 
         *peerp = peer;
@@ -729,7 +731,7 @@ usocklnd_find_or_create_peer(lnet_ni_t *ni, lnet_process_id_t id,
         rc = usocklnd_create_peer(ni, id, &peer);
         if (rc)
                 return rc;
-
+
         pthread_rwlock_wrlock(&usock_data.ud_peers_lock);
         peer2 = usocklnd_find_peer_locked(ni, id);
         if (peer2 == NULL) {
@@ -739,7 +741,7 @@ usocklnd_find_or_create_peer(lnet_ni_t *ni, lnet_process_id_t id,
                         CERROR("Can't create peer: network shutdown\n");
                         return -ESHUTDOWN;
                 }
-
+
                 /* peer table will take 1 of my refs on peer */
                 usocklnd_peer_addref(peer);
                 list_add_tail (&peer->up_list,
@@ -749,8 +751,8 @@ usocklnd_find_or_create_peer(lnet_ni_t *ni, lnet_process_id_t id,
                 peer = peer2;
         }
         pthread_rwlock_unlock(&usock_data.ud_peers_lock);
-
- find_or_create_peer_done:
+
+ find_or_create_peer_done:
         *peerp = peer;
         return 0;
 }
@@ -758,7 +760,7 @@ usocklnd_find_or_create_peer(lnet_ni_t *ni, lnet_process_id_t id,
 /* NB: both peer and conn locks are held */
 static int
 usocklnd_enqueue_zcack(usock_conn_t *conn, usock_zc_ack_t *zc_ack)
-{
+{
         if (conn->uc_state == UC_READY &&
             list_empty(&conn->uc_tx_list) &&
             list_empty(&conn->uc_zcack_list) &&
@@ -767,7 +769,7 @@ usocklnd_enqueue_zcack(usock_conn_t *conn, usock_zc_ack_t *zc_ack)
                                                   POLLOUT);
                 if (rc != 0)
                         return rc;
-        }
+        }
 
         list_add_tail(&zc_ack->zc_list, &conn->uc_zcack_list);
         return 0;
@@ -779,7 +781,7 @@ usocklnd_enqueue_zcack(usock_conn_t *conn, usock_zc_ack_t *zc_ack)
 static void
 usocklnd_enqueue_tx(usock_conn_t *conn, usock_tx_t *tx,
                     int *send_immediately)
-{
+{
         if (conn->uc_state == UC_READY &&
             list_empty(&conn->uc_tx_list) &&
             list_empty(&conn->uc_zcack_list) &&
@@ -787,7 +789,7 @@ usocklnd_enqueue_tx(usock_conn_t *conn, usock_tx_t *tx,
                 conn->uc_sending = 1;
                 *send_immediately = 1;
                 return;
-        }
+        }
 
         *send_immediately = 0;
         list_add_tail(&tx->tx_list, &conn->uc_tx_list);
@@ -805,12 +807,12 @@ usocklnd_find_or_create_conn(usock_peer_t *peer, int type,
         int         idx;
         int         rc;
         lnet_pid_t  userflag = peer->up_peerid.pid & LNET_PID_USERFLAG;
-
+
         if (userflag)
                 type = SOCKLND_CONN_ANY;
 
         idx = usocklnd_type2idx(type);
-
+
         pthread_mutex_lock(&peer->up_lock);
         if (peer->up_conns[idx] != NULL) {
                 conn = peer->up_conns[idx];
@@ -823,7 +825,7 @@ usocklnd_find_or_create_conn(usock_peer_t *peer, int type,
                         rc = -EHOSTUNREACH;
                         goto find_or_create_conn_failed;
                 }
-
+
                 rc = usocklnd_create_active_conn(peer, type, &conn);
                 if (rc) {
                         peer->up_errored = 1;
@@ -833,7 +835,7 @@ usocklnd_find_or_create_conn(usock_peer_t *peer, int type,
 
                 /* peer takes 1 of conn refcount */
                 usocklnd_link_conn_to_peer(conn, peer, idx);
-
+
                 rc = usocklnd_add_pollrequest(conn, POLL_ADD_REQUEST, POLLOUT);
                 if (rc) {
                         peer->up_conns[idx] = NULL;
@@ -842,7 +844,7 @@ usocklnd_find_or_create_conn(usock_peer_t *peer, int type,
                 }
                 usocklnd_wakeup_pollthread(conn->uc_pt_idx);
         }
-
+
         pthread_mutex_lock(&conn->uc_lock);
         LASSERT(conn->uc_peer == peer);
 
@@ -850,14 +852,14 @@ usocklnd_find_or_create_conn(usock_peer_t *peer, int type,
         if (tx != NULL) {
                 usocklnd_enqueue_tx(conn, tx, send_immediately);
         } else {
-                rc = usocklnd_enqueue_zcack(conn, zc_ack);
+                rc = usocklnd_enqueue_zcack(conn, zc_ack);
                 if (rc != 0) {
                         usocklnd_conn_kill_locked(conn);
                         pthread_mutex_unlock(&conn->uc_lock);
                         goto find_or_create_conn_failed;
                 }
         }
-        pthread_mutex_unlock(&conn->uc_lock);
+        pthread_mutex_unlock(&conn->uc_lock);
 
         usocklnd_conn_addref(conn);
         pthread_mutex_unlock(&peer->up_lock);
@@ -873,7 +875,7 @@ usocklnd_find_or_create_conn(usock_peer_t *peer, int type,
 void
 usocklnd_link_conn_to_peer(usock_conn_t *conn, usock_peer_t *peer, int idx)
 {
-        peer->up_conns[idx] = conn;
+        peer->up_conns[idx] = conn;
         peer->up_errored = 0; /* this new fresh conn will try
                                * revitalize even stale errored peer */
 }
@@ -910,7 +912,7 @@ usocklnd_cleanup_stale_conns(usock_peer_t *peer, __u64 incrn,
                              usock_conn_t *skip_conn)
 {
         int i;
-
+
         if (!peer->up_incrn_is_set) {
                 peer->up_incarnation = incrn;
                 peer->up_incrn_is_set = 1;
@@ -921,19 +923,19 @@ usocklnd_cleanup_stale_conns(usock_peer_t *peer, __u64 incrn,
                 return;
 
         peer->up_incarnation = incrn;
-
+
         for (i = 0; i < N_CONN_TYPES; i++) {
                 usock_conn_t *conn = peer->up_conns[i];
-
+
                 if (conn == NULL || conn == skip_conn)
                         continue;
 
-                pthread_mutex_lock(&conn->uc_lock);
+                pthread_mutex_lock(&conn->uc_lock);
                 LASSERT (conn->uc_peer == peer);
                 conn->uc_peer = NULL;
                 peer->up_conns[i] = NULL;
                 if (conn->uc_state != UC_DEAD)
-                        usocklnd_conn_kill_locked(conn);
+                        usocklnd_conn_kill_locked(conn);
                 pthread_mutex_unlock(&conn->uc_lock);
 
                 usocklnd_conn_decref(conn);
@@ -978,7 +980,7 @@ usocklnd_rx_helloversion_state_transition(usock_conn_t *conn)
         conn->uc_rx_nob_wanted =
         conn->uc_rx_nob_left =
                 sizeof(conn->uc_rx_hello->kshm_version);
-
+
         conn->uc_rx_state = UC_RX_HELLO_VERSION;
 }
 
@@ -998,7 +1000,7 @@ usocklnd_rx_hellobody_state_transition(usock_conn_t *conn)
         conn->uc_rx_nob_left =
                 offsetof(ksock_hello_msg_t, kshm_ips) -
                 offsetof(ksock_hello_msg_t, kshm_src_nid);
-
+
         conn->uc_rx_state = UC_RX_HELLO_BODY;
 }
 
@@ -1018,7 +1020,7 @@ usocklnd_rx_helloIPs_state_transition(usock_conn_t *conn)
         conn->uc_rx_nob_left =
                 conn->uc_rx_hello->kshm_nips *
                 sizeof(conn->uc_rx_hello->kshm_ips[0]);
-
+
         conn->uc_rx_state = UC_RX_HELLO_IPS;
 }
 
@@ -1030,12 +1032,12 @@ usocklnd_rx_lnethdr_state_transition(usock_conn_t *conn)
 {
         conn->uc_rx_niov = 1;
         conn->uc_rx_iov = conn->uc_rx_iova;
-        conn->uc_rx_iov[0].iov_base = &conn->uc_rx_msg.ksm_u.lnetmsg;
+        conn->uc_rx_iov[0].iov_base = &conn->uc_rx_msg.ksm_u.lnetmsg;
         conn->uc_rx_iov[0].iov_len =
         conn->uc_rx_nob_wanted =
         conn->uc_rx_nob_left =
                 sizeof(ksock_lnet_msg_t);
-
+
         conn->uc_rx_state = UC_RX_LNET_HEADER;
         conn->uc_rx_flag = 1;
 }
@@ -1048,12 +1050,12 @@ usocklnd_rx_ksmhdr_state_transition(usock_conn_t *conn)
 {
         conn->uc_rx_niov = 1;
         conn->uc_rx_iov = conn->uc_rx_iova;
-        conn->uc_rx_iov[0].iov_base = &conn->uc_rx_msg;
+        conn->uc_rx_iov[0].iov_base = &conn->uc_rx_msg;
         conn->uc_rx_iov[0].iov_len =
         conn->uc_rx_nob_wanted =
-        conn->uc_rx_nob_left =
+        conn->uc_rx_nob_left =
                 offsetof(ksock_msg_t, ksm_u);
-
+
         conn->uc_rx_state = UC_RX_KSM_HEADER;
         conn->uc_rx_flag = 0;
 }
@@ -1070,7 +1072,7 @@ usocklnd_rx_skipping_state_transition(usock_conn_t *conn)
         unsigned int niov = 0;
         int          skipped = 0;
         int          nob_to_skip = conn->uc_rx_nob_left;
-
+
         LASSERT(nob_to_skip != 0);
 
         conn->uc_rx_iov = conn->uc_rx_iova;