X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lnet%2Fulnds%2Fsocklnd%2Fhandlers.c;h=e05d63e377580e674262d5a373d9182b5c5918ac;hp=cd843151430d2035f5ba3ee3dc26d722c67324c0;hb=6e3ec5812ebd1b5ecf7cae584f429b013ffe7431;hpb=19205dfea419bac6f3bc58ed1b579b8caf79b895

diff --git a/lnet/ulnds/socklnd/handlers.c b/lnet/ulnds/socklnd/handlers.c
index cd84315..e05d63e 100644
--- a/lnet/ulnds/socklnd/handlers.c
+++ b/lnet/ulnds/socklnd/handlers.c
@@ -1,17 +1,46 @@
 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
  * vim:expandtab:shiftwidth=8:tabstop=8:
  *
- * Copyright (C) 2001, 2002 Cluster File Systems, Inc.
- * Author: Maxim Patlasov
+ * GPL HEADER START
  *
- * This file is part of the Lustre file system, http://www.lustre.org
- * Lustre is a trademark of Cluster File Systems, Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lnet/ulnds/socklnd/handlers.c
+ *
+ * Author: Maxim Patlasov
  */

 #include "usocklnd.h"
 #include
-#include

 int
 usocklnd_notifier_handler(int fd)
@@ -28,8 +57,8 @@ usocklnd_exception_handler(usock_conn_t *conn)
         if (conn->uc_state == UC_CONNECTING ||
             conn->uc_state == UC_SENDING_HELLO)
                 usocklnd_conn_kill_locked(conn);
-
-        pthread_mutex_unlock(&conn->uc_lock);
+
+        pthread_mutex_unlock(&conn->uc_lock);
 }

 int
@@ -43,7 +72,7 @@ usocklnd_read_handler(usock_conn_t *conn)
         rc = 0;
         pthread_mutex_lock(&conn->uc_lock);
         state = conn->uc_state;
-
+
         /* process special case: LNET calls lnd_recv() asynchronously */
         if (state == UC_READY && conn->uc_rx_state == UC_RX_PARSE) {
                 /* still don't have usocklnd_recv() called */
@@ -64,7 +93,7 @@
          * 2) usocklnd_shutdown() can change uc_state to UC_DEAD */

         switch (state) {
-
+
         case UC_RECEIVING_HELLO:
         case UC_READY:
                 if (conn->uc_rx_nob_wanted != 0) {
@@ -88,7 +117,7 @@
                         usocklnd_conn_kill(conn);
                         break;
                 }
-
+
                 if (continue_reading)
                         goto read_again;
@@ -100,7 +129,7 @@
         default:
                 LBUG();
         }
-
+
         return rc;
 }
@@ -115,7 +144,7 @@ usocklnd_read_msg(usock_conn_t *conn, int *cont_flag)
 {
         int   rc = 0;
         __u64 cookie;
-
+
         *cont_flag = 0;

         /* something new arrived in the RX part - let's process it */
@@ -124,13 +153,13 @@ usocklnd_read_msg(usock_conn_t *conn, int *cont_flag)
                 if (conn->uc_flip) {
                         __swab32s(&conn->uc_rx_msg.ksm_type);
                         __swab32s(&conn->uc_rx_msg.ksm_csum);
-                        __swab64s(&conn->uc_rx_msg.ksm_zc_req_cookie);
-                        __swab64s(&conn->uc_rx_msg.ksm_zc_ack_cookie);
-                }
+                        __swab64s(&conn->uc_rx_msg.ksm_zc_cookies[0]);
+                        __swab64s(&conn->uc_rx_msg.ksm_zc_cookies[1]);
+                }

                 /* we never send packets for which zc-acking is required */
                 if (conn->uc_rx_msg.ksm_type != KSOCK_MSG_LNET ||
-                    conn->uc_rx_msg.ksm_zc_ack_cookie != 0) {
+                    conn->uc_rx_msg.ksm_zc_cookies[1] != 0) {
                         conn->uc_errored = 1;
                         return -EPROTO;
                 }
@@ -141,7 +170,7 @@
                 usocklnd_rx_lnethdr_state_transition(conn);
                 *cont_flag = 1;
                 break;
-
+
         case UC_RX_LNET_HEADER:
                 if (the_lnet.ln_pid & LNET_PID_USERFLAG) {
                         /* replace dest_nid,pid (ksocknal sets its own) */
@@ -149,24 +178,24 @@
                                 cpu_to_le64(conn->uc_peer->up_ni->ni_nid);
                         conn->uc_rx_msg.ksm_u.lnetmsg.ksnm_hdr.dest_pid =
                                 cpu_to_le32(the_lnet.ln_pid);
-
-                } else if (conn->uc_peer->up_peerid.pid & LNET_PID_USERFLAG) {
+
+                } else if (conn->uc_peer->up_peerid.pid & LNET_PID_USERFLAG) {
                         /* Userspace peer */
                         lnet_process_id_t *id = &conn->uc_peer->up_peerid;
                         lnet_hdr_t *lhdr = &conn->uc_rx_msg.ksm_u.lnetmsg.ksnm_hdr;
-
+
                         /* Substitute process ID assigned at connection time */
                         lhdr->src_pid = cpu_to_le32(id->pid);
                         lhdr->src_nid = cpu_to_le64(id->nid);
                 }
-
+
                 conn->uc_rx_state = UC_RX_PARSE;
                 usocklnd_conn_addref(conn); /* ++ref while parsing */
-
-                rc = lnet_parse(conn->uc_peer->up_ni,
-                                &conn->uc_rx_msg.ksm_u.lnetmsg.ksnm_hdr,
+
+                rc = lnet_parse(conn->uc_peer->up_ni,
+                                &conn->uc_rx_msg.ksm_u.lnetmsg.ksnm_hdr,
                                 conn->uc_peerid.nid, conn, 0);
-
+
                 if (rc < 0) {
                         /* I just received garbage: give up on this conn */
                         conn->uc_errored = 1;
@@ -178,41 +207,41 @@ usocklnd_read_msg(usock_conn_t *conn, int *cont_flag)
                 pthread_mutex_lock(&conn->uc_lock);
                 LASSERT (conn->uc_rx_state == UC_RX_PARSE ||
                          conn->uc_rx_state == UC_RX_LNET_PAYLOAD);
-
+
                 /* check whether usocklnd_recv() got called */
                 if (conn->uc_rx_state == UC_RX_LNET_PAYLOAD)
                         *cont_flag = 1;
                 pthread_mutex_unlock(&conn->uc_lock);
                 break;
-
+
         case UC_RX_PARSE:
                 LBUG(); /* it's an error to be here, because this special
                          * case is handled by the caller */
                 break;
-
+
         case UC_RX_PARSE_WAIT:
                 LBUG(); /* it's an error to be here, because the conn
                          * shouldn't wait for a POLLIN event in this
                          * state */
                 break;
-
+
         case UC_RX_LNET_PAYLOAD:
                 /* payload all received */
                 lnet_finalize(conn->uc_peer->up_ni, conn->uc_rx_lnetmsg, 0);
-                cookie = conn->uc_rx_msg.ksm_zc_req_cookie;
+                cookie = conn->uc_rx_msg.ksm_zc_cookies[0];
                 if (cookie != 0)
                         rc = usocklnd_handle_zc_req(conn->uc_peer, cookie);
-
+
                 if (rc != 0) {
                         /* change state not to finalize twice */
                         conn->uc_rx_state = UC_RX_KSM_HEADER;
                         return -EPROTO;
                 }
-
+
                 /* Fall through */
-
+
         case UC_RX_SKIPPING:
                 if (conn->uc_rx_nob_left != 0) {
                         usocklnd_rx_skipping_state_transition(conn);
@@ -247,13 +276,13 @@ usocklnd_handle_zc_req(usock_peer_t *peer, __u64 cookie)
         if (zc_ack == NULL)
                 return -ENOMEM;
         zc_ack->zc_cookie = cookie;
-
+
         /* Let's assume that CONTROL is the best type for zcack,
          * but userspace clients don't use typed connections */
         if (the_lnet.ln_pid & LNET_PID_USERFLAG)
                 type = SOCKLND_CONN_ANY;
         else
-                type = SOCKLND_CONN_CONTROL;
+                type = SOCKLND_CONN_CONTROL;

         rc = usocklnd_find_or_create_conn(peer, type, &conn, NULL,
                                           zc_ack, &dummy);
@@ -275,9 +304,9 @@ usocklnd_read_hello(usock_conn_t *conn, int *cont_flag)
 {
         int rc = 0;
         ksock_hello_msg_t *hello = conn->uc_rx_hello;
-
+
         *cont_flag = 0;
-
+
         /* something new arrived in hello - let's process it */
         switch (conn->uc_rx_state) {
         case UC_RX_HELLO_MAGIC:
@@ -290,7 +319,7 @@ usocklnd_read_hello(usock_conn_t *conn, int *cont_flag)
                 usocklnd_rx_helloversion_state_transition(conn);
                 *cont_flag = 1;
-                break;
+                break;

         case UC_RX_HELLO_VERSION:
                 if ((!conn->uc_flip &&
@@ -302,7 +331,7 @@ usocklnd_read_hello(usock_conn_t *conn, int *cont_flag)
                 usocklnd_rx_hellobody_state_transition(conn);
                 *cont_flag = 1;
                 break;
-
+
         case UC_RX_HELLO_BODY:
                 if (conn->uc_flip) {
                         ksock_hello_msg_t *hello = conn->uc_rx_hello;
@@ -322,8 +351,8 @@ usocklnd_read_hello(usock_conn_t *conn, int *cont_flag)
                                HIPQUAD(conn->uc_peer_ip), conn->uc_peer_port);
                         return -EPROTO;
                 }
-
-                if (conn->uc_rx_hello->kshm_nips) {
+
+                if (conn->uc_rx_hello->kshm_nips) {
                         usocklnd_rx_helloIPs_state_transition(conn);
                         *cont_flag = 1;
                         break;
@@ -335,8 +364,8 @@ usocklnd_read_hello(usock_conn_t *conn, int *cont_flag)
                         rc = usocklnd_activeconn_hellorecv(conn);
                 else /* passive conn */
                         rc = usocklnd_passiveconn_hellorecv(conn);
-
-                break;
+
+                break;

         default:
                 LBUG(); /* unknown state */
@@ -361,15 +390,15 @@ usocklnd_activeconn_hellorecv(usock_conn_t *conn)
          * Don't try to link it to peer because the conn
          * has already had a chance to proceed at the beginning */
         if (peer == NULL) {
-                LASSERT(list_empty(&conn->uc_tx_list) &&
-                        list_empty(&conn->uc_zcack_list));
+                LASSERT(cfs_list_empty(&conn->uc_tx_list) &&
+                        cfs_list_empty(&conn->uc_zcack_list));

                 usocklnd_conn_kill(conn);
                 return 0;
         }

         peer->up_last_alive = cfs_time_current();
-
+
         /* peer says that we lost the race */
         if (hello->kshm_ctype == SOCKLND_CONN_NONE) {
                 /* Start new active conn, relink txs and zc_acks from
@@ -377,8 +406,8 @@ usocklnd_activeconn_hellorecv(usock_conn_t *conn)
                  * Actually, we're expecting that a passive conn will
                  * make us zombie soon and take care of our txs and
                  * zc_acks */
-
-                struct list_head tx_list, zcack_list;
+
+                cfs_list_t tx_list, zcack_list;
                 usock_conn_t *conn2;
                 int idx = usocklnd_type2idx(conn->uc_type);
@@ -396,7 +425,7 @@ usocklnd_activeconn_hellorecv(usock_conn_t *conn)
                         pthread_mutex_unlock(&peer->up_lock);
                         return 0;
                 }
-
+
                 LASSERT (peer == conn->uc_peer);
                 LASSERT (peer->up_conns[idx] == conn);
@@ -407,22 +436,22 @@ usocklnd_activeconn_hellorecv(usock_conn_t *conn)
                         pthread_mutex_unlock(&peer->up_lock);
                         return rc;
                 }
-
+
                 usocklnd_link_conn_to_peer(conn2, peer, idx);
                 conn2->uc_peer = peer;
-
+
                 /* unlink txs and zcack from the conn */
-                list_add(&tx_list, &conn->uc_tx_list);
-                list_del_init(&conn->uc_tx_list);
-                list_add(&zcack_list, &conn->uc_zcack_list);
-                list_del_init(&conn->uc_zcack_list);
-
+                cfs_list_add(&tx_list, &conn->uc_tx_list);
+                cfs_list_del_init(&conn->uc_tx_list);
+                cfs_list_add(&zcack_list, &conn->uc_zcack_list);
+                cfs_list_del_init(&conn->uc_zcack_list);
+
                 /* link them to conn2 */
-                list_add(&conn2->uc_tx_list, &tx_list);
-                list_del_init(&tx_list);
-                list_add(&conn2->uc_zcack_list, &zcack_list);
-                list_del_init(&zcack_list);
-
+                cfs_list_add(&conn2->uc_tx_list, &tx_list);
+                cfs_list_del_init(&tx_list);
+                cfs_list_add(&conn2->uc_zcack_list, &zcack_list);
+                cfs_list_del_init(&zcack_list);
+
                 /* make conn zombie */
                 conn->uc_peer = NULL;
                 usocklnd_peer_decref(peer);
@@ -435,11 +464,11 @@ usocklnd_activeconn_hellorecv(usock_conn_t *conn)
                 } else {
                         usocklnd_conn_kill_locked(conn);
                 }
-
-                pthread_mutex_unlock(&conn->uc_lock);
+
+                pthread_mutex_unlock(&conn->uc_lock);
                 pthread_mutex_unlock(&peer->up_lock);
                 usocklnd_conn_decref(conn);
-
+
         } else { /* hello->kshm_ctype != SOCKLND_CONN_NONE */
                 if (conn->uc_type != usocklnd_invert_type(hello->kshm_ctype))
                         return -EPROTO;
@@ -448,7 +477,7 @@ usocklnd_activeconn_hellorecv(usock_conn_t *conn)
                 usocklnd_cleanup_stale_conns(peer, hello->kshm_src_incarnation,
                                              conn);
                 pthread_mutex_unlock(&peer->up_lock);
-
+
                 /* safely transit to UC_READY state */
                 /* rc == 0 */
                 pthread_mutex_lock(&conn->uc_lock);
@@ -459,9 +488,9 @@ usocklnd_activeconn_hellorecv(usock_conn_t *conn)
                          * received hello, but maybe we have something
                          * to send? */
                         LASSERT (conn->uc_sending == 0);
-                        if ( !list_empty(&conn->uc_tx_list) ||
-                             !list_empty(&conn->uc_zcack_list) ) {
-
+                        if ( !cfs_list_empty(&conn->uc_tx_list) ||
+                             !cfs_list_empty(&conn->uc_zcack_list) ) {
+
                                 conn->uc_tx_deadline =
                                         cfs_time_shift(usock_tuns.ut_timeout);
                                 conn->uc_tx_flag = 1;
@@ -473,7 +502,7 @@ usocklnd_activeconn_hellorecv(usock_conn_t *conn)
                         if (rc == 0)
                                 conn->uc_state = UC_READY;
                 }
-                pthread_mutex_unlock(&conn->uc_lock);
+                pthread_mutex_unlock(&conn->uc_lock);
         }

         return rc;
@@ -503,7 +532,7 @@ usocklnd_passiveconn_hellorecv(usock_conn_t *conn)
         /* don't know parent peer yet and not zombie */
         LASSERT (conn->uc_peer == NULL && ni != NULL);
-
+
         /* don't know peer's nid and incarnation yet */
         if (peer_port > LNET_ACCEPTOR_MAX_RESERVED_PORT) {
                 /* do not trust liblustre clients */
@@ -512,7 +541,7 @@
                           peer_ip);
                 if (hello->kshm_ctype != SOCKLND_CONN_ANY) {
                         lnet_ni_decref(ni);
-                        conn->uc_ni = NULL;
+                        conn->uc_ni = NULL;
                         CERROR("Refusing to accept connection of type=%d from "
                                "userspace process %u.%u.%u.%u:%d\n", hello->kshm_ctype,
                                HIPQUAD(peer_ip), peer_port);
@@ -523,7 +552,7 @@
                 conn->uc_peerid.nid = hello->kshm_src_nid;
         }
         conn->uc_type = type = usocklnd_invert_type(hello->kshm_ctype);
-
+
         rc = usocklnd_find_or_create_peer(ni, conn->uc_peerid, &peer);
         if (rc) {
                 lnet_ni_decref(ni);
@@ -532,14 +561,14 @@
         }

         peer->up_last_alive = cfs_time_current();
-
+
         idx = usocklnd_type2idx(conn->uc_type);

         /* safely check whether we're first */
         pthread_mutex_lock(&peer->up_lock);

         usocklnd_cleanup_stale_conns(peer, hello->kshm_src_incarnation, NULL);
-
+
         if (peer->up_conns[idx] == NULL) {
                 peer->up_last_alive = cfs_time_current();
                 conn->uc_peer = peer;
@@ -547,7 +576,7 @@
                 usocklnd_link_conn_to_peer(conn, peer, idx);
                 usocklnd_conn_addref(conn);
         } else {
-                usocklnd_peer_decref(peer);
+                usocklnd_peer_decref(peer);

                 /* Resolve race in favour of higher NID */
                 if (conn->uc_peerid.nid < conn->uc_ni->ni_nid) {
@@ -559,7 +588,7 @@
                 /* if conn->uc_peerid.nid > conn->uc_ni->ni_nid,
                  * postpone race resolution till READY state
                  * (hopefully that conn[idx] will die because of
-                 * incoming hello of CONN_NONE type) */
+                 * incoming hello of CONN_NONE type) */
         }

         pthread_mutex_unlock(&peer->up_lock);
@@ -581,10 +610,10 @@
         conn->uc_tx_deadline = cfs_time_shift(usock_tuns.ut_timeout);
         conn->uc_tx_flag = 1;
         rc = usocklnd_add_pollrequest(conn, POLL_SET_REQUEST, POLLOUT);
-
+
  passive_hellorecv_done:
         pthread_mutex_unlock(&conn->uc_lock);
-        return rc;
+        return rc;
 }

 int
@@ -596,7 +625,7 @@ usocklnd_write_handler(usock_conn_t *conn)
         int state;
         usock_peer_t *peer;
         lnet_ni_t *ni;
-
+
         pthread_mutex_lock(&conn->uc_lock); /* like membar */
         state = conn->uc_state;
         pthread_mutex_unlock(&conn->uc_lock);
@@ -621,8 +650,8 @@ usocklnd_write_handler(usock_conn_t *conn)
                         rc = usocklnd_activeconn_hellosent(conn);
                 else /* passive conn */
                         rc = usocklnd_passiveconn_hellosent(conn);
-
-                break;
+
+                break;

         case UC_READY:
                 pthread_mutex_lock(&conn->uc_lock);
@@ -631,12 +660,12 @@ usocklnd_write_handler(usock_conn_t *conn)
                 LASSERT (peer != NULL);
                 ni = peer->up_ni;

-                if (list_empty(&conn->uc_tx_list) &&
-                    list_empty(&conn->uc_zcack_list)) {
+                if (cfs_list_empty(&conn->uc_tx_list) &&
+                    cfs_list_empty(&conn->uc_zcack_list)) {
                         LASSERT(usock_tuns.ut_fair_limit > 1);
                         pthread_mutex_unlock(&conn->uc_lock);
                         return 0;
-                }
+                }

                 tx = usocklnd_try_piggyback(&conn->uc_tx_list,
                                             &conn->uc_zcack_list);
@@ -649,11 +678,11 @@ usocklnd_write_handler(usock_conn_t *conn)

                 if (rc)
                         break;
-
+
                 rc = usocklnd_send_tx(conn, tx);
                 if (rc == 0) { /* partial send or connection closed */
                         pthread_mutex_lock(&conn->uc_lock);
-                        list_add(&tx->tx_list, &conn->uc_tx_list);
+                        cfs_list_add(&tx->tx_list, &conn->uc_tx_list);
                         conn->uc_sending = 0;
                         pthread_mutex_unlock(&conn->uc_lock);
                         break;
@@ -669,8 +698,8 @@ usocklnd_write_handler(usock_conn_t *conn)
                 pthread_mutex_lock(&conn->uc_lock);
                 conn->uc_sending = 0;
                 if (conn->uc_state != UC_DEAD &&
-                    list_empty(&conn->uc_tx_list) &&
-                    list_empty(&conn->uc_zcack_list)) {
+                    cfs_list_empty(&conn->uc_tx_list) &&
+                    cfs_list_empty(&conn->uc_zcack_list)) {
                         conn->uc_tx_flag = 0;
                         ret = usocklnd_add_pollrequest(conn,
                                                        POLL_TX_SET_REQUEST, 0);
@@ -678,7 +707,7 @@ usocklnd_write_handler(usock_conn_t *conn)
                                 rc = ret;
                 }
                 pthread_mutex_unlock(&conn->uc_lock);
-
+
                 break;

         case UC_DEAD:
@@ -690,7 +719,7 @@ usocklnd_write_handler(usock_conn_t *conn)

         if (rc < 0)
                 usocklnd_conn_kill(conn);
-
+
         return rc;
 }

@@ -699,41 +728,41 @@
  * brand new noop tx for zc_ack from zcack_list. Return NULL
  * if an error happened */
 usock_tx_t *
-usocklnd_try_piggyback(struct list_head *tx_list_p,
-                       struct list_head *zcack_list_p)
+usocklnd_try_piggyback(cfs_list_t *tx_list_p,
+                       cfs_list_t *zcack_list_p)
 {
         usock_tx_t *tx;
         usock_zc_ack_t *zc_ack;

         /* assign tx and zc_ack */
-        if (list_empty(tx_list_p))
+        if (cfs_list_empty(tx_list_p))
                 tx = NULL;
         else {
-                tx = list_entry(tx_list_p->next, usock_tx_t, tx_list);
-                list_del(&tx->tx_list);
+                tx = cfs_list_entry(tx_list_p->next, usock_tx_t, tx_list);
+                cfs_list_del(&tx->tx_list);

                 /* already piggybacked or partially sent */
-                if (tx->tx_msg.ksm_zc_ack_cookie ||
+                if (tx->tx_msg.ksm_zc_cookies[1] != 0 ||
                     tx->tx_resid != tx->tx_nob)
                         return tx;
         }
-
-        if (list_empty(zcack_list_p)) {
+
+        if (cfs_list_empty(zcack_list_p)) {
                 /* nothing to piggyback */
                 return tx;
         } else {
-                zc_ack = list_entry(zcack_list_p->next,
-                                    usock_zc_ack_t, zc_list);
-                list_del(&zc_ack->zc_list);
-        }
-
+                zc_ack = cfs_list_entry(zcack_list_p->next,
+                                        usock_zc_ack_t, zc_list);
+                cfs_list_del(&zc_ack->zc_list);
+        }
+
         if (tx != NULL)
                 /* piggyback the zc-ack cookie */
-                tx->tx_msg.ksm_zc_ack_cookie = zc_ack->zc_cookie;
+                tx->tx_msg.ksm_zc_cookies[1] = zc_ack->zc_cookie;
         else
                 /* cannot piggyback, need noop */
-                tx = usocklnd_create_noop_tx(zc_ack->zc_cookie);
-
+                tx = usocklnd_create_noop_tx(zc_ack->zc_cookie);
+
         LIBCFS_FREE (zc_ack, sizeof(*zc_ack));
         return tx;
 }
@@ -746,7 +775,7 @@
 int
 usocklnd_activeconn_hellosent(usock_conn_t *conn)
 {
         int rc = 0;
-
+
         pthread_mutex_lock(&conn->uc_lock);

         if (conn->uc_state != UC_DEAD) {
@@ -777,8 +806,8 @@ usocklnd_passiveconn_hellosent(usock_conn_t *conn)
 {
         usock_conn_t *conn2;
         usock_peer_t *peer;
-        struct list_head tx_list;
-        struct list_head zcack_list;
+        cfs_list_t tx_list;
+        cfs_list_t zcack_list;
         int idx;
         int rc = 0;

@@ -788,29 +817,29 @@ usocklnd_passiveconn_hellosent(usock_conn_t *conn)

         /* conn->uc_peer == NULL, so the conn isn't accessible via
          * peer hash list, so nobody can touch the conn but us */
-
+
         if (conn->uc_ni == NULL) /* remove zombie conn */
                 goto passive_hellosent_connkill;
-
+
         /* all code below is race resolution, because normally
          * passive conn is linked to peer just after receiving hello */
         CFS_INIT_LIST_HEAD (&tx_list);
         CFS_INIT_LIST_HEAD (&zcack_list);
-
+
         /* conn is passive and isn't linked to any peer,
            so its tx and zc_ack lists have to be empty */
-        LASSERT (list_empty(&conn->uc_tx_list) &&
-                 list_empty(&conn->uc_zcack_list) &&
+        LASSERT (cfs_list_empty(&conn->uc_tx_list) &&
+                 cfs_list_empty(&conn->uc_zcack_list) &&
                  conn->uc_sending == 0);

         rc = usocklnd_find_or_create_peer(conn->uc_ni, conn->uc_peerid, &peer);
         if (rc)
                 return rc;

-        idx = usocklnd_type2idx(conn->uc_type);
+        idx = usocklnd_type2idx(conn->uc_type);

         /* try to link conn to peer */
-        pthread_mutex_lock(&peer->up_lock);
+        pthread_mutex_lock(&peer->up_lock);
         if (peer->up_conns[idx] == NULL) {
                 usocklnd_link_conn_to_peer(conn, peer, idx);
                 usocklnd_conn_addref(conn);
@@ -833,19 +862,19 @@ usocklnd_passiveconn_hellosent(usock_conn_t *conn)
                  * We're sure that nobody but us can access the conn,
                  * but we take the mutex anyway (if we're wrong, a
                  * deadlock is easier to debug than a corrupted list) */
-                list_add(&tx_list, &conn2->uc_tx_list);
-                list_del_init(&conn2->uc_tx_list);
-                list_add(&zcack_list, &conn2->uc_zcack_list);
-                list_del_init(&conn2->uc_zcack_list);
-
+                cfs_list_add(&tx_list, &conn2->uc_tx_list);
+                cfs_list_del_init(&conn2->uc_tx_list);
+                cfs_list_add(&zcack_list, &conn2->uc_zcack_list);
+                cfs_list_del_init(&conn2->uc_zcack_list);
+
                 pthread_mutex_lock(&conn->uc_lock);
-                list_add_tail(&conn->uc_tx_list, &tx_list);
-                list_del_init(&tx_list);
-                list_add_tail(&conn->uc_zcack_list, &zcack_list);
-                list_del_init(&zcack_list);
+                cfs_list_add_tail(&conn->uc_tx_list, &tx_list);
+                cfs_list_del_init(&tx_list);
+                cfs_list_add_tail(&conn->uc_zcack_list, &zcack_list);
+                cfs_list_del_init(&zcack_list);
                 conn->uc_peer = peer;
                 pthread_mutex_unlock(&conn->uc_lock);
-
+
                 conn2->uc_peer = NULL; /* make conn2 zombie */
                 pthread_mutex_unlock(&conn2->uc_lock);
                 usocklnd_conn_decref(conn2);
@@ -860,7 +889,7 @@ usocklnd_passiveconn_hellosent(usock_conn_t *conn)
         pthread_mutex_unlock(&peer->up_lock);
         usocklnd_peer_decref(peer);

- passive_hellosent_done:
+ passive_hellosent_done:
         /* safely transit to UC_READY state */
         /* rc == 0 */
         pthread_mutex_lock(&conn->uc_lock);
@@ -870,8 +899,8 @@ usocklnd_passiveconn_hellosent(usock_conn_t *conn)
                 /* we're ready to receive incoming packets and
                    maybe already have something to transmit */
                 LASSERT (conn->uc_sending == 0);
-                if ( list_empty(&conn->uc_tx_list) &&
-                     list_empty(&conn->uc_zcack_list) ) {
+                if ( cfs_list_empty(&conn->uc_tx_list) &&
+                     cfs_list_empty(&conn->uc_zcack_list) ) {
                         conn->uc_tx_flag = 0;
                         rc = usocklnd_add_pollrequest(conn, POLL_SET_REQUEST,
                                                       POLLIN);
@@ -906,23 +935,23 @@ usocklnd_send_tx(usock_conn_t *conn, usock_tx_t *tx)
 {
         struct iovec *iov;
         int nob;
-        int fd = conn->uc_fd;
         cfs_time_t t;
-
+
         LASSERT (tx->tx_resid != 0);

         do {
                 usock_peer_t *peer = conn->uc_peer;

                 LASSERT (tx->tx_niov > 0);
-
-                nob = libcfs_sock_writev(fd, tx->tx_iov, tx->tx_niov);
+
+                nob = libcfs_sock_writev(conn->uc_sock,
+                                         tx->tx_iov, tx->tx_niov);
                 if (nob < 0)
                         conn->uc_errored = 1;
                 if (nob <= 0) /* write queue is flow-controlled or error */
                         return nob;
-
-                LASSERT (nob <= tx->tx_resid);
+
+                LASSERT (nob <= tx->tx_resid);
                 tx->tx_resid -= nob;
                 t = cfs_time_current();
                 conn->uc_tx_deadline = cfs_time_add(t, cfs_time_seconds(usock_tuns.ut_timeout));
@@ -930,22 +959,22 @@ usocklnd_send_tx(usock_conn_t *conn, usock_tx_t *tx)
                 if(peer != NULL)
                         peer->up_last_alive = t;

-                /* "consume" iov */
+                /* "consume" iov */
                 iov = tx->tx_iov;
-                do {
-                        LASSERT (tx->tx_niov > 0);
-
-                        if (nob < iov->iov_len) {
+                do {
+                        LASSERT (tx->tx_niov > 0);
+
+                        if (nob < iov->iov_len) {
                                 iov->iov_base = (void *)(((unsigned long)(iov->iov_base)) + nob);
-                                iov->iov_len -= nob;
-                                break;
-                        }
+                                iov->iov_len -= nob;
+                                break;
+                        }

-                        nob -= iov->iov_len;
-                        tx->tx_iov = ++iov;
-                        tx->tx_niov--;
+                        nob -= iov->iov_len;
+                        tx->tx_iov = ++iov;
+                        tx->tx_niov--;
                 } while (nob != 0);
-
+
         } while (tx->tx_resid != 0);

         return 1; /* send complete */
@@ -965,16 +994,18 @@

         do {
                 usock_peer_t *peer = conn->uc_peer;
-
+
                 LASSERT (conn->uc_rx_niov > 0);
-
-                nob = libcfs_sock_readv(conn->uc_fd, conn->uc_rx_iov, conn->uc_rx_niov);
+
+                nob = libcfs_sock_readv(conn->uc_sock,
+                                        conn->uc_rx_iov, conn->uc_rx_niov);
                 if (nob <= 0) {/* read nothing or error */
-                        conn->uc_errored = 1;
+                        if (nob < 0)
+                                conn->uc_errored = 1;
                         return nob;
                 }
-
-                LASSERT (nob <= conn->uc_rx_nob_wanted);
+
+                LASSERT (nob <= conn->uc_rx_nob_wanted);
                 conn->uc_rx_nob_wanted -= nob;
                 conn->uc_rx_nob_left -= nob;
                 t = cfs_time_current();
@@ -982,23 +1013,23 @@

                 if(peer != NULL)
                         peer->up_last_alive = t;
-
-                /* "consume" iov */
+
+                /* "consume" iov */
                 iov = conn->uc_rx_iov;
-                do {
-                        LASSERT (conn->uc_rx_niov > 0);
-
-                        if (nob < iov->iov_len) {
-                                iov->iov_base = (void *)(((unsigned long)(iov->iov_base)) + nob);
-                                iov->iov_len -= nob;
-                                break;
-                        }
-
-                        nob -= iov->iov_len;
+                do {
+                        LASSERT (conn->uc_rx_niov > 0);
+
+                        if (nob < iov->iov_len) {
+                                iov->iov_base = (void *)(((unsigned long)(iov->iov_base)) + nob);
+                                iov->iov_len -= nob;
+                                break;
+                        }
+
+                        nob -= iov->iov_len;
                         conn->uc_rx_iov = ++iov;
-                        conn->uc_rx_niov--;
+                        conn->uc_rx_niov--;
                 } while (nob != 0);
-
+
         } while (conn->uc_rx_nob_wanted != 0);

         return 1; /* read complete */
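
Both usocklnd_send_tx() and usocklnd_read_data() in the patch above rely on the same partial-I/O idiom: issue a vectored write/read, then "consume" the iovec array by however many bytes were actually transferred, so the next call resumes exactly where the previous one stopped. Below is a minimal, self-contained sketch of that idiom against plain POSIX writev() on a non-blocking socket. The names consume_iov() and send_all() are invented for this sketch and are not part of the usocklnd API; the EAGAIN check stands in for the LND's flow-control path, where usocklnd_send_tx() returns 0 and the tx is requeued until the next POLLOUT event.

/*
 * Sketch of the "consume" iov pattern, assuming plain POSIX I/O.
 */
#include <sys/uio.h>
#include <unistd.h>
#include <errno.h>
#include <assert.h>

/* Advance (*iovp, *niovp) past 'nob' bytes that were just transferred. */
static void
consume_iov(struct iovec **iovp, int *niovp, size_t nob)
{
        struct iovec *iov = *iovp;

        while (nob != 0) {
                assert(*niovp > 0);

                if (nob < iov->iov_len) {
                        /* partial fragment: bump base, shrink length */
                        iov->iov_base = (char *)iov->iov_base + nob;
                        iov->iov_len -= nob;
                        break;
                }

                /* whole fragment consumed: step to the next one */
                nob -= iov->iov_len;
                *iovp = ++iov;
                (*niovp)--;
        }
}

/* Returns 1 when 'resid' bytes are fully sent, 0 if the socket
 * flow-controls (would block), or -errno on a real error -- the
 * same convention the loops above follow. */
static int
send_all(int fd, struct iovec *iov, int niov, size_t resid)
{
        while (resid != 0) {
                ssize_t nob = writev(fd, iov, niov);

                if (nob < 0)
                        return (errno == EAGAIN || errno == EWOULDBLOCK) ?
                               0 : -errno;

                resid -= (size_t)nob;
                consume_iov(&iov, &niov, (size_t)nob);
        }

        return 1; /* send complete */
}

The reason for mutating the iovec array in place rather than re-deriving offsets on every call is that a non-blocking socket can accept an arbitrary prefix of the data; keeping the cursor inside (iov, niov) lets the poll loop simply retry the same call when the descriptor becomes writable again.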