X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lnet%2Fklnds%2Fo2iblnd%2Fo2iblnd_cb.c;h=3b3ede13721ab247f9dba49daf49a6980548eebe;hp=d6dc96fc0886d9d09e59fa159e28d5ce81476e4e;hb=e08ac764867a0e36b303f511c94a0fa27e3dd53d;hpb=83e45ead69babfb2909a3157f054fcd8fdf33360

diff --git a/lnet/klnds/o2iblnd/o2iblnd_cb.c b/lnet/klnds/o2iblnd/o2iblnd_cb.c
index d6dc96f..3b3ede1 100644
--- a/lnet/klnds/o2iblnd/o2iblnd_cb.c
+++ b/lnet/klnds/o2iblnd/o2iblnd_cb.c
@@ -203,7 +203,12 @@ kiblnd_post_rx(struct kib_rx *rx, int credit)
 	 * own this rx (and rx::rx_conn) anymore, LU-5678.
 	 */
 	kiblnd_conn_addref(conn);
+#ifdef HAVE_IB_POST_SEND_RECV_CONST
+	rc = ib_post_recv(conn->ibc_cmid->qp, &rx->rx_wrq,
+			  (const struct ib_recv_wr **)&bad_wrq);
+#else
 	rc = ib_post_recv(conn->ibc_cmid->qp, &rx->rx_wrq, &bad_wrq);
+#endif
 	if (unlikely(rc != 0)) {
 		CERROR("Can't post rx for %s: %d, bad_wrq: %p\n",
 		       libcfs_nid2str(conn->ibc_peer->ibp_nid), rc, bad_wrq);
@@ -605,7 +610,8 @@ kiblnd_fmr_map_tx(struct kib_net *net, struct kib_tx *tx,
 	fps = net->ibn_fmr_ps[cpt];
 	rc = kiblnd_fmr_pool_map(fps, tx, rd, nob, 0, &tx->tx_fmr);
 	if (rc != 0) {
-		CERROR("Can't map %u pages: %d\n", nob, rc);
+		CERROR("Can't map %u bytes (%u/%u)s: %d\n", nob,
+		       tx->tx_nfrags, rd->rd_nfrags, rc);
 		return rc;
 	}
 
@@ -725,83 +731,9 @@ static int kiblnd_map_tx(struct lnet_ni *ni, struct kib_tx *tx,
 	return -EINVAL;
 }
 
-static int kiblnd_setup_rd_iov(struct lnet_ni *ni, struct kib_tx *tx,
-			       struct kib_rdma_desc *rd, unsigned int niov,
-			       struct kvec *iov, int offset, int nob)
-{
-	struct kib_net *net = ni->ni_data;
-	struct page *page;
-	struct scatterlist *sg;
-	unsigned long vaddr;
-	int fragnob;
-	int page_offset;
-	unsigned int max_niov;
-
-	LASSERT (nob > 0);
-	LASSERT (niov > 0);
-	LASSERT (net != NULL);
-
-	while (offset >= iov->iov_len) {
-		offset -= iov->iov_len;
-		niov--;
-		iov++;
-		LASSERT (niov > 0);
-	}
-
-	max_niov = niov;
-
-	sg = tx->tx_frags;
-	do {
-		LASSERT(niov > 0);
-
-		vaddr = ((unsigned long)iov->iov_base) + offset;
-		page_offset = vaddr & (PAGE_SIZE - 1);
-		page = lnet_kvaddr_to_page(vaddr);
-		if (page == NULL) {
-			CERROR("Can't find page\n");
-			return -EFAULT;
-		}
-
-		fragnob = min((int)(iov->iov_len - offset), nob);
-		fragnob = min(fragnob, (int)PAGE_SIZE - page_offset);
-
-		/*
-		 * We're allowed to start at a non-aligned page offset in
-		 * the first fragment and end at a non-aligned page offset
-		 * in the last fragment.
- */ - if ((fragnob < (int)PAGE_SIZE - page_offset) && - (niov < max_niov) && nob > fragnob) { - CDEBUG(D_NET, "fragnob %d < available page %d: with" - " remaining %d iovs with %d nob left\n", - fragnob, (int)PAGE_SIZE - page_offset, niov, - nob); - tx->tx_gaps = true; - } - - sg_set_page(sg, page, fragnob, page_offset); - sg = sg_next(sg); - if (!sg) { - CERROR("lacking enough sg entries to map tx\n"); - return -EFAULT; - } - - if (offset + fragnob < iov->iov_len) { - offset += fragnob; - } else { - offset = 0; - iov++; - niov--; - } - nob -= fragnob; - } while (nob > 0); - - return kiblnd_map_tx(ni, tx, rd, sg - tx->tx_frags); -} - static int kiblnd_setup_rd_kiov(struct lnet_ni *ni, struct kib_tx *tx, struct kib_rdma_desc *rd, int nkiov, - lnet_kiov_t *kiov, int offset, int nob) + struct bio_vec *kiov, int offset, int nob) { struct kib_net *net = ni->ni_data; struct scatterlist *sg; @@ -814,8 +746,8 @@ static int kiblnd_setup_rd_kiov(struct lnet_ni *ni, struct kib_tx *tx, LASSERT(nkiov > 0); LASSERT(net != NULL); - while (offset >= kiov->kiov_len) { - offset -= kiov->kiov_len; + while (offset >= kiov->bv_len) { + offset -= kiov->bv_len; nkiov--; kiov++; LASSERT(nkiov > 0); @@ -827,24 +759,24 @@ static int kiblnd_setup_rd_kiov(struct lnet_ni *ni, struct kib_tx *tx, do { LASSERT(nkiov > 0); - fragnob = min((int)(kiov->kiov_len - offset), nob); + fragnob = min((int)(kiov->bv_len - offset), nob); /* * We're allowed to start at a non-aligned page offset in * the first fragment and end at a non-aligned page offset * in the last fragment. */ - if ((fragnob < (int)(kiov->kiov_len - offset)) && + if ((fragnob < (int)(kiov->bv_len - offset)) && nkiov < max_nkiov && nob > fragnob) { CDEBUG(D_NET, "fragnob %d < available page %d: with" " remaining %d kiovs with %d nob left\n", - fragnob, (int)(kiov->kiov_len - offset), + fragnob, (int)(kiov->bv_len - offset), nkiov, nob); tx->tx_gaps = true; } - sg_set_page(sg, kiov->kiov_page, fragnob, - kiov->kiov_offset + offset); + sg_set_page(sg, kiov->bv_page, fragnob, + kiov->bv_offset + offset); sg = sg_next(sg); if (!sg) { CERROR("lacking enough sg entries to map tx\n"); @@ -974,7 +906,12 @@ __must_hold(&conn->ibc_lock) if (lnet_send_error_simulation(tx->tx_lntmsg[0], &tx->tx_hstatus)) rc = -EINVAL; else +#ifdef HAVE_IB_POST_SEND_RECV_CONST + rc = ib_post_send(conn->ibc_cmid->qp, wr, + (const struct ib_send_wr **)&bad); +#else rc = ib_post_send(conn->ibc_cmid->qp, wr, &bad); +#endif } conn->ibc_last_send = ktime_get(); @@ -1041,8 +978,7 @@ kiblnd_check_sends_locked(struct kib_conn *conn) !list_empty(&conn->ibc_tx_queue_rsrvd)) { tx = list_entry(conn->ibc_tx_queue_rsrvd.next, struct kib_tx, tx_list); - list_del(&tx->tx_list); - list_add_tail(&tx->tx_list, &conn->ibc_tx_queue); + list_move_tail(&tx->tx_list, &conn->ibc_tx_queue); conn->ibc_reserved_credits--; } @@ -1085,24 +1021,28 @@ kiblnd_check_sends_locked(struct kib_conn *conn) static void kiblnd_tx_complete(struct kib_tx *tx, int status) { - int failed = (status != IB_WC_SUCCESS); + int failed = (status != IB_WC_SUCCESS); struct kib_conn *conn = tx->tx_conn; - int idle; + int idle; - LASSERT (tx->tx_sending > 0); + if (tx->tx_sending <= 0) { + CERROR("Received an event on a freed tx: %p status %d\n", + tx, tx->tx_status); + return; + } - if (failed) { - if (conn->ibc_state == IBLND_CONN_ESTABLISHED) + if (failed) { + if (conn->ibc_state == IBLND_CONN_ESTABLISHED) CNETERR("Tx -> %s cookie %#llx" - " sending %d waiting %d: failed %d\n", - libcfs_nid2str(conn->ibc_peer->ibp_nid), - tx->tx_cookie, 
tx->tx_sending, tx->tx_waiting, - status); + " sending %d waiting %d: failed %d\n", + libcfs_nid2str(conn->ibc_peer->ibp_nid), + tx->tx_cookie, tx->tx_sending, tx->tx_waiting, + status); - kiblnd_close_conn(conn, -EIO); - } else { - kiblnd_peer_alive(conn->ibc_peer); - } + kiblnd_close_conn(conn, -EIO); + } else { + kiblnd_peer_alive(conn->ibc_peer); + } spin_lock(&conn->ibc_lock); @@ -1220,8 +1160,9 @@ kiblnd_init_rdma(struct kib_conn *conn, struct kib_tx *tx, int type, break; } - sge_nob = MIN(MIN(kiblnd_rd_frag_size(srcrd, srcidx), - kiblnd_rd_frag_size(dstrd, dstidx)), resid); + sge_nob = min3(kiblnd_rd_frag_size(srcrd, srcidx), + kiblnd_rd_frag_size(dstrd, dstidx), + resid); sge = &tx->tx_sge[tx->tx_nsge]; sge->addr = kiblnd_rd_frag_addr(srcrd, srcidx); @@ -1283,7 +1224,22 @@ kiblnd_queue_tx_locked(struct kib_tx *tx, struct kib_conn *conn) LASSERT(!tx->tx_queued); /* not queued for sending already */ LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED); - timeout_ns = lnet_get_lnd_timeout() * NSEC_PER_SEC; + if (conn->ibc_state >= IBLND_CONN_DISCONNECTED) { + tx->tx_status = -ECONNABORTED; + tx->tx_waiting = 0; + if (tx->tx_conn != NULL) { + /* PUT_DONE first attached to conn as a PUT_REQ */ + LASSERT(tx->tx_conn == conn); + LASSERT(tx->tx_msg->ibm_type == IBLND_MSG_PUT_DONE); + tx->tx_conn = NULL; + kiblnd_conn_decref(conn); + } + list_add(&tx->tx_list, &conn->ibc_zombie_txs); + + return; + } + + timeout_ns = kiblnd_timeout() * NSEC_PER_SEC; tx->tx_queued = 1; tx->tx_deadline = ktime_add_ns(ktime_get(), timeout_ns); @@ -1387,8 +1343,9 @@ kiblnd_connect_peer(struct kib_peer_ni *peer_ni) LASSERT (net != NULL); LASSERT (peer_ni->ibp_connecting > 0); - cmid = kiblnd_rdma_create_id(kiblnd_cm_callback, peer_ni, RDMA_PS_TCP, - IB_QPT_RC); + cmid = kiblnd_rdma_create_id(peer_ni->ibp_ni->ni_net_ns, + kiblnd_cm_callback, peer_ni, + RDMA_PS_TCP, IB_QPT_RC); if (IS_ERR(cmid)) { CERROR("Can't create CMID for %s: %ld\n", @@ -1411,12 +1368,12 @@ kiblnd_connect_peer(struct kib_peer_ni *peer_ni) if (*kiblnd_tunables.kib_use_priv_port) { rc = kiblnd_resolve_addr(cmid, &srcaddr, &dstaddr, - lnet_get_lnd_timeout() * 1000); + kiblnd_timeout() * 1000); } else { rc = rdma_resolve_addr(cmid, (struct sockaddr *)&srcaddr, (struct sockaddr *)&dstaddr, - lnet_get_lnd_timeout() * 1000); + kiblnd_timeout() * 1000); } if (rc != 0) { /* Can't initiate address resolution: */ @@ -1439,12 +1396,10 @@ kiblnd_connect_peer(struct kib_peer_ni *peer_ni) bool kiblnd_reconnect_peer(struct kib_peer_ni *peer_ni) { - rwlock_t *glock = &kiblnd_data.kib_global_lock; - char *reason = NULL; - struct list_head txs; - unsigned long flags; - - INIT_LIST_HEAD(&txs); + rwlock_t *glock = &kiblnd_data.kib_global_lock; + char *reason = NULL; + LIST_HEAD(txs); + unsigned long flags; write_lock_irqsave(glock, flags); if (peer_ni->ibp_reconnecting == 0) { @@ -1621,8 +1576,7 @@ kiblnd_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg) int target_is_router = lntmsg->msg_target_is_router; int routing = lntmsg->msg_routing; unsigned int payload_niov = lntmsg->msg_niov; - struct kvec *payload_iov = lntmsg->msg_iov; - lnet_kiov_t *payload_kiov = lntmsg->msg_kiov; + struct bio_vec *payload_kiov = lntmsg->msg_kiov; unsigned int payload_offset = lntmsg->msg_offset; unsigned int payload_nob = lntmsg->msg_len; struct kib_msg *ibmsg; @@ -1641,8 +1595,6 @@ kiblnd_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg) /* Thread context */ LASSERT (!in_interrupt()); - /* payload is either all vaddrs or all pages */ - LASSERT 
(!(payload_kiov != NULL && payload_iov != NULL)); switch (type) { default: @@ -1671,16 +1623,10 @@ kiblnd_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg) ibmsg = tx->tx_msg; rd = &ibmsg->ibm_u.get.ibgm_rd; - if ((lntmsg->msg_md->md_options & LNET_MD_KIOV) == 0) - rc = kiblnd_setup_rd_iov(ni, tx, rd, - lntmsg->msg_md->md_niov, - lntmsg->msg_md->md_iov.iov, - 0, lntmsg->msg_md->md_length); - else - rc = kiblnd_setup_rd_kiov(ni, tx, rd, - lntmsg->msg_md->md_niov, - lntmsg->msg_md->md_iov.kiov, - 0, lntmsg->msg_md->md_length); + rc = kiblnd_setup_rd_kiov(ni, tx, rd, + lntmsg->msg_md->md_niov, + lntmsg->msg_md->md_kiov, + 0, lntmsg->msg_md->md_length); if (rc != 0) { CERROR("Can't setup GET sink for %s: %d\n", libcfs_nid2str(target.nid), rc); @@ -1723,14 +1669,9 @@ kiblnd_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg) return -ENOMEM; } - if (payload_kiov == NULL) - rc = kiblnd_setup_rd_iov(ni, tx, tx->tx_rd, - payload_niov, payload_iov, - payload_offset, payload_nob); - else - rc = kiblnd_setup_rd_kiov(ni, tx, tx->tx_rd, - payload_niov, payload_kiov, - payload_offset, payload_nob); + rc = kiblnd_setup_rd_kiov(ni, tx, tx->tx_rd, + payload_niov, payload_kiov, + payload_offset, payload_nob); if (rc != 0) { CERROR("Can't setup PUT src for %s: %d\n", libcfs_nid2str(target.nid), rc); @@ -1764,16 +1705,11 @@ kiblnd_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg) ibmsg = tx->tx_msg; ibmsg->ibm_u.immediate.ibim_hdr = *hdr; - if (payload_kiov != NULL) - lnet_copy_kiov2flat(IBLND_MSG_SIZE, ibmsg, - offsetof(struct kib_msg, ibm_u.immediate.ibim_payload), - payload_niov, payload_kiov, - payload_offset, payload_nob); - else - lnet_copy_iov2flat(IBLND_MSG_SIZE, ibmsg, - offsetof(struct kib_msg, ibm_u.immediate.ibim_payload), - payload_niov, payload_iov, - payload_offset, payload_nob); + lnet_copy_kiov2flat(IBLND_MSG_SIZE, ibmsg, + offsetof(struct kib_msg, + ibm_u.immediate.ibim_payload), + payload_niov, payload_kiov, + payload_offset, payload_nob); nob = offsetof(struct kib_immediate_msg, ibim_payload[payload_nob]); kiblnd_init_tx_msg(ni, tx, IBLND_MSG_IMMEDIATE, nob); @@ -1787,13 +1723,12 @@ static void kiblnd_reply(struct lnet_ni *ni, struct kib_rx *rx, struct lnet_msg *lntmsg) { struct lnet_process_id target = lntmsg->msg_target; - unsigned int niov = lntmsg->msg_niov; - struct kvec *iov = lntmsg->msg_iov; - lnet_kiov_t *kiov = lntmsg->msg_kiov; - unsigned int offset = lntmsg->msg_offset; - unsigned int nob = lntmsg->msg_len; + unsigned int niov = lntmsg->msg_niov; + struct bio_vec *kiov = lntmsg->msg_kiov; + unsigned int offset = lntmsg->msg_offset; + unsigned int nob = lntmsg->msg_len; struct kib_tx *tx; - int rc; + int rc; tx = kiblnd_get_idle_tx(ni, rx->rx_conn->ibc_peer->ibp_nid); if (tx == NULL) { @@ -1804,9 +1739,6 @@ kiblnd_reply(struct lnet_ni *ni, struct kib_rx *rx, struct lnet_msg *lntmsg) if (nob == 0) rc = 0; - else if (kiov == NULL) - rc = kiblnd_setup_rd_iov(ni, tx, tx->tx_rd, - niov, iov, offset, nob); else rc = kiblnd_setup_rd_kiov(ni, tx, tx->tx_rd, niov, kiov, offset, nob); @@ -1849,7 +1781,7 @@ failed_0: int kiblnd_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg, - int delayed, unsigned int niov, struct kvec *iov, lnet_kiov_t *kiov, + int delayed, unsigned int niov, struct bio_vec *kiov, unsigned int offset, unsigned int mlen, unsigned int rlen) { struct kib_rx *rx = private; @@ -1863,8 +1795,6 @@ kiblnd_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg, LASSERT (mlen <= rlen); LASSERT (!in_interrupt()); - 
/* Either all pages or all vaddrs */ - LASSERT (!(kiov != NULL && iov != NULL)); switch (rxmsg->ibm_type) { default: @@ -1880,16 +1810,11 @@ kiblnd_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg, break; } - if (kiov != NULL) - lnet_copy_flat2kiov(niov, kiov, offset, - IBLND_MSG_SIZE, rxmsg, - offsetof(struct kib_msg, ibm_u.immediate.ibim_payload), - mlen); - else - lnet_copy_flat2iov(niov, iov, offset, - IBLND_MSG_SIZE, rxmsg, - offsetof(struct kib_msg, ibm_u.immediate.ibim_payload), - mlen); + lnet_copy_flat2kiov(niov, kiov, offset, + IBLND_MSG_SIZE, rxmsg, + offsetof(struct kib_msg, + ibm_u.immediate.ibim_payload), + mlen); lnet_finalize(lntmsg, 0); break; @@ -1916,12 +1841,8 @@ kiblnd_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg, txmsg = tx->tx_msg; rd = &txmsg->ibm_u.putack.ibpam_rd; - if (kiov == NULL) - rc = kiblnd_setup_rd_iov(ni, tx, rd, - niov, iov, offset, mlen); - else - rc = kiblnd_setup_rd_kiov(ni, tx, rd, - niov, kiov, offset, mlen); + rc = kiblnd_setup_rd_kiov(ni, tx, rd, + niov, kiov, offset, mlen); if (rc != 0) { CERROR("Can't setup PUT sink for %s: %d\n", libcfs_nid2str(conn->ibc_peer->ibp_nid), rc); @@ -1994,24 +1915,24 @@ kiblnd_peer_alive(struct kib_peer_ni *peer_ni) static void kiblnd_peer_notify(struct kib_peer_ni *peer_ni) { - int error = 0; + int error = 0; time64_t last_alive = 0; - unsigned long flags; + unsigned long flags; read_lock_irqsave(&kiblnd_data.kib_global_lock, flags); if (kiblnd_peer_idle(peer_ni) && peer_ni->ibp_error != 0) { - error = peer_ni->ibp_error; - peer_ni->ibp_error = 0; + error = peer_ni->ibp_error; + peer_ni->ibp_error = 0; - last_alive = peer_ni->ibp_last_alive; - } + last_alive = peer_ni->ibp_last_alive; + } read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); - if (error != 0) - lnet_notify(peer_ni->ibp_ni, - peer_ni->ibp_nid, 0, last_alive); + if (error != 0) + lnet_notify(peer_ni->ibp_ni, + peer_ni->ibp_nid, false, false, last_alive); } void @@ -2122,10 +2043,10 @@ kiblnd_handle_early_rxs(struct kib_conn *conn) write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); } -static void +void kiblnd_abort_txs(struct kib_conn *conn, struct list_head *txs) { - struct list_head zombies = LIST_HEAD_INIT(zombies); + LIST_HEAD(zombies); struct list_head *tmp; struct list_head *nxt; struct kib_tx *tx; @@ -2170,8 +2091,7 @@ kiblnd_abort_txs(struct kib_conn *conn, struct list_head *txs) */ if (tx->tx_sending == 0) { tx->tx_queued = 0; - list_del(&tx->tx_list); - list_add(&tx->tx_list, &zombies); + list_move(&tx->tx_list, &zombies); } } @@ -2192,13 +2112,13 @@ kiblnd_finalise_conn(struct kib_conn *conn) LASSERT (!in_interrupt()); LASSERT (conn->ibc_state > IBLND_CONN_INIT); - kiblnd_set_conn_state(conn, IBLND_CONN_DISCONNECTED); - /* abort_receives moves QP state to IB_QPS_ERR. This is only required * for connections that didn't get as far as being connected, because * rdma_disconnect() does this for free. */ kiblnd_abort_receives(conn); + kiblnd_set_conn_state(conn, IBLND_CONN_DISCONNECTED); + /* Complete all tx descs not waiting for sends to complete. 
 	 * NB we should be safe from RDMA now that the QP has changed state */
 
@@ -2215,7 +2135,7 @@ static void
 kiblnd_peer_connect_failed(struct kib_peer_ni *peer_ni, int active,
 			   int error)
 {
-	struct list_head zombies = LIST_HEAD_INIT(zombies);
+	LIST_HEAD(zombies);
 	unsigned long flags;
 
 	LASSERT (error != 0);
@@ -2271,7 +2191,7 @@ kiblnd_connreq_done(struct kib_conn *conn, int status)
 {
 	struct kib_peer_ni *peer_ni = conn->ibc_peer;
 	struct kib_tx *tx;
-	struct list_head txs;
+	LIST_HEAD(txs);
 	unsigned long flags;
 	int active;
 
@@ -2328,7 +2248,6 @@ kiblnd_connreq_done(struct kib_conn *conn, int status)
 	}
 
 	/* grab pending txs while I have the lock */
-	INIT_LIST_HEAD(&txs);
 	list_splice_init(&peer_ni->ibp_tx_queue, &txs);
 
 	if (!kiblnd_peer_active(peer_ni) ||		/* peer_ni has been deleted */
@@ -2730,11 +2649,12 @@ kiblnd_check_reconnect(struct kib_conn *conn, int version,
 	}
 
 	write_lock_irqsave(glock, flags);
-        /* retry connection if it's still needed and no other connection
-         * attempts (active or passive) are in progress
-         * NB: reconnect is still needed even when ibp_tx_queue is
-         * empty if ibp_version != version because reconnect may be
-         * initiated by kiblnd_query() */
+	/* retry connection if it's still needed and no other connection
+	 * attempts (active or passive) are in progress
+	 * NB: reconnect is still needed even when ibp_tx_queue is
+	 * empty if ibp_version != version because reconnect may be
+	 * initiated.
+	 */
 	reconnect = (!list_empty(&peer_ni->ibp_tx_queue) ||
 		     peer_ni->ibp_version != version) &&
 		    peer_ni->ibp_connecting &&
@@ -3162,7 +3082,7 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
 			rc = event->status;
 		} else {
 			rc = rdma_resolve_route(
-				cmid, lnet_get_lnd_timeout() * 1000);
+				cmid, kiblnd_timeout() * 1000);
 			if (rc == 0) {
 				struct kib_net *net = peer_ni->ibp_ni->ni_data;
 				struct kib_dev *dev = net->ibn_dev;
@@ -3343,9 +3263,9 @@ kiblnd_conn_timed_out_locked(struct kib_conn *conn)
 static void
 kiblnd_check_conns (int idx)
 {
-	struct list_head  closes = LIST_HEAD_INIT(closes);
-	struct list_head  checksends = LIST_HEAD_INIT(checksends);
-	struct list_head  timedout_txs = LIST_HEAD_INIT(timedout_txs);
+	LIST_HEAD(closes);
+	LIST_HEAD(checksends);
+	LIST_HEAD(timedout_txs);
 	struct list_head *peers = &kiblnd_data.kib_peers[idx];
 	struct list_head *ptmp;
 	struct kib_peer_ni *peer_ni;
@@ -3479,8 +3399,6 @@ kiblnd_connd (void *arg)
 	int		   peer_index = 0;
 	unsigned long	   deadline = jiffies;
 
-	cfs_block_allsigs();
-
 	init_waitqueue_entry(&wait, current);
 	kiblnd_data.kib_connd = current;
 
@@ -3580,7 +3498,7 @@ kiblnd_connd (void *arg)
 			 * connection within (n+1)/n times the timeout
 			 * interval.
 			 */
-			lnd_timeout = lnet_get_lnd_timeout();
+			lnd_timeout = kiblnd_timeout();
 			if (lnd_timeout > n * p)
 				chunk = (chunk * n * p) / lnd_timeout;
 			if (chunk == 0)
@@ -3592,7 +3510,7 @@ kiblnd_connd (void *arg)
 				kiblnd_data.kib_peer_hash_size;
 		}
 
-		deadline += msecs_to_jiffies(p * MSEC_PER_SEC);
+		deadline += cfs_time_seconds(p);
 		spin_lock_irqsave(lock, flags);
 	}
 
@@ -3606,7 +3524,6 @@ kiblnd_connd (void *arg)
 
 		schedule_timeout(timeout);
 
-		set_current_state(TASK_RUNNING);
 		remove_wait_queue(&kiblnd_data.kib_connd_waitq, &wait);
 		spin_lock_irqsave(lock, flags);
 	}
@@ -3741,8 +3658,6 @@ kiblnd_scheduler(void *arg)
 	int		  busy_loops = 0;
 	int		  rc;
 
-	cfs_block_allsigs();
-
 	init_waitqueue_entry(&wait, current);
 
 	sched = kiblnd_data.kib_scheds[KIB_THREAD_CPT(id)];
@@ -3876,14 +3791,13 @@ kiblnd_failover_thread(void *arg)
 {
 	rwlock_t *glock = &kiblnd_data.kib_global_lock;
 	struct kib_dev *dev;
+	struct net *ns = arg;
 	wait_queue_entry_t wait;
 	unsigned long flags;
 	int rc;
 
 	LASSERT(*kiblnd_tunables.kib_dev_failover != 0);
 
-	cfs_block_allsigs();
-
 	init_waitqueue_entry(&wait, current);
 
 	write_lock_irqsave(glock, flags);
@@ -3904,7 +3818,7 @@ kiblnd_failover_thread(void *arg)
 			dev->ibd_failover = 1;
 			write_unlock_irqrestore(glock, flags);
 
-			rc = kiblnd_dev_failover(dev);
+			rc = kiblnd_dev_failover(dev, ns);
 
 			write_lock_irqsave(glock, flags);
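
Note on the HAVE_IB_POST_SEND_RECV_CONST hunks above: Linux 4.19 changed
ib_post_send() and ib_post_recv() to take const work-request pointers and to
report the failing wr through a const pointer, which is why the patch adds
the casts on &bad_wrq and &bad when the configure probe defines
HAVE_IB_POST_SEND_RECV_CONST. A minimal sketch of how the same compatibility
could instead be kept behind a single helper follows; kiblnd_post_recv_compat()
is hypothetical and not part of this patch.

/* Hypothetical helper, sketch only: centralize the
 * HAVE_IB_POST_SEND_RECV_CONST compatibility in one place instead of
 * open-coding the #ifdef at every ib_post_recv() call site. */
static inline int
kiblnd_post_recv_compat(struct ib_qp *qp, struct ib_recv_wr *wrq,
			struct ib_recv_wr **bad_wrq)
{
#ifdef HAVE_IB_POST_SEND_RECV_CONST
	/* kernels >= 4.19: the RDMA core takes const wr pointers and
	 * returns the offending wr through a const pointer as well */
	return ib_post_recv(qp, wrq, (const struct ib_recv_wr **)bad_wrq);
#else
	return ib_post_recv(qp, wrq, bad_wrq);
#endif
}

With such a helper, the call in kiblnd_post_rx() would reduce to
rc = kiblnd_post_recv_compat(conn->ibc_cmid->qp, &rx->rx_wrq, &bad_wrq);
and the equivalent pattern would cover the ib_post_send() call site guarded
by the same macro.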