diff --git a/lnet/klnds/gnilnd/gnilnd_cb.c b/lnet/klnds/gnilnd/gnilnd_cb.c
index 381aa64..705a341 100644
--- a/lnet/klnds/gnilnd/gnilnd_cb.c
+++ b/lnet/klnds/gnilnd/gnilnd_cb.c
@@ -23,6 +23,7 @@
  *
  */
 
+#include 
 #include 
 #include "gnilnd.h"
 
@@ -228,7 +229,7 @@ kgnilnd_free_tx(kgn_tx_t *tx)
 
         /* we only allocate this if we need to */
         if (tx->tx_phys != NULL) {
-                cfs_mem_cache_free(kgnilnd_data.kgn_tx_phys_cache, tx->tx_phys);
+                kmem_cache_free(kgnilnd_data.kgn_tx_phys_cache, tx->tx_phys);
                 CDEBUG(D_MALLOC, "slab-freed 'tx_phys': %lu at %p.\n",
                        LNET_MAX_IOV * sizeof(gni_mem_segment_t), tx->tx_phys);
         }
@@ -242,9 +243,8 @@ kgnilnd_free_tx(kgn_tx_t *tx)
 
 #if 0
         KGNILND_POISON(tx, 0x5a, sizeof(kgn_tx_t));
 #endif
-        cfs_mem_cache_free(kgnilnd_data.kgn_tx_cache, tx);
-        CDEBUG(D_MALLOC, "slab-freed 'tx': %lu at %p.\n",
-               sizeof(*tx), tx);
+        CDEBUG(D_MALLOC, "slab-freed 'tx': %lu at %p.\n", sizeof(*tx), tx);
+        kmem_cache_free(kgnilnd_data.kgn_tx_cache, tx);
 }
 
@@ -255,7 +255,7 @@ kgnilnd_alloc_tx (void)
         if (CFS_FAIL_CHECK(CFS_FAIL_GNI_ALLOC_TX))
                 return tx;
 
-        tx = cfs_mem_cache_alloc(kgnilnd_data.kgn_tx_cache, CFS_ALLOC_ATOMIC);
+        tx = kmem_cache_alloc(kgnilnd_data.kgn_tx_cache, GFP_ATOMIC);
         if (tx == NULL) {
                 CERROR("failed to allocate tx\n");
                 return NULL;
@@ -500,7 +500,6 @@ kgnilnd_setup_immediate_buffer(kgn_tx_t *tx, unsigned int niov, struct iovec *io
          * gni_smsg_send to send that as the payload */
 
         LASSERT(tx->tx_buftype == GNILND_BUF_NONE);
-        LASSERT(nob >= 0);
 
         if (nob == 0) {
                 tx->tx_buffer = NULL;
@@ -522,7 +521,7 @@ kgnilnd_setup_immediate_buffer(kgn_tx_t *tx, unsigned int niov, struct iovec *io
                  * than kiov_len, we will also have a hole at the end of that page
                  * which isn't allowed */
                 if ((kiov[i].kiov_offset != 0 && i > 0) ||
-                    (kiov[i].kiov_offset + kiov[i].kiov_len != CFS_PAGE_SIZE && i < niov - 1)) {
+                    (kiov[i].kiov_offset + kiov[i].kiov_len != PAGE_SIZE && i < niov - 1)) {
                         CNETERR("Can't make payload contiguous in I/O VM:"
                                 "page %d, offset %u, nob %u, kiov_offset %u kiov_len %u \n",
                                 i, offset, nob, kiov->kiov_offset, kiov->kiov_len);
@@ -640,8 +639,8 @@ kgnilnd_setup_phys_buffer(kgn_tx_t *tx, int nkiov, lnet_kiov_t *kiov,
         LASSERT(tx->tx_buftype == GNILND_BUF_NONE);
 
         /* only allocate this if we are going to use it */
-        tx->tx_phys = cfs_mem_cache_alloc(kgnilnd_data.kgn_tx_phys_cache,
-                                          CFS_ALLOC_ATOMIC);
+        tx->tx_phys = kmem_cache_alloc(kgnilnd_data.kgn_tx_phys_cache,
+                                       GFP_ATOMIC);
         if (tx->tx_phys == NULL) {
                 CERROR("failed to allocate tx_phys\n");
                 rc = -ENOMEM;
@@ -711,7 +710,7 @@ kgnilnd_setup_phys_buffer(kgn_tx_t *tx, int nkiov, lnet_kiov_t *kiov,
                          "nkiov %u offset %u\n",
                          kiov->kiov_page, kiov->kiov_offset, kiov->kiov_len, nob, nkiov, offset);
 
-                phys->address = lnet_page2phys(kiov->kiov_page);
+                phys->address = page_to_phys(kiov->kiov_page);
                 phys++;
                 kiov++;
                 nkiov--;
@@ -729,7 +728,7 @@ kgnilnd_setup_phys_buffer(kgn_tx_t *tx, int nkiov, lnet_kiov_t *kiov,
 
 error:
         if (tx->tx_phys != NULL) {
-                cfs_mem_cache_free(kgnilnd_data.kgn_tx_phys_cache, tx->tx_phys);
+                kmem_cache_free(kgnilnd_data.kgn_tx_phys_cache, tx->tx_phys);
                 CDEBUG(D_MALLOC, "slab-freed 'tx_phys': %lu at %p.\n",
                        sizeof(*tx->tx_phys), tx->tx_phys);
                 tx->tx_phys = NULL;
@@ -1099,7 +1098,7 @@ kgnilnd_add_purgatory_tx(kgn_tx_t *tx)
         if (tx->tx_buffer_copy)
                 gmp->gmp_map_key = tx->tx_buffer_copy_map_key;
         else
-                gmp->gmp_map_key = tx->tx_map_key;
+                gmp->gmp_map_key = tx->tx_map_key;
 
         atomic_inc(&conn->gnc_device->gnd_n_mdd_held);
 
@@ -1177,8 +1176,8 @@ kgnilnd_unmap_buffer(kgn_tx_t *tx, int error)
                 rrc = kgnilnd_mem_deregister(dev->gnd_handle, &tx->tx_map_key, 0);
                 LASSERTF(rrc == GNI_RC_SUCCESS, "rrc %d\n", rrc);
         } else {
-                rrc = kgnilnd_mem_deregister(dev->gnd_handle, &tx->tx_map_key, hold_timeout);
-                LASSERTF(rrc == GNI_RC_SUCCESS, "rrc %d\n", rrc);
+                rrc = kgnilnd_mem_deregister(dev->gnd_handle, &tx->tx_map_key, hold_timeout);
+                LASSERTF(rrc == GNI_RC_SUCCESS, "rrc %d\n", rrc);
         }
 
         tx->tx_buftype--;
@@ -1209,7 +1208,7 @@ kgnilnd_tx_done(kgn_tx_t *tx, int completion)
                         libcfs_nid2str(conn->gnc_peer->gnp_nid) : "<?>",
                         tx->tx_id.txe_smsg_id, tx->tx_id.txe_idx,
                         kgnilnd_tx_state2str(tx->tx_list_state),
-                        cfs_duration_sec((long)jiffies - tx->tx_qtime));
+                        cfs_duration_sec((unsigned long)jiffies - tx->tx_qtime));
         }
 
         /* The error codes determine if we hold onto the MDD */
@@ -1697,7 +1696,7 @@ kgnilnd_queue_rdma(kgn_conn_t *conn, kgn_tx_t *tx)
 void
 kgnilnd_queue_tx(kgn_conn_t *conn, kgn_tx_t *tx)
 {
-        int rc;
+        int rc = 0;
         int add_tail = 1;
 
         /* set the tx_id here, we delay it until we have an actual conn
@@ -1760,6 +1759,7 @@ kgnilnd_launch_tx(kgn_tx_t *tx, kgn_net_t *net, lnet_process_id_t *target)
         kgn_peer_t *new_peer = NULL;
         kgn_conn_t *conn = NULL;
         int rc;
+        int node_state;
         ENTRY;
 
@@ -1800,6 +1800,8 @@ kgnilnd_launch_tx(kgn_tx_t *tx, kgn_net_t *net, lnet_process_id_t *target)
 
         CFS_RACE(CFS_FAIL_GNI_FIND_TARGET);
 
+        node_state = kgnilnd_get_node_state(LNET_NIDADDR(target->nid));
+
         /* NB - this will not block during normal operations -
          * the only writer of this is in the startup/shutdown path. */
         rc = down_read_trylock(&kgnilnd_data.kgn_net_rw_sem);
@@ -1811,7 +1813,7 @@ kgnilnd_launch_tx(kgn_tx_t *tx, kgn_net_t *net, lnet_process_id_t *target)
         /* ignore previous peer entirely - we cycled the lock, so we
          * will create new peer and at worst drop it if peer is still
          * in the tables */
-        rc = kgnilnd_create_peer_safe(&new_peer, target->nid, net);
+        rc = kgnilnd_create_peer_safe(&new_peer, target->nid, net, node_state);
         if (rc != 0) {
                 up_read(&kgnilnd_data.kgn_net_rw_sem);
                 GOTO(no_peer, rc);
@@ -1824,7 +1826,20 @@ kgnilnd_launch_tx(kgn_tx_t *tx, kgn_net_t *net, lnet_process_id_t *target)
          * if we don't find it, add our new one to the list */
         kgnilnd_add_peer_locked(target->nid, new_peer, &peer);
 
+        /* don't create a connection if the peer is not up */
+        if (peer->gnp_down != GNILND_RCA_NODE_UP) {
+                write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
+                rc = -ENETRESET;
+                GOTO(no_peer, rc);
+        }
+
         conn = kgnilnd_find_or_create_conn_locked(peer);
+
+        if (CFS_FAIL_CHECK(CFS_FAIL_GNI_DGRAM_DROP_TX)) {
+                write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
+                GOTO(no_peer, rc);
+        }
+
         if (conn != NULL) {
                 /* oh hey, found a conn now... magical */
                 kgnilnd_queue_tx(conn, tx);
@@ -1990,7 +2005,7 @@ kgnilnd_alloc_rx(void)
 {
         kgn_rx_t *rx;
 
-        rx = cfs_mem_cache_alloc(kgnilnd_data.kgn_rx_cache, CFS_ALLOC_ATOMIC);
+        rx = kmem_cache_alloc(kgnilnd_data.kgn_rx_cache, GFP_ATOMIC);
         if (rx == NULL) {
                 CERROR("failed to allocate rx\n");
                 return NULL;
@@ -2037,6 +2052,7 @@ kgnilnd_consume_rx(kgn_rx_t *rx)
         /* if we are eager, free the cache alloc'd msg */
         if (unlikely(rx->grx_eager)) {
                 LIBCFS_FREE(rxmsg, sizeof(*rxmsg) + *kgnilnd_tunables.kgn_max_immediate);
+                atomic_dec(&kgnilnd_data.kgn_neager_allocs);
 
                 /* release ref from eager_recv */
                 kgnilnd_conn_decref(conn);
@@ -2045,7 +2061,7 @@ kgnilnd_consume_rx(kgn_rx_t *rx)
                 kgnilnd_release_msg(conn);
         }
 
-        cfs_mem_cache_free(kgnilnd_data.kgn_rx_cache, rx);
+        kmem_cache_free(kgnilnd_data.kgn_rx_cache, rx);
         CDEBUG(D_MALLOC, "slab-freed 'rx': %lu at %p.\n",
                sizeof(*rx), rx);
 
@@ -2342,6 +2358,15 @@ kgnilnd_eager_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg,
 
         /* we have no credits or buffers for this message, so copy it
          * somewhere for a later kgnilnd_recv */
+        if (atomic_read(&kgnilnd_data.kgn_neager_allocs) >=
+            *kgnilnd_tunables.kgn_eager_credits) {
+                CERROR("Out of eager credits to %s\n",
+                        libcfs_nid2str(conn->gnc_peer->gnp_nid));
+                return -ENOMEM;
+        }
+
+        atomic_inc(&kgnilnd_data.kgn_neager_allocs);
+
         LIBCFS_ALLOC(eagermsg, sizeof(*eagermsg) + *kgnilnd_tunables.kgn_max_immediate);
         if (eagermsg == NULL) {
                 kgnilnd_conn_decref(conn);
@@ -2515,7 +2540,7 @@ kgnilnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg,
 
                 tx->tx_msg.gnm_u.putack.gnpam_desc.gnrd_nob = mlen;
                 tx->tx_lntmsg[0] = lntmsg; /* finalize this on RDMA_DONE */
-
+                tx->tx_qtime = jiffies;
                 /* we only queue from kgnilnd_recv - we might get called from other contexts
                  * and we don't want to block the mutex in those cases */
 
@@ -2946,7 +2971,6 @@ kgnilnd_reaper(void *arg)
         struct timer_list timer;
         DEFINE_WAIT(wait);
 
-        cfs_daemonize("kgnilnd_rpr");
         cfs_block_allsigs();
 
         /* all gnilnd threads need to run fairly urgently */
@@ -3043,7 +3067,7 @@ kgnilnd_recv_bte_get(kgn_tx_t *tx) {
                 lnet_copy_flat2kiov(
                         niov, kiov, offset, nob,
-                        tx->tx_buffer_copy, tx->tx_offset, nob);
+                        tx->tx_buffer_copy + tx->tx_offset, 0, nob);
         } else {
                 memcpy(tx->tx_buffer, tx->tx_buffer_copy + tx->tx_offset, nob);
         }
@@ -3063,6 +3087,9 @@ kgnilnd_check_rdma_cq(kgn_device_t *dev)
         long num_processed = 0;
         kgn_conn_t *conn = NULL;
         kgn_tx_t *tx = NULL;
+        kgn_rdma_desc_t *rdesc;
+        unsigned int rnob;
+        __u64 rcookie;
 
         for (;;) {
                 /* make sure we don't keep looping if we need to reset */
@@ -3183,31 +3210,28 @@ kgnilnd_check_rdma_cq(kgn_device_t *dev)
 
                 if (tx->tx_msg.gnm_type == GNILND_MSG_PUT_DONE ||
                     tx->tx_msg.gnm_type == GNILND_MSG_GET_DONE_REV) {
-                        if (should_retry) {
-                                kgnilnd_rdma(tx, tx->tx_msg.gnm_type,
-                                             &tx->tx_putinfo.gnpam_desc,
-                                             tx->tx_putinfo.gnpam_desc.gnrd_nob,
-                                             tx->tx_putinfo.gnpam_dst_cookie);
-                        } else {
-                                kgnilnd_nak_rdma(conn, tx->tx_msg.gnm_type,
-                                                 -EFAULT,
-                                                 tx->tx_putinfo.gnpam_dst_cookie,
-                                                 tx->tx_msg.gnm_srcnid);
-                                kgnilnd_tx_done(tx, -EFAULT);
-                        }
+                        rdesc = &tx->tx_putinfo.gnpam_desc;
+                        rnob = tx->tx_putinfo.gnpam_desc.gnrd_nob;
+                        rcookie = tx->tx_putinfo.gnpam_dst_cookie;
                 } else {
-                        if (should_retry) {
-                                kgnilnd_rdma(tx, tx->tx_msg.gnm_type,
-                                             &tx->tx_getinfo.gngm_desc,
-                                             tx->tx_lntmsg[0]->msg_len,
-                                             tx->tx_getinfo.gngm_cookie);
-                        } else {
-                                kgnilnd_nak_rdma(conn, tx->tx_msg.gnm_type,
-                                                 -EFAULT,
-                                                 tx->tx_getinfo.gngm_cookie,
-                                                 tx->tx_msg.gnm_srcnid);
-                                kgnilnd_tx_done(tx, -EFAULT);
-                        }
+                        rdesc = &tx->tx_getinfo.gngm_desc;
+                        rnob = tx->tx_lntmsg[0]->msg_len;
+                        rcookie = tx->tx_getinfo.gngm_cookie;
+                }
+
+                if (should_retry) {
+                        kgnilnd_rdma(tx,
+                                     tx->tx_msg.gnm_type,
+                                     rdesc,
+                                     rnob, rcookie);
+                } else {
+                        kgnilnd_nak_rdma(conn,
+                                         tx->tx_msg.gnm_type,
+                                         -EFAULT,
+                                         rcookie,
+                                         tx->tx_msg.gnm_srcnid);
+                        kgnilnd_tx_done(tx, -EFAULT);
+                        kgnilnd_close_conn(conn, -ECOMM);
                 }
 
                 /* drop ref from kgnilnd_validate_tx_ev_id */
@@ -3581,7 +3605,7 @@ kgnilnd_send_mapped_tx(kgn_tx_t *tx, int try_map_if_full)
         case GNILND_MSG_PUT_DONE_REV:
                 kgnilnd_rdma(tx, GNILND_MSG_PUT_DONE_REV,
                              &tx->tx_getinfo.gngm_desc,
-                             tx->tx_lntmsg[0]->msg_len,
+                             tx->tx_nob,
                              tx->tx_getinfo.gngm_cookie);
                 break;
         case GNILND_MSG_GET_ACK_REV:
@@ -3910,6 +3934,13 @@ kgnilnd_complete_tx(kgn_tx_t *tx, int rc)
 {
         int complete = 0;
         kgn_conn_t *conn = tx->tx_conn;
+        __u64 nob = tx->tx_nob;
+        __u32 physnop = tx->tx_phys_npages;
+        int id = tx->tx_id.txe_smsg_id;
+        int buftype = tx->tx_buftype;
+        gni_mem_handle_t hndl;
+        hndl.qword1 = tx->tx_map_key.qword1;
+        hndl.qword2 = tx->tx_map_key.qword2;
 
         spin_lock(&conn->gnc_list_lock);
 
@@ -3919,6 +3950,22 @@ kgnilnd_complete_tx(kgn_tx_t *tx, int rc)
         tx->tx_rc = rc;
         tx->tx_state &= ~GNILND_TX_WAITING_REPLY;
 
+        if (rc == -EFAULT) {
+                CDEBUG(D_NETERROR, "Error %d TX data: TX %p tx_id %x nob %16"LPF64"u physnop %8d buffertype %#8x MemHandle "LPX64"."LPX64"x\n",
+                        rc, tx, id, nob, physnop, buftype, hndl.qword1, hndl.qword2);
+
+                if (*kgnilnd_tunables.kgn_efault_lbug) {
+                        GNIDBG_TOMSG(D_NETERROR, &tx->tx_msg,
+                                "error %d on tx 0x%p->%s id %u/%d state %s age %ds",
+                                rc, tx, conn ?
+                                libcfs_nid2str(conn->gnc_peer->gnp_nid) : "<?>",
+                                tx->tx_id.txe_smsg_id, tx->tx_id.txe_idx,
+                                kgnilnd_tx_state2str(tx->tx_list_state),
+                                cfs_duration_sec((unsigned long)jiffies - tx->tx_qtime));
+                        LBUG();
+                }
+        }
+
         if (!(tx->tx_state & GNILND_TX_WAITING_COMPLETION)) {
                 kgnilnd_tx_del_state_locked(tx, NULL, conn, GNILND_TX_ALLOCD);
                 /* sample under lock as follow on steps require gnc_list_lock
@@ -4850,15 +4897,12 @@ kgnilnd_scheduler(void *arg)
 {
         int threadno = (long)arg;
         kgn_device_t *dev;
-        char name[16];
         int busy_loops = 0;
         unsigned long deadline = 0;
         DEFINE_WAIT(wait);
 
         dev = &kgnilnd_data.kgn_devices[(threadno + 1) % kgnilnd_data.kgn_ndevs];
 
-        snprintf(name, sizeof(name), "kgnilnd_sd_%02d", threadno);
-        cfs_daemonize(name);
         cfs_block_allsigs();
 
         /* all gnilnd threads need to run fairly urgently */
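
Note on the eager-credit accounting this patch introduces: kgnilnd_eager_recv() now
refuses to make an eager copy once kgn_neager_allocs reaches the kgn_eager_credits
tunable, and kgnilnd_consume_rx() decrements the counter when the copy is freed.
Because the atomic_read() check and the atomic_inc() are two separate operations,
concurrent receivers can briefly overshoot the cap; the patch accepts that slack
rather than paying for a locked reservation. The sketch below restates the pattern
in standalone user-space C so it can be compiled and tested; it assumes C11 atomics
in place of the kernel's atomic_t, and the helper names (eager_credit_get,
eager_credit_put) and the credit value are illustrative, not part of the patch.

/* User-space sketch of the eager-credit pattern above.  C11 atomics stand
 * in for the kernel's atomic_t; names and the cap value are illustrative. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int neager_allocs;        /* mirrors kgn_neager_allocs */
static const int eager_credits = 256;   /* mirrors *kgn_eager_credits */

/* Receive path: take a credit before allocating an eager copy. */
static int eager_credit_get(void)
{
        if (atomic_load(&neager_allocs) >= eager_credits)
                return -1;      /* caller maps this to -ENOMEM */
        /* check and increment are separate ops, so this is a soft cap:
         * concurrent callers may overshoot it by a few allocations */
        atomic_fetch_add(&neager_allocs, 1);
        return 0;
}

/* Consume path: return the credit when the eager copy is freed. */
static void eager_credit_put(void)
{
        atomic_fetch_sub(&neager_allocs, 1);
}

int main(void)
{
        if (eager_credit_get() == 0) {
                /* ... allocate, use, and free the eager message ... */
                eager_credit_put();
        }
        printf("outstanding eager allocs: %d\n",
               atomic_load(&neager_allocs));
        return 0;
}

An exact cap would need a compare-and-swap loop around the check and increment,
but for bounding how much memory eager receives can pin, the cheaper racy check
used here is the usual choice.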