From: Mr NeilBrown
Date: Wed, 4 Dec 2019 05:26:07 +0000 (+1100)
Subject: LU-13004 gnilnd: remove support for GNILND_BUF_VIRT_*
X-Git-Tag: 2.13.54~19
X-Git-Url: https://git.whamcloud.com/?a=commitdiff_plain;h=refs%2Fchanges%2F47%2F37847%2F8;p=fs%2Flustre-release.git

LU-13004 gnilnd: remove support for GNILND_BUF_VIRT_*

GNILND_BUF_VIRT_UNMAPPED and GNILND_BUF_VIRT_MAPPED are no longer set,
so remove them and any code that only runs when they are set.

gnd_map_nvirt and gnd_map_virtnob can go too.

Test-Parameters: trivial
Signed-off-by: Mr NeilBrown
Change-Id: If394bc2cf64f903ed4cdb1e1e80a2a017accd562
Reviewed-on: https://review.whamcloud.com/37847
Reviewed-by: Shaun Tancheff
Tested-by: jenkins
Tested-by: Maloo
Reviewed-by: James Simmons
Reviewed-by: Oleg Drokin
---

diff --git a/lnet/klnds/gnilnd/gnilnd.c b/lnet/klnds/gnilnd/gnilnd.c
index 4b24f8e..bbc3daa 100644
--- a/lnet/klnds/gnilnd/gnilnd.c
+++ b/lnet/klnds/gnilnd/gnilnd.c
@@ -2051,10 +2051,6 @@ kgnilnd_dev_fini(kgn_device_t *dev)
                  "%d physical mappings of %d pages still mapped\n",
                  dev->gnd_map_nphys, dev->gnd_map_physnop);
 
-        LASSERTF(dev->gnd_map_nvirt == 0 && dev->gnd_map_virtnob == 0,
-                 "%d virtual mappings of %llu bytes still mapped\n",
-                 dev->gnd_map_nvirt, dev->gnd_map_virtnob);
-
         LASSERTF(atomic_read(&dev->gnd_n_mdd) == 0 &&
                  atomic_read(&dev->gnd_n_mdd_held) == 0 &&
                  atomic64_read(&dev->gnd_nbytes_map) == 0,
diff --git a/lnet/klnds/gnilnd/gnilnd.h b/lnet/klnds/gnilnd/gnilnd.h
index 178684c..92d45155 100644
--- a/lnet/klnds/gnilnd/gnilnd.h
+++ b/lnet/klnds/gnilnd/gnilnd.h
@@ -193,8 +193,6 @@ static inline time_t cfs_duration_sec(long duration_jiffies)
 #define GNILND_BUF_IMMEDIATE_KIOV 2              /* immediate data */
 #define GNILND_BUF_PHYS_UNMAPPED  3              /* physical: not mapped yet */
 #define GNILND_BUF_PHYS_MAPPED    4              /* physical: mapped already */
-#define GNILND_BUF_VIRT_UNMAPPED  5              /* virtual: not mapped yet */
-#define GNILND_BUF_VIRT_MAPPED    6              /* virtual: mapped already */
 
 #define GNILND_TX_WAITING_REPLY      (1<<1)     /* expecting to receive reply */
 #define GNILND_TX_WAITING_COMPLETION (1<<2)     /* waiting for smsg_send to complete */
@@ -580,8 +578,6 @@ typedef struct kgn_device {
         atomic64_t      gnd_nbytes_map;   /* bytes of total GART maps - fma, tx, etc */
         __u32           gnd_map_nphys;    /* # TX phys mappings */
         __u32           gnd_map_physnop;  /* # TX phys pages mapped */
-        __u32           gnd_map_nvirt;    /* # TX virt mappings */
-        __u64           gnd_map_virtnob;  /* # TX virt bytes mapped */
         spinlock_t      gnd_map_lock;     /* serialize gnd_map_XXX */
         unsigned long   gnd_next_map;     /* next mapping attempt in jiffies */
         int             gnd_map_attempt;  /* last map attempt # */
@@ -1574,8 +1570,7 @@ kgnilnd_tx_del_state_locked(kgn_tx_t *tx, kgn_peer_t *peer,
 static inline int
 kgnilnd_tx_mapped(kgn_tx_t *tx)
 {
-        return (tx->tx_buftype == GNILND_BUF_VIRT_MAPPED ||
-                tx->tx_buftype == GNILND_BUF_PHYS_MAPPED);
+        return tx->tx_buftype == GNILND_BUF_PHYS_MAPPED;
 }
 
 static inline struct list_head *
diff --git a/lnet/klnds/gnilnd/gnilnd_cb.c b/lnet/klnds/gnilnd/gnilnd_cb.c
index 2d5fe4d..42b1cd3 100644
--- a/lnet/klnds/gnilnd/gnilnd_cb.c
+++ b/lnet/klnds/gnilnd/gnilnd_cb.c
@@ -904,12 +904,6 @@ kgnilnd_mem_add_map_list(kgn_device_t *dev, kgn_tx_t *tx)
                 dev->gnd_map_nphys++;
                 dev->gnd_map_physnop += tx->tx_phys_npages;
                 break;
-
-        case GNILND_BUF_VIRT_MAPPED:
-                bytes = tx->tx_nob;
-                dev->gnd_map_nvirt++;
-                dev->gnd_map_virtnob += tx->tx_nob;
-                break;
         }
 
         if (tx->tx_msg.gnm_type == GNILND_MSG_PUT_ACK ||
@@ -953,12 +947,6 @@ kgnilnd_mem_del_map_list(kgn_device_t *dev, kgn_tx_t *tx)
                 dev->gnd_map_nphys--;
                 dev->gnd_map_physnop -= tx->tx_phys_npages;
                 break;
-
-        case GNILND_BUF_VIRT_UNMAPPED:
-                bytes = tx->tx_nob;
-                dev->gnd_map_nvirt--;
-                dev->gnd_map_virtnob -= tx->tx_nob;
-                break;
         }
 
         if (tx->tx_msg.gnm_type == GNILND_MSG_PUT_ACK ||
@@ -1011,7 +999,6 @@ kgnilnd_map_buffer(kgn_tx_t *tx)
         case GNILND_BUF_IMMEDIATE:
         case GNILND_BUF_IMMEDIATE_KIOV:
         case GNILND_BUF_PHYS_MAPPED:
-        case GNILND_BUF_VIRT_MAPPED:
                 return 0;
 
         case GNILND_BUF_PHYS_UNMAPPED:
@@ -1024,41 +1011,16 @@ kgnilnd_map_buffer(kgn_tx_t *tx)
                  * - this needs to turn into a non-fatal error soon to allow
                  * GART resource, etc starvation handling */
                 if (rrc != GNI_RC_SUCCESS) {
-                        GNIDBG_TX(D_NET, tx, "Can't map %d pages: dev %d "
-                                "phys %u pp %u, virt %u nob %llu",
+                        GNIDBG_TX(D_NET, tx,
+                                  "Can't map %d pages: dev %d phys %u pp %u",
                                 tx->tx_phys_npages, dev->gnd_id,
-                                dev->gnd_map_nphys, dev->gnd_map_physnop,
-                                dev->gnd_map_nvirt, dev->gnd_map_virtnob);
+                                dev->gnd_map_nphys, dev->gnd_map_physnop);
                         RETURN(rrc == GNI_RC_ERROR_RESOURCE ? -ENOMEM : -EINVAL);
                 }
 
                 tx->tx_buftype = GNILND_BUF_PHYS_MAPPED;
                 kgnilnd_mem_add_map_list(dev, tx);
                 return 0;
-
-        case GNILND_BUF_VIRT_UNMAPPED:
-                rrc = kgnilnd_mem_register(dev->gnd_handle,
-                        (__u64)tx->tx_buffer, tx->tx_nob,
-                        NULL, flags, &tx->tx_map_key);
-                if (rrc != GNI_RC_SUCCESS) {
-                        GNIDBG_TX(D_NET, tx, "Can't map %u bytes: dev %d "
-                                "phys %u pp %u, virt %u nob %llu",
-                                tx->tx_nob, dev->gnd_id,
-                                dev->gnd_map_nphys, dev->gnd_map_physnop,
-                                dev->gnd_map_nvirt, dev->gnd_map_virtnob);
-                        RETURN(rrc == GNI_RC_ERROR_RESOURCE ? -ENOMEM : -EINVAL);
-                }
-
-                tx->tx_buftype = GNILND_BUF_VIRT_MAPPED;
-                kgnilnd_mem_add_map_list(dev, tx);
-                if (tx->tx_msg.gnm_type == GNILND_MSG_PUT_ACK ||
-                    tx->tx_msg.gnm_type == GNILND_MSG_GET_REQ) {
-                        atomic64_add(tx->tx_nob, &dev->gnd_rdmaq_bytes_out);
-                        GNIDBG_TX(D_NETTRACE, tx, "rdma ++ %d to %ld\n",
-                                tx->tx_nob, atomic64_read(&dev->gnd_rdmaq_bytes_out));
-                }
-
-                return 0;
         }
 }
 
@@ -1102,8 +1064,6 @@ kgnilnd_unmap_buffer(kgn_tx_t *tx, int error)
         /* code below relies on +1 relationship ... */
         BUILD_BUG_ON(GNILND_BUF_PHYS_MAPPED !=
                      (GNILND_BUF_PHYS_UNMAPPED + 1));
-        BUILD_BUG_ON(GNILND_BUF_VIRT_MAPPED !=
-                     (GNILND_BUF_VIRT_UNMAPPED + 1));
 
         switch (tx->tx_buftype) {
         default:
@@ -1112,7 +1072,6 @@ kgnilnd_unmap_buffer(kgn_tx_t *tx, int error)
         case GNILND_BUF_NONE:
         case GNILND_BUF_IMMEDIATE:
         case GNILND_BUF_PHYS_UNMAPPED:
-        case GNILND_BUF_VIRT_UNMAPPED:
                 break;
         case GNILND_BUF_IMMEDIATE_KIOV:
                 if (tx->tx_phys != NULL) {
@@ -1126,7 +1085,6 @@ kgnilnd_unmap_buffer(kgn_tx_t *tx, int error)
                 break;
 
         case GNILND_BUF_PHYS_MAPPED:
-        case GNILND_BUF_VIRT_MAPPED:
                 LASSERT(tx->tx_conn != NULL);
 
                 dev = tx->tx_conn->gnc_device;
@@ -4406,8 +4364,7 @@ kgnilnd_check_fma_rx(kgn_conn_t *conn)
                 if (tx == NULL)
                         break;
 
-                GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED ||
-                               tx->tx_buftype == GNILND_BUF_VIRT_MAPPED,
+                GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED,
                                "bad tx buftype %d", tx->tx_buftype);
 
                 kgnilnd_finalize_rx_done(tx, msg);
@@ -4425,8 +4382,7 @@ kgnilnd_check_fma_rx(kgn_conn_t *conn)
                 if (tx == NULL)
                         break;
 
-                GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED ||
-                               tx->tx_buftype == GNILND_BUF_VIRT_MAPPED,
+                GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED,
                                "bad tx buftype %d", tx->tx_buftype);
 
                 kgnilnd_complete_tx(tx, msg->gnm_u.completion.gncm_retval);
@@ -4438,8 +4394,7 @@ kgnilnd_check_fma_rx(kgn_conn_t *conn)
                 if (tx == NULL)
                         break;
 
-                GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED ||
-                               tx->tx_buftype == GNILND_BUF_VIRT_MAPPED,
+                GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED,
                                "bad tx buftype %d", tx->tx_buftype);
 
                 lnet_set_reply_msg_len(net->gnn_ni, tx->tx_lntmsg[1],
@@ -4453,8 +4408,7 @@ kgnilnd_check_fma_rx(kgn_conn_t *conn)
                 if (tx == NULL)
                         break;
 
-                GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED ||
-                               tx->tx_buftype == GNILND_BUF_VIRT_MAPPED,
+                GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED,
                                "bad tx buftype %d", tx->tx_buftype);
 
                 kgnilnd_finalize_rx_done(tx, msg);
@@ -4467,8 +4421,7 @@ kgnilnd_check_fma_rx(kgn_conn_t *conn)
                 if (tx == NULL)
                         break;
 
-                GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED ||
-                               tx->tx_buftype == GNILND_BUF_VIRT_MAPPED,
+                GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED,
                                "bad tx buftype %d", tx->tx_buftype);
 
                 kgnilnd_finalize_rx_done(tx, msg);
@@ -4480,8 +4433,7 @@ kgnilnd_check_fma_rx(kgn_conn_t *conn)
                 if (tx == NULL)
                         break;
 
-                GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED ||
-                               tx->tx_buftype == GNILND_BUF_VIRT_MAPPED,
+                GNITX_ASSERTF(tx, tx->tx_buftype == GNILND_BUF_PHYS_MAPPED,
                                "bad tx buftype %d", tx->tx_buftype);
 
                 kgnilnd_complete_tx(tx, msg->gnm_u.completion.gncm_retval);
@@ -4784,11 +4736,9 @@ kgnilnd_process_mapped_tx(kgn_device_t *dev)
                 } else {
                         GNIDBG_TX(log_retrans_level, tx,
                                 "transient map failure #%d %d pages/%d bytes phys %u@%u "
-                                "virt %u@%llu "
                                 "nq_map %d mdd# %d/%d GART %ld",
                                 dev->gnd_map_attempt, tx->tx_phys_npages, tx->tx_nob,
                                 dev->gnd_map_nphys, dev->gnd_map_physnop * PAGE_SIZE,
-                                dev->gnd_map_nvirt, dev->gnd_map_virtnob,
                                 atomic_read(&dev->gnd_nq_map),
                                 atomic_read(&dev->gnd_n_mdd), atomic_read(&dev->gnd_n_mdd_held),
                                 atomic64_read(&dev->gnd_nbytes_map));
diff --git a/lnet/klnds/gnilnd/gnilnd_proc.c b/lnet/klnds/gnilnd/gnilnd_proc.c
index b9210b8..a833026 100644
--- a/lnet/klnds/gnilnd/gnilnd_proc.c
+++ b/lnet/klnds/gnilnd/gnilnd_proc.c
@@ -235,8 +235,6 @@ kgnilnd_stats_seq_show(struct seq_file *sf, void *v)
                    "TX queued maps: %d\n"
                    "TX phys nmaps: %d\n"
                    "TX phys bytes: %lu\n"
-                   "TX virt nmaps: %d\n"
-                   "TX virt bytes: %llu\n"
                    "RDMAQ bytes_auth: %ld\n"
                    "RDMAQ bytes_left: %ld\n"
                    "RDMAQ nstalls: %d\n"
@@ -272,7 +270,6 @@ kgnilnd_stats_seq_show(struct seq_file *sf, void *v)
                    atomic64_read(&dev->gnd_nbytes_map),
                    atomic_read(&dev->gnd_nq_map),
                    dev->gnd_map_nphys, dev->gnd_map_physnop * PAGE_SIZE,
-                   dev->gnd_map_nvirt, dev->gnd_map_virtnob,
                    atomic64_read(&dev->gnd_rdmaq_bytes_out),
                    atomic64_read(&dev->gnd_rdmaq_bytes_ok),
                    atomic_read(&dev->gnd_rdmaq_nstalls),