X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lnet%2Fklnds%2Fgnilnd%2Fgnilnd.c;h=7e67339992250c7259e4be8e8bb6399bb7a1d650;hb=47d17a97a3decf242595a45dba8143bdb44ec5ec;hp=6c00370fb40c3eb6938e856686a915fc0176ac8b;hpb=381060a6244dfba4819fa81f2b928beb12a39350;p=fs%2Flustre-release.git
diff --git a/lnet/klnds/gnilnd/gnilnd.c b/lnet/klnds/gnilnd/gnilnd.c
index 6c00370..7e67339 100644
--- a/lnet/klnds/gnilnd/gnilnd.c
+++ b/lnet/klnds/gnilnd/gnilnd.c
@@ -1,6 +1,8 @@
 /*
 * Copyright (C) 2012 Cray, Inc.
 *
+ * Copyright (c) 2013, 2014, Intel Corporation.
+ *
 * Author: Nic Henke
 * Author: James Shimek
 *
@@ -24,7 +26,11 @@
 /* Primary entry points from LNET. There are no guarantees against reentrance. */
 lnd_t the_kgnilnd = {
+#ifdef CONFIG_CRAY_XT
 .lnd_type = GNILND,
+#else
+ .lnd_type = GNIIPLND,
+#endif
 .lnd_startup = kgnilnd_startup,
 .lnd_shutdown = kgnilnd_shutdown,
 .lnd_ctl = kgnilnd_ctl,
@@ -36,6 +42,52 @@ lnd_t the_kgnilnd = {
 kgn_data_t kgnilnd_data;
+int
+kgnilnd_thread_start(int(*fn)(void *arg), void *arg, char *name, int id)
+{
+ struct task_struct *thrd;
+
+ thrd = kthread_run(fn, arg, "%s_%02d", name, id);
+ if (IS_ERR(thrd))
+ return PTR_ERR(thrd);
+
+ atomic_inc(&kgnilnd_data.kgn_nthreads);
+ return 0;
+}
+
+/* bind scheduler threads to cpus */
+int
+kgnilnd_start_sd_threads(void)
+{
+ int cpu;
+ int i = 0;
+ struct task_struct *task;
+
+ for_each_online_cpu(cpu) {
+ /* don't bind to cpu 0 - all interrupts are processed here */
+ if (cpu == 0)
+ continue;
+
+ task = kthread_create(kgnilnd_scheduler, (void *)((long)i),
+ "%s_%02d", "kgnilnd_sd", i);
+ if (!IS_ERR(task)) {
+ kthread_bind(task, cpu);
+ wake_up_process(task);
+ } else {
+ CERROR("Can't spawn gnilnd scheduler[%d] %ld\n", i,
+ PTR_ERR(task));
+ return PTR_ERR(task);
+ }
+ atomic_inc(&kgnilnd_data.kgn_nthreads);
+
+ if (++i >= *kgnilnd_tunables.kgn_sched_threads) {
+ break;
+ }
+ }
+
+ return 0;
+}
+
 /* needs write_lock on kgn_peer_conn_lock */
 int
 kgnilnd_close_stale_conns_locked(kgn_peer_t *peer, kgn_conn_t *newconn)
@@ -199,10 +251,11 @@ kgnilnd_create_conn(kgn_conn_t **connp, kgn_device_t *dev)
 LIBCFS_ALLOC(conn->gnc_tx_ref_table, GNILND_MAX_MSG_ID * sizeof(void *));
 if (conn->gnc_tx_ref_table == NULL) {
 CERROR("Can't allocate conn tx_ref_table\n");
- rc = -ENOMEM;
- GOTO(failed, rc);
+ GOTO(failed, rc = -ENOMEM);
 }
+ mutex_init(&conn->gnc_smsg_mutex);
+ mutex_init(&conn->gnc_rdma_mutex);
 atomic_set(&conn->gnc_refcount, 1);
 atomic_set(&conn->gnc_reaper_noop, 0);
 atomic_set(&conn->gnc_sched_noop, 0);
@@ -231,8 +284,7 @@ kgnilnd_create_conn(kgn_conn_t **connp, kgn_device_t *dev)
 if (conn->gnc_cqid == 0) {
 CERROR("Could not allocate unique CQ ID for conn 0x%p\n", conn);
- rc = -E2BIG;
- GOTO(failed, rc);
+ GOTO(failed, rc = -E2BIG);
 }
 CDEBUG(D_NET, "alloc cqid %u for conn 0x%p\n",
@@ -251,10 +303,8 @@ kgnilnd_create_conn(kgn_conn_t **connp, kgn_device_t *dev)
 rrc = kgnilnd_ep_create(dev->gnd_handle, dev->gnd_snd_fma_cqh, &conn->gnc_ephandle);
 mutex_unlock(&dev->gnd_cq_mutex);
- if (rrc != GNI_RC_SUCCESS) {
- rc = -ENETDOWN;
- GOTO(failed, rc);
- }
+ if (rrc != GNI_RC_SUCCESS)
+ GOTO(failed, rc = -ENETDOWN);
 CDEBUG(D_NET, "created conn 0x%p ep_hndl 0x%p\n", conn, conn->gnc_ephandle);
@@ -458,7 +508,7 @@ kgnilnd_peer_alive(kgn_peer_t *peer)
 }
 void
-kgnilnd_peer_notify(kgn_peer_t *peer, int error)
+kgnilnd_peer_notify(kgn_peer_t *peer, int error, int alive)
 {
 int tell_lnet = 0;
 int nnets = 0;
@@ -487,10 +537,10 @@ kgnilnd_peer_notify(kgn_peer_t *peer, int error)
 peer, libcfs_nid2str(peer->gnp_nid), peer->gnp_connecting,
 conn, kgnilnd_data.kgn_in_reset, error);
- if ((peer->gnp_connecting == GNILND_PEER_IDLE) &&
+ if (((peer->gnp_connecting == GNILND_PEER_IDLE) &&
 (conn == NULL) && (!kgnilnd_data.kgn_in_reset) &&
- (!kgnilnd_conn_clean_errno(error))) {
+ (!kgnilnd_conn_clean_errno(error))) || alive) {
 tell_lnet = 1;
 }
@@ -554,8 +604,8 @@ kgnilnd_peer_notify(kgn_peer_t *peer, int error)
 peer, libcfs_nid2str(peer_nid), peer->gnp_last_alive,
 cfs_duration_sec(jiffies - peer->gnp_last_alive));
- lnet_notify(net->gnn_ni, peer_nid, 0, peer->gnp_last_alive);
-
+ lnet_notify(net->gnn_ni, peer_nid, alive,
+ peer->gnp_last_alive);
 kgnilnd_net_decref(net);
 }
@@ -804,8 +854,8 @@ kgnilnd_complete_closed_conn(kgn_conn_t *conn)
 /* I'm telling Mommy! - use peer_error if they initiated close */
 kgnilnd_peer_notify(conn->gnc_peer,
- conn->gnc_error == -ECONNRESET ? conn->gnc_peer_error
- : conn->gnc_error);
+ conn->gnc_error == -ECONNRESET ?
+ conn->gnc_peer_error : conn->gnc_error, 0);
 EXIT;
 }
@@ -914,7 +964,10 @@ return_out:
 * kgn_peer_conn_lock is held, we guarantee that nobody calls
 * kgnilnd_add_peer_locked without checking gnn_shutdown */
 int
-kgnilnd_create_peer_safe(kgn_peer_t **peerp, lnet_nid_t nid, kgn_net_t *net)
+kgnilnd_create_peer_safe(kgn_peer_t **peerp,
+ lnet_nid_t nid,
+ kgn_net_t *net,
+ int node_state)
 {
 kgn_peer_t *peer;
 int rc;
@@ -946,7 +999,7 @@ kgnilnd_create_peer_safe(kgn_peer_t **peerp, lnet_nid_t nid, kgn_net_t *net)
 return -ENOMEM;
 }
 peer->gnp_nid = nid;
- peer->gnp_down = GNILND_RCA_NODE_UP;
+ peer->gnp_down = node_state;
 /* translate from nid to nic addr & store */
 rc = kgnilnd_nid_to_nicaddrs(LNET_NIDADDR(nid), 1, &peer->gnp_host_id);
@@ -1053,6 +1106,8 @@ kgnilnd_add_purgatory_locked(kgn_conn_t *conn, kgn_peer_t *peer)
 CDEBUG(D_NET, "conn %p peer %p dev %p\n", conn, peer, conn->gnc_device);
+ LASSERTF(conn->gnc_in_purgatory == 0,
+ "Conn already in purgatory\n");
 conn->gnc_in_purgatory = 1;
 mbox = &conn->gnc_fma_blk->gnm_mbox_info[conn->gnc_mbox_id];
@@ -1159,7 +1214,7 @@ kgnilnd_release_purgatory_list(struct list_head *conn_list)
 * make sure we tell LNet - if this is from other context,
 * the checks in the function will prevent an errant
 * notification */
- kgnilnd_peer_notify(conn->gnc_peer, conn->gnc_error);
+ kgnilnd_peer_notify(conn->gnc_peer, conn->gnc_error, 0);
 list_for_each_entry_safe(gmp, gmpN, &conn->gnc_mdd_list, gmp_list) {
@@ -1334,11 +1389,14 @@ kgnilnd_add_peer(kgn_net_t *net, lnet_nid_t nid, kgn_peer_t **peerp)
 {
 kgn_peer_t *peer;
 int rc;
+ int node_state;
 ENTRY;
 if (nid == LNET_NID_ANY)
 return -EINVAL;
+ node_state = kgnilnd_get_node_state(LNET_NIDADDR(nid));
+
 /* NB - this will not block during normal operations -
 * the only writer of this is in the startup/shutdown path.
 */
 rc = down_read_trylock(&kgnilnd_data.kgn_net_rw_sem);
@@ -1346,7 +1404,7 @@ kgnilnd_add_peer(kgn_net_t *net, lnet_nid_t nid, kgn_peer_t **peerp)
 rc = -ESHUTDOWN;
 RETURN(rc);
 }
- rc = kgnilnd_create_peer_safe(&peer, nid, net);
+ rc = kgnilnd_create_peer_safe(&peer, nid, net, node_state);
 if (rc != 0) {
 up_read(&kgnilnd_data.kgn_net_rw_sem);
 RETURN(rc);
@@ -1513,9 +1571,6 @@ kgnilnd_del_conn_or_peer(kgn_net_t *net, lnet_nid_t nid, int command,
 write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
- /* release all of the souls found held in purgatory */
- kgnilnd_release_purgatory_list(&souls);
-
 /* nuke peer TX */
 kgnilnd_txlist_done(&zombies, error);
@@ -1537,7 +1592,8 @@ kgnilnd_del_conn_or_peer(kgn_net_t *net, lnet_nid_t nid, int command,
 atomic_read(&kgnilnd_data.kgn_npending_detach) ||
 atomic_read(&kgnilnd_data.kgn_npending_unlink)) {
- cfs_pause(cfs_time_seconds(1));
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1));
 i++;
 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
 "Waiting on %d peers %d closes %d detaches\n",
@@ -1607,8 +1663,8 @@ kgnilnd_get_conn_info(kgn_peer_t *peer,
 *device_id = conn->gnc_device->gnd_host_id;
 *peerstamp = conn->gnc_peerstamp;
- *tx_seq = conn->gnc_tx_seq;
- *rx_seq = conn->gnc_rx_seq;
+ *tx_seq = atomic_read(&conn->gnc_tx_seq);
+ *rx_seq = atomic_read(&conn->gnc_rx_seq);
 *fmaq_len = kgnilnd_count_list(&conn->gnc_fmaq);
 *nfma = atomic_read(&conn->gnc_nlive_fma);
 *nrdma = atomic_read(&conn->gnc_nlive_rdma);
@@ -1650,7 +1706,7 @@ kgnilnd_report_node_state(lnet_nid_t nid, int down)
 {
 int rc;
 kgn_peer_t *peer, *new_peer;
- CFS_LIST_HEAD(zombies);
+ LIST_HEAD(zombies);
 write_lock(&kgnilnd_data.kgn_peer_conn_lock);
 peer = kgnilnd_find_peer_locked(nid);
@@ -1732,13 +1788,10 @@ kgnilnd_report_node_state(lnet_nid_t nid, int down)
 * kgnilnd_tx_done */
 kgnilnd_txlist_done(&zombies, -ENETRESET);
-
- if (*kgnilnd_tunables.kgn_peer_health) {
- kgnilnd_peer_notify(peer, -ECONNRESET);
- }
+ kgnilnd_peer_notify(peer, -ECONNRESET, 0);
+ LCONSOLE_INFO("Received down event for nid %lld\n", nid);
+ }
- CDEBUG(D_INFO, "marking nid %lld %s\n", nid, down ?
"down" : "up"); return 0; } @@ -1869,7 +1922,10 @@ kgnilnd_query(lnet_ni_t *ni, lnet_nid_t nid, cfs_time_t *when) kgn_tx_t *tx; kgn_peer_t *peer = NULL; kgn_conn_t *conn = NULL; - lnet_process_id_t id = {.nid = nid, .pid = LUSTRE_SRV_LNET_PID}; + lnet_process_id_t id = { + .nid = nid, + .pid = LNET_PID_LUSTRE, + }; ENTRY; /* I expect to find him, so only take a read lock */ @@ -1945,12 +2001,11 @@ kgnilnd_dev_init(kgn_device_t *dev) cq_size = *kgnilnd_tunables.kgn_credits * 2 * 3; rrc = kgnilnd_cdm_create(dev->gnd_id, *kgnilnd_tunables.kgn_ptag, - GNILND_COOKIE, 0, + *kgnilnd_tunables.kgn_pkey, 0, &dev->gnd_domain); if (rrc != GNI_RC_SUCCESS) { CERROR("Can't create CDM %d (%d)\n", dev->gnd_id, rrc); - rc = -ENODEV; - GOTO(failed, rc); + GOTO(failed, rc = -ENODEV); } rrc = kgnilnd_cdm_attach(dev->gnd_domain, dev->gnd_id, @@ -1958,17 +2013,14 @@ kgnilnd_dev_init(kgn_device_t *dev) if (rrc != GNI_RC_SUCCESS) { CERROR("Can't attach CDM to device %d (%d)\n", dev->gnd_id, rrc); - rc = -ENODEV; - GOTO(failed, rc); + GOTO(failed, rc = -ENODEV); } /* a bit gross, but not much we can do - Aries Sim doesn't have * hardcoded NIC/NID that we can use */ rc = kgnilnd_setup_nic_translation(dev->gnd_host_id); - if (rc != 0) { - rc = -ENODEV; - GOTO(failed, rc); - } + if (rc != 0) + GOTO(failed, rc = -ENODEV); /* only dev 0 gets the errors - no need to reset the stack twice * - this works because we have a single PTAG, if we had more @@ -1982,8 +2034,7 @@ kgnilnd_dev_init(kgn_device_t *dev) if (rrc != GNI_RC_SUCCESS) { CERROR("Can't subscribe for errors on device %d: rc %d\n", dev->gnd_id, rrc); - rc = -ENODEV; - GOTO(failed, rc); + GOTO(failed, rc = -ENODEV); } rc = kgnilnd_set_quiesce_callback(dev->gnd_handle, @@ -1991,11 +2042,16 @@ kgnilnd_dev_init(kgn_device_t *dev) if (rc != GNI_RC_SUCCESS) { CERROR("Can't subscribe for quiesce callback on device %d: rc %d\n", dev->gnd_id, rrc); - rc = -ENODEV; - GOTO(failed, rc); + GOTO(failed, rc = -ENODEV); } } + rrc = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_IP, &kgnilnd_data.kgn_sock); + if (rrc < 0) { + CERROR("sock_create returned %d\n", rrc); + GOTO(failed, rrc); + } + rc = kgnilnd_nicaddr_to_nid(dev->gnd_host_id, &dev->gnd_nid); if (rc < 0) { /* log messages during startup */ @@ -2003,19 +2059,17 @@ kgnilnd_dev_init(kgn_device_t *dev) CERROR("couldn't translate host_id 0x%x to nid. 
rc %d\n", dev->gnd_host_id, rc); } - rc = -ESRCH; - GOTO(failed, rc); + GOTO(failed, rc = -ESRCH); } CDEBUG(D_NET, "NIC %x -> NID %d\n", dev->gnd_host_id, dev->gnd_nid); - rrc = kgnilnd_cq_create(dev->gnd_handle, cq_size, + rrc = kgnilnd_cq_create(dev->gnd_handle, *kgnilnd_tunables.kgn_credits, 0, kgnilnd_device_callback, dev->gnd_id, &dev->gnd_snd_rdma_cqh); if (rrc != GNI_RC_SUCCESS) { CERROR("Can't create rdma send cq size %u for device " "%d (%d)\n", cq_size, dev->gnd_id, rrc); - rc = -EINVAL; - GOTO(failed, rc); + GOTO(failed, rc = -EINVAL); } rrc = kgnilnd_cq_create(dev->gnd_handle, cq_size, @@ -2024,8 +2078,7 @@ kgnilnd_dev_init(kgn_device_t *dev) if (rrc != GNI_RC_SUCCESS) { CERROR("Can't create fma send cq size %u for device %d (%d)\n", cq_size, dev->gnd_id, rrc); - rc = -EINVAL; - GOTO(failed, rc); + GOTO(failed, rc = -EINVAL); } /* This one we size differently - overflows are possible and it needs to be @@ -2037,8 +2090,7 @@ kgnilnd_dev_init(kgn_device_t *dev) if (rrc != GNI_RC_SUCCESS) { CERROR("Can't create fma cq size %d for device %d (%d)\n", *kgnilnd_tunables.kgn_fma_cq_size, dev->gnd_id, rrc); - rc = -EINVAL; - GOTO(failed, rc); + GOTO(failed, rc = -EINVAL); } RETURN(0); @@ -2126,10 +2178,12 @@ kgnilnd_dev_fini(kgn_device_t *dev) dev->gnd_domain = NULL; } + if (kgnilnd_data.kgn_sock) + sock_release(kgnilnd_data.kgn_sock); + EXIT; } - int kgnilnd_base_startup(void) { struct timeval tv; @@ -2138,6 +2192,15 @@ int kgnilnd_base_startup(void) int i; kgn_device_t *dev; struct task_struct *thrd; + +#if defined(CONFIG_CRAY_XT) && !defined(CONFIG_CRAY_COMPUTE) + /* limit how much memory can be allocated for fma blocks in + * instances where many nodes need to reconnects at the same time */ + struct sysinfo si; + si_meminfo(&si); + kgnilnd_data.free_pages_limit = si.totalram/4; +#endif + ENTRY; LASSERTF(kgnilnd_data.kgn_init == GNILND_INIT_NOTHING, @@ -2145,6 +2208,7 @@ int kgnilnd_base_startup(void) /* zero pointers, flags etc */ memset(&kgnilnd_data, 0, sizeof(kgnilnd_data)); + kgnilnd_check_kgni_version(); /* CAVEAT EMPTOR: Every 'Fma' message includes the sender's NID and * a unique (for all time) connstamp so we can uniquely identify @@ -2167,7 +2231,7 @@ int kgnilnd_base_startup(void) INIT_LIST_HEAD(&dev->gnd_map_tx); INIT_LIST_HEAD(&dev->gnd_fma_buffs); mutex_init(&dev->gnd_cq_mutex); - sema_init(&dev->gnd_fmablk_sem, 1); + mutex_init(&dev->gnd_fmablk_mutex); spin_lock_init(&dev->gnd_fmablk_lock); init_waitqueue_head(&dev->gnd_waitq); init_waitqueue_head(&dev->gnd_dgram_waitq); @@ -2190,10 +2254,8 @@ int kgnilnd_base_startup(void) LIBCFS_ALLOC(dev->gnd_dgrams, sizeof(struct list_head) * *kgnilnd_tunables.kgn_peer_hash_size); - if (dev->gnd_dgrams == NULL) { - rc = -ENOMEM; - GOTO(failed, rc); - } + if (dev->gnd_dgrams == NULL) + GOTO(failed, rc = -ENOMEM); for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) { INIT_LIST_HEAD(&dev->gnd_dgrams[i]); @@ -2217,7 +2279,7 @@ int kgnilnd_base_startup(void) init_waitqueue_head(&kgnilnd_data.kgn_ruhroh_waitq); spin_lock_init(&kgnilnd_data.kgn_reaper_lock); - sema_init(&kgnilnd_data.kgn_quiesce_sem, 1); + mutex_init(&kgnilnd_data.kgn_quiesce_mutex); atomic_set(&kgnilnd_data.kgn_nquiesce, 0); atomic_set(&kgnilnd_data.kgn_npending_conns, 0); atomic_set(&kgnilnd_data.kgn_npending_unlink, 0); @@ -2228,17 +2290,15 @@ int kgnilnd_base_startup(void) /* OK to call kgnilnd_api_shutdown() to cleanup now */ kgnilnd_data.kgn_init = GNILND_INIT_DATA; - PORTAL_MODULE_USE; + try_module_get(THIS_MODULE); 
 rwlock_init(&kgnilnd_data.kgn_peer_conn_lock);
 LIBCFS_ALLOC(kgnilnd_data.kgn_peers, sizeof(struct list_head) * *kgnilnd_tunables.kgn_peer_hash_size);
- if (kgnilnd_data.kgn_peers == NULL) {
- rc = -ENOMEM;
- GOTO(failed, rc);
- }
+ if (kgnilnd_data.kgn_peers == NULL)
+ GOTO(failed, rc = -ENOMEM);
 for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
 INIT_LIST_HEAD(&kgnilnd_data.kgn_peers[i]);
@@ -2247,10 +2307,8 @@ int kgnilnd_base_startup(void)
 LIBCFS_ALLOC(kgnilnd_data.kgn_conns, sizeof(struct list_head) * *kgnilnd_tunables.kgn_peer_hash_size);
- if (kgnilnd_data.kgn_conns == NULL) {
- rc = -ENOMEM;
- GOTO(failed, rc);
- }
+ if (kgnilnd_data.kgn_conns == NULL)
+ GOTO(failed, rc = -ENOMEM);
 for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
 INIT_LIST_HEAD(&kgnilnd_data.kgn_conns[i]);
@@ -2259,68 +2317,49 @@ int kgnilnd_base_startup(void)
 LIBCFS_ALLOC(kgnilnd_data.kgn_nets, sizeof(struct list_head) * *kgnilnd_tunables.kgn_net_hash_size);
- if (kgnilnd_data.kgn_nets == NULL) {
- rc = -ENOMEM;
- GOTO(failed, rc);
- }
+ if (kgnilnd_data.kgn_nets == NULL)
+ GOTO(failed, rc = -ENOMEM);
 for (i = 0; i < *kgnilnd_tunables.kgn_net_hash_size; i++) {
 INIT_LIST_HEAD(&kgnilnd_data.kgn_nets[i]);
 }
 kgnilnd_data.kgn_mbox_cache =
- cfs_mem_cache_create("kgn_mbox_block",
- KMALLOC_MAX_SIZE,
- 0, /* offset */
- SLAB_HWCACHE_ALIGN); /* flags */
+ kmem_cache_create("kgn_mbox_block", KMALLOC_MAX_SIZE, 0,
+ SLAB_HWCACHE_ALIGN, NULL);
 if (kgnilnd_data.kgn_mbox_cache == NULL) {
 CERROR("Can't create slab for physical mbox blocks\n");
- rc = -ENOMEM;
- GOTO(failed, rc);
+ GOTO(failed, rc = -ENOMEM);
 }
 kgnilnd_data.kgn_rx_cache =
- cfs_mem_cache_create("kgn_rx_t",
- sizeof(kgn_rx_t),
- 0, /* offset */
- 0); /* flags */
+ kmem_cache_create("kgn_rx_t", sizeof(kgn_rx_t), 0, 0, NULL);
 if (kgnilnd_data.kgn_rx_cache == NULL) {
 CERROR("Can't create slab for kgn_rx_t descriptors\n");
- rc = -ENOMEM;
- GOTO(failed, rc);
+ GOTO(failed, rc = -ENOMEM);
 }
 kgnilnd_data.kgn_tx_cache =
- cfs_mem_cache_create("kgn_tx_t",
- sizeof(kgn_tx_t),
- 0, /* offset */
- 0); /* flags */
+ kmem_cache_create("kgn_tx_t", sizeof(kgn_tx_t), 0, 0, NULL);
 if (kgnilnd_data.kgn_tx_cache == NULL) {
 CERROR("Can't create slab for kgn_tx_t\n");
- rc = -ENOMEM;
- GOTO(failed, rc);
+ GOTO(failed, rc = -ENOMEM);
 }
 kgnilnd_data.kgn_tx_phys_cache =
- cfs_mem_cache_create("kgn_tx_phys",
- LNET_MAX_IOV * sizeof(gni_mem_segment_t),
- 0, /* offset */
- 0); /* flags */
+ kmem_cache_create("kgn_tx_phys",
+ LNET_MAX_IOV * sizeof(gni_mem_segment_t),
+ 0, 0, NULL);
 if (kgnilnd_data.kgn_tx_phys_cache == NULL) {
 CERROR("Can't create slab for kgn_tx_phys\n");
- rc = -ENOMEM;
- GOTO(failed, rc);
+ GOTO(failed, rc = -ENOMEM);
 }
 kgnilnd_data.kgn_dgram_cache =
- cfs_mem_cache_create("kgn_dgram_t",
- sizeof(kgn_dgram_t),
- 0, /* offset */
- 0); /* flags */
+ kmem_cache_create("kgn_dgram_t", sizeof(kgn_dgram_t), 0, 0, NULL);
 if (kgnilnd_data.kgn_dgram_cache == NULL) {
 CERROR("Can't create slab for outgoing datagrams\n");
- rc = -ENOMEM;
- GOTO(failed, rc);
+ GOTO(failed, rc = -ENOMEM);
 }
 /* allocate a MAX_IOV array of page pointers for each cpu */
@@ -2328,8 +2367,7 @@ int kgnilnd_base_startup(void)
 GFP_KERNEL);
 if (kgnilnd_data.kgn_cksum_map_pages == NULL) {
 CERROR("Can't allocate vmap cksum pages\n");
- rc = -ENOMEM;
- GOTO(failed, rc);
+ GOTO(failed, rc = -ENOMEM);
 }
 kgnilnd_data.kgn_cksum_npages = num_possible_cpus();
 memset(kgnilnd_data.kgn_cksum_map_pages, 0,
@@ -2340,8 +2378,7 @@ int kgnilnd_base_startup(void)
 GFP_KERNEL);
 if (kgnilnd_data.kgn_cksum_map_pages[i] == NULL) {
 CERROR("Can't allocate vmap cksum pages for cpu %d\n", i);
- rc = -ENOMEM;
- GOTO(failed, rc);
+ GOTO(failed, rc = -ENOMEM);
 }
 }
@@ -2357,16 +2394,14 @@ int kgnilnd_base_startup(void)
 kgnilnd_data.kgn_ndevs++;
 rc = kgnilnd_allocate_phys_fmablk(dev);
- if (rc) {
+ if (rc)
 GOTO(failed, rc);
- }
 }
 }
 if (kgnilnd_data.kgn_ndevs == 0) {
 CERROR("Can't initialise any GNI devices\n");
- rc = -ENODEV;
- GOTO(failed, rc);
+ GOTO(failed, rc = -ENODEV);
 }
 rc = kgnilnd_thread_start(kgnilnd_reaper, NULL, "kgnilnd_rpr", 0);
@@ -2395,13 +2430,20 @@ int kgnilnd_base_startup(void)
 }
 /* threads will load balance across devs as they are available */
- for (i = 0; i < *kgnilnd_tunables.kgn_sched_threads; i++) {
- rc = kgnilnd_thread_start(kgnilnd_scheduler, (void *)((long)i),
- "kgnilnd_sd", i);
- if (rc != 0) {
- CERROR("Can't spawn gnilnd scheduler[%d]: %d\n",
- i, rc);
+ if (*kgnilnd_tunables.kgn_thread_affinity) {
+ rc = kgnilnd_start_sd_threads();
+ if (rc != 0)
 GOTO(failed, rc);
+ } else {
+ for (i = 0; i < *kgnilnd_tunables.kgn_sched_threads; i++) {
+ rc = kgnilnd_thread_start(kgnilnd_scheduler,
+ (void *)((long)i),
+ "kgnilnd_sd", i);
+ if (rc != 0) {
+ CERROR("Can't spawn gnilnd scheduler[%d]: %d\n",
+ i, rc);
+ GOTO(failed, rc);
+ }
 }
 }
@@ -2432,8 +2474,6 @@ int kgnilnd_base_startup(void)
 }
 }
-
-
 /* flag everything initialised */
 kgnilnd_data.kgn_init = GNILND_INIT_ALL;
 /*****************************************************/
@@ -2450,7 +2490,7 @@ failed:
 void
 kgnilnd_base_shutdown(void)
 {
- int i;
+ int i, j;
 ENTRY;
 while (CFS_FAIL_TIMEOUT(CFS_FAIL_GNI_PAUSE_SHUTDOWN, 1)) {};
@@ -2460,10 +2500,30 @@ kgnilnd_base_shutdown(void)
 for (i = 0; i < kgnilnd_data.kgn_ndevs; i++) {
 kgn_device_t *dev = &kgnilnd_data.kgn_devices[i];
 kgnilnd_cancel_wc_dgrams(dev);
+ kgnilnd_cancel_dgrams(dev);
 kgnilnd_del_conn_or_peer(NULL, LNET_NID_ANY, GNILND_DEL_PEER, -ESHUTDOWN);
 kgnilnd_wait_for_canceled_dgrams(dev);
 }
+ /* We need to verify there are no conns left before we let the threads
+ * shut down otherwise we could clean up the peers but still have
+ * some outstanding conns due to orphaned datagram conns that are
+ * being cleaned up.
+ */
+ i = 2;
+ while (atomic_read(&kgnilnd_data.kgn_nconns) != 0) {
+ i++;
+
+ for(j = 0; j < kgnilnd_data.kgn_ndevs; ++j) {
+ kgn_device_t *dev = &kgnilnd_data.kgn_devices[j];
+ kgnilnd_schedule_device(dev);
+ }
+
+ CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
+ "Waiting for conns to be cleaned up %d\n",atomic_read(&kgnilnd_data.kgn_nconns));
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1));
+ }
 /* Peer state all cleaned up BEFORE setting shutdown, so threads don't
 * have to worry about shutdown races. NB connections may be created
 * while there are still active connds, but these will be temporary
@@ -2481,7 +2541,8 @@ kgnilnd_base_shutdown(void)
 i++;
 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
 "Waiting for ruhroh thread to terminate\n");
- cfs_pause(cfs_time_seconds(1));
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1));
 }
 /* Flag threads to terminate */
@@ -2491,7 +2552,7 @@ kgnilnd_base_shutdown(void)
 kgn_device_t *dev = &kgnilnd_data.kgn_devices[i];
 /* should clear all the MDDs */
- kgnilnd_unmap_phys_fmablk(dev);
+ kgnilnd_unmap_fma_blocks(dev);
 kgnilnd_schedule_device(dev);
 wake_up_all(&dev->gnd_dgram_waitq);
@@ -2503,7 +2564,8 @@ kgnilnd_base_shutdown(void)
 wake_up_all(&kgnilnd_data.kgn_reaper_waitq);
 spin_unlock(&kgnilnd_data.kgn_reaper_lock);
- kgnilnd_wakeup_rca_thread();
+ if (atomic_read(&kgnilnd_data.kgn_nthreads))
+ kgnilnd_wakeup_rca_thread();
 /* Wait for threads to exit */
 i = 2;
@@ -2512,7 +2574,8 @@ kgnilnd_base_shutdown(void)
 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
 "Waiting for %d threads to terminate\n", atomic_read(&kgnilnd_data.kgn_nthreads));
- cfs_pause(cfs_time_seconds(1));
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1));
 }
 LASSERTF(atomic_read(&kgnilnd_data.kgn_npeers) == 0,
@@ -2569,30 +2632,20 @@ kgnilnd_base_shutdown(void)
 kgnilnd_free_phys_fmablk(dev);
 }
- if (kgnilnd_data.kgn_mbox_cache != NULL) {
- i = cfs_mem_cache_destroy(kgnilnd_data.kgn_mbox_cache);
- LASSERTF(i == 0, "rc %d destroying kgn_mbox_cache\n", i);
- }
+ if (kgnilnd_data.kgn_mbox_cache != NULL)
+ kmem_cache_destroy(kgnilnd_data.kgn_mbox_cache);
- if (kgnilnd_data.kgn_rx_cache != NULL) {
- i = cfs_mem_cache_destroy(kgnilnd_data.kgn_rx_cache);
- LASSERTF(i == 0, "rc %d destroying kgn_rx_cache\n", i);
- }
+ if (kgnilnd_data.kgn_rx_cache != NULL)
+ kmem_cache_destroy(kgnilnd_data.kgn_rx_cache);
- if (kgnilnd_data.kgn_tx_cache != NULL) {
- i = cfs_mem_cache_destroy(kgnilnd_data.kgn_tx_cache);
- LASSERTF(i == 0, "rc %d destroying kgn_tx_cache\n", i);
- }
+ if (kgnilnd_data.kgn_tx_cache != NULL)
+ kmem_cache_destroy(kgnilnd_data.kgn_tx_cache);
- if (kgnilnd_data.kgn_tx_phys_cache != NULL) {
- i = cfs_mem_cache_destroy(kgnilnd_data.kgn_tx_phys_cache);
- LASSERTF(i == 0, "rc %d destroying kgn_tx_phys_cache\n", i);
- }
+ if (kgnilnd_data.kgn_tx_phys_cache != NULL)
+ kmem_cache_destroy(kgnilnd_data.kgn_tx_phys_cache);
- if (kgnilnd_data.kgn_dgram_cache != NULL) {
- i = cfs_mem_cache_destroy(kgnilnd_data.kgn_dgram_cache);
- LASSERTF(i == 0, "rc %d destroying kgn_dgram_cache\n", i);
- }
+ if (kgnilnd_data.kgn_dgram_cache != NULL)
+ kmem_cache_destroy(kgnilnd_data.kgn_dgram_cache);
 if (kgnilnd_data.kgn_cksum_map_pages != NULL) {
 for (i = 0; i < kgnilnd_data.kgn_cksum_npages; i++) {
@@ -2607,7 +2660,7 @@ kgnilnd_base_shutdown(void)
 atomic_read(&libcfs_kmemory));
 kgnilnd_data.kgn_init = GNILND_INIT_NOTHING;
- PORTAL_MODULE_UNUSE;
+ module_put(THIS_MODULE);
 EXIT;
 }
@@ -2630,14 +2683,13 @@ kgnilnd_startup(lnet_ni_t *ni)
 }
 /* Serialize with shutdown. */
- down(&kgnilnd_data.kgn_quiesce_sem);
+ mutex_lock(&kgnilnd_data.kgn_quiesce_mutex);
 LIBCFS_ALLOC(net, sizeof(*net));
 if (net == NULL) {
 CERROR("could not allocate net for new interface instance\n");
- rc = -ENOMEM; /* no need to cleanup the CDM... */
- GOTO(failed, rc);
+ GOTO(failed, rc = -ENOMEM);
 }
 INIT_LIST_HEAD(&net->gnn_list);
 ni->ni_data = net;
@@ -2661,8 +2713,7 @@ kgnilnd_startup(lnet_ni_t *ni)
 timeout);
 ni->ni_data = NULL;
 LIBCFS_FREE(net, sizeof(*net));
- rc = -EINVAL;
- GOTO(failed, rc);
+ GOTO(failed, rc = -EINVAL);
 } else
 ni->ni_peertimeout = timeout;
@@ -2699,10 +2750,10 @@ kgnilnd_startup(lnet_ni_t *ni)
 /* we need a separate thread to call probe_wait_by_id until
 * we get a function callback notifier from kgni */
- up(&kgnilnd_data.kgn_quiesce_sem);
+ mutex_unlock(&kgnilnd_data.kgn_quiesce_mutex);
 RETURN(0);
 failed:
- up(&kgnilnd_data.kgn_quiesce_sem);
+ mutex_unlock(&kgnilnd_data.kgn_quiesce_mutex);
 kgnilnd_shutdown(ni);
 RETURN(rc);
 }
@@ -2721,14 +2772,13 @@ kgnilnd_shutdown(lnet_ni_t *ni)
 "init %d\n", kgnilnd_data.kgn_init);
 /* Serialize with startup. */
- down(&kgnilnd_data.kgn_quiesce_sem);
+ mutex_lock(&kgnilnd_data.kgn_quiesce_mutex);
 CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n", atomic_read(&libcfs_kmemory));
 if (net == NULL) {
 CERROR("got NULL net for ni %p\n", ni);
- rc = -EINVAL;
- GOTO(out, rc);
+ GOTO(out, rc = -EINVAL);
 }
 LASSERTF(ni == net->gnn_ni,
@@ -2771,7 +2821,8 @@ kgnilnd_shutdown(lnet_ni_t *ni)
 "Waiting for %d references to clear on net %d\n",
 atomic_read(&net->gnn_refcount), net->gnn_netnum);
- cfs_pause(cfs_time_seconds(1));
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1));
 }
 /* release ref from kgnilnd_startup */
@@ -2805,9 +2856,8 @@ out:
 CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n", atomic_read(&libcfs_kmemory));
- up(&kgnilnd_data.kgn_quiesce_sem);
+ mutex_unlock(&kgnilnd_data.kgn_quiesce_mutex);
 EXIT;
- return;
 }
 void __exit