/*
* Copyright (C) 2012 Cray, Inc.
*
- * Author: Igor Gorodetsky <iogordet@cray.com>
* Author: Nic Henke <nic@cray.com>
* Author: James Shimek <jshimek@cray.com>
*
gni_smsg_attr_t smsg_attr;
unsigned long fmablk_vers;
- /* we'll use fmablk_vers and the gnd_fmablk_sem to gate access
+ /* we'll use fmablk_vers and the gnd_fmablk_mutex to gate access
* to this allocation code. Everyone will sample the version
- * before and after getting the semaphore. If it has changed,
+ * before and after getting the mutex. If it has changed,
* we'll bail out to check the lists again - this indicates that
* some sort of change was made to the lists and it is possible
* that there is a mailbox for us to find now. This should prevent
* a stampede of allocations by all of the threads
* that need a yet-to-be-allocated mailbox for a connection. */
fmablk_vers = atomic_read(&device->gnd_fmablk_vers);
- down(&device->gnd_fmablk_sem);
+ mutex_lock(&device->gnd_fmablk_mutex);
if (fmablk_vers != atomic_read(&device->gnd_fmablk_vers)) {
- /* version changed while we were waiting for semaphore,
+ /* version changed while we were waiting for the mutex,
* we'll recheck the lists assuming something nice happened */
- up(&device->gnd_fmablk_sem);
+ mutex_unlock(&device->gnd_fmablk_mutex);
return 0;
}
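+ /* For reference, a sketch of the writer side of this gate (assumed
+ * from the scheme described above, not introduced by this patch): any
+ * path that changes the block lists bumps the version while holding
+ * the mutex, invalidating the samples of concurrent threads:
+ *
+ *	mutex_lock(&device->gnd_fmablk_mutex);
+ *	spin_lock(&device->gnd_fmablk_lock);
+ *	list_add_tail(&fma_blk->gnm_bufflist, &device->gnd_fma_buffs);
+ *	atomic_inc(&device->gnd_fmablk_vers);
+ *	spin_unlock(&device->gnd_fmablk_lock);
+ *	mutex_unlock(&device->gnd_fmablk_mutex);
+ */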
* as reallocating them is tough if there is memory fragmentation */
if (use_phys) {
- fma_blk->gnm_block = cfs_mem_cache_alloc(kgnilnd_data.kgn_mbox_cache, CFS_ALLOC_ATOMIC);
+ fma_blk->gnm_block = kmem_cache_alloc(kgnilnd_data.kgn_mbox_cache, GFP_ATOMIC);
if (fma_blk->gnm_block == NULL) {
CNETERR("could not allocate physical SMSG mailbox memory\n");
rc = -ENOMEM;
spin_unlock(&device->gnd_fmablk_lock);
- up(&device->gnd_fmablk_sem);
+ mutex_unlock(&device->gnd_fmablk_mutex);
return 0;
if (fma_blk->gnm_state == GNILND_FMABLK_VIRT) {
LIBCFS_FREE(fma_blk->gnm_block, fma_blk->gnm_blk_size);
} else {
- cfs_mem_cache_free(kgnilnd_data.kgn_mbox_cache, fma_blk->gnm_block);
+ kmem_cache_free(kgnilnd_data.kgn_mbox_cache, fma_blk->gnm_block);
}
free_desc:
LIBCFS_FREE(fma_blk, sizeof(kgn_fma_memblock_t));
out:
- up(&device->gnd_fmablk_sem);
+ mutex_unlock(&device->gnd_fmablk_mutex);
return rc;
}
/* PHYS blocks don't get mapped */
if (fma_blk->gnm_state != GNILND_FMABLK_PHYS) {
atomic64_sub(fma_blk->gnm_blk_size, &dev->gnd_nbytes_map);
+ fma_blk->gnm_state = GNILND_FMABLK_IDLE;
} else if (kgnilnd_data.kgn_in_reset) {
/* in stack reset, clear MDD handle for PHYS blocks, as we'll
* re-use the fma_blk after reset so we don't have to drop/allocate
fma_blk, fma_blk->gnm_block, fma_blk->gnm_mbox_size);
if (fma_blk->gnm_state == GNILND_FMABLK_PHYS) {
- cfs_mem_cache_free(kgnilnd_data.kgn_mbox_cache, fma_blk->gnm_block);
+ kmem_cache_free(kgnilnd_data.kgn_mbox_cache, fma_blk->gnm_block);
} else {
LIBCFS_FREE(fma_blk->gnm_block, fma_blk->gnm_blk_size);
}
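+ /* Note: these two branches must stay paired with the allocator above -
+ * PHYS blocks come from the kgn_mbox_cache slab and go back via
+ * kmem_cache_free(), while VIRT blocks were LIBCFS_ALLOC'd and must be
+ * LIBCFS_FREE'd with the recorded gnm_blk_size. */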
mbox = &fma_blk->gnm_mbox_info[id];
mbox->mbx_create_conn_memset = jiffies;
+ mbox->mbx_nallocs++;
+ mbox->mbx_nallocs_total++;
/* zero mbox to remove any old data from our last use.
* this better be safe, if not our purgatory timers
"conn %p bit %d already cleared in fma_blk %p\n",
conn, id, fma_blk);
conn->gnc_fma_blk = NULL;
+ mbox->mbx_nallocs--;
}
if (CFS_FAIL_CHECK(CFS_FAIL_GNI_FMABLK_AVAIL)) {
int rc = 0;
kgn_fma_memblock_t *fma_blk;
- /* use sem to gate access to single thread, just in case */
- down(&device->gnd_fmablk_sem);
+ /* use mutex to gate access to single thread, just in case */
+ mutex_lock(&device->gnd_fmablk_mutex);
spin_lock(&device->gnd_fmablk_lock);
list_for_each_entry(fma_blk, &device->gnd_fma_buffs, gnm_bufflist) {
- if (fma_blk->gnm_state == GNILND_FMABLK_PHYS)
+ if (fma_blk->gnm_state == GNILND_FMABLK_PHYS) {
rc = kgnilnd_map_fmablk(device, fma_blk);
if (rc)
break;
+ }
}
spin_unlock(&device->gnd_fmablk_lock);
- up(&device->gnd_fmablk_sem);
+ mutex_unlock(&device->gnd_fmablk_mutex);
RETURN(rc);
}
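+/* Lock ordering note for the fmablk paths: gnd_fmablk_mutex is the
+ * outer, sleepable lock serializing allocate/map/unmap, and
+ * gnd_fmablk_lock is the inner spinlock protecting walks of
+ * gnd_fma_buffs; the mutex is never taken while the spinlock is held. */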
void
-kgnilnd_unmap_phys_fmablk(kgn_device_t *device)
+kgnilnd_unmap_fma_blocks(kgn_device_t *device)
{
kgn_fma_memblock_t *fma_blk;
- /* use sem to gate access to single thread, just in case */
- down(&device->gnd_fmablk_sem);
+ /* use mutex to gate access to single thread, just in case */
+ mutex_lock(&device->gnd_fmablk_mutex);
spin_lock(&device->gnd_fmablk_lock);
list_for_each_entry(fma_blk, &device->gnd_fma_buffs, gnm_bufflist) {
- if (fma_blk->gnm_state == GNILND_FMABLK_PHYS)
- kgnilnd_unmap_fmablk(device, fma_blk);
+ kgnilnd_unmap_fmablk(device, fma_blk);
}
spin_unlock(&device->gnd_fmablk_lock);
- up(&device->gnd_fmablk_sem);
+ mutex_unlock(&device->gnd_fmablk_mutex);
}
void
kgn_fma_memblock_t *fma_blk, *fma_blkN;
- /* use sem to gate access to single thread, just in case */
- down(&device->gnd_fmablk_sem);
+ /* use mutex to gate access to single thread, just in case */
+ mutex_lock(&device->gnd_fmablk_mutex);
spin_lock(&device->gnd_fmablk_lock);
}
spin_unlock(&device->gnd_fmablk_lock);
- up(&device->gnd_fmablk_sem);
+ mutex_unlock(&device->gnd_fmablk_mutex);
}
/* kgnilnd dgram nid->struct management */
{
kgn_dgram_t *dgram;
- dgram = cfs_mem_cache_alloc(kgnilnd_data.kgn_dgram_cache,
- CFS_ALLOC_ATOMIC);
+ dgram = kmem_cache_alloc(kgnilnd_data.kgn_dgram_cache, GFP_ATOMIC);
if (dgram == NULL)
return -ENOMEM;
atomic_inc(&dev->gnd_ndgrams);
- CDEBUG(D_MALLOC|D_NETTRACE, "slab-alloced 'dgram': %lu at %p.\n",
- sizeof(*dgram), dgram);
+ CDEBUG(D_MALLOC|D_NETTRACE, "slab-alloced 'dgram': %lu at %p %s ndgrams"
+ " %d\n",
+ sizeof(*dgram), dgram, kgnilnd_dgram_type2str(dgram),
+ atomic_read(&dev->gnd_ndgrams));
*dgramp = dgram;
return 0;
dgram->gndg_magic = 0x6f5a6b5f;
atomic_dec(&dev->gnd_ndgrams);
- cfs_mem_cache_free(kgnilnd_data.kgn_dgram_cache, dgram);
- CDEBUG(D_MALLOC|D_NETTRACE, "slab-freed 'dgram': %lu at %p.\n",
- sizeof(*dgram), dgram);
+ kmem_cache_free(kgnilnd_data.kgn_dgram_cache, dgram);
+ CDEBUG(D_MALLOC|D_NETTRACE, "slab-freed 'dgram': %lu at %p %s"
+ " ndgrams %d\n",
+ sizeof(*dgram), dgram, kgnilnd_dgram_type2str(dgram),
+ atomic_read(&dev->gnd_ndgrams));
}
int
RETURN(rc);
}
+/* The shutdown flag is set from the shutdown and stack reset threads. */
void
-kgnilnd_release_dgram(kgn_device_t *dev, kgn_dgram_t *dgram)
+kgnilnd_release_dgram(kgn_device_t *dev, kgn_dgram_t *dgram, int shutdown)
{
+ /* The conns of canceled active dgrams need to be put in purgatory so
+ * we don't reuse the mailbox */
+ if (unlikely(dgram->gndg_state == GNILND_DGRAM_CANCELED)) {
+ kgn_peer_t *peer;
+ kgn_conn_t *conn = dgram->gndg_conn;
+ lnet_nid_t nid = dgram->gndg_conn_out.gncr_dstnid;
+
+ dgram->gndg_state = GNILND_DGRAM_DONE;
+
+ /* During shutdown we've already removed the peer so we don't
+ * need to add a peer. During stack reset we don't care about
+ * MDDs since they are all released. */
+ if (!shutdown) {
+ write_lock(&kgnilnd_data.kgn_peer_conn_lock);
+ peer = kgnilnd_find_peer_locked(nid);
+
+ if (peer != NULL) {
+ CDEBUG(D_NET, "adding peer's conn with nid %s "
+ "to purgatory\n", libcfs_nid2str(nid));
+ kgnilnd_conn_addref(conn);
+ conn->gnc_peer = peer;
+ kgnilnd_peer_addref(peer);
+ kgnilnd_admin_addref(conn->gnc_peer->gnp_dirty_eps);
+ conn->gnc_state = GNILND_CONN_CLOSED;
+ list_add_tail(&conn->gnc_list,
+ &peer->gnp_conns);
+ kgnilnd_add_purgatory_locked(conn,
+ conn->gnc_peer);
+ kgnilnd_schedule_conn(conn);
+ }
+ write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
+ }
+ }
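+ /* The addrefs above hand the conn and its mailbox over to purgatory,
+ * keeping the mailbox out of circulation until the remote node is
+ * known to be done writing into it. */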
+
spin_lock(&dev->gnd_dgram_lock);
kgnilnd_cancel_dgram_locked(dgram);
spin_unlock(&dev->gnd_dgram_lock);
int rerc;
rerc = kgnilnd_post_dgram(dev, LNET_NID_ANY, GNILND_CONNREQ_REQ, 0);
- LASSERTF(rerc == 0,
- "error %d: dev %d could not repost wildcard datagram id 0x%p\n",
- rerc, dev->gnd_id, dgram);
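+ /* gnd_nwcdgrams is a repost deficit: each failed wildcard repost
+ * is recorded here and paid back by the dgram mover in
+ * kgnilnd_repost_wc_dgrams(), one decrement per successful post. */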
+ if (rerc != 0) {
+ /* We failed to repost the WC dgram for some reason;
+ * mark it so the repost system attempts to repost it */
+ kgnilnd_admin_addref(dev->gnd_nwcdgrams);
+ }
}
/* always free the old dgram */
dgram, kgnilnd_dgram_state2str(dgram));
LASSERTF(!list_empty(&dgram->gndg_list),
- "dgram 0x%p with bad list state %s\n",
- dgram, kgnilnd_dgram_state2str(dgram));
+ "dgram 0x%p with bad list state %s type %s\n",
+ dgram, kgnilnd_dgram_state2str(dgram),
+ kgnilnd_dgram_type2str(dgram));
/* now we know that the datagram structure is ok, so pull off list */
list_del_init(&dgram->gndg_list);
dgram->gndg_state = GNILND_DGRAM_PROCESSING;
}
- spin_unlock(&dev->gnd_dgram_lock);
-
- /* we now "own" this datagram */
-
LASSERTF(dgram->gndg_conn != NULL,
"dgram 0x%p with NULL conn\n", dgram);
(__u64)dgram, &post_state,
&remote_addr, &remote_id);
+ /* we now "own" this datagram */
+ spin_unlock(&dev->gnd_dgram_lock);
+
LASSERTF(grc != GNI_RC_NO_MATCH, "kgni lied! probe_by_id told us that"
" id "LPU64" was ready\n", readyid);
/* fake rc to mark that we've done something */
rc = 1;
} else {
- /* bring out your dead! */
- dgram->gndg_state = GNILND_DGRAM_DONE;
+ /* let kgnilnd_release_dgram take care of canceled dgrams */
+ if (dgram->gndg_state != GNILND_DGRAM_CANCELED) {
+ dgram->gndg_state = GNILND_DGRAM_DONE;
+ }
}
*dgramp = dgram;
probe_for_out:
- kgnilnd_release_dgram(dev, dgram);
+ kgnilnd_release_dgram(dev, dgram, 0);
RETURN(rc);
}
list_for_each_entry_safe(dg, dgN, &zombies, gndg_list) {
list_del_init(&dg->gndg_list);
- kgnilnd_release_dgram(dev, dg);
+ kgnilnd_release_dgram(dev, dg, 1);
}
RETURN(0);
}
+int
+kgnilnd_cancel_dgrams(kgn_device_t *dev)
+{
+ kgn_dgram_t *dg, *dgN;
+ int i;
+ ENTRY;
+
+ /* Cancel any outstanding non-wildcard datagrams regardless
+ * of which net they are on, as we are in base shutdown and
+ * don't care about connecting anymore.
+ */
+
+ LASSERTF(kgnilnd_data.kgn_wc_kill == 1, "We didn't get called from base shutdown\n");
+
+ spin_lock(&dev->gnd_dgram_lock);
+
+ for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
+ list_for_each_entry_safe(dg, dgN, &dev->gnd_dgrams[i], gndg_list) {
+ if (dg->gndg_type != GNILND_DGRAM_WC_REQ)
+ kgnilnd_cancel_dgram_locked(dg);
+ }
+ }
+
+ spin_unlock(&dev->gnd_dgram_lock);
+
+ RETURN(0);
+}
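+
+/* A sketch of the intended call site (hypothetical here - base shutdown
+ * itself is outside this patch hunk); kgn_wc_kill must already be set
+ * when the per-device cancel runs:
+ *
+ *	kgnilnd_data.kgn_wc_kill = 1;
+ *	for (i = 0; i < kgnilnd_data.kgn_ndevs; i++)
+ *		kgnilnd_cancel_dgrams(&kgnilnd_data.kgn_devices[i]);
+ */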
+
void
kgnilnd_wait_for_canceled_dgrams(kgn_device_t *dev)
{
rc = kgnilnd_probe_for_dgram(dev, &dgram);
if (rc != 0) {
/* if we got a valid dgram or one that is now done, clean up */
- kgnilnd_release_dgram(dev, dgram);
+ kgnilnd_release_dgram(dev, dgram, 1);
}
} while (atomic_read(&dev->gnd_canceled_dgrams));
}
/* assume this is a new peer - it makes locking cleaner when it isn't */
/* no holding kgn_net_rw_sem - already are at the kgnilnd_dgram_mover level */
- rc = kgnilnd_create_peer_safe(&new_peer, her_nid, NULL);
+ rc = kgnilnd_create_peer_safe(&new_peer, her_nid, NULL, GNILND_RCA_NODE_UP);
if (rc != 0) {
CERROR("Can't create peer for %s\n", libcfs_nid2str(her_nid));
return rc;
}
}
+ if (peer->gnp_down == GNILND_RCA_NODE_DOWN) {
+ CNETERR("Received connection request from %s that RCA thinks is"
+ " down.\n", libcfs_nid2str(her_nid));
+ peer->gnp_down = GNILND_RCA_NODE_UP;
+ }
+
nstale = kgnilnd_close_stale_conns_locked(peer, conn);
/* either way with peer (new or existing), we are ok with ref counts here as the
conn->gnc_last_tx = jiffies - (cfs_time_seconds(GNILND_TO2KA(conn->gnc_timeout)) * 2);
conn->gnc_state = GNILND_CONN_ESTABLISHED;
+ /* save the dgram type used to establish this connection */
+ conn->gnc_dgram_type = dgram->gndg_type;
+
/* refs are not transferred from dgram to tables, so increment to
* take ownership */
kgnilnd_conn_addref(conn);
lnet_notify(peer->gnp_net->gnn_ni, peer->gnp_nid,
1, cfs_time_current());
- /* schedule the conn to pick up any SMSG sent by peer before we could
- * process this dgram */
- kgnilnd_schedule_conn(conn);
-
/* drop our 'hold' ref */
kgnilnd_conn_decref(conn);
libcfs_nid2str(connreq->gncr_srcnid),
libcfs_nid2str(connreq->gncr_dstnid), errno, rc);
} else {
- rc = 0;
spin_lock(&dgram->gndg_conn->gnc_device->gnd_connd_lock);
if (list_empty(&peer->gnp_connd_list)) {
/* success! we found a peer and at least marked pending_nak */
write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
- return 0;
+ return rc;
}
int
orig_dstnid = dgram->gndg_conn_out.gncr_dstnid;
- kgnilnd_release_dgram(dev, dgram);
+ kgnilnd_release_dgram(dev, dgram, 0);
CDEBUG(D_NET, "cleaning up dgram to %s, rc %d\n",
libcfs_nid2str(orig_dstnid), rc);
DEFINE_WAIT(mover_done);
snprintf(name, sizeof(name), "kgnilnd_dgn_%02d", dev->gnd_id);
- cfs_daemonize(name);
cfs_block_allsigs();
/* all gnilnd threads need to run fairly urgently */
}
int
-kgnilnd_start_outbound_dgrams(kgn_device_t *dev)
+kgnilnd_start_outbound_dgrams(kgn_device_t *dev, unsigned long deadline)
{
int did_something = 0, rc;
kgn_peer_t *peer = NULL;
spin_lock(&dev->gnd_connd_lock);
/* Active connect - we added this in kgnilnd_launch_tx */
- while (!list_empty(&dev->gnd_connd_peers)) {
+ while (!list_empty(&dev->gnd_connd_peers) && time_before(jiffies, deadline)) {
peer = list_first_entry(&dev->gnd_connd_peers,
kgn_peer_t, gnp_connd_list);
RETURN(did_something);
}
+int
+kgnilnd_repost_wc_dgrams(kgn_device_t *dev)
+{
+ int did_something = 0, to_repost, i;
+ to_repost = atomic_read(&dev->gnd_nwcdgrams);
+ ENTRY;
+
+ for (i = 0; i < to_repost; ++i) {
+ int rerc;
+ rerc = kgnilnd_post_dgram(dev, LNET_NID_ANY, GNILND_CONNREQ_REQ, 0);
+ if (rerc == 0) {
+ kgnilnd_admin_decref(dev->gnd_nwcdgrams);
+ did_something += 1;
+ } else {
+ CDEBUG(D_NETERROR, "error %d: dev %d could not post wildcard datagram\n",
+ rerc, dev->gnd_id);
+ break;
+ }
+ }
+
+ RETURN(did_something);
+}
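+
+/* The loop above is bounded by the gnd_nwcdgrams sample taken on entry
+ * and breaks on the first failed post, so a wedged device cannot spin
+ * the dgram mover; whatever is still owed is retried on the mover's
+ * next pass. */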
+
static void
kgnilnd_dgram_poke_with_stick(unsigned long arg)
{
unsigned long next_purge_check = jiffies - 1;
unsigned long timeout;
struct timer_list timer;
+ unsigned long deadline = 0;
DEFINE_WAIT(wait);
snprintf(name, sizeof(name), "kgnilnd_dg_%02d", dev->gnd_id);
- cfs_daemonize(name);
cfs_block_allsigs();
/* all gnilnd threads need to run fairly urgently */
set_user_nice(current, *kgnilnd_tunables.kgn_nice);
/* we are ok not locking for these variables as the dgram waitq threads
* will block both due to tying up net (kgn_shutdown) and the completion
* event for the dgram_waitq (kgn_quiesce_trigger) */
-
+ deadline = jiffies + cfs_time_seconds(*kgnilnd_tunables.kgn_dgram_timeout);
while (!kgnilnd_data.kgn_shutdown) {
/* Safe: kgn_shutdown only set when quiescent */
up_read(&kgnilnd_data.kgn_net_rw_sem);
+ CFS_FAIL_TIMEOUT(CFS_FAIL_GNI_DGRAM_DEADLINE,
+ (*kgnilnd_tunables.kgn_dgram_timeout + 1));
/* start new outbound dgrams */
- did_something += kgnilnd_start_outbound_dgrams(dev);
+ did_something += kgnilnd_start_outbound_dgrams(dev, deadline);
/* find dead dgrams */
if (time_after_eq(jiffies, next_purge_check)) {
cfs_time_seconds(kgnilnd_data.kgn_new_min_timeout / 4);
}
+ did_something += kgnilnd_repost_wc_dgrams(dev);
+
/* careful with the jiffy wrap... */
timeout = (long)(next_purge_check - jiffies);
CDEBUG(D_INFO, "did %d timeout %lu next %lu jiffies %lu\n",
did_something, timeout, next_purge_check, jiffies);
- if (did_something || timeout <= 0) {
+ if ((did_something || timeout <= 0) && time_before(jiffies, deadline)) {
did_something = 0;
continue;
}
/* last second chance for others to poke us */
did_something += xchg(&dev->gnd_dgram_ready, GNILND_DGRAM_IDLE);
- /* check flag variables before comitting */
- if (!did_something &&
+ /* check flag variables before committing even if we did something;
+ * if we are past the deadline, call schedule */
+ if ((!did_something || time_after(jiffies, deadline)) &&
!kgnilnd_data.kgn_shutdown &&
!kgnilnd_data.kgn_quiesce_trigger) {
CDEBUG(D_INFO, "schedule timeout %ld (%lu sec)\n",
wake_up_all(&dev->gnd_dgping_waitq);
schedule();
CDEBUG(D_INFO, "awake after schedule\n");
+ deadline = jiffies + cfs_time_seconds(*kgnilnd_tunables.kgn_dgram_timeout);
}
del_singleshot_timer_sync(&timer);
kgnilnd_thread_fini();
return 0;
}
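+
+/* The deadline bounds each pass of the dgram mover: outbound posting
+ * stops once it expires, the did_something fast path no longer skips
+ * schedule() past it, and it is re-armed from kgn_dgram_timeout on
+ * every wakeup. */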
-