Whamcloud - gitweb
LU-4588 code: replace semaphores with mutexes
[fs/lustre-release.git] / lnet / klnds / gnilnd / gnilnd_conn.c
index 38aee5b..4cf2dd2 100644 (file)
@@ -1,7 +1,6 @@
 /*
  * Copyright (C) 2012 Cray, Inc.
  *
- *   Author: Igor Gorodetsky <iogordet@cray.com>
  *   Author: Nic Henke <nic@cray.com>
  *   Author: James Shimek <jshimek@cray.com>
  *
@@ -80,9 +79,9 @@ kgnilnd_alloc_fmablk(kgn_device_t *device, int use_phys)
        gni_smsg_attr_t         smsg_attr;
        unsigned long           fmablk_vers;
 
-       /* we'll use fmablk_vers and the gnd_fmablk_sem to gate access
+       /* we'll use fmablk_vers and the gnd_fmablk_mutex to gate access
         * to this allocation code. Everyone will sample the version
-        * before and after getting the semaphore. If it has changed,
+        * before and after getting the mutex. If it has changed,
         * we'll bail out to check the lists again - this indicates that
         * some sort of change was made to the lists and it is possible
         * that there is a mailbox for us to find now. This should prevent
@@ -90,12 +89,12 @@ kgnilnd_alloc_fmablk(kgn_device_t *device, int use_phys)
         * that need a yet-to-be-allocated mailbox for a connection. */
 
        fmablk_vers = atomic_read(&device->gnd_fmablk_vers);
-       down(&device->gnd_fmablk_sem);
+       mutex_lock(&device->gnd_fmablk_mutex);
 
        if (fmablk_vers != atomic_read(&device->gnd_fmablk_vers)) {
-               /* version changed while we were waiting for semaphore,
+               /* version changed while we were waiting for the mutex,
                 * we'll recheck the lists assuming something nice happened */
-               up(&device->gnd_fmablk_sem);
+               mutex_unlock(&device->gnd_fmablk_mutex);
                return 0;
        }
 
@@ -126,7 +125,7 @@ kgnilnd_alloc_fmablk(kgn_device_t *device, int use_phys)
         * as reallocating them is tough if there is memory fragmentation */
 
        if (use_phys) {
-               fma_blk->gnm_block = cfs_mem_cache_alloc(kgnilnd_data.kgn_mbox_cache, CFS_ALLOC_ATOMIC);
+               fma_blk->gnm_block = kmem_cache_alloc(kgnilnd_data.kgn_mbox_cache, GFP_ATOMIC);
                if (fma_blk->gnm_block == NULL) {
                        CNETERR("could not allocate physical SMSG mailbox memory\n");
                        rc = -ENOMEM;
@@ -204,7 +203,7 @@ kgnilnd_alloc_fmablk(kgn_device_t *device, int use_phys)
 
        spin_unlock(&device->gnd_fmablk_lock);
 
-       up(&device->gnd_fmablk_sem);
+       mutex_unlock(&device->gnd_fmablk_mutex);
 
        return 0;
 
@@ -216,12 +215,12 @@ free_blk:
        if (fma_blk->gnm_state == GNILND_FMABLK_VIRT) {
                LIBCFS_FREE(fma_blk->gnm_block, fma_blk->gnm_blk_size);
        } else {
-               cfs_mem_cache_free(kgnilnd_data.kgn_mbox_cache, fma_blk->gnm_block);
+               kmem_cache_free(kgnilnd_data.kgn_mbox_cache, fma_blk->gnm_block);
        }
 free_desc:
        LIBCFS_FREE(fma_blk, sizeof(kgn_fma_memblock_t));
 out:
-       up(&device->gnd_fmablk_sem);
+       mutex_unlock(&device->gnd_fmablk_mutex);
        return rc;
 }
 
@@ -263,6 +262,7 @@ kgnilnd_unmap_fmablk(kgn_device_t *dev, kgn_fma_memblock_t *fma_blk)
        /* PHYS blocks don't get mapped */
        if (fma_blk->gnm_state != GNILND_FMABLK_PHYS) {
                atomic64_sub(fma_blk->gnm_blk_size, &dev->gnd_nbytes_map);
+               fma_blk->gnm_state = GNILND_FMABLK_IDLE;
        } else if (kgnilnd_data.kgn_in_reset) {
                /* in stack reset, clear MDD handle for PHYS blocks, as we'll
                 * re-use the fma_blk after reset so we don't have to drop/allocate
@@ -310,7 +310,7 @@ kgnilnd_free_fmablk_locked(kgn_device_t *dev, kgn_fma_memblock_t *fma_blk)
                fma_blk, fma_blk->gnm_block, fma_blk->gnm_mbox_size);
 
        if (fma_blk->gnm_state == GNILND_FMABLK_PHYS) {
-               cfs_mem_cache_free(kgnilnd_data.kgn_mbox_cache, fma_blk->gnm_block);
+               kmem_cache_free(kgnilnd_data.kgn_mbox_cache, fma_blk->gnm_block);
        } else {
                LIBCFS_FREE(fma_blk->gnm_block, fma_blk->gnm_blk_size);
        }
@@ -388,6 +388,8 @@ kgnilnd_find_free_mbox(kgn_conn_t *conn)
 
                mbox = &fma_blk->gnm_mbox_info[id];
                mbox->mbx_create_conn_memset = jiffies;
+               mbox->mbx_nallocs++;
+               mbox->mbx_nallocs_total++;
 
                /* zero mbox to remove any old data from our last use.
                 * this better be safe, if not our purgatory timers
@@ -508,6 +510,7 @@ kgnilnd_release_mbox(kgn_conn_t *conn, int purgatory_hold)
                        "conn %p bit %d already cleared in fma_blk %p\n",
                         conn, id, fma_blk);
                conn->gnc_fma_blk = NULL;
+               mbox->mbx_nallocs--;
        }
 
        if (CFS_FAIL_CHECK(CFS_FAIL_GNI_FMABLK_AVAIL)) {
@@ -581,42 +584,42 @@ kgnilnd_map_phys_fmablk(kgn_device_t *device)
        int                     rc = 0;
        kgn_fma_memblock_t     *fma_blk;
 
-       /* use sem to gate access to single thread, just in case */
-       down(&device->gnd_fmablk_sem);
+       /* use mutex to gate access to single thread, just in case */
+       mutex_lock(&device->gnd_fmablk_mutex);
 
        spin_lock(&device->gnd_fmablk_lock);
 
        list_for_each_entry(fma_blk, &device->gnd_fma_buffs, gnm_bufflist) {
-               if (fma_blk->gnm_state == GNILND_FMABLK_PHYS)
+               if (fma_blk->gnm_state == GNILND_FMABLK_PHYS) {
                        rc = kgnilnd_map_fmablk(device, fma_blk);
                        if (rc)
                                break;
+               }
        }
        spin_unlock(&device->gnd_fmablk_lock);
 
-       up(&device->gnd_fmablk_sem);
+       mutex_unlock(&device->gnd_fmablk_mutex);
 
        RETURN(rc);
 }
 
 void
-kgnilnd_unmap_phys_fmablk(kgn_device_t *device)
+kgnilnd_unmap_fma_blocks(kgn_device_t *device)
 {
 
        kgn_fma_memblock_t      *fma_blk;
 
-       /* use sem to gate access to single thread, just in case */
-       down(&device->gnd_fmablk_sem);
+       /* use mutex to gate access to single thread, just in case */
+       mutex_lock(&device->gnd_fmablk_mutex);
 
        spin_lock(&device->gnd_fmablk_lock);
 
        list_for_each_entry(fma_blk, &device->gnd_fma_buffs, gnm_bufflist) {
-               if (fma_blk->gnm_state == GNILND_FMABLK_PHYS)
-                       kgnilnd_unmap_fmablk(device, fma_blk);
+               kgnilnd_unmap_fmablk(device, fma_blk);
        }
        spin_unlock(&device->gnd_fmablk_lock);
 
-       up(&device->gnd_fmablk_sem);
+       mutex_unlock(&device->gnd_fmablk_mutex);
 }
 
 void
@@ -625,8 +628,8 @@ kgnilnd_free_phys_fmablk(kgn_device_t *device)
 
        kgn_fma_memblock_t      *fma_blk, *fma_blkN;
 
-       /* use sem to gate access to single thread, just in case */
-       down(&device->gnd_fmablk_sem);
+       /* use mutex to gate access to single thread, just in case */
+       mutex_lock(&device->gnd_fmablk_mutex);
 
        spin_lock(&device->gnd_fmablk_lock);
 
@@ -636,7 +639,7 @@ kgnilnd_free_phys_fmablk(kgn_device_t *device)
        }
        spin_unlock(&device->gnd_fmablk_lock);
 
-       up(&device->gnd_fmablk_sem);
+       mutex_unlock(&device->gnd_fmablk_mutex);
 }
 
 /* kgnilnd dgram nid->struct managment */
@@ -922,8 +925,7 @@ kgnilnd_alloc_dgram(kgn_dgram_t **dgramp, kgn_device_t *dev, kgn_dgram_type_t ty
 {
        kgn_dgram_t         *dgram;
 
-       dgram = cfs_mem_cache_alloc(kgnilnd_data.kgn_dgram_cache,
-                                   CFS_ALLOC_ATOMIC);
+       dgram = kmem_cache_alloc(kgnilnd_data.kgn_dgram_cache, GFP_ATOMIC);
        if (dgram == NULL)
                return -ENOMEM;
 
@@ -937,8 +939,10 @@ kgnilnd_alloc_dgram(kgn_dgram_t **dgramp, kgn_device_t *dev, kgn_dgram_type_t ty
 
        atomic_inc(&dev->gnd_ndgrams);
 
-       CDEBUG(D_MALLOC|D_NETTRACE, "slab-alloced 'dgram': %lu at %p.\n",
-              sizeof(*dgram), dgram);
+       CDEBUG(D_MALLOC|D_NETTRACE, "slab-alloced 'dgram': %lu at %p %s ndgrams"
+               " %d\n",
+               sizeof(*dgram), dgram, kgnilnd_dgram_type2str(dgram),
+               atomic_read(&dev->gnd_ndgrams));
 
        *dgramp = dgram;
        return 0;
@@ -1149,9 +1153,11 @@ kgnilnd_free_dgram(kgn_device_t *dev, kgn_dgram_t *dgram)
        dgram->gndg_magic = 0x6f5a6b5f;
        atomic_dec(&dev->gnd_ndgrams);
 
-       cfs_mem_cache_free(kgnilnd_data.kgn_dgram_cache, dgram);
-       CDEBUG(D_MALLOC|D_NETTRACE, "slab-freed 'dgram': %lu at %p.\n",
-              sizeof(*dgram), dgram);
+       kmem_cache_free(kgnilnd_data.kgn_dgram_cache, dgram);
+       CDEBUG(D_MALLOC|D_NETTRACE, "slab-freed 'dgram': %lu at %p %s"
+              " ndgrams %d\n",
+              sizeof(*dgram), dgram, kgnilnd_dgram_type2str(dgram),
+              atomic_read(&dev->gnd_ndgrams));
 }
 
 int
@@ -1302,9 +1308,44 @@ post_failed:
        RETURN(rc);
 }
 
+/* The shutdown flag is set from the shutdown and stack reset threads. */
 void
-kgnilnd_release_dgram(kgn_device_t *dev, kgn_dgram_t *dgram)
+kgnilnd_release_dgram(kgn_device_t *dev, kgn_dgram_t *dgram, int shutdown)
 {
+       /* The conns of canceled active dgrams need to be put in purgatory so
+        * we don't reuse the mailbox */
+       if (unlikely(dgram->gndg_state == GNILND_DGRAM_CANCELED)) {
+               kgn_peer_t *peer;
+               kgn_conn_t *conn = dgram->gndg_conn;
+               lnet_nid_t nid = dgram->gndg_conn_out.gncr_dstnid;
+
+               dgram->gndg_state = GNILND_DGRAM_DONE;
+
+               /* During shutdown we've already removed the peer so we don't
+                * need to add a peer. During stack reset we don't care about
+                * MDDs since they are all released. */
+               if (!shutdown) {
+                       write_lock(&kgnilnd_data.kgn_peer_conn_lock);
+                       peer = kgnilnd_find_peer_locked(nid);
+
+                       if (peer != NULL) {
+                               CDEBUG(D_NET, "adding peer's conn with nid %s "
+                                       "to purgatory\n", libcfs_nid2str(nid));
+                               kgnilnd_conn_addref(conn);
+                               conn->gnc_peer = peer;
+                               kgnilnd_peer_addref(peer);
+                               kgnilnd_admin_addref(conn->gnc_peer->gnp_dirty_eps);
+                               conn->gnc_state = GNILND_CONN_CLOSED;
+                               list_add_tail(&conn->gnc_list,
+                                             &peer->gnp_conns);
+                               kgnilnd_add_purgatory_locked(conn,
+                                                            conn->gnc_peer);
+                               kgnilnd_schedule_conn(conn);
+                       }
+                       write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
+               }
+       }
+
        spin_lock(&dev->gnd_dgram_lock);
        kgnilnd_cancel_dgram_locked(dgram);
        spin_unlock(&dev->gnd_dgram_lock);
@@ -1326,9 +1367,11 @@ kgnilnd_release_dgram(kgn_device_t *dev, kgn_dgram_t *dgram)
                        int     rerc;
 
                        rerc = kgnilnd_post_dgram(dev, LNET_NID_ANY, GNILND_CONNREQ_REQ, 0);
-                       LASSERTF(rerc == 0,
-                               "error %d: dev %d could not repost wildcard datagram id 0x%p\n",
-                               rerc, dev->gnd_id, dgram);
+                       if (rerc != 0) {
+                               /* We failed to repost the WC dgram for some reason
+                                * mark it so the repost system attempts to repost */
+                               kgnilnd_admin_addref(dev->gnd_nwcdgrams);
+                       }
                }
 
                /* always free the old dgram */
@@ -1376,8 +1419,9 @@ kgnilnd_probe_for_dgram(kgn_device_t *dev, kgn_dgram_t **dgramp)
                 dgram, kgnilnd_dgram_state2str(dgram));
 
        LASSERTF(!list_empty(&dgram->gndg_list),
-                "dgram 0x%p with bad list state %s\n",
-                dgram, kgnilnd_dgram_state2str(dgram));
+                "dgram 0x%p with bad list state %s type %s\n",
+                dgram, kgnilnd_dgram_state2str(dgram),
+                kgnilnd_dgram_type2str(dgram));
 
        /* now we know that the datagram structure is ok, so pull off list */
        list_del_init(&dgram->gndg_list);
@@ -1389,10 +1433,6 @@ kgnilnd_probe_for_dgram(kgn_device_t *dev, kgn_dgram_t **dgramp)
                dgram->gndg_state = GNILND_DGRAM_PROCESSING;
        }
 
-       spin_unlock(&dev->gnd_dgram_lock);
-
-       /* we now "own" this datagram */
-
        LASSERTF(dgram->gndg_conn != NULL,
                "dgram 0x%p with NULL conn\n", dgram);
 
@@ -1400,6 +1440,9 @@ kgnilnd_probe_for_dgram(kgn_device_t *dev, kgn_dgram_t **dgramp)
                                             (__u64)dgram, &post_state,
                                             &remote_addr, &remote_id);
 
+       /* we now "own" this datagram */
+       spin_unlock(&dev->gnd_dgram_lock);
+
        LASSERTF(grc != GNI_RC_NO_MATCH, "kgni lied! probe_by_id told us that"
                 " id "LPU64" was ready\n", readyid);
 
@@ -1429,8 +1472,10 @@ kgnilnd_probe_for_dgram(kgn_device_t *dev, kgn_dgram_t **dgramp)
                /* fake rc to mark that we've done something */
                rc = 1;
        } else {
-               /* bring out your dead! */
-               dgram->gndg_state = GNILND_DGRAM_DONE;
+               /* let kgnilnd_release_dgram take care of canceled dgrams */
+               if (dgram->gndg_state != GNILND_DGRAM_CANCELED) {
+                       dgram->gndg_state = GNILND_DGRAM_DONE;
+               }
        }
 
        *dgramp = dgram;
@@ -1438,7 +1483,7 @@ kgnilnd_probe_for_dgram(kgn_device_t *dev, kgn_dgram_t **dgramp)
 
 probe_for_out:
 
-       kgnilnd_release_dgram(dev, dgram);
+       kgnilnd_release_dgram(dev, dgram, 0);
        RETURN(rc);
 }
 
@@ -1545,12 +1590,41 @@ kgnilnd_cancel_wc_dgrams(kgn_device_t *dev)
 
        list_for_each_entry_safe(dg, dgN, &zombies, gndg_list) {
                list_del_init(&dg->gndg_list);
-               kgnilnd_release_dgram(dev, dg);
+               kgnilnd_release_dgram(dev, dg, 1);
        }
        RETURN(0);
 
 }
 
+int
+kgnilnd_cancel_dgrams(kgn_device_t *dev)
+{
+       kgn_dgram_t *dg, *dgN;
+       int i;
+       ENTRY;
+
+       /* Cancel any outstanding non wildcard datagrams regardless
+        * of which net they are on as we are in base shutdown and
+        * don't care about connecting anymore.
+        */
+
+       LASSERTF(kgnilnd_data.kgn_wc_kill == 1, "We didn't get called from base shutdown\n");
+
+       spin_lock(&dev->gnd_dgram_lock);
+
+       for (i = 0; i < (*kgnilnd_tunables.kgn_peer_hash_size -1); i++) {
+               list_for_each_entry_safe(dg, dgN, &dev->gnd_dgrams[i], gndg_list) {
+                       if (dg->gndg_type != GNILND_DGRAM_WC_REQ)
+                               kgnilnd_cancel_dgram_locked(dg);
+               }
+       }
+
+       spin_unlock(&dev->gnd_dgram_lock);
+
+       RETURN(0);
+}
+
+
 void
 kgnilnd_wait_for_canceled_dgrams(kgn_device_t *dev)
 {
@@ -1592,7 +1666,7 @@ kgnilnd_wait_for_canceled_dgrams(kgn_device_t *dev)
                rc = kgnilnd_probe_for_dgram(dev, &dgram);
                if (rc != 0) {
                        /* if we got a valid dgram or one that is now done, clean up */
-                       kgnilnd_release_dgram(dev, dgram);
+                       kgnilnd_release_dgram(dev, dgram, 1);
                }
        } while (atomic_read(&dev->gnd_canceled_dgrams));
 }
@@ -1685,7 +1759,7 @@ kgnilnd_finish_connect(kgn_dgram_t *dgram)
        /* assume this is a new peer  - it makes locking cleaner when it isn't */
        /* no holding kgn_net_rw_sem - already are at the kgnilnd_dgram_mover level */
 
-       rc = kgnilnd_create_peer_safe(&new_peer, her_nid, NULL);
+       rc = kgnilnd_create_peer_safe(&new_peer, her_nid, NULL, GNILND_RCA_NODE_UP);
        if (rc != 0) {
                CERROR("Can't create peer for %s\n", libcfs_nid2str(her_nid));
                return rc;
@@ -1740,6 +1814,12 @@ kgnilnd_finish_connect(kgn_dgram_t *dgram)
                }
        }
 
+       if (peer->gnp_down == GNILND_RCA_NODE_DOWN) {
+               CNETERR("Received connection request from %s that RCA thinks is"
+                       " down.\n", libcfs_nid2str(her_nid));
+               peer->gnp_down = GNILND_RCA_NODE_UP;
+       }
+
        nstale = kgnilnd_close_stale_conns_locked(peer, conn);
 
        /* either way with peer (new or existing), we are ok with ref counts here as the
@@ -1761,6 +1841,9 @@ kgnilnd_finish_connect(kgn_dgram_t *dgram)
        conn->gnc_last_tx = jiffies - (cfs_time_seconds(GNILND_TO2KA(conn->gnc_timeout)) * 2);
        conn->gnc_state = GNILND_CONN_ESTABLISHED;
 
+       /* save the dgram type used to establish this connection */
+       conn->gnc_dgram_type = dgram->gndg_type;
+
        /* refs are not transferred from dgram to tables, so increment to
         * take ownership */
        kgnilnd_conn_addref(conn);
@@ -1838,10 +1921,6 @@ kgnilnd_finish_connect(kgn_dgram_t *dgram)
        lnet_notify(peer->gnp_net->gnn_ni, peer->gnp_nid,
                     1, cfs_time_current());
 
-       /* schedule the conn to pick up any SMSG sent by peer before we could
-        * process this dgram */
-       kgnilnd_schedule_conn(conn);
-
        /* drop our 'hold' ref */
        kgnilnd_conn_decref(conn);
 
@@ -1917,7 +1996,6 @@ kgnilnd_process_nak(kgn_dgram_t *dgram)
                        libcfs_nid2str(connreq->gncr_srcnid),
                        libcfs_nid2str(connreq->gncr_dstnid), errno, rc);
        } else {
-               rc = 0;
                spin_lock(&dgram->gndg_conn->gnc_device->gnd_connd_lock);
 
                if (list_empty(&peer->gnp_connd_list)) {
@@ -1948,7 +2026,7 @@ kgnilnd_process_nak(kgn_dgram_t *dgram)
        /* success! we found a peer and at least marked pending_nak */
        write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
 
-       return 0;
+       return rc;
 }
 
 int
@@ -2046,7 +2124,7 @@ inform_peer:
 
        orig_dstnid = dgram->gndg_conn_out.gncr_dstnid;
 
-       kgnilnd_release_dgram(dev, dgram);
+       kgnilnd_release_dgram(dev, dgram, 0);
 
        CDEBUG(D_NET, "cleaning up dgram to %s, rc %d\n",
               libcfs_nid2str(orig_dstnid), rc);
@@ -2163,7 +2241,6 @@ kgnilnd_dgram_waitq(void *arg)
        DEFINE_WAIT(mover_done);
 
        snprintf(name, sizeof(name), "kgnilnd_dgn_%02d", dev->gnd_id);
-       cfs_daemonize(name);
        cfs_block_allsigs();
 
        /* all gnilnd threads need to run fairly urgently */
@@ -2203,7 +2280,7 @@ kgnilnd_dgram_waitq(void *arg)
 }
 
 int
-kgnilnd_start_outbound_dgrams(kgn_device_t *dev)
+kgnilnd_start_outbound_dgrams(kgn_device_t *dev, unsigned long deadline)
 {
        int                      did_something = 0, rc;
        kgn_peer_t              *peer = NULL;
@@ -2211,7 +2288,7 @@ kgnilnd_start_outbound_dgrams(kgn_device_t *dev)
        spin_lock(&dev->gnd_connd_lock);
 
        /* Active connect - we added this in kgnilnd_launch_tx */
-       while (!list_empty(&dev->gnd_connd_peers)) {
+       while (!list_empty(&dev->gnd_connd_peers) && time_before(jiffies, deadline)) {
                peer = list_first_entry(&dev->gnd_connd_peers,
                                        kgn_peer_t, gnp_connd_list);
 
@@ -2298,6 +2375,29 @@ kgnilnd_start_outbound_dgrams(kgn_device_t *dev)
        RETURN(did_something);
 }
 
+int
+kgnilnd_repost_wc_dgrams(kgn_device_t *dev)
+{
+       int did_something = 0, to_repost, i;
+       to_repost = atomic_read(&dev->gnd_nwcdgrams);
+       ENTRY;
+
+       for (i = 0; i < to_repost; ++i) {
+               int     rerc;
+               rerc = kgnilnd_post_dgram(dev, LNET_NID_ANY, GNILND_CONNREQ_REQ, 0);
+               if (rerc == 0) {
+                       kgnilnd_admin_decref(dev->gnd_nwcdgrams);
+                       did_something += 1;
+               } else {
+                       CDEBUG(D_NETERROR, "error %d: dev %d could not post wildcard datagram\n",
+                               rerc, dev->gnd_id);
+                       break;
+               }
+       }
+
+       RETURN(did_something);
+}
+
 static void
 kgnilnd_dgram_poke_with_stick(unsigned long arg)
 {
@@ -2317,10 +2417,10 @@ kgnilnd_dgram_mover(void *arg)
        unsigned long            next_purge_check = jiffies - 1;
        unsigned long            timeout;
        struct timer_list        timer;
+       unsigned long            deadline = 0;
        DEFINE_WAIT(wait);
 
        snprintf(name, sizeof(name), "kgnilnd_dg_%02d", dev->gnd_id);
-       cfs_daemonize(name);
        cfs_block_allsigs();
        /* all gnilnd threads need to run fairly urgently */
        set_user_nice(current, *kgnilnd_tunables.kgn_nice);
@@ -2328,7 +2428,7 @@ kgnilnd_dgram_mover(void *arg)
        /* we are ok not locking for these variables as the dgram waitq threads
         * will block both due to tying up net (kgn_shutdown) and the completion
         * event for the dgram_waitq (kgn_quiesce_trigger) */
-
+       deadline = jiffies + cfs_time_seconds(*kgnilnd_tunables.kgn_dgram_timeout);
        while (!kgnilnd_data.kgn_shutdown) {
                /* Safe: kgn_shutdown only set when quiescent */
 
@@ -2356,8 +2456,10 @@ kgnilnd_dgram_mover(void *arg)
 
                up_read(&kgnilnd_data.kgn_net_rw_sem);
 
+               CFS_FAIL_TIMEOUT(CFS_FAIL_GNI_DGRAM_DEADLINE,
+                       (*kgnilnd_tunables.kgn_dgram_timeout + 1));
                /* start new outbound dgrams */
-               did_something += kgnilnd_start_outbound_dgrams(dev);
+               did_something += kgnilnd_start_outbound_dgrams(dev, deadline);
 
                /* find dead dgrams */
                if (time_after_eq(jiffies, next_purge_check)) {
@@ -2368,13 +2470,15 @@ kgnilnd_dgram_mover(void *arg)
                                      cfs_time_seconds(kgnilnd_data.kgn_new_min_timeout / 4);
                }
 
+               did_something += kgnilnd_repost_wc_dgrams(dev);
+
                /* careful with the jiffy wrap... */
                timeout = (long)(next_purge_check - jiffies);
 
                CDEBUG(D_INFO, "did %d timeout %lu next %lu jiffies %lu\n",
                       did_something, timeout, next_purge_check, jiffies);
 
-               if (did_something || timeout <= 0) {
+               if ((did_something || timeout <= 0) && time_before(jiffies, deadline)) {
                        did_something = 0;
                        continue;
                }
@@ -2387,8 +2491,9 @@ kgnilnd_dgram_mover(void *arg)
                /* last second chance for others to poke us */
                did_something += xchg(&dev->gnd_dgram_ready, GNILND_DGRAM_IDLE);
 
-               /* check flag variables before comitting */
-               if (!did_something &&
+               /* check flag variables before committing even if we did something;
+                * if we are after the deadline call schedule */
+               if ((!did_something || time_after(jiffies, deadline)) &&
                    !kgnilnd_data.kgn_shutdown &&
                    !kgnilnd_data.kgn_quiesce_trigger) {
                        CDEBUG(D_INFO, "schedule timeout %ld (%lu sec)\n",
@@ -2396,6 +2501,7 @@ kgnilnd_dgram_mover(void *arg)
                        wake_up_all(&dev->gnd_dgping_waitq);
                        schedule();
                        CDEBUG(D_INFO, "awake after schedule\n");
+                       deadline = jiffies + cfs_time_seconds(*kgnilnd_tunables.kgn_dgram_timeout);
                }
 
                del_singleshot_timer_sync(&timer);
@@ -2405,4 +2511,3 @@ kgnilnd_dgram_mover(void *arg)
        kgnilnd_thread_fini();
        return 0;
 }
-