LU-9019 lnd: remove remaining cfs_time wrappers 42/31042/2
author James Simmons <uja.ornl@yahoo.com>
Fri, 26 Jan 2018 20:18:09 +0000 (15:18 -0500)
committer Oleg Drokin <oleg.drokin@intel.com>
Fri, 9 Feb 2018 05:57:35 +0000 (05:57 +0000)
Remove the remaining libcfs time wrappers from ko2iblnd. Also fix a bug
in ksocklnd: use cfs_time_seconds() when calling schedule_timeout()
instead of cfs_duration_sec(), which performs the opposite of the
conversion needed. The remaining jiffies usage is moved to time64_t.

Change-Id: I5847d7260ac8a9be1b165423adb7b8e9a53998d2
Signed-off-by: James Simmons <uja.ornl@yahoo.com>
Reviewed-on: https://review.whamcloud.com/31042
Tested-by: Jenkins
Tested-by: Maloo <hpdd-maloo@intel.com>
Reviewed-by: Andreas Dilger <andreas.dilger@intel.com>
Reviewed-by: Dmitry Eremin <dmitry.eremin@intel.com>
Reviewed-by: Oleg Drokin <oleg.drokin@intel.com>
lnet/klnds/o2iblnd/o2iblnd.c
lnet/klnds/o2iblnd/o2iblnd.h
lnet/klnds/o2iblnd/o2iblnd_cb.c
lnet/klnds/socklnd/socklnd_cb.c
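
The ko2iblnd changes below follow two patterns: coarse pool and failover deadlines move from jiffies-based cfs_time_t values (cfs_time_shift(), cfs_time_current(), cfs_time_aftereq()) to plain time64_t seconds obtained from ktime_get_seconds(), while per-transmit deadlines move to ktime_t and the ktime_*() helpers. Below is a minimal sketch of both patterns; the structure and constant names (example_pool, example_tx, EXAMPLE_POOL_DEADLINE) are hypothetical stand-ins for the real kib_pool_t/kib_tx_t fields, not code from the patch:

#include <linux/ktime.h>
#include <linux/timekeeping.h>
#include <linux/types.h>

/* Illustrative stand-ins for kib_pool_t and kib_tx_t. */
struct example_pool {
	time64_t deadline;		/* second-granularity deadline */
};

struct example_tx {
	ktime_t deadline;		/* nanosecond-granularity deadline */
};

#define EXAMPLE_POOL_DEADLINE	300	/* seconds; stands in for IBLND_POOL_DEADLINE */

static void example_arm_pool(struct example_pool *pool)
{
	/* was: pool->deadline = cfs_time_shift(EXAMPLE_POOL_DEADLINE); */
	pool->deadline = ktime_get_seconds() + EXAMPLE_POOL_DEADLINE;
}

static bool example_pool_expired(const struct example_pool *pool)
{
	/* was: cfs_time_aftereq(cfs_time_current(), pool->deadline); */
	return ktime_get_seconds() >= pool->deadline;
}

static void example_arm_tx(struct example_tx *tx, int timeout_sec)
{
	/* was: tx->deadline = jiffies + msecs_to_jiffies(timeout_sec * MSEC_PER_SEC); */
	tx->deadline = ktime_add_ns(ktime_get(), (s64)timeout_sec * NSEC_PER_SEC);
}

static bool example_tx_expired(const struct example_tx *tx)
{
	/* was: cfs_time_aftereq(jiffies, tx->deadline); */
	return ktime_compare(ktime_get(), tx->deadline) >= 0;
}

Both ktime_get_seconds() and ktime_get() read the monotonic clock, so the new deadlines are unaffected by wall-clock adjustments, matching the behaviour of the jiffies-based code they replace.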

index fc1ce47..dc653d9 100644 (file)
@@ -578,10 +578,10 @@ kiblnd_debug_rx (kib_rx_t *rx)
 static void
 kiblnd_debug_tx (kib_tx_t *tx)
 {
-        CDEBUG(D_CONSOLE, "      %p snd %d q %d w %d rc %d dl %lx "
+       CDEBUG(D_CONSOLE, "      %p snd %d q %d w %d rc %d dl %lld "
               "cookie %#llx msg %s%s type %x cred %d\n",
                tx, tx->tx_sending, tx->tx_queued, tx->tx_waiting,
-               tx->tx_status, tx->tx_deadline, tx->tx_cookie,
+              tx->tx_status, ktime_to_ns(tx->tx_deadline), tx->tx_cookie,
                tx->tx_lntmsg[0] == NULL ? "-" : "!",
                tx->tx_lntmsg[1] == NULL ? "-" : "!",
                tx->tx_msg->ibm_type, tx->tx_msg->ibm_credits);
@@ -1667,8 +1667,8 @@ kiblnd_create_fmr_pool(kib_fmr_poolset_t *fps, kib_fmr_pool_t **pp_fpo)
        if (rc)
                goto out_fpo;
 
-       fpo->fpo_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
-       fpo->fpo_owner    = fps;
+       fpo->fpo_deadline = ktime_get_seconds() + IBLND_POOL_DEADLINE;
+       fpo->fpo_owner = fps;
        *pp_fpo = fpo;
 
        return 0;
@@ -1739,13 +1739,13 @@ kiblnd_init_fmr_poolset(kib_fmr_poolset_t *fps, int cpt, int ncpts,
 }
 
 static int
-kiblnd_fmr_pool_is_idle(kib_fmr_pool_t *fpo, cfs_time_t now)
+kiblnd_fmr_pool_is_idle(kib_fmr_pool_t *fpo, time64_t now)
 {
         if (fpo->fpo_map_count != 0) /* still in use */
                 return 0;
         if (fpo->fpo_failed)
                 return 1;
-        return cfs_time_aftereq(now, fpo->fpo_deadline);
+       return now >= fpo->fpo_deadline;
 }
 
 static int
@@ -1774,11 +1774,11 @@ void
 kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status)
 {
        struct list_head   zombies = LIST_HEAD_INIT(zombies);
-       kib_fmr_pool_t    *fpo = fmr->fmr_pool;
+       kib_fmr_pool_t *fpo = fmr->fmr_pool;
        kib_fmr_poolset_t *fps;
-       cfs_time_t         now = cfs_time_current();
-       kib_fmr_pool_t    *tmp;
-       int                rc;
+       time64_t now = ktime_get_seconds();
+       kib_fmr_pool_t *tmp;
+       int rc;
 
        if (!fpo)
                return;
@@ -1843,7 +1843,7 @@ again:
        spin_lock(&fps->fps_lock);
        version = fps->fps_version;
        list_for_each_entry(fpo, &fps->fps_pool_list, fpo_list) {
-               fpo->fpo_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
+               fpo->fpo_deadline = ktime_get_seconds() + IBLND_POOL_DEADLINE;
                fpo->fpo_map_count++;
 
                if (fpo->fpo_is_fmr) {
@@ -1993,7 +1993,7 @@ again:
 
        }
 
-       if (cfs_time_before(cfs_time_current(), fps->fps_next_retry)) {
+       if (ktime_get_seconds() < fps->fps_next_retry) {
                /* someone failed recently */
                spin_unlock(&fps->fps_lock);
                return -EAGAIN;
@@ -2010,7 +2010,7 @@ again:
                fps->fps_version++;
                list_add_tail(&fpo->fpo_list, &fps->fps_pool_list);
        } else {
-               fps->fps_next_retry = cfs_time_shift(IBLND_POOL_RETRY);
+               fps->fps_next_retry = ktime_get_seconds() + IBLND_POOL_RETRY;
        }
        spin_unlock(&fps->fps_lock);
 
@@ -2033,9 +2033,9 @@ kiblnd_init_pool(kib_poolset_t *ps, kib_pool_t *pool, int size)
 
        memset(pool, 0, sizeof(kib_pool_t));
        INIT_LIST_HEAD(&pool->po_free_list);
-       pool->po_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
-       pool->po_owner    = ps;
-       pool->po_size     = size;
+       pool->po_deadline = ktime_get_seconds() + IBLND_POOL_DEADLINE;
+       pool->po_owner = ps;
+       pool->po_size = size;
 }
 
 static void
@@ -2118,22 +2118,22 @@ kiblnd_init_poolset(kib_poolset_t *ps, int cpt,
 }
 
 static int
-kiblnd_pool_is_idle(kib_pool_t *pool, cfs_time_t now)
+kiblnd_pool_is_idle(kib_pool_t *pool, time64_t now)
 {
         if (pool->po_allocated != 0) /* still in use */
                 return 0;
         if (pool->po_failed)
                 return 1;
-        return cfs_time_aftereq(now, pool->po_deadline);
+       return now >= pool->po_deadline;
 }
 
 void
 kiblnd_pool_free_node(kib_pool_t *pool, struct list_head *node)
 {
        struct list_head zombies = LIST_HEAD_INIT(zombies);
-       kib_poolset_t   *ps = pool->po_owner;
-       kib_pool_t      *tmp;
-       cfs_time_t       now = cfs_time_current();
+       kib_poolset_t *ps = pool->po_owner;
+       kib_pool_t *tmp;
+       time64_t now = ktime_get_seconds();
 
        spin_lock(&ps->ps_lock);
 
@@ -2165,8 +2165,8 @@ kiblnd_pool_alloc_node(kib_poolset_t *ps)
        kib_pool_t              *pool;
        int                     rc;
        unsigned int            interval = 1;
-       cfs_time_t              time_before;
-       unsigned int            trips = 0;
+       ktime_t time_before;
+       unsigned int trips = 0;
 
 again:
        spin_lock(&ps->ps_lock);
@@ -2175,7 +2175,8 @@ again:
                        continue;
 
                pool->po_allocated++;
-               pool->po_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
+               pool->po_deadline = ktime_get_seconds() +
+                                   IBLND_POOL_DEADLINE;
                node = pool->po_free_list.next;
                list_del(node);
 
@@ -2205,7 +2206,7 @@ again:
                 goto again;
         }
 
-       if (cfs_time_before(cfs_time_current(), ps->ps_next_retry)) {
+       if (ktime_get_seconds() < ps->ps_next_retry) {
                /* someone failed recently */
                spin_unlock(&ps->ps_lock);
                return NULL;
@@ -2215,17 +2216,17 @@ again:
        spin_unlock(&ps->ps_lock);
 
        CDEBUG(D_NET, "%s pool exhausted, allocate new pool\n", ps->ps_name);
-       time_before = cfs_time_current();
+       time_before = ktime_get();
        rc = ps->ps_pool_create(ps, ps->ps_pool_size, &pool);
-       CDEBUG(D_NET, "ps_pool_create took %lu HZ to complete",
-              cfs_time_current() - time_before);
+       CDEBUG(D_NET, "ps_pool_create took %lld ms to complete",
+              ktime_ms_delta(ktime_get(), time_before));
 
        spin_lock(&ps->ps_lock);
        ps->ps_increasing = 0;
        if (rc == 0) {
                list_add_tail(&pool->po_list, &ps->ps_pool_list);
        } else {
-               ps->ps_next_retry = cfs_time_shift(IBLND_POOL_RETRY);
+               ps->ps_next_retry = ktime_get_seconds() + IBLND_POOL_RETRY;
                CERROR("Can't allocate new %s pool because out of memory\n",
                       ps->ps_name);
        }
index ccc8596..67454c3 100644 (file)
@@ -188,7 +188,7 @@ typedef struct
        char                    ibd_ifname[KIB_IFNAME_SIZE];
        int                     ibd_nnets;      /* # nets extant */
 
-       cfs_time_t              ibd_next_failover;
+       time64_t                ibd_next_failover;
        /* # failover failures */
        int                     ibd_failed_failover;
        /* failover in progress */
@@ -254,7 +254,7 @@ typedef struct kib_poolset
        /* failed pool list */
        struct list_head        ps_failed_pool_list;
        /* time stamp for retry if failed to allocate */
-       cfs_time_t              ps_next_retry;
+       time64_t                ps_next_retry;
        /* is allocating new pool */
        int                     ps_increasing;
        /* new pool size */
@@ -281,7 +281,7 @@ typedef struct kib_pool
        /* pool_set of this pool */
        kib_poolset_t          *po_owner;
        /* deadline of this pool */
-       cfs_time_t              po_deadline;
+       time64_t                po_deadline;
        /* # of elements in use */
        int                     po_allocated;
        /* pool is created on failed HCA */
@@ -316,7 +316,7 @@ typedef struct
        /* is allocating new pool */
        int                     fps_increasing;
        /* time stamp for retry if failed to allocate */
-       cfs_time_t              fps_next_retry;
+       time64_t                fps_next_retry;
 } kib_fmr_poolset_t;
 
 #ifndef HAVE_IB_RDMA_WR
@@ -352,7 +352,7 @@ typedef struct
                        int               fpo_pool_size;
                } fast_reg;
        };
-       cfs_time_t              fpo_deadline;   /* deadline of this pool */
+       time64_t                fpo_deadline;   /* deadline of this pool */
        int                     fpo_failed;     /* fmr pool is failed */
        int                     fpo_map_count;  /* # of mapped FMR */
        bool                    fpo_is_fmr; /* True if FMR pools allocated */
@@ -612,7 +612,7 @@ typedef struct kib_tx                           /* transmit message */
        /* LNET completion status */
        int                     tx_status;
        /* completion deadline */
-       unsigned long           tx_deadline;
+       ktime_t                 tx_deadline;
        /* completion cookie */
        __u64                   tx_cookie;
        /* lnet msgs to finalize on completion */
@@ -700,7 +700,7 @@ typedef struct kib_conn
        /* CQ callback fired */
        unsigned int            ibc_ready:1;
        /* time of last send */
-       unsigned long           ibc_last_send;
+       ktime_t                 ibc_last_send;
        /** link chain for kiblnd_check_conns only */
        struct list_head        ibc_connd_list;
        /** rxs completed before ESTABLISHED */
@@ -754,7 +754,7 @@ typedef struct kib_peer
        struct list_head        ibp_tx_queue;
        /* incarnation of peer_ni */
        __u64                   ibp_incarnation;
-       /* when (in jiffies) I was last alive */
+       /* when (in seconds) I was last alive */
        time64_t                ibp_last_alive;
        /* # users */
        atomic_t                ibp_refcount;
@@ -919,10 +919,11 @@ kiblnd_get_conn_locked (kib_peer_ni_t *peer_ni)
 static inline int
 kiblnd_send_keepalive(kib_conn_t *conn)
 {
+       s64 keepalive_ns = *kiblnd_tunables.kib_keepalive * NSEC_PER_SEC;
+
        return (*kiblnd_tunables.kib_keepalive > 0) &&
-               cfs_time_after(jiffies, conn->ibc_last_send +
-                              msecs_to_jiffies(*kiblnd_tunables.kib_keepalive *
-                                               MSEC_PER_SEC));
+               ktime_after(ktime_get(),
+                           ktime_add_ns(conn->ibc_last_send, keepalive_ns));
 }
 
 static inline int
index b96131a..56928c5 100644 (file)
@@ -941,7 +941,7 @@ __must_hold(&conn->ibc_lock)
                rc = ib_post_send(conn->ibc_cmid->qp, wr, &bad);
        }
 
-        conn->ibc_last_send = jiffies;
+       conn->ibc_last_send = ktime_get();
 
         if (rc == 0)
                 return 0;
@@ -1239,15 +1239,15 @@ static void
 kiblnd_queue_tx_locked(kib_tx_t *tx, kib_conn_t *conn)
 {
        struct list_head *q;
+       s64 timeout_ns;
 
        LASSERT(tx->tx_nwrq > 0);       /* work items set up */
        LASSERT(!tx->tx_queued);        /* not queued for sending already */
        LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED);
 
+       timeout_ns = *kiblnd_tunables.kib_timeout * NSEC_PER_SEC;
        tx->tx_queued = 1;
-       tx->tx_deadline = jiffies +
-                         msecs_to_jiffies(*kiblnd_tunables.kib_timeout *
-                                          MSEC_PER_SEC);
+       tx->tx_deadline = ktime_add_ns(ktime_get(), timeout_ns);
 
         if (tx->tx_conn == NULL) {
                 kiblnd_conn_addref(conn);
@@ -2227,7 +2227,7 @@ kiblnd_connreq_done(kib_conn_t *conn, int status)
         /* connection established */
        write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
 
-        conn->ibc_last_send = jiffies;
+       conn->ibc_last_send = ktime_get();
         kiblnd_set_conn_state(conn, IBLND_CONN_ESTABLISHED);
         kiblnd_peer_alive(peer_ni);
 
@@ -3244,10 +3244,11 @@ kiblnd_check_txs_locked(kib_conn_t *conn, struct list_head *txs)
                        LASSERT(tx->tx_waiting || tx->tx_sending != 0);
                }
 
-               if (cfs_time_aftereq(jiffies, tx->tx_deadline)) {
-                       CERROR("Timed out tx: %s, %lu seconds\n",
+               if (ktime_compare(ktime_get(), tx->tx_deadline) >= 0) {
+                       CERROR("Timed out tx: %s, %lld seconds\n",
                               kiblnd_queue2str(conn, txs),
-                              cfs_duration_sec(jiffies - tx->tx_deadline));
+                              ktime_ms_delta(ktime_get(),
+                                             tx->tx_deadline) / MSEC_PER_SEC);
                        return 1;
                }
        }
@@ -3289,10 +3290,11 @@ kiblnd_check_conns (int idx)
 
                /* Check tx_deadline */
                list_for_each_entry_safe(tx, tx_tmp, &peer_ni->ibp_tx_queue, tx_list) {
-                       if (cfs_time_aftereq(jiffies, tx->tx_deadline)) {
-                               CWARN("Timed out tx for %s: %lu seconds\n",
+                       if (ktime_compare(ktime_get(), tx->tx_deadline) >= 0) {
+                               CWARN("Timed out tx for %s: %lld seconds\n",
                                      libcfs_nid2str(peer_ni->ibp_nid),
-                                     cfs_duration_sec(jiffies - tx->tx_deadline));
+                                     ktime_ms_delta(ktime_get(),
+                                                    tx->tx_deadline) / MSEC_PER_SEC);
                                list_move(&tx->tx_list, &timedout_txs);
                        }
                }
@@ -3800,8 +3802,7 @@ kiblnd_failover_thread(void *arg)
 
                list_for_each_entry(dev, &kiblnd_data.kib_failed_devs,
                                     ibd_fail_list) {
-                        if (cfs_time_before(cfs_time_current(),
-                                            dev->ibd_next_failover))
+                       if (ktime_get_seconds() < dev->ibd_next_failover)
                                 continue;
                         do_failover = 1;
                         break;
@@ -3819,13 +3820,13 @@ kiblnd_failover_thread(void *arg)
                         LASSERT (dev->ibd_failover);
                         dev->ibd_failover = 0;
                         if (rc >= 0) { /* Device is OK or failover succeed */
-                                dev->ibd_next_failover = cfs_time_shift(3);
+                               dev->ibd_next_failover = ktime_get_seconds() + 3;
                                 continue;
                         }
 
                         /* failed to failover, retry later */
-                        dev->ibd_next_failover =
-                                cfs_time_shift(min(dev->ibd_failed_failover, 10));
+                       dev->ibd_next_failover = ktime_get_seconds() +
+                                                min(dev->ibd_failed_failover, 10);
                         if (kiblnd_dev_can_failover(dev)) {
                                list_add_tail(&dev->ibd_fail_list,
                                               &kiblnd_data.kib_failed_devs);
index ceb8254..078638f 100644 (file)
@@ -2627,7 +2627,7 @@ int ksocknal_reaper(void *arg)
                if (!ksocknal_data.ksnd_shuttingdown &&
                    list_empty(&ksocknal_data.ksnd_deathrow_conns) &&
                    list_empty(&ksocknal_data.ksnd_zombie_conns))
-                       schedule_timeout(cfs_duration_sec(timeout));
+                       schedule_timeout(cfs_time_seconds(timeout));
 
                set_current_state(TASK_RUNNING);
                remove_wait_queue(&ksocknal_data.ksnd_reaper_waitq, &wait);
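
On the ksocklnd fix above: schedule_timeout() expects a duration in jiffies, so a timeout held in seconds has to be scaled up with cfs_time_seconds() (roughly seconds * HZ); cfs_duration_sec() goes the other way (roughly jiffies / HZ), so the old call slept for far too short an interval whenever HZ > 1. A minimal sketch of the corrected pattern, assuming those usual libcfs definitions and using a hypothetical example_reaper_sleep() helper:

#include <linux/jiffies.h>
#include <linux/sched.h>

static void example_reaper_sleep(long timeout_sec)
{
	set_current_state(TASK_INTERRUPTIBLE);
	/* seconds -> jiffies, which is what cfs_time_seconds() expands to */
	schedule_timeout(timeout_sec * HZ);
}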