LU-7734 lnet: fix routing selection
[fs/lustre-release.git] / lnet / lnet / lib-eq.c
index 059afa2..4ff2ba6 100644
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
 /*
  * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, 2016, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -58,7 +56,7 @@
  * \param handle On successful return, this location will hold a handle for
  * the newly created EQ.
  *
- * \retval 0       On success.
+ * \retval 0      On success.
  * \retval -EINVAL If a parameter is not valid.
  * \retval -ENOMEM If memory for the EQ can't be allocated.
  *
  */
 int
 LNetEQAlloc(unsigned int count, lnet_eq_handler_t callback,
-            lnet_handle_eq_t *handle)
+           lnet_handle_eq_t *handle)
 {
-        lnet_eq_t     *eq;
+       lnet_eq_t     *eq;
 
-        LASSERT (the_lnet.ln_init);
-        LASSERT (the_lnet.ln_refcount > 0);
+       LASSERT(the_lnet.ln_refcount > 0);
 
-        /* We need count to be a power of 2 so that when eq_{enq,deq}_seq
-         * overflow, they don't skip entries, so the queue has the same
-         * apparent capacity at all times */
+       /* We need count to be a power of 2 so that when eq_{enq,deq}_seq
+        * overflow, they don't skip entries, so the queue has the same
+        * apparent capacity at all times */
 
-       count = cfs_power2_roundup(count);
+       if (count)
+               count = roundup_pow_of_two(count);
 
        if (callback != LNET_EQ_HANDLER_NONE && count != 0) {
                CWARN("EQ callback is guaranteed to get every event, "
@@ -97,30 +95,48 @@ LNetEQAlloc(unsigned int count, lnet_eq_handler_t callback,
 
        if (count != 0) {
                LIBCFS_ALLOC(eq->eq_events, count * sizeof(lnet_event_t));
-               if (eq->eq_events == NULL) {
-                       lnet_eq_free(eq);
-                       return -ENOMEM;
-               }
+               if (eq->eq_events == NULL)
+                       goto failed;
                /* NB allocator has set all event sequence numbers to 0,
                 * so all of them should be earlier than eq_deq_seq */
        }
 
-        eq->eq_deq_seq = 1;
-        eq->eq_enq_seq = 1;
-        eq->eq_size = count;
-        eq->eq_refcount = 0;
-        eq->eq_callback = callback;
+       eq->eq_deq_seq = 1;
+       eq->eq_enq_seq = 1;
+       eq->eq_size = count;
+       eq->eq_callback = callback;
+
+       eq->eq_refs = cfs_percpt_alloc(lnet_cpt_table(),
+                                      sizeof(*eq->eq_refs[0]));
+       if (eq->eq_refs == NULL)
+               goto failed;
 
-       lnet_res_lock();
+       /* MUST hold the exclusive lnet_res_lock */
+       lnet_res_lock(LNET_LOCK_EX);
+       /* NB: hold lnet_eq_wait_lock for EQ link/unlink, so that both EQ
+        * lookup and event polling can be done with only lnet_eq_wait_lock */
+       lnet_eq_wait_lock();
 
        lnet_res_lh_initialize(&the_lnet.ln_eq_container, &eq->eq_lh);
-       cfs_list_add(&eq->eq_list, &the_lnet.ln_eq_container.rec_active);
+       list_add(&eq->eq_list, &the_lnet.ln_eq_container.rec_active);
 
-       lnet_res_unlock();
+       lnet_eq_wait_unlock();
+       lnet_res_unlock(LNET_LOCK_EX);
 
        lnet_eq2handle(handle, eq);
        return 0;
+
+failed:
+       if (eq->eq_events != NULL)
+               LIBCFS_FREE(eq->eq_events, count * sizeof(lnet_event_t));
+
+       if (eq->eq_refs != NULL)
+               cfs_percpt_free(eq->eq_refs);
+
+       lnet_eq_free(eq);
+       return -ENOMEM;
 }
+EXPORT_SYMBOL(LNetEQAlloc);
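
For orientation, a minimal caller-side sketch of the two allocation styles this API supports (assuming the usual lnet.h context; the handler name my_eq_handler and the error handling are illustrative, not code from this file):

        /* caller-side sketch, not part of this file */
        static void my_eq_handler(lnet_event_t *ev)
        {
                /* invoked for every event when the EQ has a callback */
        }

        lnet_handle_eq_t eqh;
        int rc;

        /* Polling-style EQ: events queue in a 64-slot ring and are
         * consumed later via LNetEQGet()/LNetEQPoll(). */
        rc = LNetEQAlloc(64, LNET_EQ_HANDLER_NONE, &eqh);

        /* Callback-style EQ: count == 0, every event goes straight to
         * the handler and nothing is queued (mixing a callback with a
         * non-zero count triggers the CWARN above). */
        rc = LNetEQAlloc(0, my_eq_handler, &eqh);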
 
 /**
  * Release the resources associated with an event queue if it's idle;
@@ -135,48 +151,63 @@ LNetEQAlloc(unsigned int count, lnet_eq_handler_t callback,
 int
 LNetEQFree(lnet_handle_eq_t eqh)
 {
-        lnet_eq_t     *eq;
-        int            size;
-        lnet_event_t  *events;
+       struct lnet_eq  *eq;
+       lnet_event_t    *events = NULL;
+       int             **refs = NULL;
+       int             *ref;
+       int             rc = 0;
+       int             size = 0;
+       int             i;
 
-        LASSERT (the_lnet.ln_init);
-        LASSERT (the_lnet.ln_refcount > 0);
+       LASSERT(the_lnet.ln_refcount > 0);
 
-       lnet_res_lock();
+       lnet_res_lock(LNET_LOCK_EX);
+       /* NB: hold lnet_eq_wait_lock for EQ link/unlink, so that both EQ
+        * lookup and event polling can be done with only lnet_eq_wait_lock */
+       lnet_eq_wait_lock();
 
        eq = lnet_handle2eq(&eqh);
        if (eq == NULL) {
-               lnet_res_unlock();
-               return -ENOENT;
+               rc = -ENOENT;
+               goto out;
        }
 
-       if (eq->eq_refcount != 0) {
-               CDEBUG(D_NET, "Event queue (%d) busy on destroy.\n",
-                      eq->eq_refcount);
-               lnet_res_unlock();
-               return -EBUSY;
+       cfs_percpt_for_each(ref, i, eq->eq_refs) {
+               LASSERT(*ref >= 0);
+               if (*ref == 0)
+                       continue;
+
+               CDEBUG(D_NET, "Event equeue (%d: %d) busy on destroy.\n",
+                      i, *ref);
+               rc = -EBUSY;
+               goto out;
        }
 
        /* stash for free after lock dropped */
        events  = eq->eq_events;
        size    = eq->eq_size;
+       refs    = eq->eq_refs;
 
        lnet_res_lh_invalidate(&eq->eq_lh);
-       cfs_list_del(&eq->eq_list);
-       lnet_eq_free_locked(eq);
-
-       lnet_res_unlock();
+       list_del(&eq->eq_list);
+       lnet_eq_free(eq);
+ out:
+       lnet_eq_wait_unlock();
+       lnet_res_unlock(LNET_LOCK_EX);
 
        if (events != NULL)
                LIBCFS_FREE(events, size * sizeof(lnet_event_t));
+       if (refs != NULL)
+               cfs_percpt_free(refs);
 
-       return 0;
+       return rc;
 }
+EXPORT_SYMBOL(LNetEQFree);
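
The -EBUSY path above is per-CPT: teardown is refused while any slot in eq_refs is non-zero. A hedged sketch of the caller-side contract (the recovery policy is an assumption, not something this file prescribes):

        rc = LNetEQFree(eqh);
        if (rc == -EBUSY) {
                /* An MD on some CPT still references this EQ; unlink the
                 * remaining MDs (or let them auto-unlink) and retry. */
        } else if (rc == -ENOENT) {
                /* eqh never was, or no longer is, a valid EQ handle */
        }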
 
 void
 lnet_eq_enqueue_event(lnet_eq_t *eq, lnet_event_t *ev)
 {
-       /* MUST called with resource lock hold */
+       /* MUST be called with resource lock held, but w/o lnet_eq_wait_lock */
        int index;
 
        if (eq->eq_size == 0) {
@@ -185,6 +216,7 @@ lnet_eq_enqueue_event(lnet_eq_t *eq, lnet_event_t *ev)
                return;
        }
 
+       lnet_eq_wait_lock();
        ev->sequence = eq->eq_enq_seq++;
 
        LASSERT(eq->eq_size == LOWEST_BIT_SET(eq->eq_size));
@@ -195,51 +227,43 @@ lnet_eq_enqueue_event(lnet_eq_t *eq, lnet_event_t *ev)
        if (eq->eq_callback != LNET_EQ_HANDLER_NONE)
                eq->eq_callback(ev);
 
-#ifdef __KERNEL__
-       /* Wake anyone waiting in LNetEQPoll() */
-       if (cfs_waitq_active(&the_lnet.ln_eq_waitq))
-               cfs_waitq_broadcast(&the_lnet.ln_eq_waitq);
-#else
-# ifndef HAVE_LIBPTHREAD
-       /* LNetEQPoll() calls into _the_ LND to wait for action */
-# else
        /* Wake anyone waiting in LNetEQPoll() */
-       pthread_cond_broadcast(&the_lnet.ln_eq_cond);
-# endif
-#endif
+       if (waitqueue_active(&the_lnet.ln_eq_waitq))
+               wake_up_all(&the_lnet.ln_eq_waitq);
+       lnet_eq_wait_unlock();
 }
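
This is where the power-of-2 constraint from LNetEQAlloc() pays off: since eq_size has exactly one bit set (the LASSERT above), masking the monotonically increasing sequence number yields a slot that cycles through the ring without skipping entries, even across unsigned wraparound. A small illustration; the mask expression mirrors the enqueue path, the concrete values are only an example:

        /* eq_size == 8, so the mask is 7:
         *   sequence 0xfffffffe -> slot 6
         *   sequence 0xffffffff -> slot 7
         *   sequence 0x00000000 -> slot 0   (wraps with no gap)
         */
        index = ev->sequence & (eq->eq_size - 1);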
 
-int
+static int
 lnet_eq_dequeue_event(lnet_eq_t *eq, lnet_event_t *ev)
 {
-        int           new_index = eq->eq_deq_seq & (eq->eq_size - 1);
-        lnet_event_t *new_event = &eq->eq_events[new_index];
-        int           rc;
-        ENTRY;
+       int             new_index = eq->eq_deq_seq & (eq->eq_size - 1);
+       lnet_event_t    *new_event = &eq->eq_events[new_index];
+       int             rc;
+       ENTRY;
 
-        if (LNET_SEQ_GT (eq->eq_deq_seq, new_event->sequence)) {
-                RETURN(0);
-        }
+       /* must be called with lnet_eq_wait_lock held */
+       if (LNET_SEQ_GT(eq->eq_deq_seq, new_event->sequence))
+               RETURN(0);
 
-        /* We've got a new event... */
-        *ev = *new_event;
+       /* We've got a new event... */
+       *ev = *new_event;
 
        CDEBUG(D_INFO, "event: %p, sequence: %lu, eq->size: %u\n",
               new_event, eq->eq_deq_seq, eq->eq_size);
 
-        /* ...but did it overwrite an event we've not seen yet? */
-        if (eq->eq_deq_seq == new_event->sequence) {
-                rc = 1;
-        } else {
-                /* don't complain with CERROR: some EQs are sized small
-                 * anyway; if it's important, the caller should complain */
-                CDEBUG(D_NET, "Event Queue Overflow: eq seq %lu ev seq %lu\n",
-                       eq->eq_deq_seq, new_event->sequence);
-                rc = -EOVERFLOW;
-        }
-
-        eq->eq_deq_seq = new_event->sequence + 1;
-        RETURN(rc);
+       /* ...but did it overwrite an event we've not seen yet? */
+       if (eq->eq_deq_seq == new_event->sequence) {
+               rc = 1;
+       } else {
+               /* don't complain with CERROR: some EQs are sized small
+                * anyway; if it's important, the caller should complain */
+               CDEBUG(D_NET, "Event Queue Overflow: eq seq %lu ev seq %lu\n",
+                      eq->eq_deq_seq, new_event->sequence);
+               rc = -EOVERFLOW;
+       }
+
+       eq->eq_deq_seq = new_event->sequence + 1;
+       RETURN(rc);
 }
 
 /**
@@ -251,8 +275,8 @@ lnet_eq_dequeue_event(lnet_eq_t *eq, lnet_event_t *ev)
  * \param event On successful return (1 or -EOVERFLOW), this location will
  * hold the next event in the EQ.
  *
- * \retval 0          No pending event in the EQ.
- * \retval 1          Indicates success.
+ * \retval 0         No pending event in the EQ.
+ * \retval 1         Indicates success.
  * \retval -ENOENT    If \a eventq does not point to a valid EQ.
  * \retval -EOVERFLOW Indicates success (i.e., an event is returned) and that
  * at least one event between this event and the last event obtained from the
@@ -261,11 +285,12 @@ lnet_eq_dequeue_event(lnet_eq_t *eq, lnet_event_t *ev)
 int
 LNetEQGet (lnet_handle_eq_t eventq, lnet_event_t *event)
 {
-        int which;
+       int which;
 
-        return LNetEQPoll(&eventq, 1, 0,
-                         event, &which);
+       return LNetEQPoll(&eventq, 1, 0,
+                        event, &which);
 }
+EXPORT_SYMBOL(LNetEQGet);
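
Because LNetEQGet() is just LNetEQPoll() with a zero timeout, the usual consumption pattern is a non-blocking drain loop; a sketch, with the processing step left as a placeholder:

        lnet_event_t ev;
        int rc;

        while ((rc = LNetEQGet(eqh, &ev)) != 0) {
                if (rc == -ENOENT)
                        break;  /* invalid or stale EQ handle */
                if (rc == -EOVERFLOW)
                        CWARN("events lost before seq %lu\n", ev.sequence);
                /* rc == 1 or rc == -EOVERFLOW: ev holds a valid event */
        }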
 
 /**
  * Block the calling process until there is an event in the EQ.
@@ -277,7 +302,7 @@ LNetEQGet (lnet_handle_eq_t eventq, lnet_event_t *event)
  * \param event On successful return (1 or -EOVERFLOW), this location will
  * hold the next event in the EQ.
  *
- * \retval 1          Indicates success.
+ * \retval 1         Indicates success.
  * \retval -ENOENT    If \a eventq does not point to a valid EQ.
  * \retval -EOVERFLOW Indicates success (i.e., an event is returned) and that
  * at least one event between this event and the last event obtained from the
@@ -286,159 +311,39 @@ LNetEQGet (lnet_handle_eq_t eventq, lnet_event_t *event)
 int
 LNetEQWait (lnet_handle_eq_t eventq, lnet_event_t *event)
 {
-        int which;
+       int which;
 
-        return LNetEQPoll(&eventq, 1, LNET_TIME_FOREVER,
-                         event, &which);
+       return LNetEQPoll(&eventq, 1, MAX_SCHEDULE_TIMEOUT,
+                         event, &which);
 }
-
-#ifdef __KERNEL__
+EXPORT_SYMBOL(LNetEQWait);
 
 static int
-lnet_eq_wait_locked(int *timeout_ms)
+lnet_eq_wait_locked(signed long *timeout)
+__must_hold(&the_lnet.ln_eq_wait_lock)
 {
-       int              tms = *timeout_ms;
-       int              wait;
-       cfs_waitlink_t   wl;
-       cfs_time_t       now;
+       signed long tms = *timeout;
+       wait_queue_t wl;
+       int wait;
 
        if (tms == 0)
-               return -1; /* don't want to wait and no new event */
-
-       cfs_waitlink_init(&wl);
-       cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
-       cfs_waitq_add(&the_lnet.ln_eq_waitq, &wl);
+               return -ENXIO; /* don't want to wait and no new event */
 
-       lnet_res_unlock();
+       init_waitqueue_entry(&wl, current);
+       add_wait_queue(&the_lnet.ln_eq_waitq, &wl);
 
-       if (tms < 0) {
-               cfs_waitq_wait(&wl, CFS_TASK_INTERRUPTIBLE);
-
-       } else {
-               struct timeval tv;
-
-               now = cfs_time_current();
-               cfs_waitq_timedwait(&wl, CFS_TASK_INTERRUPTIBLE,
-                                   cfs_time_seconds(tms) / 1000);
-               cfs_duration_usec(cfs_time_sub(cfs_time_current(), now), &tv);
-               tms -= (int)(tv.tv_sec * 1000 + tv.tv_usec / 1000);
-               if (tms < 0) /* no more wait but may have new event */
-                       tms = 0;
-       }
+       lnet_eq_wait_unlock();
 
+       tms = schedule_timeout_interruptible(tms);
        wait = tms != 0; /* might need to call here again */
-       *timeout_ms = tms;
-
-       lnet_res_lock();
-       cfs_waitq_del(&the_lnet.ln_eq_waitq, &wl);
-
-       return wait;
-}
-
-#else /* !__KERNEL__ */
-
-# ifdef HAVE_LIBPTHREAD
-static void
-lnet_eq_cond_wait(struct timespec *ts)
-{
-       if (ts == NULL) {
-               pthread_cond_wait(&the_lnet.ln_eq_cond, &the_lnet.ln_res_lock);
-       } else {
-               pthread_cond_timedwait(&the_lnet.ln_eq_cond,
-                                      &the_lnet.ln_res_lock, ts);
-       }
-}
-# endif
-
-static int
-lnet_eq_wait_locked(int *timeout_ms)
-{
-       lnet_ni_t         *eq_waitni = NULL;
-       int                tms = *timeout_ms;
-       int                wait;
-       struct timeval     then;
-       struct timeval     now;
-
-       if (the_lnet.ln_eq_waitni != NULL) {
-               /* I have a single NI that I have to call into, to get
-                * events queued, or to block. */
-               lnet_res_unlock();
-
-               LNET_LOCK();
-               eq_waitni = the_lnet.ln_eq_waitni;
-               if (unlikely(eq_waitni == NULL)) {
-                       LNET_UNLOCK();
-
-                       lnet_res_lock();
-                       return -1;
-               }
-
-               lnet_ni_addref_locked(eq_waitni);
-               LNET_UNLOCK();
-
-               if (tms <= 0) { /* even for tms == 0 */
-                       (eq_waitni->ni_lnd->lnd_wait)(eq_waitni, tms);
-
-               } else {
-                       gettimeofday(&then, NULL);
-
-                       (eq_waitni->ni_lnd->lnd_wait)(eq_waitni, tms);
-
-                       gettimeofday(&now, NULL);
-                       tms -= (now.tv_sec - then.tv_sec) * 1000 +
-                              (now.tv_usec - then.tv_usec) / 1000;
-                       if (tms < 0)
-                               tms = 0;
-               }
-
-               lnet_ni_decref(eq_waitni);
-               lnet_res_lock();
-       } else { /* w/o eq_waitni */
-# ifndef HAVE_LIBPTHREAD
-               /* If I'm single-threaded, LNET fails at startup if it can't
-                * set the_lnet.ln_eqwaitni correctly.  */
-               LBUG();
-# else /* HAVE_LIBPTHREAD */
-               struct timespec  ts;
+       *timeout = tms;
 
-               if (tms == 0) /* don't want to wait and new event */
-                       return -1;
-
-               if (tms < 0) {
-                       lnet_eq_cond_wait(NULL);
-
-               } else {
-
-                       gettimeofday(&then, NULL);
-
-                       ts.tv_sec = then.tv_sec + tms / 1000;
-                       ts.tv_nsec = then.tv_usec * 1000 +
-                                    (tms % 1000) * 1000000;
-                       if (ts.tv_nsec >= 1000000000) {
-                               ts.tv_sec++;
-                               ts.tv_nsec -= 1000000000;
-                       }
-
-                       lnet_eq_cond_wait(&ts);
-
-                       gettimeofday(&now, NULL);
-                       tms -= (now.tv_sec - then.tv_sec) * 1000 +
-                              (now.tv_usec - then.tv_usec) / 1000;
-                       if (tms < 0)
-                               tms = 0;
-               }
-# endif /* HAVE_LIBPTHREAD */
-       }
-
-       wait = tms != 0;
-       *timeout_ms = tms;
+       lnet_eq_wait_lock();
+       remove_wait_queue(&the_lnet.ln_eq_waitq, &wl);
 
        return wait;
 }
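
Read against its single caller below, lnet_eq_wait_locked() has a three-valued contract; restated here as a comment in the caller's terms (a summary, not new behaviour):

        /* wait = lnet_eq_wait_locked(&timeout);
         *   -ENXIO : timeout budget already 0 -> stop, no new event coming
         *   1      : slept and woke with budget remaining -> dequeue again,
         *            and possibly sleep again
         *   0      : budget expired while asleep -> one final dequeue pass,
         *            then the next call returns -ENXIO and the loop ends
         */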
 
-#endif /* __KERNEL__ */
-
-
 /**
  * Block the calling process until there's an event from a set of EQs or
  * timeout happens.
@@ -451,59 +356,48 @@ lnet_eq_wait_locked(int *timeout_ms)
  * fixed period, or block indefinitely.
  *
  * \param eventqs,neq An array of EQ handles, and size of the array.
- * \param timeout_ms Time in milliseconds to wait for an event to occur on
- * one of the EQs. The constant LNET_TIME_FOREVER can be used to indicate an
+ * \param timeout Time in jiffies to wait for an event to occur on
+ * one of the EQs. The constant MAX_SCHEDULE_TIMEOUT can be used to indicate an
  * infinite timeout.
  * \param event,which On successful return (1 or -EOVERFLOW), \a event will
  * hold the next event in the EQs, and \a which will contain the index of the
  * EQ from which the event was taken.
  *
- * \retval 0          No pending event in the EQs after timeout.
- * \retval 1          Indicates success.
+ * \retval 0         No pending event in the EQs after timeout.
+ * \retval 1         Indicates success.
  * \retval -EOVERFLOW Indicates success (i.e., an event is returned) and that
  * at least one event between this event and the last event obtained from the
  * EQ indicated by \a which has been dropped due to limited space in the EQ.
  * \retval -ENOENT    If there's an invalid handle in \a eventqs.
  */
 int
-LNetEQPoll (lnet_handle_eq_t *eventqs, int neq, int timeout_ms,
-            lnet_event_t *event, int *which)
+LNetEQPoll(lnet_handle_eq_t *eventqs, int neq, signed long timeout,
+          lnet_event_t *event, int *which)
 {
        int     wait = 1;
        int     rc;
        int     i;
-        ENTRY;
-
-        LASSERT (the_lnet.ln_init);
-        LASSERT (the_lnet.ln_refcount > 0);
-
-        if (neq < 1)
-                RETURN(-ENOENT);
+       ENTRY;
 
-       lnet_res_lock();
+       LASSERT(the_lnet.ln_refcount > 0);
 
-        for (;;) {
-#ifndef __KERNEL__
-               lnet_res_unlock();
+       if (neq < 1)
+               RETURN(-ENOENT);
 
-               /* Recursion breaker */
-               if (the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING &&
-                   !LNetHandleIsEqual(eventqs[0], the_lnet.ln_rc_eqh))
-                       lnet_router_checker();
+       lnet_eq_wait_lock();
 
-               lnet_res_lock();
-#endif
+       for (;;) {
                for (i = 0; i < neq; i++) {
                        lnet_eq_t *eq = lnet_handle2eq(&eventqs[i]);
 
                        if (eq == NULL) {
-                               lnet_res_unlock();
+                               lnet_eq_wait_unlock();
                                RETURN(-ENOENT);
                        }
 
                        rc = lnet_eq_dequeue_event(eq, event);
                        if (rc != 0) {
-                               lnet_res_unlock();
+                               lnet_eq_wait_unlock();
                                *which = i;
                                RETURN(rc);
                        }
@@ -517,13 +411,13 @@ LNetEQPoll (lnet_handle_eq_t *eventqs, int neq, int timeout_ms,
                 * < 0 : did nothing and it is sure there is no new event
                 *  1 : sleep inside and wait until new event
                 *  0 : don't want to wait anymore, but might have new event
-                *      so need to call dequeue again
+                *      so need to call dequeue again
                 */
-               wait = lnet_eq_wait_locked(&timeout_ms);
+               wait = lnet_eq_wait_locked(&timeout);
                if (wait < 0) /* no new event */
                        break;
        }
 
-       lnet_res_unlock();
+       lnet_eq_wait_unlock();
        RETURN(0);
 }
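
Finally, a caller-side sketch of the reworked poll API; note that after this change the timeout is expressed in jiffies, not milliseconds. The two handles and handle_event() are hypothetical, and cfs_time_seconds() is assumed available from libcfs for the seconds-to-jiffies conversion:

        lnet_handle_eq_t eqhs[2] = { eqh_tx, eqh_rx };  /* hypothetical */
        lnet_event_t     ev;
        int              which;
        int              rc;

        /* Wait up to one second for an event on either EQ;
         * MAX_SCHEDULE_TIMEOUT would block indefinitely. */
        rc = LNetEQPoll(eqhs, 2, cfs_time_seconds(1), &ev, &which);
        if (rc == 1 || rc == -EOVERFLOW)
                handle_event(&ev, which);       /* hypothetical consumer */
        else if (rc == 0)
                ;       /* timed out with no pending event */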