/*
* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
{
lnet_eq_t *eq;
- LASSERT (the_lnet.ln_init);
LASSERT (the_lnet.ln_refcount > 0);
/* We need count to be a power of 2 so that when eq_{enq,deq}_seq
if (count != 0) {
LIBCFS_ALLOC(eq->eq_events, count * sizeof(lnet_event_t));
- if (eq->eq_events == NULL) {
- lnet_eq_free(eq);
- return -ENOMEM;
- }
+ if (eq->eq_events == NULL)
+ goto failed;
/* NB allocator has set all event sequence numbers to 0,
* so all of them should be earlier than eq_deq_seq */
}
eq->eq_deq_seq = 1;
eq->eq_enq_seq = 1;
eq->eq_size = count;
- eq->eq_refcount = 0;
eq->eq_callback = callback;
- lnet_res_lock();
+ eq->eq_refs = cfs_percpt_alloc(lnet_cpt_table(),
+ sizeof(*eq->eq_refs[0]));
+ if (eq->eq_refs == NULL)
+ goto failed;
+
+ /* MUST hold the lnet_res_lock exclusively (LNET_LOCK_EX) */
+ lnet_res_lock(LNET_LOCK_EX);
+ /* NB: hold lnet_eq_wait_lock for EQ link/unlink, so we can do
+ * both EQ lookup and event polling while holding only lnet_eq_wait_lock */
+ lnet_eq_wait_lock();
lnet_res_lh_initialize(&the_lnet.ln_eq_container, &eq->eq_lh);
- cfs_list_add(&eq->eq_list, &the_lnet.ln_eq_container.rec_active);
+ list_add(&eq->eq_list, &the_lnet.ln_eq_container.rec_active);
- lnet_res_unlock();
+ lnet_eq_wait_unlock();
+ lnet_res_unlock(LNET_LOCK_EX);
lnet_eq2handle(handle, eq);
return 0;
+
+failed:
+ if (eq->eq_events != NULL)
+ LIBCFS_FREE(eq->eq_events, count * sizeof(lnet_event_t));
+
+ if (eq->eq_refs != NULL)
+ cfs_percpt_free(eq->eq_refs);
+
+ lnet_eq_free(eq);
+ return -ENOMEM;
}
+EXPORT_SYMBOL(LNetEQAlloc);
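/*
 * Illustrative caller-side sketch, not part of this patch: allocate an
 * event queue that will be polled rather than driven by a callback.
 * Assumes LNet is already up; see the comment near the top of
 * LNetEQAlloc() for why "count" should be a power of 2.
 * example_eq_setup() is a made-up helper.
 */
static int example_eq_setup(lnet_handle_eq_t *eqh)
{
	/* room for 64 events; pass an lnet_eq_handler_t instead of
	 * LNET_EQ_HANDLER_NONE to get a callback at enqueue time */
	return LNetEQAlloc(64, LNET_EQ_HANDLER_NONE, eqh);
}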
/**
* Release the resources associated with an event queue if it's idle;
int
LNetEQFree(lnet_handle_eq_t eqh)
{
- lnet_eq_t *eq;
- int size;
- lnet_event_t *events;
+ struct lnet_eq *eq;
+ lnet_event_t *events = NULL;
+ int **refs = NULL;
+ int *ref;
+ int rc = 0;
+ int size = 0;
+ int i;
- LASSERT (the_lnet.ln_init);
- LASSERT (the_lnet.ln_refcount > 0);
+ LASSERT(the_lnet.ln_refcount > 0);
- lnet_res_lock();
+ lnet_res_lock(LNET_LOCK_EX);
+ /* NB: hold lnet_eq_wait_lock for EQ link/unlink, so we can do
+ * both EQ lookup and event polling while holding only lnet_eq_wait_lock */
+ lnet_eq_wait_lock();
eq = lnet_handle2eq(&eqh);
if (eq == NULL) {
- lnet_res_unlock();
- return -ENOENT;
+ rc = -ENOENT;
+ goto out;
}
- if (eq->eq_refcount != 0) {
- CDEBUG(D_NET, "Event queue (%d) busy on destroy.\n",
- eq->eq_refcount);
- lnet_res_unlock();
- return -EBUSY;
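+ /* NB: eq_refs (allocated with cfs_percpt_alloc() in LNetEQAlloc()) keeps
+ * one reference count per CPU partition; the EQ is only considered idle,
+ * and therefore freeable, when every partition's count is zero */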
+ cfs_percpt_for_each(ref, i, eq->eq_refs) {
+ LASSERT(*ref >= 0);
+ if (*ref == 0)
+ continue;
+
+ CDEBUG(D_NET, "Event queue (%d: %d) busy on destroy.\n",
+ i, *ref);
+ rc = -EBUSY;
+ goto out;
}
/* stash for free after lock dropped */
events = eq->eq_events;
size = eq->eq_size;
+ refs = eq->eq_refs;
lnet_res_lh_invalidate(&eq->eq_lh);
- cfs_list_del(&eq->eq_list);
- lnet_eq_free_locked(eq);
-
- lnet_res_unlock();
+ list_del(&eq->eq_list);
+ lnet_eq_free(eq);
+ out:
+ lnet_eq_wait_unlock();
+ lnet_res_unlock(LNET_LOCK_EX);
if (events != NULL)
LIBCFS_FREE(events, size * sizeof(lnet_event_t));
+ if (refs != NULL)
+ cfs_percpt_free(refs);
- return 0;
+ return rc;
}
+EXPORT_SYMBOL(LNetEQFree);
void
lnet_eq_enqueue_event(lnet_eq_t *eq, lnet_event_t *ev)
{
- /* MUST called with resource lock hold */
+ /* MUST be called with the resource lock held, but without lnet_eq_wait_lock */
int index;
if (eq->eq_size == 0) {
return;
}
+ lnet_eq_wait_lock();
ev->sequence = eq->eq_enq_seq++;
LASSERT(eq->eq_size == LOWEST_BIT_SET(eq->eq_size));
if (eq->eq_callback != LNET_EQ_HANDLER_NONE)
eq->eq_callback(ev);
-#ifdef __KERNEL__
/* Wake anyone waiting in LNetEQPoll() */
- if (cfs_waitq_active(&the_lnet.ln_eq_waitq))
- cfs_waitq_broadcast(&the_lnet.ln_eq_waitq);
-#else
-# ifndef HAVE_LIBPTHREAD
- /* LNetEQPoll() calls into _the_ LND to wait for action */
-# else
- /* Wake anyone waiting in LNetEQPoll() */
- pthread_cond_broadcast(&the_lnet.ln_eq_cond);
-# endif
-#endif
+ if (waitqueue_active(&the_lnet.ln_eq_waitq))
+ wake_up_all(&the_lnet.ln_eq_waitq);
+ lnet_eq_wait_unlock();
}
-int
+static int
lnet_eq_dequeue_event(lnet_eq_t *eq, lnet_event_t *ev)
{
- int new_index = eq->eq_deq_seq & (eq->eq_size - 1);
- lnet_event_t *new_event = &eq->eq_events[new_index];
- int rc;
- ENTRY;
+ int new_index = eq->eq_deq_seq & (eq->eq_size - 1);
+ lnet_event_t *new_event = &eq->eq_events[new_index];
+ int rc;
+ ENTRY;
- if (LNET_SEQ_GT (eq->eq_deq_seq, new_event->sequence)) {
- RETURN(0);
- }
+ /* must be called with lnet_eq_wait_lock held */
+ if (LNET_SEQ_GT(eq->eq_deq_seq, new_event->sequence))
+ RETURN(0);
- /* We've got a new event... */
- *ev = *new_event;
+ /* We've got a new event... */
+ *ev = *new_event;
CDEBUG(D_INFO, "event: %p, sequence: %lu, eq->size: %u\n",
new_event, eq->eq_deq_seq, eq->eq_size);
return LNetEQPoll(&eventq, 1, 0,
event, &which);
}
+EXPORT_SYMBOL(LNetEQGet);
/**
* Block the calling process until there is an event in the EQ.
return LNetEQPoll(&eventq, 1, LNET_TIME_FOREVER,
event, &which);
}
-
-#ifdef __KERNEL__
+EXPORT_SYMBOL(LNetEQWait);
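/*
 * Illustrative caller-side sketch, not part of this patch: wait up to
 * 100ms for an event on either of two EQs.  LNetEQPoll() sets *event and
 * *which when it dequeues something; -EOVERFLOW still delivers an event
 * but signals that earlier events were dropped from a full queue.
 * example_eq_poll() is a made-up helper.
 */
static int example_eq_poll(lnet_handle_eq_t eq1, lnet_handle_eq_t eq2)
{
	lnet_handle_eq_t eqs[2] = { eq1, eq2 };
	lnet_event_t	 event;
	int		 which;
	int		 rc;

	rc = LNetEQPoll(eqs, 2, 100, &event, &which);
	if (rc == 0)			/* timed out, no event */
		return -ETIMEDOUT;
	if (rc < 0 && rc != -EOVERFLOW)
		return rc;		/* e.g. -ENOENT for an invalid handle */

	CDEBUG(D_NET, "event type %d from EQ #%d\n", event.type, which);
	return 0;
}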
static int
lnet_eq_wait_locked(int *timeout_ms)
+__must_hold(&the_lnet.ln_eq_wait_lock)
{
- int tms = *timeout_ms;
- int wait;
- cfs_waitlink_t wl;
- cfs_time_t now;
+ int tms = *timeout_ms;
+ int wait;
+ wait_queue_t wl;
+ cfs_time_t now;
if (tms == 0)
return -1; /* caller doesn't want to wait and there's no new event */
- cfs_waitlink_init(&wl);
- cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
- cfs_waitq_add(&the_lnet.ln_eq_waitq, &wl);
+ init_waitqueue_entry_current(&wl);
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue(&the_lnet.ln_eq_waitq, &wl);
- lnet_res_unlock();
+ lnet_eq_wait_unlock();
if (tms < 0) {
- cfs_waitq_wait(&wl, CFS_TASK_INTERRUPTIBLE);
+ waitq_wait(&wl, TASK_INTERRUPTIBLE);
} else {
struct timeval tv;
now = cfs_time_current();
- cfs_waitq_timedwait(&wl, CFS_TASK_INTERRUPTIBLE,
+ waitq_timedwait(&wl, TASK_INTERRUPTIBLE,
cfs_time_seconds(tms) / 1000);
cfs_duration_usec(cfs_time_sub(cfs_time_current(), now), &tv);
tms -= (int)(tv.tv_sec * 1000 + tv.tv_usec / 1000);
wait = tms != 0; /* might need to call here again */
*timeout_ms = tms;
- lnet_res_lock();
- cfs_waitq_del(&the_lnet.ln_eq_waitq, &wl);
-
- return wait;
-}
-
-#else /* !__KERNEL__ */
-
-# ifdef HAVE_LIBPTHREAD
-static void
-lnet_eq_cond_wait(struct timespec *ts)
-{
- if (ts == NULL) {
- pthread_cond_wait(&the_lnet.ln_eq_cond, &the_lnet.ln_res_lock);
- } else {
- pthread_cond_timedwait(&the_lnet.ln_eq_cond,
- &the_lnet.ln_res_lock, ts);
- }
-}
-# endif
-
-static int
-lnet_eq_wait_locked(int *timeout_ms)
-{
- lnet_ni_t *eq_waitni = NULL;
- int tms = *timeout_ms;
- int wait;
- struct timeval then;
- struct timeval now;
-
- if (the_lnet.ln_eq_waitni != NULL) {
- /* I have a single NI that I have to call into, to get
- * events queued, or to block. */
- lnet_res_unlock();
-
- LNET_LOCK();
- eq_waitni = the_lnet.ln_eq_waitni;
- if (unlikely(eq_waitni == NULL)) {
- LNET_UNLOCK();
-
- lnet_res_lock();
- return -1;
- }
-
- lnet_ni_addref_locked(eq_waitni);
- LNET_UNLOCK();
-
- if (tms <= 0) { /* even for tms == 0 */
- (eq_waitni->ni_lnd->lnd_wait)(eq_waitni, tms);
-
- } else {
- gettimeofday(&then, NULL);
-
- (eq_waitni->ni_lnd->lnd_wait)(eq_waitni, tms);
-
- gettimeofday(&now, NULL);
- tms -= (now.tv_sec - then.tv_sec) * 1000 +
- (now.tv_usec - then.tv_usec) / 1000;
- if (tms < 0)
- tms = 0;
- }
-
- lnet_ni_decref(eq_waitni);
- lnet_res_lock();
- } else { /* w/o eq_waitni */
-# ifndef HAVE_LIBPTHREAD
- /* If I'm single-threaded, LNET fails at startup if it can't
- * set the_lnet.ln_eqwaitni correctly. */
- LBUG();
-# else /* HAVE_LIBPTHREAD */
- struct timespec ts;
-
- if (tms == 0) /* don't want to wait and new event */
- return -1;
-
- if (tms < 0) {
- lnet_eq_cond_wait(NULL);
-
- } else {
-
- gettimeofday(&then, NULL);
-
- ts.tv_sec = then.tv_sec + tms / 1000;
- ts.tv_nsec = then.tv_usec * 1000 +
- (tms % 1000) * 1000000;
- if (ts.tv_nsec >= 1000000000) {
- ts.tv_sec++;
- ts.tv_nsec -= 1000000000;
- }
-
- lnet_eq_cond_wait(&ts);
-
- gettimeofday(&now, NULL);
- tms -= (now.tv_sec - then.tv_sec) * 1000 +
- (now.tv_usec - then.tv_usec) / 1000;
- if (tms < 0)
- tms = 0;
- }
-# endif /* HAVE_LIBPTHREAD */
- }
-
- wait = tms != 0;
- *timeout_ms = tms;
+ lnet_eq_wait_lock();
+ remove_wait_queue(&the_lnet.ln_eq_waitq, &wl);
return wait;
}
-#endif /* __KERNEL__ */
-
-
/**
* Block the calling process until there's an event from a set of EQs or
* timeout happens.
* \retval -ENOENT If there's an invalid handle in \a eventqs.
*/
int
-LNetEQPoll (lnet_handle_eq_t *eventqs, int neq, int timeout_ms,
- lnet_event_t *event, int *which)
+LNetEQPoll(lnet_handle_eq_t *eventqs, int neq, int timeout_ms,
+ lnet_event_t *event, int *which)
{
int wait = 1;
int rc;
int i;
ENTRY;
- LASSERT (the_lnet.ln_init);
LASSERT (the_lnet.ln_refcount > 0);
if (neq < 1)
RETURN(-ENOENT);
- lnet_res_lock();
-
- for (;;) {
-#ifndef __KERNEL__
- lnet_res_unlock();
-
- /* Recursion breaker */
- if (the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING &&
- !LNetHandleIsEqual(eventqs[0], the_lnet.ln_rc_eqh))
- lnet_router_checker();
+ lnet_eq_wait_lock();
- lnet_res_lock();
-#endif
+ for (;;) {
for (i = 0; i < neq; i++) {
lnet_eq_t *eq = lnet_handle2eq(&eventqs[i]);
if (eq == NULL) {
- lnet_res_unlock();
+ lnet_eq_wait_unlock();
RETURN(-ENOENT);
}
rc = lnet_eq_dequeue_event(eq, event);
if (rc != 0) {
- lnet_res_unlock();
+ lnet_eq_wait_unlock();
*which = i;
RETURN(rc);
}
break;
}
- lnet_res_unlock();
+ lnet_eq_wait_unlock();
RETURN(0);
}