X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;ds=sidebyside;f=lnet%2Flnet%2Flib-eq.c;h=3bca6b77539a6aa8bfc68a43d6801275ed82bd1d;hb=9b7ca14430edf84f8c031b1489b3c0d2fb617535;hp=8cfff7b8079cbef885f8f5c7ebc438f55da5b845;hpb=72057a3af19ee02d9a686bd7e7d074917e381310;p=fs%2Flustre-release.git

diff --git a/lnet/lnet/lib-eq.c b/lnet/lnet/lib-eq.c
index 8cfff7b..3bca6b7 100644
--- a/lnet/lnet/lib-eq.c
+++ b/lnet/lnet/lib-eq.c
@@ -23,7 +23,7 @@
  * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  *
- * Copyright (c) 2012, Intel Corporation.
+ * Copyright (c) 2012, 2016, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -64,9 +64,9 @@
  */
 int
 LNetEQAlloc(unsigned int count, lnet_eq_handler_t callback,
-            lnet_handle_eq_t *handle)
+            struct lnet_handle_eq *handle)
 {
-        lnet_eq_t *eq;
+        struct lnet_eq *eq;
 
         LASSERT(the_lnet.ln_refcount > 0);
 
@@ -94,7 +94,7 @@ LNetEQAlloc(unsigned int count, lnet_eq_handler_t callback,
                 return -ENOMEM;
 
         if (count != 0) {
-                LIBCFS_ALLOC(eq->eq_events, count * sizeof(lnet_event_t));
+                LIBCFS_ALLOC(eq->eq_events, count * sizeof(struct lnet_event));
                 if (eq->eq_events == NULL)
                         goto failed;
                 /* NB allocator has set all event sequence numbers to 0,
@@ -128,7 +128,7 @@ LNetEQAlloc(unsigned int count, lnet_eq_handler_t callback,
 
 failed:
         if (eq->eq_events != NULL)
-                LIBCFS_FREE(eq->eq_events, count * sizeof(lnet_event_t));
+                LIBCFS_FREE(eq->eq_events, count * sizeof(struct lnet_event));
 
         if (eq->eq_refs != NULL)
                 cfs_percpt_free(eq->eq_refs);
@@ -149,10 +149,10 @@ EXPORT_SYMBOL(LNetEQAlloc);
  * \retval -EBUSY If the EQ is still in use by some MDs.
  */
 int
-LNetEQFree(lnet_handle_eq_t eqh)
+LNetEQFree(struct lnet_handle_eq eqh)
 {
         struct lnet_eq *eq;
-        lnet_event_t *events = NULL;
+        struct lnet_event *events = NULL;
         int **refs = NULL;
         int *ref;
         int rc = 0;
@@ -196,7 +196,7 @@ LNetEQFree(lnet_handle_eq_t eqh)
         lnet_res_unlock(LNET_LOCK_EX);
 
         if (events != NULL)
-                LIBCFS_FREE(events, size * sizeof(lnet_event_t));
+                LIBCFS_FREE(events, size * sizeof(struct lnet_event));
         if (refs != NULL)
                 cfs_percpt_free(refs);
 
@@ -205,7 +205,7 @@ EXPORT_SYMBOL(LNetEQFree);
 
 void
-lnet_eq_enqueue_event(lnet_eq_t *eq, lnet_event_t *ev)
+lnet_eq_enqueue_event(struct lnet_eq *eq, struct lnet_event *ev)
 {
         /* MUST called with resource lock hold but w/o lnet_eq_wait_lock */
         int index;
 
@@ -234,10 +234,10 @@ lnet_eq_enqueue_event(lnet_eq_t *eq, lnet_event_t *ev)
 }
 
 static int
-lnet_eq_dequeue_event(lnet_eq_t *eq, lnet_event_t *ev)
+lnet_eq_dequeue_event(struct lnet_eq *eq, struct lnet_event *ev)
 {
         int new_index = eq->eq_deq_seq & (eq->eq_size - 1);
-        lnet_event_t *new_event = &eq->eq_events[new_index];
+        struct lnet_event *new_event = &eq->eq_events[new_index];
         int rc;
         ENTRY;
 
@@ -283,7 +283,7 @@ lnet_eq_dequeue_event(lnet_eq_t *eq, lnet_event_t *ev)
  *      EQ has been dropped due to limited space in the EQ.
  */
 int
-LNetEQGet (lnet_handle_eq_t eventq, lnet_event_t *event)
+LNetEQGet(struct lnet_handle_eq eventq, struct lnet_event *event)
 {
         int which;
 
@@ -309,48 +309,34 @@ EXPORT_SYMBOL(LNetEQGet);
  *      EQ has been dropped due to limited space in the EQ.
  */
 int
-LNetEQWait (lnet_handle_eq_t eventq, lnet_event_t *event)
+LNetEQWait(struct lnet_handle_eq eventq, struct lnet_event *event)
 {
         int which;
 
-        return LNetEQPoll(&eventq, 1, LNET_TIME_FOREVER,
-                          event, &which);
+        return LNetEQPoll(&eventq, 1, MAX_SCHEDULE_TIMEOUT,
+                          event, &which);
 }
 EXPORT_SYMBOL(LNetEQWait);
 
 static int
-lnet_eq_wait_locked(int *timeout_ms)
+lnet_eq_wait_locked(signed long *timeout)
 __must_hold(&the_lnet.ln_eq_wait_lock)
 {
-        int tms = *timeout_ms;
-        int wait;
-        wait_queue_t wl;
-        cfs_time_t now;
+        signed long tms = *timeout;
+        wait_queue_entry_t wl;
+        int wait;
 
         if (tms == 0)
                 return -ENXIO; /* don't want to wait and no new event */
 
         init_waitqueue_entry(&wl, current);
-        set_current_state(TASK_INTERRUPTIBLE);
         add_wait_queue(&the_lnet.ln_eq_waitq, &wl);
 
         lnet_eq_wait_unlock();
 
-        if (tms < 0) {
-                schedule();
-        } else {
-                struct timeval tv;
-
-                now = cfs_time_current();
-                schedule_timeout(cfs_time_seconds(tms) / 1000);
-                cfs_duration_usec(cfs_time_sub(cfs_time_current(), now), &tv);
-                tms -= (int)(tv.tv_sec * 1000 + tv.tv_usec / 1000);
-                if (tms < 0) /* no more wait but may have new event */
-                        tms = 0;
-        }
-
+        tms = schedule_timeout_interruptible(tms);
         wait = tms != 0; /* might need to call here again */
-        *timeout_ms = tms;
+        *timeout = tms;
 
         lnet_eq_wait_lock();
         remove_wait_queue(&the_lnet.ln_eq_waitq, &wl);
@@ -370,8 +356,8 @@ __must_hold(&the_lnet.ln_eq_wait_lock)
  * fixed period, or block indefinitely.
  *
  * \param eventqs,neq An array of EQ handles, and size of the array.
- * \param timeout_ms Time in milliseconds to wait for an event to occur on
- * one of the EQs. The constant LNET_TIME_FOREVER can be used to indicate an
+ * \param timeout Time in jiffies to wait for an event to occur on
+ * one of the EQs. The constant MAX_SCHEDULE_TIMEOUT can be used to indicate an
 * infinite timeout.
 * \param event,which On successful return (1 or -EOVERFLOW), \a event will
 * hold the next event in the EQs, and \a which will contain the index of the
@@ -385,8 +371,8 @@ __must_hold(&the_lnet.ln_eq_wait_lock)
 * \retval -ENOENT If there's an invalid handle in \a eventqs.
 */
 int
-LNetEQPoll(lnet_handle_eq_t *eventqs, int neq, int timeout_ms,
-           lnet_event_t *event, int *which)
+LNetEQPoll(struct lnet_handle_eq *eventqs, int neq, signed long timeout,
+           struct lnet_event *event, int *which)
 {
         int wait = 1;
         int rc;
 
@@ -402,7 +388,7 @@ LNetEQPoll(lnet_handle_eq_t *eventqs, int neq, int timeout_ms,
 
         for (;;) {
                 for (i = 0; i < neq; i++) {
-                        lnet_eq_t *eq = lnet_handle2eq(&eventqs[i]);
+                        struct lnet_eq *eq = lnet_handle2eq(&eventqs[i]);
 
                         if (eq == NULL) {
                                 lnet_eq_wait_unlock();
@@ -427,7 +413,7 @@ LNetEQPoll(lnet_handle_eq_t *eventqs, int neq, int timeout_ms,
                  * 0 : don't want to wait anymore, but might have new event
                  * so need to call dequeue again */
-                wait = lnet_eq_wait_locked(&timeout_ms);
+                wait = lnet_eq_wait_locked(&timeout);
 
                 if (wait < 0) /* no new event */
                         break;
         }
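Beyond the mechanical lnet_*_t-to-struct renames, the functional change in this patch is the timeout API: LNetEQPoll() and lnet_eq_wait_locked() now take a timeout in jiffies (signed long) rather than milliseconds, LNET_TIME_FOREVER gives way to the kernel's MAX_SCHEDULE_TIMEOUT, and the hand-rolled elapsed-time accounting around schedule_timeout() is replaced by a single schedule_timeout_interruptible() call, which returns the unconsumed jiffies directly. Below is a minimal caller-side sketch of what the new contract means in practice; the poll_one_eq() wrapper, the <lnet/api.h> include, and the 100 ms figure are illustrative assumptions, not part of this patch:

#include <linux/jiffies.h>      /* msecs_to_jiffies() */
#include <linux/sched.h>        /* MAX_SCHEDULE_TIMEOUT */
#include <lnet/api.h>           /* LNetEQPoll() and LNet types (assumed header) */

/* Hypothetical in-kernel caller, not part of the patch. */
static int poll_one_eq(struct lnet_handle_eq eqh)
{
        struct lnet_event ev;
        int which;
        int rc;

        /* Pre-patch callers passed "100" meaning 100 ms; post-patch the
         * conversion to jiffies is explicit at the call site. */
        rc = LNetEQPoll(&eqh, 1, msecs_to_jiffies(100), &ev, &which);

        if (rc == 0)    /* timed out: retry with an infinite wait */
                rc = LNetEQPoll(&eqh, 1, MAX_SCHEDULE_TIMEOUT,
                                &ev, &which);

        return rc;      /* 1: got event, -EOVERFLOW: event(s) dropped */
}

The same convention is what makes the retry loop inside LNetEQPoll() work: schedule_timeout_interruptible() hands back the remaining jiffies, so storing that through *timeout lets repeated calls to lnet_eq_wait_locked() count the budget down to zero across spurious wakeups, while MAX_SCHEDULE_TIMEOUT is returned unchanged and thus keeps the wait indefinite.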