1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
5 * Library level Event queue management routines
7 * Copyright (c) 2001-2003 Cluster File Systems, Inc.
9 * This file is part of Lustre, http://www.lustre.org
11 * Lustre is free software; you can redistribute it and/or
12 * modify it under the terms of version 2 of the GNU General Public
13 * License as published by the Free Software Foundation.
15 * Lustre is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
20 * You should have received a copy of the GNU General Public License
21 * along with Lustre; if not, write to the Free Software
22 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 #define DEBUG_SUBSYSTEM S_LNET
26 #include <lnet/lib-lnet.h>
29 LNetEQAlloc(unsigned int count, lnet_eq_handler_t callback,
30 lnet_handle_eq_t *handle)
/* Allocate an event queue with 'count' slots (rounded up to a power of
 * 2), optionally invoking 'callback' for each event delivered, and
 * return the new EQ to the caller through 'handle'. */
34 LASSERT (the_lnet.ln_init);
35 LASSERT (the_lnet.ln_refcount > 0);
37 /* We need count to be a power of 2 so that when eq_{enq,deq}_seq
38 * overflow, they don't skip entries, so the queue has the same
39 * apparent capacity at all times */
41 if (count != LOWEST_BIT_SET(count)) { /* not a power of 2 already */
42 do { /* knock off all but the top bit... */
43 count &= ~LOWEST_BIT_SET (count);
44 } while (count != LOWEST_BIT_SET(count));
46 count <<= 1; /* ...and round up */
/* count == 0 means the caller passed 0, or the round-up above shifted
 * the top bit out and wrapped to zero. */
49 if (count == 0) /* catch bad parameter / overflow on roundup */
/* Event ring buffer: 'count' lnet_event_t slots. */
56 LIBCFS_ALLOC(eq->eq_events, count * sizeof(lnet_event_t));
57 if (eq->eq_events == NULL) {
65 /* NB this resets all event sequence numbers to 0, to be earlier
67 memset(eq->eq_events, 0, count * sizeof(lnet_event_t));
73 eq->eq_callback = callback;
/* Give the EQ a cookie-based handle and publish it on the global list
 * of active event queues before handing it back to the caller. */
77 lnet_initialise_handle (&eq->eq_lh, LNET_COOKIE_TYPE_EQ);
78 list_add (&eq->eq_list, &the_lnet.ln_active_eqs);
82 lnet_eq2handle(handle, eq);
87 LNetEQFree(lnet_handle_eq_t eqh)
/* Tear down the event queue identified by 'eqh'.  The EQ must be idle:
 * if anything still holds a reference the free is refused. */
93 LASSERT (the_lnet.ln_init);
94 LASSERT (the_lnet.ln_refcount > 0);
98 eq = lnet_handle2eq(&eqh);
/* Still referenced (e.g. by attached MDs): refuse to destroy; the
 * caller must drain/detach and retry. */
104 if (eq->eq_refcount != 0) {
105 CDEBUG(D_NET, "Event queue (%d) busy on destroy.\n",
111 /* stash for free after lock dropped */
112 events = eq->eq_events;
/* Invalidate the cookie and unlink from the global active-EQ list so
 * no new lookups can find this EQ before the memory is released. */
115 lnet_invalidate_handle (&eq->eq_lh);
116 list_del (&eq->eq_list);
/* Free the stashed event ring now that the lock has been dropped. */
121 LIBCFS_FREE(events, size * sizeof (lnet_event_t));
127 lib_get_event (lnet_eq_t *eq, lnet_event_t *ev)
/* Dequeue the next unseen event from 'eq' into '*ev'.
 * eq_size is a power of 2 (enforced by LNetEQAlloc), so masking the
 * dequeue sequence number gives the ring-buffer slot index directly. */
129 int new_index = eq->eq_deq_seq & (eq->eq_size - 1);
130 lnet_event_t *new_event = &eq->eq_events[new_index];
134 CDEBUG(D_INFO, "event: %p, sequence: %lu, eq->size: %u\n",
135 new_event, eq->eq_deq_seq, eq->eq_size);
/* The slot's sequence number is still behind our dequeue sequence:
 * nothing new has been enqueued in this slot yet. */
137 if (LNET_SEQ_GT (eq->eq_deq_seq, new_event->sequence)) {
141 /* We've got a new event... */
144 /* ...but did it overwrite an event we've not seen yet? */
145 if (eq->eq_deq_seq == new_event->sequence) {
148 /* don't complain with CERROR: some EQs are sized small
149 * anyway; if it's important, the caller should complain */
150 CDEBUG(D_NET, "Event Queue Overflow: eq seq %lu ev seq %lu\n",
151 eq->eq_deq_seq, new_event->sequence);
/* Resume dequeueing just past the event returned (or, on overflow,
 * past the events that were lost). */
155 eq->eq_deq_seq = new_event->sequence + 1;
161 LNetEQGet (lnet_handle_eq_t eventq, lnet_event_t *event)
/* Non-blocking fetch of the next event on 'eventq': implemented as a
 * poll on this single EQ with a zero timeout. */
165 return LNetEQPoll(&eventq, 1, 0,
170 LNetEQWait (lnet_handle_eq_t eventq, lnet_event_t *event)
/* Block until an event arrives on 'eventq': implemented as a poll on
 * this single EQ with an infinite (LNET_TIME_FOREVER) timeout. */
174 return LNetEQPoll(&eventq, 1, LNET_TIME_FOREVER,
179 LNetEQPoll (lnet_handle_eq_t *eventqs, int neq, int timeout_ms,
180 lnet_event_t *event, int *which)
/* Wait up to 'timeout_ms' milliseconds (0 = don't block, negative =
 * wait forever) for an event on any of the 'neq' queues in 'eventqs'.
 * The event is copied into '*event' and '*which' reports which queue
 * it came from.  Three wait strategies are compiled in below: kernel
 * waitqueue, userspace LND-driven wait, and pthread condvar wait. */
190 # ifdef HAVE_LIBPTHREAD
193 lnet_ni_t *eqwaitni = the_lnet.ln_eqwaitni;
197 LASSERT (the_lnet.ln_init);
198 LASSERT (the_lnet.ln_refcount > 0);
/* Scan every queue once; the first pending event found wins. */
206 for (i = 0; i < neq; i++) {
207 lnet_eq_t *eq = lnet_handle2eq(&eventqs[i]);
214 rc = lib_get_event (eq, event);
/* No event pending and the caller asked not to block. */
223 if (timeout_ms == 0) {
/* Kernel path: sleep on the global LNet waitqueue, either forever
 * (timeout_ms < 0) or for the remaining timeout. */
228 cfs_waitlink_init(&wl);
229 set_current_state(TASK_INTERRUPTIBLE);
230 cfs_waitq_add(&the_lnet.ln_waitq, &wl);
234 if (timeout_ms < 0) {
235 cfs_waitq_wait (&wl, CFS_TASK_INTERRUPTIBLE);
239 now = cfs_time_current();
240 cfs_waitq_timedwait(&wl, CFS_TASK_INTERRUPTIBLE,
241 cfs_time_seconds(timeout_ms)/1000);
/* Charge the time actually slept against the caller's timeout. */
242 cfs_duration_usec(cfs_time_sub(cfs_time_current(), now),
244 timeout_ms -= tv.tv_sec * 1000 + tv.tv_usec / 1000;
250 cfs_waitq_del(&the_lnet.ln_waitq, &wl);
/* Userspace path with a dedicated wait NI: the LND must be called
 * into to queue events or to block on the caller's behalf. */
252 if (eqwaitni != NULL) {
253 /* I have a single NI that I have to call into, to get
254 * events queued, or to block. */
255 lnet_ni_addref_locked(eqwaitni);
258 if (timeout_ms <= 0) {
259 (eqwaitni->ni_lnd->lnd_wait)(eqwaitni, timeout_ms);
261 gettimeofday(&then, NULL);
263 (eqwaitni->ni_lnd->lnd_wait)(eqwaitni, timeout_ms);
265 gettimeofday(&now, NULL);
/* Deduct real elapsed wall-clock time from the remaining timeout. */
266 timeout_ms -= (now.tv_sec - then.tv_sec) * 1000 +
267 (now.tv_usec - then.tv_usec) / 1000;
273 lnet_ni_decref_locked(eqwaitni);
275 /* don't call into eqwaitni again if timeout has
280 continue; /* go back and check for events */
283 if (timeout_ms == 0) {
288 # ifndef HAVE_LIBPTHREAD
289 /* If I'm single-threaded, LNET fails at startup if it can't
290 * set the_lnet.ln_eqwaitni correctly. */
/* Pthread path: block on the LNet condition variable, either forever
 * or until an absolute deadline computed as now + timeout_ms. */
293 if (timeout_ms < 0) {
294 pthread_cond_wait(&the_lnet.ln_cond,
297 gettimeofday(&then, NULL);
299 ts.tv_sec = then.tv_sec + timeout_ms/1000;
300 ts.tv_nsec = then.tv_usec * 1000 +
301 (timeout_ms%1000) * 1000000;
/* Normalize the timespec if the nanosecond field overflowed 1s. */
302 if (ts.tv_nsec >= 1000000000) {
304 ts.tv_nsec -= 1000000000;
307 pthread_cond_timedwait(&the_lnet.ln_cond,
308 &the_lnet.ln_lock, &ts);
310 gettimeofday(&now, NULL);
311 timeout_ms -= (now.tv_sec - then.tv_sec) * 1000 +
312 (now.tv_usec - then.tv_usec) / 1000;