1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
5 * Library level Event queue management routines
7 * Copyright (c) 2001-2003 Cluster File Systems, Inc.
9 * This file is part of Lustre, http://www.lustre.org
11 * Lustre is free software; you can redistribute it and/or
12 * modify it under the terms of version 2 of the GNU General Public
13 * License as published by the Free Software Foundation.
15 * Lustre is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
20 * You should have received a copy of the GNU General Public License
21 * along with Lustre; if not, write to the Free Software
22 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 #define DEBUG_SUBSYSTEM S_LNET
26 #include <lnet/lib-lnet.h>
/*
 * LNetEQAlloc - allocate an event queue with room for at least 'count'
 * events and return its handle through 'handle'; 'callback' is the
 * handler invoked for queued events (semantics not visible in this
 * fragment - confirm against the LNet API docs).
 *
 * NOTE(review): this chunk is a fragmentary extract - interior lines are
 * missing (the fused original line numbers jump) and the error-return /
 * locking statements are not visible.  Comments annotate only what is
 * shown; do not infer the missing control flow from them.
 */
29 LNetEQAlloc(unsigned int count, lnet_eq_handler_t callback,
30 lnet_handle_eq_t *handle)
/* Library must be initialised and hold at least one active reference. */
34 LASSERT (the_lnet.ln_init);
35 LASSERT (the_lnet.ln_refcount > 0);
37 /* We need count to be a power of 2 so that when eq_{enq,deq}_seq
38 * overflow, they don't skip entries, so the queue has the same
39 * apparent capacity at all times */
41 if (count != LOWEST_BIT_SET(count)) { /* not a power of 2 already */
42 do { /* knock off all but the top bit... */
43 count &= ~LOWEST_BIT_SET (count);
44 } while (count != LOWEST_BIT_SET(count));
46 count <<= 1; /* ...and round up */
/* count == 0 here means the caller passed 0, or the round-up above
 * shifted the top bit out (overflow); the error-return line that
 * follows is not visible in this fragment. */
49 if (count == 0) /* catch bad parameter / overflow on roundup */
/* Allocate the event ring; eq itself is allocated on a line not shown. */
56 LIBCFS_ALLOC(eq->eq_events, count * sizeof(lnet_event_t));
57 if (eq->eq_events == NULL) {
/* Zeroing the ring resets every slot's sequence number to 0 (original
 * comment is cut off mid-sentence in this extract). */
65 /* NB this resets all event sequence numbers to 0, to be earlier
67 memset(eq->eq_events, 0, count * sizeof(lnet_event_t));
73 eq->eq_callback = callback;
/* Register the EQ: give it a cookie-based handle and put it on the
 * global list of active EQs (locking around this is not visible here). */
77 lnet_initialise_handle (&eq->eq_lh, LNET_COOKIE_TYPE_EQ);
78 list_add (&eq->eq_list, &the_lnet.ln_active_eqs);
/* Export the opaque handle to the caller. */
82 lnet_eq2handle(handle, eq);
/*
 * LNetEQFree - tear down the event queue identified by 'eqh': invalidate
 * its handle, unlink it from the active-EQ list, and free its event ring.
 *
 * NOTE(review): fragmentary extract - the lock acquisition/release, the
 * busy-EQ error return, and 'size' capture are on lines not shown here.
 */
87 LNetEQFree(lnet_handle_eq_t eqh)
93 LASSERT (the_lnet.ln_init);
94 LASSERT (the_lnet.ln_refcount > 0);
/* Resolve the caller's opaque handle back to the eq object
 * (failure path for a stale handle is not visible in this fragment). */
98 eq = lnet_handle2eq(&eqh);
/* A non-zero refcount means MDs are still attached to this EQ;
 * the error return taken here is on a line not shown. */
104 if (eq->eq_refcount != 0) {
109 /* stash for free after lock dropped */
110 events = eq->eq_events;
/* Unregister: kill the cookie so stale handles no longer resolve,
 * then remove the EQ from the global active list. */
113 lnet_invalidate_handle (&eq->eq_lh);
114 list_del (&eq->eq_list);
/* Free the stashed event ring; 'size' presumably captured eq->eq_size
 * before the eq object was freed - confirm against the full source. */
119 LIBCFS_FREE(events, size * sizeof (lnet_event_t));
/*
 * lib_get_event - dequeue the next unseen event from 'eq' into 'ev'.
 *
 * The ring is a power-of-2 array indexed by the low bits of the dequeue
 * sequence number; each slot records the sequence at which it was filled,
 * so comparing eq_deq_seq against the slot's sequence tells us whether
 * the slot holds a new event, an already-seen one, or evidence that the
 * producer lapped us (overflow).
 *
 * NOTE(review): fragmentary extract - the return statements and the copy
 * of the slot into *ev are on lines not shown here.
 */
125 lib_get_event (lnet_eq_t *eq, lnet_event_t *ev)
/* eq_size is a power of 2 (enforced by LNetEQAlloc), so masking with
 * (eq_size - 1) maps the monotonically increasing sequence to a slot. */
127 int new_index = eq->eq_deq_seq & (eq->eq_size - 1);
128 lnet_event_t *new_event = &eq->eq_events[new_index];
132 CDEBUG(D_INFO, "event: %p, sequence: %lu, eq->size: %u\n",
133 new_event, eq->eq_deq_seq, eq->eq_size);
/* Slot sequence still behind our dequeue sequence: nothing new yet
 * (the empty-queue return is on a line not shown). */
135 if (LNET_SEQ_GT (eq->eq_deq_seq, new_event->sequence)) {
139 /* We've got a new event... */
142 /* ...but did it overwrite an event we've not seen yet? */
/* Slot sequence exactly equals eq_deq_seq only when the producer wrapped
 * the ring and overwrote an event we never consumed. */
143 if (eq->eq_deq_seq == new_event->sequence) {
146 /* don't complain with CERROR: some EQs are sized small
147 * anyway; if it's important, the caller should complain */
148 CDEBUG(D_NET, "Event Queue Overflow: eq seq %lu ev seq %lu\n",
149 eq->eq_deq_seq, new_event->sequence);
/* Resync: resume consuming just past the event we actually read,
 * skipping whatever the overflow destroyed. */
153 eq->eq_deq_seq = new_event->sequence + 1;
/*
 * LNetEQGet - non-blocking poll of a single EQ: delegates to LNetEQPoll
 * with neq = 1 and a zero timeout (trailing arguments of the call are on
 * lines not shown in this fragment).
 */
159 LNetEQGet (lnet_handle_eq_t eventq, lnet_event_t *event)
163 return LNetEQPoll(&eventq, 1, 0,
/*
 * LNetEQWait - blocking wait on a single EQ: delegates to LNetEQPoll
 * with neq = 1 and LNET_TIME_FOREVER as the timeout (trailing arguments
 * of the call are on lines not shown in this fragment).
 */
168 LNetEQWait (lnet_handle_eq_t eventq, lnet_event_t *event)
172 return LNetEQPoll(&eventq, 1, LNET_TIME_FOREVER,
/*
 * LNetEQPoll - poll 'neq' event queues for up to 'timeout_ms'
 * milliseconds (0 = non-blocking, negative = wait forever, per the
 * branches visible below).  On success an event is copied to *event and
 * *which presumably identifies the EQ it came from - confirm against the
 * LNet API documentation.
 *
 * Three wait strategies are visible, selected by build configuration:
 * kernel wait-queues (cfs_waitq_*), a single-NI lnd_wait() callback when
 * the_lnet.ln_eqwaitni is set, and pthread condition variables when
 * HAVE_LIBPTHREAD is not defined (userspace single/multi-thread builds).
 *
 * NOTE(review): fragmentary extract - this function is cut off before
 * its end and many interior lines (locking, returns, variable
 * declarations, loop structure) are missing.  Comments annotate only the
 * visible lines; do not infer the missing control flow.
 */
177 LNetEQPoll (lnet_handle_eq_t *eventqs, int neq, int timeout_ms,
178 lnet_event_t *event, int *which)
188 # ifdef HAVE_LIBPTHREAD
191 lnet_ni_t *eqwaitni = the_lnet.ln_eqwaitni;
195 LASSERT (the_lnet.ln_init);
196 LASSERT (the_lnet.ln_refcount > 0);
/* Scan every supplied EQ handle once, trying a non-blocking dequeue
 * (the success/invalid-handle paths are on lines not shown). */
204 for (i = 0; i < neq; i++) {
205 lnet_eq_t *eq = lnet_handle2eq(&eventqs[i]);
212 rc = lib_get_event (eq, event);
/* Zero timeout: caller asked for a pure poll, so give up without
 * sleeping (the return is on a line not shown). */
221 if (timeout_ms == 0) {
/* Kernel-style wait: enqueue ourselves on the global LNet wait queue
 * before re-checking, to avoid a lost-wakeup race. */
226 cfs_waitlink_init(&wl);
227 set_current_state(TASK_INTERRUPTIBLE);
228 cfs_waitq_add(&the_lnet.ln_waitq, &wl);
/* Negative timeout: wait with no deadline. */
232 if (timeout_ms < 0) {
233 cfs_waitq_wait (&wl, CFS_TASK_INTERRUPTIBLE);
/* Bounded wait: sleep, then subtract the actual elapsed time from
 * timeout_ms so repeated loop iterations honour the overall deadline. */
237 now = cfs_time_current();
238 cfs_waitq_timedwait(&wl, CFS_TASK_INTERRUPTIBLE,
239 cfs_time_seconds(timeout_ms)/1000);
240 cfs_duration_usec(cfs_time_sub(cfs_time_current(), now),
242 timeout_ms -= tv.tv_sec * 1000 + tv.tv_usec / 1000;
248 cfs_waitq_del(&the_lnet.ln_waitq, &wl);
/* Userspace single-NI case: block inside the LND itself to let it
 * queue events (or just to sleep). */
250 if (eqwaitni != NULL) {
251 /* I have a single NI that I have to call into, to get
252 * events queued, or to block. */
/* Pin the NI while we call into it (a lock is presumably held here -
 * the lock/unlock lines are not visible in this fragment). */
253 lnet_ni_addref_locked(eqwaitni);
256 if (timeout_ms <= 0) {
257 (eqwaitni->ni_lnd->lnd_wait)(eqwaitni, timeout_ms);
/* Positive timeout: time the lnd_wait call and deduct the elapsed
 * milliseconds from the remaining budget. */
259 gettimeofday(&then, NULL);
261 (eqwaitni->ni_lnd->lnd_wait)(eqwaitni, timeout_ms);
263 gettimeofday(&now, NULL);
264 timeout_ms -= (now.tv_sec - then.tv_sec) * 1000 +
265 (now.tv_usec - then.tv_usec) / 1000;
271 lnet_ni_decref_locked(eqwaitni);
273 /* don't call into eqwaitni again if timeout has
278 continue; /* go back and check for events */
/* Deadline exhausted during the waits above: stop polling
 * (the return is on a line not shown). */
281 if (timeout_ms == 0) {
286 # ifndef HAVE_LIBPTHREAD
287 /* If I'm single-threaded, LNET fails at startup if it can't
288 * set the_lnet.ln_eqwaitni correctly. */
/* pthread path: wait on the global LNet condition variable, either
 * indefinitely or until an absolute deadline built from 'now'. */
291 if (timeout_ms < 0) {
292 pthread_cond_wait(&the_lnet.ln_cond,
/* Convert the relative timeout_ms into the absolute timespec that
 * pthread_cond_timedwait() requires. */
295 gettimeofday(&then, NULL);
297 ts.tv_sec = then.tv_sec + timeout_ms/1000;
298 ts.tv_nsec = then.tv_usec * 1000 +
299 (timeout_ms%1000) * 1000000;
/* Normalise: carry nanosecond overflow into tv_sec (the tv_sec
 * increment is on a line not shown). */
300 if (ts.tv_nsec >= 1000000000) {
302 ts.tv_nsec -= 1000000000;
305 pthread_cond_timedwait(&the_lnet.ln_cond,
306 &the_lnet.ln_lock, &ts);
/* As in the other branches, charge the real elapsed time against
 * the caller's millisecond budget. */
308 gettimeofday(&now, NULL);
309 timeout_ms -= (now.tv_sec - then.tv_sec) * 1000 +
310 (now.tv_usec - then.tv_usec) / 1000;