Whamcloud - gitweb
file jbd-stats-2.6.9.patch was initially added on branch b1_4.
[fs/lustre-release.git] / lnet / klnds / gmlnd / gmlnd_utils.c
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  *  Copyright (c) 2003 Los Alamos National Laboratory (LANL)
5  *
6  *   This file is part of Lustre, http://www.lustre.org/
7  *
8  *   Lustre is free software; you can redistribute it and/or
9  *   modify it under the terms of version 2 of the GNU General Public
10  *   License as published by the Free Software Foundation.
11  *
12  *   Lustre is distributed in the hope that it will be useful,
13  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
14  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  *   GNU General Public License for more details.
16  *
17  *   You should have received a copy of the GNU General Public License
18  *   along with Lustre; if not, write to the Free Software
19  *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20  */
21 /*
22  *      All utilities required by lgmanl
23  */
24
25 #include "gmnal.h"
26
27 /*
28  *      Am I one of the gmnal rxthreads ?
29  */
30 int
31 gmnal_is_rxthread(gmnal_ni_t *gmnalni)
32 {
33         int i;
34
35         for (i = 0; i < gmnalni->gmni_nrxthreads; i++)
36                 if (gmnalni->gmni_rxthread_pid[i] == current->pid)
37                         return 1;
38         return 0;
39 }
40
41 gmnal_tx_t *
42 gmnal_alloc_tx (gmnal_ni_t *gmnalni) 
43 {
44         gmnal_tx_t  *tx;
45         void        *buffer;
46         
47         PORTAL_ALLOC(tx, sizeof(*tx));
48         if (tx == NULL) {
49                 CERROR ("Failed to allocate tx\n");
50                 return NULL;
51         }
52         
53         buffer = gm_dma_malloc(gmnalni->gmni_port, gmnalni->gmni_msg_size);
54         if (buffer == NULL) {
55                 CERROR("Failed to gm_dma_malloc tx buffer size [%d]\n", 
56                        gmnalni->gmni_msg_size);
57                 PORTAL_FREE(tx, sizeof(*tx));
58                 return NULL;
59         }
60
61         memset(tx, 0, sizeof(*tx));
62         tx->tx_msg = (gmnal_msg_t *)buffer;
63         tx->tx_buffer_size = gmnalni->gmni_msg_size;
64         tx->tx_gm_size = gm_min_size_for_length(tx->tx_buffer_size);
65         tx->tx_gmni = gmnalni;
66
67         CDEBUG(D_NET, "Created tx [%p] with buffer [%p], size [%d]\n", 
68                tx, tx->tx_msg, tx->tx_buffer_size);
69
70         return tx;
71 }
72
73 void
74 gmnal_free_tx (gmnal_tx_t *tx)
75 {
76         gmnal_ni_t *gmnalni = tx->tx_gmni;
77         
78         CDEBUG(D_NET, "Freeing tx [%p] with buffer [%p], size [%d]\n", 
79                tx, tx->tx_msg, tx->tx_buffer_size);
80 #if 0
81         /* We free buffers after we've closed the GM port */
82         gm_dma_free(gmnalni->gmni_port, tx->tx_msg);
83 #endif
84         PORTAL_FREE(tx, sizeof(*tx));
85 }
86
/*
 *      Pre-allocate the transmit descriptor pools: a general pool of
 *      num_txds descriptors and a separate pool of num_txds + 1
 *      descriptors flagged for the rx threads (tx_rxt = 1) --
 *      presumably so receive processing never blocks on the general
 *      pool; confirm against the rx path.
 *      Returns 0 on success, -ENOMEM on failure.
 */
int
gmnal_alloc_txs(gmnal_ni_t *gmnalni)
{
        int           ntxcred = gm_num_send_tokens(gmnalni->gmni_port);
        int           ntx;
        int           nrxt_tx;
        int           i;
        gmnal_tx_t   *tx;

        CWARN("ntxcred: %d\n", ntxcred);

        ntx = num_txds;
        nrxt_tx = num_txds + 1;

        /* both pools draw on the port's send tokens; fail early if GM
         * cannot cover them */
        if (ntx + nrxt_tx > ntxcred) {
                CERROR ("Asked for %d + %d tx credits, but only %d available\n",
                        ntx, nrxt_tx, ntxcred);
                return -ENOMEM;
        }
        
        /* A semaphore is initialised with the number of transmit tokens
         * available.  To get a stxd, acquire the token semaphore.  this
         * decrements the available token count (if no tokens you block here,
         * someone returning a stxd will release the semaphore and wake you)
         * When token is obtained acquire the spinlock to manipulate the
         * list */
        sema_init(&gmnalni->gmni_tx_token, ntx);
        spin_lock_init(&gmnalni->gmni_tx_lock);
        LASSERT (gmnalni->gmni_tx == NULL);

        /* NOTE(review): "<=" allocates ntx + 1 descriptors for ntx
         * semaphore tokens (one spare), while the credit check above
         * only accounts for ntx -- confirm this is intentional */
        for (i = 0; i <= ntx; i++) {
                tx = gmnal_alloc_tx(gmnalni);
                if (tx == NULL) {
                        /* descriptors already linked onto gmni_tx are
                         * presumably reclaimed by the caller via
                         * gmnal_free_txs() -- verify */
                        CERROR("Failed to create tx %d\n", i);
                        return -ENOMEM;
                }
                
                tx->tx_rxt = 0;                 /* general-pool descriptor */
                tx->tx_next = gmnalni->gmni_tx;
                gmnalni->gmni_tx = tx;
        }

        sema_init(&gmnalni->gmni_rxt_tx_token, nrxt_tx);
        spin_lock_init(&gmnalni->gmni_rxt_tx_lock);
        LASSERT (gmnalni->gmni_rxt_tx == NULL);

        for (i = 0; i <= nrxt_tx; i++) {
                tx = gmnal_alloc_tx(gmnalni);
                if (tx == NULL) {
                        CERROR("Failed to create tx %d + %d\n", ntx, i);
                        return -ENOMEM;
                }

                tx->tx_rxt = 1;                 /* reserved for rx threads */
                tx->tx_next = gmnalni->gmni_rxt_tx;
                gmnalni->gmni_rxt_tx = tx;
        }

        return 0;
}
147
148 void
149 gmnal_free_txs(gmnal_ni_t *gmnalni)
150 {
151         gmnal_tx_t *tx;
152
153         while ((tx = gmnalni->gmni_tx) != NULL) {
154                 gmnalni->gmni_tx = tx->tx_next;
155                 gmnal_free_tx (tx);
156         }
157
158         while ((tx = gmnalni->gmni_rxt_tx) != NULL) {
159                 gmnalni->gmni_rxt_tx = tx->tx_next;
160                 gmnal_free_tx (tx);
161         }
162 }
163
164
/*
 *      Get a tx from the list
 *      This get us a wired and gm_registered small tx buffer.
 *      This implicitly gets us a send token also.
 *
 *      rx threads always draw (blocking) from the reserved rxt pool;
 *      other callers use the general pool, where 'block' selects
 *      blocking vs. try-lock acquisition of the token.  Returns NULL
 *      only in the non-blocking case when no token is available.
 */
gmnal_tx_t *
gmnal_get_tx(gmnal_ni_t *gmnalni, int block)
{

        gmnal_tx_t      *tx = NULL;
        pid_t           pid = current->pid;


        CDEBUG(D_TRACE, "gmnal_get_tx gmnalni [%p] block[%d] pid [%d]\n", 
               gmnalni, block, pid);

        if (gmnal_is_rxthread(gmnalni)) {
                CDEBUG(D_NET, "RXTHREAD Attempting to get token\n");
                /* a token was counted per descriptor in
                 * gmnal_alloc_txs(), so once we hold one the list
                 * should be non-empty -- no NULL check on the head */
                down(&gmnalni->gmni_rxt_tx_token);
                spin_lock(&gmnalni->gmni_rxt_tx_lock);
                tx = gmnalni->gmni_rxt_tx;
                gmnalni->gmni_rxt_tx = tx->tx_next;
                spin_unlock(&gmnalni->gmni_rxt_tx_lock);
                CDEBUG(D_NET, "RXTHREAD got [%p], head is [%p]\n", 
                       tx, gmnalni->gmni_rxt_tx);
                tx->tx_rxt = 1;
        } else {
                if (block) {
                        CDEBUG(D_NET, "Attempting to get token\n");
                        down(&gmnalni->gmni_tx_token);
                        CDEBUG(D_PORTALS, "Got token\n");
                } else {
                        /* non-blocking: give up immediately if no
                         * send token is free */
                        if (down_trylock(&gmnalni->gmni_tx_token)) {
                                CERROR("can't get token\n");
                                return(NULL);
                        }
                }
                spin_lock(&gmnalni->gmni_tx_lock);
                tx = gmnalni->gmni_tx;
                gmnalni->gmni_tx = tx->tx_next;
                spin_unlock(&gmnalni->gmni_tx_lock);
                CDEBUG(D_NET, "got [%p], head is [%p]\n", tx,
                       gmnalni->gmni_tx);
        }       /* general tx get */

        return tx;
}
212
213 /*
214  *      Return a tx to the list
215  */
216 void
217 gmnal_return_tx(gmnal_ni_t *gmnalni, gmnal_tx_t *tx)
218 {
219         CDEBUG(D_TRACE, "gmnalni [%p], tx[%p] rxt[%d]\n", gmnalni,
220                tx, tx->tx_rxt);
221
222         /*
223          *      this transmit descriptor is 
224          *      for the rxthread
225          */
226         if (tx->tx_rxt) {
227                 spin_lock(&gmnalni->gmni_rxt_tx_lock);
228                 tx->tx_next = gmnalni->gmni_rxt_tx;
229                 gmnalni->gmni_rxt_tx = tx;
230                 spin_unlock(&gmnalni->gmni_rxt_tx_lock);
231                 up(&gmnalni->gmni_rxt_tx_token);
232                 CDEBUG(D_NET, "Returned tx to rxthread list\n");
233         } else {
234                 spin_lock(&gmnalni->gmni_tx_lock);
235                 tx->tx_next = gmnalni->gmni_tx;
236                 gmnalni->gmni_tx = tx;
237                 spin_unlock(&gmnalni->gmni_tx_lock);
238                 up(&gmnalni->gmni_tx_token);
239                 CDEBUG(D_NET, "Returned tx to general list\n");
240         }
241         return;
242 }
243
244
/*
 *      allocate a number of small rx buffers and register with GM
 *      so they are wired and set up for DMA. This is a costly operation.
 *      Also allocate a corrosponding descriptor to keep track of 
 *      the buffer.
 *      Put all descriptors on singly linked list to be available to 
 *      receive thread.
 *
 *      Returns 0 on success, -ENOMEM on any failure.  Descriptors
 *      already linked onto gmni_rx when an error hits are presumably
 *      reclaimed by the caller via gmnal_free_rxs() -- verify.
 */
int
gmnal_alloc_rxs (gmnal_ni_t *gmnalni)
{
        int          nrxcred = gm_num_receive_tokens(gmnalni->gmni_port);
        int          nrx;
        int          i;
        gmnal_rx_t  *rxd;
        void        *rxbuffer;

        CWARN("nrxcred: %d\n", nrxcred);

        nrx = num_txds*2 + 2;
        if (nrx > nrxcred) {
                CERROR("Can't allocate %d rx credits: (%d available)\n",
                       nrx, nrxcred);
                return -ENOMEM;
        }

        CDEBUG(D_NET, "Allocated [%d] receive tokens to small messages\n", nrx);

        /* maps DMA buffer address -> descriptor; consulted by
         * gmnal_enqueue_rx() on receive completion */
        gmnalni->gmni_rx_hash = gm_create_hash(gm_hash_compare_ptrs, 
                                               gm_hash_hash_ptr, 0, 0, nrx, 0);
        if (gmnalni->gmni_rx_hash == NULL) {
                CERROR("Failed to create hash table\n");
                return -ENOMEM;
        }

        LASSERT (gmnalni->gmni_rx == NULL);

        /* NOTE(review): "<=" allocates nrx + 1 descriptors although
         * only nrx credits were checked above -- confirm intentional */
        for (i=0; i <= nrx; i++) {

                PORTAL_ALLOC(rxd, sizeof(*rxd));
                if (rxd == NULL) {
                        CERROR("Failed to malloc rxd [%d]\n", i);
                        return -ENOMEM;
                }

                rxbuffer = gm_dma_malloc(gmnalni->gmni_port, 
                                         gmnalni->gmni_msg_size);
                if (rxbuffer == NULL) {
                        CERROR("Failed to gm_dma_malloc rxbuffer [%d], "
                               "size [%d]\n",i ,gmnalni->gmni_msg_size);
                        PORTAL_FREE(rxd, sizeof(*rxd));
                        return -ENOMEM;
                }

                rxd->rx_msg = (gmnal_msg_t *)rxbuffer;
                rxd->rx_size = gmnalni->gmni_msg_size;
                rxd->rx_gmsize = gm_min_size_for_length(rxd->rx_size);

                /* linked before the hash insert, so the descriptor is
                 * reachable for cleanup even if the insert fails */
                rxd->rx_next = gmnalni->gmni_rx;
                gmnalni->gmni_rx = rxd;

                if (gm_hash_insert(gmnalni->gmni_rx_hash,
                                   (void*)rxbuffer, (void*)rxd)) {
                        CERROR("failed to create hash entry rxd[%p] "
                               "for rxbuffer[%p]\n", rxd, rxbuffer);
                        return -ENOMEM;
                }

                CDEBUG(D_NET, "Registered rxd [%p] with buffer [%p], "
                       "size [%d]\n", rxd, rxd->rx_msg, rxd->rx_size);
        }

        return 0;
}
319
320 void
321 gmnal_free_rxs(gmnal_ni_t *gmnalni)
322 {
323         gmnal_rx_t *rx;
324
325         CDEBUG(D_TRACE, "gmnal_free_small rx\n");
326
327         while ((rx = gmnalni->gmni_rx) != NULL) {
328                 gmnalni->gmni_rx = rx->rx_next;
329
330                 CDEBUG(D_NET, "Freeing rxd [%p] buffer [%p], size [%d]\n",
331                        rx, rx->rx_msg, rx->rx_size);
332 #if 0
333                 /* We free buffers after we've shutdown the GM port */
334                 gm_dma_free(gmnalni->gmni_port, _rxd->rx_msg);
335 #endif
336                 PORTAL_FREE(rx, sizeof(*rx));
337         }
338
339 #if 0
340         /* see above */
341         if (gmnalni->gmni_rx_hash != NULL)
342                 gm_destroy_hash(gmnalni->gmni_rx_hash);
343 #endif
344 }
345
/*
 *      Ask every gmnal thread (caretaker + rx threads) to exit, then
 *      busy-wait (yielding) until gmni_nthreads drops to zero.
 */
void
gmnal_stop_threads(gmnal_ni_t *gmnalni)
{
        int count = 2;
        int i;

        gmnalni->gmni_thread_shutdown = 1;

        /* wake ctthread with an alarm */
        spin_lock(&gmnalni->gmni_gm_lock);
        gm_set_alarm(gmnalni->gmni_port, &gmnalni->gmni_ctthread_alarm, 
                     0, NULL, NULL);
        spin_unlock(&gmnalni->gmni_gm_lock);

        /* wake each rxthread */
        for (i = 0; i < num_online_cpus(); i++)
                up(&gmnalni->gmni_rxq_wait);
        
        while (atomic_read(&gmnalni->gmni_nthreads) != 0) {
                count++;
                /* warn only when 'count' hits a power of two, so the
                 * log message backs off exponentially while waiting */
                if ((count & (count - 1)) == 0)
                        CWARN("Waiting for %d threads to stop\n",
                              atomic_read(&gmnalni->gmni_nthreads));
                gmnal_yield(1);
        }
}
372
/*
 *      Start the caretaker thread and a number of receiver threads
 *      The caretaker thread gets events from the gm library.
 *      It passes receive events to the receiver threads via a work list.
 *      It processes other events itself in gm_unknown. These will be
 *      callback events or sleeps.
 *
 *      Returns 0 on success or the negative kernel_thread() error.
 *      If an rx thread fails to start, the threads started so far are
 *      stopped via gmnal_stop_threads() before returning.
 */
int
gmnal_start_threads(gmnal_ni_t *gmnalni)
{
        int     i;
        int     pid;

        gmnalni->gmni_thread_shutdown = 0;
        gmnalni->gmni_nrxthreads = 0;
        atomic_set(&gmnalni->gmni_nthreads, 0);

        /* receive work queue shared between caretaker and rx threads;
         * the semaphore counts queued work items */
        INIT_LIST_HEAD(&gmnalni->gmni_rxq);
        spin_lock_init(&gmnalni->gmni_rxq_lock);
        sema_init(&gmnalni->gmni_rxq_wait, 0);

        /*
         *      the alarm is used to wake the caretaker thread from 
         *      gm_unknown call (sleeping) to exit it.
         */
        CDEBUG(D_NET, "Initializing caretaker thread alarm and flag\n");
        gm_initialize_alarm(&gmnalni->gmni_ctthread_alarm);

        pid = kernel_thread(gmnal_ct_thread, (void*)gmnalni, 0);
        if (pid < 0) {
                CERROR("Caretaker thread failed to start: %d\n", pid);
                return pid;
        }
        atomic_inc(&gmnalni->gmni_nthreads);

        /* one rx thread per online CPU */
        for (i = 0; i < num_online_cpus(); i++) {

                pid = kernel_thread(gmnal_rx_thread, (void*)gmnalni, 0);
                if (pid < 0) {
                        CERROR("rx thread failed to start: %d\n", pid);
                        gmnal_stop_threads(gmnalni);
                        return pid;
                }

                atomic_inc(&gmnalni->gmni_nthreads);
                gmnalni->gmni_rxthread_pid[i] = pid;
                gmnalni->gmni_nrxthreads++;
        }

        return 0;
}
424
425 char * 
426 gmnal_gmstatus2str(gm_status_t status)
427 {
428         return(gm_strerror(status));
429
430         switch(status) {
431         case(GM_SUCCESS):
432                 return("SUCCESS");
433         case(GM_FAILURE):
434                 return("FAILURE");
435         case(GM_INPUT_BUFFER_TOO_SMALL):
436                 return("INPUT_BUFFER_TOO_SMALL");
437         case(GM_OUTPUT_BUFFER_TOO_SMALL):
438                 return("OUTPUT_BUFFER_TOO_SMALL");
439         case(GM_TRY_AGAIN ):
440                 return("TRY_AGAIN");
441         case(GM_BUSY):
442                 return("BUSY");
443         case(GM_MEMORY_FAULT):
444                 return("MEMORY_FAULT");
445         case(GM_INTERRUPTED):
446                 return("INTERRUPTED");
447         case(GM_INVALID_PARAMETER):
448                 return("INVALID_PARAMETER");
449         case(GM_OUT_OF_MEMORY):
450                 return("OUT_OF_MEMORY");
451         case(GM_INVALID_COMMAND):
452                 return("INVALID_COMMAND");
453         case(GM_PERMISSION_DENIED):
454                 return("PERMISSION_DENIED");
455         case(GM_INTERNAL_ERROR):
456                 return("INTERNAL_ERROR");
457         case(GM_UNATTACHED):
458                 return("UNATTACHED");
459         case(GM_UNSUPPORTED_DEVICE):
460                 return("UNSUPPORTED_DEVICE");
461         case(GM_SEND_TIMED_OUT):
462                 return("GM_SEND_TIMEDOUT");
463         case(GM_SEND_REJECTED):
464                 return("GM_SEND_REJECTED");
465         case(GM_SEND_TARGET_PORT_CLOSED):
466                 return("GM_SEND_TARGET_PORT_CLOSED");
467         case(GM_SEND_TARGET_NODE_UNREACHABLE):
468                 return("GM_SEND_TARGET_NODE_UNREACHABLE");
469         case(GM_SEND_DROPPED):
470                 return("GM_SEND_DROPPED");
471         case(GM_SEND_PORT_CLOSED):
472                 return("GM_SEND_PORT_CLOSED");
473         case(GM_NODE_ID_NOT_YET_SET):
474                 return("GM_NODE_ID_NOT_YET_SET");
475         case(GM_STILL_SHUTTING_DOWN):
476                 return("GM_STILL_SHUTTING_DOWN");
477         case(GM_CLONE_BUSY):
478                 return("GM_CLONE_BUSY");
479         case(GM_NO_SUCH_DEVICE):
480                 return("GM_NO_SUCH_DEVICE");
481         case(GM_ABORTED):
482                 return("GM_ABORTED");
483         case(GM_INCOMPATIBLE_LIB_AND_DRIVER):
484                 return("GM_INCOMPATIBLE_LIB_AND_DRIVER");
485         case(GM_UNTRANSLATED_SYSTEM_ERROR):
486                 return("GM_UNTRANSLATED_SYSTEM_ERROR");
487         case(GM_ACCESS_DENIED):
488                 return("GM_ACCESS_DENIED");
489
490         
491         /*
492          *      These ones are in the docs but aren't in the header file 
493          case(GM_DEV_NOT_FOUND):
494          return("GM_DEV_NOT_FOUND");
495          case(GM_INVALID_PORT_NUMBER):
496          return("GM_INVALID_PORT_NUMBER");
497          case(GM_UC_ERROR):
498          return("GM_US_ERROR");
499          case(GM_PAGE_TABLE_FULL):
500          return("GM_PAGE_TABLE_FULL");
501          case(GM_MINOR_OVERFLOW):
502          return("GM_MINOR_OVERFLOW");
503          case(GM_SEND_ORPHANED):
504          return("GM_SEND_ORPHANED");
505          case(GM_HARDWARE_FAULT):
506          return("GM_HARDWARE_FAULT");
507          case(GM_DATA_CORRUPTED):
508          return("GM_DATA_CORRUPTED");
509          case(GM_TIMED_OUT):
510          return("GM_TIMED_OUT");
511          case(GM_USER_ERROR):
512          return("GM_USER_ERROR");
513          case(GM_NO_MATCH):
514          return("GM_NOMATCH");
515          case(GM_NOT_SUPPORTED_IN_KERNEL):
516          return("GM_NOT_SUPPORTED_IN_KERNEL");
517          case(GM_NOT_SUPPORTED_ON_ARCH):
518          return("GM_NOT_SUPPORTED_ON_ARCH");
519          case(GM_PTE_REF_CNT_OVERFLOW):
520          return("GM_PTR_REF_CNT_OVERFLOW");
521          case(GM_NO_DRIVER_SUPPORT):
522          return("GM_NO_DRIVER_SUPPORT");
523          case(GM_FIRMWARE_NOT_RUNNING):
524          return("GM_FIRMWARE_NOT_RUNNING");
525          *      These ones are in the docs but aren't in the header file 
526          */
527
528         default:
529                 return("UNKNOWN GM ERROR CODE");
530         }
531 }
532
533
534 char *
535 gmnal_rxevent2str(gm_recv_event_t *ev)
536 {
537         short   event;
538         event = GM_RECV_EVENT_TYPE(ev);
539         switch(event) {
540         case(GM_NO_RECV_EVENT):
541                 return("GM_NO_RECV_EVENT");
542         case(GM_SENDS_FAILED_EVENT):
543                 return("GM_SEND_FAILED_EVENT");
544         case(GM_ALARM_EVENT):
545                 return("GM_ALARM_EVENT");
546         case(GM_SENT_EVENT):
547                 return("GM_SENT_EVENT");
548         case(_GM_SLEEP_EVENT):
549                 return("_GM_SLEEP_EVENT");
550         case(GM_RAW_RECV_EVENT):
551                 return("GM_RAW_RECV_EVENT");
552         case(GM_BAD_SEND_DETECTED_EVENT):
553                 return("GM_BAD_SEND_DETECTED_EVENT");
554         case(GM_SEND_TOKEN_VIOLATION_EVENT):
555                 return("GM_SEND_TOKEN_VIOLATION_EVENT");
556         case(GM_RECV_TOKEN_VIOLATION_EVENT):
557                 return("GM_RECV_TOKEN_VIOLATION_EVENT");
558         case(GM_BAD_RECV_TOKEN_EVENT):
559                 return("GM_BAD_RECV_TOKEN_EVENT");
560         case(GM_ALARM_VIOLATION_EVENT):
561                 return("GM_ALARM_VIOLATION_EVENT");
562         case(GM_RECV_EVENT):
563                 return("GM_RECV_EVENT");
564         case(GM_HIGH_RECV_EVENT):
565                 return("GM_HIGH_RECV_EVENT");
566         case(GM_PEER_RECV_EVENT):
567                 return("GM_PEER_RECV_EVENT");
568         case(GM_HIGH_PEER_RECV_EVENT):
569                 return("GM_HIGH_PEER_RECV_EVENT");
570         case(GM_FAST_RECV_EVENT):
571                 return("GM_FAST_RECV_EVENT");
572         case(GM_FAST_HIGH_RECV_EVENT):
573                 return("GM_FAST_HIGH_RECV_EVENT");
574         case(GM_FAST_PEER_RECV_EVENT):
575                 return("GM_FAST_PEER_RECV_EVENT");
576         case(GM_FAST_HIGH_PEER_RECV_EVENT):
577                 return("GM_FAST_HIGH_PEER_RECV_EVENT");
578         case(GM_REJECTED_SEND_EVENT):
579                 return("GM_REJECTED_SEND_EVENT");
580         case(GM_ORPHANED_SEND_EVENT):
581                 return("GM_ORPHANED_SEND_EVENT");
582         case(GM_BAD_RESEND_DETECTED_EVENT):
583                 return("GM_BAD_RESEND_DETETED_EVENT");
584         case(GM_DROPPED_SEND_EVENT):
585                 return("GM_DROPPED_SEND_EVENT");
586         case(GM_BAD_SEND_VMA_EVENT):
587                 return("GM_BAD_SEND_VMA_EVENT");
588         case(GM_BAD_RECV_VMA_EVENT):
589                 return("GM_BAD_RECV_VMA_EVENT");
590         case(_GM_FLUSHED_ALARM_EVENT):
591                 return("GM_FLUSHED_ALARM_EVENT");
592         case(GM_SENT_TOKENS_EVENT):
593                 return("GM_SENT_TOKENS_EVENTS");
594         case(GM_IGNORE_RECV_EVENT):
595                 return("GM_IGNORE_RECV_EVENT");
596         case(GM_ETHERNET_RECV_EVENT):
597                 return("GM_ETHERNET_RECV_EVENT");
598         case(GM_NEW_NO_RECV_EVENT):
599                 return("GM_NEW_NO_RECV_EVENT");
600         case(GM_NEW_SENDS_FAILED_EVENT):
601                 return("GM_NEW_SENDS_FAILED_EVENT");
602         case(GM_NEW_ALARM_EVENT):
603                 return("GM_NEW_ALARM_EVENT");
604         case(GM_NEW_SENT_EVENT):
605                 return("GM_NEW_SENT_EVENT");
606         case(_GM_NEW_SLEEP_EVENT):
607                 return("GM_NEW_SLEEP_EVENT");
608         case(GM_NEW_RAW_RECV_EVENT):
609                 return("GM_NEW_RAW_RECV_EVENT");
610         case(GM_NEW_BAD_SEND_DETECTED_EVENT):
611                 return("GM_NEW_BAD_SEND_DETECTED_EVENT");
612         case(GM_NEW_SEND_TOKEN_VIOLATION_EVENT):
613                 return("GM_NEW_SEND_TOKEN_VIOLATION_EVENT");
614         case(GM_NEW_RECV_TOKEN_VIOLATION_EVENT):
615                 return("GM_NEW_RECV_TOKEN_VIOLATION_EVENT");
616         case(GM_NEW_BAD_RECV_TOKEN_EVENT):
617                 return("GM_NEW_BAD_RECV_TOKEN_EVENT");
618         case(GM_NEW_ALARM_VIOLATION_EVENT):
619                 return("GM_NEW_ALARM_VIOLATION_EVENT");
620         case(GM_NEW_RECV_EVENT):
621                 return("GM_NEW_RECV_EVENT");
622         case(GM_NEW_HIGH_RECV_EVENT):
623                 return("GM_NEW_HIGH_RECV_EVENT");
624         case(GM_NEW_PEER_RECV_EVENT):
625                 return("GM_NEW_PEER_RECV_EVENT");
626         case(GM_NEW_HIGH_PEER_RECV_EVENT):
627                 return("GM_NEW_HIGH_PEER_RECV_EVENT");
628         case(GM_NEW_FAST_RECV_EVENT):
629                 return("GM_NEW_FAST_RECV_EVENT");
630         case(GM_NEW_FAST_HIGH_RECV_EVENT):
631                 return("GM_NEW_FAST_HIGH_RECV_EVENT");
632         case(GM_NEW_FAST_PEER_RECV_EVENT):
633                 return("GM_NEW_FAST_PEER_RECV_EVENT");
634         case(GM_NEW_FAST_HIGH_PEER_RECV_EVENT):
635                 return("GM_NEW_FAST_HIGH_PEER_RECV_EVENT");
636         case(GM_NEW_REJECTED_SEND_EVENT):
637                 return("GM_NEW_REJECTED_SEND_EVENT");
638         case(GM_NEW_ORPHANED_SEND_EVENT):
639                 return("GM_NEW_ORPHANED_SEND_EVENT");
640         case(_GM_NEW_PUT_NOTIFICATION_EVENT):
641                 return("_GM_NEW_PUT_NOTIFICATION_EVENT");
642         case(GM_NEW_FREE_SEND_TOKEN_EVENT):
643                 return("GM_NEW_FREE_SEND_TOKEN_EVENT");
644         case(GM_NEW_FREE_HIGH_SEND_TOKEN_EVENT):
645                 return("GM_NEW_FREE_HIGH_SEND_TOKEN_EVENT");
646         case(GM_NEW_BAD_RESEND_DETECTED_EVENT):
647                 return("GM_NEW_BAD_RESEND_DETECTED_EVENT");
648         case(GM_NEW_DROPPED_SEND_EVENT):
649                 return("GM_NEW_DROPPED_SEND_EVENT");
650         case(GM_NEW_BAD_SEND_VMA_EVENT):
651                 return("GM_NEW_BAD_SEND_VMA_EVENT");
652         case(GM_NEW_BAD_RECV_VMA_EVENT):
653                 return("GM_NEW_BAD_RECV_VMA_EVENT");
654         case(_GM_NEW_FLUSHED_ALARM_EVENT):
655                 return("GM_NEW_FLUSHED_ALARM_EVENT");
656         case(GM_NEW_SENT_TOKENS_EVENT):
657                 return("GM_NEW_SENT_TOKENS_EVENT");
658         case(GM_NEW_IGNORE_RECV_EVENT):
659                 return("GM_NEW_IGNORE_RECV_EVENT");
660         case(GM_NEW_ETHERNET_RECV_EVENT):
661                 return("GM_NEW_ETHERNET_RECV_EVENT");
662         default:
663                 return("Unknown Recv event");
664         /* _GM_PUT_NOTIFICATION_EVENT */
665         /* GM_FREE_SEND_TOKEN_EVENT */
666         /* GM_FREE_HIGH_SEND_TOKEN_EVENT */
667         }
668 }
669
670
/*
 *      Yield the CPU for 'delay' scheduler ticks.  TASK_INTERRUPTIBLE
 *      means the sleep may end early if a signal is delivered.
 */
void
gmnal_yield(int delay)
{
        set_current_state(TASK_INTERRUPTIBLE);
        schedule_timeout(delay);
}
677
678 int
679 gmnal_enqueue_rx(gmnal_ni_t *gmnalni, gm_recv_t *recv)
680 {
681         void       *ptr = gm_ntohp(recv->buffer);
682         gmnal_rx_t *rx = gm_hash_find(gmnalni->gmni_rx_hash, ptr);
683
684         /* No locking; hash is read-only */
685
686         LASSERT (rx != NULL);
687         LASSERT (rx->rx_msg == (gmnal_msg_t *)ptr);
688
689         rx->rx_recv_nob = gm_ntohl(recv->length);
690         rx->rx_recv_gmid = gm_ntoh_u16(recv->sender_node_id);
691         rx->rx_recv_port = gm_ntoh_u8(recv->sender_port_id);
692         rx->rx_recv_type = gm_ntoh_u8(recv->type);
693         
694         spin_lock(&gmnalni->gmni_rxq_lock);
695         list_add_tail (&rx->rx_list, &gmnalni->gmni_rxq);
696         spin_unlock(&gmnalni->gmni_rxq_lock);
697
698         up(&gmnalni->gmni_rxq_wait);
699         return 0;
700 }
701
/*
 *      Block until a receive descriptor is available on the work
 *      queue (or shutdown is requested) and return it.  Returns NULL
 *      only on shutdown.  Called by the rx threads.
 */
gmnal_rx_t *
gmnal_dequeue_rx(gmnal_ni_t *gmnalni)
{
        gmnal_rx_t      *rx;

        CDEBUG(D_NET, "Getting entry to list\n");

        for (;;) {
                /* keep waiting if interrupted by a signal */
                while(down_interruptible(&gmnalni->gmni_rxq_wait) != 0)
                        /* do nothing */;

                /* gmnal_stop_threads() sets this flag and then ups
                 * the semaphore once per rx thread to wake us */
                if (gmnalni->gmni_thread_shutdown)
                        return NULL;

                spin_lock(&gmnalni->gmni_rxq_lock);

                if (list_empty(&gmnalni->gmni_rxq)) {
                        rx = NULL;
                } else {
                        rx = list_entry(gmnalni->gmni_rxq.next,
                                        gmnal_rx_t, rx_list);
                        list_del(&rx->rx_list);
                }

                spin_unlock(&gmnalni->gmni_rxq_lock);

                if (rx != NULL)
                        return rx;
                
                /* spurious wakeup: semaphore was up'ed but the queue
                 * was already drained by another thread */
                CWARN("woken but no work\n");
        }
}
734
735