Whamcloud - gitweb
e007a32ee2f1d6d011089c0f8544935903f82adf
[fs/lustre-release.git] / lnet / klnds / gmlnd / gmlnd_utils.c
1 /*
2  * -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
3  * vim:expandtab:shiftwidth=8:tabstop=8:
4  *
5  * GPL HEADER START
6  *
7  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License version 2 only,
11  * as published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful, but
14  * WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16  * General Public License version 2 for more details (a copy is included
17  * in the LICENSE file that accompanied this code).
18  *
19  * You should have received a copy of the GNU General Public License
20  * version 2 along with this program; If not, see [sun.com URL with a
21  * copy of GPLv2].
22  *
23  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
24  * CA 95054 USA or visit www.sun.com if you need additional information or
25  * have any questions.
26  *
27  * GPL HEADER END
28  */
29 /*
30  * Copyright  2008 Sun Microsystems, Inc. All rights reserved
31  * Use is subject to license terms.
32  *
33  * Copyright (c) 2003 Los Alamos National Laboratory (LANL)
34  */
35 /*
36  * This file is part of Lustre, http://www.lustre.org/
37  * Lustre is a trademark of Sun Microsystems, Inc.
38  */
39
40 #include "gmlnd.h"
41
42 void
43 gmnal_free_netbuf_pages (gmnal_netbuf_t *nb, int npages) 
44 {
45         int     i;
46         
47         for (i = 0; i < npages; i++)
48                 __free_page(nb->nb_kiov[i].kiov_page);
49 }
50
/*
 * Allocate 'npages' kernel pages into nb->nb_kiov[] and register each
 * with GM at consecutive network addresses carved out of
 * gmni->gmni_netaddr_base.  On success nb->nb_netaddr holds the GM
 * network address of the first page and gmni_netaddr_base has advanced
 * by npages * PAGE_SIZE.  Returns 0 or -ENOMEM.
 *
 * On failure every page allocated so far is freed.  Pages already
 * registered with GM are not deregistered here (per the note in
 * gmnal_free_rxs, GM releases all resources allocated to a port when
 * the port closes) and the consumed netaddr range is not reclaimed.
 */
int
gmnal_alloc_netbuf_pages (gmnal_ni_t *gmni, gmnal_netbuf_t *nb, int npages)
{
        int          i;
        gm_status_t  gmrc;

        LASSERT (npages > 0);

        for (i = 0; i < npages; i++) {
                nb->nb_kiov[i].kiov_page = alloc_page(GFP_KERNEL);
                nb->nb_kiov[i].kiov_offset = 0;
                nb->nb_kiov[i].kiov_len = PAGE_SIZE;

                if (nb->nb_kiov[i].kiov_page == NULL) {
                        CERROR("Can't allocate page\n");
                        /* free the i pages from earlier iterations */
                        gmnal_free_netbuf_pages(nb, i);
                        return -ENOMEM;
                }

                CDEBUG(D_NET,"[%3d] page %p, phys "LPX64", @ "LPX64"\n",
                       i, nb->nb_kiov[i].kiov_page, 
                       lnet_page2phys(nb->nb_kiov[i].kiov_page),
                       gmni->gmni_netaddr_base);

                gmrc = gm_register_memory_ex_phys(
                        gmni->gmni_port,
                        lnet_page2phys(nb->nb_kiov[i].kiov_page),
                        PAGE_SIZE,
                        gmni->gmni_netaddr_base);
                CDEBUG(D_NET,"[%3d] page %p: %d\n", 
                       i, nb->nb_kiov[i].kiov_page, gmrc);

                if (gmrc != GM_SUCCESS) {
                        CERROR("Can't map page: %d(%s)\n", gmrc,
                               gmnal_gmstatus2str(gmrc));
                        /* i+1: include the current (unregistered) page */
                        gmnal_free_netbuf_pages(nb, i+1);
                        return -ENOMEM;
                }

                /* the buffer's net address is that of its first page */
                if (i == 0) 
                        nb->nb_netaddr = gmni->gmni_netaddr_base;

                gmni->gmni_netaddr_base += PAGE_SIZE;
        }

        return 0;
}
98
99 void
100 gmnal_free_ltxbuf (gmnal_ni_t *gmni, gmnal_txbuf_t *txb)
101 {
102         int            npages = gmni->gmni_large_pages;
103
104         LASSERT (gmni->gmni_port == NULL);
105         /* No unmapping; the port has been closed */
106
107         gmnal_free_netbuf_pages(&txb->txb_buf, gmni->gmni_large_pages);
108         LIBCFS_FREE(txb, offsetof(gmnal_txbuf_t, txb_buf.nb_kiov[npages]));
109 }
110
111 int
112 gmnal_alloc_ltxbuf (gmnal_ni_t *gmni)
113 {
114         int            npages = gmni->gmni_large_pages;
115         int            sz = offsetof(gmnal_txbuf_t, txb_buf.nb_kiov[npages]);
116         gmnal_txbuf_t *txb;
117         int            rc;
118
119         LIBCFS_ALLOC(txb, sz);
120         if (txb == NULL) {
121                 CERROR("Can't allocate large txbuffer\n");
122                 return -ENOMEM;
123         }
124
125         rc = gmnal_alloc_netbuf_pages(gmni, &txb->txb_buf, npages);
126         if (rc != 0) {
127                 LIBCFS_FREE(txb, sz);
128                 return rc;
129         }
130
131         list_add_tail(&txb->txb_list, &gmni->gmni_idle_ltxbs);
132
133         txb->txb_next = gmni->gmni_ltxbs;
134         gmni->gmni_ltxbs = txb;
135
136         return 0;
137 }
138
139 void
140 gmnal_free_tx (gmnal_tx_t *tx)
141 {
142         LASSERT (tx->tx_gmni->gmni_port == NULL);
143
144         gmnal_free_netbuf_pages(&tx->tx_buf, 1);
145         LIBCFS_FREE(tx, sizeof(*tx));
146 }
147
148 int
149 gmnal_alloc_tx (gmnal_ni_t *gmni) 
150 {
151         gmnal_tx_t  *tx;
152         int          rc;
153         
154         LIBCFS_ALLOC(tx, sizeof(*tx));
155         if (tx == NULL) {
156                 CERROR("Failed to allocate tx\n");
157                 return -ENOMEM;
158         }
159         
160         memset(tx, 0, sizeof(*tx));
161
162         rc = gmnal_alloc_netbuf_pages(gmni, &tx->tx_buf, 1);
163         if (rc != 0) {
164                 LIBCFS_FREE(tx, sizeof(*tx));
165                 return -ENOMEM;
166         }
167
168         tx->tx_gmni = gmni;
169         
170         list_add_tail(&tx->tx_list, &gmni->gmni_idle_txs);
171
172         tx->tx_next = gmni->gmni_txs;
173         gmni->gmni_txs = tx;
174                 
175         return 0;
176 }
177
178 void
179 gmnal_free_rx(gmnal_ni_t *gmni, gmnal_rx_t *rx)
180 {
181         int   npages = rx->rx_islarge ? gmni->gmni_large_pages : 1;
182         
183         LASSERT (gmni->gmni_port == NULL);
184
185         gmnal_free_netbuf_pages(&rx->rx_buf, npages);
186         LIBCFS_FREE(rx, offsetof(gmnal_rx_t, rx_buf.nb_kiov[npages]));
187 }
188
/*
 * Allocate one rx descriptor (small or large), map its pages with GM
 * and make it findable from its GM network address via the rx hash.
 * Returns 0 on success or -ENOMEM.
 */
int
gmnal_alloc_rx (gmnal_ni_t *gmni, int islarge)
{
        int         npages = islarge ? gmni->gmni_large_pages : 1;
        int         sz = offsetof(gmnal_rx_t, rx_buf.nb_kiov[npages]);
        int         rc;
        gmnal_rx_t *rx;
        gm_status_t gmrc;
        
        LIBCFS_ALLOC(rx, sz);
        if (rx == NULL) {
                CERROR("Failed to allocate rx\n");
                return -ENOMEM;
        }
        
        /* NOTE(review): clears sizeof(*rx) while only sz bytes were
         * allocated — safe only if sz >= sizeof(*rx); verify the
         * nb_kiov[] declaration in gmnal_rx_t */
        memset(rx, 0, sizeof(*rx));

        rc = gmnal_alloc_netbuf_pages(gmni, &rx->rx_buf, npages);
        if (rc != 0) {
                LIBCFS_FREE(rx, sz);
                return rc;
        }
        
        /* Link onto the NI's rx chain *before* the fallible hash
         * insert below: if the insert fails, this rx (pages included)
         * is still reclaimed later by gmnal_free_rxs(). */
        rx->rx_islarge = islarge;
        rx->rx_next = gmni->gmni_rxs;
        gmni->gmni_rxs = rx;

        gmrc = gm_hash_insert(gmni->gmni_rx_hash, 
                              GMNAL_NETBUF_LOCAL_NETADDR(&rx->rx_buf), rx);
        if (gmrc != GM_SUCCESS) {
                CERROR("Couldn't add rx to hash table: %d\n", gmrc);
                return -ENOMEM;
        }
        
        return 0;
}
225
226 void
227 gmnal_free_ltxbufs (gmnal_ni_t *gmni)
228 {
229         gmnal_txbuf_t *txb;
230         
231         while ((txb = gmni->gmni_ltxbs) != NULL) {
232                 gmni->gmni_ltxbs = txb->txb_next;
233                 gmnal_free_ltxbuf(gmni, txb);
234         }
235 }
236
237 int
238 gmnal_alloc_ltxbufs (gmnal_ni_t *gmni)
239 {
240         int     nlarge_tx_bufs = *gmnal_tunables.gm_nlarge_tx_bufs;
241         int     i;
242         int     rc;
243
244         for (i = 0; i < nlarge_tx_bufs; i++) {
245                 rc = gmnal_alloc_ltxbuf(gmni);
246
247                 if (rc != 0)
248                         return rc;
249         }
250
251         return 0;
252 }
253
254 void
255 gmnal_free_txs(gmnal_ni_t *gmni)
256 {
257         gmnal_tx_t *tx;
258
259         while ((tx = gmni->gmni_txs) != NULL) {
260                 gmni->gmni_txs = tx->tx_next;
261                 gmnal_free_tx (tx);
262         }
263 }
264
265 int
266 gmnal_alloc_txs(gmnal_ni_t *gmni)
267 {
268         int           ntxcred = gm_num_send_tokens(gmni->gmni_port);
269         int           ntx = *gmnal_tunables.gm_ntx;
270         int           i;
271         int           rc;
272
273         CDEBUG(D_NET, "ntxcred: %d\n", ntxcred);
274         gmni->gmni_tx_credits = ntxcred;
275
276         for (i = 0; i < ntx; i++) {
277                 rc = gmnal_alloc_tx(gmni);
278                 if (rc != 0)
279                         return rc;
280         }
281
282         return 0;
283 }
284
285 void
286 gmnal_free_rxs(gmnal_ni_t *gmni)
287 {
288         gmnal_rx_t *rx;
289
290         while ((rx = gmni->gmni_rxs) != NULL) {
291                 gmni->gmni_rxs = rx->rx_next;
292
293                 gmnal_free_rx(gmni, rx);
294         }
295
296         LASSERT (gmni->gmni_port == NULL);
297 #if 0
298         /* GM releases all resources allocated to a port when it closes */
299         if (gmni->gmni_rx_hash != NULL)
300                 gm_destroy_hash(gmni->gmni_rx_hash);
301 #endif
302 }
303
/*
 * Create the rx hash table and allocate the rx descriptors:
 * gm_nrx_large large ones followed by gm_nrx_small small ones.  If
 * the requested total exceeds the port's receive tokens, both counts
 * are scaled down proportionally (and written back to the tunables)
 * so the total fits within the credits.
 * Returns 0 on success or a negative errno.
 */
int
gmnal_alloc_rxs (gmnal_ni_t *gmni)
{
        int          nrxcred = gm_num_receive_tokens(gmni->gmni_port);
        int          nrx_small = *gmnal_tunables.gm_nrx_small;
        int          nrx_large = *gmnal_tunables.gm_nrx_large;
        int          nrx = nrx_large + nrx_small;
        int          rc;
        int          i;

        CDEBUG(D_NET, "nrxcred: %d(%dL+%dS)\n", nrxcred, nrx_large, nrx_small);

        if (nrx > nrxcred) {
                /* keep the large:small ratio while fitting in nrxcred;
                 * nrx > nrxcred >= 0 guarantees nrx != 0 here */
                int nlarge = (nrx_large * nrxcred)/nrx;
                int nsmall = nrxcred - nlarge;
                
                CWARN("Only %d rx credits: "
                      "reducing large %d->%d, small %d->%d\n", nrxcred,
                      nrx_large, nlarge, nrx_small, nsmall);
                
                *gmnal_tunables.gm_nrx_large = nrx_large = nlarge;
                *gmnal_tunables.gm_nrx_small = nrx_small = nsmall;
                nrx = nlarge + nsmall;
        }
        
        gmni->gmni_rx_hash = gm_create_hash(gm_hash_compare_ptrs, 
                                            gm_hash_hash_ptr, 0, 0, nrx, 0);
        if (gmni->gmni_rx_hash == NULL) {
                CERROR("Failed to create hash table\n");
                return -ENOMEM;
        }

        /* the first nrx_large descriptors are the large ones */
        for (i = 0; i < nrx; i++ ) {
                rc = gmnal_alloc_rx(gmni, i < nrx_large);
                if (rc != 0)
                        return rc;
        }

        return 0;
}
344
345 char * 
346 gmnal_gmstatus2str(gm_status_t status)
347 {
348         return(gm_strerror(status));
349
350         switch(status) {
351         case(GM_SUCCESS):
352                 return("SUCCESS");
353         case(GM_FAILURE):
354                 return("FAILURE");
355         case(GM_INPUT_BUFFER_TOO_SMALL):
356                 return("INPUT_BUFFER_TOO_SMALL");
357         case(GM_OUTPUT_BUFFER_TOO_SMALL):
358                 return("OUTPUT_BUFFER_TOO_SMALL");
359         case(GM_TRY_AGAIN ):
360                 return("TRY_AGAIN");
361         case(GM_BUSY):
362                 return("BUSY");
363         case(GM_MEMORY_FAULT):
364                 return("MEMORY_FAULT");
365         case(GM_INTERRUPTED):
366                 return("INTERRUPTED");
367         case(GM_INVALID_PARAMETER):
368                 return("INVALID_PARAMETER");
369         case(GM_OUT_OF_MEMORY):
370                 return("OUT_OF_MEMORY");
371         case(GM_INVALID_COMMAND):
372                 return("INVALID_COMMAND");
373         case(GM_PERMISSION_DENIED):
374                 return("PERMISSION_DENIED");
375         case(GM_INTERNAL_ERROR):
376                 return("INTERNAL_ERROR");
377         case(GM_UNATTACHED):
378                 return("UNATTACHED");
379         case(GM_UNSUPPORTED_DEVICE):
380                 return("UNSUPPORTED_DEVICE");
381         case(GM_SEND_TIMED_OUT):
382                 return("GM_SEND_TIMEDOUT");
383         case(GM_SEND_REJECTED):
384                 return("GM_SEND_REJECTED");
385         case(GM_SEND_TARGET_PORT_CLOSED):
386                 return("GM_SEND_TARGET_PORT_CLOSED");
387         case(GM_SEND_TARGET_NODE_UNREACHABLE):
388                 return("GM_SEND_TARGET_NODE_UNREACHABLE");
389         case(GM_SEND_DROPPED):
390                 return("GM_SEND_DROPPED");
391         case(GM_SEND_PORT_CLOSED):
392                 return("GM_SEND_PORT_CLOSED");
393         case(GM_NODE_ID_NOT_YET_SET):
394                 return("GM_NODE_ID_NOT_YET_SET");
395         case(GM_STILL_SHUTTING_DOWN):
396                 return("GM_STILL_SHUTTING_DOWN");
397         case(GM_CLONE_BUSY):
398                 return("GM_CLONE_BUSY");
399         case(GM_NO_SUCH_DEVICE):
400                 return("GM_NO_SUCH_DEVICE");
401         case(GM_ABORTED):
402                 return("GM_ABORTED");
403         case(GM_INCOMPATIBLE_LIB_AND_DRIVER):
404                 return("GM_INCOMPATIBLE_LIB_AND_DRIVER");
405         case(GM_UNTRANSLATED_SYSTEM_ERROR):
406                 return("GM_UNTRANSLATED_SYSTEM_ERROR");
407         case(GM_ACCESS_DENIED):
408                 return("GM_ACCESS_DENIED");
409
410         
411         /*
412          *      These ones are in the docs but aren't in the header file 
413          case(GM_DEV_NOT_FOUND):
414          return("GM_DEV_NOT_FOUND");
415          case(GM_INVALID_PORT_NUMBER):
416          return("GM_INVALID_PORT_NUMBER");
417          case(GM_UC_ERROR):
418          return("GM_US_ERROR");
419          case(GM_PAGE_TABLE_FULL):
420          return("GM_PAGE_TABLE_FULL");
421          case(GM_MINOR_OVERFLOW):
422          return("GM_MINOR_OVERFLOW");
423          case(GM_SEND_ORPHANED):
424          return("GM_SEND_ORPHANED");
425          case(GM_HARDWARE_FAULT):
426          return("GM_HARDWARE_FAULT");
427          case(GM_DATA_CORRUPTED):
428          return("GM_DATA_CORRUPTED");
429          case(GM_TIMED_OUT):
430          return("GM_TIMED_OUT");
431          case(GM_USER_ERROR):
432          return("GM_USER_ERROR");
433          case(GM_NO_MATCH):
434          return("GM_NOMATCH");
435          case(GM_NOT_SUPPORTED_IN_KERNEL):
436          return("GM_NOT_SUPPORTED_IN_KERNEL");
437          case(GM_NOT_SUPPORTED_ON_ARCH):
438          return("GM_NOT_SUPPORTED_ON_ARCH");
439          case(GM_PTE_REF_CNT_OVERFLOW):
440          return("GM_PTR_REF_CNT_OVERFLOW");
441          case(GM_NO_DRIVER_SUPPORT):
442          return("GM_NO_DRIVER_SUPPORT");
443          case(GM_FIRMWARE_NOT_RUNNING):
444          return("GM_FIRMWARE_NOT_RUNNING");
445          *      These ones are in the docs but aren't in the header file 
446          */
447
448         default:
449                 return("UNKNOWN GM ERROR CODE");
450         }
451 }
452
453
454 char *
455 gmnal_rxevent2str(gm_recv_event_t *ev)
456 {
457         short   event;
458         event = GM_RECV_EVENT_TYPE(ev);
459         switch(event) {
460         case(GM_NO_RECV_EVENT):
461                 return("GM_NO_RECV_EVENT");
462         case(GM_SENDS_FAILED_EVENT):
463                 return("GM_SEND_FAILED_EVENT");
464         case(GM_ALARM_EVENT):
465                 return("GM_ALARM_EVENT");
466         case(GM_SENT_EVENT):
467                 return("GM_SENT_EVENT");
468         case(_GM_SLEEP_EVENT):
469                 return("_GM_SLEEP_EVENT");
470         case(GM_RAW_RECV_EVENT):
471                 return("GM_RAW_RECV_EVENT");
472         case(GM_BAD_SEND_DETECTED_EVENT):
473                 return("GM_BAD_SEND_DETECTED_EVENT");
474         case(GM_SEND_TOKEN_VIOLATION_EVENT):
475                 return("GM_SEND_TOKEN_VIOLATION_EVENT");
476         case(GM_RECV_TOKEN_VIOLATION_EVENT):
477                 return("GM_RECV_TOKEN_VIOLATION_EVENT");
478         case(GM_BAD_RECV_TOKEN_EVENT):
479                 return("GM_BAD_RECV_TOKEN_EVENT");
480         case(GM_ALARM_VIOLATION_EVENT):
481                 return("GM_ALARM_VIOLATION_EVENT");
482         case(GM_RECV_EVENT):
483                 return("GM_RECV_EVENT");
484         case(GM_HIGH_RECV_EVENT):
485                 return("GM_HIGH_RECV_EVENT");
486         case(GM_PEER_RECV_EVENT):
487                 return("GM_PEER_RECV_EVENT");
488         case(GM_HIGH_PEER_RECV_EVENT):
489                 return("GM_HIGH_PEER_RECV_EVENT");
490         case(GM_FAST_RECV_EVENT):
491                 return("GM_FAST_RECV_EVENT");
492         case(GM_FAST_HIGH_RECV_EVENT):
493                 return("GM_FAST_HIGH_RECV_EVENT");
494         case(GM_FAST_PEER_RECV_EVENT):
495                 return("GM_FAST_PEER_RECV_EVENT");
496         case(GM_FAST_HIGH_PEER_RECV_EVENT):
497                 return("GM_FAST_HIGH_PEER_RECV_EVENT");
498         case(GM_REJECTED_SEND_EVENT):
499                 return("GM_REJECTED_SEND_EVENT");
500         case(GM_ORPHANED_SEND_EVENT):
501                 return("GM_ORPHANED_SEND_EVENT");
502         case(GM_BAD_RESEND_DETECTED_EVENT):
503                 return("GM_BAD_RESEND_DETETED_EVENT");
504         case(GM_DROPPED_SEND_EVENT):
505                 return("GM_DROPPED_SEND_EVENT");
506         case(GM_BAD_SEND_VMA_EVENT):
507                 return("GM_BAD_SEND_VMA_EVENT");
508         case(GM_BAD_RECV_VMA_EVENT):
509                 return("GM_BAD_RECV_VMA_EVENT");
510         case(_GM_FLUSHED_ALARM_EVENT):
511                 return("GM_FLUSHED_ALARM_EVENT");
512         case(GM_SENT_TOKENS_EVENT):
513                 return("GM_SENT_TOKENS_EVENTS");
514         case(GM_IGNORE_RECV_EVENT):
515                 return("GM_IGNORE_RECV_EVENT");
516         case(GM_ETHERNET_RECV_EVENT):
517                 return("GM_ETHERNET_RECV_EVENT");
518         case(GM_NEW_NO_RECV_EVENT):
519                 return("GM_NEW_NO_RECV_EVENT");
520         case(GM_NEW_SENDS_FAILED_EVENT):
521                 return("GM_NEW_SENDS_FAILED_EVENT");
522         case(GM_NEW_ALARM_EVENT):
523                 return("GM_NEW_ALARM_EVENT");
524         case(GM_NEW_SENT_EVENT):
525                 return("GM_NEW_SENT_EVENT");
526         case(_GM_NEW_SLEEP_EVENT):
527                 return("GM_NEW_SLEEP_EVENT");
528         case(GM_NEW_RAW_RECV_EVENT):
529                 return("GM_NEW_RAW_RECV_EVENT");
530         case(GM_NEW_BAD_SEND_DETECTED_EVENT):
531                 return("GM_NEW_BAD_SEND_DETECTED_EVENT");
532         case(GM_NEW_SEND_TOKEN_VIOLATION_EVENT):
533                 return("GM_NEW_SEND_TOKEN_VIOLATION_EVENT");
534         case(GM_NEW_RECV_TOKEN_VIOLATION_EVENT):
535                 return("GM_NEW_RECV_TOKEN_VIOLATION_EVENT");
536         case(GM_NEW_BAD_RECV_TOKEN_EVENT):
537                 return("GM_NEW_BAD_RECV_TOKEN_EVENT");
538         case(GM_NEW_ALARM_VIOLATION_EVENT):
539                 return("GM_NEW_ALARM_VIOLATION_EVENT");
540         case(GM_NEW_RECV_EVENT):
541                 return("GM_NEW_RECV_EVENT");
542         case(GM_NEW_HIGH_RECV_EVENT):
543                 return("GM_NEW_HIGH_RECV_EVENT");
544         case(GM_NEW_PEER_RECV_EVENT):
545                 return("GM_NEW_PEER_RECV_EVENT");
546         case(GM_NEW_HIGH_PEER_RECV_EVENT):
547                 return("GM_NEW_HIGH_PEER_RECV_EVENT");
548         case(GM_NEW_FAST_RECV_EVENT):
549                 return("GM_NEW_FAST_RECV_EVENT");
550         case(GM_NEW_FAST_HIGH_RECV_EVENT):
551                 return("GM_NEW_FAST_HIGH_RECV_EVENT");
552         case(GM_NEW_FAST_PEER_RECV_EVENT):
553                 return("GM_NEW_FAST_PEER_RECV_EVENT");
554         case(GM_NEW_FAST_HIGH_PEER_RECV_EVENT):
555                 return("GM_NEW_FAST_HIGH_PEER_RECV_EVENT");
556         case(GM_NEW_REJECTED_SEND_EVENT):
557                 return("GM_NEW_REJECTED_SEND_EVENT");
558         case(GM_NEW_ORPHANED_SEND_EVENT):
559                 return("GM_NEW_ORPHANED_SEND_EVENT");
560         case(_GM_NEW_PUT_NOTIFICATION_EVENT):
561                 return("_GM_NEW_PUT_NOTIFICATION_EVENT");
562         case(GM_NEW_FREE_SEND_TOKEN_EVENT):
563                 return("GM_NEW_FREE_SEND_TOKEN_EVENT");
564         case(GM_NEW_FREE_HIGH_SEND_TOKEN_EVENT):
565                 return("GM_NEW_FREE_HIGH_SEND_TOKEN_EVENT");
566         case(GM_NEW_BAD_RESEND_DETECTED_EVENT):
567                 return("GM_NEW_BAD_RESEND_DETECTED_EVENT");
568         case(GM_NEW_DROPPED_SEND_EVENT):
569                 return("GM_NEW_DROPPED_SEND_EVENT");
570         case(GM_NEW_BAD_SEND_VMA_EVENT):
571                 return("GM_NEW_BAD_SEND_VMA_EVENT");
572         case(GM_NEW_BAD_RECV_VMA_EVENT):
573                 return("GM_NEW_BAD_RECV_VMA_EVENT");
574         case(_GM_NEW_FLUSHED_ALARM_EVENT):
575                 return("GM_NEW_FLUSHED_ALARM_EVENT");
576         case(GM_NEW_SENT_TOKENS_EVENT):
577                 return("GM_NEW_SENT_TOKENS_EVENT");
578         case(GM_NEW_IGNORE_RECV_EVENT):
579                 return("GM_NEW_IGNORE_RECV_EVENT");
580         case(GM_NEW_ETHERNET_RECV_EVENT):
581                 return("GM_NEW_ETHERNET_RECV_EVENT");
582         default:
583                 return("Unknown Recv event");
584         /* _GM_PUT_NOTIFICATION_EVENT */
585         /* GM_FREE_SEND_TOKEN_EVENT */
586         /* GM_FREE_HIGH_SEND_TOKEN_EVENT */
587         }
588 }
589
590
/* Sleep interruptibly for 'delay' scheduler ticks, yielding the CPU
 * to other runnable threads. */
void
gmnal_yield(int delay)
{
        set_current_state(TASK_INTERRUPTIBLE);
        schedule_timeout(delay);
}