/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 *  Copyright (c) 2003 Los Alamos National Laboratory (LANL)
 *
 *   This file is part of Lustre, http://www.lustre.org/
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include "gmlnd.h"
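/* Free the pages backing a network buffer.  No GM deregistration is done
 * here; see the callers for when that is safe. */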
void
gmnal_free_netbuf_pages (gmnal_netbuf_t *nb, int npages)
{
        int     i;

        for (i = 0; i < npages; i++)
                __free_page(nb->nb_kiov[i].kiov_page);
}
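/* Allocate 'npages' pages for a network buffer and register each one with
 * GM at consecutive network addresses starting at gmni_netaddr_base.  The
 * buffer's nb_netaddr records the address of the first page.  On failure,
 * all pages allocated so far are freed and -ENOMEM is returned. */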
int
gmnal_alloc_netbuf_pages (gmnal_ni_t *gmni, gmnal_netbuf_t *nb, int npages)
{
        int          i;
        gm_status_t  gmrc;

        LASSERT (npages > 0);

        for (i = 0; i < npages; i++) {

                nb->nb_kiov[i].kiov_page = alloc_page(GFP_KERNEL);
                nb->nb_kiov[i].kiov_offset = 0;
                nb->nb_kiov[i].kiov_len = PAGE_SIZE;

                if (nb->nb_kiov[i].kiov_page == NULL) {
                        CERROR("Can't allocate page\n");
                        gmnal_free_netbuf_pages(nb, i);
                        return -ENOMEM;
                }

                CDEBUG(D_NET,"[%3d] page %p, phys "LPX64", @ "LPX64"\n",
                       i, nb->nb_kiov[i].kiov_page,
                       lnet_page2phys(nb->nb_kiov[i].kiov_page),
                       gmni->gmni_netaddr_base);

                gmrc = gm_register_memory_ex_phys(
                        gmni->gmni_port,
                        lnet_page2phys(nb->nb_kiov[i].kiov_page),
                        PAGE_SIZE,
                        gmni->gmni_netaddr_base);
                CDEBUG(D_NET,"[%3d] page %p: %d\n",
                       i, nb->nb_kiov[i].kiov_page, gmrc);

                if (gmrc != GM_SUCCESS) {
                        CERROR("Can't map page: %d(%s)\n", gmrc,
                               gmnal_gmstatus2str(gmrc));
                        gmnal_free_netbuf_pages(nb, i+1);
                        return -ENOMEM;
                }

                if (i == 0)
                        nb->nb_netaddr = gmni->gmni_netaddr_base;

                gmni->gmni_netaddr_base += PAGE_SIZE;
        }

        return 0;
}
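/* Free a large transmit buffer.  Only legal once the GM port has been
 * closed, since the pages are not explicitly deregistered. */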
void
gmnal_free_ltxbuf (gmnal_ni_t *gmni, gmnal_txbuf_t *txb)
{
        int            npages = gmni->gmni_large_pages;

        LASSERT (gmni->gmni_port == NULL);
        /* No unmapping; the port has been closed */

        gmnal_free_netbuf_pages(&txb->txb_buf, gmni->gmni_large_pages);
        LIBCFS_FREE(txb, offsetof(gmnal_txbuf_t, txb_buf.nb_kiov[npages]));
}
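/* Allocate and register one large transmit buffer, put it on the idle
 * list and thread it onto the global gmni_ltxbs chain used for final
 * cleanup. */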
int
gmnal_alloc_ltxbuf (gmnal_ni_t *gmni)
{
        int            npages = gmni->gmni_large_pages;
        int            sz = offsetof(gmnal_txbuf_t, txb_buf.nb_kiov[npages]);
        gmnal_txbuf_t *txb;
        int            rc;

        LIBCFS_ALLOC(txb, sz);
        if (txb == NULL) {
                CERROR("Can't allocate large txbuffer\n");
                return -ENOMEM;
        }

        rc = gmnal_alloc_netbuf_pages(gmni, &txb->txb_buf, npages);
        if (rc != 0) {
                LIBCFS_FREE(txb, sz);
                return rc;
        }

        list_add_tail(&txb->txb_list, &gmni->gmni_idle_ltxbs);

        txb->txb_next = gmni->gmni_ltxbs;
        gmni->gmni_ltxbs = txb;

        return 0;
}
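/* Free a transmit descriptor and its single buffer page; only legal once
 * the GM port has been closed. */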
void
gmnal_free_tx (gmnal_tx_t *tx)
{
        LASSERT (tx->tx_gmni->gmni_port == NULL);

        gmnal_free_netbuf_pages(&tx->tx_buf, 1);
        LIBCFS_FREE(tx, sizeof(*tx));
}
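/* Allocate a transmit descriptor with a single registered buffer page,
 * add it to the idle list and thread it onto the global gmni_txs chain. */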
int
gmnal_alloc_tx (gmnal_ni_t *gmni)
{
        gmnal_tx_t  *tx;
        int          rc;

        LIBCFS_ALLOC(tx, sizeof(*tx));
        if (tx == NULL) {
                CERROR("Failed to allocate tx\n");
                return -ENOMEM;
        }

        memset(tx, 0, sizeof(*tx));

        rc = gmnal_alloc_netbuf_pages(gmni, &tx->tx_buf, 1);
        if (rc != 0) {
                LIBCFS_FREE(tx, sizeof(*tx));
                return -ENOMEM;
        }

        tx->tx_gmni = gmni;

        list_add_tail(&tx->tx_list, &gmni->gmni_idle_txs);

        tx->tx_next = gmni->gmni_txs;
        gmni->gmni_txs = tx;

        return 0;
}
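/* Free a receive descriptor: one page for a small rx, gmni_large_pages
 * pages for a large one.  Only legal once the GM port has been closed. */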
void
gmnal_free_rx(gmnal_ni_t *gmni, gmnal_rx_t *rx)
{
        int   npages = rx->rx_islarge ? gmni->gmni_large_pages : 1;

        LASSERT (gmni->gmni_port == NULL);

        gmnal_free_netbuf_pages(&rx->rx_buf, npages);
        LIBCFS_FREE(rx, offsetof(gmnal_rx_t, rx_buf.nb_kiov[npages]));
}
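/* Allocate a receive descriptor (small = 1 page, large = gmni_large_pages
 * pages), thread it onto the global gmni_rxs chain and enter it in the rx
 * hash table keyed on its local network address. */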
int
gmnal_alloc_rx (gmnal_ni_t *gmni, int islarge)
{
        int         npages = islarge ? gmni->gmni_large_pages : 1;
        int         sz = offsetof(gmnal_rx_t, rx_buf.nb_kiov[npages]);
        int         rc;
        gmnal_rx_t *rx;
        gm_status_t gmrc;

        LIBCFS_ALLOC(rx, sz);
        if (rx == NULL) {
                CERROR("Failed to allocate rx\n");
                return -ENOMEM;
        }

        memset(rx, 0, sizeof(*rx));

        rc = gmnal_alloc_netbuf_pages(gmni, &rx->rx_buf, npages);
        if (rc != 0) {
                LIBCFS_FREE(rx, sz);
                return rc;
        }

        rx->rx_islarge = islarge;
        rx->rx_next = gmni->gmni_rxs;
        gmni->gmni_rxs = rx;

        gmrc = gm_hash_insert(gmni->gmni_rx_hash,
                              GMNAL_NETBUF_LOCAL_NETADDR(&rx->rx_buf), rx);
        if (gmrc != GM_SUCCESS) {
                CERROR("Couldn't add rx to hash table: %d\n", gmrc);
                return -ENOMEM;
        }

        return 0;
}
void
gmnal_free_ltxbufs (gmnal_ni_t *gmni)
{
        gmnal_txbuf_t *txb;

        while ((txb = gmni->gmni_ltxbs) != NULL) {
                gmni->gmni_ltxbs = txb->txb_next;
                gmnal_free_ltxbuf(gmni, txb);
        }
}

int
gmnal_alloc_ltxbufs (gmnal_ni_t *gmni)
{
        int     nlarge_tx_bufs = *gmnal_tunables.gm_nlarge_tx_bufs;
        int     i;
        int     rc;

        for (i = 0; i < nlarge_tx_bufs; i++) {
                rc = gmnal_alloc_ltxbuf(gmni);

                if (rc != 0)
                        return rc;
        }

        return 0;
}

void
gmnal_free_txs(gmnal_ni_t *gmni)
{
        gmnal_tx_t *tx;

        while ((tx = gmni->gmni_txs) != NULL) {
                gmni->gmni_txs = tx->tx_next;
                gmnal_free_tx (tx);
        }
}
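/* Record the port's send-token count as the tx credit limit and allocate
 * the configured number (gm_ntx) of transmit descriptors. */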
int
gmnal_alloc_txs(gmnal_ni_t *gmni)
{
        int           ntxcred = gm_num_send_tokens(gmni->gmni_port);
        int           ntx = *gmnal_tunables.gm_ntx;
        int           i;
        int           rc;

        CDEBUG(D_NET, "ntxcred: %d\n", ntxcred);
        gmni->gmni_tx_credits = ntxcred;

        for (i = 0; i < ntx; i++) {
                rc = gmnal_alloc_tx(gmni);
                if (rc != 0)
                        return rc;
        }

        return 0;
}
void
gmnal_free_rxs(gmnal_ni_t *gmni)
{
        gmnal_rx_t *rx;

        while ((rx = gmni->gmni_rxs) != NULL) {
                gmni->gmni_rxs = rx->rx_next;

                gmnal_free_rx(gmni, rx);
        }

        LASSERT (gmni->gmni_port == NULL);
#if 0
        /* GM releases all resources allocated to a port when it closes */
        if (gmni->gmni_rx_hash != NULL)
                gm_destroy_hash(gmni->gmni_rx_hash);
#endif
}
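/* Create the rx hash table and allocate the configured numbers of large
 * and small receive descriptors (large ones first).  If the total exceeds
 * the port's receive tokens, both counts are scaled down proportionally
 * and the tunables updated, so no more than nrxcred descriptors exist. */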
int
gmnal_alloc_rxs (gmnal_ni_t *gmni)
{
        int          nrxcred = gm_num_receive_tokens(gmni->gmni_port);
        int          nrx_small = *gmnal_tunables.gm_nrx_small;
        int          nrx_large = *gmnal_tunables.gm_nrx_large;
        int          nrx = nrx_large + nrx_small;
        int          rc;
        int          i;

        CDEBUG(D_NET, "nrxcred: %d(%dL+%dS)\n", nrxcred, nrx_large, nrx_small);

        if (nrx > nrxcred) {
                int nlarge = (nrx_large * nrxcred)/nrx;
                int nsmall = nrxcred - nlarge;

                CWARN("Only %d rx credits: "
                      "reducing large %d->%d, small %d->%d\n", nrxcred,
                      nrx_large, nlarge, nrx_small, nsmall);

                *gmnal_tunables.gm_nrx_large = nrx_large = nlarge;
                *gmnal_tunables.gm_nrx_small = nrx_small = nsmall;
                nrx = nlarge + nsmall;
        }

        gmni->gmni_rx_hash = gm_create_hash(gm_hash_compare_ptrs,
                                            gm_hash_hash_ptr, 0, 0, nrx, 0);
        if (gmni->gmni_rx_hash == NULL) {
                CERROR("Failed to create hash table\n");
                return -ENOMEM;
        }

        for (i = 0; i < nrx; i++) {
                rc = gmnal_alloc_rx(gmni, i < nrx_large);
                if (rc != 0)
                        return rc;
        }

        return 0;
}
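/* Return a printable name for a GM status code.  gm_strerror() supplies
 * the string, so the hand-rolled table below is unreachable; it is kept
 * under #if 0 for reference only. */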
char *
gmnal_gmstatus2str(gm_status_t status)
{
        return(gm_strerror(status));

#if 0   /* unreachable: gm_strerror() above handles the status codes */
        switch(status) {
        case(GM_SUCCESS):
                return("SUCCESS");
        case(GM_FAILURE):
                return("FAILURE");
        case(GM_INPUT_BUFFER_TOO_SMALL):
                return("INPUT_BUFFER_TOO_SMALL");
        case(GM_OUTPUT_BUFFER_TOO_SMALL):
                return("OUTPUT_BUFFER_TOO_SMALL");
        case(GM_TRY_AGAIN):
                return("TRY_AGAIN");
        case(GM_BUSY):
                return("BUSY");
        case(GM_MEMORY_FAULT):
                return("MEMORY_FAULT");
        case(GM_INTERRUPTED):
                return("INTERRUPTED");
        case(GM_INVALID_PARAMETER):
                return("INVALID_PARAMETER");
        case(GM_OUT_OF_MEMORY):
                return("OUT_OF_MEMORY");
        case(GM_INVALID_COMMAND):
                return("INVALID_COMMAND");
        case(GM_PERMISSION_DENIED):
                return("PERMISSION_DENIED");
        case(GM_INTERNAL_ERROR):
                return("INTERNAL_ERROR");
        case(GM_UNATTACHED):
                return("UNATTACHED");
        case(GM_UNSUPPORTED_DEVICE):
                return("UNSUPPORTED_DEVICE");
        case(GM_SEND_TIMED_OUT):
                return("GM_SEND_TIMED_OUT");
        case(GM_SEND_REJECTED):
                return("GM_SEND_REJECTED");
        case(GM_SEND_TARGET_PORT_CLOSED):
                return("GM_SEND_TARGET_PORT_CLOSED");
        case(GM_SEND_TARGET_NODE_UNREACHABLE):
                return("GM_SEND_TARGET_NODE_UNREACHABLE");
        case(GM_SEND_DROPPED):
                return("GM_SEND_DROPPED");
        case(GM_SEND_PORT_CLOSED):
                return("GM_SEND_PORT_CLOSED");
        case(GM_NODE_ID_NOT_YET_SET):
                return("GM_NODE_ID_NOT_YET_SET");
        case(GM_STILL_SHUTTING_DOWN):
                return("GM_STILL_SHUTTING_DOWN");
        case(GM_CLONE_BUSY):
                return("GM_CLONE_BUSY");
        case(GM_NO_SUCH_DEVICE):
                return("GM_NO_SUCH_DEVICE");
        case(GM_ABORTED):
                return("GM_ABORTED");
        case(GM_INCOMPATIBLE_LIB_AND_DRIVER):
                return("GM_INCOMPATIBLE_LIB_AND_DRIVER");
        case(GM_UNTRANSLATED_SYSTEM_ERROR):
                return("GM_UNTRANSLATED_SYSTEM_ERROR");
        case(GM_ACCESS_DENIED):
                return("GM_ACCESS_DENIED");

        /*
         *      These ones are in the docs but aren't in the header file:
         case(GM_DEV_NOT_FOUND):
         return("GM_DEV_NOT_FOUND");
         case(GM_INVALID_PORT_NUMBER):
         return("GM_INVALID_PORT_NUMBER");
         case(GM_UC_ERROR):
         return("GM_UC_ERROR");
         case(GM_PAGE_TABLE_FULL):
         return("GM_PAGE_TABLE_FULL");
         case(GM_MINOR_OVERFLOW):
         return("GM_MINOR_OVERFLOW");
         case(GM_SEND_ORPHANED):
         return("GM_SEND_ORPHANED");
         case(GM_HARDWARE_FAULT):
         return("GM_HARDWARE_FAULT");
         case(GM_DATA_CORRUPTED):
         return("GM_DATA_CORRUPTED");
         case(GM_TIMED_OUT):
         return("GM_TIMED_OUT");
         case(GM_USER_ERROR):
         return("GM_USER_ERROR");
         case(GM_NO_MATCH):
         return("GM_NO_MATCH");
         case(GM_NOT_SUPPORTED_IN_KERNEL):
         return("GM_NOT_SUPPORTED_IN_KERNEL");
         case(GM_NOT_SUPPORTED_ON_ARCH):
         return("GM_NOT_SUPPORTED_ON_ARCH");
         case(GM_PTE_REF_CNT_OVERFLOW):
         return("GM_PTE_REF_CNT_OVERFLOW");
         case(GM_NO_DRIVER_SUPPORT):
         return("GM_NO_DRIVER_SUPPORT");
         case(GM_FIRMWARE_NOT_RUNNING):
         return("GM_FIRMWARE_NOT_RUNNING");
         */

        default:
                return("UNKNOWN GM ERROR CODE");
        }
#endif
}
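/* Return a printable name for a GM receive event, as extracted with
 * GM_RECV_EVENT_TYPE(). */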
char *
gmnal_rxevent2str(gm_recv_event_t *ev)
{
        short   event = GM_RECV_EVENT_TYPE(ev);

        switch(event) {
        case(GM_NO_RECV_EVENT):
                return("GM_NO_RECV_EVENT");
        case(GM_SENDS_FAILED_EVENT):
                return("GM_SENDS_FAILED_EVENT");
        case(GM_ALARM_EVENT):
                return("GM_ALARM_EVENT");
        case(GM_SENT_EVENT):
                return("GM_SENT_EVENT");
        case(_GM_SLEEP_EVENT):
                return("_GM_SLEEP_EVENT");
        case(GM_RAW_RECV_EVENT):
                return("GM_RAW_RECV_EVENT");
        case(GM_BAD_SEND_DETECTED_EVENT):
                return("GM_BAD_SEND_DETECTED_EVENT");
        case(GM_SEND_TOKEN_VIOLATION_EVENT):
                return("GM_SEND_TOKEN_VIOLATION_EVENT");
        case(GM_RECV_TOKEN_VIOLATION_EVENT):
                return("GM_RECV_TOKEN_VIOLATION_EVENT");
        case(GM_BAD_RECV_TOKEN_EVENT):
                return("GM_BAD_RECV_TOKEN_EVENT");
        case(GM_ALARM_VIOLATION_EVENT):
                return("GM_ALARM_VIOLATION_EVENT");
        case(GM_RECV_EVENT):
                return("GM_RECV_EVENT");
        case(GM_HIGH_RECV_EVENT):
                return("GM_HIGH_RECV_EVENT");
        case(GM_PEER_RECV_EVENT):
                return("GM_PEER_RECV_EVENT");
        case(GM_HIGH_PEER_RECV_EVENT):
                return("GM_HIGH_PEER_RECV_EVENT");
        case(GM_FAST_RECV_EVENT):
                return("GM_FAST_RECV_EVENT");
        case(GM_FAST_HIGH_RECV_EVENT):
                return("GM_FAST_HIGH_RECV_EVENT");
        case(GM_FAST_PEER_RECV_EVENT):
                return("GM_FAST_PEER_RECV_EVENT");
        case(GM_FAST_HIGH_PEER_RECV_EVENT):
                return("GM_FAST_HIGH_PEER_RECV_EVENT");
        case(GM_REJECTED_SEND_EVENT):
                return("GM_REJECTED_SEND_EVENT");
        case(GM_ORPHANED_SEND_EVENT):
                return("GM_ORPHANED_SEND_EVENT");
        case(GM_BAD_RESEND_DETECTED_EVENT):
                return("GM_BAD_RESEND_DETECTED_EVENT");
        case(GM_DROPPED_SEND_EVENT):
                return("GM_DROPPED_SEND_EVENT");
        case(GM_BAD_SEND_VMA_EVENT):
                return("GM_BAD_SEND_VMA_EVENT");
        case(GM_BAD_RECV_VMA_EVENT):
                return("GM_BAD_RECV_VMA_EVENT");
        case(_GM_FLUSHED_ALARM_EVENT):
                return("GM_FLUSHED_ALARM_EVENT");
        case(GM_SENT_TOKENS_EVENT):
                return("GM_SENT_TOKENS_EVENT");
        case(GM_IGNORE_RECV_EVENT):
                return("GM_IGNORE_RECV_EVENT");
        case(GM_ETHERNET_RECV_EVENT):
                return("GM_ETHERNET_RECV_EVENT");
        case(GM_NEW_NO_RECV_EVENT):
                return("GM_NEW_NO_RECV_EVENT");
        case(GM_NEW_SENDS_FAILED_EVENT):
                return("GM_NEW_SENDS_FAILED_EVENT");
        case(GM_NEW_ALARM_EVENT):
                return("GM_NEW_ALARM_EVENT");
        case(GM_NEW_SENT_EVENT):
                return("GM_NEW_SENT_EVENT");
        case(_GM_NEW_SLEEP_EVENT):
                return("GM_NEW_SLEEP_EVENT");
        case(GM_NEW_RAW_RECV_EVENT):
                return("GM_NEW_RAW_RECV_EVENT");
        case(GM_NEW_BAD_SEND_DETECTED_EVENT):
                return("GM_NEW_BAD_SEND_DETECTED_EVENT");
        case(GM_NEW_SEND_TOKEN_VIOLATION_EVENT):
                return("GM_NEW_SEND_TOKEN_VIOLATION_EVENT");
        case(GM_NEW_RECV_TOKEN_VIOLATION_EVENT):
                return("GM_NEW_RECV_TOKEN_VIOLATION_EVENT");
        case(GM_NEW_BAD_RECV_TOKEN_EVENT):
                return("GM_NEW_BAD_RECV_TOKEN_EVENT");
        case(GM_NEW_ALARM_VIOLATION_EVENT):
                return("GM_NEW_ALARM_VIOLATION_EVENT");
        case(GM_NEW_RECV_EVENT):
                return("GM_NEW_RECV_EVENT");
        case(GM_NEW_HIGH_RECV_EVENT):
                return("GM_NEW_HIGH_RECV_EVENT");
        case(GM_NEW_PEER_RECV_EVENT):
                return("GM_NEW_PEER_RECV_EVENT");
        case(GM_NEW_HIGH_PEER_RECV_EVENT):
                return("GM_NEW_HIGH_PEER_RECV_EVENT");
        case(GM_NEW_FAST_RECV_EVENT):
                return("GM_NEW_FAST_RECV_EVENT");
        case(GM_NEW_FAST_HIGH_RECV_EVENT):
                return("GM_NEW_FAST_HIGH_RECV_EVENT");
        case(GM_NEW_FAST_PEER_RECV_EVENT):
                return("GM_NEW_FAST_PEER_RECV_EVENT");
        case(GM_NEW_FAST_HIGH_PEER_RECV_EVENT):
                return("GM_NEW_FAST_HIGH_PEER_RECV_EVENT");
        case(GM_NEW_REJECTED_SEND_EVENT):
                return("GM_NEW_REJECTED_SEND_EVENT");
        case(GM_NEW_ORPHANED_SEND_EVENT):
                return("GM_NEW_ORPHANED_SEND_EVENT");
        case(_GM_NEW_PUT_NOTIFICATION_EVENT):
                return("_GM_NEW_PUT_NOTIFICATION_EVENT");
        case(GM_NEW_FREE_SEND_TOKEN_EVENT):
                return("GM_NEW_FREE_SEND_TOKEN_EVENT");
        case(GM_NEW_FREE_HIGH_SEND_TOKEN_EVENT):
                return("GM_NEW_FREE_HIGH_SEND_TOKEN_EVENT");
        case(GM_NEW_BAD_RESEND_DETECTED_EVENT):
                return("GM_NEW_BAD_RESEND_DETECTED_EVENT");
        case(GM_NEW_DROPPED_SEND_EVENT):
                return("GM_NEW_DROPPED_SEND_EVENT");
        case(GM_NEW_BAD_SEND_VMA_EVENT):
                return("GM_NEW_BAD_SEND_VMA_EVENT");
        case(GM_NEW_BAD_RECV_VMA_EVENT):
                return("GM_NEW_BAD_RECV_VMA_EVENT");
        case(_GM_NEW_FLUSHED_ALARM_EVENT):
                return("GM_NEW_FLUSHED_ALARM_EVENT");
        case(GM_NEW_SENT_TOKENS_EVENT):
                return("GM_NEW_SENT_TOKENS_EVENT");
        case(GM_NEW_IGNORE_RECV_EVENT):
                return("GM_NEW_IGNORE_RECV_EVENT");
        case(GM_NEW_ETHERNET_RECV_EVENT):
                return("GM_NEW_ETHERNET_RECV_EVENT");
        /* Not decoded: _GM_PUT_NOTIFICATION_EVENT, GM_FREE_SEND_TOKEN_EVENT,
         * GM_FREE_HIGH_SEND_TOKEN_EVENT */
        default:
                return("Unknown Recv event");
        }
}
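/* Sleep (interruptibly) for 'delay' jiffies to yield the CPU. */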
void
gmnal_yield(int delay)
{
        set_current_state(TASK_INTERRUPTIBLE);
        schedule_timeout(delay);
}