[fs/lustre-release.git] / lnet / klnds / gmlnd / gmlnd_utils.c
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * GPL HEADER START
5  *
6  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 only,
10  * as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License version 2 for more details (a copy is included
16  * in the LICENSE file that accompanied this code).
17  *
18  * You should have received a copy of the GNU General Public License
19  * version 2 along with this program; If not, see
20  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
21  *
22  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23  * CA 95054 USA or visit www.sun.com if you need additional information or
24  * have any questions.
25  *
26  * GPL HEADER END
27  */
28 /*
29  * Copyright (c) 2008 Sun Microsystems, Inc. All rights reserved
30  * Use is subject to license terms.
31  *
32  * Copyright (c) 2003 Los Alamos National Laboratory (LANL)
33  */
34 /*
35  * This file is part of Lustre, http://www.lustre.org/
36  * Lustre is a trademark of Sun Microsystems, Inc.
37  */
38
39 #include "gmlnd.h"
40
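/* Release the kernel pages held in a network buffer's nb_kiov[] array.
 * Only the first 'npages' entries are freed, so callers unwinding a
 * partial allocation can pass the number of pages actually allocated. */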
41 void
42 gmnal_free_netbuf_pages (gmnal_netbuf_t *nb, int npages) 
43 {
44         int     i;
45         
46         for (i = 0; i < npages; i++)
47                 __free_page(nb->nb_kiov[i].kiov_page);
48 }
49
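/* Allocate 'npages' kernel pages into nb->nb_kiov[] and register each one
 * with GM at consecutive offsets in the port's network address space
 * (gmni_netaddr_base advances by PAGE_SIZE per page).  nb_netaddr records
 * the network address of the first page.  On failure, any pages allocated
 * so far are freed and -ENOMEM is returned. */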
50 int
51 gmnal_alloc_netbuf_pages (gmnal_ni_t *gmni, gmnal_netbuf_t *nb, int npages)
52 {
53         int          i;
54         gm_status_t  gmrc;
55
56         LASSERT (npages > 0);
57
58         for (i = 0; i < npages; i++) {
59                 nb->nb_kiov[i].kiov_page = alloc_page(GFP_KERNEL);
60                 nb->nb_kiov[i].kiov_offset = 0;
61                 nb->nb_kiov[i].kiov_len = PAGE_SIZE;
62
63                 if (nb->nb_kiov[i].kiov_page == NULL) {
64                         CERROR("Can't allocate page\n");
65                         gmnal_free_netbuf_pages(nb, i);
66                         return -ENOMEM;
67                 }
68
69                 CDEBUG(D_NET,"[%3d] page %p, phys "LPX64", @ "LPX64"\n",
70                        i, nb->nb_kiov[i].kiov_page, 
71                        lnet_page2phys(nb->nb_kiov[i].kiov_page),
72                        gmni->gmni_netaddr_base);
73
74                 gmrc = gm_register_memory_ex_phys(
75                         gmni->gmni_port,
76                         lnet_page2phys(nb->nb_kiov[i].kiov_page),
77                         PAGE_SIZE,
78                         gmni->gmni_netaddr_base);
79                 CDEBUG(D_NET,"[%3d] page %p: %d\n", 
80                        i, nb->nb_kiov[i].kiov_page, gmrc);
81
82                 if (gmrc != GM_SUCCESS) {
83                         CERROR("Can't map page: %d(%s)\n", gmrc,
84                                gmnal_gmstatus2str(gmrc));
85                         gmnal_free_netbuf_pages(nb, i+1);
86                         return -ENOMEM;
87                 }
88
89                 if (i == 0) 
90                         nb->nb_netaddr = gmni->gmni_netaddr_base;
91
92                 gmni->gmni_netaddr_base += PAGE_SIZE;
93         }
94
95         return 0;
96 }
97
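/* Free one large transmit buffer.  Nothing is unregistered from GM here:
 * the assertion below documents the assumption that the port has already
 * been closed, which releases everything registered with it. */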
98 void
99 gmnal_free_ltxbuf (gmnal_ni_t *gmni, gmnal_txbuf_t *txb)
100 {
101         int            npages = gmni->gmni_large_pages;
102
103         LASSERT (gmni->gmni_port == NULL);
104         /* No unmapping; the port has been closed */
105
106         gmnal_free_netbuf_pages(&txb->txb_buf, npages);
107         LIBCFS_FREE(txb, offsetof(gmnal_txbuf_t, txb_buf.nb_kiov[npages]));
108 }
109
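/* Allocate and map one large transmit buffer.  gmnal_txbuf_t ends in a
 * variable-length nb_kiov[] array, so the allocation size is computed with
 * offsetof(gmnal_txbuf_t, txb_buf.nb_kiov[npages]) rather than a plain
 * sizeof().  The new buffer goes on the gmni_idle_ltxbs queue and is also
 * chained onto gmni_ltxbs so it can be found again at cleanup time. */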
110 int
111 gmnal_alloc_ltxbuf (gmnal_ni_t *gmni)
112 {
113         int            npages = gmni->gmni_large_pages;
114         int            sz = offsetof(gmnal_txbuf_t, txb_buf.nb_kiov[npages]);
115         gmnal_txbuf_t *txb;
116         int            rc;
117
118         LIBCFS_ALLOC(txb, sz);
119         if (txb == NULL) {
120                 CERROR("Can't allocate large txbuffer\n");
121                 return -ENOMEM;
122         }
123
124         rc = gmnal_alloc_netbuf_pages(gmni, &txb->txb_buf, npages);
125         if (rc != 0) {
126                 LIBCFS_FREE(txb, sz);
127                 return rc;
128         }
129
130         cfs_list_add_tail(&txb->txb_list, &gmni->gmni_idle_ltxbs);
131
132         txb->txb_next = gmni->gmni_ltxbs;
133         gmni->gmni_ltxbs = txb;
134
135         return 0;
136 }
137
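/* Free a tx descriptor and its single mapped page; as with the other
 * teardown paths, the GM port must already have been closed. */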
138 void
139 gmnal_free_tx (gmnal_tx_t *tx)
140 {
141         LASSERT (tx->tx_gmni->gmni_port == NULL);
142
143         gmnal_free_netbuf_pages(&tx->tx_buf, 1);
144         LIBCFS_FREE(tx, sizeof(*tx));
145 }
146
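/* Allocate a tx descriptor backed by one mapped page, put it on the idle
 * tx list and chain it onto gmni_txs for cleanup. */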
147 int
148 gmnal_alloc_tx (gmnal_ni_t *gmni) 
149 {
150         gmnal_tx_t  *tx;
151         int          rc;
152         
153         LIBCFS_ALLOC(tx, sizeof(*tx));
154         if (tx == NULL) {
155                 CERROR("Failed to allocate tx\n");
156                 return -ENOMEM;
157         }
158         
159         memset(tx, 0, sizeof(*tx));
160
161         rc = gmnal_alloc_netbuf_pages(gmni, &tx->tx_buf, 1);
162         if (rc != 0) {
163                 LIBCFS_FREE(tx, sizeof(*tx));
164                 return rc;
165         }
166
167         tx->tx_gmni = gmni;
168         
169         cfs_list_add_tail(&tx->tx_list, &gmni->gmni_idle_txs);
170
171         tx->tx_next = gmni->gmni_txs;
172         gmni->gmni_txs = tx;
173                 
174         return 0;
175 }
176
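/* Free an rx descriptor; large rx buffers carry gmni_large_pages pages,
 * small ones a single page. */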
177 void
178 gmnal_free_rx(gmnal_ni_t *gmni, gmnal_rx_t *rx)
179 {
180         int   npages = rx->rx_islarge ? gmni->gmni_large_pages : 1;
181         
182         LASSERT (gmni->gmni_port == NULL);
183
184         gmnal_free_netbuf_pages(&rx->rx_buf, npages);
185         LIBCFS_FREE(rx, offsetof(gmnal_rx_t, rx_buf.nb_kiov[npages]));
186 }
187
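/* Allocate an rx descriptor ('islarge' selects the large buffer size),
 * chain it onto gmni_rxs, and index it in gmni_rx_hash keyed by its local
 * GM network address.  If the hash insert fails the descriptor is left on
 * gmni_rxs and is reclaimed later by gmnal_free_rxs(). */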
188 int
189 gmnal_alloc_rx (gmnal_ni_t *gmni, int islarge)
190 {
191         int         npages = islarge ? gmni->gmni_large_pages : 1;
192         int         sz = offsetof(gmnal_rx_t, rx_buf.nb_kiov[npages]);
193         int         rc;
194         gmnal_rx_t *rx;
195         gm_status_t gmrc;
196         
197         LIBCFS_ALLOC(rx, sz);
198         if (rx == NULL) {
199                 CERROR("Failed to allocate rx\n");
200                 return -ENOMEM;
201         }
202         
203         memset(rx, 0, sizeof(*rx));
204
205         rc = gmnal_alloc_netbuf_pages(gmni, &rx->rx_buf, npages);
206         if (rc != 0) {
207                 LIBCFS_FREE(rx, sz);
208                 return rc;
209         }
210         
211         rx->rx_islarge = islarge;
212         rx->rx_next = gmni->gmni_rxs;
213         gmni->gmni_rxs = rx;
214
215         gmrc = gm_hash_insert(gmni->gmni_rx_hash, 
216                               GMNAL_NETBUF_LOCAL_NETADDR(&rx->rx_buf), rx);
217         if (gmrc != GM_SUCCESS) {
218                 CERROR("Couldn't add rx to hash table: %d\n", gmrc);
219                 return -ENOMEM;
220         }
221         
222         return 0;
223 }
224
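/* Free every large tx buffer on the gmni_ltxbs chain. */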
225 void
226 gmnal_free_ltxbufs (gmnal_ni_t *gmni)
227 {
228         gmnal_txbuf_t *txb;
229         
230         while ((txb = gmni->gmni_ltxbs) != NULL) {
231                 gmni->gmni_ltxbs = txb->txb_next;
232                 gmnal_free_ltxbuf(gmni, txb);
233         }
234 }
235
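/* Allocate the number of large tx buffers configured by the
 * gm_nlarge_tx_bufs tunable. */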
236 int
237 gmnal_alloc_ltxbufs (gmnal_ni_t *gmni)
238 {
239         int     nlarge_tx_bufs = *gmnal_tunables.gm_nlarge_tx_bufs;
240         int     i;
241         int     rc;
242
243         for (i = 0; i < nlarge_tx_bufs; i++) {
244                 rc = gmnal_alloc_ltxbuf(gmni);
245
246                 if (rc != 0)
247                         return rc;
248         }
249
250         return 0;
251 }
252
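/* Free every tx descriptor on the gmni_txs chain. */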
253 void
254 gmnal_free_txs(gmnal_ni_t *gmni)
255 {
256         gmnal_tx_t *tx;
257
258         while ((tx = gmni->gmni_txs) != NULL) {
259                 gmni->gmni_txs = tx->tx_next;
260                 gmnal_free_tx (tx);
261         }
262 }
263
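/* Record the port's send-token count as the tx credit limit, then
 * allocate gm_ntx tx descriptors. */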
264 int
265 gmnal_alloc_txs(gmnal_ni_t *gmni)
266 {
267         int           ntxcred = gm_num_send_tokens(gmni->gmni_port);
268         int           ntx = *gmnal_tunables.gm_ntx;
269         int           i;
270         int           rc;
271
272         CDEBUG(D_NET, "ntxcred: %d\n", ntxcred);
273         gmni->gmni_tx_credits = ntxcred;
274
275         for (i = 0; i < ntx; i++) {
276                 rc = gmnal_alloc_tx(gmni);
277                 if (rc != 0)
278                         return rc;
279         }
280
281         return 0;
282 }
283
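/* Free every rx descriptor on the gmni_rxs chain.  The rx hash table is
 * not destroyed explicitly; GM reclaims it along with everything else
 * when the port is closed. */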
284 void
285 gmnal_free_rxs(gmnal_ni_t *gmni)
286 {
287         gmnal_rx_t *rx;
288
289         while ((rx = gmni->gmni_rxs) != NULL) {
290                 gmni->gmni_rxs = rx->rx_next;
291
292                 gmnal_free_rx(gmni, rx);
293         }
294
295         LASSERT (gmni->gmni_port == NULL);
296 #if 0
297         /* GM releases all resources allocated to a port when it closes */
298         if (gmni->gmni_rx_hash != NULL)
299                 gm_destroy_hash(gmni->gmni_rx_hash);
300 #endif
301 }
302
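/* Allocate all receive descriptors.  If the configured totals exceed the
 * port's receive tokens, the large/small split is scaled back
 * proportionally and the tunables updated to match.  The rx hash table is
 * created first so each descriptor can be registered in it as it is
 * allocated. */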
303 int
304 gmnal_alloc_rxs (gmnal_ni_t *gmni)
305 {
306         int          nrxcred = gm_num_receive_tokens(gmni->gmni_port);
307         int          nrx_small = *gmnal_tunables.gm_nrx_small;
308         int          nrx_large = *gmnal_tunables.gm_nrx_large;
309         int          nrx = nrx_large + nrx_small;
310         int          rc;
311         int          i;
312
313         CDEBUG(D_NET, "nrxcred: %d(%dL+%dS)\n", nrxcred, nrx_large, nrx_small);
314
315         if (nrx > nrxcred) {
316                 int nlarge = (nrx_large * nrxcred)/nrx;
317                 int nsmall = nrxcred - nlarge;
318                 
319                 CWARN("Only %d rx credits: "
320                       "reducing large %d->%d, small %d->%d\n", nrxcred,
321                       nrx_large, nlarge, nrx_small, nsmall);
322                 
323                 *gmnal_tunables.gm_nrx_large = nrx_large = nlarge;
324                 *gmnal_tunables.gm_nrx_small = nrx_small = nsmall;
325                 nrx = nlarge + nsmall;
326         }
327         
328         gmni->gmni_rx_hash = gm_create_hash(gm_hash_compare_ptrs, 
329                                             gm_hash_hash_ptr, 0, 0, nrx, 0);
330         if (gmni->gmni_rx_hash == NULL) {
331                 CERROR("Failed to create hash table\n");
332                 return -ENOMEM;
333         }
334
335         for (i = 0; i < nrx; i++ ) {
336                 rc = gmnal_alloc_rx(gmni, i < nrx_large);
337                 if (rc != 0)
338                         return rc;
339         }
340
341         return 0;
342 }
343
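/* Translate a gm_status_t into a printable string for error messages. */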
344 char * 
345 gmnal_gmstatus2str(gm_status_t status)
346 {
347         return(gm_strerror(status));
348
#if 0
        /* Unreachable: gm_strerror() above already covers every GM status
         * code; the hand-rolled table below is kept for reference only. */
349         switch(status) {
350         case(GM_SUCCESS):
351                 return("SUCCESS");
352         case(GM_FAILURE):
353                 return("FAILURE");
354         case(GM_INPUT_BUFFER_TOO_SMALL):
355                 return("INPUT_BUFFER_TOO_SMALL");
356         case(GM_OUTPUT_BUFFER_TOO_SMALL):
357                 return("OUTPUT_BUFFER_TOO_SMALL");
358         case(GM_TRY_AGAIN ):
359                 return("TRY_AGAIN");
360         case(GM_BUSY):
361                 return("BUSY");
362         case(GM_MEMORY_FAULT):
363                 return("MEMORY_FAULT");
364         case(GM_INTERRUPTED):
365                 return("INTERRUPTED");
366         case(GM_INVALID_PARAMETER):
367                 return("INVALID_PARAMETER");
368         case(GM_OUT_OF_MEMORY):
369                 return("OUT_OF_MEMORY");
370         case(GM_INVALID_COMMAND):
371                 return("INVALID_COMMAND");
372         case(GM_PERMISSION_DENIED):
373                 return("PERMISSION_DENIED");
374         case(GM_INTERNAL_ERROR):
375                 return("INTERNAL_ERROR");
376         case(GM_UNATTACHED):
377                 return("UNATTACHED");
378         case(GM_UNSUPPORTED_DEVICE):
379                 return("UNSUPPORTED_DEVICE");
380         case(GM_SEND_TIMED_OUT):
381                 return("GM_SEND_TIMEDOUT");
382         case(GM_SEND_REJECTED):
383                 return("GM_SEND_REJECTED");
384         case(GM_SEND_TARGET_PORT_CLOSED):
385                 return("GM_SEND_TARGET_PORT_CLOSED");
386         case(GM_SEND_TARGET_NODE_UNREACHABLE):
387                 return("GM_SEND_TARGET_NODE_UNREACHABLE");
388         case(GM_SEND_DROPPED):
389                 return("GM_SEND_DROPPED");
390         case(GM_SEND_PORT_CLOSED):
391                 return("GM_SEND_PORT_CLOSED");
392         case(GM_NODE_ID_NOT_YET_SET):
393                 return("GM_NODE_ID_NOT_YET_SET");
394         case(GM_STILL_SHUTTING_DOWN):
395                 return("GM_STILL_SHUTTING_DOWN");
396         case(GM_CLONE_BUSY):
397                 return("GM_CLONE_BUSY");
398         case(GM_NO_SUCH_DEVICE):
399                 return("GM_NO_SUCH_DEVICE");
400         case(GM_ABORTED):
401                 return("GM_ABORTED");
402         case(GM_INCOMPATIBLE_LIB_AND_DRIVER):
403                 return("GM_INCOMPATIBLE_LIB_AND_DRIVER");
404         case(GM_UNTRANSLATED_SYSTEM_ERROR):
405                 return("GM_UNTRANSLATED_SYSTEM_ERROR");
406         case(GM_ACCESS_DENIED):
407                 return("GM_ACCESS_DENIED");
408
409         
410         /*
411          *      These ones are in the docs but aren't in the header file 
412          case(GM_DEV_NOT_FOUND):
413          return("GM_DEV_NOT_FOUND");
414          case(GM_INVALID_PORT_NUMBER):
415          return("GM_INVALID_PORT_NUMBER");
416          case(GM_UC_ERROR):
417          return("GM_US_ERROR");
418          case(GM_PAGE_TABLE_FULL):
419          return("GM_PAGE_TABLE_FULL");
420          case(GM_MINOR_OVERFLOW):
421          return("GM_MINOR_OVERFLOW");
422          case(GM_SEND_ORPHANED):
423          return("GM_SEND_ORPHANED");
424          case(GM_HARDWARE_FAULT):
425          return("GM_HARDWARE_FAULT");
426          case(GM_DATA_CORRUPTED):
427          return("GM_DATA_CORRUPTED");
428          case(GM_TIMED_OUT):
429          return("GM_TIMED_OUT");
430          case(GM_USER_ERROR):
431          return("GM_USER_ERROR");
432          case(GM_NO_MATCH):
433          return("GM_NOMATCH");
434          case(GM_NOT_SUPPORTED_IN_KERNEL):
435          return("GM_NOT_SUPPORTED_IN_KERNEL");
436          case(GM_NOT_SUPPORTED_ON_ARCH):
437          return("GM_NOT_SUPPORTED_ON_ARCH");
438          case(GM_PTE_REF_CNT_OVERFLOW):
439          return("GM_PTR_REF_CNT_OVERFLOW");
440          case(GM_NO_DRIVER_SUPPORT):
441          return("GM_NO_DRIVER_SUPPORT");
442          case(GM_FIRMWARE_NOT_RUNNING):
443          return("GM_FIRMWARE_NOT_RUNNING");
444          *      These ones are in the docs but aren't in the header file 
445          */
446
447         default:
448                 return("UNKNOWN GM ERROR CODE");
449         }
#endif
450 }
451
452
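/* Translate a GM receive-event type into a printable string for
 * debugging. */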
453 char *
454 gmnal_rxevent2str(gm_recv_event_t *ev)
455 {
456         short   event;
457         event = GM_RECV_EVENT_TYPE(ev);
458         switch(event) {
459         case(GM_NO_RECV_EVENT):
460                 return("GM_NO_RECV_EVENT");
461         case(GM_SENDS_FAILED_EVENT):
462                 return("GM_SENDS_FAILED_EVENT");
463         case(GM_ALARM_EVENT):
464                 return("GM_ALARM_EVENT");
465         case(GM_SENT_EVENT):
466                 return("GM_SENT_EVENT");
467         case(_GM_SLEEP_EVENT):
468                 return("_GM_SLEEP_EVENT");
469         case(GM_RAW_RECV_EVENT):
470                 return("GM_RAW_RECV_EVENT");
471         case(GM_BAD_SEND_DETECTED_EVENT):
472                 return("GM_BAD_SEND_DETECTED_EVENT");
473         case(GM_SEND_TOKEN_VIOLATION_EVENT):
474                 return("GM_SEND_TOKEN_VIOLATION_EVENT");
475         case(GM_RECV_TOKEN_VIOLATION_EVENT):
476                 return("GM_RECV_TOKEN_VIOLATION_EVENT");
477         case(GM_BAD_RECV_TOKEN_EVENT):
478                 return("GM_BAD_RECV_TOKEN_EVENT");
479         case(GM_ALARM_VIOLATION_EVENT):
480                 return("GM_ALARM_VIOLATION_EVENT");
481         case(GM_RECV_EVENT):
482                 return("GM_RECV_EVENT");
483         case(GM_HIGH_RECV_EVENT):
484                 return("GM_HIGH_RECV_EVENT");
485         case(GM_PEER_RECV_EVENT):
486                 return("GM_PEER_RECV_EVENT");
487         case(GM_HIGH_PEER_RECV_EVENT):
488                 return("GM_HIGH_PEER_RECV_EVENT");
489         case(GM_FAST_RECV_EVENT):
490                 return("GM_FAST_RECV_EVENT");
491         case(GM_FAST_HIGH_RECV_EVENT):
492                 return("GM_FAST_HIGH_RECV_EVENT");
493         case(GM_FAST_PEER_RECV_EVENT):
494                 return("GM_FAST_PEER_RECV_EVENT");
495         case(GM_FAST_HIGH_PEER_RECV_EVENT):
496                 return("GM_FAST_HIGH_PEER_RECV_EVENT");
497         case(GM_REJECTED_SEND_EVENT):
498                 return("GM_REJECTED_SEND_EVENT");
499         case(GM_ORPHANED_SEND_EVENT):
500                 return("GM_ORPHANED_SEND_EVENT");
501         case(GM_BAD_RESEND_DETECTED_EVENT):
502                 return("GM_BAD_RESEND_DETECTED_EVENT");
503         case(GM_DROPPED_SEND_EVENT):
504                 return("GM_DROPPED_SEND_EVENT");
505         case(GM_BAD_SEND_VMA_EVENT):
506                 return("GM_BAD_SEND_VMA_EVENT");
507         case(GM_BAD_RECV_VMA_EVENT):
508                 return("GM_BAD_RECV_VMA_EVENT");
509         case(_GM_FLUSHED_ALARM_EVENT):
510                 return("GM_FLUSHED_ALARM_EVENT");
511         case(GM_SENT_TOKENS_EVENT):
512                 return("GM_SENT_TOKENS_EVENT");
513         case(GM_IGNORE_RECV_EVENT):
514                 return("GM_IGNORE_RECV_EVENT");
515         case(GM_ETHERNET_RECV_EVENT):
516                 return("GM_ETHERNET_RECV_EVENT");
517         case(GM_NEW_NO_RECV_EVENT):
518                 return("GM_NEW_NO_RECV_EVENT");
519         case(GM_NEW_SENDS_FAILED_EVENT):
520                 return("GM_NEW_SENDS_FAILED_EVENT");
521         case(GM_NEW_ALARM_EVENT):
522                 return("GM_NEW_ALARM_EVENT");
523         case(GM_NEW_SENT_EVENT):
524                 return("GM_NEW_SENT_EVENT");
525         case(_GM_NEW_SLEEP_EVENT):
526                 return("GM_NEW_SLEEP_EVENT");
527         case(GM_NEW_RAW_RECV_EVENT):
528                 return("GM_NEW_RAW_RECV_EVENT");
529         case(GM_NEW_BAD_SEND_DETECTED_EVENT):
530                 return("GM_NEW_BAD_SEND_DETECTED_EVENT");
531         case(GM_NEW_SEND_TOKEN_VIOLATION_EVENT):
532                 return("GM_NEW_SEND_TOKEN_VIOLATION_EVENT");
533         case(GM_NEW_RECV_TOKEN_VIOLATION_EVENT):
534                 return("GM_NEW_RECV_TOKEN_VIOLATION_EVENT");
535         case(GM_NEW_BAD_RECV_TOKEN_EVENT):
536                 return("GM_NEW_BAD_RECV_TOKEN_EVENT");
537         case(GM_NEW_ALARM_VIOLATION_EVENT):
538                 return("GM_NEW_ALARM_VIOLATION_EVENT");
539         case(GM_NEW_RECV_EVENT):
540                 return("GM_NEW_RECV_EVENT");
541         case(GM_NEW_HIGH_RECV_EVENT):
542                 return("GM_NEW_HIGH_RECV_EVENT");
543         case(GM_NEW_PEER_RECV_EVENT):
544                 return("GM_NEW_PEER_RECV_EVENT");
545         case(GM_NEW_HIGH_PEER_RECV_EVENT):
546                 return("GM_NEW_HIGH_PEER_RECV_EVENT");
547         case(GM_NEW_FAST_RECV_EVENT):
548                 return("GM_NEW_FAST_RECV_EVENT");
549         case(GM_NEW_FAST_HIGH_RECV_EVENT):
550                 return("GM_NEW_FAST_HIGH_RECV_EVENT");
551         case(GM_NEW_FAST_PEER_RECV_EVENT):
552                 return("GM_NEW_FAST_PEER_RECV_EVENT");
553         case(GM_NEW_FAST_HIGH_PEER_RECV_EVENT):
554                 return("GM_NEW_FAST_HIGH_PEER_RECV_EVENT");
555         case(GM_NEW_REJECTED_SEND_EVENT):
556                 return("GM_NEW_REJECTED_SEND_EVENT");
557         case(GM_NEW_ORPHANED_SEND_EVENT):
558                 return("GM_NEW_ORPHANED_SEND_EVENT");
559         case(_GM_NEW_PUT_NOTIFICATION_EVENT):
560                 return("_GM_NEW_PUT_NOTIFICATION_EVENT");
561         case(GM_NEW_FREE_SEND_TOKEN_EVENT):
562                 return("GM_NEW_FREE_SEND_TOKEN_EVENT");
563         case(GM_NEW_FREE_HIGH_SEND_TOKEN_EVENT):
564                 return("GM_NEW_FREE_HIGH_SEND_TOKEN_EVENT");
565         case(GM_NEW_BAD_RESEND_DETECTED_EVENT):
566                 return("GM_NEW_BAD_RESEND_DETECTED_EVENT");
567         case(GM_NEW_DROPPED_SEND_EVENT):
568                 return("GM_NEW_DROPPED_SEND_EVENT");
569         case(GM_NEW_BAD_SEND_VMA_EVENT):
570                 return("GM_NEW_BAD_SEND_VMA_EVENT");
571         case(GM_NEW_BAD_RECV_VMA_EVENT):
572                 return("GM_NEW_BAD_RECV_VMA_EVENT");
573         case(_GM_NEW_FLUSHED_ALARM_EVENT):
574                 return("GM_NEW_FLUSHED_ALARM_EVENT");
575         case(GM_NEW_SENT_TOKENS_EVENT):
576                 return("GM_NEW_SENT_TOKENS_EVENT");
577         case(GM_NEW_IGNORE_RECV_EVENT):
578                 return("GM_NEW_IGNORE_RECV_EVENT");
579         case(GM_NEW_ETHERNET_RECV_EVENT):
580                 return("GM_NEW_ETHERNET_RECV_EVENT");
581         default:
582                 return("Unknown Recv event");
583         /* _GM_PUT_NOTIFICATION_EVENT */
584         /* GM_FREE_SEND_TOKEN_EVENT */
585         /* GM_FREE_HIGH_SEND_TOKEN_EVENT */
586         }
587 }
588
589
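/* Interruptible sleep for 'delay' scheduler ticks, letting other threads
 * run. */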
590 void
591 gmnal_yield(int delay)
592 {
593         cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
594         cfs_schedule_timeout(delay);
595 }