2 * This program was prepared by the Regents of the University of
3 * California at Los Alamos National Laboratory (the University) under
4 * contract number W-7405-ENG-36 with the U.S. Department of Energy
5 * (DoE). Neither the U.S. Government nor the
6 * University makes any warranty, express or implied, or assumes any
7 * liability or responsibility for the use of this software.
11 * All utilities required by lgmnal
17 * print a console message
18 * the header of each message specifies
19 * the function, file and line number of the caller
23 * TO DO lgmnal_print find how to determine the caller function
26 #define DEFAULT_LEN 64
27 void lgmnal_print(const char *fmt, ...)
30 char *varbuf = NULL, fixedbuf[DEFAULT_LEN];
35 sprintf(fixedbuf, "LGMNAL::");
36 len = vsnprintf(fixedbuf+8, DEFAULT_LEN-8, fmt, ap);
37 if ((len+8) >= DEFAULT_LEN) {
38 PORTAL_ALLOC(varbuf, len+1+8);
40 printk("lgmnal_cb_printf Failed to malloc\n");
41 printk("Truncated message is\n");
46 sprintf(varbuf, "LGMNAL::");
47 len = vsnprintf(varbuf+8, len+1, fmt, ap);
53 if (fixedbuf != varbuf)
54 PORTAL_FREE(varbuf, len+1+8);
60 * allocate a number of small tx buffers and register with GM
61 * so they are wired and set up for DMA. This is a costly operation.
62 * Also allocate a corresponding descriptor to keep track of
64 * Put all descriptors on singly linked list to be available to send function.
65 * This function is only called when the API mutex is held (init or shutdown),
66 * so there is no need to hold the txd spinlock.
/*
 * lgmnal_alloc_stxd
 * Allocate the small-transmit descriptors and their DMA-able buffers,
 * register each buffer with GM, and push the descriptors onto the
 * nal_data->stxd singly linked list.  Only called with the API mutex
 * held (init/shutdown), so the txd spinlock is not taken.
 * Returns LGMNAL_STATUS_OK on success, NOMEM/FAIL on error.
 * NOTE(review): chunk is truncated — the nstx computation, the
 * gm_status declaration and several braces are not visible here.
 */
69 lgmnal_alloc_stxd(lgmnal_data_t *nal_data)
71 int ntx = 0, nstx = 0, i = 0;
72 lgmnal_stxd_t *txd = NULL;
73 void *txbuffer = NULL;
75 LGMNAL_PRINT(LGMNAL_DEBUG_TRACE, ("lgmnal_alloc_small tx\n"));
77 LGMNAL_GM_LOCK(nal_data);
78 ntx = gm_num_send_tokens(nal_data->gm_port);
79 LGMNAL_GM_UNLOCK(nal_data);
80 LGMNAL_PRINT(LGMNAL_DEBUG_VV, ("total number of send tokens available is [%d]\n", ntx));
84 LGMNAL_PRINT(LGMNAL_DEBUG_VV, ("Allocated [%d] send tokens to small messages\n", nstx));
87 #ifdef LGMNAL_USE_GM_HASH
/* NOTE(review): the srxd twin wraps gm_create_hash in
 * LGMNAL_GM_LOCK/UNLOCK; no lock is visible here — confirm whether
 * the missing lines take it, otherwise this is inconsistent. */
88 nal_data->stxd_hash = gm_create_hash(gm_hash_compare_ptrs, gm_hash_hash_ptr, 0, sizeof(void*), nstx, 0);
/* BUG FIX: was testing nal_data->srxd_hash (copy-paste from the rx
 * version), which never reflects the allocation made just above. */
89 if (!nal_data->stxd_hash) {
90 LGMNAL_PRINT(LGMNAL_DEBUG_ERR, ("Failed to create hash table\n\n"));
91 return(LGMNAL_STATUS_NOMEM);
94 nal_data->stxd_hash = NULL;
98 * A semaphore is initialised with the
99 * number of transmit tokens available.
100 * To get a stxd, acquire the token semaphore.
101 * this decrements the available token count
102 * (if no tokens you block here, someone returning a
103 * stxd will release the semaphore and wake you)
104 * When token is obtained acquire the spinlock
105 * to manipulate the list
107 LGMNAL_TXD_TOKEN_INIT(nal_data, nstx);
108 LGMNAL_TXD_LOCK_INIT(nal_data);
/* NOTE(review): '<=' allocates nstx+1 descriptors for nstx tokens —
 * possibly deliberate headroom; verify before changing. */
110 for (i=0; i<=nstx; i++) {
111 PORTAL_ALLOC(txd, sizeof(lgmnal_stxd_t));
113 LGMNAL_PRINT(LGMNAL_DEBUG_ERR, ("Failed to malloc txd [%d]\n", i));
114 return(LGMNAL_STATUS_NOMEM);
117 PORTAL_ALLOC(txbuffer, LGMNAL_SMALL_MSG_SIZE(nal_data));
119 LGMNAL_PRINT(LGMNAL_DEBUG_ERR, ("Failed to malloc txbuffer [%d], size [%d]\n", i, LGMNAL_SMALL_MSG_SIZE(nal_data)));
120 PORTAL_FREE(txd, sizeof(lgmnal_stxd_t));
121 return(LGMNAL_STATUS_FAIL);
123 LGMNAL_PRINT(LGMNAL_DEBUG_V, ("Calling gm_register_memory with port [%p] txbuffer [%p], size [%d]\n",
124 nal_data->gm_port, txbuffer, LGMNAL_SMALL_MSG_SIZE(nal_data)));
125 LGMNAL_GM_LOCK(nal_data);
126 gm_status = gm_register_memory(nal_data->gm_port, txbuffer, LGMNAL_SMALL_MSG_SIZE(nal_data));
127 LGMNAL_GM_UNLOCK(nal_data);
/* registration failed: decode the GM status for the log, then bail */
128 if (gm_status != GM_SUCCESS) {
129 LGMNAL_PRINT(LGMNAL_DEBUG_ERR, ("gm_register_memory failed buffer [%p], index [%d]\n", txbuffer, i));
132 LGMNAL_PRINT(LGMNAL_DEBUG_ERR, ("GM_FAILURE\n"));
134 case(GM_PERMISSION_DENIED):
135 LGMNAL_PRINT(LGMNAL_DEBUG_ERR, ("GM_PERMISSION_DENIED\n"));
137 case(GM_INVALID_PARAMETER):
138 LGMNAL_PRINT(LGMNAL_DEBUG_ERR, ("GM_INVALID_PARAMETER\n"));
141 LGMNAL_PRINT(LGMNAL_DEBUG_ERR, ("Unknown error\n"));
144 return(LGMNAL_STATUS_FAIL);
146 LGMNAL_PRINT(LGMNAL_DEBUG_VV, ("gm_register_memory ok for buffer [%p], index [%d]\n", txbuffer, i));
/* alternate (non-register) path: let GM allocate the DMA buffer */
149 LGMNAL_GM_LOCK(nal_data);
150 txbuffer = gm_dma_malloc(nal_data->gm_port, LGMNAL_SMALL_MSG_SIZE(nal_data));
151 LGMNAL_GM_UNLOCK(nal_data);
153 LGMNAL_PRINT(LGMNAL_DEBUG_ERR, ("Failed to gm_dma_malloc txbuffer [%d], size [%d]\n", i, LGMNAL_SMALL_MSG_SIZE(nal_data)));
154 PORTAL_FREE(txd, sizeof(lgmnal_stxd_t));
155 return(LGMNAL_STATUS_FAIL);
/* fill in the descriptor and map buffer -> descriptor in the hash */
159 txd->buffer = txbuffer;
160 txd->size = LGMNAL_SMALL_MSG_SIZE(nal_data);
161 txd->gmsize = gm_min_size_for_length(txd->size);
162 txd->nal_data = (struct _lgmnal_data_t*)nal_data;
/* NOTE(review): the srxd twin compares this result against
 * GM_SUCCESS; the two call sites should agree on the convention. */
164 if (lgmnal_hash_add(&nal_data->stxd_hash, (void*)txbuffer, (void*)txd)) {
165 LGMNAL_PRINT(LGMNAL_DEBUG_ERR, ("failed to create hash entry\n"));
166 return(LGMNAL_STATUS_FAIL);
/* push onto the head of the free-descriptor list */
170 txd->next = nal_data->stxd;
171 nal_data->stxd = txd;
172 LGMNAL_PRINT(LGMNAL_DEBUG_VV, ("Registered txd [%p] with buffer [%p], size [%d]\n", txd, txd->buffer, txd->size));
175 return(LGMNAL_STATUS_OK);
178 /* Free the list of wired and gm_registered small tx buffers and the tx descriptors
179 that go along with them.
180 * This function is only called when the API mutex is held (init or shutdown),
181 * so there is no need to hold the txd spinlock.
/*
 * lgmnal_free_stxd
 * Walk the nal_data->stxd list, deregistering (or gm_dma_free-ing)
 * each buffer and freeing the buffer and descriptor memory.
 * Only called with the API mutex held, so no txd spinlock.
 * NOTE(review): the loop head, list advance and #ifdef selecting the
 * deregister vs dma_free path are not visible in this chunk.
 */
184 lgmnal_free_stxd(lgmnal_data_t *nal_data)
186 lgmnal_stxd_t *txd = nal_data->stxd, *_txd = NULL;
188 LGMNAL_PRINT(LGMNAL_DEBUG_TRACE, ("lgmnal_free_small tx\n"));
191 LGMNAL_PRINT(LGMNAL_DEBUG_VV, ("Freeing txd [%p] with buffer [%p], size [%d]\n", txd, txd->buffer, txd->size));
/* registered-memory path: unpin then free our own allocation */
195 LGMNAL_GM_LOCK(nal_data);
196 gm_deregister_memory(nal_data->gm_port, _txd->buffer, _txd->size);
197 LGMNAL_GM_UNLOCK(nal_data);
198 PORTAL_FREE(_txd->buffer, LGMNAL_SMALL_MSG_SIZE(nal_data));
/* gm_dma_malloc path: hand the buffer back to GM */
200 LGMNAL_GM_LOCK(nal_data);
201 gm_dma_free(nal_data->gm_port, _txd->buffer);
202 LGMNAL_GM_UNLOCK(nal_data);
204 PORTAL_FREE(_txd, sizeof(lgmnal_stxd_t));
211 * Get a txd from the list
212 * This get us a wired and gm_registered small tx buffer.
213 * This implicitly gets us a send token also.
/*
 * lgmnal_get_stxd
 * Pop a small-tx descriptor (wired, GM-registered buffer) off the
 * free list; acquiring the token first implicitly claims a GM send
 * token.  'block' apparently selects blocking GETTOKEN vs
 * TRYGETTOKEN — the if/else wrapping the two calls is not visible
 * in this chunk, so confirm against the full source.
 */
216 lgmnal_get_stxd(lgmnal_data_t *nal_data, int block)
219 lgmnal_stxd_t *txd = NULL;
220 LGMNAL_PRINT(LGMNAL_DEBUG_TRACE, ("lgmnal_get_stxd nal_data [%p] block[%d]\n",
/* blocking acquire: sleeps until a token is returned */
224 LGMNAL_TXD_GETTOKEN(nal_data);
/* non-blocking acquire: fail fast when no token is available */
226 if (LGMNAL_TXD_TRYGETTOKEN(nal_data)) {
227 LGMNAL_PRINT(LGMNAL_DEBUG_ERR, ("lgmnal_get_stxd can't get token\n"));
/* token held — take the spinlock just to unlink the list head */
231 LGMNAL_TXD_LOCK(nal_data);
232 txd = nal_data->stxd;
234 nal_data->stxd = txd->next;
235 LGMNAL_TXD_UNLOCK(nal_data);
236 LGMNAL_PRINT(LGMNAL_DEBUG_VV, ("lgmnal_get_stxd got [%p], head is [%p]\n", txd, nal_data->stxd));
241 * Return a txd to the list
/*
 * Push the descriptor back on the free-list head, then release the
 * token semaphore, potentially waking a blocked lgmnal_get_stxd.
 */
244 lgmnal_return_stxd(lgmnal_data_t *nal_data, lgmnal_stxd_t *txd)
246 LGMNAL_PRINT(LGMNAL_DEBUG_TRACE, ("lgmnal_return_stxd nal_data [%p], txd[%p]\n", nal_data, txd));
248 LGMNAL_TXD_LOCK(nal_data);
249 txd->next = nal_data->stxd;
250 nal_data->stxd = txd;
251 LGMNAL_TXD_UNLOCK(nal_data);
/* release the token AFTER the list push so a woken getter finds it */
252 LGMNAL_TXD_RETURNTOKEN(nal_data);
258 * allocate a number of small rx buffers and register with GM
259 * so they are wired and set up for DMA. This is a costly operation.
260 * Also allocate a corresponding descriptor to keep track of
262 * Put all descriptors on singly linked list to be available to receive thread.
263 * This function is only called when the API mutex is held (init or shutdown),
264 * so there is no need to hold the rxd spinlock.
/*
 * lgmnal_alloc_srxd
 * Receive-side twin of lgmnal_alloc_stxd: allocate small-rx
 * descriptors and DMA-able buffers, register with GM, and chain onto
 * nal_data->srxd.  Only called with the API mutex held.
 * Returns LGMNAL_STATUS_OK on success, NOMEM/FAIL on error.
 * NOTE(review): chunk is truncated — the nsrx computation, gm_status
 * declaration and a number of braces are not visible here.
 */
267 lgmnal_alloc_srxd(lgmnal_data_t *nal_data)
269 int nrx = 0, nsrx = 0, i = 0;
270 lgmnal_srxd_t *rxd = NULL;
271 void *rxbuffer = NULL;
273 LGMNAL_PRINT(LGMNAL_DEBUG_TRACE, ("lgmnal_alloc_small rx\n"));
275 LGMNAL_GM_LOCK(nal_data);
276 nrx = gm_num_receive_tokens(nal_data->gm_port);
277 LGMNAL_GM_UNLOCK(nal_data);
278 LGMNAL_PRINT(LGMNAL_DEBUG_VV, ("total number of receive tokens available is [%d]\n", nrx));
282 LGMNAL_PRINT(LGMNAL_DEBUG_VV, ("Allocated [%d] receive tokens to small messages\n", nsrx));
285 #ifdef LGMNAL_USE_GM_HASH
286 LGMNAL_GM_LOCK(nal_data);
287 nal_data->srxd_hash = gm_create_hash(gm_hash_compare_ptrs, gm_hash_hash_ptr, 0, sizeof(void*), nsrx, 0);
288 LGMNAL_GM_UNLOCK(nal_data);
289 if (!nal_data->srxd_hash) {
290 LGMNAL_PRINT(LGMNAL_DEBUG_ERR, ("Failed to create hash table\n"));
291 return(LGMNAL_STATUS_NOMEM);
294 nal_data->srxd_hash = NULL;
/* token semaphore + list lock, same scheme as the tx side */
297 LGMNAL_RXD_TOKEN_INIT(nal_data, nsrx);
298 LGMNAL_RXD_LOCK_INIT(nal_data);
/* NOTE(review): '<=' allocates nsrx+1 descriptors for nsrx tokens —
 * possibly deliberate headroom; verify before changing. */
300 for (i=0; i<=nsrx; i++) {
301 PORTAL_ALLOC(rxd, sizeof(lgmnal_srxd_t));
303 LGMNAL_PRINT(LGMNAL_DEBUG_ERR, ("Failed to malloc rxd [%d]\n", i));
304 return(LGMNAL_STATUS_NOMEM);
307 PORTAL_ALLOC(rxbuffer, LGMNAL_SMALL_MSG_SIZE(nal_data));
309 LGMNAL_PRINT(LGMNAL_DEBUG_ERR, ("Failed to malloc rxbuffer [%d], size [%d]\n", i, LGMNAL_SMALL_MSG_SIZE(nal_data)));
310 PORTAL_FREE(rxd, sizeof(lgmnal_srxd_t));
311 return(LGMNAL_STATUS_FAIL);
313 LGMNAL_PRINT(LGMNAL_DEBUG_V, ("Calling gm_register_memory with port [%p] rxbuffer [%p], size [%d]\n",
314 nal_data->gm_port, rxbuffer, LGMNAL_SMALL_MSG_SIZE(nal_data)));
315 LGMNAL_GM_LOCK(nal_data);
316 gm_status = gm_register_memory(nal_data->gm_port, rxbuffer, LGMNAL_SMALL_MSG_SIZE(nal_data));
317 LGMNAL_GM_UNLOCK(nal_data);
/* registration failed: decode the GM status for the log, then bail */
318 if (gm_status != GM_SUCCESS) {
319 LGMNAL_PRINT(LGMNAL_DEBUG_ERR, ("gm_register_memory failed buffer [%p], index [%d]\n", rxbuffer, i));
322 LGMNAL_PRINT(LGMNAL_DEBUG_ERR, ("GM_FAILURE\n"));
324 case(GM_PERMISSION_DENIED):
325 LGMNAL_PRINT(LGMNAL_DEBUG_ERR, ("GM_PERMISSION_DENIED\n"));
327 case(GM_INVALID_PARAMETER):
328 LGMNAL_PRINT(LGMNAL_DEBUG_ERR, ("GM_INVALID_PARAMETER\n"));
331 LGMNAL_PRINT(LGMNAL_DEBUG_ERR, ("Unknown GM error[%d]\n", gm_status));
335 return(LGMNAL_STATUS_FAIL);
/* alternate (non-register) path: let GM allocate the DMA buffer */
338 LGMNAL_GM_LOCK(nal_data);
339 rxbuffer = gm_dma_malloc(nal_data->gm_port, LGMNAL_SMALL_MSG_SIZE(nal_data));
340 LGMNAL_GM_UNLOCK(nal_data);
342 LGMNAL_PRINT(LGMNAL_DEBUG_ERR, ("Failed to gm_dma_malloc rxbuffer [%d], size [%d]\n", i, LGMNAL_SMALL_MSG_SIZE(nal_data)));
343 PORTAL_FREE(rxd, sizeof(lgmnal_srxd_t));
344 return(LGMNAL_STATUS_FAIL);
/* fill in the descriptor and map buffer -> descriptor in the hash */
348 rxd->buffer = rxbuffer;
349 rxd->size = LGMNAL_SMALL_MSG_SIZE(nal_data);
350 rxd->gmsize = gm_min_size_for_length(rxd->size);
/* NOTE(review): the stxd twin tests this result for truthiness, not
 * against GM_SUCCESS; the two call sites should agree. */
352 if (lgmnal_hash_add(&nal_data->srxd_hash, (void*)rxbuffer, (void*)rxd) != GM_SUCCESS) {
353 LGMNAL_PRINT(LGMNAL_DEBUG_ERR, ("failed to create hash entry rxd[%p] for rxbuffer[%p]\n", rxd, rxbuffer));
354 return(LGMNAL_STATUS_FAIL);
/* push onto the head of the free-descriptor list */
357 rxd->next = nal_data->srxd;
358 nal_data->srxd = rxd;
359 LGMNAL_PRINT(LGMNAL_DEBUG_VV, ("Registered rxd [%p] with buffer [%p], size [%d]\n", rxd, rxd->buffer, rxd->size));
362 return(LGMNAL_STATUS_OK);
367 /* Free the list of wired and gm_registered small rx buffers and the rx descriptors
368 * that go along with them.
369 * This function is only called when the API mutex is held (init or shutdown),
370 * so there is no need to hold the rxd spinlock.
/*
 * lgmnal_free_srxd
 * Walk the nal_data->srxd list, deregistering (or gm_dma_free-ing)
 * each buffer and freeing the buffer and descriptor memory.
 * Only called with the API mutex held, so no rxd spinlock.
 * NOTE(review): the loop head, list advance and #ifdef selecting the
 * deregister vs dma_free path are not visible in this chunk.
 */
373 lgmnal_free_srxd(lgmnal_data_t *nal_data)
375 lgmnal_srxd_t *rxd = nal_data->srxd, *_rxd = NULL;
377 LGMNAL_PRINT(LGMNAL_DEBUG_TRACE, ("lgmnal_free_small rx\n"));
380 LGMNAL_PRINT(LGMNAL_DEBUG_VV, ("Freeing rxd [%p] with buffer [%p], size [%d]\n", rxd, rxd->buffer, rxd->size));
/* registered-memory path: unpin then free our own allocation */
385 LGMNAL_GM_LOCK(nal_data);
386 gm_deregister_memory(nal_data->gm_port, _rxd->buffer, _rxd->size);
387 LGMNAL_GM_UNLOCK(nal_data);
/* BUG FIX: was freeing with LGMNAL_SMALL_RXBUFFER_SIZE, but the
 * buffer was allocated with LGMNAL_SMALL_MSG_SIZE(nal_data) in
 * lgmnal_alloc_srxd (and the tx twin frees with the same macro);
 * PORTAL_FREE's size argument must match the allocation. */
388 PORTAL_FREE(_rxd->buffer, LGMNAL_SMALL_MSG_SIZE(nal_data));
/* gm_dma_malloc path: hand the buffer back to GM */
390 LGMNAL_GM_LOCK(nal_data);
391 gm_dma_free(nal_data->gm_port, _rxd->buffer);
392 LGMNAL_GM_UNLOCK(nal_data);
394 PORTAL_FREE(_rxd, sizeof(lgmnal_srxd_t));
401 * Get a rxd from the free list
402 * This get us a wired and gm_registered small rx buffer.
403 * This implicitly gets us a receive token also.
/*
 * lgmnal_get_srxd
 * Pop a small-rx descriptor (wired, GM-registered buffer) off the
 * free list; acquiring the token first implicitly claims a GM
 * receive token.  'block' apparently selects blocking GETTOKEN vs
 * TRYGETTOKEN — the if/else wrapping the two calls is not visible
 * in this chunk, so confirm against the full source.
 */
406 lgmnal_get_srxd(lgmnal_data_t *nal_data, int block)
409 lgmnal_srxd_t *rxd = NULL;
410 LGMNAL_PRINT(LGMNAL_DEBUG_TRACE, ("lgmnal_get_srxd nal_data [%p] block [%d]\n", nal_data, block));
/* blocking acquire: sleeps until a token is returned */
413 LGMNAL_RXD_GETTOKEN(nal_data);
/* non-blocking acquire: fail fast when no token is available */
415 if (LGMNAL_RXD_TRYGETTOKEN(nal_data)) {
416 LGMNAL_PRINT(LGMNAL_DEBUG_ERR, ("lgmnal_get_srxd Can't get token\n"));
/* token held — take the spinlock just to unlink the list head */
420 LGMNAL_RXD_LOCK(nal_data);
421 rxd = nal_data->srxd;
423 nal_data->srxd = rxd->next;
424 LGMNAL_RXD_UNLOCK(nal_data);
425 LGMNAL_PRINT(LGMNAL_DEBUG_VV, ("lgmnal_get_srxd got [%p], head is [%p]\n", rxd, nal_data->srxd));
430 * Return an rxd to the list
/*
 * Push the descriptor back on the free-list head, then release the
 * token semaphore, potentially waking a blocked lgmnal_get_srxd.
 */
433 lgmnal_return_srxd(lgmnal_data_t *nal_data, lgmnal_srxd_t *rxd)
435 LGMNAL_PRINT(LGMNAL_DEBUG_TRACE, ("lgmnal_return_srxd nal_data [%p], rxd[%p]\n", nal_data, rxd));
437 LGMNAL_RXD_LOCK(nal_data);
438 rxd->next = nal_data->srxd;
439 nal_data->srxd = rxd;
440 LGMNAL_RXD_UNLOCK(nal_data);
/* release the token AFTER the list push so a woken getter finds it */
441 LGMNAL_RXD_RETURNTOKEN(nal_data);
446 * Given a pointer to a srxd find
447 * the relevant descriptor for it
448 * This is done by searching a hash
449 * list that is created when the srxd's
/*
 * lgmnal_rxbuffer_to_srxd
 * Reverse-map a receive buffer pointer to its srxd descriptor via
 * the hash built in lgmnal_alloc_srxd.  Uses gm_hash_find when
 * LGMNAL_USE_GM_HASH is defined, else the local lgmnal_hash_find.
 * Returns the descriptor pointer (the return statement and any
 * NULL handling are not visible in this chunk).
 */
453 lgmnal_rxbuffer_to_srxd(lgmnal_data_t *nal_data, void *rxbuffer)
455 lgmnal_srxd_t *srxd = NULL;
456 LGMNAL_PRINT(LGMNAL_DEBUG_TRACE, ("lgmnal_rxbuffer_to_srxd nal_data [%p], rxbuffer [%p]\n", nal_data, rxbuffer));
457 #ifdef LGMNAL_USE_GM_HASH
458 srxd = gm_hash_find(nal_data->srxd_hash, rxbuffer);
460 srxd = lgmnal_hash_find(nal_data->srxd_hash, rxbuffer);
462 LGMNAL_PRINT(LGMNAL_DEBUG_VV, ("srxd is [%p]\n", srxd));
/*
 * lgmnal_stop_rxthread
 * Ask the receive thread to exit: set its flag to THREAD_STOP, set a
 * short GM alarm to kick the thread out of its blocking receive,
 * then poll (sleeping ~1s per iteration, 'delay' iterations — the
 * declaration of 'delay' is not visible here) for the thread to
 * acknowledge by changing the flag.
 * NOTE(review): writes current->state directly rather than using
 * set_current_state() as lgmnal_yield does — consider unifying.
 */
468 lgmnal_stop_rxthread(lgmnal_data_t *nal_data)
474 LGMNAL_PRINT(LGMNAL_DEBUG_TRACE, ("Attempting to stop rxthread nal_data [%p]\n", nal_data));
476 if (nal_data->rxthread_flag != LGMNAL_THREAD_CONTINUE) {
477 LGMNAL_PRINT(LGMNAL_DEBUG_ERR, ("thread flag not correctly set\n"));
480 nal_data->rxthread_flag = LGMNAL_THREAD_STOP;
/* alarm fires in the rx thread's event loop and wakes it */
481 LGMNAL_GM_LOCK(nal_data);
482 gm_set_alarm(nal_data->gm_port, &nal_data->rxthread_alarm, 10, NULL, NULL);
483 LGMNAL_GM_UNLOCK(nal_data);
/* wait for the thread to notice; flag still STOP means not yet */
485 while(nal_data->rxthread_flag == LGMNAL_THREAD_STOP && delay--) {
486 LGMNAL_PRINT(LGMNAL_DEBUG_VV, ("lgmnal_stop_rxthread sleeping\n"));
487 current->state = TASK_INTERRUPTIBLE;
488 schedule_timeout(1024);
/* timed out: the thread never acknowledged the stop request */
491 if (nal_data->rxthread_flag == LGMNAL_THREAD_STOP) {
492 LGMNAL_PRINT(LGMNAL_DEBUG_ERR, ("I DON'T KNOW HOW TO WAKE THE THREAD\n"));
494 LGMNAL_PRINT(LGMNAL_DEBUG_VV, ("RX THREAD SEEMS TO HAVE STOPPED\n"));
/*
 * lgmnal_gm_error
 * Map a gm_status_t code to a human-readable string for diagnostics.
 * NOTE(review): the switch head and several case labels are missing
 * from this chunk.  Two visible strings do not match their case
 * labels and look like typos to confirm against the GM header:
 *   GM_SEND_TIMED_OUT      -> returns "GM_SEND_TIMEDOUT"
 *   GM_PTE_REF_CNT_OVERFLOW -> returns "GM_PTR_REF_CNT_OVERFLOW"
 */
502 lgmnal_gm_error(gm_status_t status)
509 case(GM_INPUT_BUFFER_TOO_SMALL):
510 return("INPUT_BUFFER_TOO_SMALL");
511 case(GM_OUTPUT_BUFFER_TOO_SMALL):
512 return("OUTPUT_BUFFER_TOO_SMALL");
517 case(GM_MEMORY_FAULT):
518 return("MEMORY_FAULT");
519 case(GM_INTERRUPTED):
520 return("INTERRUPTED");
521 case(GM_INVALID_PARAMETER):
522 return("INVALID_PARAMETER");
523 case(GM_OUT_OF_MEMORY):
524 return("OUT_OF_MEMORY");
525 case(GM_INVALID_COMMAND):
526 return("INVALID_COMMAND");
527 case(GM_PERMISSION_DENIED):
528 return("PERMISSION_DENIED");
529 case(GM_INTERNAL_ERROR):
530 return("INTERNAL_ERROR");
532 return("UNATTACHED");
533 case(GM_UNSUPPORTED_DEVICE):
534 return("UNSUPPORTED_DEVICE");
535 case(GM_SEND_TIMED_OUT):
536 return("GM_SEND_TIMEDOUT");
537 case(GM_SEND_REJECTED):
538 return("GM_SEND_REJECTED");
539 case(GM_SEND_TARGET_PORT_CLOSED):
540 return("GM_SEND_TARGET_PORT_CLOSED");
541 case(GM_SEND_TARGET_NODE_UNREACHABLE):
542 return("GM_SEND_TARGET_NODE_UNREACHABLE");
543 case(GM_SEND_DROPPED):
544 return("GM_SEND_DROPPED");
545 case(GM_SEND_PORT_CLOSED):
546 return("GM_SEND_PORT_CLOSED");
547 case(GM_NODE_ID_NOT_YET_SET):
548 return("GM_NODE_ID_NOT_YET_SET");
549 case(GM_STILL_SHUTTING_DOWN):
550 return("GM_STILL_SHUTTING_DOWN");
552 return("GM_CLONE_BUSY");
553 case(GM_NO_SUCH_DEVICE):
554 return("GM_NO_SUCH_DEVICE");
556 return("GM_ABORTED");
557 case(GM_INCOMPATIBLE_LIB_AND_DRIVER):
558 return("GM_INCOMPATIBLE_LIB_AND_DRIVER");
559 case(GM_UNTRANSLATED_SYSTEM_ERROR):
560 return("GM_UNTRANSLATED_SYSTEM_ERROR");
561 case(GM_ACCESS_DENIED):
562 return("GM_ACCESS_DENIED");
566 * These ones are in the docs but aren't in the header file
567 case(GM_DEV_NOT_FOUND):
568 return("GM_DEV_NOT_FOUND");
569 case(GM_INVALID_PORT_NUMBER):
570 return("GM_INVALID_PORT_NUMBER");
572 return("GM_US_ERROR");
573 case(GM_PAGE_TABLE_FULL):
574 return("GM_PAGE_TABLE_FULL");
575 case(GM_MINOR_OVERFLOW):
576 return("GM_MINOR_OVERFLOW");
577 case(GM_SEND_ORPHANED):
578 return("GM_SEND_ORPHANED");
579 case(GM_HARDWARE_FAULT):
580 return("GM_HARDWARE_FAULT");
581 case(GM_DATA_CORRUPTED):
582 return("GM_DATA_CORRUPTED");
584 return("GM_TIMED_OUT");
586 return("GM_USER_ERROR");
588 return("GM_NOMATCH");
589 case(GM_NOT_SUPPORTED_IN_KERNEL):
590 return("GM_NOT_SUPPORTED_IN_KERNEL");
591 case(GM_NOT_SUPPORTED_ON_ARCH):
592 return("GM_NOT_SUPPORTED_ON_ARCH");
593 case(GM_PTE_REF_CNT_OVERFLOW):
594 return("GM_PTR_REF_CNT_OVERFLOW");
595 case(GM_NO_DRIVER_SUPPORT):
596 return("GM_NO_DRIVER_SUPPORT");
597 case(GM_FIRMWARE_NOT_RUNNING):
598 return("GM_FIRMWARE_NOT_RUNNING");
600 * These ones are in the docs but aren't in the header file
/* fallthrough default for unrecognised status codes */
603 return("UNKNOWN GM ERROR CODE");
/*
 * lgmnal_rxevent
 * Map a GM receive-event type (GM_RECV_EVENT_TYPE) to a readable
 * string for diagnostics.
 * NOTE(review): the switch head, the 'msg' buffer declaration and
 * several braces are missing from this chunk; lines 741-743 below
 * are syntactically broken commented-out cases.  Visible strings
 * that do not match their case labels (likely typos to confirm):
 *   GM_SENDS_FAILED_EVENT      -> "GM_SEND_FAILED_EVENT"
 *   GM_BAD_RESEND_DETECTED_EVENT -> "GM_BAD_RESEND_DETETED_EVENT"
 *   GM_SENT_TOKENS_EVENT       -> "GM_SENT_TOKENS_EVENTS"
 * Also verify that 'msg' really holds >= 24 bytes for the default
 * snprintf near the end.
 */
609 lgmnal_rxevent(gm_recv_event_t *ev)
613 event = GM_RECV_EVENT_TYPE(ev);
615 case(GM_NO_RECV_EVENT):
616 return("GM_NO_RECV_EVENT");
617 case(GM_SENDS_FAILED_EVENT):
618 return("GM_SEND_FAILED_EVENT");
619 case(GM_ALARM_EVENT):
620 return("GM_ALARM_EVENT");
622 return("GM_SENT_EVENT");
623 case(_GM_SLEEP_EVENT):
624 return("_GM_SLEEP_EVENT");
625 case(GM_RAW_RECV_EVENT):
626 return("GM_RAW_RECV_EVENT");
627 case(GM_BAD_SEND_DETECTED_EVENT):
628 return("GM_BAD_SEND_DETECTED_EVENT");
629 case(GM_SEND_TOKEN_VIOLATION_EVENT):
630 return("GM_SEND_TOKEN_VIOLATION_EVENT");
631 case(GM_RECV_TOKEN_VIOLATION_EVENT):
632 return("GM_RECV_TOKEN_VIOLATION_EVENT");
633 case(GM_BAD_RECV_TOKEN_EVENT):
634 return("GM_BAD_RECV_TOKEN_EVENT");
635 case(GM_ALARM_VIOLATION_EVENT):
636 return("GM_ALARM_VIOLATION_EVENT");
638 return("GM_RECV_EVENT");
639 case(GM_HIGH_RECV_EVENT):
640 return("GM_HIGH_RECV_EVENT");
641 case(GM_PEER_RECV_EVENT):
642 return("GM_PEER_RECV_EVENT");
643 case(GM_HIGH_PEER_RECV_EVENT):
644 return("GM_HIGH_PEER_RECV_EVENT");
645 case(GM_FAST_RECV_EVENT):
646 return("GM_FAST_RECV_EVENT");
647 case(GM_FAST_HIGH_RECV_EVENT):
648 return("GM_FAST_HIGH_RECV_EVENT");
649 case(GM_FAST_PEER_RECV_EVENT):
650 return("GM_FAST_PEER_RECV_EVENT");
651 case(GM_FAST_HIGH_PEER_RECV_EVENT):
652 return("GM_FAST_HIGH_PEER_RECV_EVENT");
653 case(GM_REJECTED_SEND_EVENT):
654 return("GM_REJECTED_SEND_EVENT");
655 case(GM_ORPHANED_SEND_EVENT):
656 return("GM_ORPHANED_SEND_EVENT");
657 case(GM_BAD_RESEND_DETECTED_EVENT):
658 return("GM_BAD_RESEND_DETETED_EVENT");
659 case(GM_DROPPED_SEND_EVENT):
660 return("GM_DROPPED_SEND_EVENT");
661 case(GM_BAD_SEND_VMA_EVENT):
662 return("GM_BAD_SEND_VMA_EVENT");
663 case(GM_BAD_RECV_VMA_EVENT):
664 return("GM_BAD_RECV_VMA_EVENT");
665 case(_GM_FLUSHED_ALARM_EVENT):
666 return("GM_FLUSHED_ALARM_EVENT");
667 case(GM_SENT_TOKENS_EVENT):
668 return("GM_SENT_TOKENS_EVENTS");
669 case(GM_IGNORE_RECV_EVENT):
670 return("GM_IGNORE_RECV_EVENT");
671 case(GM_ETHERNET_RECV_EVENT):
672 return("GM_ETHERNET_RECV_EVENT");
673 case(GM_NEW_NO_RECV_EVENT):
674 return("GM_NEW_NO_RECV_EVENT");
675 case(GM_NEW_SENDS_FAILED_EVENT):
676 return("GM_NEW_SENDS_FAILED_EVENT");
677 case(GM_NEW_ALARM_EVENT):
678 return("GM_NEW_ALARM_EVENT");
679 case(GM_NEW_SENT_EVENT):
680 return("GM_NEW_SENT_EVENT");
681 case(_GM_NEW_SLEEP_EVENT):
682 return("GM_NEW_SLEEP_EVENT");
683 case(GM_NEW_RAW_RECV_EVENT):
684 return("GM_NEW_RAW_RECV_EVENT");
685 case(GM_NEW_BAD_SEND_DETECTED_EVENT):
686 return("GM_NEW_BAD_SEND_DETECTED_EVENT");
687 case(GM_NEW_SEND_TOKEN_VIOLATION_EVENT):
688 return("GM_NEW_SEND_TOKEN_VIOLATION_EVENT");
689 case(GM_NEW_RECV_TOKEN_VIOLATION_EVENT):
690 return("GM_NEW_RECV_TOKEN_VIOLATION_EVENT");
691 case(GM_NEW_BAD_RECV_TOKEN_EVENT):
692 return("GM_NEW_BAD_RECV_TOKEN_EVENT");
693 case(GM_NEW_ALARM_VIOLATION_EVENT):
694 return("GM_NEW_ALARM_VIOLATION_EVENT");
695 case(GM_NEW_RECV_EVENT):
696 return("GM_NEW_RECV_EVENT");
697 case(GM_NEW_HIGH_RECV_EVENT):
698 return("GM_NEW_HIGH_RECV_EVENT");
699 case(GM_NEW_PEER_RECV_EVENT):
700 return("GM_NEW_PEER_RECV_EVENT");
701 case(GM_NEW_HIGH_PEER_RECV_EVENT):
702 return("GM_NEW_HIGH_PEER_RECV_EVENT");
703 case(GM_NEW_FAST_RECV_EVENT):
704 return("GM_NEW_FAST_RECV_EVENT");
705 case(GM_NEW_FAST_HIGH_RECV_EVENT):
706 return("GM_NEW_FAST_HIGH_RECV_EVENT");
707 case(GM_NEW_FAST_PEER_RECV_EVENT):
708 return("GM_NEW_FAST_PEER_RECV_EVENT");
709 case(GM_NEW_FAST_HIGH_PEER_RECV_EVENT):
710 return("GM_NEW_FAST_HIGH_PEER_RECV_EVENT");
711 case(GM_NEW_REJECTED_SEND_EVENT):
712 return("GM_NEW_REJECTED_SEND_EVENT");
713 case(GM_NEW_ORPHANED_SEND_EVENT):
714 return("GM_NEW_ORPHANED_SEND_EVENT");
715 case(_GM_NEW_PUT_NOTIFICATION_EVENT):
716 return("_GM_NEW_PUT_NOTIFICATION_EVENT");
717 case(GM_NEW_FREE_SEND_TOKEN_EVENT):
718 return("GM_NEW_FREE_SEND_TOKEN_EVENT");
719 case(GM_NEW_FREE_HIGH_SEND_TOKEN_EVENT):
720 return("GM_NEW_FREE_HIGH_SEND_TOKEN_EVENT");
721 case(GM_NEW_BAD_RESEND_DETECTED_EVENT):
722 return("GM_NEW_BAD_RESEND_DETECTED_EVENT");
723 case(GM_NEW_DROPPED_SEND_EVENT):
724 return("GM_NEW_DROPPED_SEND_EVENT");
725 case(GM_NEW_BAD_SEND_VMA_EVENT):
726 return("GM_NEW_BAD_SEND_VMA_EVENT");
727 case(GM_NEW_BAD_RECV_VMA_EVENT):
728 return("GM_NEW_BAD_RECV_VMA_EVENT");
729 case(_GM_NEW_FLUSHED_ALARM_EVENT):
730 return("GM_NEW_FLUSHED_ALARM_EVENT");
731 case(GM_NEW_SENT_TOKENS_EVENT):
732 return("GM_NEW_SENT_TOKENS_EVENT");
733 case(GM_NEW_IGNORE_RECV_EVENT):
734 return("GM_NEW_IGNORE_RECV_EVENT");
735 case(GM_NEW_ETHERNET_RECV_EVENT):
736 return("GM_NEW_ETHERNET_RECV_EVENT");
/* default: format the raw numeric event code */
738 snprintf(msg, 24, "Unknown Recv event [%d]", event);
741 case(/* _GM_PUT_NOTIFICATION_EVENT */
742 case(/* GM_FREE_SEND_TOKEN_EVENT */
743 case(/* GM_FREE_HIGH_SEND_TOKEN_EVENT */
/*
 * lgmnal_yield
 * Sleep for 'delay' jiffies, letting other tasks run; interruptible
 * so signals can cut the sleep short.
 */
750 lgmnal_yield(int delay)
752 set_current_state(TASK_INTERRUPTIBLE);
753 schedule_timeout(delay);
/*
 * lgmnal_is_small_message
 * Decide whether a message of 'len' bytes fits the small-message
 * path (strictly less than LGMNAL_SMALL_MSG_SIZE).  niov/iov are
 * accepted but not examined in the visible lines.
 * NOTE(review): logging the "not small" outcome at DEBUG_ERR looks
 * overly severe for what is a normal routing decision — confirm.
 */
757 lgmnal_is_small_message(lgmnal_data_t *nal_data, int niov, struct iovec *iov, int len)
760 LGMNAL_PRINT(LGMNAL_DEBUG_TRACE, ("lgmnal_is_small_message len is [%d]\n", len));
761 if (len < LGMNAL_SMALL_MSG_SIZE(nal_data)) {
762 LGMNAL_PRINT(LGMNAL_DEBUG_VV, ("Yep, small message]\n"));
765 LGMNAL_PRINT(LGMNAL_DEBUG_ERR, ("No, not small message]\n"));
/*
 * lgmnal_hash_find
 * Linear walk of the lgmnal_hash_t chain looking for an exact key
 * (pointer) match; returns the stored data, or signals not-found
 * (the loop head, 'data' declaration and return statements are not
 * visible in this chunk).  Despite the name this is a linked-list
 * scan, O(n) in the number of entries.
 */
771 lgmnal_hash_find(lgmnal_hash_t *hash, void *key)
775 LGMNAL_PRINT(LGMNAL_DEBUG_TRACE, ("lgmnal_hash_find hash [%p] key [%p]\n", hash, key));
778 LGMNAL_PRINT(LGMNAL_DEBUG_VV, ("lgmnal_hash_find Stepping [%d]\n", count++));
779 if (hash->key == key) {
781 LGMNAL_PRINT(LGMNAL_DEBUG_VV, ("lgmnal_hash_find hash got data[%p]\n", data));
786 LGMNAL_PRINT(LGMNAL_DEBUG_VV, ("lgmnal_hash_find data not found\n"));
791 * TO DO hash. figure out why getting bad stuff from gm_hash and then use it.
/*
 * lgmnal_hash_add
 * Insert a key/data pair: delegates to gm_hash_insert when
 * LGMNAL_USE_GM_HASH is defined, otherwise prepends a new node to
 * the local linked-list "hash".
 * NOTE(review): the node-field assignments and list relink are not
 * visible in this chunk.
 */
795 lgmnal_hash_add(lgmnal_hash_t **hash, void *key, void *data)
798 #ifdef LGMNAL_USE_GM_HASH
/* BUG FIX: the original line was missing the closing parenthesis of
 * the return expression, which cannot compile with this path on. */
799 return(gm_hash_insert(*hash, (void*)key, (void*)data));
801 lgmnal_hash_t *new = NULL;
802 LGMNAL_PRINT(LGMNAL_DEBUG_TRACE, ("lgmnal_hash_add hash [%p]\n", *hash));
803 PORTAL_ALLOC(new, sizeof(lgmnal_hash_t));
/* NOTE(review): memset runs before the allocation-failure check
 * below — if PORTAL_ALLOC can leave 'new' NULL this dereferences a
 * NULL pointer; confirm ordering against the missing lines. */
804 memset(new, 0, sizeof(lgmnal_hash_t));
806 LGMNAL_PRINT(LGMNAL_DEBUG_ERR, ("lgmnal_hash_add :: can't get memory\n"));
813 LGMNAL_PRINT(LGMNAL_DEBUG_VV, ("lgmnal_hash_add hash head [%p]\n", *hash));
/*
 * lgmnal_hash_free
 * Free every node of the local linked-list hash (the loop head and
 * list advance are not visible in this chunk).
 */
819 lgmnal_hash_free(lgmnal_hash_t **hash)
822 lgmnal_hash_t *_hash = NULL;
/* BUG FIX: both debug prints used the malformed specifier "[p%]";
 * corrected to "[%p]" so the pointer argument is actually formatted
 * (a mismatched printf specifier/argument pair is undefined). */
823 LGMNAL_PRINT(LGMNAL_DEBUG_TRACE, ("lgmnal_hash_free hash [%p]\n", *hash));
826 LGMNAL_PRINT(LGMNAL_DEBUG_VV, ("lgmnal_hash_free freeing hash [%p]\n", _hash));
829 PORTAL_FREE(_hash, sizeof(lgmnal_hash_t));
/* Export the public lgmnal utility entry points to other modules. */
835 EXPORT_SYMBOL(lgmnal_yield);
836 EXPORT_SYMBOL(lgmnal_print);
837 EXPORT_SYMBOL(lgmnal_alloc_srxd);
838 EXPORT_SYMBOL(lgmnal_get_srxd);
839 EXPORT_SYMBOL(lgmnal_return_srxd);
840 EXPORT_SYMBOL(lgmnal_free_srxd);
841 EXPORT_SYMBOL(lgmnal_alloc_stxd);
842 EXPORT_SYMBOL(lgmnal_get_stxd);
843 EXPORT_SYMBOL(lgmnal_return_stxd);
844 EXPORT_SYMBOL(lgmnal_free_stxd);
845 EXPORT_SYMBOL(lgmnal_rxbuffer_to_srxd);
846 EXPORT_SYMBOL(lgmnal_rxevent);
847 EXPORT_SYMBOL(lgmnal_gm_error);
848 EXPORT_SYMBOL(lgmnal_stop_rxthread);