1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (c) 2003 Los Alamos National Laboratory (LANL)
6 * This file is part of Lustre, http://www.lustre.org/
8 * Lustre is free software; you can redistribute it and/or
9 * modify it under the terms of version 2 of the GNU General Public
10 * License as published by the Free Software Foundation.
12 * Lustre is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with Lustre; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 * All utilities required by lgmnal
29 * print a console message
30 * the header of each messages specifies
31 * the function, file and line number of the caller
35 * TO DO lgmnal_print find how to determine the caller function
#define DEFAULT_LEN 64
/*
 *      Print a console message prefixed with "LGMNAL::".
 *      If the formatted message does not fit in the fixed on-stack buffer,
 *      a buffer of the exact required length is allocated; if that
 *      allocation fails, the truncated fixed-buffer message is printed
 *      instead.
 *
 *      BUG FIX: the original passed the same va_list to vsnprintf twice.
 *      A va_list is consumed by the first vararg pass; reusing it without
 *      va_end()/va_start() is undefined behavior.  Each vsnprintf call now
 *      gets a freshly started va_list.
 */
void lgmnal_print(const char *fmt, ...)
{
        va_list ap;
        char    *varbuf = NULL, fixedbuf[DEFAULT_LEN];
        int     len = 0;

        sprintf(fixedbuf, "LGMNAL::");
        va_start(ap, fmt);
        len = vsnprintf(fixedbuf+8, DEFAULT_LEN-8, fmt, ap);
        va_end(ap);

        if ((len+8) >= DEFAULT_LEN) {
                /* message was truncated; retry with an exact-size buffer */
                PORTAL_ALLOC(varbuf, len+1+8);
                if (!varbuf) {
                        printk("lgmnal_cb_printf Failed to malloc\n");
                        printk("Truncated message is\n");
                        printk("%s\n", fixedbuf);
                        return;
                }
                sprintf(varbuf, "LGMNAL::");
                /* restart the va_list for the second formatting pass */
                va_start(ap, fmt);
                len = vsnprintf(varbuf+8, len+1, fmt, ap);
                va_end(ap);
        } else {
                varbuf = fixedbuf;
        }

        printk("%s\n", varbuf);

        if (fixedbuf != varbuf)
                PORTAL_FREE(varbuf, len+1+8);
}
72 * allocate a number of small tx buffers and register with GM
73 * so they are wired and set up for DMA. This is a costly operation.
74 * Also allocate a corrosponding descriptor to keep track of
76 * Put all descriptors on singly linked list to be available to send function.
77 * This function is only called when the API mutex is held (init or shutdown),
78 * so there is no need to hold the txd spinlock.
81 lgmnal_alloc_stxd(lgmnal_data_t *nal_data)
83 int ntx = 0, nstx = 0, i = 0;
84 lgmnal_stxd_t *txd = NULL;
85 void *txbuffer = NULL;
87 LGMNAL_PRINT(LGMNAL_DEBUG_TRACE, ("lgmnal_alloc_small tx\n"));
89 LGMNAL_GM_LOCK(nal_data);
90 ntx = gm_num_send_tokens(nal_data->gm_port);
91 LGMNAL_GM_UNLOCK(nal_data);
92 LGMNAL_PRINT(LGMNAL_DEBUG_VV, ("total number of send tokens available is [%d]\n", ntx));
96 LGMNAL_PRINT(LGMNAL_DEBUG_VV, ("Allocated [%d] send tokens to small messages\n", nstx));
99 #ifdef LGMNAL_USE_GM_HASH
100 nal_data->stxd_hash = gm_create_hash(gm_hash_compare_ptrs, gm_hash_hash_ptr, 0, sizeof(void*), nstx, 0);
101 if (!nal_data->srxd_hash) {
102 LGMNAL_PRINT(LGMNAL_DEBUG_ERR, ("Failed to create hash table\n\n"));
103 return(LGMNAL_STATUS_NOMEM);
106 nal_data->stxd_hash = NULL;
110 * A semaphore is initialised with the
111 * number of transmit tokens available.
112 * To get a stxd, acquire the token semaphore.
113 * this decrements the available token count
114 * (if no tokens you block here, someone returning a
115 * stxd will release the semaphore and wake you)
116 * When token is obtained acquire the spinlock
117 * to manipulate the list
119 LGMNAL_TXD_TOKEN_INIT(nal_data, nstx);
120 LGMNAL_TXD_LOCK_INIT(nal_data);
122 for (i=0; i<=nstx; i++) {
123 PORTAL_ALLOC(txd, sizeof(lgmnal_stxd_t));
125 LGMNAL_PRINT(LGMNAL_DEBUG_ERR, ("Failed to malloc txd [%d]\n", i));
126 return(LGMNAL_STATUS_NOMEM);
129 PORTAL_ALLOC(txbuffer, LGMNAL_SMALL_MSG_SIZE(nal_data));
131 LGMNAL_PRINT(LGMNAL_DEBUG_ERR, ("Failed to malloc txbuffer [%d], size [%d]\n", i, LGMNAL_SMALL_MSG_SIZE(nal_data)));
132 PORTAL_FREE(txd, sizeof(lgmnal_stxd_t));
133 return(LGMNAL_STATUS_FAIL);
135 LGMNAL_PRINT(LGMNAL_DEBUG_V, ("Calling gm_register_memory with port [%p] txbuffer [%p], size [%d]\n",
136 nal_data->gm_port, txbuffer, LGMNAL_SMALL_MSG_SIZE(nal_data)));
137 LGMNAL_GM_LOCK(nal_data);
138 gm_status = gm_register_memory(nal_data->gm_port, txbuffer, LGMNAL_SMALL_MSG_SIZE(nal_data));
139 LGMNAL_GM_UNLOCK(nal_data);
140 if (gm_status != GM_SUCCESS) {
141 LGMNAL_PRINT(LGMNAL_DEBUG_ERR, ("gm_register_memory failed buffer [%p], index [%d]\n", txbuffer, i));
144 LGMNAL_PRINT(LGMNAL_DEBUG_ERR, ("GM_FAILURE\n"));
146 case(GM_PERMISSION_DENIED):
147 LGMNAL_PRINT(LGMNAL_DEBUG_ERR, ("GM_PERMISSION_DENIED\n"));
149 case(GM_INVALID_PARAMETER):
150 LGMNAL_PRINT(LGMNAL_DEBUG_ERR, ("GM_INVALID_PARAMETER\n"));
153 LGMNAL_PRINT(LGMNAL_DEBUG_ERR, ("Unknown error\n"));
156 return(LGMNAL_STATUS_FAIL);
158 LGMNAL_PRINT(LGMNAL_DEBUG_VV, ("gm_register_memory ok for buffer [%p], index [%d]\n", txbuffer, i));
161 LGMNAL_GM_LOCK(nal_data);
162 txbuffer = gm_dma_malloc(nal_data->gm_port, LGMNAL_SMALL_MSG_SIZE(nal_data));
163 LGMNAL_GM_UNLOCK(nal_data);
165 LGMNAL_PRINT(LGMNAL_DEBUG_ERR, ("Failed to gm_dma_malloc txbuffer [%d], size [%d]\n", i, LGMNAL_SMALL_MSG_SIZE(nal_data)));
166 PORTAL_FREE(txd, sizeof(lgmnal_stxd_t));
167 return(LGMNAL_STATUS_FAIL);
171 txd->buffer = txbuffer;
172 txd->size = LGMNAL_SMALL_MSG_SIZE(nal_data);
173 txd->gmsize = gm_min_size_for_length(txd->size);
174 txd->nal_data = (struct _lgmnal_data_t*)nal_data;
176 if (lgmnal_hash_add(&nal_data->stxd_hash, (void*)txbuffer, (void*)txd)) {
177 LGMNAL_PRINT(LGMNAL_DEBUG_ERR, ("failed to create hash entry\n"));
178 return(LGMNAL_STATUS_FAIL);
182 txd->next = nal_data->stxd;
183 nal_data->stxd = txd;
184 LGMNAL_PRINT(LGMNAL_DEBUG_VV, ("Registered txd [%p] with buffer [%p], size [%d]\n", txd, txd->buffer, txd->size));
187 return(LGMNAL_STATUS_OK);
190 /* Free the list of wired and gm_registered small tx buffers and the tx descriptors
191 that go along with them.
192 * This function is only called when the API mutex is held (init or shutdown),
193 * so there is no need to hold the txd spinlock.
196 lgmnal_free_stxd(lgmnal_data_t *nal_data)
198 lgmnal_stxd_t *txd = nal_data->stxd, *_txd = NULL;
200 LGMNAL_PRINT(LGMNAL_DEBUG_TRACE, ("lgmnal_free_small tx\n"));
203 LGMNAL_PRINT(LGMNAL_DEBUG_VV, ("Freeing txd [%p] with buffer [%p], size [%d]\n", txd, txd->buffer, txd->size));
207 LGMNAL_GM_LOCK(nal_data);
208 gm_deregister_memory(nal_data->gm_port, _txd->buffer, _txd->size);
209 LGMNAL_GM_UNLOCK(nal_data);
210 PORTAL_FREE(_txd->buffer, LGMNAL_SMALL_MSG_SIZE(nal_data));
212 LGMNAL_GM_LOCK(nal_data);
213 gm_dma_free(nal_data->gm_port, _txd->buffer);
214 LGMNAL_GM_UNLOCK(nal_data);
216 PORTAL_FREE(_txd, sizeof(lgmnal_stxd_t));
223 * Get a txd from the list
224 * This get us a wired and gm_registered small tx buffer.
225 * This implicitly gets us a send token also.
228 lgmnal_get_stxd(lgmnal_data_t *nal_data, int block)
231 lgmnal_stxd_t *txd = NULL;
232 LGMNAL_PRINT(LGMNAL_DEBUG_TRACE, ("lgmnal_get_stxd nal_data [%p] block[%d]\n",
236 LGMNAL_TXD_GETTOKEN(nal_data);
238 if (LGMNAL_TXD_TRYGETTOKEN(nal_data)) {
239 LGMNAL_PRINT(LGMNAL_DEBUG_ERR, ("lgmnal_get_stxd can't get token\n"));
243 LGMNAL_TXD_LOCK(nal_data);
244 txd = nal_data->stxd;
246 nal_data->stxd = txd->next;
247 LGMNAL_TXD_UNLOCK(nal_data);
248 LGMNAL_PRINT(LGMNAL_DEBUG_VV, ("lgmnal_get_stxd got [%p], head is [%p]\n", txd, nal_data->stxd));
253 * Return a txd to the list
256 lgmnal_return_stxd(lgmnal_data_t *nal_data, lgmnal_stxd_t *txd)
258 LGMNAL_PRINT(LGMNAL_DEBUG_TRACE, ("lgmnal_return_stxd nal_data [%p], txd[%p]\n", nal_data, txd));
260 LGMNAL_TXD_LOCK(nal_data);
261 txd->next = nal_data->stxd;
262 nal_data->stxd = txd;
263 LGMNAL_TXD_UNLOCK(nal_data);
264 LGMNAL_TXD_RETURNTOKEN(nal_data);
270 * allocate a number of small rx buffers and register with GM
271 * so they are wired and set up for DMA. This is a costly operation.
272 * Also allocate a corrosponding descriptor to keep track of
274 * Put all descriptors on singly linked list to be available to receive thread.
275 * This function is only called when the API mutex is held (init or shutdown),
276 * so there is no need to hold the rxd spinlock.
279 lgmnal_alloc_srxd(lgmnal_data_t *nal_data)
281 int nrx = 0, nsrx = 0, i = 0;
282 lgmnal_srxd_t *rxd = NULL;
283 void *rxbuffer = NULL;
285 LGMNAL_PRINT(LGMNAL_DEBUG_TRACE, ("lgmnal_alloc_small rx\n"));
287 LGMNAL_GM_LOCK(nal_data);
288 nrx = gm_num_receive_tokens(nal_data->gm_port);
289 LGMNAL_GM_UNLOCK(nal_data);
290 LGMNAL_PRINT(LGMNAL_DEBUG_VV, ("total number of receive tokens available is [%d]\n", nrx));
294 LGMNAL_PRINT(LGMNAL_DEBUG_VV, ("Allocated [%d] receive tokens to small messages\n", nsrx));
297 #ifdef LGMNAL_USE_GM_HASH
298 LGMNAL_GM_LOCK(nal_data);
299 nal_data->srxd_hash = gm_create_hash(gm_hash_compare_ptrs, gm_hash_hash_ptr, 0, sizeof(void*), nsrx, 0);
300 LGMNAL_GM_UNLOCK(nal_data);
301 if (!nal_data->srxd_hash) {
302 LGMNAL_PRINT(LGMNAL_DEBUG_ERR, ("Failed to create hash table\n"));
303 return(LGMNAL_STATUS_NOMEM);
306 nal_data->srxd_hash = NULL;
309 LGMNAL_RXD_TOKEN_INIT(nal_data, nsrx);
310 LGMNAL_RXD_LOCK_INIT(nal_data);
312 for (i=0; i<=nsrx; i++) {
313 PORTAL_ALLOC(rxd, sizeof(lgmnal_srxd_t));
315 LGMNAL_PRINT(LGMNAL_DEBUG_ERR, ("Failed to malloc rxd [%d]\n", i));
316 return(LGMNAL_STATUS_NOMEM);
319 PORTAL_ALLOC(rxbuffer, LGMNAL_SMALL_MSG_SIZE(nal_data));
321 LGMNAL_PRINT(LGMNAL_DEBUG_ERR, ("Failed to malloc rxbuffer [%d], size [%d]\n", i, LGMNAL_SMALL_MSG_SIZE(nal_data)));
322 PORTAL_FREE(rxd, sizeof(lgmnal_srxd_t));
323 return(LGMNAL_STATUS_FAIL);
325 LGMNAL_PRINT(LGMNAL_DEBUG_V, ("Calling gm_register_memory with port [%p] rxbuffer [%p], size [%d]\n",
326 nal_data->gm_port, rxbuffer, LGMNAL_SMALL_MSG_SIZE(nal_data)));
327 LGMNAL_GM_LOCK(nal_data);
328 gm_status = gm_register_memory(nal_data->gm_port, rxbuffer, LGMNAL_SMALL_MSG_SIZE(nal_data));
329 LGMNAL_GM_UNLOCK(nal_data);
330 if (gm_status != GM_SUCCESS) {
331 LGMNAL_PRINT(LGMNAL_DEBUG_ERR, ("gm_register_memory failed buffer [%p], index [%d]\n", rxbuffer, i));
334 LGMNAL_PRINT(LGMNAL_DEBUG_ERR, ("GM_FAILURE\n"));
336 case(GM_PERMISSION_DENIED):
337 LGMNAL_PRINT(LGMNAL_DEBUG_ERR, ("GM_PERMISSION_DENIED\n"));
339 case(GM_INVALID_PARAMETER):
340 LGMNAL_PRINT(LGMNAL_DEBUG_ERR, ("GM_INVALID_PARAMETER\n"));
343 LGMNAL_PRINT(LGMNAL_DEBUG_ERR, ("Unknown GM error[%d]\n", gm_status));
347 return(LGMNAL_STATUS_FAIL);
350 LGMNAL_GM_LOCK(nal_data);
351 rxbuffer = gm_dma_malloc(nal_data->gm_port, LGMNAL_SMALL_MSG_SIZE(nal_data));
352 LGMNAL_GM_UNLOCK(nal_data);
354 LGMNAL_PRINT(LGMNAL_DEBUG_ERR, ("Failed to gm_dma_malloc rxbuffer [%d], size [%d]\n", i, LGMNAL_SMALL_MSG_SIZE(nal_data)));
355 PORTAL_FREE(rxd, sizeof(lgmnal_srxd_t));
356 return(LGMNAL_STATUS_FAIL);
360 rxd->buffer = rxbuffer;
361 rxd->size = LGMNAL_SMALL_MSG_SIZE(nal_data);
362 rxd->gmsize = gm_min_size_for_length(rxd->size);
364 if (lgmnal_hash_add(&nal_data->srxd_hash, (void*)rxbuffer, (void*)rxd) != GM_SUCCESS) {
365 LGMNAL_PRINT(LGMNAL_DEBUG_ERR, ("failed to create hash entry rxd[%p] for rxbuffer[%p]\n", rxd, rxbuffer));
366 return(LGMNAL_STATUS_FAIL);
369 rxd->next = nal_data->srxd;
370 nal_data->srxd = rxd;
371 LGMNAL_PRINT(LGMNAL_DEBUG_VV, ("Registered rxd [%p] with buffer [%p], size [%d]\n", rxd, rxd->buffer, rxd->size));
374 return(LGMNAL_STATUS_OK);
379 /* Free the list of wired and gm_registered small rx buffers and the rx descriptors
380 * that go along with them.
381 * This function is only called when the API mutex is held (init or shutdown),
382 * so there is no need to hold the rxd spinlock.
385 lgmnal_free_srxd(lgmnal_data_t *nal_data)
387 lgmnal_srxd_t *rxd = nal_data->srxd, *_rxd = NULL;
389 LGMNAL_PRINT(LGMNAL_DEBUG_TRACE, ("lgmnal_free_small rx\n"));
392 LGMNAL_PRINT(LGMNAL_DEBUG_VV, ("Freeing rxd [%p] with buffer [%p], size [%d]\n", rxd, rxd->buffer, rxd->size));
397 LGMNAL_GM_LOCK(nal_data);
398 gm_deregister_memory(nal_data->gm_port, _rxd->buffer, _rxd->size);
399 LGMNAL_GM_UNLOCK(nal_data);
400 PORTAL_FREE(_rxd->buffer, LGMNAL_SMALL_RXBUFFER_SIZE);
402 LGMNAL_GM_LOCK(nal_data);
403 gm_dma_free(nal_data->gm_port, _rxd->buffer);
404 LGMNAL_GM_UNLOCK(nal_data);
406 PORTAL_FREE(_rxd, sizeof(lgmnal_srxd_t));
413 * Get a rxd from the free list
414 * This get us a wired and gm_registered small rx buffer.
415 * This implicitly gets us a receive token also.
418 lgmnal_get_srxd(lgmnal_data_t *nal_data, int block)
421 lgmnal_srxd_t *rxd = NULL;
422 LGMNAL_PRINT(LGMNAL_DEBUG_TRACE, ("lgmnal_get_srxd nal_data [%p] block [%d]\n", nal_data, block));
425 LGMNAL_RXD_GETTOKEN(nal_data);
427 if (LGMNAL_RXD_TRYGETTOKEN(nal_data)) {
428 LGMNAL_PRINT(LGMNAL_DEBUG_ERR, ("lgmnal_get_srxd Can't get token\n"));
432 LGMNAL_RXD_LOCK(nal_data);
433 rxd = nal_data->srxd;
435 nal_data->srxd = rxd->next;
436 LGMNAL_RXD_UNLOCK(nal_data);
437 LGMNAL_PRINT(LGMNAL_DEBUG_VV, ("lgmnal_get_srxd got [%p], head is [%p]\n", rxd, nal_data->srxd));
442 * Return an rxd to the list
445 lgmnal_return_srxd(lgmnal_data_t *nal_data, lgmnal_srxd_t *rxd)
447 LGMNAL_PRINT(LGMNAL_DEBUG_TRACE, ("lgmnal_return_srxd nal_data [%p], rxd[%p]\n", nal_data, rxd));
449 LGMNAL_RXD_LOCK(nal_data);
450 rxd->next = nal_data->srxd;
451 nal_data->srxd = rxd;
452 LGMNAL_RXD_UNLOCK(nal_data);
453 LGMNAL_RXD_RETURNTOKEN(nal_data);
458 * Given a pointer to a srxd find
459 * the relevant descriptor for it
460 * This is done by searching a hash
461 * list that is created when the srxd's
465 lgmnal_rxbuffer_to_srxd(lgmnal_data_t *nal_data, void *rxbuffer)
467 lgmnal_srxd_t *srxd = NULL;
468 LGMNAL_PRINT(LGMNAL_DEBUG_TRACE, ("lgmnal_rxbuffer_to_srxd nal_data [%p], rxbuffer [%p]\n", nal_data, rxbuffer));
469 #ifdef LGMNAL_USE_GM_HASH
470 srxd = gm_hash_find(nal_data->srxd_hash, rxbuffer);
472 srxd = lgmnal_hash_find(nal_data->srxd_hash, rxbuffer);
474 LGMNAL_PRINT(LGMNAL_DEBUG_VV, ("srxd is [%p]\n", srxd));
480 lgmnal_stop_rxthread(lgmnal_data_t *nal_data)
486 LGMNAL_PRINT(LGMNAL_DEBUG_TRACE, ("Attempting to stop rxthread nal_data [%p]\n", nal_data));
488 if (nal_data->rxthread_flag != LGMNAL_THREAD_CONTINUE) {
489 LGMNAL_PRINT(LGMNAL_DEBUG_ERR, ("thread flag not correctly set\n"));
492 nal_data->rxthread_flag = LGMNAL_THREAD_STOP;
493 LGMNAL_GM_LOCK(nal_data);
494 gm_set_alarm(nal_data->gm_port, &nal_data->rxthread_alarm, 10, NULL, NULL);
495 LGMNAL_GM_UNLOCK(nal_data);
497 while(nal_data->rxthread_flag == LGMNAL_THREAD_STOP && delay--) {
498 LGMNAL_PRINT(LGMNAL_DEBUG_VV, ("lgmnal_stop_rxthread sleeping\n"));
499 current->state = TASK_INTERRUPTIBLE;
500 schedule_timeout(1024);
503 if (nal_data->rxthread_flag == LGMNAL_THREAD_STOP) {
504 LGMNAL_PRINT(LGMNAL_DEBUG_ERR, ("I DON'T KNOW HOW TO WAKE THE THREAD\n"));
506 LGMNAL_PRINT(LGMNAL_DEBUG_VV, ("RX THREAD SEEMS TO HAVE STOPPED\n"));
514 lgmnal_gm_error(gm_status_t status)
521 case(GM_INPUT_BUFFER_TOO_SMALL):
522 return("INPUT_BUFFER_TOO_SMALL");
523 case(GM_OUTPUT_BUFFER_TOO_SMALL):
524 return("OUTPUT_BUFFER_TOO_SMALL");
529 case(GM_MEMORY_FAULT):
530 return("MEMORY_FAULT");
531 case(GM_INTERRUPTED):
532 return("INTERRUPTED");
533 case(GM_INVALID_PARAMETER):
534 return("INVALID_PARAMETER");
535 case(GM_OUT_OF_MEMORY):
536 return("OUT_OF_MEMORY");
537 case(GM_INVALID_COMMAND):
538 return("INVALID_COMMAND");
539 case(GM_PERMISSION_DENIED):
540 return("PERMISSION_DENIED");
541 case(GM_INTERNAL_ERROR):
542 return("INTERNAL_ERROR");
544 return("UNATTACHED");
545 case(GM_UNSUPPORTED_DEVICE):
546 return("UNSUPPORTED_DEVICE");
547 case(GM_SEND_TIMED_OUT):
548 return("GM_SEND_TIMEDOUT");
549 case(GM_SEND_REJECTED):
550 return("GM_SEND_REJECTED");
551 case(GM_SEND_TARGET_PORT_CLOSED):
552 return("GM_SEND_TARGET_PORT_CLOSED");
553 case(GM_SEND_TARGET_NODE_UNREACHABLE):
554 return("GM_SEND_TARGET_NODE_UNREACHABLE");
555 case(GM_SEND_DROPPED):
556 return("GM_SEND_DROPPED");
557 case(GM_SEND_PORT_CLOSED):
558 return("GM_SEND_PORT_CLOSED");
559 case(GM_NODE_ID_NOT_YET_SET):
560 return("GM_NODE_ID_NOT_YET_SET");
561 case(GM_STILL_SHUTTING_DOWN):
562 return("GM_STILL_SHUTTING_DOWN");
564 return("GM_CLONE_BUSY");
565 case(GM_NO_SUCH_DEVICE):
566 return("GM_NO_SUCH_DEVICE");
568 return("GM_ABORTED");
569 case(GM_INCOMPATIBLE_LIB_AND_DRIVER):
570 return("GM_INCOMPATIBLE_LIB_AND_DRIVER");
571 case(GM_UNTRANSLATED_SYSTEM_ERROR):
572 return("GM_UNTRANSLATED_SYSTEM_ERROR");
573 case(GM_ACCESS_DENIED):
574 return("GM_ACCESS_DENIED");
578 * These ones are in the docs but aren't in the header file
579 case(GM_DEV_NOT_FOUND):
580 return("GM_DEV_NOT_FOUND");
581 case(GM_INVALID_PORT_NUMBER):
582 return("GM_INVALID_PORT_NUMBER");
584 return("GM_US_ERROR");
585 case(GM_PAGE_TABLE_FULL):
586 return("GM_PAGE_TABLE_FULL");
587 case(GM_MINOR_OVERFLOW):
588 return("GM_MINOR_OVERFLOW");
589 case(GM_SEND_ORPHANED):
590 return("GM_SEND_ORPHANED");
591 case(GM_HARDWARE_FAULT):
592 return("GM_HARDWARE_FAULT");
593 case(GM_DATA_CORRUPTED):
594 return("GM_DATA_CORRUPTED");
596 return("GM_TIMED_OUT");
598 return("GM_USER_ERROR");
600 return("GM_NOMATCH");
601 case(GM_NOT_SUPPORTED_IN_KERNEL):
602 return("GM_NOT_SUPPORTED_IN_KERNEL");
603 case(GM_NOT_SUPPORTED_ON_ARCH):
604 return("GM_NOT_SUPPORTED_ON_ARCH");
605 case(GM_PTE_REF_CNT_OVERFLOW):
606 return("GM_PTR_REF_CNT_OVERFLOW");
607 case(GM_NO_DRIVER_SUPPORT):
608 return("GM_NO_DRIVER_SUPPORT");
609 case(GM_FIRMWARE_NOT_RUNNING):
610 return("GM_FIRMWARE_NOT_RUNNING");
612 * These ones are in the docs but aren't in the header file
615 return("UNKNOWN GM ERROR CODE");
621 lgmnal_rxevent(gm_recv_event_t *ev)
625 event = GM_RECV_EVENT_TYPE(ev);
627 case(GM_NO_RECV_EVENT):
628 return("GM_NO_RECV_EVENT");
629 case(GM_SENDS_FAILED_EVENT):
630 return("GM_SEND_FAILED_EVENT");
631 case(GM_ALARM_EVENT):
632 return("GM_ALARM_EVENT");
634 return("GM_SENT_EVENT");
635 case(_GM_SLEEP_EVENT):
636 return("_GM_SLEEP_EVENT");
637 case(GM_RAW_RECV_EVENT):
638 return("GM_RAW_RECV_EVENT");
639 case(GM_BAD_SEND_DETECTED_EVENT):
640 return("GM_BAD_SEND_DETECTED_EVENT");
641 case(GM_SEND_TOKEN_VIOLATION_EVENT):
642 return("GM_SEND_TOKEN_VIOLATION_EVENT");
643 case(GM_RECV_TOKEN_VIOLATION_EVENT):
644 return("GM_RECV_TOKEN_VIOLATION_EVENT");
645 case(GM_BAD_RECV_TOKEN_EVENT):
646 return("GM_BAD_RECV_TOKEN_EVENT");
647 case(GM_ALARM_VIOLATION_EVENT):
648 return("GM_ALARM_VIOLATION_EVENT");
650 return("GM_RECV_EVENT");
651 case(GM_HIGH_RECV_EVENT):
652 return("GM_HIGH_RECV_EVENT");
653 case(GM_PEER_RECV_EVENT):
654 return("GM_PEER_RECV_EVENT");
655 case(GM_HIGH_PEER_RECV_EVENT):
656 return("GM_HIGH_PEER_RECV_EVENT");
657 case(GM_FAST_RECV_EVENT):
658 return("GM_FAST_RECV_EVENT");
659 case(GM_FAST_HIGH_RECV_EVENT):
660 return("GM_FAST_HIGH_RECV_EVENT");
661 case(GM_FAST_PEER_RECV_EVENT):
662 return("GM_FAST_PEER_RECV_EVENT");
663 case(GM_FAST_HIGH_PEER_RECV_EVENT):
664 return("GM_FAST_HIGH_PEER_RECV_EVENT");
665 case(GM_REJECTED_SEND_EVENT):
666 return("GM_REJECTED_SEND_EVENT");
667 case(GM_ORPHANED_SEND_EVENT):
668 return("GM_ORPHANED_SEND_EVENT");
669 case(GM_BAD_RESEND_DETECTED_EVENT):
670 return("GM_BAD_RESEND_DETETED_EVENT");
671 case(GM_DROPPED_SEND_EVENT):
672 return("GM_DROPPED_SEND_EVENT");
673 case(GM_BAD_SEND_VMA_EVENT):
674 return("GM_BAD_SEND_VMA_EVENT");
675 case(GM_BAD_RECV_VMA_EVENT):
676 return("GM_BAD_RECV_VMA_EVENT");
677 case(_GM_FLUSHED_ALARM_EVENT):
678 return("GM_FLUSHED_ALARM_EVENT");
679 case(GM_SENT_TOKENS_EVENT):
680 return("GM_SENT_TOKENS_EVENTS");
681 case(GM_IGNORE_RECV_EVENT):
682 return("GM_IGNORE_RECV_EVENT");
683 case(GM_ETHERNET_RECV_EVENT):
684 return("GM_ETHERNET_RECV_EVENT");
685 case(GM_NEW_NO_RECV_EVENT):
686 return("GM_NEW_NO_RECV_EVENT");
687 case(GM_NEW_SENDS_FAILED_EVENT):
688 return("GM_NEW_SENDS_FAILED_EVENT");
689 case(GM_NEW_ALARM_EVENT):
690 return("GM_NEW_ALARM_EVENT");
691 case(GM_NEW_SENT_EVENT):
692 return("GM_NEW_SENT_EVENT");
693 case(_GM_NEW_SLEEP_EVENT):
694 return("GM_NEW_SLEEP_EVENT");
695 case(GM_NEW_RAW_RECV_EVENT):
696 return("GM_NEW_RAW_RECV_EVENT");
697 case(GM_NEW_BAD_SEND_DETECTED_EVENT):
698 return("GM_NEW_BAD_SEND_DETECTED_EVENT");
699 case(GM_NEW_SEND_TOKEN_VIOLATION_EVENT):
700 return("GM_NEW_SEND_TOKEN_VIOLATION_EVENT");
701 case(GM_NEW_RECV_TOKEN_VIOLATION_EVENT):
702 return("GM_NEW_RECV_TOKEN_VIOLATION_EVENT");
703 case(GM_NEW_BAD_RECV_TOKEN_EVENT):
704 return("GM_NEW_BAD_RECV_TOKEN_EVENT");
705 case(GM_NEW_ALARM_VIOLATION_EVENT):
706 return("GM_NEW_ALARM_VIOLATION_EVENT");
707 case(GM_NEW_RECV_EVENT):
708 return("GM_NEW_RECV_EVENT");
709 case(GM_NEW_HIGH_RECV_EVENT):
710 return("GM_NEW_HIGH_RECV_EVENT");
711 case(GM_NEW_PEER_RECV_EVENT):
712 return("GM_NEW_PEER_RECV_EVENT");
713 case(GM_NEW_HIGH_PEER_RECV_EVENT):
714 return("GM_NEW_HIGH_PEER_RECV_EVENT");
715 case(GM_NEW_FAST_RECV_EVENT):
716 return("GM_NEW_FAST_RECV_EVENT");
717 case(GM_NEW_FAST_HIGH_RECV_EVENT):
718 return("GM_NEW_FAST_HIGH_RECV_EVENT");
719 case(GM_NEW_FAST_PEER_RECV_EVENT):
720 return("GM_NEW_FAST_PEER_RECV_EVENT");
721 case(GM_NEW_FAST_HIGH_PEER_RECV_EVENT):
722 return("GM_NEW_FAST_HIGH_PEER_RECV_EVENT");
723 case(GM_NEW_REJECTED_SEND_EVENT):
724 return("GM_NEW_REJECTED_SEND_EVENT");
725 case(GM_NEW_ORPHANED_SEND_EVENT):
726 return("GM_NEW_ORPHANED_SEND_EVENT");
727 case(_GM_NEW_PUT_NOTIFICATION_EVENT):
728 return("_GM_NEW_PUT_NOTIFICATION_EVENT");
729 case(GM_NEW_FREE_SEND_TOKEN_EVENT):
730 return("GM_NEW_FREE_SEND_TOKEN_EVENT");
731 case(GM_NEW_FREE_HIGH_SEND_TOKEN_EVENT):
732 return("GM_NEW_FREE_HIGH_SEND_TOKEN_EVENT");
733 case(GM_NEW_BAD_RESEND_DETECTED_EVENT):
734 return("GM_NEW_BAD_RESEND_DETECTED_EVENT");
735 case(GM_NEW_DROPPED_SEND_EVENT):
736 return("GM_NEW_DROPPED_SEND_EVENT");
737 case(GM_NEW_BAD_SEND_VMA_EVENT):
738 return("GM_NEW_BAD_SEND_VMA_EVENT");
739 case(GM_NEW_BAD_RECV_VMA_EVENT):
740 return("GM_NEW_BAD_RECV_VMA_EVENT");
741 case(_GM_NEW_FLUSHED_ALARM_EVENT):
742 return("GM_NEW_FLUSHED_ALARM_EVENT");
743 case(GM_NEW_SENT_TOKENS_EVENT):
744 return("GM_NEW_SENT_TOKENS_EVENT");
745 case(GM_NEW_IGNORE_RECV_EVENT):
746 return("GM_NEW_IGNORE_RECV_EVENT");
747 case(GM_NEW_ETHERNET_RECV_EVENT):
748 return("GM_NEW_ETHERNET_RECV_EVENT");
750 snprintf(msg, 24, "Unknown Recv event [%d]", event);
753 case(/* _GM_PUT_NOTIFICATION_EVENT */
754 case(/* GM_FREE_SEND_TOKEN_EVENT */
755 case(/* GM_FREE_HIGH_SEND_TOKEN_EVENT */
762 lgmnal_yield(int delay)
764 set_current_state(TASK_INTERRUPTIBLE);
765 schedule_timeout(delay);
769 lgmnal_is_small_message(lgmnal_data_t *nal_data, int niov, struct iovec *iov, int len)
772 LGMNAL_PRINT(LGMNAL_DEBUG_TRACE, ("lgmnal_is_small_message len is [%d]\n", len));
773 if (len < LGMNAL_SMALL_MSG_SIZE(nal_data)) {
774 LGMNAL_PRINT(LGMNAL_DEBUG_VV, ("Yep, small message]\n"));
777 LGMNAL_PRINT(LGMNAL_DEBUG_ERR, ("No, not small message]\n"));
783 lgmnal_hash_find(lgmnal_hash_t *hash, void *key)
787 LGMNAL_PRINT(LGMNAL_DEBUG_TRACE, ("lgmnal_hash_find hash [%p] key [%p]\n", hash, key));
790 LGMNAL_PRINT(LGMNAL_DEBUG_VV, ("lgmnal_hash_find Stepping [%d]\n", count++));
791 if (hash->key == key) {
793 LGMNAL_PRINT(LGMNAL_DEBUG_VV, ("lgmnal_hash_find hash got data[%p]\n", data));
798 LGMNAL_PRINT(LGMNAL_DEBUG_VV, ("lgmnal_hash_find data not found\n"));
803 * TO DO hash. figure out why getting bad stuff from gm_hash and thne use it.
807 lgmnal_hash_add(lgmnal_hash_t **hash, void *key, void *data)
810 #ifdef LGMNAL_USE_GM_HASH
811 return(gm_hash_insert(*hash, (void*)key, (void*)data);
813 lgmnal_hash_t *new = NULL;
814 LGMNAL_PRINT(LGMNAL_DEBUG_TRACE, ("lgmnal_hash_add hash [%p]\n", *hash));
815 PORTAL_ALLOC(new, sizeof(lgmnal_hash_t));
816 memset(new, 0, sizeof(lgmnal_hash_t));
818 LGMNAL_PRINT(LGMNAL_DEBUG_ERR, ("lgmnal_hash_add :: can't get memory\n"));
825 LGMNAL_PRINT(LGMNAL_DEBUG_VV, ("lgmnal_hash_add hash head [%p]\n", *hash));
831 lgmnal_hash_free(lgmnal_hash_t **hash)
834 lgmnal_hash_t *_hash = NULL;
835 LGMNAL_PRINT(LGMNAL_DEBUG_TRACE, ("lgmnal_hash_free hash [p%]\n", *hash));
838 LGMNAL_PRINT(LGMNAL_DEBUG_VV, ("lgmnal_hash_free freeing hash [p%]\n", _hash));
841 PORTAL_FREE(_hash, sizeof(lgmnal_hash_t));
/* Symbols exported for use by the rest of the lgmnal kernel module */
EXPORT_SYMBOL(lgmnal_yield);
EXPORT_SYMBOL(lgmnal_print);
EXPORT_SYMBOL(lgmnal_alloc_srxd);
EXPORT_SYMBOL(lgmnal_get_srxd);
EXPORT_SYMBOL(lgmnal_return_srxd);
EXPORT_SYMBOL(lgmnal_free_srxd);
EXPORT_SYMBOL(lgmnal_alloc_stxd);
EXPORT_SYMBOL(lgmnal_get_stxd);
EXPORT_SYMBOL(lgmnal_return_stxd);
EXPORT_SYMBOL(lgmnal_free_stxd);
EXPORT_SYMBOL(lgmnal_rxbuffer_to_srxd);
EXPORT_SYMBOL(lgmnal_rxevent);
EXPORT_SYMBOL(lgmnal_gm_error);
EXPORT_SYMBOL(lgmnal_stop_rxthread);