1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (c) 2003 Los Alamos National Laboratory (LANL)
6 * This file is part of Lustre, http://www.lustre.org/
8 * Lustre is free software; you can redistribute it and/or
9 * modify it under the terms of version 2 of the GNU General Public
10 * License as published by the Free Software Foundation.
12 * Lustre is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with Lustre; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22 * All utilities required by gmnal
28 * Am I one of the gmnal rxthreads ?
/*
 * gmnal_is_rxthread: true iff the calling task is one of this NI's
 * receiver threads.  Scans the pids recorded by gmnal_start_threads()
 * for current->pid.  Used by gmnal_get_tx() to select the tx pool that
 * is reserved for rxthreads.
 */
31 gmnal_is_rxthread(gmnal_ni_t *gmnalni)
35 for (i = 0; i < gmnalni->gmni_nrxthreads; i++)
36 if (gmnalni->gmni_rxthread_pid[i] == current->pid)
/*
 * gmnal_alloc_tx: allocate one transmit descriptor plus a GM DMA-able
 * buffer of gmni_msg_size bytes for its message.  The descriptor is
 * zeroed and its buffer/size/backpointer fields filled in.
 * NOTE(review): error paths are elided in this view — presumably the
 * function returns NULL on failure and the new tx on success; confirm.
 */
42 gmnal_alloc_tx (gmnal_ni_t *gmnalni)
47 PORTAL_ALLOC(tx, sizeof(*tx));
49 CERROR ("Failed to allocate tx\n");
/* wire a DMA-capable buffer for the message payload */
53 buffer = gm_dma_malloc(gmnalni->gmni_port, gmnalni->gmni_msg_size);
55 CERROR("Failed to gm_dma_malloc tx buffer size [%d]\n",
56 gmnalni->gmni_msg_size);
/* buffer allocation failed: undo the descriptor allocation */
57 PORTAL_FREE(tx, sizeof(*tx));
61 memset(tx, 0, sizeof(*tx));
62 tx->tx_msg = (gmnal_msg_t *)buffer;
63 tx->tx_buffer_size = gmnalni->gmni_msg_size;
/* GM expresses buffer sizes in its own "gm size" encoding */
64 tx->tx_gm_size = gm_min_size_for_length(tx->tx_buffer_size);
/* backpointer so gmnal_free_tx() can find the port to free against */
65 tx->tx_gmni = gmnalni;
67 CDEBUG(D_NET, "Created tx [%p] with buffer [%p], size [%d]\n",
68 tx, tx->tx_msg, tx->tx_buffer_size);
/*
 * gmnal_free_tx: counterpart of gmnal_alloc_tx().  Returns the tx's DMA
 * buffer to GM and frees the descriptor itself.
 */
74 gmnal_free_tx (gmnal_tx_t *tx)
76 gmnal_ni_t *gmnalni = tx->tx_gmni;
78 CDEBUG(D_NET, "Freeing tx [%p] with buffer [%p], size [%d]\n",
79 tx, tx->tx_msg, tx->tx_buffer_size);
81 /* We free buffers after we've closed the GM port */
82 gm_dma_free(gmnalni->gmni_port, tx->tx_msg);
84 PORTAL_FREE(tx, sizeof(*tx));
/*
 * gmnal_alloc_txs: build the two transmit-descriptor pools for this NI:
 * a general pool of ntx descriptors and a pool of nrxt_tx descriptors
 * reserved for the rxthreads (so receive-path replies can never be
 * starved of tx descriptors by general senders).  Each pool is guarded
 * by a counting semaphore (one count per descriptor) and a spinlock for
 * list manipulation.  Fails if GM cannot supply enough send tokens.
 */
88 gmnal_alloc_txs(gmnal_ni_t *gmnalni)
90 int ntxcred = gm_num_send_tokens(gmnalni->gmni_port);
96 CWARN("ntxcred: %d\n", ntxcred);
/* one tx per tx descriptor, plus one spare for the rxthread pool */
99 nrxt_tx = num_txds + 1;
/* both pools must be covered by GM's send-token budget */
101 if (ntx + nrxt_tx > ntxcred) {
102 CERROR ("Asked for %d + %d tx credits, but only %d available\n",
103 ntx, nrxt_tx, ntxcred);
107 /* A semaphore is initialised with the number of transmit tokens
108 * available. To get a stxd, acquire the token semaphore. this
109 * decrements the available token count (if no tokens you block here,
110 * someone returning a stxd will release the semaphore and wake you)
111 * When token is obtained acquire the spinlock to manipulate the
113 sema_init(&gmnalni->gmni_tx_token, ntx);
114 spin_lock_init(&gmnalni->gmni_tx_lock);
115 LASSERT (gmnalni->gmni_tx == NULL);
/* NOTE(review): "<=" allocates ntx+1 descriptors while the semaphore
 * above only holds ntx tokens — looks like an off-by-one (or a
 * deliberate spare); confirm the intended count. */
117 for (i = 0; i <= ntx; i++) {
118 tx = gmnal_alloc_tx(gmnalni);
120 CERROR("Failed to create tx %d\n", i);
/* push onto the singly-linked general free list */
125 tx->tx_next = gmnalni->gmni_tx;
126 gmnalni->gmni_tx = tx;
/* now the reserved rxthread pool, same structure */
129 sema_init(&gmnalni->gmni_rxt_tx_token, nrxt_tx);
130 spin_lock_init(&gmnalni->gmni_rxt_tx_lock);
131 LASSERT (gmnalni->gmni_rxt_tx == NULL);
/* NOTE(review): same "<=" off-by-one question as the loop above */
133 for (i = 0; i <= nrxt_tx; i++) {
134 tx = gmnal_alloc_tx(gmnalni);
136 CERROR("Failed to create tx %d + %d\n", ntx, i);
141 tx->tx_next = gmnalni->gmni_rxt_tx;
142 gmnalni->gmni_rxt_tx = tx;
/*
 * gmnal_free_txs: drain both tx free lists (general and rxthread-
 * reserved), unlinking each descriptor in turn.  The per-descriptor
 * teardown is elided here — presumably gmnal_free_tx(); confirm.
 */
149 gmnal_free_txs(gmnal_ni_t *gmnalni)
153 while ((tx = gmnalni->gmni_tx) != NULL) {
154 gmnalni->gmni_tx = tx->tx_next;
158 while ((tx = gmnalni->gmni_rxt_tx) != NULL) {
159 gmnalni->gmni_rxt_tx = tx->tx_next;
166 * Get a tx from the list
167 * This get us a wired and gm_registered small tx buffer.
168 * This implicitly gets us a send token also.
/*
 * gmnal_get_tx: pop a tx descriptor from the appropriate free list.
 * Rxthreads draw from the reserved rxt pool; everyone else from the
 * general pool.  Acquiring the counting semaphore first guarantees the
 * list is non-empty when we take the spinlock to unlink the head.
 * `block` selects blocking vs non-blocking acquisition — the branch on
 * it is elided between the down() and down_trylock() below (NOTE(review):
 * confirm against the full source).
 */
171 gmnal_get_tx(gmnal_ni_t *gmnalni, int block)
174 gmnal_tx_t *tx = NULL;
175 pid_t pid = current->pid;
178 CDEBUG(D_TRACE, "gmnal_get_tx gmnalni [%p] block[%d] pid [%d]\n",
179 gmnalni, block, pid);
/* rxthreads use their reserved pool so they cannot deadlock waiting
 * on tokens held by general senders */
181 if (gmnal_is_rxthread(gmnalni)) {
182 CDEBUG(D_NET, "RXTHREAD Attempting to get token\n");
183 down(&gmnalni->gmni_rxt_tx_token);
184 spin_lock(&gmnalni->gmni_rxt_tx_lock);
185 tx = gmnalni->gmni_rxt_tx;
186 gmnalni->gmni_rxt_tx = tx->tx_next;
187 spin_unlock(&gmnalni->gmni_rxt_tx_lock);
188 CDEBUG(D_NET, "RXTHREAD got [%p], head is [%p]\n",
189 tx, gmnalni->gmni_rxt_tx);
/* general pool: blocking path waits for a token... */
193 CDEBUG(D_NET, "Attempting to get token\n");
194 down(&gmnalni->gmni_tx_token);
195 CDEBUG(D_PORTALS, "Got token\n");
/* ...non-blocking path bails out if no token is free */
197 if (down_trylock(&gmnalni->gmni_tx_token)) {
198 CERROR("can't get token\n");
202 spin_lock(&gmnalni->gmni_tx_lock);
203 tx = gmnalni->gmni_tx;
204 gmnalni->gmni_tx = tx->tx_next;
205 spin_unlock(&gmnalni->gmni_tx_lock);
206 CDEBUG(D_NET, "got [%p], head is [%p]\n", tx,
208 } /* general tx get */
214 * Return a tx to the list
/*
 * gmnal_return_tx: push a tx descriptor back onto the free list it came
 * from (rxthread-reserved or general; the selecting condition is elided
 * here), then up() the matching token semaphore to wake any waiter in
 * gmnal_get_tx().  Link under the spinlock, release the token after.
 */
217 gmnal_return_tx(gmnal_ni_t *gmnalni, gmnal_tx_t *tx)
219 CDEBUG(D_TRACE, "gmnalni [%p], tx[%p] rxt[%d]\n", gmnalni,
223 * this transmit descriptor is
227 spin_lock(&gmnalni->gmni_rxt_tx_lock);
228 tx->tx_next = gmnalni->gmni_rxt_tx;
229 gmnalni->gmni_rxt_tx = tx;
230 spin_unlock(&gmnalni->gmni_rxt_tx_lock);
231 up(&gmnalni->gmni_rxt_tx_token);
232 CDEBUG(D_NET, "Returned tx to rxthread list\n");
234 spin_lock(&gmnalni->gmni_tx_lock);
235 tx->tx_next = gmnalni->gmni_tx;
236 gmnalni->gmni_tx = tx;
237 spin_unlock(&gmnalni->gmni_tx_lock);
238 up(&gmnalni->gmni_tx_token);
239 CDEBUG(D_NET, "Returned tx to general list\n");
246 * allocate a number of small rx buffers and register with GM
247 * so they are wired and set up for DMA. This is a costly operation.
248 * Also allocate a corresponding descriptor to keep track of
250 * Put all descriptors on singly linked list to be available to
/*
 * gmnal_alloc_rxs: build the small-message receive pool: nrx descriptors,
 * each with a GM DMA-able buffer, linked on gmni_rx, plus a pointer-keyed
 * hash mapping buffer address -> descriptor so gmnal_enqueue_rx() can
 * find the descriptor for a buffer GM hands back.  Fails if GM's receive
 * token budget cannot cover nrx.
 */
254 gmnal_alloc_rxs (gmnal_ni_t *gmnalni)
256 int nrxcred = gm_num_receive_tokens(gmnalni->gmni_port);
262 CWARN("nrxcred: %d\n", nrxcred);
/* two buffers per tx descriptor plus two spares */
264 nrx = num_txds*2 + 2;
266 CERROR("Can't allocate %d rx credits: (%d available)\n",
271 CDEBUG(D_NET, "Allocated [%d] receive tokens to small messages\n", nrx);
/* pointer-keyed lookup: rxbuffer address -> rx descriptor */
273 gmnalni->gmni_rx_hash = gm_create_hash(gm_hash_compare_ptrs,
274 gm_hash_hash_ptr, 0, 0, nrx, 0);
275 if (gmnalni->gmni_rx_hash == NULL) {
276 CERROR("Failed to create hash table\n");
280 LASSERT (gmnalni->gmni_rx == NULL);
/* NOTE(review): "<=" allocates nrx+1 descriptors — same off-by-one
 * question as the tx pools in gmnal_alloc_txs(); confirm. */
282 for (i=0; i <= nrx; i++) {
284 PORTAL_ALLOC(rxd, sizeof(*rxd));
286 CERROR("Failed to malloc rxd [%d]\n", i);
290 rxbuffer = gm_dma_malloc(gmnalni->gmni_port,
291 gmnalni->gmni_msg_size);
292 if (rxbuffer == NULL) {
293 CERROR("Failed to gm_dma_malloc rxbuffer [%d], "
294 "size [%d]\n",i ,gmnalni->gmni_msg_size);
/* buffer allocation failed: undo the descriptor allocation */
295 PORTAL_FREE(rxd, sizeof(*rxd));
299 rxd->rx_msg = (gmnal_msg_t *)rxbuffer;
300 rxd->rx_size = gmnalni->gmni_msg_size;
/* GM expresses buffer sizes in its own "gm size" encoding */
301 rxd->rx_gmsize = gm_min_size_for_length(rxd->rx_size);
/* push onto the singly-linked descriptor list */
303 rxd->rx_next = gmnalni->gmni_rx;
304 gmnalni->gmni_rx = rxd;
306 if (gm_hash_insert(gmnalni->gmni_rx_hash,
307 (void*)rxbuffer, (void*)rxd)) {
308 CERROR("failed to create hash entry rxd[%p] "
309 "for rxbuffer[%p]\n", rxd, rxbuffer);
313 CDEBUG(D_NET, "Registered rxd [%p] with buffer [%p], "
314 "size [%d]\n", rxd, rxd->rx_msg, rxd->rx_size);
321 gmnal_free_rxs(gmnal_ni_t *gmnalni)
325 CDEBUG(D_TRACE, "gmnal_free_small rx\n");
327 while ((rx = gmnalni->gmni_rx) != NULL) {
328 gmnalni->gmni_rx = rx->rx_next;
330 CDEBUG(D_NET, "Freeing rxd [%p] buffer [%p], size [%d]\n",
331 rx, rx->rx_msg, rx->rx_size);
333 /* We free buffers after we've shutdown the GM port */
334 gm_dma_free(gmnalni->gmni_port, _rxd->rx_msg);
336 PORTAL_FREE(rx, sizeof(*rx));
341 if (gmnalni->gmni_rx_hash != NULL)
342 gm_destroy_hash(gmnalni->gmni_rx_hash);
/*
 * gmnal_stop_threads: signal all helper threads to exit and wait for
 * them.  Sets the shutdown flag, fires a GM alarm to pop the caretaker
 * thread out of its gm_unknown() sleep, wakes every rxthread via the
 * rx-queue semaphore, then polls until the thread count reaches zero.
 */
347 gmnal_stop_threads(gmnal_ni_t *gmnalni)
352 gmnalni->gmni_thread_shutdown = 1;
354 /* wake ctthread with an alarm */
355 spin_lock(&gmnalni->gmni_gm_lock);
356 gm_set_alarm(gmnalni->gmni_port, &gmnalni->gmni_ctthread_alarm,
358 spin_unlock(&gmnalni->gmni_gm_lock);
360 /* wake each rxthread */
/* one up() per rxthread: gmnal_start_threads() spawns one per CPU */
361 for (i = 0; i < num_online_cpus(); i++)
362 up(&gmnalni->gmni_rxq_wait);
364 while (atomic_read(&gmnalni->gmni_nthreads) != 0) {
/* warn only when count is a power of two, to rate-limit the message */
366 if ((count & (count - 1)) == 0)
367 CWARN("Waiting for %d threads to stop\n",
368 atomic_read(&gmnalni->gmni_nthreads));
374 * Start the caretaker thread and a number of receiver threads
375 * The caretaker thread gets events from the gm library.
376 * It passes receive events to the receiver threads via a work list.
377 * It processes other events itself in gm_unknown. These will be
378 * callback events or sleeps.
/*
 * gmnal_start_threads: reset the shutdown/counters, initialise the rx
 * work queue (list + lock + wait semaphore) and the caretaker alarm,
 * then spawn the caretaker kernel thread and one rxthread per online
 * CPU, recording each rxthread's pid (used by gmnal_is_rxthread()).
 * On rxthread spawn failure, gmnal_stop_threads() unwinds the ones
 * already started.
 */
381 gmnal_start_threads(gmnal_ni_t *gmnalni)
386 gmnalni->gmni_thread_shutdown = 0;
387 gmnalni->gmni_nrxthreads = 0;
388 atomic_set(&gmnalni->gmni_nthreads, 0);
390 INIT_LIST_HEAD(&gmnalni->gmni_rxq);
391 spin_lock_init(&gmnalni->gmni_rxq_lock);
/* semaphore starts at 0: rxthreads sleep until work is enqueued */
392 sema_init(&gmnalni->gmni_rxq_wait, 0);
395 * the alarm is used to wake the caretaker thread from
396 * gm_unknown call (sleeping) to exit it.
398 CDEBUG(D_NET, "Initializing caretaker thread alarm and flag\n");
399 gm_initialize_alarm(&gmnalni->gmni_ctthread_alarm);
401 pid = kernel_thread(gmnal_ct_thread, (void*)gmnalni, 0);
403 CERROR("Caretaker thread failed to start: %d\n", pid);
406 atomic_inc(&gmnalni->gmni_nthreads);
/* one receiver thread per online CPU */
408 for (i = 0; i < num_online_cpus(); i++) {
410 pid = kernel_thread(gmnal_rx_thread, (void*)gmnalni, 0);
412 CERROR("rx thread failed to start: %d\n", pid);
/* unwind the threads already started before failing */
413 gmnal_stop_threads(gmnalni);
417 atomic_inc(&gmnalni->gmni_nthreads);
418 gmnalni->gmni_rxthread_pid[i] = pid;
419 gmnalni->gmni_nrxthreads++;
/*
 * gmnal_gmstatus2str: map a gm_status_t to a human-readable string for
 * error reporting.
 *
 * NOTE(review): the unconditional return(gm_strerror(status)) on the
 * very next line means the entire switch below is unreachable dead
 * code — presumably kept as a fallback for GM versions lacking
 * gm_strerror(); either delete the switch or guard the early return.
 */
426 gmnal_gmstatus2str(gm_status_t status)
428 return(gm_strerror(status));
435 case(GM_INPUT_BUFFER_TOO_SMALL):
436 return("INPUT_BUFFER_TOO_SMALL");
437 case(GM_OUTPUT_BUFFER_TOO_SMALL):
438 return("OUTPUT_BUFFER_TOO_SMALL");
443 case(GM_MEMORY_FAULT):
444 return("MEMORY_FAULT");
445 case(GM_INTERRUPTED):
446 return("INTERRUPTED");
447 case(GM_INVALID_PARAMETER):
448 return("INVALID_PARAMETER");
449 case(GM_OUT_OF_MEMORY):
450 return("OUT_OF_MEMORY");
451 case(GM_INVALID_COMMAND):
452 return("INVALID_COMMAND");
453 case(GM_PERMISSION_DENIED):
454 return("PERMISSION_DENIED");
455 case(GM_INTERNAL_ERROR):
456 return("INTERNAL_ERROR");
458 return("UNATTACHED");
459 case(GM_UNSUPPORTED_DEVICE):
460 return("UNSUPPORTED_DEVICE");
461 case(GM_SEND_TIMED_OUT):
462 return("GM_SEND_TIMEDOUT");
463 case(GM_SEND_REJECTED):
464 return("GM_SEND_REJECTED");
465 case(GM_SEND_TARGET_PORT_CLOSED):
466 return("GM_SEND_TARGET_PORT_CLOSED");
467 case(GM_SEND_TARGET_NODE_UNREACHABLE):
468 return("GM_SEND_TARGET_NODE_UNREACHABLE");
469 case(GM_SEND_DROPPED):
470 return("GM_SEND_DROPPED");
471 case(GM_SEND_PORT_CLOSED):
472 return("GM_SEND_PORT_CLOSED");
473 case(GM_NODE_ID_NOT_YET_SET):
474 return("GM_NODE_ID_NOT_YET_SET");
475 case(GM_STILL_SHUTTING_DOWN):
476 return("GM_STILL_SHUTTING_DOWN");
478 return("GM_CLONE_BUSY");
479 case(GM_NO_SUCH_DEVICE):
480 return("GM_NO_SUCH_DEVICE");
482 return("GM_ABORTED");
483 case(GM_INCOMPATIBLE_LIB_AND_DRIVER):
484 return("GM_INCOMPATIBLE_LIB_AND_DRIVER");
485 case(GM_UNTRANSLATED_SYSTEM_ERROR):
486 return("GM_UNTRANSLATED_SYSTEM_ERROR");
487 case(GM_ACCESS_DENIED):
488 return("GM_ACCESS_DENIED");
492 * These ones are in the docs but aren't in the header file
493 case(GM_DEV_NOT_FOUND):
494 return("GM_DEV_NOT_FOUND");
495 case(GM_INVALID_PORT_NUMBER):
496 return("GM_INVALID_PORT_NUMBER");
498 return("GM_US_ERROR");
499 case(GM_PAGE_TABLE_FULL):
500 return("GM_PAGE_TABLE_FULL");
501 case(GM_MINOR_OVERFLOW):
502 return("GM_MINOR_OVERFLOW");
503 case(GM_SEND_ORPHANED):
504 return("GM_SEND_ORPHANED");
505 case(GM_HARDWARE_FAULT):
506 return("GM_HARDWARE_FAULT");
507 case(GM_DATA_CORRUPTED):
508 return("GM_DATA_CORRUPTED");
510 return("GM_TIMED_OUT");
512 return("GM_USER_ERROR");
514 return("GM_NOMATCH");
515 case(GM_NOT_SUPPORTED_IN_KERNEL):
516 return("GM_NOT_SUPPORTED_IN_KERNEL");
517 case(GM_NOT_SUPPORTED_ON_ARCH):
518 return("GM_NOT_SUPPORTED_ON_ARCH");
/* NOTE(review): string says "GM_PTR_..." for case GM_PTE_... — typo
 * in the (currently unreachable) string; left untouched here */
519 case(GM_PTE_REF_CNT_OVERFLOW):
520 return("GM_PTR_REF_CNT_OVERFLOW");
521 case(GM_NO_DRIVER_SUPPORT):
522 return("GM_NO_DRIVER_SUPPORT");
523 case(GM_FIRMWARE_NOT_RUNNING):
524 return("GM_FIRMWARE_NOT_RUNNING");
525 * These ones are in the docs but aren't in the header file
529 return("UNKNOWN GM ERROR CODE");
/*
 * gmnal_rxevent2str: map a GM receive-event type (via
 * GM_RECV_EVENT_TYPE) to its symbolic name, for debug logging by the
 * caretaker thread.  Falls through to "Unknown Recv event" for types
 * not listed (see the commented-out event names at the bottom).
 */
535 gmnal_rxevent2str(gm_recv_event_t *ev)
538 event = GM_RECV_EVENT_TYPE(ev);
540 case(GM_NO_RECV_EVENT):
541 return("GM_NO_RECV_EVENT");
/* NOTE(review): string drops the S of SENDS — harmless log typo */
542 case(GM_SENDS_FAILED_EVENT):
543 return("GM_SEND_FAILED_EVENT");
544 case(GM_ALARM_EVENT):
545 return("GM_ALARM_EVENT");
547 return("GM_SENT_EVENT");
548 case(_GM_SLEEP_EVENT):
549 return("_GM_SLEEP_EVENT");
550 case(GM_RAW_RECV_EVENT):
551 return("GM_RAW_RECV_EVENT");
552 case(GM_BAD_SEND_DETECTED_EVENT):
553 return("GM_BAD_SEND_DETECTED_EVENT");
554 case(GM_SEND_TOKEN_VIOLATION_EVENT):
555 return("GM_SEND_TOKEN_VIOLATION_EVENT");
556 case(GM_RECV_TOKEN_VIOLATION_EVENT):
557 return("GM_RECV_TOKEN_VIOLATION_EVENT");
558 case(GM_BAD_RECV_TOKEN_EVENT):
559 return("GM_BAD_RECV_TOKEN_EVENT");
560 case(GM_ALARM_VIOLATION_EVENT):
561 return("GM_ALARM_VIOLATION_EVENT");
563 return("GM_RECV_EVENT");
564 case(GM_HIGH_RECV_EVENT):
565 return("GM_HIGH_RECV_EVENT");
566 case(GM_PEER_RECV_EVENT):
567 return("GM_PEER_RECV_EVENT");
568 case(GM_HIGH_PEER_RECV_EVENT):
569 return("GM_HIGH_PEER_RECV_EVENT");
570 case(GM_FAST_RECV_EVENT):
571 return("GM_FAST_RECV_EVENT");
572 case(GM_FAST_HIGH_RECV_EVENT):
573 return("GM_FAST_HIGH_RECV_EVENT");
574 case(GM_FAST_PEER_RECV_EVENT):
575 return("GM_FAST_PEER_RECV_EVENT");
576 case(GM_FAST_HIGH_PEER_RECV_EVENT):
577 return("GM_FAST_HIGH_PEER_RECV_EVENT");
578 case(GM_REJECTED_SEND_EVENT):
579 return("GM_REJECTED_SEND_EVENT");
580 case(GM_ORPHANED_SEND_EVENT):
581 return("GM_ORPHANED_SEND_EVENT");
/* NOTE(review): "DETETED" — harmless log typo, left byte-identical */
582 case(GM_BAD_RESEND_DETECTED_EVENT):
583 return("GM_BAD_RESEND_DETETED_EVENT");
584 case(GM_DROPPED_SEND_EVENT):
585 return("GM_DROPPED_SEND_EVENT");
586 case(GM_BAD_SEND_VMA_EVENT):
587 return("GM_BAD_SEND_VMA_EVENT");
588 case(GM_BAD_RECV_VMA_EVENT):
589 return("GM_BAD_RECV_VMA_EVENT");
590 case(_GM_FLUSHED_ALARM_EVENT):
591 return("GM_FLUSHED_ALARM_EVENT");
/* NOTE(review): stray trailing S ("EVENTS") — harmless log typo */
592 case(GM_SENT_TOKENS_EVENT):
593 return("GM_SENT_TOKENS_EVENTS");
594 case(GM_IGNORE_RECV_EVENT):
595 return("GM_IGNORE_RECV_EVENT");
596 case(GM_ETHERNET_RECV_EVENT):
597 return("GM_ETHERNET_RECV_EVENT");
598 case(GM_NEW_NO_RECV_EVENT):
599 return("GM_NEW_NO_RECV_EVENT");
600 case(GM_NEW_SENDS_FAILED_EVENT):
601 return("GM_NEW_SENDS_FAILED_EVENT");
602 case(GM_NEW_ALARM_EVENT):
603 return("GM_NEW_ALARM_EVENT");
604 case(GM_NEW_SENT_EVENT):
605 return("GM_NEW_SENT_EVENT");
606 case(_GM_NEW_SLEEP_EVENT):
607 return("GM_NEW_SLEEP_EVENT");
608 case(GM_NEW_RAW_RECV_EVENT):
609 return("GM_NEW_RAW_RECV_EVENT");
610 case(GM_NEW_BAD_SEND_DETECTED_EVENT):
611 return("GM_NEW_BAD_SEND_DETECTED_EVENT");
612 case(GM_NEW_SEND_TOKEN_VIOLATION_EVENT):
613 return("GM_NEW_SEND_TOKEN_VIOLATION_EVENT");
614 case(GM_NEW_RECV_TOKEN_VIOLATION_EVENT):
615 return("GM_NEW_RECV_TOKEN_VIOLATION_EVENT");
616 case(GM_NEW_BAD_RECV_TOKEN_EVENT):
617 return("GM_NEW_BAD_RECV_TOKEN_EVENT");
618 case(GM_NEW_ALARM_VIOLATION_EVENT):
619 return("GM_NEW_ALARM_VIOLATION_EVENT");
620 case(GM_NEW_RECV_EVENT):
621 return("GM_NEW_RECV_EVENT");
622 case(GM_NEW_HIGH_RECV_EVENT):
623 return("GM_NEW_HIGH_RECV_EVENT");
624 case(GM_NEW_PEER_RECV_EVENT):
625 return("GM_NEW_PEER_RECV_EVENT");
626 case(GM_NEW_HIGH_PEER_RECV_EVENT):
627 return("GM_NEW_HIGH_PEER_RECV_EVENT");
628 case(GM_NEW_FAST_RECV_EVENT):
629 return("GM_NEW_FAST_RECV_EVENT");
630 case(GM_NEW_FAST_HIGH_RECV_EVENT):
631 return("GM_NEW_FAST_HIGH_RECV_EVENT");
632 case(GM_NEW_FAST_PEER_RECV_EVENT):
633 return("GM_NEW_FAST_PEER_RECV_EVENT");
634 case(GM_NEW_FAST_HIGH_PEER_RECV_EVENT):
635 return("GM_NEW_FAST_HIGH_PEER_RECV_EVENT");
636 case(GM_NEW_REJECTED_SEND_EVENT):
637 return("GM_NEW_REJECTED_SEND_EVENT");
638 case(GM_NEW_ORPHANED_SEND_EVENT):
639 return("GM_NEW_ORPHANED_SEND_EVENT");
640 case(_GM_NEW_PUT_NOTIFICATION_EVENT):
641 return("_GM_NEW_PUT_NOTIFICATION_EVENT");
642 case(GM_NEW_FREE_SEND_TOKEN_EVENT):
643 return("GM_NEW_FREE_SEND_TOKEN_EVENT");
644 case(GM_NEW_FREE_HIGH_SEND_TOKEN_EVENT):
645 return("GM_NEW_FREE_HIGH_SEND_TOKEN_EVENT");
646 case(GM_NEW_BAD_RESEND_DETECTED_EVENT):
647 return("GM_NEW_BAD_RESEND_DETECTED_EVENT");
648 case(GM_NEW_DROPPED_SEND_EVENT):
649 return("GM_NEW_DROPPED_SEND_EVENT");
650 case(GM_NEW_BAD_SEND_VMA_EVENT):
651 return("GM_NEW_BAD_SEND_VMA_EVENT");
652 case(GM_NEW_BAD_RECV_VMA_EVENT):
653 return("GM_NEW_BAD_RECV_VMA_EVENT");
654 case(_GM_NEW_FLUSHED_ALARM_EVENT):
655 return("GM_NEW_FLUSHED_ALARM_EVENT");
656 case(GM_NEW_SENT_TOKENS_EVENT):
657 return("GM_NEW_SENT_TOKENS_EVENT");
658 case(GM_NEW_IGNORE_RECV_EVENT):
659 return("GM_NEW_IGNORE_RECV_EVENT");
660 case(GM_NEW_ETHERNET_RECV_EVENT):
661 return("GM_NEW_ETHERNET_RECV_EVENT");
663 return("Unknown Recv event");
664 /* _GM_PUT_NOTIFICATION_EVENT */
665 /* GM_FREE_SEND_TOKEN_EVENT */
666 /* GM_FREE_HIGH_SEND_TOKEN_EVENT */
/*
 * gmnal_yield: sleep the current task for `delay` jiffies
 * (interruptibly), giving up the CPU to other runnable threads.
 */
672 gmnal_yield(int delay)
674 set_current_state(TASK_INTERRUPTIBLE);
675 schedule_timeout(delay);
/*
 * gmnal_enqueue_rx: called (by the caretaker thread) when GM delivers a
 * receive.  Looks up the rx descriptor for the delivered buffer in the
 * buffer->descriptor hash, records the sender details (converted from
 * GM wire byte order), appends the descriptor to the rx work queue and
 * wakes one rxthread via the queue semaphore.
 */
679 gmnal_enqueue_rx(gmnal_ni_t *gmnalni, gm_recv_t *recv)
681 void *ptr = gm_ntohp(recv->buffer);
682 gmnal_rx_t *rx = gm_hash_find(gmnalni->gmni_rx_hash, ptr);
684 /* No locking; hash is read-only */
686 LASSERT (rx != NULL);
687 LASSERT (rx->rx_msg == (gmnal_msg_t *)ptr);
/* stash the receive metadata on the descriptor for the rxthread */
689 rx->rx_recv_nob = gm_ntohl(recv->length);
690 rx->rx_recv_gmid = gm_ntoh_u16(recv->sender_node_id);
691 rx->rx_recv_port = gm_ntoh_u8(recv->sender_port_id);
692 rx->rx_recv_type = gm_ntoh_u8(recv->type);
694 spin_lock(&gmnalni->gmni_rxq_lock);
695 list_add_tail (&rx->rx_list, &gmnalni->gmni_rxq);
696 spin_unlock(&gmnalni->gmni_rxq_lock);
/* wake one rxthread sleeping in gmnal_dequeue_rx() */
698 up(&gmnalni->gmni_rxq_wait);
703 gmnal_dequeue_rx(gmnal_ni_t *gmnalni)
707 CDEBUG(D_NET, "Getting entry to list\n");
710 while(down_interruptible(&gmnalni->gmni_rxq_wait) != 0)
713 if (gmnalni->gmni_thread_shutdown)
716 spin_lock(&gmnalni->gmni_rxq_lock);
718 if (list_empty(&gmnalni->gmni_rxq)) {
721 rx = list_entry(gmnalni->gmni_rxq.next,
722 gmnal_rx_t, rx_list);
723 list_del(&rx->rx_list);
726 spin_unlock(&gmnalni->gmni_rxq_lock);
731 CWARN("woken but no work\n");