1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 only,
10 * as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License version 2 for more details (a copy is included
16 * in the LICENSE file that accompanied this code).
18 * You should have received a copy of the GNU General Public License
19 * version 2 along with this program; If not, see
20 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23 * CA 95054 USA or visit www.sun.com if you need additional information or
29 * Copyright 2008 Sun Microsystems, Inc. All rights reserved
30 * Use is subject to license terms.
32 * Copyright (c) 2003 Los Alamos National Laboratory (LANL)
35 * This file is part of Lustre, http://www.lustre.org/
36 * Lustre is a trademark of Sun Microsystems, Inc.
/*
 * Free the first 'npages' pages attached to a network buffer's kiov.
 * No GM unregistration is done here; callers invoke this either before
 * the pages were registered or after the GM port was closed (GM then
 * releases its mappings itself — see gmnal_free_ltxbuf()).
 * NOTE(review): this listing elides lines (declaration of 'i', braces);
 * code left byte-identical.
 */
42 gmnal_free_netbuf_pages (gmnal_netbuf_t *nb, int npages)
46 for (i = 0; i < npages; i++)
47 __free_page(nb->nb_kiov[i].kiov_page);
/*
 * Allocate 'npages' kernel pages into nb->nb_kiov and register each one
 * with GM at consecutive network addresses carved out of
 * gmni->gmni_netaddr_base.  On any failure, already-acquired pages are
 * released and (presumably) an error is returned — the return statements
 * are elided in this listing.
 */
51 gmnal_alloc_netbuf_pages (gmnal_ni_t *gmni, gmnal_netbuf_t *nb, int npages)
58 for (i = 0; i < npages; i++) {
59 nb->nb_kiov[i].kiov_page = alloc_page(GFP_KERNEL);
60 nb->nb_kiov[i].kiov_offset = 0;
/* each kiov entry covers a whole page */
61 nb->nb_kiov[i].kiov_len = PAGE_SIZE;
63 if (nb->nb_kiov[i].kiov_page == NULL) {
64 CERROR("Can't allocate page\n");
/* page i failed to allocate: free only pages 0..i-1 */
65 gmnal_free_netbuf_pages(nb, i);
69 CDEBUG(D_NET,"[%3d] page %p, phys "LPX64", @ "LPX64"\n",
70 i, nb->nb_kiov[i].kiov_page,
71 lnet_page2phys(nb->nb_kiov[i].kiov_page),
72 gmni->gmni_netaddr_base);
/* Map the page's physical address into GM's network address space.
 * NOTE(review): some arguments to this call are elided in this
 * listing (length/port presumably) — confirm against full source. */
74 gmrc = gm_register_memory_ex_phys(
76 lnet_page2phys(nb->nb_kiov[i].kiov_page),
78 gmni->gmni_netaddr_base);
79 CDEBUG(D_NET,"[%3d] page %p: %d\n",
80 i, nb->nb_kiov[i].kiov_page, gmrc);
82 if (gmrc != GM_SUCCESS) {
83 CERROR("Can't map page: %d(%s)\n", gmrc,
84 gmnal_gmstatus2str(gmrc));
/* registration failed AFTER page i was allocated, so free
 * pages 0..i inclusive — hence i+1, unlike the i above */
85 gmnal_free_netbuf_pages(nb, i+1);
/* Record the buffer's base network address and advance the
 * allocator cursor.  NOTE(review): placement relative to the loop
 * is not visible here — presumably nb_netaddr is set once for the
 * buffer and the base advances per registered page; confirm. */
90 nb->nb_netaddr = gmni->gmni_netaddr_base;
92 gmni->gmni_netaddr_base += PAGE_SIZE;
/*
 * Free one large transmit buffer: release its pages and the containing
 * structure (sized via offsetof over the flexible kiov tail).
 * Only legal after the GM port is closed — GM drops the registrations.
 * NOTE(review): 'npages' is computed but the gmnal_free_netbuf_pages()
 * call re-reads gmni->gmni_large_pages directly — same value, minor
 * inconsistency only.
 */
99 gmnal_free_ltxbuf (gmnal_ni_t *gmni, gmnal_txbuf_t *txb)
101 int npages = gmni->gmni_large_pages;
103 LASSERT (gmni->gmni_port == NULL);
104 /* No unmapping; the port has been closed */
106 gmnal_free_netbuf_pages(&txb->txb_buf, gmni->gmni_large_pages);
107 LIBCFS_FREE(txb, offsetof(gmnal_txbuf_t, txb_buf.nb_kiov[npages]));
/*
 * Allocate one large transmit buffer (struct + gmni_large_pages pages),
 * queue it on the idle list and chain it onto the gmni_ltxbs ownership
 * list used by gmnal_free_ltxbufs().  Error-return lines are elided in
 * this listing.
 */
111 gmnal_alloc_ltxbuf (gmnal_ni_t *gmni)
113 int npages = gmni->gmni_large_pages;
/* allocation size covers the struct up to and including npages kiovs */
114 int sz = offsetof(gmnal_txbuf_t, txb_buf.nb_kiov[npages]);
118 LIBCFS_ALLOC(txb, sz);
120 CERROR("Can't allocate large txbuffer\n");
124 rc = gmnal_alloc_netbuf_pages(gmni, &txb->txb_buf, npages);
/* page allocation/registration failed: discard the shell */
126 LIBCFS_FREE(txb, sz);
/* success: available for use ... */
130 cfs_list_add_tail(&txb->txb_list, &gmni->gmni_idle_ltxbs);
/* ... and tracked for eventual teardown */
132 txb->txb_next = gmni->gmni_ltxbs;
133 gmni->gmni_ltxbs = txb;
/*
 * Free a (small, single-page) tx descriptor.  Only legal after the GM
 * port is closed, so no unregistration is needed before freeing the page.
 */
139 gmnal_free_tx (gmnal_tx_t *tx)
141 LASSERT (tx->tx_gmni->gmni_port == NULL);
143 gmnal_free_netbuf_pages(&tx->tx_buf, 1);
144 LIBCFS_FREE(tx, sizeof(*tx));
/*
 * Allocate one small tx descriptor with a single registered page,
 * add it to the idle-tx list and the gmni_txs ownership chain.
 * Error-return lines are elided in this listing.
 */
148 gmnal_alloc_tx (gmnal_ni_t *gmni)
153 LIBCFS_ALLOC(tx, sizeof(*tx));
155 CERROR("Failed to allocate tx\n");
159 memset(tx, 0, sizeof(*tx));
161 rc = gmnal_alloc_netbuf_pages(gmni, &tx->tx_buf, 1);
163 LIBCFS_FREE(tx, sizeof(*tx));
/* ready for use on the idle list; also chained for teardown */
169 cfs_list_add_tail(&tx->tx_list, &gmni->gmni_idle_txs);
171 tx->tx_next = gmni->gmni_txs;
/*
 * Free one rx descriptor; large rx buffers carry gmni_large_pages pages,
 * small ones a single page.  Only legal after the GM port is closed.
 */
178 gmnal_free_rx(gmnal_ni_t *gmni, gmnal_rx_t *rx)
180 int npages = rx->rx_islarge ? gmni->gmni_large_pages : 1;
182 LASSERT (gmni->gmni_port == NULL);
184 gmnal_free_netbuf_pages(&rx->rx_buf, npages);
185 LIBCFS_FREE(rx, offsetof(gmnal_rx_t, rx_buf.nb_kiov[npages]));
/*
 * Allocate one rx descriptor (large or small), chain it on gmni_rxs and
 * index it in the rx hash by its GM-local network address so incoming
 * events can be mapped back to the descriptor.  Error/cleanup lines are
 * elided in this listing.
 */
189 gmnal_alloc_rx (gmnal_ni_t *gmni, int islarge)
191 int npages = islarge ? gmni->gmni_large_pages : 1;
192 int sz = offsetof(gmnal_rx_t, rx_buf.nb_kiov[npages]);
197 LIBCFS_ALLOC(rx, sz);
199 CERROR("Failed to allocate rx\n");
/* NOTE(review): memsets only sizeof(*rx), not the full 'sz' with the
 * kiov tail — presumably the tail is fully initialised by
 * gmnal_alloc_netbuf_pages() below; confirm against full source. */
203 memset(rx, 0, sizeof(*rx));
205 rc = gmnal_alloc_netbuf_pages(gmni, &rx->rx_buf, npages);
211 rx->rx_islarge = islarge;
212 rx->rx_next = gmni->gmni_rxs;
/* key: the buffer's local network address; value: the rx descriptor */
215 gmrc = gm_hash_insert(gmni->gmni_rx_hash,
216 GMNAL_NETBUF_LOCAL_NETADDR(&rx->rx_buf), rx);
217 if (gmrc != GM_SUCCESS) {
218 CERROR("Couldn't add rx to hash table: %d\n", gmrc);
/*
 * Tear down every large tx buffer by walking and unlinking the
 * gmni_ltxbs singly-linked ownership chain.
 */
226 gmnal_free_ltxbufs (gmnal_ni_t *gmni)
230 while ((txb = gmni->gmni_ltxbs) != NULL) {
231 gmni->gmni_ltxbs = txb->txb_next;
232 gmnal_free_ltxbuf(gmni, txb);
/*
 * Allocate the tunable number of large tx buffers.  Per-iteration
 * error handling is elided in this listing.
 */
237 gmnal_alloc_ltxbufs (gmnal_ni_t *gmni)
239 int nlarge_tx_bufs = *gmnal_tunables.gm_nlarge_tx_bufs;
243 for (i = 0; i < nlarge_tx_bufs; i++) {
244 rc = gmnal_alloc_ltxbuf(gmni);
/*
 * Tear down every small tx descriptor by draining the gmni_txs chain.
 * The per-element free call is elided in this listing (presumably
 * gmnal_free_tx(tx)).
 */
254 gmnal_free_txs(gmnal_ni_t *gmni)
258 while ((tx = gmni->gmni_txs) != NULL) {
259 gmni->gmni_txs = tx->tx_next;
/*
 * Record the port's send-token count as our tx credit limit and
 * allocate the tunable number of tx descriptors.  Per-iteration error
 * handling is elided in this listing.
 */
265 gmnal_alloc_txs(gmnal_ni_t *gmni)
267 int ntxcred = gm_num_send_tokens(gmni->gmni_port);
268 int ntx = *gmnal_tunables.gm_ntx;
272 CDEBUG(D_NET, "ntxcred: %d\n", ntxcred);
273 gmni->gmni_tx_credits = ntxcred;
275 for (i = 0; i < ntx; i++) {
276 rc = gmnal_alloc_tx(gmni);
/*
 * Tear down every rx descriptor from the gmni_rxs chain, then destroy
 * the rx hash.  Must run after the GM port is closed: GM releases all
 * port resources (including registrations) at close, so only host-side
 * state needs freeing here.
 */
285 gmnal_free_rxs(gmnal_ni_t *gmni)
289 while ((rx = gmni->gmni_rxs) != NULL) {
290 gmni->gmni_rxs = rx->rx_next;
292 gmnal_free_rx(gmni, rx);
295 LASSERT (gmni->gmni_port == NULL);
297 /* GM releases all resources allocated to a port when it closes */
298 if (gmni->gmni_rx_hash != NULL)
299 gm_destroy_hash(gmni->gmni_rx_hash);
/*
 * Allocate all rx descriptors.  If the tunables request more buffers
 * than the port has receive tokens, both counts are scaled down
 * proportionally (large gets its pro-rata share of the credits, small
 * gets the remainder) and the tunables are updated to reflect reality.
 * Then the rx hash is created sized for 'nrx' entries and the buffers
 * are allocated — large ones first (i < nrx_large).
 * NOTE(review): the enclosing "if (nrxcred < nrx)" guard and the
 * error/return lines are elided in this listing.
 */
304 gmnal_alloc_rxs (gmnal_ni_t *gmni)
306 int nrxcred = gm_num_receive_tokens(gmni->gmni_port);
307 int nrx_small = *gmnal_tunables.gm_nrx_small;
308 int nrx_large = *gmnal_tunables.gm_nrx_large;
309 int nrx = nrx_large + nrx_small;
313 CDEBUG(D_NET, "nrxcred: %d(%dL+%dS)\n", nrxcred, nrx_large, nrx_small);
/* pro-rata split of the available credits between large and small */
316 int nlarge = (nrx_large * nrxcred)/nrx;
317 int nsmall = nrxcred - nlarge;
319 CWARN("Only %d rx credits: "
320 "reducing large %d->%d, small %d->%d\n", nrxcred,
321 nrx_large, nlarge, nrx_small, nsmall);
/* write the reduced counts back to the tunables */
323 *gmnal_tunables.gm_nrx_large = nrx_large = nlarge;
324 *gmnal_tunables.gm_nrx_small = nrx_small = nsmall;
325 nrx = nlarge + nsmall;
328 gmni->gmni_rx_hash = gm_create_hash(gm_hash_compare_ptrs,
329 gm_hash_hash_ptr, 0, 0, nrx, 0);
330 if (gmni->gmni_rx_hash == NULL) {
331 CERROR("Failed to create hash table\n");
335 for (i = 0; i < nrx; i++ ) {
/* the first nrx_large iterations allocate large buffers */
336 rc = gmnal_alloc_rx(gmni, i < nrx_large);
/*
 * Map a gm_status_t to a human-readable string.
 *
 * NOTE(review): the unconditional return(gm_strerror(status)) on the
 * first line makes the entire switch below DEAD CODE — it is kept only
 * as a reference table of status names.  If the switch were live, note
 * two string typos: GM_SEND_TIMED_OUT returns "GM_SEND_TIMEDOUT" and
 * GM_PTE_REF_CNT_OVERFLOW returns "GM_PTR_REF_CNT_OVERFLOW".
 * (Strings left untouched here; fixing them is a code change.)
 */
345 gmnal_gmstatus2str(gm_status_t status)
/* delegate to GM's own strerror; everything below is unreachable */
347 return(gm_strerror(status));
354 case(GM_INPUT_BUFFER_TOO_SMALL):
355 return("INPUT_BUFFER_TOO_SMALL");
356 case(GM_OUTPUT_BUFFER_TOO_SMALL):
357 return("OUTPUT_BUFFER_TOO_SMALL");
362 case(GM_MEMORY_FAULT):
363 return("MEMORY_FAULT");
364 case(GM_INTERRUPTED):
365 return("INTERRUPTED");
366 case(GM_INVALID_PARAMETER):
367 return("INVALID_PARAMETER");
368 case(GM_OUT_OF_MEMORY):
369 return("OUT_OF_MEMORY");
370 case(GM_INVALID_COMMAND):
371 return("INVALID_COMMAND");
372 case(GM_PERMISSION_DENIED):
373 return("PERMISSION_DENIED");
374 case(GM_INTERNAL_ERROR):
375 return("INTERNAL_ERROR");
377 return("UNATTACHED");
378 case(GM_UNSUPPORTED_DEVICE):
379 return("UNSUPPORTED_DEVICE");
380 case(GM_SEND_TIMED_OUT):
/* NOTE(review): string typo — "TIMEDOUT" vs case "TIMED_OUT" */
381 return("GM_SEND_TIMEDOUT");
382 case(GM_SEND_REJECTED):
383 return("GM_SEND_REJECTED");
384 case(GM_SEND_TARGET_PORT_CLOSED):
385 return("GM_SEND_TARGET_PORT_CLOSED");
386 case(GM_SEND_TARGET_NODE_UNREACHABLE):
387 return("GM_SEND_TARGET_NODE_UNREACHABLE");
388 case(GM_SEND_DROPPED):
389 return("GM_SEND_DROPPED");
390 case(GM_SEND_PORT_CLOSED):
391 return("GM_SEND_PORT_CLOSED");
392 case(GM_NODE_ID_NOT_YET_SET):
393 return("GM_NODE_ID_NOT_YET_SET");
394 case(GM_STILL_SHUTTING_DOWN):
395 return("GM_STILL_SHUTTING_DOWN");
397 return("GM_CLONE_BUSY");
398 case(GM_NO_SUCH_DEVICE):
399 return("GM_NO_SUCH_DEVICE");
401 return("GM_ABORTED");
402 case(GM_INCOMPATIBLE_LIB_AND_DRIVER):
403 return("GM_INCOMPATIBLE_LIB_AND_DRIVER");
404 case(GM_UNTRANSLATED_SYSTEM_ERROR):
405 return("GM_UNTRANSLATED_SYSTEM_ERROR");
406 case(GM_ACCESS_DENIED):
407 return("GM_ACCESS_DENIED");
411 * These ones are in the docs but aren't in the header file
412 case(GM_DEV_NOT_FOUND):
413 return("GM_DEV_NOT_FOUND");
414 case(GM_INVALID_PORT_NUMBER):
415 return("GM_INVALID_PORT_NUMBER");
417 return("GM_US_ERROR");
418 case(GM_PAGE_TABLE_FULL):
419 return("GM_PAGE_TABLE_FULL");
420 case(GM_MINOR_OVERFLOW):
421 return("GM_MINOR_OVERFLOW");
422 case(GM_SEND_ORPHANED):
423 return("GM_SEND_ORPHANED");
424 case(GM_HARDWARE_FAULT):
425 return("GM_HARDWARE_FAULT");
426 case(GM_DATA_CORRUPTED):
427 return("GM_DATA_CORRUPTED");
429 return("GM_TIMED_OUT");
431 return("GM_USER_ERROR");
433 return("GM_NOMATCH");
434 case(GM_NOT_SUPPORTED_IN_KERNEL):
435 return("GM_NOT_SUPPORTED_IN_KERNEL");
436 case(GM_NOT_SUPPORTED_ON_ARCH):
437 return("GM_NOT_SUPPORTED_ON_ARCH");
438 case(GM_PTE_REF_CNT_OVERFLOW):
/* NOTE(review): string typo — "PTR" vs case "PTE" */
439 return("GM_PTR_REF_CNT_OVERFLOW");
440 case(GM_NO_DRIVER_SUPPORT):
441 return("GM_NO_DRIVER_SUPPORT");
442 case(GM_FIRMWARE_NOT_RUNNING):
443 return("GM_FIRMWARE_NOT_RUNNING");
444 * These ones are in the docs but aren't in the header file
/* default: unrecognised status code */
448 return("UNKNOWN GM ERROR CODE");
/*
 * Map a GM receive event (via GM_RECV_EVENT_TYPE()) to its name for
 * debug logging.
 *
 * NOTE(review): a few returned strings do not exactly match their case
 * labels (GM_SENDS_FAILED_EVENT -> "GM_SEND_FAILED_EVENT",
 * GM_BAD_RESEND_DETECTED_EVENT -> "GM_BAD_RESEND_DETETED_EVENT",
 * GM_SENT_TOKENS_EVENT -> "GM_SENT_TOKENS_EVENTS").  Left untouched —
 * these are runtime strings; fixing them is a code change.
 */
454 gmnal_rxevent2str(gm_recv_event_t *ev)
457 event = GM_RECV_EVENT_TYPE(ev);
459 case(GM_NO_RECV_EVENT):
460 return("GM_NO_RECV_EVENT");
461 case(GM_SENDS_FAILED_EVENT):
462 return("GM_SEND_FAILED_EVENT");
463 case(GM_ALARM_EVENT):
464 return("GM_ALARM_EVENT");
466 return("GM_SENT_EVENT");
467 case(_GM_SLEEP_EVENT):
468 return("_GM_SLEEP_EVENT");
469 case(GM_RAW_RECV_EVENT):
470 return("GM_RAW_RECV_EVENT");
471 case(GM_BAD_SEND_DETECTED_EVENT):
472 return("GM_BAD_SEND_DETECTED_EVENT");
473 case(GM_SEND_TOKEN_VIOLATION_EVENT):
474 return("GM_SEND_TOKEN_VIOLATION_EVENT");
475 case(GM_RECV_TOKEN_VIOLATION_EVENT):
476 return("GM_RECV_TOKEN_VIOLATION_EVENT");
477 case(GM_BAD_RECV_TOKEN_EVENT):
478 return("GM_BAD_RECV_TOKEN_EVENT");
479 case(GM_ALARM_VIOLATION_EVENT):
480 return("GM_ALARM_VIOLATION_EVENT");
482 return("GM_RECV_EVENT");
483 case(GM_HIGH_RECV_EVENT):
484 return("GM_HIGH_RECV_EVENT");
485 case(GM_PEER_RECV_EVENT):
486 return("GM_PEER_RECV_EVENT");
487 case(GM_HIGH_PEER_RECV_EVENT):
488 return("GM_HIGH_PEER_RECV_EVENT");
489 case(GM_FAST_RECV_EVENT):
490 return("GM_FAST_RECV_EVENT");
491 case(GM_FAST_HIGH_RECV_EVENT):
492 return("GM_FAST_HIGH_RECV_EVENT");
493 case(GM_FAST_PEER_RECV_EVENT):
494 return("GM_FAST_PEER_RECV_EVENT");
495 case(GM_FAST_HIGH_PEER_RECV_EVENT):
496 return("GM_FAST_HIGH_PEER_RECV_EVENT");
497 case(GM_REJECTED_SEND_EVENT):
498 return("GM_REJECTED_SEND_EVENT");
499 case(GM_ORPHANED_SEND_EVENT):
500 return("GM_ORPHANED_SEND_EVENT");
501 case(GM_BAD_RESEND_DETECTED_EVENT):
502 return("GM_BAD_RESEND_DETETED_EVENT");
503 case(GM_DROPPED_SEND_EVENT):
504 return("GM_DROPPED_SEND_EVENT");
505 case(GM_BAD_SEND_VMA_EVENT):
506 return("GM_BAD_SEND_VMA_EVENT");
507 case(GM_BAD_RECV_VMA_EVENT):
508 return("GM_BAD_RECV_VMA_EVENT");
509 case(_GM_FLUSHED_ALARM_EVENT):
510 return("GM_FLUSHED_ALARM_EVENT");
511 case(GM_SENT_TOKENS_EVENT):
512 return("GM_SENT_TOKENS_EVENTS");
513 case(GM_IGNORE_RECV_EVENT):
514 return("GM_IGNORE_RECV_EVENT");
515 case(GM_ETHERNET_RECV_EVENT):
516 return("GM_ETHERNET_RECV_EVENT");
517 case(GM_NEW_NO_RECV_EVENT):
518 return("GM_NEW_NO_RECV_EVENT");
519 case(GM_NEW_SENDS_FAILED_EVENT):
520 return("GM_NEW_SENDS_FAILED_EVENT");
521 case(GM_NEW_ALARM_EVENT):
522 return("GM_NEW_ALARM_EVENT");
523 case(GM_NEW_SENT_EVENT):
524 return("GM_NEW_SENT_EVENT");
525 case(_GM_NEW_SLEEP_EVENT):
526 return("GM_NEW_SLEEP_EVENT");
527 case(GM_NEW_RAW_RECV_EVENT):
528 return("GM_NEW_RAW_RECV_EVENT");
529 case(GM_NEW_BAD_SEND_DETECTED_EVENT):
530 return("GM_NEW_BAD_SEND_DETECTED_EVENT");
531 case(GM_NEW_SEND_TOKEN_VIOLATION_EVENT):
532 return("GM_NEW_SEND_TOKEN_VIOLATION_EVENT");
533 case(GM_NEW_RECV_TOKEN_VIOLATION_EVENT):
534 return("GM_NEW_RECV_TOKEN_VIOLATION_EVENT");
535 case(GM_NEW_BAD_RECV_TOKEN_EVENT):
536 return("GM_NEW_BAD_RECV_TOKEN_EVENT");
537 case(GM_NEW_ALARM_VIOLATION_EVENT):
538 return("GM_NEW_ALARM_VIOLATION_EVENT");
539 case(GM_NEW_RECV_EVENT):
540 return("GM_NEW_RECV_EVENT");
541 case(GM_NEW_HIGH_RECV_EVENT):
542 return("GM_NEW_HIGH_RECV_EVENT");
543 case(GM_NEW_PEER_RECV_EVENT):
544 return("GM_NEW_PEER_RECV_EVENT");
545 case(GM_NEW_HIGH_PEER_RECV_EVENT):
546 return("GM_NEW_HIGH_PEER_RECV_EVENT");
547 case(GM_NEW_FAST_RECV_EVENT):
548 return("GM_NEW_FAST_RECV_EVENT");
549 case(GM_NEW_FAST_HIGH_RECV_EVENT):
550 return("GM_NEW_FAST_HIGH_RECV_EVENT");
551 case(GM_NEW_FAST_PEER_RECV_EVENT):
552 return("GM_NEW_FAST_PEER_RECV_EVENT");
553 case(GM_NEW_FAST_HIGH_PEER_RECV_EVENT):
554 return("GM_NEW_FAST_HIGH_PEER_RECV_EVENT");
555 case(GM_NEW_REJECTED_SEND_EVENT):
556 return("GM_NEW_REJECTED_SEND_EVENT");
557 case(GM_NEW_ORPHANED_SEND_EVENT):
558 return("GM_NEW_ORPHANED_SEND_EVENT");
559 case(_GM_NEW_PUT_NOTIFICATION_EVENT):
560 return("_GM_NEW_PUT_NOTIFICATION_EVENT");
561 case(GM_NEW_FREE_SEND_TOKEN_EVENT):
562 return("GM_NEW_FREE_SEND_TOKEN_EVENT");
563 case(GM_NEW_FREE_HIGH_SEND_TOKEN_EVENT):
564 return("GM_NEW_FREE_HIGH_SEND_TOKEN_EVENT");
565 case(GM_NEW_BAD_RESEND_DETECTED_EVENT):
566 return("GM_NEW_BAD_RESEND_DETECTED_EVENT");
567 case(GM_NEW_DROPPED_SEND_EVENT):
568 return("GM_NEW_DROPPED_SEND_EVENT");
569 case(GM_NEW_BAD_SEND_VMA_EVENT):
570 return("GM_NEW_BAD_SEND_VMA_EVENT");
571 case(GM_NEW_BAD_RECV_VMA_EVENT):
572 return("GM_NEW_BAD_RECV_VMA_EVENT");
573 case(_GM_NEW_FLUSHED_ALARM_EVENT):
574 return("GM_NEW_FLUSHED_ALARM_EVENT");
575 case(GM_NEW_SENT_TOKENS_EVENT):
576 return("GM_NEW_SENT_TOKENS_EVENT");
577 case(GM_NEW_IGNORE_RECV_EVENT):
578 return("GM_NEW_IGNORE_RECV_EVENT");
579 case(GM_NEW_ETHERNET_RECV_EVENT):
580 return("GM_NEW_ETHERNET_RECV_EVENT");
/* default: event type not in the table */
582 return("Unknown Recv event");
583 /* _GM_PUT_NOTIFICATION_EVENT */
584 /* GM_FREE_SEND_TOKEN_EVENT */
585 /* GM_FREE_HIGH_SEND_TOKEN_EVENT */
/*
 * Sleep interruptibly for 'delay' scheduler ticks, yielding the CPU.
 */
591 gmnal_yield(int delay)
593 cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
594 cfs_schedule_timeout(delay);