2 * -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
3 * vim:expandtab:shiftwidth=8:tabstop=8:
7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 only,
11 * as published by the Free Software Foundation.
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License version 2 for more details (a copy is included
17 * in the LICENSE file that accompanied this code).
19 * You should have received a copy of the GNU General Public License
20 * version 2 along with this program; If not, see [sun.com URL with a
23 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
24 * CA 95054 USA or visit www.sun.com if you need additional information or
30 * Copyright 2008 Sun Microsystems, Inc. All rights reserved
31 * Use is subject to license terms.
33 * Copyright (c) 2003 Los Alamos National Laboratory (LANL)
36 * This file is part of Lustre, http://www.lustre.org/
37 * Lustre is a trademark of Sun Microsystems, Inc.
/*
 * Release every page attached to a network buffer's kiov.
 * NOTE(review): this extraction is missing lines (braces, the declaration
 * of 'i'); only comments have been added here.
 */
43 gmnal_free_netbuf_pages (gmnal_netbuf_t *nb, int npages)
47         for (i = 0; i < npages; i++)
48                 __free_page(nb->nb_kiov[i].kiov_page);
/*
 * Allocate 'npages' pages into nb->nb_kiov and register each one with GM
 * at successive net addresses starting from gmni->gmni_netaddr_base.
 * On any failure the pages already allocated (and, after a failed GM
 * registration, the failing page too — hence 'i+1') are freed again via
 * gmnal_free_netbuf_pages().
 * NOTE(review): lines are missing from this extraction (braces, local
 * declarations of 'i'/'gmrc', error returns, success return).
 */
52 gmnal_alloc_netbuf_pages (gmnal_ni_t *gmni, gmnal_netbuf_t *nb, int npages)
59         for (i = 0; i < npages; i++) {
60                 nb->nb_kiov[i].kiov_page = alloc_page(GFP_KERNEL);
61                 nb->nb_kiov[i].kiov_offset = 0;
62                 nb->nb_kiov[i].kiov_len = PAGE_SIZE;
64                 if (nb->nb_kiov[i].kiov_page == NULL) {
                        /* undo the i pages allocated so far */
65                         CERROR("Can't allocate page\n");
66                         gmnal_free_netbuf_pages(nb, i);
70                 CDEBUG(D_NET,"[%3d] page %p, phys "LPX64", @ "LPX64"\n",
71                        i, nb->nb_kiov[i].kiov_page,
72                        lnet_page2phys(nb->nb_kiov[i].kiov_page),
73                        gmni->gmni_netaddr_base);
                /* map the page's physical address into GM's address space */
75                 gmrc = gm_register_memory_ex_phys(
77                         lnet_page2phys(nb->nb_kiov[i].kiov_page),
79                         gmni->gmni_netaddr_base);
80                 CDEBUG(D_NET,"[%3d] page %p: %d\n",
81                        i, nb->nb_kiov[i].kiov_page, gmrc);
83                 if (gmrc != GM_SUCCESS) {
                        /* i+1: the current page was allocated but not mapped */
84                         CERROR("Can't map page: %d(%s)\n", gmrc,
85                                gmnal_gmstatus2str(gmrc));
86                         gmnal_free_netbuf_pages(nb, i+1);
                /* first iteration records the buffer's GM net address;
                 * the base then advances one page per mapped page */
91                         nb->nb_netaddr = gmni->gmni_netaddr_base;
93                 gmni->gmni_netaddr_base += PAGE_SIZE;
/*
 * Free one large transmit buffer: release its pages, then the descriptor
 * itself (sized by its flexible kiov array of 'npages' entries).
 * Only legal after the GM port is closed — GM unmapping is skipped.
 * NOTE(review): braces missing from this extraction.
 */
100 gmnal_free_ltxbuf (gmnal_ni_t *gmni, gmnal_txbuf_t *txb)
102         int npages = gmni->gmni_large_pages;
104         LASSERT (gmni->gmni_port == NULL);
105         /* No unmapping; the port has been closed */
107         gmnal_free_netbuf_pages(&txb->txb_buf, gmni->gmni_large_pages);
108         LIBCFS_FREE(txb, offsetof(gmnal_txbuf_t, txb_buf.nb_kiov[npages]));
/*
 * Allocate one large transmit buffer, back it with GM-registered pages,
 * and link it onto both the idle list and the all-ltxbs chain.
 * NOTE(review): lines missing from this extraction (declarations of
 * 'txb'/'rc', NULL/error checks, return statements, braces).
 */
112 gmnal_alloc_ltxbuf (gmnal_ni_t *gmni)
114         int npages = gmni->gmni_large_pages;
                /* descriptor size includes the trailing kiov[npages] array */
115         int sz = offsetof(gmnal_txbuf_t, txb_buf.nb_kiov[npages]);
119         LIBCFS_ALLOC(txb, sz);
121                 CERROR("Can't allocate large txbuffer\n");
125         rc = gmnal_alloc_netbuf_pages(gmni, &txb->txb_buf, npages);
                /* page allocation failed: free the descriptor again */
127                 LIBCFS_FREE(txb, sz);
131         list_add_tail(&txb->txb_list, &gmni->gmni_idle_ltxbs);
                /* push onto the singly-linked list of all large tx buffers */
133         txb->txb_next = gmni->gmni_ltxbs;
134         gmni->gmni_ltxbs = txb;
/*
 * Free a tx descriptor and its single backing page.
 * Only legal after the GM port is closed (no unmapping needed).
 * NOTE(review): braces missing from this extraction.
 */
140 gmnal_free_tx (gmnal_tx_t *tx)
142         LASSERT (tx->tx_gmni->gmni_port == NULL);
144         gmnal_free_netbuf_pages(&tx->tx_buf, 1);
145         LIBCFS_FREE(tx, sizeof(*tx));
/*
 * Allocate one tx descriptor backed by a single GM-registered page and
 * link it onto the idle list and the all-txs chain.
 * NOTE(review): lines missing from this extraction (declarations of
 * 'tx'/'rc', NULL/error checks, returns, braces).
 */
149 gmnal_alloc_tx (gmnal_ni_t *gmni)
154         LIBCFS_ALLOC(tx, sizeof(*tx));
156                 CERROR("Failed to allocate tx\n");
160         memset(tx, 0, sizeof(*tx));
162         rc = gmnal_alloc_netbuf_pages(gmni, &tx->tx_buf, 1);
                /* page allocation failed: free the descriptor again */
164                 LIBCFS_FREE(tx, sizeof(*tx));
170         list_add_tail(&tx->tx_list, &gmni->gmni_idle_txs);
                /* push onto the singly-linked list of all tx descriptors */
172         tx->tx_next = gmni->gmni_txs;
/*
 * Free an rx descriptor and its backing pages (gmni_large_pages of them
 * for a large rx, otherwise one).  Only legal after the port is closed.
 * NOTE(review): braces missing from this extraction.
 */
179 gmnal_free_rx(gmnal_ni_t *gmni, gmnal_rx_t *rx)
181         int npages = rx->rx_islarge ? gmni->gmni_large_pages : 1;
183         LASSERT (gmni->gmni_port == NULL);
185         gmnal_free_netbuf_pages(&rx->rx_buf, npages);
186         LIBCFS_FREE(rx, offsetof(gmnal_rx_t, rx_buf.nb_kiov[npages]));
/*
 * Allocate one rx descriptor (large or small), back it with GM-registered
 * pages, chain it onto gmni_rxs and index it in the rx hash table by its
 * local GM net address so incoming events can be matched to it.
 * NOTE(review): lines missing from this extraction (declarations of
 * 'rx'/'rc'/'gmrc', NULL/error checks, returns, braces, error cleanup).
 */
190 gmnal_alloc_rx (gmnal_ni_t *gmni, int islarge)
192         int npages = islarge ? gmni->gmni_large_pages : 1;
                /* descriptor size includes the trailing kiov[npages] array */
193         int sz = offsetof(gmnal_rx_t, rx_buf.nb_kiov[npages]);
198         LIBCFS_ALLOC(rx, sz);
200                 CERROR("Failed to allocate rx\n");
204         memset(rx, 0, sizeof(*rx));
206         rc = gmnal_alloc_netbuf_pages(gmni, &rx->rx_buf, npages);
212         rx->rx_islarge = islarge;
213         rx->rx_next = gmni->gmni_rxs;
                /* key: the buffer's local GM network address */
216         gmrc = gm_hash_insert(gmni->gmni_rx_hash,
217                               GMNAL_NETBUF_LOCAL_NETADDR(&rx->rx_buf), rx);
218         if (gmrc != GM_SUCCESS) {
219                 CERROR("Couldn't add rx to hash table: %d\n", gmrc);
/*
 * Free every large tx buffer by walking the gmni_ltxbs chain.
 * NOTE(review): braces and the declaration of 'txb' are missing from
 * this extraction.
 */
227 gmnal_free_ltxbufs (gmnal_ni_t *gmni)
231         while ((txb = gmni->gmni_ltxbs) != NULL) {
232                 gmni->gmni_ltxbs = txb->txb_next;
233                 gmnal_free_ltxbuf(gmni, txb);
/*
 * Allocate the tunable number of large tx buffers.
 * NOTE(review): loop body braces, error handling and return are missing
 * from this extraction.
 */
238 gmnal_alloc_ltxbufs (gmnal_ni_t *gmni)
240         int nlarge_tx_bufs = *gmnal_tunables.gm_nlarge_tx_bufs;
244         for (i = 0; i < nlarge_tx_bufs; i++) {
245                 rc = gmnal_alloc_ltxbuf(gmni);
/*
 * Free every tx descriptor by walking the gmni_txs chain.
 * NOTE(review): the per-tx free call, braces and the declaration of 'tx'
 * are missing from this extraction.
 */
255 gmnal_free_txs(gmnal_ni_t *gmni)
259         while ((tx = gmni->gmni_txs) != NULL) {
260                 gmni->gmni_txs = tx->tx_next;
/*
 * Record the port's send-token count as the tx credit pool, then
 * allocate the tunable number of tx descriptors.
 * NOTE(review): loop body, error handling and return are missing from
 * this extraction.
 */
266 gmnal_alloc_txs(gmnal_ni_t *gmni)
268         int ntxcred = gm_num_send_tokens(gmni->gmni_port);
269         int ntx = *gmnal_tunables.gm_ntx;
273         CDEBUG(D_NET, "ntxcred: %d\n", ntxcred);
274         gmni->gmni_tx_credits = ntxcred;
276         for (i = 0; i < ntx; i++) {
277                 rc = gmnal_alloc_tx(gmni);
/*
 * Free every rx descriptor, then destroy the rx hash table if it was
 * created.  Per the comment below, GM itself releases the port's mapped
 * resources when the port closes, so no explicit unmapping is done.
 * NOTE(review): braces and the declaration of 'rx' are missing from this
 * extraction.
 */
286 gmnal_free_rxs(gmnal_ni_t *gmni)
290         while ((rx = gmni->gmni_rxs) != NULL) {
291                 gmni->gmni_rxs = rx->rx_next;
293                 gmnal_free_rx(gmni, rx);
296         LASSERT (gmni->gmni_port == NULL);
298         /* GM releases all resources allocated to a port when it closes */
299         if (gmni->gmni_rx_hash != NULL)
300                 gm_destroy_hash(gmni->gmni_rx_hash);
/*
 * Allocate the receive buffer pool.  If the tunable totals exceed the
 * port's receive tokens, scale the large/small split down proportionally
 * (large gets nrx_large*nrxcred/nrx, small gets the remainder) and write
 * the reduced values back into the tunables.  Then create the rx hash
 * table sized for nrx entries and allocate the rxs, large ones first.
 * NOTE(review): the guard condition around the scaling branch, error
 * handling and returns are missing from this extraction.
 */
305 gmnal_alloc_rxs (gmnal_ni_t *gmni)
307         int nrxcred = gm_num_receive_tokens(gmni->gmni_port);
308         int nrx_small = *gmnal_tunables.gm_nrx_small;
309         int nrx_large = *gmnal_tunables.gm_nrx_large;
310         int nrx = nrx_large + nrx_small;
314         CDEBUG(D_NET, "nrxcred: %d(%dL+%dS)\n", nrxcred, nrx_large, nrx_small);
                /* proportional split of the available credits */
317                 int nlarge = (nrx_large * nrxcred)/nrx;
318                 int nsmall = nrxcred - nlarge;
320                 CWARN("Only %d rx credits: "
321                       "reducing large %d->%d, small %d->%d\n", nrxcred,
322                       nrx_large, nlarge, nrx_small, nsmall);
                /* publish the reduced counts back through the tunables */
324                 *gmnal_tunables.gm_nrx_large = nrx_large = nlarge;
325                 *gmnal_tunables.gm_nrx_small = nrx_small = nsmall;
326                 nrx = nlarge + nsmall;
329         gmni->gmni_rx_hash = gm_create_hash(gm_hash_compare_ptrs,
330                                             gm_hash_hash_ptr, 0, 0, nrx, 0);
331         if (gmni->gmni_rx_hash == NULL) {
332                 CERROR("Failed to create hash table\n");
                /* first nrx_large iterations allocate the large rxs */
336         for (i = 0; i < nrx; i++ ) {
337                 rc = gmnal_alloc_rx(gmni, i < nrx_large);
/*
 * Map a gm_status_t to a human-readable string.
 * NOTE(review): the immediate return of gm_strerror() followed by a
 * switch suggests the full file wraps these in #if/#else (use GM's own
 * strerror when available, otherwise the hand-written table); the
 * preprocessor lines are missing from this extraction, as are the
 * switch header, braces and several case labels.
 */
346 gmnal_gmstatus2str(gm_status_t status)
348         return(gm_strerror(status));
355         case(GM_INPUT_BUFFER_TOO_SMALL):
356                 return("INPUT_BUFFER_TOO_SMALL");
357         case(GM_OUTPUT_BUFFER_TOO_SMALL):
358                 return("OUTPUT_BUFFER_TOO_SMALL");
363         case(GM_MEMORY_FAULT):
364                 return("MEMORY_FAULT");
365         case(GM_INTERRUPTED):
366                 return("INTERRUPTED");
367         case(GM_INVALID_PARAMETER):
368                 return("INVALID_PARAMETER");
369         case(GM_OUT_OF_MEMORY):
370                 return("OUT_OF_MEMORY");
371         case(GM_INVALID_COMMAND):
372                 return("INVALID_COMMAND");
373         case(GM_PERMISSION_DENIED):
374                 return("PERMISSION_DENIED");
375         case(GM_INTERNAL_ERROR):
376                 return("INTERNAL_ERROR");
378                 return("UNATTACHED");
379         case(GM_UNSUPPORTED_DEVICE):
380                 return("UNSUPPORTED_DEVICE");
381         case(GM_SEND_TIMED_OUT):
382                 return("GM_SEND_TIMEDOUT");
383         case(GM_SEND_REJECTED):
384                 return("GM_SEND_REJECTED");
385         case(GM_SEND_TARGET_PORT_CLOSED):
386                 return("GM_SEND_TARGET_PORT_CLOSED");
387         case(GM_SEND_TARGET_NODE_UNREACHABLE):
388                 return("GM_SEND_TARGET_NODE_UNREACHABLE");
389         case(GM_SEND_DROPPED):
390                 return("GM_SEND_DROPPED");
391         case(GM_SEND_PORT_CLOSED):
392                 return("GM_SEND_PORT_CLOSED");
393         case(GM_NODE_ID_NOT_YET_SET):
394                 return("GM_NODE_ID_NOT_YET_SET");
395         case(GM_STILL_SHUTTING_DOWN):
396                 return("GM_STILL_SHUTTING_DOWN");
398                 return("GM_CLONE_BUSY");
399         case(GM_NO_SUCH_DEVICE):
400                 return("GM_NO_SUCH_DEVICE");
402                 return("GM_ABORTED");
403         case(GM_INCOMPATIBLE_LIB_AND_DRIVER):
404                 return("GM_INCOMPATIBLE_LIB_AND_DRIVER");
405         case(GM_UNTRANSLATED_SYSTEM_ERROR):
406                 return("GM_UNTRANSLATED_SYSTEM_ERROR");
407         case(GM_ACCESS_DENIED):
408                 return("GM_ACCESS_DENIED");
412          * These ones are in the docs but aren't in the header file
413         case(GM_DEV_NOT_FOUND):
414                 return("GM_DEV_NOT_FOUND");
415         case(GM_INVALID_PORT_NUMBER):
416                 return("GM_INVALID_PORT_NUMBER");
418                 return("GM_US_ERROR");
419         case(GM_PAGE_TABLE_FULL):
420                 return("GM_PAGE_TABLE_FULL");
421         case(GM_MINOR_OVERFLOW):
422                 return("GM_MINOR_OVERFLOW");
423         case(GM_SEND_ORPHANED):
424                 return("GM_SEND_ORPHANED");
425         case(GM_HARDWARE_FAULT):
426                 return("GM_HARDWARE_FAULT");
427         case(GM_DATA_CORRUPTED):
428                 return("GM_DATA_CORRUPTED");
430                 return("GM_TIMED_OUT");
432                 return("GM_USER_ERROR");
434                 return("GM_NOMATCH");
435         case(GM_NOT_SUPPORTED_IN_KERNEL):
436                 return("GM_NOT_SUPPORTED_IN_KERNEL");
437         case(GM_NOT_SUPPORTED_ON_ARCH):
438                 return("GM_NOT_SUPPORTED_ON_ARCH");
439         case(GM_PTE_REF_CNT_OVERFLOW):
                /* NOTE(review): string says "PTR" but the case is PTE —
                 * looks like a typo in the message; confirm before fixing */
440                 return("GM_PTR_REF_CNT_OVERFLOW");
441         case(GM_NO_DRIVER_SUPPORT):
442                 return("GM_NO_DRIVER_SUPPORT");
443         case(GM_FIRMWARE_NOT_RUNNING):
444                 return("GM_FIRMWARE_NOT_RUNNING");
445          * These ones are in the docs but aren't in the header file
                /* default: unrecognised status code */
449                 return("UNKNOWN GM ERROR CODE");
/*
 * Map a GM receive event (via GM_RECV_EVENT_TYPE) to a human-readable
 * string for logging.
 * NOTE(review): the switch header, braces, some case labels and the
 * declaration of 'event' are missing from this extraction.
 */
455 gmnal_rxevent2str(gm_recv_event_t *ev)
458         event = GM_RECV_EVENT_TYPE(ev);
460         case(GM_NO_RECV_EVENT):
461                 return("GM_NO_RECV_EVENT");
462         case(GM_SENDS_FAILED_EVENT):
                /* NOTE(review): string drops the S in SENDS — likely typo */
463                 return("GM_SEND_FAILED_EVENT");
464         case(GM_ALARM_EVENT):
465                 return("GM_ALARM_EVENT");
467                 return("GM_SENT_EVENT");
468         case(_GM_SLEEP_EVENT):
469                 return("_GM_SLEEP_EVENT");
470         case(GM_RAW_RECV_EVENT):
471                 return("GM_RAW_RECV_EVENT");
472         case(GM_BAD_SEND_DETECTED_EVENT):
473                 return("GM_BAD_SEND_DETECTED_EVENT");
474         case(GM_SEND_TOKEN_VIOLATION_EVENT):
475                 return("GM_SEND_TOKEN_VIOLATION_EVENT");
476         case(GM_RECV_TOKEN_VIOLATION_EVENT):
477                 return("GM_RECV_TOKEN_VIOLATION_EVENT");
478         case(GM_BAD_RECV_TOKEN_EVENT):
479                 return("GM_BAD_RECV_TOKEN_EVENT");
480         case(GM_ALARM_VIOLATION_EVENT):
481                 return("GM_ALARM_VIOLATION_EVENT");
483                 return("GM_RECV_EVENT");
484         case(GM_HIGH_RECV_EVENT):
485                 return("GM_HIGH_RECV_EVENT");
486         case(GM_PEER_RECV_EVENT):
487                 return("GM_PEER_RECV_EVENT");
488         case(GM_HIGH_PEER_RECV_EVENT):
489                 return("GM_HIGH_PEER_RECV_EVENT");
490         case(GM_FAST_RECV_EVENT):
491                 return("GM_FAST_RECV_EVENT");
492         case(GM_FAST_HIGH_RECV_EVENT):
493                 return("GM_FAST_HIGH_RECV_EVENT");
494         case(GM_FAST_PEER_RECV_EVENT):
495                 return("GM_FAST_PEER_RECV_EVENT");
496         case(GM_FAST_HIGH_PEER_RECV_EVENT):
497                 return("GM_FAST_HIGH_PEER_RECV_EVENT");
498         case(GM_REJECTED_SEND_EVENT):
499                 return("GM_REJECTED_SEND_EVENT");
500         case(GM_ORPHANED_SEND_EVENT):
501                 return("GM_ORPHANED_SEND_EVENT");
502         case(GM_BAD_RESEND_DETECTED_EVENT):
                /* NOTE(review): "DETETED" — likely typo for DETECTED */
503                 return("GM_BAD_RESEND_DETETED_EVENT");
504         case(GM_DROPPED_SEND_EVENT):
505                 return("GM_DROPPED_SEND_EVENT");
506         case(GM_BAD_SEND_VMA_EVENT):
507                 return("GM_BAD_SEND_VMA_EVENT");
508         case(GM_BAD_RECV_VMA_EVENT):
509                 return("GM_BAD_RECV_VMA_EVENT");
510         case(_GM_FLUSHED_ALARM_EVENT):
511                 return("GM_FLUSHED_ALARM_EVENT");
512         case(GM_SENT_TOKENS_EVENT):
                /* NOTE(review): trailing S ("EVENTS") — likely typo */
513                 return("GM_SENT_TOKENS_EVENTS");
514         case(GM_IGNORE_RECV_EVENT):
515                 return("GM_IGNORE_RECV_EVENT");
516         case(GM_ETHERNET_RECV_EVENT):
517                 return("GM_ETHERNET_RECV_EVENT");
518         case(GM_NEW_NO_RECV_EVENT):
519                 return("GM_NEW_NO_RECV_EVENT");
520         case(GM_NEW_SENDS_FAILED_EVENT):
521                 return("GM_NEW_SENDS_FAILED_EVENT");
522         case(GM_NEW_ALARM_EVENT):
523                 return("GM_NEW_ALARM_EVENT");
524         case(GM_NEW_SENT_EVENT):
525                 return("GM_NEW_SENT_EVENT");
526         case(_GM_NEW_SLEEP_EVENT):
527                 return("GM_NEW_SLEEP_EVENT");
528         case(GM_NEW_RAW_RECV_EVENT):
529                 return("GM_NEW_RAW_RECV_EVENT");
530         case(GM_NEW_BAD_SEND_DETECTED_EVENT):
531                 return("GM_NEW_BAD_SEND_DETECTED_EVENT");
532         case(GM_NEW_SEND_TOKEN_VIOLATION_EVENT):
533                 return("GM_NEW_SEND_TOKEN_VIOLATION_EVENT");
534         case(GM_NEW_RECV_TOKEN_VIOLATION_EVENT):
535                 return("GM_NEW_RECV_TOKEN_VIOLATION_EVENT");
536         case(GM_NEW_BAD_RECV_TOKEN_EVENT):
537                 return("GM_NEW_BAD_RECV_TOKEN_EVENT");
538         case(GM_NEW_ALARM_VIOLATION_EVENT):
539                 return("GM_NEW_ALARM_VIOLATION_EVENT");
540         case(GM_NEW_RECV_EVENT):
541                 return("GM_NEW_RECV_EVENT");
542         case(GM_NEW_HIGH_RECV_EVENT):
543                 return("GM_NEW_HIGH_RECV_EVENT");
544         case(GM_NEW_PEER_RECV_EVENT):
545                 return("GM_NEW_PEER_RECV_EVENT");
546         case(GM_NEW_HIGH_PEER_RECV_EVENT):
547                 return("GM_NEW_HIGH_PEER_RECV_EVENT");
548         case(GM_NEW_FAST_RECV_EVENT):
549                 return("GM_NEW_FAST_RECV_EVENT");
550         case(GM_NEW_FAST_HIGH_RECV_EVENT):
551                 return("GM_NEW_FAST_HIGH_RECV_EVENT");
552         case(GM_NEW_FAST_PEER_RECV_EVENT):
553                 return("GM_NEW_FAST_PEER_RECV_EVENT");
554         case(GM_NEW_FAST_HIGH_PEER_RECV_EVENT):
555                 return("GM_NEW_FAST_HIGH_PEER_RECV_EVENT");
556         case(GM_NEW_REJECTED_SEND_EVENT):
557                 return("GM_NEW_REJECTED_SEND_EVENT");
558         case(GM_NEW_ORPHANED_SEND_EVENT):
559                 return("GM_NEW_ORPHANED_SEND_EVENT");
560         case(_GM_NEW_PUT_NOTIFICATION_EVENT):
561                 return("_GM_NEW_PUT_NOTIFICATION_EVENT");
562         case(GM_NEW_FREE_SEND_TOKEN_EVENT):
563                 return("GM_NEW_FREE_SEND_TOKEN_EVENT");
564         case(GM_NEW_FREE_HIGH_SEND_TOKEN_EVENT):
565                 return("GM_NEW_FREE_HIGH_SEND_TOKEN_EVENT");
566         case(GM_NEW_BAD_RESEND_DETECTED_EVENT):
567                 return("GM_NEW_BAD_RESEND_DETECTED_EVENT");
568         case(GM_NEW_DROPPED_SEND_EVENT):
569                 return("GM_NEW_DROPPED_SEND_EVENT");
570         case(GM_NEW_BAD_SEND_VMA_EVENT):
571                 return("GM_NEW_BAD_SEND_VMA_EVENT");
572         case(GM_NEW_BAD_RECV_VMA_EVENT):
573                 return("GM_NEW_BAD_RECV_VMA_EVENT");
574         case(_GM_NEW_FLUSHED_ALARM_EVENT):
575                 return("GM_NEW_FLUSHED_ALARM_EVENT");
576         case(GM_NEW_SENT_TOKENS_EVENT):
577                 return("GM_NEW_SENT_TOKENS_EVENT");
578         case(GM_NEW_IGNORE_RECV_EVENT):
579                 return("GM_NEW_IGNORE_RECV_EVENT");
580         case(GM_NEW_ETHERNET_RECV_EVENT):
581                 return("GM_NEW_ETHERNET_RECV_EVENT");
                /* default: unhandled event types (see commented cases below) */
583                 return("Unknown Recv event");
584         /* _GM_PUT_NOTIFICATION_EVENT */
585         /* GM_FREE_SEND_TOKEN_EVENT */
586         /* GM_FREE_HIGH_SEND_TOKEN_EVENT */
/*
 * Sleep interruptibly for 'delay' scheduler ticks (jiffies), yielding
 * the CPU to other tasks.
 * NOTE(review): braces missing from this extraction.
 */
592 gmnal_yield(int delay)
594         set_current_state(TASK_INTERRUPTIBLE);
595         schedule_timeout(delay);