/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (C) 2006 Myricom, Inc.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lnet/klnds/mxlnd/mxlnd.c
 *
 * Author: Eric Barton <eric@bartonsoftware.com>
 * Author: Scott Atchley <atchley at myri.com>
 */
#include "mxlnd.h"

lnd_t the_kmxlnd = {
        .lnd_type       = MXLND,
        .lnd_startup    = mxlnd_startup,
        .lnd_shutdown   = mxlnd_shutdown,
        .lnd_ctl        = mxlnd_ctl,
        .lnd_send       = mxlnd_send,
        .lnd_recv       = mxlnd_recv,
};

kmx_data_t kmxlnd_data;
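/* All module-wide state lives in kmxlnd_data: mxlnd_startup() zeroes it
 * and initializes its locks and lists before any daemon thread runs, and
 * kmx_init records how far startup progressed so mxlnd_shutdown() knows
 * exactly how much to unwind. */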
void
mxlnd_free_pages(kmx_pages_t *p)
{
        int     npages = p->mxg_npages;
        int     i;

        CDEBUG(D_MALLOC, "freeing %d pages\n", npages);

        for (i = 0; i < npages; i++) {
                if (p->mxg_pages[i] != NULL) {
                        __free_page(p->mxg_pages[i]);
                        spin_lock(&kmxlnd_data.kmx_mem_lock);
                        kmxlnd_data.kmx_mem_used -= PAGE_SIZE;
                        spin_unlock(&kmxlnd_data.kmx_mem_lock);
                }
        }

        MXLND_FREE(p, offsetof(kmx_pages_t, mxg_pages[npages]));
}
int
mxlnd_alloc_pages(kmx_pages_t **pp, int npages)
{
        kmx_pages_t     *p = NULL;
        int              i = 0;

        CDEBUG(D_MALLOC, "allocing %d pages\n", npages);

        MXLND_ALLOC(p, offsetof(kmx_pages_t, mxg_pages[npages]));
        if (p == NULL) {
                CERROR("Can't allocate descriptor for %d pages\n", npages);
                return -ENOMEM;
        }
        memset(p, 0, offsetof(kmx_pages_t, mxg_pages[npages]));
        p->mxg_npages = npages;

        for (i = 0; i < npages; i++) {
                p->mxg_pages[i] = alloc_page(GFP_KERNEL);
                if (p->mxg_pages[i] == NULL) {
                        CERROR("Can't allocate page %d of %d\n", i, npages);
                        mxlnd_free_pages(p);
                        return -ENOMEM;
                }
                spin_lock(&kmxlnd_data.kmx_mem_lock);
                kmxlnd_data.kmx_mem_used += PAGE_SIZE;
                spin_unlock(&kmxlnd_data.kmx_mem_lock);
        }

        *pp = p;
        return 0;
}
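/* Accounting note: every page charged to kmx_mem_used under kmx_mem_lock
 * here is credited back in mxlnd_free_pages(), so the D_MALLOC messages
 * logged around startup and cleanup should show the counter returning to
 * its pre-load value. */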
/**
 * mxlnd_ctx_init - reset ctx struct to the default values
 * @ctx - a kmx_ctx pointer
 */
void
mxlnd_ctx_init(kmx_ctx_t *ctx)
{
        if (ctx == NULL) return;

        /* do not change mxc_type */
        ctx->mxc_incarnation = 0;
        ctx->mxc_deadline = 0;
        ctx->mxc_state = MXLND_CTX_IDLE;
        if (!cfs_list_empty(&ctx->mxc_list))
                cfs_list_del_init(&ctx->mxc_list);
        /* ignore mxc_rx_list */
        if (ctx->mxc_type == MXLND_REQ_TX) {
                ctx->mxc_nid = 0;
                ctx->mxc_peer = NULL;
                ctx->mxc_conn = NULL;
        }
        ctx->mxc_lntmsg[0] = NULL;
        ctx->mxc_lntmsg[1] = NULL;
        ctx->mxc_msg_type = 0;
        ctx->mxc_cookie = 0LL;
        ctx->mxc_match = 0LL;
        /* ctx->mxc_seg.segment_ptr points to backing page */
        ctx->mxc_seg.segment_length = 0;
        if (ctx->mxc_seg_list != NULL) {
                LASSERT(ctx->mxc_nseg > 0);
                MXLND_FREE(ctx->mxc_seg_list,
                           ctx->mxc_nseg * sizeof(mx_ksegment_t));
        }
        ctx->mxc_seg_list = NULL;
        ctx->mxc_nseg = 0;
        memset(&ctx->mxc_mxreq, 0, sizeof(mx_request_t));
        memset(&ctx->mxc_status, 0, sizeof(mx_status_t));

        ctx->mxc_msg->mxm_type = 0;
        ctx->mxc_msg->mxm_credits = 0;
        ctx->mxc_msg->mxm_nob = 0;

        return;
}
/**
 * mxlnd_free_txs - free kmx_txs and associated pages
 *
 * Called from mxlnd_shutdown()
 */
void
mxlnd_free_txs(void)
{
        int             i       = 0;
        kmx_ctx_t      *tx      = NULL;

        if (kmxlnd_data.kmx_tx_pages) {
                for (i = 0; i < MXLND_TX_MSGS(); i++) {
                        tx = &kmxlnd_data.kmx_txs[i];
                        if (tx->mxc_seg_list != NULL) {
                                LASSERT(tx->mxc_nseg > 0);
                                MXLND_FREE(tx->mxc_seg_list,
                                           tx->mxc_nseg *
                                           sizeof(*tx->mxc_seg_list));
                        }
                }
                MXLND_FREE(kmxlnd_data.kmx_txs,
                           MXLND_TX_MSGS() * sizeof(kmx_ctx_t));
                mxlnd_free_pages(kmxlnd_data.kmx_tx_pages);
        }
        return;
}
/**
 * mxlnd_init_txs - allocate tx descriptors then stash on txs and idle tx lists
 *
 * Called from mxlnd_startup()
 * returns 0 on success, else -ENOMEM
 */
int
mxlnd_init_txs(void)
{
        int             ret     = 0;
        int             i       = 0;
        int             ipage   = 0;
        int             offset  = 0;
        void           *addr    = NULL;
        kmx_ctx_t      *tx      = NULL;
        kmx_pages_t    *pages   = NULL;
        struct page    *page    = NULL;

        /* pre-mapped messages are not bigger than 1 page */
        CLASSERT(MXLND_MSG_SIZE <= PAGE_SIZE);

        /* No fancy arithmetic when we do the buffer calculations */
        CLASSERT(PAGE_SIZE % MXLND_MSG_SIZE == 0);

        ret = mxlnd_alloc_pages(&pages, MXLND_TX_MSG_PAGES());
        if (ret != 0) {
                CERROR("Can't allocate tx pages\n");
                return -ENOMEM;
        }
        kmxlnd_data.kmx_tx_pages = pages;

        MXLND_ALLOC(kmxlnd_data.kmx_txs, MXLND_TX_MSGS() * sizeof(kmx_ctx_t));
        if (kmxlnd_data.kmx_txs == NULL) {
                CERROR("Can't allocate %d tx descriptors\n", MXLND_TX_MSGS());
                mxlnd_free_pages(pages);
                return -ENOMEM;
        }
        memset(kmxlnd_data.kmx_txs, 0, MXLND_TX_MSGS() * sizeof(kmx_ctx_t));

        for (i = 0; i < MXLND_TX_MSGS(); i++) {
                tx = &kmxlnd_data.kmx_txs[i];
                tx->mxc_type = MXLND_REQ_TX;

                CFS_INIT_LIST_HEAD(&tx->mxc_list);

                /* map mxc_msg to page */
                page = pages->mxg_pages[ipage];
                addr = page_address(page);
                LASSERT(addr != NULL);
                tx->mxc_msg = (kmx_msg_t *)(addr + offset);
                tx->mxc_seg.segment_ptr = MX_PA_TO_U64(virt_to_phys(tx->mxc_msg));

                mxlnd_ctx_init(tx);

                offset += MXLND_MSG_SIZE;
                LASSERT(offset <= PAGE_SIZE);

                if (offset == PAGE_SIZE) {
                        offset = 0;
                        ipage++;
                        LASSERT(ipage <= MXLND_TX_MSG_PAGES());
                }

                /* in startup(), no locks required */
                cfs_list_add_tail(&tx->mxc_list, &kmxlnd_data.kmx_tx_idle);
        }

        return 0;
}
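/* The loop above packs PAGE_SIZE / MXLND_MSG_SIZE messages into each
 * pre-allocated page; the CLASSERTs guarantee the fit is exact.  For
 * example (sizes hypothetical), with 4096-byte pages and a 1024-byte
 * MXLND_MSG_SIZE, txs 0-3 map to offsets 0, 1024, 2048 and 3072 of
 * page 0, then offset wraps to 0 and ipage advances so tx 4 starts
 * page 1. */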
/**
 * mxlnd_free_peers - free peers
 *
 * Called from mxlnd_shutdown()
 */
void
mxlnd_free_peers(void)
{
        int             i      = 0;
        int             count  = 0;
        kmx_peer_t     *peer   = NULL;
        kmx_peer_t     *next   = NULL;

        for (i = 0; i < MXLND_HASH_SIZE; i++) {
                cfs_list_for_each_entry_safe(peer, next,
                                             &kmxlnd_data.kmx_peers[i],
                                             mxp_list) {
                        cfs_list_del_init(&peer->mxp_list);
                        if (peer->mxp_conn) mxlnd_conn_decref(peer->mxp_conn);
                        mxlnd_peer_decref(peer);
                        count++;
                }
        }
        CDEBUG(D_NET, "%s: freed %d peers\n", __func__, count);
}
/**
 * mxlnd_init_mx - open the endpoint, set our ID, register the EAGER callback
 * @ni - the network interface
 *
 * Returns 0 on success, -1 on failure
 */
int
mxlnd_init_mx(lnet_ni_t *ni)
{
        mx_return_t     mxret;
        u32             board = *kmxlnd_tunables.kmx_board;
        u32             ep_id = *kmxlnd_tunables.kmx_ep_id;
        u64             nic_id = 0LL;
        char           *ifname = NULL;
        __u32           ip;
        __u32           netmask;
        int             if_up = 0;
        int             ret = 0;

        mxret = mx_init();
        if (mxret != MX_SUCCESS) {
                CERROR("mx_init() failed with %s (%d)\n", mx_strerror(mxret), mxret);
                return -1;
        }

        if (ni->ni_interfaces[0] != NULL) {
                /* Use the IPoMX interface specified in 'networks=' */

                CLASSERT(LNET_MAX_INTERFACES > 1);
                if (ni->ni_interfaces[1] != NULL) {
                        CERROR("Multiple interfaces not supported\n");
                        goto failed_with_init;
                }

                ifname = ni->ni_interfaces[0];
        } else {
                ifname = *kmxlnd_tunables.kmx_default_ipif;
        }

        ret = libcfs_ipif_query(ifname, &if_up, &ip, &netmask);
        if (ret != 0) {
                CERROR("Can't query IPoMX interface %s: %d\n",
                       ifname, ret);
                goto failed_with_init;
        }

        if (!if_up) {
                CERROR("Can't query IPoMX interface %s: it's down\n",
                       ifname);
                goto failed_with_init;
        }

        mxret = mx_open_endpoint(board, ep_id, MXLND_MSG_MAGIC,
                                 NULL, 0, &kmxlnd_data.kmx_endpt);
        if (mxret != MX_SUCCESS) {
                CERROR("mx_open_endpoint() failed with %d\n", mxret);
                goto failed_with_init;
        }

        mx_get_endpoint_addr(kmxlnd_data.kmx_endpt, &kmxlnd_data.kmx_epa);
        mx_decompose_endpoint_addr(kmxlnd_data.kmx_epa, &nic_id, &ep_id);
        mxret = mx_connect(kmxlnd_data.kmx_endpt, nic_id, ep_id,
                           MXLND_MSG_MAGIC, MXLND_CONNECT_TIMEOUT/CFS_HZ*1000,
                           &kmxlnd_data.kmx_epa);
        if (mxret != MX_SUCCESS) {
                CNETERR("unable to connect to myself (%s)\n", mx_strerror(mxret));
                goto failed_with_endpoint;
        }

        ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), ip);
        CDEBUG(D_NET, "My NID is 0x%llx\n", ni->ni_nid);

        /* this will catch all unexpected receives. */
        mxret = mx_register_unexp_handler(kmxlnd_data.kmx_endpt,
                                          (mx_unexp_handler_t) mxlnd_unexpected_recv,
                                          NULL);
        if (mxret != MX_SUCCESS) {
                CERROR("mx_register_unexp_callback() failed with %s\n",
                       mx_strerror(mxret));
                goto failed_with_endpoint;
        }

        mxret = mx_set_request_timeout(kmxlnd_data.kmx_endpt, NULL,
                                       MXLND_COMM_TIMEOUT/CFS_HZ*1000);
        if (mxret != MX_SUCCESS) {
                CERROR("mx_set_request_timeout() failed with %s\n",
                       mx_strerror(mxret));
                goto failed_with_endpoint;
        }
        return 0;

failed_with_endpoint:
        mx_close_endpoint(kmxlnd_data.kmx_endpt);
failed_with_init:
        mx_finalize();
        return -1;
}
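/* The two unwind labels above mirror the setup order: failures after
 * mx_open_endpoint() enter at failed_with_endpoint to close the endpoint
 * first, then fall through to failed_with_init, where mx_finalize()
 * undoes the initial mx_init().  Earlier failures jump straight to
 * failed_with_init. */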
/**
 * mxlnd_thread_start - spawn a kernel thread with this function
 * @fn - function pointer
 * @arg - pointer to the parameter data
 *
 * Returns the new thread's PID on success and a negative value on failure
 */
int
mxlnd_thread_start(int (*fn)(void *arg), void *arg)
{
        int     pid = 0;
        int     i   = (int) ((long) arg);

        cfs_atomic_inc(&kmxlnd_data.kmx_nthreads);
        init_completion(&kmxlnd_data.kmx_completions[i]);

        pid = cfs_create_thread(fn, arg, 0);
        if (pid < 0) {
                CERROR("cfs_create_thread() failed with %d\n", pid);
                cfs_atomic_dec(&kmxlnd_data.kmx_nthreads);
        }
        return pid;
}
/**
 * mxlnd_thread_stop - decrement thread counter
 *
 * The thread returns 0 when it detects shutdown.
 * We are simply decrementing the thread counter.
 */
void
mxlnd_thread_stop(long id)
{
        int     i = (int) id;

        cfs_atomic_dec (&kmxlnd_data.kmx_nthreads);
        complete(&kmxlnd_data.kmx_completions[i]);
}
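/* Thread lifecycle: mxlnd_thread_start() increments kmx_nthreads and
 * initializes the thread's completion before spawning it; each daemon
 * calls mxlnd_thread_stop() on exit to decrement the counter and signal
 * its completion, which mxlnd_shutdown() waits on before freeing the
 * kmx_completions array. */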
/**
 * mxlnd_shutdown - stop IO, clean up state
 * @ni - LNET interface handle
 *
 * No calls to the LND should be made after calling this function.
 */
void
mxlnd_shutdown (lnet_ni_t *ni)
{
        int     i               = 0;
        int     nthreads        = MXLND_NDAEMONS
                                  + *kmxlnd_tunables.kmx_n_waitd;

        LASSERT (ni == kmxlnd_data.kmx_ni);
        LASSERT (ni->ni_data == &kmxlnd_data);
        CDEBUG(D_NET, "in shutdown()\n");

        CDEBUG(D_MALLOC, "before MXLND cleanup: libcfs_kmemory %d "
               "kmx_mem_used %ld\n", cfs_atomic_read(&libcfs_kmemory),
               kmxlnd_data.kmx_mem_used);

        CDEBUG(D_NET, "setting shutdown = 1\n");
        cfs_atomic_set(&kmxlnd_data.kmx_shutdown, 1);

        switch (kmxlnd_data.kmx_init) {

        case MXLND_INIT_ALL:

                /* calls write_[un]lock(kmx_global_lock) */
                mxlnd_del_peer(LNET_NID_ANY);

                /* wakeup request_waitds */
                mx_wakeup(kmxlnd_data.kmx_endpt);
                up(&kmxlnd_data.kmx_tx_queue_sem);
                up(&kmxlnd_data.kmx_conn_sem);
                mxlnd_sleep(2 * CFS_HZ);

                /* fall through */

        case MXLND_INIT_THREADS:

                CDEBUG(D_NET, "waiting on threads\n");
                /* wait for threads to complete */
                for (i = 0; i < nthreads; i++) {
                        wait_for_completion(&kmxlnd_data.kmx_completions[i]);
                }
                LASSERT(cfs_atomic_read(&kmxlnd_data.kmx_nthreads) == 0);

                CDEBUG(D_NET, "freeing completions\n");
                MXLND_FREE(kmxlnd_data.kmx_completions,
                           nthreads * sizeof(struct completion));

                /* fall through */

        case MXLND_INIT_MX:

                CDEBUG(D_NET, "stopping mx\n");

                /* no peers left, close the endpoint */
                mx_close_endpoint(kmxlnd_data.kmx_endpt);
                mx_finalize();

                /* fall through */

        case MXLND_INIT_TXS:

                CDEBUG(D_NET, "freeing txs\n");

                /* free all txs and associated pages */
                mxlnd_free_txs();

                /* fall through */

        case MXLND_INIT_DATA:

                CDEBUG(D_NET, "freeing peers\n");

                /* peers should be gone, but check again */
                mxlnd_free_peers();

                /* conn zombies should be gone, but check again */
                mxlnd_free_conn_zombies();

                /* fall through */

        case MXLND_INIT_NOTHING:
                break;
        }
        CDEBUG(D_NET, "shutdown complete\n");

        CDEBUG(D_MALLOC, "after MXLND cleanup: libcfs_kmemory %d "
               "kmx_mem_used %ld\n", cfs_atomic_read(&libcfs_kmemory),
               kmxlnd_data.kmx_mem_used);

        kmxlnd_data.kmx_init = MXLND_INIT_NOTHING;

        return;
}
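/* Teardown walks the MXLND_INIT_* stages in reverse: the switch enters
 * at whatever stage startup reached and falls through every case below
 * it, so a partially-initialized module is unwound exactly as far as it
 * got and no further. */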
/**
 * mxlnd_startup - initialize state, open an endpoint, start IO
 * @ni - LNET interface handle
 *
 * Initialize state, open an endpoint, start monitoring threads.
 * Should only be called once.
 */
int
mxlnd_startup (lnet_ni_t *ni)
{
        int             i               = 0;
        int             ret             = 0;
        int             nthreads        = MXLND_NDAEMONS /* tx_queued, timeoutd, connd */
                                          + *kmxlnd_tunables.kmx_n_waitd;
        struct timeval  tv;

        LASSERT (ni->ni_lnd == &the_kmxlnd);

        if (kmxlnd_data.kmx_init != MXLND_INIT_NOTHING) {
                CERROR("Only 1 instance supported\n");
                return -EPERM;
        }

        CDEBUG(D_MALLOC, "before MXLND startup: libcfs_kmemory %d "
               "kmx_mem_used %ld\n", cfs_atomic_read(&libcfs_kmemory),
               kmxlnd_data.kmx_mem_used);

        ni->ni_maxtxcredits = MXLND_TX_MSGS();
        ni->ni_peertxcredits = *kmxlnd_tunables.kmx_peercredits;
        if (ni->ni_maxtxcredits < ni->ni_peertxcredits)
                ni->ni_maxtxcredits = ni->ni_peertxcredits;

        /* init kmx_data */
        memset (&kmxlnd_data, 0, sizeof (kmxlnd_data));

        kmxlnd_data.kmx_ni = ni;
        ni->ni_data = &kmxlnd_data;

        cfs_gettimeofday(&tv);
        kmxlnd_data.kmx_incarnation = (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
        CDEBUG(D_NET, "my incarnation is %llu\n", kmxlnd_data.kmx_incarnation);

        rwlock_init (&kmxlnd_data.kmx_global_lock);
        spin_lock_init (&kmxlnd_data.kmx_mem_lock);

        CFS_INIT_LIST_HEAD (&kmxlnd_data.kmx_conn_reqs);
        CFS_INIT_LIST_HEAD (&kmxlnd_data.kmx_conn_zombies);
        CFS_INIT_LIST_HEAD (&kmxlnd_data.kmx_orphan_msgs);
        spin_lock_init (&kmxlnd_data.kmx_conn_lock);
        sema_init(&kmxlnd_data.kmx_conn_sem, 0);

        for (i = 0; i < MXLND_HASH_SIZE; i++) {
                CFS_INIT_LIST_HEAD (&kmxlnd_data.kmx_peers[i]);
        }

        CFS_INIT_LIST_HEAD (&kmxlnd_data.kmx_tx_idle);
        spin_lock_init (&kmxlnd_data.kmx_tx_idle_lock);
        kmxlnd_data.kmx_tx_next_cookie = 1;
        CFS_INIT_LIST_HEAD (&kmxlnd_data.kmx_tx_queue);
        spin_lock_init (&kmxlnd_data.kmx_tx_queue_lock);
        sema_init(&kmxlnd_data.kmx_tx_queue_sem, 0);

        kmxlnd_data.kmx_init = MXLND_INIT_DATA;
        /*****************************************************/

        ret = mxlnd_init_txs();
        if (ret != 0) {
                CERROR("Can't alloc tx descs: %d\n", ret);
                goto failed;
        }
        kmxlnd_data.kmx_init = MXLND_INIT_TXS;
        /*****************************************************/

        ret = mxlnd_init_mx(ni);
        if (ret != 0) {
                CERROR("Can't init mx\n");
                goto failed;
        }
        kmxlnd_data.kmx_init = MXLND_INIT_MX;
        /*****************************************************/

        /* start threads */

        MXLND_ALLOC(kmxlnd_data.kmx_completions,
                    nthreads * sizeof(struct completion));
        if (kmxlnd_data.kmx_completions == NULL) {
                CERROR("failed to alloc kmxlnd_data.kmx_completions\n");
                goto failed;
        }
        memset(kmxlnd_data.kmx_completions, 0,
               nthreads * sizeof(struct completion));

        CDEBUG(D_NET, "using %d %s in mx_wait_any()\n",
               *kmxlnd_tunables.kmx_n_waitd,
               *kmxlnd_tunables.kmx_n_waitd == 1 ? "thread" : "threads");

        for (i = 0; i < *kmxlnd_tunables.kmx_n_waitd; i++) {
                ret = mxlnd_thread_start(mxlnd_request_waitd, (void*)((long)i));
                if (ret < 0) {
                        CERROR("Starting mxlnd_request_waitd[%d] failed with %d\n", i, ret);
                        cfs_atomic_set(&kmxlnd_data.kmx_shutdown, 1);
                        mx_wakeup(kmxlnd_data.kmx_endpt);
                        for (--i; i >= 0; i--) {
                                wait_for_completion(&kmxlnd_data.kmx_completions[i]);
                        }
                        LASSERT(cfs_atomic_read(&kmxlnd_data.kmx_nthreads) == 0);
                        MXLND_FREE(kmxlnd_data.kmx_completions,
                                   nthreads * sizeof(struct completion));
                        goto failed;
                }
        }
        ret = mxlnd_thread_start(mxlnd_tx_queued, (void*)((long)i++));
        if (ret < 0) {
                CERROR("Starting mxlnd_tx_queued failed with %d\n", ret);
                cfs_atomic_set(&kmxlnd_data.kmx_shutdown, 1);
                mx_wakeup(kmxlnd_data.kmx_endpt);
                for (--i; i >= 0; i--) {
                        wait_for_completion(&kmxlnd_data.kmx_completions[i]);
                }
                LASSERT(cfs_atomic_read(&kmxlnd_data.kmx_nthreads) == 0);
                MXLND_FREE(kmxlnd_data.kmx_completions,
                           nthreads * sizeof(struct completion));
                goto failed;
        }
        ret = mxlnd_thread_start(mxlnd_timeoutd, (void*)((long)i++));
        if (ret < 0) {
                CERROR("Starting mxlnd_timeoutd failed with %d\n", ret);
                cfs_atomic_set(&kmxlnd_data.kmx_shutdown, 1);
                mx_wakeup(kmxlnd_data.kmx_endpt);
                up(&kmxlnd_data.kmx_tx_queue_sem);
                for (--i; i >= 0; i--) {
                        wait_for_completion(&kmxlnd_data.kmx_completions[i]);
                }
                LASSERT(cfs_atomic_read(&kmxlnd_data.kmx_nthreads) == 0);
                MXLND_FREE(kmxlnd_data.kmx_completions,
                           nthreads * sizeof(struct completion));
                goto failed;
        }
        ret = mxlnd_thread_start(mxlnd_connd, (void*)((long)i++));
        if (ret < 0) {
                CERROR("Starting mxlnd_connd failed with %d\n", ret);
                cfs_atomic_set(&kmxlnd_data.kmx_shutdown, 1);
                mx_wakeup(kmxlnd_data.kmx_endpt);
                up(&kmxlnd_data.kmx_tx_queue_sem);
                for (--i; i >= 0; i--) {
                        wait_for_completion(&kmxlnd_data.kmx_completions[i]);
                }
                LASSERT(cfs_atomic_read(&kmxlnd_data.kmx_nthreads) == 0);
                MXLND_FREE(kmxlnd_data.kmx_completions,
                           nthreads * sizeof(struct completion));
                goto failed;
        }

        kmxlnd_data.kmx_init = MXLND_INIT_THREADS;
        /*****************************************************/

        kmxlnd_data.kmx_init = MXLND_INIT_ALL;
        CDEBUG(D_MALLOC, "startup complete (kmx_mem_used %ld)\n",
               kmxlnd_data.kmx_mem_used);

        return 0;
failed:
        CERROR("mxlnd_startup failed\n");
        mxlnd_shutdown(ni);
        return (-ENETDOWN);
}
static int mxlnd_init(void)
{
        lnet_register_lnd(&the_kmxlnd);
        return 0;
}

static void mxlnd_exit(void)
{
        lnet_unregister_lnd(&the_kmxlnd);
        return;
}

module_init(mxlnd_init);
module_exit(mxlnd_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Myricom, Inc. - help@myri.com");
MODULE_DESCRIPTION("Kernel MyrinetExpress LND");
MODULE_VERSION("0.6.0");