/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (C) 2004 Cluster File Systems, Inc.
 *   Author: Eric Barton <eric@bartonsoftware.com>
 * Copyright (C) 2006 Myricom, Inc.
 *   Author: Scott Atchley <atchley at myri.com>
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include "mxlnd.h"

lnd_t the_kmxlnd = {
        .lnd_type       = MXLND,
        .lnd_startup    = mxlnd_startup,
        .lnd_shutdown   = mxlnd_shutdown,
        .lnd_ctl        = mxlnd_ctl,
        .lnd_send       = mxlnd_send,
        .lnd_recv       = mxlnd_recv,
};

kmx_data_t kmxlnd_data;
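/* kmxlnd_data is the module's only global state: the MX endpoint, the
 * host list, the peer hash table, the tx/rx descriptor pools, and the
 * kmx_init/kmx_shutdown flags that sequence startup and teardown below. */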
/**
 * mxlnd_ctx_free - free ctx struct
 * @ctx - a kmx_ctx pointer
 *
 * The calling function should remove the ctx from the ctx list first.
 */
void
mxlnd_ctx_free(struct kmx_ctx *ctx)
{
        if (ctx == NULL) return;

        if (ctx->mxc_page != NULL) {
                __free_page(ctx->mxc_page);
                spin_lock(&kmxlnd_data.kmx_global_lock);
                kmxlnd_data.kmx_mem_used -= MXLND_EAGER_SIZE;
                spin_unlock(&kmxlnd_data.kmx_global_lock);
        }

        if (ctx->mxc_seg_list != NULL) {
                LASSERT(ctx->mxc_nseg > 0);
                MXLND_FREE(ctx->mxc_seg_list, ctx->mxc_nseg * sizeof(mx_ksegment_t));
        }

        MXLND_FREE(ctx, sizeof(*ctx));
        return;
}
/**
 * mxlnd_ctx_alloc - allocate and initialize a new ctx struct
 * @ctxp - address of a kmx_ctx pointer
 * @type - the request type (MXLND_REQ_TX or MXLND_REQ_RX)
 *
 * Returns 0 on success, or -EINVAL or -ENOMEM on failure
 */
int
mxlnd_ctx_alloc(struct kmx_ctx **ctxp, enum kmx_req_type type)
{
        struct kmx_ctx  *ctx    = NULL;

        if (ctxp == NULL) return -EINVAL;

        MXLND_ALLOC(ctx, sizeof(*ctx));
        if (ctx == NULL) {
                CDEBUG(D_NETERROR, "Cannot allocate ctx\n");
                return -ENOMEM;
        }
        memset(ctx, 0, sizeof(*ctx));
        spin_lock_init(&ctx->mxc_lock);
        ctx->mxc_type = type;

        /* allocate the single page used as this ctx's eager buffer */
        ctx->mxc_page = alloc_page(GFP_KERNEL);
        if (ctx->mxc_page == NULL) {
                CDEBUG(D_NETERROR, "Can't allocate page\n");
                mxlnd_ctx_free(ctx);
                return -ENOMEM;
        }
        spin_lock(&kmxlnd_data.kmx_global_lock);
        kmxlnd_data.kmx_mem_used += MXLND_EAGER_SIZE;
        spin_unlock(&kmxlnd_data.kmx_global_lock);
        ctx->mxc_msg = (struct kmx_msg *)((char *)page_address(ctx->mxc_page));
        ctx->mxc_seg.segment_ptr = MX_PA_TO_U64(lnet_page2phys(ctx->mxc_page));
        ctx->mxc_state = MXLND_CTX_IDLE;

        *ctxp = ctx;
        return 0;
}
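/* Each ctx owns exactly one page: mxc_msg is its kernel virtual address,
 * used to build and read eager messages, while mxc_seg carries its
 * physical address so MX can DMA directly to/from the page. */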
/**
 * mxlnd_ctx_init - reset ctx struct to the default values
 * @ctx - a kmx_ctx pointer
 */
void
mxlnd_ctx_init(struct kmx_ctx *ctx)
{
        if (ctx == NULL) return;

        /* do not change mxc_type */
        ctx->mxc_incarnation = 0;
        ctx->mxc_deadline = 0;
        ctx->mxc_state = MXLND_CTX_IDLE;
        /* ignore mxc_global_list */
        if (ctx->mxc_list.next != NULL && !list_empty(&ctx->mxc_list)) {
                if (ctx->mxc_peer != NULL)
                        spin_lock(&ctx->mxc_lock);
                list_del_init(&ctx->mxc_list);
                if (ctx->mxc_peer != NULL)
                        spin_unlock(&ctx->mxc_lock);
        }
        /* ignore mxc_rx_list */
        /* ignore mxc_lock */
        ctx->mxc_nid = 0;
        ctx->mxc_peer = NULL;
        ctx->mxc_conn = NULL;
        /* ignore mxc_msg */
        /* ignore mxc_page */
        ctx->mxc_lntmsg[0] = NULL;
        ctx->mxc_lntmsg[1] = NULL;
        ctx->mxc_msg_type = 0;
        ctx->mxc_cookie = 0LL;
        ctx->mxc_match = 0LL;
        /* ctx->mxc_seg.segment_ptr points to mxc_page */
        ctx->mxc_seg.segment_length = 0;
        if (ctx->mxc_seg_list != NULL) {
                LASSERT(ctx->mxc_nseg > 0);
                MXLND_FREE(ctx->mxc_seg_list, ctx->mxc_nseg * sizeof(mx_ksegment_t));
        }
        ctx->mxc_seg_list = NULL;
        ctx->mxc_nseg = 0;
        ctx->mxc_mxreq = NULL;
        memset(&ctx->mxc_status, 0, sizeof(mx_status_t));

        ctx->mxc_msg->mxm_type = 0;
        ctx->mxc_msg->mxm_credits = 0;
        ctx->mxc_msg->mxm_nob = 0;
        ctx->mxc_msg->mxm_seq = 0;

        return;
}
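/* A ctx cycles between the idle list and in-flight use: it is allocated
 * once at startup, reset with mxlnd_ctx_init() each time it returns to
 * an idle list, and only freed again at shutdown. */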
/**
 * mxlnd_free_txs - free kmx_txs and associated pages
 *
 * Called from mxlnd_shutdown()
 */
void
mxlnd_free_txs(void)
{
        struct kmx_ctx  *tx     = NULL;
        struct kmx_ctx  *next   = NULL;

        list_for_each_entry_safe(tx, next, &kmxlnd_data.kmx_txs, mxc_global_list) {
                list_del_init(&tx->mxc_global_list);
                mxlnd_ctx_free(tx);
        }
        return;
}
/**
 * mxlnd_init_txs - allocate tx descriptors then stash on txs and idle tx lists
 *
 * Called from mxlnd_startup()
 * Returns 0 on success, else -ENOMEM
 */
int
mxlnd_init_txs(void)
{
        int             ret     = 0;
        int             i       = 0;
        struct kmx_ctx  *tx     = NULL;

        for (i = 0; i < *kmxlnd_tunables.kmx_ntx; i++) {
                ret = mxlnd_ctx_alloc(&tx, MXLND_REQ_TX);
                if (ret != 0) {
                        mxlnd_free_txs();
                        return ret;
                }
                mxlnd_ctx_init(tx);
                /* in startup(), no locks required */
                list_add_tail(&tx->mxc_global_list, &kmxlnd_data.kmx_txs);
                list_add_tail(&tx->mxc_list, &kmxlnd_data.kmx_tx_idle);
        }
        return 0;
}
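/* Note that each tx sits on two lists at once: kmx_txs (via
 * mxc_global_list) tracks every descriptor for teardown, while
 * kmx_tx_idle (via mxc_list) holds only the ones available for sends.
 * The kmx_ntx tunable also bounds ni_maxtxcredits, which
 * mxlnd_startup() sets to half of it. */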
/**
 * mxlnd_free_rxs - free initial kmx_rx descriptors and associated pages
 *
 * Called from mxlnd_shutdown()
 */
void
mxlnd_free_rxs(void)
{
        struct kmx_ctx  *rx     = NULL;
        struct kmx_ctx  *next   = NULL;

        list_for_each_entry_safe(rx, next, &kmxlnd_data.kmx_rxs, mxc_global_list) {
                list_del_init(&rx->mxc_global_list);
                mxlnd_ctx_free(rx);
        }
        return;
}
/**
 * mxlnd_init_rxs - allocate initial rx descriptors
 *
 * Called from startup(). We create MXLND_MAX_PEERS plus MXLND_NTX
 * rx descriptors. We create one for each potential peer to handle
 * the initial connect request. We create one for each tx in case the
 * send requires a non-eager receive.
 *
 * Returns 0 on success, else -ENOMEM
 */
int
mxlnd_init_rxs(void)
{
        int             ret     = 0;
        int             i       = 0;
        struct kmx_ctx  *rx     = NULL;

        for (i = 0; i < (*kmxlnd_tunables.kmx_ntx + *kmxlnd_tunables.kmx_max_peers); i++) {
                ret = mxlnd_ctx_alloc(&rx, MXLND_REQ_RX);
                if (ret != 0) {
                        mxlnd_free_rxs();
                        return ret;
                }
                mxlnd_ctx_init(rx);
                /* in startup(), no locks required */
                list_add_tail(&rx->mxc_global_list, &kmxlnd_data.kmx_rxs);
                list_add_tail(&rx->mxc_list, &kmxlnd_data.kmx_rx_idle);
        }
        return 0;
}
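/* For example (values are illustrative, not the shipped defaults): with
 * kmx_ntx = 256 and kmx_max_peers = 1024, this pre-allocates 1280 rx
 * descriptors, each pinning one eager page, i.e. 5 MB of eager buffers
 * on a 4 KB-page machine. */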
/**
 * mxlnd_free_peers - free peers
 *
 * Called from mxlnd_shutdown()
 */
void
mxlnd_free_peers(void)
{
        int              i      = 0;
        struct kmx_peer *peer   = NULL;
        struct kmx_peer *next   = NULL;

        for (i = 0; i < MXLND_HASH_SIZE; i++) {
                list_for_each_entry_safe(peer, next, &kmxlnd_data.kmx_peers[i], mxp_peers) {
                        list_del_init(&peer->mxp_peers);
                        if (peer->mxp_conn) mxlnd_conn_decref(peer->mxp_conn);
                        mxlnd_peer_decref(peer);
                }
        }
        return;
}
int
mxlnd_host_alloc(struct kmx_host **hostp)
{
        struct kmx_host *host   = NULL;

        MXLND_ALLOC(host, sizeof(*host));
        if (host == NULL) {
                CDEBUG(D_NETERROR, "Cannot allocate host\n");
                return -1;
        }
        memset(host, 0, sizeof(*host));
        spin_lock_init(&host->mxh_lock);

        *hostp = host;
        return 0;
}
void
mxlnd_host_free(struct kmx_host *host)
{
        if (host == NULL) return;

        if (host->mxh_hostname != NULL)
                MXLND_FREE(host->mxh_hostname, strlen(host->mxh_hostname) + 1);

        MXLND_FREE(host, sizeof(*host));
        return;
}
/**
 * mxlnd_free_hosts - free kmx_hosts
 *
 * Called from mxlnd_shutdown()
 */
void
mxlnd_free_hosts(void)
{
        struct kmx_host *host   = NULL;
        struct kmx_host *next   = NULL;

        list_for_each_entry_safe(host, next, &kmxlnd_data.kmx_hosts, mxh_list) {
                list_del_init(&host->mxh_list);
                mxlnd_host_free(host);
        }
        return;
}
#define xstr(s) #s
#define str(s) xstr(s)
#define MXLND_MAX_BOARD 4       /* we expect hosts to have fewer NICs than this */
#define MXLND_MAX_EP_ID 16      /* we expect hosts to have fewer endpoints than this */
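/* The two-level xstr()/str() pair is the standard C stringification
 * idiom: str(MX_MAX_HOSTNAME_LEN) first expands the argument to 80 and
 * only then stringifies it, yielding "80". mxlnd_parse_line() uses it
 * below to build a bounded "%80s" conversion for sscanf() so the
 * hostname cannot overflow its buffer. */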
/* this parses a line that consists of:
 *
 * IP              HOSTNAME        BOARD   ENDPOINT ID
 * 169.192.0.113   mds01           0       3
 *
 * By default MX uses the alias (short hostname). If you override
 * it using mx_hostname to use the FQDN or some other name, the hostname
 * here must match exactly.
 */
/* MX_MAX_HOSTNAME_LEN = 80. See myriexpress.h */
int
mxlnd_parse_line(char *line)
{
        int              i              = 0;
        int              ret            = 0;
        int              len            = 0;
        u32              ip[4]          = { 0, 0, 0, 0 };
        /* +1 for the NUL that a full-width %80s conversion appends */
        char             hostname[MX_MAX_HOSTNAME_LEN + 1];
        u32              board          = -1;
        u32              ep_id          = -1;
        struct kmx_host *host           = NULL;

        if (line == NULL) return -1;

        len = strlen(line);
        if (len == 0) return -1;

        /* convert tabs to spaces */
        for (i = 0; i < len; i++) {
                if (line[i] == '\t') line[i] = ' ';
        }

        memset(&hostname, 0, sizeof(hostname));
        ret = sscanf(line, "%d.%d.%d.%d %" str(MX_MAX_HOSTNAME_LEN) "s %d %d",
                     &ip[0], &ip[1], &ip[2], &ip[3], hostname, &board, &ep_id);
        if (ret != 7) return -1;

        /* check for valid values */
        /* we assume a valid IP address (all <= 255), number of NICs,
         * and number of endpoint IDs */
        if (ip[0] > 255 || ip[1] > 255 || ip[2] > 255 || ip[3] > 255 ||
            board > MXLND_MAX_BOARD || ep_id > MXLND_MAX_EP_ID) {
                CDEBUG(D_NETERROR, "Illegal value in \"%s\". Ignoring "
                       "this host.\n", line);
                return -1;
        }

        ret = mxlnd_host_alloc(&host);
        if (ret != 0) return -1;

        host->mxh_addr = ((ip[0]<<24) | (ip[1]<<16) | (ip[2]<<8) | ip[3]);
        len = strlen(hostname);
        MXLND_ALLOC(host->mxh_hostname, len + 1);
        if (host->mxh_hostname == NULL) {
                mxlnd_host_free(host);
                return -1;
        }
        memset(host->mxh_hostname, 0, len + 1);
        strncpy(host->mxh_hostname, hostname, len);
        host->mxh_board = board;
        host->mxh_ep_id = ep_id;

        spin_lock(&kmxlnd_data.kmx_hosts_lock);
        list_add_tail(&host->mxh_list, &kmxlnd_data.kmx_hosts);
        spin_unlock(&kmxlnd_data.kmx_hosts_lock);

        return 0;
}
void
mxlnd_print_hosts(void)
{
#if MXLND_DEBUG
        struct kmx_host *host   = NULL;

        list_for_each_entry(host, &kmxlnd_data.kmx_hosts, mxh_list) {
                int     ip[4];
                u32     addr    = host->mxh_addr;

                ip[0] = (addr >> 24) & 0xff;
                ip[1] = (addr >> 16) & 0xff;
                ip[2] = (addr >>  8) & 0xff;
                ip[3] = addr & 0xff;
                CDEBUG(D_NET, "\tip= %d.%d.%d.%d\n\thost= %s\n\tboard= %d\n\tep_id= %d\n\n",
                       ip[0], ip[1], ip[2], ip[3],
                       host->mxh_hostname, host->mxh_board, host->mxh_ep_id);
        }
#endif
        return;
}
#define MXLND_BUFSIZE (PAGE_SIZE - 1)
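/* mxlnd_parse_hosts() reads the file in MXLND_BUFSIZE chunks; the buffer
 * itself is allocated one byte larger and zeroed so strchr()/strlen() on
 * the tail of a chunk always hit a terminating NUL. A line that straddles
 * a chunk boundary fails to parse on the first pass and is re-read in
 * full at the start of the next chunk. */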
int
mxlnd_parse_hosts(char *filename)
{
        int              ret            = 0;
        s32              size           = 0;
        s32              bufsize        = MXLND_BUFSIZE;
        s32              allocd         = 0;
        loff_t           offset         = 0;
        struct file     *filp           = NULL;
        char            *buf            = NULL;
        s32              buf_off        = 0;
        char            *sep            = NULL;
        char            *line           = NULL;

        if (filename == NULL) return -1;

        filp = filp_open(filename, O_RDONLY, 0);
        if (IS_ERR(filp)) {
                CERROR("filp_open() failed for %s\n", filename);
                return -1;
        }

        size = (s32) cfs_filp_size(filp);
        if (size < MXLND_BUFSIZE) bufsize = size;
        allocd = bufsize;
        MXLND_ALLOC(buf, allocd + 1);
        if (buf == NULL) {
                CERROR("Cannot allocate buf\n");
                filp_close(filp, current->files);
                return -1;
        }

        while (offset < size) {
                memset(buf, 0, bufsize + 1);
                ret = kernel_read(filp, (unsigned long) offset, buf, (unsigned long) bufsize);
                if (ret < 0) {
                        CDEBUG(D_NETERROR, "kernel_read() returned %d - closing %s\n", ret, filename);
                        filp_close(filp, current->files);
                        MXLND_FREE(buf, allocd + 1);
                        return -1;
                }
                if (ret < bufsize) bufsize = ret;

                buf_off = 0;
                while (buf_off < bufsize) {
                        sep = strchr(buf + buf_off, '\n');
                        if (sep != NULL) {
                                /* we have a complete line */
                                *sep = '\0';
                                line = buf + buf_off;
                                ret = mxlnd_parse_line(line);
                                if (ret != 0 && strlen(line) != 0) {
                                        CDEBUG(D_NETERROR, "Failed to parse \"%s\". Ignoring this host.\n", line);
                                }
                                buf_off += strlen(line) + 1;
                        } else {
                                /* last line or we need to read more */
                                line = buf + buf_off;
                                ret = mxlnd_parse_line(line);
                                if (ret != 0) {
                                        /* the line was incomplete; shrink this
                                         * chunk so it is re-read next pass */
                                        bufsize -= strlen(line) + 1;
                                }
                                buf_off += strlen(line) + 1;
                        }
                }
                offset += bufsize;
                bufsize = MXLND_BUFSIZE;
        }

        MXLND_FREE(buf, allocd + 1);
        filp_close(filp, current->files);
        return 0;
}
/**
 * mxlnd_init_mx - open the endpoint, set our ID, register the EAGER callback
 * @ni - the network interface
 *
 * Returns 0 on success, -1 on failure
 */
int
mxlnd_init_mx(lnet_ni_t *ni)
{
        int                     ret     = 0;
        mx_return_t             mxret;
        mx_endpoint_addr_t      addr;
        u32                     board   = *kmxlnd_tunables.kmx_board;
        u32                     ep_id   = *kmxlnd_tunables.kmx_ep_id;
        u64                     nic_id  = 0LL;
        struct kmx_host         *host   = NULL;

        mxret = mx_init();
        if (mxret != MX_SUCCESS) {
                CERROR("mx_init() failed with %s (%d)\n", mx_strerror(mxret), mxret);
                return -1;
        }

        ret = mxlnd_parse_hosts(*kmxlnd_tunables.kmx_hosts);
        if (ret != 0) {
                if (*kmxlnd_tunables.kmx_hosts != NULL) {
                        CERROR("mxlnd_parse_hosts(%s) failed\n", *kmxlnd_tunables.kmx_hosts);
                }
                mx_finalize();
                return -1;
        }

        list_for_each_entry(host, &kmxlnd_data.kmx_hosts, mxh_list) {
                if (strcmp(host->mxh_hostname, system_utsname.nodename) == 0) {
                        /* override the defaults and module parameters with
                         * the info from the hosts file */
                        board = host->mxh_board;
                        ep_id = host->mxh_ep_id;
                        kmxlnd_data.kmx_localhost = host;
                        CDEBUG(D_NET, "my hostname is %s board %d ep_id %d\n",
                               kmxlnd_data.kmx_localhost->mxh_hostname,
                               kmxlnd_data.kmx_localhost->mxh_board,
                               kmxlnd_data.kmx_localhost->mxh_ep_id);
                        break;
                }
        }

        if (kmxlnd_data.kmx_localhost == NULL) {
                CERROR("no host entry found for localhost\n");
                mx_finalize();
                return -1;
        }

        mxret = mx_open_endpoint(board, ep_id, MXLND_MSG_MAGIC,
                                 NULL, 0, &kmxlnd_data.kmx_endpt);
        if (mxret != MX_SUCCESS) {
                CERROR("mx_open_endpoint() failed with %d\n", mxret);
                mx_finalize();
                return -1;
        }

        mx_get_endpoint_addr(kmxlnd_data.kmx_endpt, &addr);
        mx_decompose_endpoint_addr(addr, &nic_id, &ep_id);

        LASSERT(host != NULL);
        ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), host->mxh_addr);

        CDEBUG(D_NET, "My NID is 0x%llx\n", ni->ni_nid);

        /* this will catch all unexpected receives. */
        mxret = mx_register_unexp_handler(kmxlnd_data.kmx_endpt,
                                          (mx_unexp_handler_t) mxlnd_unexpected_recv,
                                          NULL);
        if (mxret != MX_SUCCESS) {
                CERROR("mx_register_unexp_callback() failed with %s\n",
                       mx_strerror(mxret));
                mx_close_endpoint(kmxlnd_data.kmx_endpt);
                mx_finalize();
                return -1;
        }

        mxret = mx_set_request_timeout(kmxlnd_data.kmx_endpt, NULL, MXLND_COMM_TIMEOUT/HZ*1000);
        if (mxret != MX_SUCCESS) {
                CERROR("mx_set_request_timeout() failed with %s\n",
                       mx_strerror(mxret));
                mx_close_endpoint(kmxlnd_data.kmx_endpt);
                mx_finalize();
                return -1;
        }

        return 0;
}
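/* MXLND_MSG_MAGIC is passed to mx_open_endpoint() as the MX filter value;
 * MX only matches traffic between endpoints opened with the same filter,
 * which keeps other MX applications from colliding with LNET. The NID is
 * formed from the interface's network number plus the IP listed in the
 * hosts file, so every node derives the same NID for this host. */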
/**
 * mxlnd_thread_start - spawn a kernel thread with this function
 * @fn - function pointer
 * @arg - pointer to the parameter data
 *
 * Returns the new pid on success, else a negative value on failure
 */
int
mxlnd_thread_start(int (*fn)(void *arg), void *arg)
{
        int     pid     = 0;
        int     i       = (int) ((long) arg);

        atomic_inc(&kmxlnd_data.kmx_nthreads);
        init_completion(&kmxlnd_data.kmx_completions[i]);

        pid = kernel_thread(fn, arg, 0);
        if (pid <= 0) {
                CERROR("kernel_thread() failed with %d\n", pid);
                atomic_dec(&kmxlnd_data.kmx_nthreads);
        }
        return pid;
}
/**
 * mxlnd_thread_stop - decrement thread counter, signal completion
 * @id - this thread's index into the completions array
 *
 * The thread returns 0 when it detects shutdown.
 * We are simply decrementing the thread counter.
 */
void
mxlnd_thread_stop(long id)
{
        int     i       = (int) id;

        atomic_dec(&kmxlnd_data.kmx_nthreads);
        complete(&kmxlnd_data.kmx_completions[i]);
        return;
}
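/* Thread bookkeeping convention: the value passed to mxlnd_thread_start()
 * doubles as the thread's index into kmx_completions[], so each thread
 * signals exactly one completion on exit and mxlnd_shutdown() can wait on
 * all MXLND_NCOMPLETIONS entries. */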
/**
 * mxlnd_shutdown - stop IO, clean up state
 * @ni - LNET interface handle
 *
 * No calls to the LND should be made after calling this function.
 */
void
mxlnd_shutdown (lnet_ni_t *ni)
{
        int     i       = 0;

        LASSERT (ni == kmxlnd_data.kmx_ni);
        LASSERT (ni->ni_data == &kmxlnd_data);
        CDEBUG(D_NET, "in shutdown()\n");

        CDEBUG(D_MALLOC, "before MXLND cleanup: libcfs_kmemory %d "
               "kmx_mem_used %ld\n", atomic_read(&libcfs_kmemory),
               kmxlnd_data.kmx_mem_used);

        /* tear down in the reverse order of startup; each case falls
         * through to the next */
        switch (kmxlnd_data.kmx_init) {

        case MXLND_INIT_ALL:
                CDEBUG(D_NET, "setting shutdown = 1\n");
                /* set shutdown and wakeup request_waitds */
                kmxlnd_data.kmx_shutdown = 1;
                mx_wakeup(kmxlnd_data.kmx_endpt);
                up(&kmxlnd_data.kmx_tx_queue_sem);
                /* fall through */

        case MXLND_INIT_THREADS:
                CDEBUG(D_NET, "waiting on threads\n");
                /* wait for threads to complete */
                for (i = 0; i < MXLND_NCOMPLETIONS; i++) {
                        wait_for_completion(&kmxlnd_data.kmx_completions[i]);
                }
                LASSERT(atomic_read(&kmxlnd_data.kmx_nthreads) == 0);

                CDEBUG(D_NET, "freeing completions\n");
                MXLND_FREE(kmxlnd_data.kmx_completions,
                           MXLND_NCOMPLETIONS * sizeof(struct completion));
                /* fall through */

        case MXLND_INIT_MX:
                CDEBUG(D_NET, "stopping mx\n");
                /* wakeup waiters if they missed the above.
                 * close endpoint to stop all traffic.
                 * this will cancel and cleanup all requests, etc. */
                mx_wakeup(kmxlnd_data.kmx_endpt);
                mx_close_endpoint(kmxlnd_data.kmx_endpt);
                mx_finalize();

                CDEBUG(D_NET, "mxlnd_free_hosts();\n");
                mxlnd_free_hosts();
                /* fall through */

        case MXLND_INIT_RXS:
                CDEBUG(D_NET, "freeing rxs\n");
                /* free all rxs and associated pages */
                mxlnd_free_rxs();
                /* fall through */

        case MXLND_INIT_TXS:
                CDEBUG(D_NET, "freeing txs\n");
                /* free all txs and associated pages */
                mxlnd_free_txs();
                /* fall through */

        case MXLND_INIT_DATA:
                CDEBUG(D_NET, "freeing peers\n");
                mxlnd_free_peers();
                /* fall through */

        case MXLND_INIT_NOTHING:
                break;
        }
        CDEBUG(D_NET, "shutdown complete\n");

        CDEBUG(D_MALLOC, "after MXLND cleanup: libcfs_kmemory %d "
               "kmx_mem_used %ld\n", atomic_read(&libcfs_kmemory),
               kmxlnd_data.kmx_mem_used);

        kmxlnd_data.kmx_init = MXLND_INIT_NOTHING;
        return;
}
/**
 * mxlnd_startup - initialize state, open an endpoint, start IO
 * @ni - LNET interface handle
 *
 * Initialize state, open an endpoint, start monitoring threads.
 * Should only be called once.
 */
int
mxlnd_startup (lnet_ni_t *ni)
{
        int             i       = 0;
        int             ret     = 0;
        struct timeval  tv;

        LASSERT (ni->ni_lnd == &the_kmxlnd);

        if (kmxlnd_data.kmx_init != MXLND_INIT_NOTHING) {
                CERROR("Only 1 instance supported\n");
                return -EPERM;
        }

        CDEBUG(D_MALLOC, "before MXLND startup: libcfs_kmemory %d "
               "kmx_mem_used %ld\n", atomic_read(&libcfs_kmemory),
               kmxlnd_data.kmx_mem_used);

        /* reserve 1/2 of tx for connect request messages */
        ni->ni_maxtxcredits = *kmxlnd_tunables.kmx_ntx / 2;
        ni->ni_peertxcredits = *kmxlnd_tunables.kmx_credits;

        memset (&kmxlnd_data, 0, sizeof (kmxlnd_data));

        kmxlnd_data.kmx_ni = ni;
        ni->ni_data = &kmxlnd_data;

        do_gettimeofday(&tv);
        kmxlnd_data.kmx_incarnation = (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
        CDEBUG(D_NET, "my incarnation is %lld\n", kmxlnd_data.kmx_incarnation);
        spin_lock_init (&kmxlnd_data.kmx_global_lock);

        INIT_LIST_HEAD (&kmxlnd_data.kmx_conn_req);
        spin_lock_init (&kmxlnd_data.kmx_conn_lock);
        sema_init(&kmxlnd_data.kmx_conn_sem, 0);

        INIT_LIST_HEAD (&kmxlnd_data.kmx_hosts);
        spin_lock_init (&kmxlnd_data.kmx_hosts_lock);

        for (i = 0; i < MXLND_HASH_SIZE; i++) {
                INIT_LIST_HEAD (&kmxlnd_data.kmx_peers[i]);
        }
        rwlock_init (&kmxlnd_data.kmx_peers_lock);

        INIT_LIST_HEAD (&kmxlnd_data.kmx_txs);
        INIT_LIST_HEAD (&kmxlnd_data.kmx_tx_idle);
        spin_lock_init (&kmxlnd_data.kmx_tx_idle_lock);
        kmxlnd_data.kmx_tx_next_cookie = 1;
        INIT_LIST_HEAD (&kmxlnd_data.kmx_tx_queue);
        spin_lock_init (&kmxlnd_data.kmx_tx_queue_lock);
        sema_init(&kmxlnd_data.kmx_tx_queue_sem, 0);

        INIT_LIST_HEAD (&kmxlnd_data.kmx_rxs);
        spin_lock_init (&kmxlnd_data.kmx_rxs_lock);
        INIT_LIST_HEAD (&kmxlnd_data.kmx_rx_idle);
        spin_lock_init (&kmxlnd_data.kmx_rx_idle_lock);
        kmxlnd_data.kmx_init = MXLND_INIT_DATA;
        /*****************************************************/

        ret = mxlnd_init_txs();
        if (ret != 0) {
                CERROR("Can't alloc tx descs: %d\n", ret);
                goto failed;
        }
        kmxlnd_data.kmx_init = MXLND_INIT_TXS;
        /*****************************************************/

        ret = mxlnd_init_rxs();
        if (ret != 0) {
                CERROR("Can't alloc rx descs: %d\n", ret);
                goto failed;
        }
        kmxlnd_data.kmx_init = MXLND_INIT_RXS;
        /*****************************************************/

        ret = mxlnd_init_mx(ni);
        if (ret != 0) {
                CERROR("Can't init mx\n");
                goto failed;
        }
        kmxlnd_data.kmx_init = MXLND_INIT_MX;
        /*****************************************************/

        /* start threads */

        MXLND_ALLOC (kmxlnd_data.kmx_completions,
                     MXLND_NCOMPLETIONS * sizeof(struct completion));
        if (kmxlnd_data.kmx_completions == NULL) {
                CERROR("failed to alloc kmxlnd_data.kmx_completions\n");
                goto failed;
        }
        memset(kmxlnd_data.kmx_completions, 0,
               MXLND_NCOMPLETIONS * sizeof(struct completion));
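        /* the threads started below are *kmx_n_waitd request_waitd
         * threads plus mxlnd_tx_queued and mxlnd_timeoutd, so
         * MXLND_NCOMPLETIONS must evaluate to kmx_n_waitd + 2 for the
         * completion waits in mxlnd_shutdown() to line up */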
        if (MXLND_N_SCHED > *kmxlnd_tunables.kmx_n_waitd) {
                *kmxlnd_tunables.kmx_n_waitd = MXLND_N_SCHED;
        }
        CDEBUG(D_NET, "using %d %s in mx_wait_any()\n",
               *kmxlnd_tunables.kmx_n_waitd,
               *kmxlnd_tunables.kmx_n_waitd == 1 ? "thread" : "threads");

        for (i = 0; i < *kmxlnd_tunables.kmx_n_waitd; i++) {
                ret = mxlnd_thread_start(mxlnd_request_waitd, (void*)((long)i));
                if (ret < 0) {
                        CERROR("Starting mxlnd_request_waitd[%d] failed with %d\n", i, ret);
                        /* tell the threads already started to exit, then
                         * wake and reap them */
                        kmxlnd_data.kmx_shutdown = 1;
                        mx_wakeup(kmxlnd_data.kmx_endpt);
                        for (--i; i >= 0; i--) {
                                wait_for_completion(&kmxlnd_data.kmx_completions[i]);
                        }
                        LASSERT(atomic_read(&kmxlnd_data.kmx_nthreads) == 0);
                        MXLND_FREE(kmxlnd_data.kmx_completions,
                                   MXLND_NCOMPLETIONS * sizeof(struct completion));
                        goto failed;
                }
        }
        ret = mxlnd_thread_start(mxlnd_tx_queued, (void*)((long)i++));
        if (ret < 0) {
                CERROR("Starting mxlnd_tx_queued failed with %d\n", ret);
                kmxlnd_data.kmx_shutdown = 1;
                mx_wakeup(kmxlnd_data.kmx_endpt);
                /* i was post-incremented past the thread that failed to
                 * start; only reap the threads that are running */
                for (i -= 2; i >= 0; i--) {
                        wait_for_completion(&kmxlnd_data.kmx_completions[i]);
                }
                LASSERT(atomic_read(&kmxlnd_data.kmx_nthreads) == 0);
                MXLND_FREE(kmxlnd_data.kmx_completions,
                           MXLND_NCOMPLETIONS * sizeof(struct completion));
                goto failed;
        }
        ret = mxlnd_thread_start(mxlnd_timeoutd, (void*)((long)i++));
        if (ret < 0) {
                CERROR("Starting mxlnd_timeoutd failed with %d\n", ret);
                kmxlnd_data.kmx_shutdown = 1;
                mx_wakeup(kmxlnd_data.kmx_endpt);
                up(&kmxlnd_data.kmx_tx_queue_sem);
                for (i -= 2; i >= 0; i--) {
                        wait_for_completion(&kmxlnd_data.kmx_completions[i]);
                }
                LASSERT(atomic_read(&kmxlnd_data.kmx_nthreads) == 0);
                MXLND_FREE(kmxlnd_data.kmx_completions,
                           MXLND_NCOMPLETIONS * sizeof(struct completion));
                goto failed;
        }
        kmxlnd_data.kmx_init = MXLND_INIT_THREADS;
        /*****************************************************/

        kmxlnd_data.kmx_init = MXLND_INIT_ALL;
        CDEBUG(D_MALLOC, "startup complete (kmx_mem_used %ld)\n", kmxlnd_data.kmx_mem_used);

        return 0;
failed:
        CERROR("mxlnd_startup failed\n");
        mxlnd_shutdown(ni);
        return (-ENETDOWN);
}
static int mxlnd_init(void)
{
        lnet_register_lnd(&the_kmxlnd);
        return 0;
}

static void mxlnd_exit(void)
{
        lnet_unregister_lnd(&the_kmxlnd);
        return;
}
module_init(mxlnd_init);
module_exit(mxlnd_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Myricom, Inc. - help@myri.com");
MODULE_DESCRIPTION("Kernel MyrinetExpress LND");
MODULE_VERSION("0.5.0");