1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (C) 2004 Cluster File Systems, Inc.
5 * Author: Eric Barton <eric@bartonsoftware.com>
6 * Copyright (C) 2006 Myricom, Inc.
7 * Author: Scott Atchley <atchley at myri.com>
9 * This file is part of Lustre, http://www.lustre.org.
11 * Lustre is free software; you can redistribute it and/or
12 * modify it under the terms of version 2 of the GNU General Public
13 * License as published by the Free Software Foundation.
15 * Lustre is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
20 * You should have received a copy of the GNU General Public License
21 * along with Lustre; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
28 #ifndef AUTOCONF_INCLUDED
29 #include <linux/config.h>
31 #include <linux/module.h> /* module */
32 #include <linux/kernel.h> /* module */
34 #include <linux/string.h>
35 #include <linux/stat.h>
36 #include <linux/errno.h>
37 #include <linux/smp_lock.h>
38 #include <linux/unistd.h>
39 #include <linux/uio.h>
41 #include <asm/system.h>
42 #include <asm/uaccess.h>
45 #include <linux/init.h> /* module */
47 #include <linux/file.h>
48 #include <linux/stat.h>
49 #include <linux/list.h>
50 #include <linux/kmod.h>
51 #include <linux/sysctl.h>
52 #include <linux/random.h>
53 #include <linux/utsname.h>
58 #include <linux/netdevice.h> /* these are needed for ARP */
59 #include <linux/if_arp.h>
61 #include <linux/inetdevice.h>
63 #define DEBUG_SUBSYSTEM S_LND
65 #include "libcfs/kp30.h"
66 #include "lnet/lnet.h"
67 #include "lnet/lib-lnet.h"
70 #include "mx_extensions.h"
71 #include "myriexpress.h"
/* Sanity check: every LNET IOV fragment must map onto one MX segment,
 * so the LNET limit may not exceed the MX limit. */
#if LNET_MAX_IOV > MX_MAX_SEGMENTS
#error LNET_MAX_IOV is greater than MX_MAX_SEGMENTS
#endif
77 /* Using MX's 64 match bits
78 * We are using the match bits to specify message type and the cookie. The
79 * highest four bits (60-63) are reserved for message type. Below we specify
80 * the types. MXLND_MASK_ICON_REQ and MXLND_MASK_ICON_ACK are used for
81 * mx_iconnect(). We reserve the remaining combinations for future use. The
82 * next 8 bits (52-59) are reserved for returning a status code for failed
83 * GET_DATA (payload) messages. The last 52 bits are used for cookies. That
84 * should allow unique cookies for 4 KB messages at 10 Gbps line rate without
85 * rollover for about 8 years. That should be enough. */
/* Message type lives in match bits 60-63 (see the comment block above). */
#define MXLND_MASK_ICON_REQ (0xBLL << 60) /* it is a mx_iconnect() completion */
#define MXLND_MASK_CONN_REQ (0xCLL << 60) /* CONN_REQ msg */
#define MXLND_MASK_ICON_ACK (0x9LL << 60) /* it is a mx_iconnect() completion */
#define MXLND_MASK_CONN_ACK (0xALL << 60) /* CONN_ACK msg */
#define MXLND_MASK_EAGER (0xELL << 60) /* EAGER msg */
#define MXLND_MASK_NOOP (0x1LL << 60) /* NOOP msg */
#define MXLND_MASK_PUT_REQ (0x2LL << 60) /* PUT_REQ msg */
#define MXLND_MASK_PUT_ACK (0x3LL << 60) /* PUT_ACK msg */
#define MXLND_MASK_PUT_DATA (0x4LL << 60) /* PUT_DATA msg */
#define MXLND_MASK_GET_REQ (0x5LL << 60) /* GET_REQ msg */
#define MXLND_MASK_GET_DATA (0x6LL << 60) /* GET_DATA msg */
//#define MXLND_MASK_NAK (0x7LL << 60) /* NAK msg - reserved, unused */

/* cookies occupy bits 0-51; roll over before colliding with the status bits */
#define MXLND_MAX_COOKIE ((1LL << 52) - 1) /* when to roll-over the cookie value */
#define MXLND_NCOMPLETIONS (MXLND_N_SCHED + 2) /* max threads for completion array */
/* defaults for configurable parameters (overridable via kmx_tunables) */
#define MXLND_N_SCHED 1 /* # schedulers (mx_wait_any() threads) */
#define MXLND_MX_BOARD 0 /* Use the first MX NIC if more than 1 avail */
#define MXLND_MX_EP_ID 3 /* MX endpoint ID */
#define MXLND_COMM_TIMEOUT (20 * HZ) /* timeout for send/recv (jiffies) */
#define MXLND_WAIT_TIMEOUT HZ /* timeout for wait (jiffies) */
#define MXLND_POLLING 0 /* poll iterations before blocking; 0 = always block */
#define MXLND_MAX_PEERS 1024 /* number of nodes talking to me */
#define MXLND_EAGER_NUM MXLND_MAX_PEERS /* number of pre-posted receives */
#define MXLND_EAGER_SIZE PAGE_SIZE /* pre-posted eager message size */
#define MXLND_MSG_QUEUE_DEPTH 8 /* msg queue depth */
#define MXLND_CREDIT_HIGHWATER (MXLND_MSG_QUEUE_DEPTH - 2)
/* when to send a noop to return credits */
#define MXLND_NTX 256 /* # of kmx_tx - total sends in flight;
                         1/2 are reserved for connect messages */

#define MXLND_HASH_BITS 6 /* the number of bits to hash over */
#define MXLND_HASH_SIZE (1<<MXLND_HASH_BITS)
/* number of peer lists for lookup.
   we hash over the last N bits of
   the IP address converted to an int. */
#define MXLND_HASH_MASK (MXLND_HASH_SIZE - 1)
/* ensure we use only the last N bits */
/* debugging features */
#define MXLND_CKSUM 0 /* checksum kmx_msg_t */
#define MXLND_DEBUG 0 /* turn on printk()s */

/* no-op sink for debug output when MXLND_DEBUG is off */
extern inline void mxlnd_noop(char *s, ...);

/* The two MXLND_PRINT definitions must be mutually exclusive; select
 * based on MXLND_DEBUG instead of redefining the macro. */
#if MXLND_DEBUG
#define MXLND_PRINT printk
#else
#define MXLND_PRINT mxlnd_noop
#endif
/* provide wrappers around LIBCFS_ALLOC/FREE to keep MXLND specific
 * memory usage stats that include pages */

/* Account the allocation up front; if LIBCFS_ALLOC() fails (leaves x NULL),
 * roll the accounting back.  Multi-statement macro wrapped in do/while(0)
 * so it behaves as a single statement. */
#define MXLND_ALLOC(x, size) \
        do { \
                spin_lock(&kmxlnd_data.kmx_global_lock); \
                kmxlnd_data.kmx_mem_used += size; \
                spin_unlock(&kmxlnd_data.kmx_global_lock); \
                LIBCFS_ALLOC(x, size); \
                if ((x) == NULL) { \
                        /* allocation failed - undo the accounting */ \
                        spin_lock(&kmxlnd_data.kmx_global_lock); \
                        kmxlnd_data.kmx_mem_used -= size; \
                        spin_unlock(&kmxlnd_data.kmx_global_lock); \
                } \
        } while (0)
/* Release memory obtained via MXLND_ALLOC and subtract it from the
 * usage stats.  Wrapped in do/while(0) for single-statement semantics. */
#define MXLND_FREE(x, size) \
        do { \
                spin_lock(&kmxlnd_data.kmx_global_lock); \
                kmxlnd_data.kmx_mem_used -= size; \
                spin_unlock(&kmxlnd_data.kmx_global_lock); \
                LIBCFS_FREE(x, size); \
        } while (0)
/* Module tunables; each field points at the module-parameter storage.
 * The typedef name kmx_tunables_t is required by the extern declaration
 * of kmxlnd_tunables below. */
typedef struct kmx_tunables {
        int *kmx_n_waitd;       /* # completion threads */
        int *kmx_max_peers;     /* max # of potential peers */
        int *kmx_cksum;         /* checksum small msgs? */
        int *kmx_ntx;           /* total # of tx (1/2 for LNET, 1/2 for CONN_REQ) */
        int *kmx_credits;       /* concurrent sends to 1 peer */
        int *kmx_board;         /* MX board (NIC) number */
        int *kmx_ep_id;         /* MX endpoint number */
        int *kmx_polling;       /* if 0, block. if > 0, poll this many
                                   iterations before blocking */
        char **kmx_hosts;       /* Location of hosts file, if used */
} kmx_tunables_t;
177 /* structure to hold IP-to-hostname resolution data */
179 struct kmx_peer *mxh_peer; /* pointer to matching peer */
180 u32 mxh_addr; /* IP address as int */
181 char *mxh_hostname; /* peer's hostname */
182 u32 mxh_board; /* peer's board rank */
183 u32 mxh_ep_id; /* peer's MX endpoint ID */
184 struct list_head mxh_list; /* position on kmx_hosts */
185 spinlock_t mxh_lock; /* lock */
188 /* global interface state */
189 typedef struct kmx_data
191 int kmx_init; /* initialization state */
192 int kmx_shutdown; /* shutting down? */
193 atomic_t kmx_nthreads; /* number of threads */
194 struct completion *kmx_completions; /* array of completion structs */
195 lnet_ni_t *kmx_ni; /* the LND instance */
196 u64 kmx_incarnation; /* my incarnation value - unused */
197 long kmx_mem_used; /* memory used */
198 struct kmx_host *kmx_localhost; /* pointer to my kmx_host info */
199 mx_endpoint_t kmx_endpt; /* the MX endpoint */
201 spinlock_t kmx_global_lock; /* global lock */
203 struct list_head kmx_conn_req; /* list of connection requests */
204 spinlock_t kmx_conn_lock; /* connection list lock */
205 struct semaphore kmx_conn_sem; /* semaphore for connection request list */
207 struct list_head kmx_hosts; /* host lookup info */
208 spinlock_t kmx_hosts_lock; /* hosts list lock */
210 struct list_head kmx_peers[MXLND_HASH_SIZE];
211 /* list of all known peers */
212 rwlock_t kmx_peers_lock; /* peer list rw lock */
213 atomic_t kmx_npeers; /* number of peers */
215 struct list_head kmx_txs; /* all tx descriptors */
216 struct list_head kmx_tx_idle; /* list of idle tx */
217 spinlock_t kmx_tx_idle_lock; /* lock for idle tx list */
218 s32 kmx_tx_used; /* txs in use */
219 u64 kmx_tx_next_cookie; /* unique id for tx */
220 struct list_head kmx_tx_queue; /* generic send queue */
221 spinlock_t kmx_tx_queue_lock; /* lock for generic sends */
222 struct semaphore kmx_tx_queue_sem; /* semaphore for tx queue */
224 struct list_head kmx_rxs; /* all rx descriptors */
225 spinlock_t kmx_rxs_lock; /* lock for rxs list */
226 struct list_head kmx_rx_idle; /* list of idle tx */
227 spinlock_t kmx_rx_idle_lock; /* lock for idle rx list */
/* values of kmx_data.kmx_init - startup/teardown progress checkpoints */
#define MXLND_INIT_NOTHING 0 /* in the beginning, there was nothing... */
#define MXLND_INIT_DATA 1 /* main data structures created */
#define MXLND_INIT_TXS 2 /* tx descriptors created */
#define MXLND_INIT_RXS 3 /* initial rx descriptors created */
#define MXLND_INIT_MX 4 /* initiate MX library, open endpoint, get NIC id */
#define MXLND_INIT_THREADS 5 /* waitd, timeoutd, tx_queued threads */
#define MXLND_INIT_ALL 6 /* startup completed */
238 #include "mxlnd_wire.h"
/* The life cycle of a request (tracked in kmx_ctx.mxc_state) */
enum kmx_req_state {
        MXLND_CTX_INIT = 0,     /* just created */
        MXLND_CTX_IDLE = 1,     /* available for use */
        MXLND_CTX_PREP = 2,     /* getting ready for send/recv */
        MXLND_CTX_PENDING = 3,  /* mx_isend() or mx_irecv() called */
        MXLND_CTX_COMPLETED = 4, /* cleaning up after completion or timeout */
        MXLND_CTX_CANCELED = 5, /* timed out but still in ctx list */
};
255 /* Context Structure - generic tx/rx descriptor
256 * It represents the context (or state) of each send or receive request.
257 * In other LNDs, they have separate TX and RX descriptors and this replaces both.
259 * We will keep the these on the global kmx_rxs and kmx_txs lists for cleanup
260 * during shutdown(). We will move them between the rx/tx idle lists and the
261 * pending list which is monitored by mxlnd_timeoutd().
264 enum kmx_req_type mxc_type; /* TX or RX */
265 u64 mxc_incarnation; /* store the peer's incarnation here
266 to verify before changing flow
267 control credits after completion */
268 unsigned long mxc_deadline; /* request time out in absolute jiffies */
269 enum kmx_req_state mxc_state; /* what is the state of the request? */
270 struct list_head mxc_global_list; /* place on kmx_rxs or kmx_txs */
271 struct list_head mxc_list; /* place on rx/tx idle list, tx q, peer tx */
272 struct list_head mxc_rx_list; /* place on mxp_rx_posted list */
273 spinlock_t mxc_lock; /* lock */
275 lnet_nid_t mxc_nid; /* dst's NID if peer is not known */
276 struct kmx_peer *mxc_peer; /* owning peer */
277 struct kmx_conn *mxc_conn; /* owning conn */
278 struct kmx_msg *mxc_msg; /* msg hdr mapped to mxc_page */
279 struct page *mxc_page; /* buffer for eager msgs */
280 lnet_msg_t *mxc_lntmsg[2]; /* lnet msgs to finalize */
282 u8 mxc_msg_type; /* what type of message is this? */
283 u64 mxc_cookie; /* completion cookie */
284 u64 mxc_match; /* MX match info */
285 mx_ksegment_t mxc_seg; /* local MX ksegment for non-DATA */
286 mx_ksegment_t *mxc_seg_list; /* MX ksegment array for DATA */
287 int mxc_nseg; /* number of segments */
288 unsigned long mxc_pin_type; /* MX_PIN_KERNEL or MX_PIN_PHYSICAL */
289 u32 mxc_nob; /* number of bytes sent/received */
290 mx_request_t mxc_mxreq; /* MX request */
291 mx_status_t mxc_status; /* MX status */
292 s64 mxc_get; /* # of times returned from idle list */
293 s64 mxc_put; /* # of times returned from idle list */
/* values of kmx_conn.mxk_status */
#define MXLND_CONN_DISCONNECT -2 /* conn is being destroyed - do not add txs */
#define MXLND_CONN_FAIL -1 /* connect failed (bad handshake, unavail, etc.) */
#define MXLND_CONN_INIT 0 /* in the beginning, there was nothing... */
#define MXLND_CONN_REQ 1 /* a connection request message is needed */
#define MXLND_CONN_ACK 2 /* a connection ack is needed */
#define MXLND_CONN_WAIT 3 /* waiting for req or ack to complete */
#define MXLND_CONN_READY 4 /* ready to send */
304 /* connection state - queues for queued and pending msgs */
307 u64 mxk_incarnation; /* connections's incarnation value */
308 atomic_t mxk_refcount; /* reference counting */
310 struct kmx_peer *mxk_peer; /* owning peer */
311 mx_endpoint_addr_t mxk_epa; /* peer's endpoint address */
313 struct list_head mxk_list; /* for placing on mxp_conns */
314 spinlock_t mxk_lock; /* lock */
315 unsigned long mxk_timeout; /* expiration of oldest pending tx/rx */
316 unsigned long mxk_last_tx; /* when last tx completed with success */
317 unsigned long mxk_last_rx; /* when last rx completed */
319 int mxk_credits; /* # of my credits for sending to peer */
320 int mxk_outstanding; /* # of credits to return */
322 int mxk_status; /* can we send messages? MXLND_CONN_* */
323 struct list_head mxk_tx_credit_queue; /* send queue for peer */
324 struct list_head mxk_tx_free_queue; /* send queue for peer */
325 int mxk_ntx_msgs; /* # of msgs on tx queues */
326 int mxk_ntx_data ; /* # of DATA on tx queues */
327 int mxk_ntx_posted; /* # of tx msgs in flight */
328 int mxk_data_posted; /* # of tx data payloads in flight */
330 struct list_head mxk_pending; /* in flight rxs and txs */
336 lnet_nid_t mxp_nid; /* peer's LNET NID */
337 u64 mxp_incarnation; /* peer's incarnation value */
338 atomic_t mxp_refcount; /* reference counts */
340 struct kmx_host *mxp_host; /* peer lookup info */
341 u64 mxp_nic_id; /* remote's MX nic_id for mx_connect() */
343 struct list_head mxp_peers; /* for placing on kmx_peers */
344 spinlock_t mxp_lock; /* lock */
346 struct list_head mxp_conns; /* list of connections */
347 struct kmx_conn *mxp_conn; /* current connection */
349 unsigned long mxp_reconnect_time; /* when to retry connect */
350 int mxp_incompatible; /* incorrect conn_req values */
353 extern kmx_data_t kmxlnd_data;
354 extern kmx_tunables_t kmxlnd_tunables;
356 /* required for the LNET API */
357 int mxlnd_startup(lnet_ni_t *ni);
358 void mxlnd_shutdown(lnet_ni_t *ni);
359 int mxlnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg);
360 int mxlnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg);
361 int mxlnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
362 unsigned int niov, struct iovec *iov, lnet_kiov_t *kiov,
363 unsigned int offset, unsigned int mlen, unsigned int rlen);
366 extern void mxlnd_thread_stop(long id);
367 extern int mxlnd_ctx_alloc(struct kmx_ctx **ctxp, enum kmx_req_type type);
368 extern void mxlnd_ctx_free(struct kmx_ctx *ctx);
369 extern void mxlnd_ctx_init(struct kmx_ctx *ctx);
370 extern lnet_nid_t mxlnd_nic_id2nid(lnet_ni_t *ni, u64 nic_id);
371 extern u64 mxlnd_nid2nic_id(lnet_nid_t nid);
374 void mxlnd_eager_recv(void *context, uint64_t match_value, uint32_t length);
375 extern mx_unexp_handler_action_t mxlnd_unexpected_recv(void *context,
376 mx_endpoint_addr_t source, uint64_t match_value, uint32_t length,
377 void *data_if_available);
378 extern void mxlnd_peer_free(struct kmx_peer *peer);
379 extern void mxlnd_conn_free(struct kmx_conn *conn);
380 extern void mxlnd_sleep(unsigned long timeout);
381 extern int mxlnd_tx_queued(void *arg);
382 extern void mxlnd_handle_rx_completion(struct kmx_ctx *rx);
383 extern int mxlnd_check_sends(struct kmx_peer *peer);
384 extern int mxlnd_tx_peer_queued(void *arg);
385 extern int mxlnd_request_waitd(void *arg);
386 extern int mxlnd_unex_recvd(void *arg);
387 extern int mxlnd_timeoutd(void *arg);
388 extern int mxlnd_connd(void *arg);
/* Take a reference on peer; the caller must already hold one
 * (asserted).  do/while(0) makes the macro a single statement. */
#define mxlnd_peer_addref(peer) \
        do { \
                LASSERT(atomic_read(&(peer)->mxp_refcount) > 0); \
                atomic_inc(&(peer)->mxp_refcount); \
        } while (0)
/* Drop a reference on peer; frees it when the count reaches zero. */
#define mxlnd_peer_decref(peer) \
        do { \
                LASSERT(atomic_read(&(peer)->mxp_refcount) > 0); \
                if (atomic_dec_and_test(&(peer)->mxp_refcount)) \
                        mxlnd_peer_free(peer); \
        } while (0)
/* Take a reference on conn; the caller must already hold one
 * (asserted). */
#define mxlnd_conn_addref(conn) \
        do { \
                LASSERT(atomic_read(&(conn)->mxk_refcount) > 0); \
                atomic_inc(&(conn)->mxk_refcount); \
        } while (0)
/* Drop a reference on conn; frees it when the count reaches zero. */
#define mxlnd_conn_decref(conn) \
        do { \
                LASSERT(atomic_read(&(conn)->mxk_refcount) > 0); \
                if (atomic_dec_and_test(&(conn)->mxk_refcount)) \
                        mxlnd_conn_free(conn); \
        } while (0)