1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (C) 2004 Cluster File Systems, Inc.
5 * Author: Eric Barton <eric@bartonsoftware.com>
6 * Copyright (C) 2006 Myricom, Inc.
7 * Author: Scott Atchley <atchley at myri.com>
9 * This file is part of Lustre, http://www.lustre.org.
11 * Lustre is free software; you can redistribute it and/or
12 * modify it under the terms of version 2 of the GNU General Public
13 * License as published by the Free Software Foundation.
15 * Lustre is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
20 * You should have received a copy of the GNU General Public License
21 * along with Lustre; if not, write to the Free Software
22 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
29 #include <linux/config.h>
30 #include <linux/module.h> /* module */
31 #include <linux/kernel.h> /* module */
33 #include <linux/string.h>
34 #include <linux/stat.h>
35 #include <linux/errno.h>
36 #include <linux/smp_lock.h>
37 #include <linux/unistd.h>
38 #include <linux/uio.h>
40 #include <asm/system.h>
41 #include <asm/uaccess.h>
44 #include <linux/init.h> /* module */
46 #include <linux/file.h>
47 #include <linux/stat.h>
48 #include <linux/list.h>
49 #include <linux/kmod.h>
50 #include <linux/sysctl.h>
51 #include <linux/random.h>
52 #include <linux/utsname.h>
57 #include <linux/netdevice.h> /* these are needed for ARP */
58 #include <linux/if_arp.h>
60 #include <linux/inetdevice.h>
62 #define DEBUG_SUBSYSTEM S_LND
64 #include "libcfs/kp30.h"
65 #include "lnet/lnet.h"
66 #include "lnet/lib-lnet.h"
69 #include "mx_extensions.h"
70 #include "myriexpress.h"
/* An LNET fragment list must fit into a single MX segment list */
#if LNET_MAX_IOV > MX_MAX_SEGMENTS
#error LNET_MAX_IOV is greater than MX_MAX_SEGMENTS
#endif
/* Using MX's 64 match bits
 * We are using the match bits to specify message type and the cookie. The
 * highest four bits (60-63) are reserved for message type. Below we specify
 * the types. MXLND_MASK_ICON_REQ and MXLND_MASK_ICON_ACK are used for
 * mx_iconnect(). We reserve the remaining combinations for future use. The
 * next 8 bits (52-59) are reserved for returning a status code for failed
 * GET_DATA (payload) messages. The last 52 bits are used for cookies. That
 * should allow unique cookies for 4 KB messages at 10 Gbps line rate without
 * rollover for about 8 years. That should be enough. */

#define MXLND_MASK_ICON_REQ (0xBLL << 60) /* it is a mx_iconnect() completion */
#define MXLND_MASK_CONN_REQ (0xCLL << 60) /* CONN_REQ msg */
#define MXLND_MASK_ICON_ACK (0x9LL << 60) /* it is a mx_iconnect() completion */
#define MXLND_MASK_CONN_ACK (0xALL << 60) /* CONN_ACK msg */
#define MXLND_MASK_EAGER (0xELL << 60) /* EAGER msg */
#define MXLND_MASK_NOOP (0x1LL << 60) /* NOOP msg */
#define MXLND_MASK_PUT_REQ (0x2LL << 60) /* PUT_REQ msg */
#define MXLND_MASK_PUT_ACK (0x3LL << 60) /* PUT_ACK msg */
#define MXLND_MASK_PUT_DATA (0x4LL << 60) /* PUT_DATA msg */
#define MXLND_MASK_GET_REQ (0x5LL << 60) /* GET_REQ msg */
#define MXLND_MASK_GET_DATA (0x6LL << 60) /* GET_DATA msg */
//#define MXLND_MASK_NAK (0x7LL << 60) /* NAK msg */

#define MXLND_MAX_COOKIE ((1LL << 52) - 1) /* when to roll-over the cookie value */
/* NOTE: MXLND_N_SCHED is #defined further down; macros expand at their point
 * of use, so this forward reference is harmless. */
#define MXLND_NCOMPLETIONS (MXLND_N_SCHED + 2) /* max threads for completion array */
/* defaults for configurable parameters */
#define MXLND_N_SCHED 1 /* # schedulers (mx_wait_any() threads) */
#define MXLND_MX_BOARD 0 /* Use the first MX NIC if more than 1 avail */
#define MXLND_MX_EP_ID 3 /* MX endpoint ID */
#define MXLND_COMM_TIMEOUT (20 * HZ) /* timeout for send/recv (jiffies) */
#define MXLND_WAIT_TIMEOUT HZ /* timeout for wait (jiffies) */
#define MXLND_POLLING 0 /* poll iterations before blocking */
#define MXLND_MAX_PEERS 1024 /* number of nodes talking to me */
#define MXLND_EAGER_NUM MXLND_MAX_PEERS /* number of pre-posted receives */
#define MXLND_EAGER_SIZE PAGE_SIZE /* pre-posted eager message size */
#define MXLND_MSG_QUEUE_DEPTH 8 /* msg queue depth */
#define MXLND_CREDIT_HIGHWATER (MXLND_MSG_QUEUE_DEPTH - 2)
                              /* when to send a noop to return credits */
#define MXLND_NTX 256 /* # of kmx_tx - total sends in flight
                         1/2 are reserved for connect messages */

#define MXLND_HASH_BITS 6 /* the number of bits to hash over */
#define MXLND_HASH_SIZE (1<<MXLND_HASH_BITS)
                              /* number of peer lists for lookup.
                                 we hash over the last N bits of
                                 the IP address converted to an int. */
#define MXLND_HASH_MASK (MXLND_HASH_SIZE - 1)
                              /* ensure we use only the last N bits */

/* debugging features */
#define MXLND_CKSUM 0 /* checksum kmx_msg_t */
#define MXLND_DEBUG 0 /* turn on printk()s - presumably selects the
                         MXLND_PRINT implementation below; confirm */
/* sink for MXLND_PRINT() when debug output is compiled out */
extern inline void mxlnd_noop(char *s, ...);

/* NOTE(review): the listing carried two unconditional, conflicting
 * definitions of MXLND_PRINT; the conditional below restores the obvious
 * intent (debug build prints, release build is a no-op). */
#if MXLND_DEBUG
#define MXLND_PRINT printk
#else
#define MXLND_PRINT mxlnd_noop
#endif
/* provide wrappers around LIBCFS_ALLOC/FREE to keep MXLND specific
 * memory usage stats that include pages */

/* NOTE(review): the do/while(0) wrapper and the failure-rollback branch were
 * garbled in the source listing; restored assuming kmx_mem_used is rolled
 * back only when LIBCFS_ALLOC() leaves x == NULL - confirm against original. */
#define MXLND_ALLOC(x, size)                                            \
        do {                                                            \
                spin_lock(&kmxlnd_data.kmx_global_lock);                \
                kmxlnd_data.kmx_mem_used += size;                       \
                spin_unlock(&kmxlnd_data.kmx_global_lock);              \
                LIBCFS_ALLOC(x, size);                                  \
                if ((x) == NULL) {                                      \
                        /* allocation failed - undo the accounting */   \
                        spin_lock(&kmxlnd_data.kmx_global_lock);        \
                        kmxlnd_data.kmx_mem_used -= size;               \
                        spin_unlock(&kmxlnd_data.kmx_global_lock);      \
                }                                                       \
        } while (0)
/* free memory obtained with MXLND_ALLOC() and decrement the usage stat
 * (do/while(0) wrapper restored - it was garbled in the source listing) */
#define MXLND_FREE(x, size)                                     \
        do {                                                    \
                spin_lock(&kmxlnd_data.kmx_global_lock);        \
                kmxlnd_data.kmx_mem_used -= size;               \
                spin_unlock(&kmxlnd_data.kmx_global_lock);      \
                LIBCFS_FREE(x, size);                           \
        } while (0)
/* runtime-configurable parameters, held by reference
 * (closing "} kmx_tunables_t;" restored - missing in the source listing;
 * the typedef name is grounded by the extern declaration below) */
typedef struct kmx_tunables {
        int     *kmx_n_waitd;    /* # completion threads */
        int     *kmx_max_peers;  /* max # of potential peers */
        int     *kmx_cksum;      /* checksum small msgs? */
        int     *kmx_ntx;        /* total # of tx (1/2 for LNET, 1/2 for CONN_REQ) */
        int     *kmx_credits;    /* concurrent sends to 1 peer */
        int     *kmx_board;      /* MX board (NIC) number */
        int     *kmx_ep_id;      /* MX endpoint number */
        int     *kmx_polling;    /* if 0, block. if > 0, poll this many
                                    iterations before blocking */
        char   **kmx_hosts;      /* location of hosts file, if used */
} kmx_tunables_t;
176 /* structure to hold IP-to-hostname resolution data */
178 struct kmx_peer *mxh_peer; /* pointer to matching peer */
179 u32 mxh_addr; /* IP address as int */
180 char *mxh_hostname; /* peer's hostname */
181 u32 mxh_board; /* peer's board rank */
182 u32 mxh_ep_id; /* peer's MX endpoint ID */
183 struct list_head mxh_list; /* position on kmx_hosts */
184 spinlock_t mxh_lock; /* lock */
187 /* global interface state */
188 typedef struct kmx_data
190 int kmx_init; /* initialization state */
191 int kmx_shutdown; /* shutting down? */
192 atomic_t kmx_nthreads; /* number of threads */
193 struct completion *kmx_completions; /* array of completion structs */
194 lnet_ni_t *kmx_ni; /* the LND instance */
195 u64 kmx_incarnation; /* my incarnation value - unused */
196 long kmx_mem_used; /* memory used */
197 struct kmx_host *kmx_localhost; /* pointer to my kmx_host info */
198 mx_endpoint_t kmx_endpt; /* the MX endpoint */
200 spinlock_t kmx_global_lock; /* global lock */
202 struct list_head kmx_conn_req; /* list of connection requests */
203 spinlock_t kmx_conn_lock; /* connection list lock */
204 struct semaphore kmx_conn_sem; /* semaphore for connection request list */
206 struct list_head kmx_hosts; /* host lookup info */
207 spinlock_t kmx_hosts_lock; /* hosts list lock */
209 struct list_head kmx_peers[MXLND_HASH_SIZE];
210 /* list of all known peers */
211 rwlock_t kmx_peers_lock; /* peer list rw lock */
212 atomic_t kmx_npeers; /* number of peers */
214 struct list_head kmx_txs; /* all tx descriptors */
215 struct list_head kmx_tx_idle; /* list of idle tx */
216 spinlock_t kmx_tx_idle_lock; /* lock for idle tx list */
217 s32 kmx_tx_used; /* txs in use */
218 u64 kmx_tx_next_cookie; /* unique id for tx */
219 struct list_head kmx_tx_queue; /* generic send queue */
220 spinlock_t kmx_tx_queue_lock; /* lock for generic sends */
221 struct semaphore kmx_tx_queue_sem; /* semaphore for tx queue */
223 struct list_head kmx_rxs; /* all rx descriptors */
224 spinlock_t kmx_rxs_lock; /* lock for rxs list */
225 struct list_head kmx_rx_idle; /* list of idle tx */
226 spinlock_t kmx_rx_idle_lock; /* lock for idle rx list */
/* startup/shutdown progress markers - presumably stored in
 * kmx_data.kmx_init ("initialization state"); verify against startup code */
#define MXLND_INIT_NOTHING 0 /* in the beginning, there was nothing... */
#define MXLND_INIT_DATA 1 /* main data structures created */
#define MXLND_INIT_TXS 2 /* tx descriptors created */
#define MXLND_INIT_RXS 3 /* initial rx descriptors created */
#define MXLND_INIT_MX 4 /* initiate MX library, open endpoint, get NIC id */
#define MXLND_INIT_THREADS 5 /* waitd, timeoutd, tx_queued threads */
#define MXLND_INIT_ALL 6 /* startup completed */
237 #include "mxlnd_wire.h"
/* descriptor direction - tx (send) or rx (receive)
 * NOTE(review): this enum's definition was missing from the listing; only
 * the tag is visible (struct kmx_ctx and mxlnd_ctx_alloc() use it). Values
 * assumed; only their distinctness is relied on here - confirm. */
enum kmx_req_type {
        MXLND_REQ_TX = 0,
        MXLND_REQ_RX = 1,
};

/* The life cycle of a request
 * (enum delimiters restored - missing in the source listing) */
enum kmx_req_state {
        MXLND_CTX_INIT      = 0, /* just created */
        MXLND_CTX_IDLE      = 1, /* available for use */
        MXLND_CTX_PREP      = 2, /* getting ready for send/recv */
        MXLND_CTX_PENDING   = 3, /* mx_isend() or mx_irecv() called */
        MXLND_CTX_COMPLETED = 4, /* cleaning up after completion or timeout */
        MXLND_CTX_CANCELED  = 5, /* timed out but still in ctx list */
};
254 /* Context Structure - generic tx/rx descriptor
255 * It represents the context (or state) of each send or receive request.
256 * In other LNDs, they have separate TX and RX descriptors and this replaces both.
258 * We will keep the these on the global kmx_rxs and kmx_txs lists for cleanup
259 * during shutdown(). We will move them between the rx/tx idle lists and the
260 * pending list which is monitored by mxlnd_timeoutd().
263 enum kmx_req_type mxc_type; /* TX or RX */
264 u64 mxc_incarnation; /* store the peer's incarnation here
265 to verify before changing flow
266 control credits after completion */
267 unsigned long mxc_deadline; /* request time out in absolute jiffies */
268 enum kmx_req_state mxc_state; /* what is the state of the request? */
269 struct list_head mxc_global_list; /* place on kmx_rxs or kmx_txs */
270 struct list_head mxc_list; /* place on rx/tx idle list, tx q, peer tx */
271 struct list_head mxc_rx_list; /* place on mxp_rx_posted list */
272 spinlock_t mxc_lock; /* lock */
274 lnet_nid_t mxc_nid; /* dst's NID if peer is not known */
275 struct kmx_peer *mxc_peer; /* owning peer */
276 struct kmx_conn *mxc_conn; /* owning conn */
277 struct kmx_msg *mxc_msg; /* msg hdr mapped to mxc_page */
278 struct page *mxc_page; /* buffer for eager msgs */
279 lnet_msg_t *mxc_lntmsg[2]; /* lnet msgs to finalize */
281 u8 mxc_msg_type; /* what type of message is this? */
282 u64 mxc_cookie; /* completion cookie */
283 u64 mxc_match; /* MX match info */
284 mx_ksegment_t mxc_seg; /* local MX ksegment for non-DATA */
285 mx_ksegment_t *mxc_seg_list; /* MX ksegment array for DATA */
286 int mxc_nseg; /* number of segments */
287 unsigned long mxc_pin_type; /* MX_PIN_KERNEL or MX_PIN_PHYSICAL */
288 u32 mxc_nob; /* number of bytes sent/received */
289 mx_request_t mxc_mxreq; /* MX request */
290 mx_status_t mxc_status; /* MX status */
291 s64 mxc_get; /* # of times returned from idle list */
292 s64 mxc_put; /* # of times returned from idle list */
/* connection status values, stored in kmx_conn.mxk_status
 * ("can we send messages? MXLND_CONN_*") */
#define MXLND_CONN_DISCONNECT -2 /* conn is being destroyed - do not add txs */
#define MXLND_CONN_FAIL -1 /* connect failed (bad handshake, unavail, etc.) */
#define MXLND_CONN_INIT 0 /* in the beginning, there was nothing... */
#define MXLND_CONN_REQ 1 /* a connection request message is needed */
#define MXLND_CONN_ACK 2 /* a connection ack is needed */
#define MXLND_CONN_WAIT 3 /* waiting for req or ack to complete */
#define MXLND_CONN_READY 4 /* ready to send */
303 /* connection state - queues for queued and pending msgs */
306 u64 mxk_incarnation; /* connections's incarnation value */
307 atomic_t mxk_refcount; /* reference counting */
309 struct kmx_peer *mxk_peer; /* owning peer */
310 mx_endpoint_addr_t mxk_epa; /* peer's endpoint address */
312 struct list_head mxk_list; /* for placing on mxp_conns */
313 spinlock_t mxk_lock; /* lock */
314 unsigned long mxk_timeout; /* expiration of oldest pending tx/rx */
315 unsigned long mxk_last_tx; /* when last tx completed with success */
316 unsigned long mxk_last_rx; /* when last rx completed */
318 int mxk_credits; /* # of my credits for sending to peer */
319 int mxk_outstanding; /* # of credits to return */
321 int mxk_status; /* can we send messages? MXLND_CONN_* */
322 struct list_head mxk_tx_credit_queue; /* send queue for peer */
323 struct list_head mxk_tx_free_queue; /* send queue for peer */
324 int mxk_ntx_msgs; /* # of msgs on tx queues */
325 int mxk_ntx_data ; /* # of DATA on tx queues */
326 int mxk_ntx_posted; /* # of tx msgs in flight */
327 int mxk_data_posted; /* # of tx data payloads in flight */
329 struct list_head mxk_pending; /* in flight rxs and txs */
335 lnet_nid_t mxp_nid; /* peer's LNET NID */
336 u64 mxp_incarnation; /* peer's incarnation value */
337 atomic_t mxp_refcount; /* reference counts */
339 struct kmx_host *mxp_host; /* peer lookup info */
340 u64 mxp_nic_id; /* remote's MX nic_id for mx_connect() */
342 struct list_head mxp_peers; /* for placing on kmx_peers */
343 spinlock_t mxp_lock; /* lock */
345 struct list_head mxp_conns; /* list of connections */
346 struct kmx_conn *mxp_conn; /* current connection */
348 unsigned long mxp_reconnect_time; /* when to retry connect */
349 int mxp_incompatible; /* incorrect conn_req values */
/* module-wide state and tunables (defined in a .c file elsewhere) */
extern kmx_data_t kmxlnd_data;
extern kmx_tunables_t kmxlnd_tunables;

/* required for the LNET API */
int mxlnd_startup(lnet_ni_t *ni);
void mxlnd_shutdown(lnet_ni_t *ni);
int mxlnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg);
int mxlnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg);
int mxlnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
               unsigned int niov, struct iovec *iov, lnet_kiov_t *kiov,
               unsigned int offset, unsigned int mlen, unsigned int rlen);

/* ctx (tx/rx descriptor) helpers and NID <-> MX nic_id translation */
extern void mxlnd_thread_stop(long id);
extern int mxlnd_ctx_alloc(struct kmx_ctx **ctxp, enum kmx_req_type type);
extern void mxlnd_ctx_free(struct kmx_ctx *ctx);
extern void mxlnd_ctx_init(struct kmx_ctx *ctx);
extern lnet_nid_t mxlnd_nic_id2nid(lnet_ni_t *ni, u64 nic_id);
extern u64 mxlnd_nid2nic_id(lnet_nid_t nid);

/* MX receive handlers, object destructors, and thread entry points
 * (waitd/timeoutd/tx_queued threads - see MXLND_INIT_THREADS above) */
void mxlnd_eager_recv(void *context, uint64_t match_value, uint32_t length);
extern mx_unexp_handler_action_t mxlnd_unexpected_recv(void *context,
                mx_endpoint_addr_t source, uint64_t match_value, uint32_t length,
                void *data_if_available);
extern void mxlnd_peer_free(struct kmx_peer *peer);
extern void mxlnd_conn_free(struct kmx_conn *conn);
extern void mxlnd_sleep(unsigned long timeout);
extern int mxlnd_tx_queued(void *arg);
extern void mxlnd_handle_rx_completion(struct kmx_ctx *rx);
extern int mxlnd_check_sends(struct kmx_peer *peer);
extern int mxlnd_tx_peer_queued(void *arg);
extern int mxlnd_request_waitd(void *arg);
extern int mxlnd_unex_recvd(void *arg);
extern int mxlnd_timeoutd(void *arg);
extern int mxlnd_connd(void *arg);
/* take a reference on peer; the caller must already hold one
 * (do/while(0) wrapper restored so the macro is safe as a single statement) */
#define mxlnd_peer_addref(peer)                                 \
do {                                                            \
        LASSERT(atomic_read(&(peer)->mxp_refcount) > 0);        \
        atomic_inc(&(peer)->mxp_refcount);                      \
} while (0)
/* drop a reference on peer; frees the peer when the count reaches zero
 * (do/while(0) wrapper restored so the macro is safe as a single statement) */
#define mxlnd_peer_decref(peer)                                 \
do {                                                            \
        LASSERT(atomic_read(&(peer)->mxp_refcount) > 0);        \
        if (atomic_dec_and_test(&(peer)->mxp_refcount))         \
                mxlnd_peer_free(peer);                          \
} while (0)
/* take a reference on conn; the caller must already hold one
 * (do/while(0) wrapper restored so the macro is safe as a single statement) */
#define mxlnd_conn_addref(conn)                                 \
do {                                                            \
        LASSERT(atomic_read(&(conn)->mxk_refcount) > 0);        \
        atomic_inc(&(conn)->mxk_refcount);                      \
} while (0)
/* drop a reference on conn; frees the conn when the count reaches zero
 * (do/while(0) wrapper restored so the macro is safe as a single statement) */
#define mxlnd_conn_decref(conn)                                 \
do {                                                            \
        LASSERT(atomic_read(&(conn)->mxk_refcount) > 0);        \
        if (atomic_dec_and_test(&(conn)->mxk_refcount))         \
                mxlnd_conn_free(conn);                          \
} while (0)