/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (C) 2001 Cluster File Systems, Inc. <braam@clusterfs.com>
 *
 * This file is part of Lustre, http://www.lustre.org.
 *
 * Lustre is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Lustre is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Lustre; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Basic library routines.
 */
#ifndef _QSWNAL_H
#define _QSWNAL_H

#ifndef EXPORT_SYMTAB
# define EXPORT_SYMTAB
#endif

#include <qsnet/kernel.h>
#undef printf                                   /* nasty QSW #define */

#include <linux/config.h>
#include <linux/module.h>

#include <elan/epcomms.h>

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/errno.h>
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
#include <linux/locks.h>        /* wait_on_buffer */
#else
#include <linux/buffer_head.h>  /* wait_on_buffer */
#endif
#include <linux/unistd.h>
#include <linux/uio.h>

#include <asm/system.h>
#include <asm/uaccess.h>

#include <linux/file.h>
#include <linux/stat.h>
#include <linux/list.h>
#include <linux/sysctl.h>
#include <asm/segment.h>

#define DEBUG_SUBSYSTEM S_LND

#include <libcfs/kp30.h>
#include <lnet/lnet.h>
#include <lnet/lib-lnet.h>
/* Protocol/buffer sizing constants */
#define KQSW_SMALLMSG                   (4<<10) /* small/large ep receiver breakpoint */
#define KQSW_RESCHED                    100     /* # busy loops that forces scheduler to yield */

#define KQSW_CKSUM                      0       /* enable checksumming (protocol incompatible) */

/* The pre-allocated tx buffer (hdr + small payload) */
#define KQSW_TX_BUFFER_SIZE     (offsetof(kqswnal_msg_t, \
                                          kqm_u.immediate.kqim_payload[*kqswnal_tunables.kqn_tx_maxcontig]))

/* Reserve elan address space for pre-allocated and pre-mapped transmit
 * buffer and a full payload too.  Extra pages allow for page alignment */
#define KQSW_NTXMSGPAGES        (btopr(KQSW_TX_BUFFER_SIZE) + 1 + btopr(LNET_MAX_PAYLOAD) + 1)

/* receive hdr/payload always contiguous and page aligned */
#define KQSW_NRXMSGPAGES_SMALL  (btopr(KQSW_SMALLMSG))
#define KQSW_NRXMSGBYTES_SMALL  (KQSW_NRXMSGPAGES_SMALL * PAGE_SIZE)

/* receive hdr/payload always contiguous and page aligned */
#define KQSW_NRXMSGPAGES_LARGE  (btopr(sizeof(lnet_msg_t) + LNET_MAX_PAYLOAD))
#define KQSW_NRXMSGBYTES_LARGE  (KQSW_NRXMSGPAGES_LARGE * PAGE_SIZE)
/* biggest complete packet we can receive (or transmit)
 * NOTE(review): the macro this comment described appears to have been lost
 * in extraction (original line 95/96) — restore from upstream qswlnd.h */
97 /* Remote memory descriptor */
100 __u32 kqrmd_nfrag; /* # frags */
101 EP_NMD kqrmd_frag[0]; /* actual frags */
102 } kqswnal_remotemd_t;
107 lnet_hdr_t kqim_hdr; /* LNET header */
108 char kqim_payload[0]; /* piggy-backed payload */
109 } WIRE_ATTR kqswnal_immediate_msg_t;
114 lnet_hdr_t kqrm_hdr; /* LNET header */
115 kqswnal_remotemd_t kqrm_rmd; /* peer's buffer */
116 } WIRE_ATTR kqswnal_rdma_msg_t;
120 __u32 kqm_magic; /* I'm a qswlnd message */
121 __u16 kqm_version; /* this is my version number */
122 __u16 kqm_type; /* msg type */
124 __u32 kqm_cksum; /* crc32 checksum */
125 __u32 kqm_nob; /* original msg length */
128 kqswnal_immediate_msg_t immediate;
129 kqswnal_rdma_msg_t rdma;
131 } WIRE_ATTR kqswnal_msg_t;
#if KQSW_CKSUM                                  /* enable checksums ? */
# include <linux/crc32.h>
/* Fold [p, p+len) into the running checksum 'crc'.
 * Uses the kernel's little-endian CRC32; the #else branch is a simple
 * byte-sum fallback kept for reference/debugging. */
static inline __u32 kqswnal_csum(__u32 crc, unsigned char const *p, size_t len)
{
#if 1
        return crc32_le(crc, p, len);
#else
        while (len-- > 0)
                crc = ((crc + 0x100) & ~0xff) | ((crc + *p++) & 0xff) ;
        return crc;
#endif
}
# define QSWLND_PROTO_VERSION   0xbeef  /* checksummed protocol is wire-incompatible */
#else
# define QSWLND_PROTO_VERSION   1
#endif
/* Values for kqm_type */
#define QSWLND_MSG_IMMEDIATE    0       /* payload sent inline with the header */
#define QSWLND_MSG_RDMA         1       /* payload moved by RDMA */
154 EP_STATUSBLK ep_statusblk;
166 } kqswnal_rpc_reply_t;
168 typedef struct kqswnal_rx
170 struct list_head krx_list; /* enqueue -> thread */
171 struct kqswnal_rx *krx_alloclist; /* stack in kqn_rxds */
172 EP_RCVR *krx_eprx; /* port to post receives to */
173 EP_RXD *krx_rxd; /* receive descriptor (for repost) */
174 EP_NMD krx_elanbuffer; /* contiguous Elan buffer */
175 int krx_npages; /* # pages in receive buffer */
176 int krx_nob; /* Number Of Bytes received into buffer */
177 int krx_rpc_reply_needed:1; /* peer waiting for EKC RPC reply */
178 int krx_raw_lnet_hdr:1; /* msg is a raw lnet hdr (portals compatible) */
179 int krx_state; /* what this RX is doing */
180 atomic_t krx_refcount; /* how to tell when rpc is done */
182 __u32 krx_cksum; /* checksum */
184 kqswnal_rpc_reply_t krx_rpc_reply; /* rpc reply status block */
185 lnet_kiov_t krx_kiov[KQSW_NRXMSGPAGES_LARGE]; /* buffer frags */
/* Values for krx_state */
#define KRX_POSTED      1       /* receiving */
#define KRX_PARSE       2       /* ready to be parsed */
#define KRX_COMPLETING  3       /* waiting to be completed */
193 typedef struct kqswnal_tx
195 struct list_head ktx_list; /* enqueue idle/active */
196 struct list_head ktx_schedlist; /* enqueue on scheduler */
197 struct kqswnal_tx *ktx_alloclist; /* stack in kqn_txds */
198 unsigned int ktx_state:7; /* What I'm doing */
199 unsigned int ktx_firsttmpfrag:1; /* ktx_frags[0] is in my ebuffer ? 0 : 1 */
200 uint32_t ktx_basepage; /* page offset in reserved elan tx vaddrs for mapping pages */
201 int ktx_npages; /* pages reserved for mapping messages */
202 int ktx_nmappedpages; /* # pages mapped for current message */
203 int ktx_port; /* destination ep port */
204 lnet_nid_t ktx_nid; /* destination node */
205 void *ktx_args[3]; /* completion passthru */
206 char *ktx_buffer; /* pre-allocated contiguous buffer for hdr + small payloads */
207 unsigned long ktx_launchtime; /* when (in jiffies) the transmit was launched */
208 int ktx_status; /* completion status */
210 __u32 ktx_cksum; /* optimized GET payload checksum */
212 /* debug/info fields */
213 pid_t ktx_launcher; /* pid of launching process */
215 int ktx_nfrag; /* # message frags */
216 int ktx_rail; /* preferred rail */
217 EP_NMD ktx_ebuffer; /* elan mapping of ktx_buffer */
218 EP_NMD ktx_frags[EP_MAXFRAG];/* elan mapping of msg frags */
/* Values for ktx_state */
#define KTX_IDLE        0       /* on kqn_idletxds */
#define KTX_SENDING     1       /* normal send */
#define KTX_GETTING     2       /* sending optimised get */
#define KTX_PUTTING     3       /* sending optimised put */
#define KTX_RDMA_FETCH  4       /* handling optimised put */
#define KTX_RDMA_STORE  5       /* handling optimised get */
/* Module tunables: each member points at the corresponding module
 * parameter's storage */
typedef struct
{
        int             *kqn_tx_maxcontig;      /* maximum payload to defrag */
        int             *kqn_ntxmsgs;           /* # normal tx msgs */
        int             *kqn_credits;           /* # concurrent sends */
        int             *kqn_peercredits;       /* # concurrent sends to 1 peer */
        int             *kqn_nrxmsgs_large;     /* # 'large' rx msgs */
        int             *kqn_ep_envelopes_large; /* # 'large' rx ep envelopes */
        int             *kqn_nrxmsgs_small;     /* # 'small' rx msgs */
        int             *kqn_ep_envelopes_small; /* # 'small' rx ep envelopes */
        int             *kqn_optimized_puts;    /* optimized PUTs? */
        int             *kqn_optimized_gets;    /* optimized GETs? */
#if KQSW_CKSUM
        int             *kqn_inject_csum_error; /* # csum errors to inject */
#endif
#if CONFIG_SYSCTL && !CFS_SYSFS_MODULE_PARM
        struct ctl_table_header *kqn_sysctl;    /* sysctl interface */
#endif
} kqswnal_tunables_t;
251 char kqn_init; /* what's been initialised */
252 char kqn_shuttingdown; /* I'm trying to shut down */
253 atomic_t kqn_nthreads; /* # threads running */
254 lnet_ni_t *kqn_ni; /* _the_ instance of me */
256 kqswnal_rx_t *kqn_rxds; /* stack of all the receive descriptors */
257 kqswnal_tx_t *kqn_txds; /* stack of all the transmit descriptors */
259 struct list_head kqn_idletxds; /* transmit descriptors free to use */
260 struct list_head kqn_activetxds; /* transmit descriptors being used */
261 spinlock_t kqn_idletxd_lock; /* serialise idle txd access */
262 atomic_t kqn_pending_txs; /* # transmits being prepped */
264 spinlock_t kqn_sched_lock; /* serialise packet schedulers */
265 wait_queue_head_t kqn_sched_waitq; /* scheduler blocks here */
267 struct list_head kqn_readyrxds; /* rxds full of data */
268 struct list_head kqn_donetxds; /* completed transmits */
269 struct list_head kqn_delayedtxds; /* delayed transmits */
271 EP_SYS *kqn_ep; /* elan system */
272 EP_NMH *kqn_ep_tx_nmh; /* elan reserved tx vaddrs */
273 EP_NMH *kqn_ep_rx_nmh; /* elan reserved rx vaddrs */
274 EP_XMTR *kqn_eptx; /* elan transmitter */
275 EP_RCVR *kqn_eprx_small; /* elan receiver (small messages) */
276 EP_RCVR *kqn_eprx_large; /* elan receiver (large messages) */
278 int kqn_nnodes; /* this cluster's size */
279 int kqn_elanid; /* this nodes's elan ID */
281 EP_STATUSBLK kqn_rpc_success; /* preset RPC reply status blocks */
282 EP_STATUSBLK kqn_rpc_failed;
283 EP_STATUSBLK kqn_rpc_version; /* reply to future version query */
284 EP_STATUSBLK kqn_rpc_magic; /* reply to future version query */
/* Values for kqn_init */
#define KQN_INIT_NOTHING        0       /* MUST BE ZERO so zeroed state is initialised OK */
#define KQN_INIT_DATA           1
#define KQN_INIT_ALL            2
292 extern kqswnal_tunables_t kqswnal_tunables;
293 extern kqswnal_data_t kqswnal_data;
295 extern int kqswnal_thread_start (int (*fn)(void *arg), void *arg);
296 extern void kqswnal_rxhandler(EP_RXD *rxd);
297 extern int kqswnal_scheduler (void *);
298 extern void kqswnal_rx_done (kqswnal_rx_t *krx);
300 static inline lnet_nid_t
301 kqswnal_elanid2nid (int elanid)
303 return LNET_MKNID(LNET_NIDNET(kqswnal_data.kqn_ni->ni_nid), elanid);
307 kqswnal_nid2elanid (lnet_nid_t nid)
309 __u32 elanid = LNET_NIDADDR(nid);
311 /* not in this cluster? */
312 return (elanid >= kqswnal_data.kqn_nnodes) ? -1 : elanid;
315 static inline lnet_nid_t
316 kqswnal_rx_nid(kqswnal_rx_t *krx)
318 return (kqswnal_elanid2nid(ep_rxd_node(krx->krx_rxd)));
322 kqswnal_pages_spanned (void *base, int nob)
324 unsigned long first_page = ((unsigned long)base) >> PAGE_SHIFT;
325 unsigned long last_page = (((unsigned long)base) + (nob - 1)) >> PAGE_SHIFT;
327 LASSERT (last_page >= first_page); /* can't wrap address space */
328 return (last_page - first_page + 1);
331 static inline void kqswnal_rx_decref (kqswnal_rx_t *krx)
333 LASSERT (atomic_read (&krx->krx_refcount) > 0);
334 if (atomic_dec_and_test (&krx->krx_refcount))
335 kqswnal_rx_done(krx);
338 int kqswnal_startup (lnet_ni_t *ni);
339 void kqswnal_shutdown (lnet_ni_t *ni);
340 int kqswnal_ctl (lnet_ni_t *ni, unsigned int cmd, void *arg);
341 int kqswnal_send (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg);
342 int kqswnal_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg,
343 int delayed, unsigned int niov,
344 struct iovec *iov, lnet_kiov_t *kiov,
345 unsigned int offset, unsigned int mlen, unsigned int rlen);
347 int kqswnal_tunables_init(void);
348 void kqswnal_tunables_fini(void);
350 #endif /* _QSWNAL_H */