1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (C) 2001, 2002 Cluster File Systems, Inc.
5 * Author: Zach Brown <zab@zabbo.net>
6 * Author: Peter J. Braam <braam@clusterfs.com>
7 * Author: Phil Schwan <phil@clusterfs.com>
8 * Author: Eric Barton <eric@bartonsoftware.com>
10 * This file is part of Portals, http://www.sf.net/projects/lustre/
12 * Portals is free software; you can redistribute it and/or
13 * modify it under the terms of version 2 of the GNU General Public
14 * License as published by the Free Software Foundation.
16 * Portals is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
21 * You should have received a copy of the GNU General Public License
22 * along with Portals; if not, write to the Free Software
23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
27 #define DEBUG_PORTAL_ALLOC
30 #include <linux/config.h>
31 #include <linux/module.h>
32 #include <linux/kernel.h>
34 #include <linux/string.h>
35 #include <linux/stat.h>
36 #include <linux/errno.h>
37 #include <linux/smp_lock.h>
38 #include <linux/unistd.h>
41 #include <linux/uio.h>
43 #include <asm/system.h>
44 #include <asm/uaccess.h>
47 #include <linux/file.h>
48 #include <linux/stat.h>
49 #include <linux/list.h>
50 #include <linux/kmod.h>
51 #include <asm/uaccess.h>
52 #include <asm/segment.h>
53 #include <asm/div64.h>
55 #define DEBUG_SUBSYSTEM S_SOCKNAL
57 #include <linux/kp30.h>
58 #include <portals/p30.h>
59 #include <portals/lib-p30.h>
62 # define SOCKNAL_N_SCHED smp_num_cpus /* # socknal schedulers */
64 # define SOCKNAL_N_SCHED 1 /* # socknal schedulers */
66 #define SOCKNAL_N_AUTOCONNECTD 4 /* # socknal autoconnect daemons */
68 #define SOCKNAL_MIN_RECONNECT_INTERVAL HZ /* first failed connection retry... */
69 #define SOCKNAL_MAX_RECONNECT_INTERVAL (60*HZ) /* ...exponentially increasing to this */
71 #define SOCKNAL_IO_TIMEOUT (60*HZ) /* default comms timeout */
73 #define SOCKNAL_PEER_HASH_SIZE 101 /* # peer lists */
76 # define SOCKNAL_MAX_FWD_PAYLOAD (256<<10) /* biggest payload I can forward */
78 # define SOCKNAL_MAX_FWD_PAYLOAD (64<<10) /* biggest payload I can forward */
81 #define SOCKNAL_ZC_MIN_FRAG (2<<10) /* default smallest zerocopy fragment */
83 #define SOCKNAL_NLTXS 128 /* # normal transmit messages */
84 #define SOCKNAL_NNBLK_LTXS 128 /* # transmit messages reserved if can't block */
86 #define SOCKNAL_SMALL_FWD_NMSGS 128 /* # small messages I can be forwarding at any time */
87 #define SOCKNAL_LARGE_FWD_NMSGS 64 /* # large messages I can be forwarding at any time */
89 #define SOCKNAL_SMALL_FWD_PAGES 1 /* # pages in a small message fwd buffer */
91 #define SOCKNAL_LARGE_FWD_PAGES (PAGE_ALIGN (sizeof (ptl_hdr_t) + SOCKNAL_MAX_FWD_PAYLOAD) >> PAGE_SHIFT)
92 /* # pages in a large message fwd buffer */
94 #define SOCKNAL_RESCHED 100 /* # scheduler loops before reschedule */
96 #define SOCKNAL_TX_LOW_WATER(sk) (((sk)->sk_sndbuf*8)/10)
98 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
99 # define jiffies_64 jiffies
102 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,72))
103 # define sk_data_ready data_ready
104 # define sk_write_space write_space
105 # define sk_user_data user_data
106 # define sk_prot prot
107 # define sk_sndbuf sndbuf
108 # define sk_socket socket
/* Pool of pre-allocated forwarding message buffers (FMBs), protected by
 * fmp_lock; also tracks connections blocked waiting for a free buffer.
 * NOTE(review): the rest of this struct and its typedef name (presumably
 * ksock_fmb_pool_t, per ksnd_small_fmp/ksnd_large_fmp below) are not
 * visible in this chunk — confirm against the full file. */
111 typedef struct /* pool of forwarding buffers */
113 spinlock_t fmp_lock; /* serialise */
114 struct list_head fmp_idle_fmbs; /* buffers waiting for a connection */
115 struct list_head fmp_blocked_conns; /* connections waiting for a buffer */
/* Per-scheduler state: queues of connections with pending rx/tx work and
 * the waitqueue the scheduler thread sleeps on.  kss_lock serialises all
 * of it.  NOTE(review): remaining members and the typedef name (presumably
 * ksock_sched_t, per ksnd_schedulers/ksnc_scheduler below) are outside
 * this view. */
119 typedef struct /* per scheduler state */
121 spinlock_t kss_lock; /* serialise */
122 struct list_head kss_rx_conns; /* conn waiting to be read */
123 struct list_head kss_tx_conns; /* conn waiting to be written */
125 struct list_head kss_zctxdone_list; /* completed ZC transmits */
127 wait_queue_head_t kss_waitq; /* where scheduler sleeps */
128 int kss_nconns; /* # connections assigned to this scheduler */
/* Bitfields mapping an IRQ to the scheduler handling it (presumably the
 * body of ksock_irqinfo_t, per the ksnd_irqinfo[NR_IRQS] member below —
 * the struct header is not visible in this chunk).  ksni_sched is only
 * 6 bits wide, hence the "< 64 schedulers" assumption. */
132 int ksni_valid:1; /* been set yet? */
133 int ksni_bound:1; /* bound to a cpu yet? */
134 int ksni_sched:6; /* which scheduler (assumes < 64) */
/* Global socknal state (presumably the body of ksock_nal_data_t, per the
 * "extern ksock_nal_data_t ksocknal_data" declaration below): peer hash
 * table, scheduler array, forwarding-buffer pools, local-tx freelists,
 * reaper queues and autoconnect daemon queues.  ksnd_global_lock is the
 * rwlock taken (read) by ksocknal_getconnsock below.  NOTE(review): the
 * struct header/closing lines are outside this chunk view. */
138 int ksnd_init; /* initialisation state */
140 rwlock_t ksnd_global_lock; /* stabilize peer/conn ops */
141 struct list_head *ksnd_peers; /* hash table of all my known peers */
142 int ksnd_peer_hash_size; /* size of ksnd_peers */
/* lib (portals library) interface state */
144 nal_cb_t *ksnd_nal_cb;
145 spinlock_t ksnd_nal_cb_lock; /* lib cli/sti lock */
147 atomic_t ksnd_nthreads; /* # live threads */
148 int ksnd_shuttingdown; /* tell threads to exit */
149 ksock_sched_t *ksnd_schedulers; /* scheduler state */
151 atomic_t ksnd_npeers; /* total # peers extant */
152 atomic_t ksnd_nclosing_conns; /* # closed conns extant */
154 kpr_router_t ksnd_router; /* THE router */
/* forwarding-buffer pools (see ksock_fmb_pool above) */
156 void *ksnd_fmbs; /* all the pre-allocated FMBs */
157 ksock_fmb_pool_t ksnd_small_fmp; /* small message forwarding buffers */
158 ksock_fmb_pool_t ksnd_large_fmp; /* large message forwarding buffers */
/* local transmit descriptor (LTX) freelists */
160 void *ksnd_ltxs; /* all the pre-allocated LTXs */
161 spinlock_t ksnd_idle_ltx_lock; /* serialise ltx alloc/free */
162 struct list_head ksnd_idle_ltx_list; /* where to get an idle LTX */
163 struct list_head ksnd_idle_nblk_ltx_list; /* where to get an idle LTX if you can't block */
164 wait_queue_head_t ksnd_idle_ltx_waitq; /* where to block for an idle LTX */
165 int ksnd_active_ltxs; /* #active ltxs */
/* reaper thread work queues (see ksocknal_reaper below) */
167 struct list_head ksnd_deathrow_conns; /* conns to be closed */
168 struct list_head ksnd_zombie_conns; /* conns to be freed */
169 wait_queue_head_t ksnd_reaper_waitq; /* reaper sleep here */
170 spinlock_t ksnd_reaper_lock; /* serialise */
/* fault-injection knobs for testing */
172 int ksnd_stall_tx; /* test sluggish sender */
173 int ksnd_stall_rx; /* test sluggish receiver */
/* autoconnect daemon state (see ksocknal_autoconnectd below) */
175 struct list_head ksnd_autoconnectd_routes; /* routes waiting to be connected */
176 wait_queue_head_t ksnd_autoconnectd_waitq; /* autoconnectds sleep here */
177 spinlock_t ksnd_autoconnectd_lock; /* serialise */
179 ksock_irqinfo_t ksnd_irqinfo[NR_IRQS];/* irq->scheduler lookup */
182 #define SOCKNAL_INIT_NOTHING 0
183 #define SOCKNAL_INIT_DATA 1
184 #define SOCKNAL_INIT_PTL 2
185 #define SOCKNAL_INIT_ALL 3
187 /* A packet just assembled for transmission is represented by 1 or more
188 * struct iovec fragments and 0 or more ptl_kiov_t fragments. Forwarded
189 * messages, or messages from an MD with PTL_MD_KIOV _not_ set have 0
190 * ptl_kiov_t fragments. Messages from an MD with PTL_MD_KIOV set, have 1
191 * struct iovec fragment (the header) and up to PTL_MD_MAX_IOV ptl_kiov_t
194 * On the receive side, initially 1 struct iovec fragment is posted for
195 * receive (the header). Once the header has been received, if the message
196 * requires forwarding or will be received into mapped memory, up to
197 * PTL_MD_MAX_IOV struct iovec fragments describe the target memory.
198 * Otherwise up to PTL_MD_MAX_IOV ptl_kiov_t fragments are used.
201 struct ksock_conn; /* forward ref */
202 struct ksock_peer; /* forward ref */
203 struct ksock_route; /* forward ref */
/* A packet queued for transmission: iovec frags plus optional page (kiov)
 * frags, per the comment block above.  Typedef name is ksock_tx_t (per the
 * KSOCK_ZCCD_2_TX macro below).  NOTE(review): remaining members and the
 * closing brace are outside this chunk view; tx_zccd appears conditional
 * (zero-copy builds only) — confirm against the full file. */
205 typedef struct /* transmit packet */
207 struct list_head tx_list; /* queue on conn for transmission etc */
208 __u64 tx_deadline; /* when (in jiffies) tx times out */
209 char tx_isfwd; /* forwarding / sourced here */
210 int tx_nob; /* # packet bytes */
211 int tx_resid; /* residual bytes */
212 int tx_niov; /* # packet iovec frags */
213 struct iovec *tx_iov; /* packet iovec frags */
214 int tx_nkiov; /* # packet page frags */
215 ptl_kiov_t *tx_kiov; /* packet page frags */
216 struct ksock_conn *tx_conn; /* owning conn */
217 ptl_hdr_t *tx_hdr; /* packet header (for debug only) */
219 zccd_t tx_zccd; /* zero copy callback descriptor */
223 #define KSOCK_ZCCD_2_TX(ptr) list_entry (ptr, ksock_tx_t, tx_zccd)
224 /* network zero copy callback descriptor embedded in ksock_tx_t */
226 /* space for the tx frag descriptors: hdr is always 1 iovec
227 * and payload is PTL_MD_MAX of either type. */
/* Storage for tx fragment descriptors of either flavour (see the comment
 * above: hdr is 1 iovec, payload is PTL_MD_MAX of either type).
 * NOTE(review): the opening of this definition is not visible here —
 * presumably a union, since only one flavour is needed at a time; confirm
 * against the full file. */
232 struct iovec iov[PTL_MD_MAX_IOV];
233 ptl_kiov_t kiov[PTL_MD_MAX_IOV];
235 } ksock_txiovspace_t;
/* A locally-sourced packet (lib -> socknal): embeds a ksock_tx_t (see
 * KSOCK_TX_2_KSOCK_LTX below), the freelist to return to when idle, the
 * lib_finalize() callback args, and inline space for the header and frag
 * descriptors.  NOTE(review): closing brace/typedef name (presumably
 * ksock_ltx_t, per the macro below) are outside this chunk view. */
237 typedef struct /* locally transmitted packet */
239 ksock_tx_t ltx_tx; /* send info */
240 struct list_head *ltx_idle; /* where to put when idle */
241 void *ltx_private; /* lib_finalize() callback arg */
242 void *ltx_cookie; /* lib_finalize() callback arg */
243 ksock_txiovspace_t ltx_iov_space; /* where to stash frag descriptors */
244 ptl_hdr_t ltx_hdr; /* buffer for packet header */
247 #define KSOCK_TX_2_KPR_FWD_DESC(ptr) list_entry ((kprfd_scratch_t *)ptr, kpr_fwd_desc_t, kprfd_scratch)
248 /* forwarded packets (router->socknal) embedded in kpr_fwd_desc_t::kprfd_scratch */
250 #define KSOCK_TX_2_KSOCK_LTX(ptr) list_entry (ptr, ksock_ltx_t, ltx_tx)
251 /* local packets (lib->socknal) embedded in ksock_ltx_t::ltx_tx */
253 /* NB list_entry() is used here as convenient macro for calculating a
254 * pointer to a struct from the address of a member. */
/* A forwarding message buffer (FMB): pages into which an incoming message
 * destined for another node is received before being handed to the router
 * via fmb_fwd.  Owned by one of the two pools above (small/large).
 * NOTE(review): closing brace/typedef name (presumably ksock_fmb_t) are
 * outside this chunk view. */
256 typedef struct /* Kernel portals Socket Forwarding message buffer */
257 { /* (socknal->router) */
258 struct list_head fmb_list; /* queue idle */
259 kpr_fwd_desc_t fmb_fwd; /* router's descriptor */
260 int fmb_npages; /* # pages allocated */
261 ksock_fmb_pool_t *fmb_pool; /* owning pool */
262 struct ksock_peer *fmb_peer; /* peer received from */
263 struct page *fmb_pages[SOCKNAL_LARGE_FWD_PAGES];
264 struct iovec fmb_iov[SOCKNAL_LARGE_FWD_PAGES];
267 /* space for the rx frag descriptors; we either read a single contiguous
268 * header, or PTL_MD_MAX_IOV frags of payload of either type. */
/* Storage for rx fragment descriptors of either flavour (see the comment
 * above).  NOTE(review): the opening of this definition is not visible
 * here — presumably a union, mirroring ksock_txiovspace_t; confirm
 * against the full file. */
270 struct iovec iov[PTL_MD_MAX_IOV];
271 ptl_kiov_t kiov[PTL_MD_MAX_IOV];
272 } ksock_rxiovspace_t;
274 #define SOCKNAL_RX_HEADER 1 /* reading header */
275 #define SOCKNAL_RX_BODY 2 /* reading body (to deliver here) */
276 #define SOCKNAL_RX_BODY_FWD 3 /* reading body (to forward) */
277 #define SOCKNAL_RX_SLOP 4 /* skipping body */
278 #define SOCKNAL_RX_GET_FMB 5 /* scheduled for forwarding */
279 #define SOCKNAL_RX_FMB_SLEEP 6 /* blocked waiting for a fwd desc */
/* One TCP connection to a peer.  Grouped into identity/ownership fields,
 * receive-side state (driven through the SOCKNAL_RX_* states above) and
 * transmit-side state.  NOTE(review): closing brace/typedef line
 * (presumably ksock_conn_t, used throughout below) is outside this chunk
 * view, as is any conditional compilation around ksnc_tx_pending. */
281 typedef struct ksock_conn
283 struct ksock_peer *ksnc_peer; /* owning peer */
284 struct ksock_route *ksnc_route; /* owning route */
285 struct list_head ksnc_list; /* stash on peer's conn list */
286 struct socket *ksnc_sock; /* actual socket */
/* saved callbacks are restored when the socknal releases the socket;
 * stored as void* rather than their function-pointer types */
287 void *ksnc_saved_data_ready; /* socket's original data_ready() callback */
288 void *ksnc_saved_write_space; /* socket's original write_space() callback */
289 atomic_t ksnc_refcount; /* # users */
290 ksock_sched_t *ksnc_scheduler; /* who schedules this connection */
291 __u32 ksnc_ipaddr; /* peer's IP */
292 int ksnc_port; /* peer's port */
293 int ksnc_closing; /* being shut down */
/* READER section */
296 struct list_head ksnc_rx_list; /* where I enq waiting input or a forwarding descriptor */
297 __u64 ksnc_rx_deadline; /* when receive times out */
298 int ksnc_rx_ready; /* data ready to read */
299 int ksnc_rx_scheduled; /* being progressed */
300 int ksnc_rx_state; /* what is being read */
301 int ksnc_rx_nob_left; /* # bytes to next hdr/body */
302 int ksnc_rx_nob_wanted; /* bytes actually wanted */
303 int ksnc_rx_niov; /* # iovec frags */
304 struct iovec *ksnc_rx_iov; /* the iovec frags */
305 int ksnc_rx_nkiov; /* # page frags */
306 ptl_kiov_t *ksnc_rx_kiov; /* the page frags */
307 ksock_rxiovspace_t ksnc_rx_iov_space; /* space for frag descriptors */
308 void *ksnc_cookie; /* rx lib_finalize passthru arg */
309 ptl_hdr_t ksnc_hdr; /* where I read headers into */
/* WRITER section */
312 struct list_head ksnc_tx_list; /* where I enq waiting for output space */
313 struct list_head ksnc_tx_queue; /* packets waiting to be sent */
315 struct list_head ksnc_tx_pending; /* zc packets pending callback */
317 atomic_t ksnc_tx_nob; /* # bytes queued */
318 int ksnc_tx_ready; /* write space */
319 int ksnc_tx_scheduled; /* being progressed */
/* How to reach a peer: address/port, socket options to apply, and the
 * autoconnect retry state (ksnr_timeout/ksnr_retry_interval bounded by
 * SOCKNAL_MIN/MAX_RECONNECT_INTERVAL above).  Note the historical
 * "ksnr_nonagel" spelling ("nonagle" misspelt) — part of the interface,
 * left as-is.  NOTE(review): closing brace/typedef line (presumably
 * ksock_route_t) is outside this chunk view. */
322 typedef struct ksock_route
324 struct list_head ksnr_list; /* chain on peer route list */
325 struct list_head ksnr_connect_list; /* chain on autoconnect list */
326 struct ksock_peer *ksnr_peer; /* owning peer */
327 atomic_t ksnr_refcount; /* # users */
328 int ksnr_sharecount; /* lconf usage counter */
329 __u64 ksnr_timeout; /* when reconnection can happen next */
330 unsigned int ksnr_retry_interval; /* how long between retries */
331 __u32 ksnr_ipaddr; /* an IP address for this peer */
332 int ksnr_port; /* port to connect to */
333 int ksnr_buffer_size; /* size of socket buffers */
334 unsigned int ksnr_irq_affinity:1; /* set affinity? */
335 unsigned int ksnr_xchange_nids:1; /* do hello protocol? */
336 unsigned int ksnr_nonagel:1; /* disable nagle? */
337 unsigned int ksnr_connecting; /* autoconnect in progress? */
338 unsigned int ksnr_deleted; /* been removed from peer? */
339 int ksnr_generation; /* connection incarnation # */
340 ksock_conn_t *ksnr_conn; /* NULL/active connection */
/* A known peer NID, hashed into ksnd_peers (see ksocknal_nid2peerlist
 * below), owning its connections, routes and any packets queued while no
 * connection is up.  NOTE(review): closing brace/typedef line (presumably
 * ksock_peer_t) is outside this chunk view. */
343 typedef struct ksock_peer
345 struct list_head ksnp_list; /* stash on global peer list */
346 ptl_nid_t ksnp_nid; /* who's on the other end(s) */
347 atomic_t ksnp_refcount; /* # users */
348 int ksnp_closing; /* being closed */
349 struct list_head ksnp_conns; /* all active connections */
350 struct list_head ksnp_routes; /* routes */
351 struct list_head ksnp_tx_queue; /* waiting packets */
/* The two module-wide singletons: the lib NAL callback table and the
 * global socknal state defined above. */
356 extern nal_cb_t ksocknal_lib;
357 extern ksock_nal_data_t ksocknal_data;
/* Map a peer NID to its bucket in the global peer hash table.  The 64-bit
 * NID is truncated to unsigned int before the modulo by
 * ksnd_peer_hash_size (SOCKNAL_PEER_HASH_SIZE, a prime).
 * NOTE(review): the function's braces are not visible in this chunk. */
359 static inline struct list_head *
360 ksocknal_nid2peerlist (ptl_nid_t nid)
362 unsigned int hash = ((unsigned int)nid) % ksocknal_data.ksnd_peer_hash_size;
364 return (&ksocknal_data.ksnd_peers [hash]);
/* Pin the connection's socket file: under ksnd_global_lock (read) so the
 * conn can't be finalised concurrently, take a file reference via
 * get_file() if the conn is not already closing.  Balanced by
 * ksocknal_putconnsock() below.  NOTE(review): the return type, return
 * statements and braces are not visible in this chunk — presumably it
 * reports success/failure of the closing check; confirm in the full file. */
368 ksocknal_getconnsock (ksock_conn_t *conn)
372 read_lock (&ksocknal_data.ksnd_global_lock);
373 if (!conn->ksnc_closing) {
375 get_file (conn->ksnc_sock->file);
377 read_unlock (&ksocknal_data.ksnd_global_lock);
/* Drop the file reference taken by ksocknal_getconnsock().
 * NOTE(review): return type and braces are not visible in this chunk. */
383 ksocknal_putconnsock (ksock_conn_t *conn)
385 fput (conn->ksnc_sock->file);
/* Prototypes for the socknal implementation, grouped by subsystem. */
/* -- peer / route management -- */
388 extern void ksocknal_put_route (ksock_route_t *route);
389 extern void ksocknal_put_peer (ksock_peer_t *peer);
390 extern ksock_peer_t *ksocknal_find_peer_locked (ptl_nid_t nid);
391 extern ksock_peer_t *ksocknal_get_peer (ptl_nid_t nid);
392 extern int ksocknal_del_route (ptl_nid_t nid, __u32 ipaddr,
393 int single, int keep_conn);
/* -- connection lifecycle -- */
394 extern int ksocknal_create_conn (ptl_nid_t nid, ksock_route_t *route,
395 struct socket *sock, int bind_irq);
396 extern void ksocknal_close_conn_locked (ksock_conn_t *conn);
397 extern int ksocknal_close_conn_unlocked (ksock_conn_t *conn);
398 extern void ksocknal_terminate_conn (ksock_conn_t *conn);
399 extern void ksocknal_destroy_conn (ksock_conn_t *conn);
400 extern void ksocknal_put_conn (ksock_conn_t *conn);
401 extern int ksocknal_close_conn (ptl_nid_t nid, __u32 ipaddr);
/* -- transmit path and forwarding -- */
403 extern void ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn);
404 extern void ksocknal_tx_done (ksock_tx_t *tx, int asynch);
405 extern void ksocknal_fwd_packet (void *arg, kpr_fwd_desc_t *fwd);
406 extern void ksocknal_fmb_callback (void *arg, int error);
/* -- threads, scheduling and socket callbacks -- */
407 extern int ksocknal_thread_start (int (*fn)(void *arg), void *arg);
408 extern int ksocknal_new_packet (ksock_conn_t *conn, int skip);
409 extern int ksocknal_scheduler (void *arg);
410 extern void ksocknal_data_ready(struct sock *sk, int n);
411 extern void ksocknal_write_space(struct sock *sk);
412 extern int ksocknal_autoconnectd (void *arg);
413 extern int ksocknal_reaper (void *arg);
414 extern int ksocknal_set_linger (struct socket *sock);