/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 *  Copyright (C) 2002 Cluster File Systems, Inc.
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

#ifndef _LUSTRE_NET_H
#define _LUSTRE_NET_H

#include <linux/tqueue.h>
#include <linux/kp30.h>
// #include <linux/obd.h>
#include <portals/p30.h>
#include <linux/lustre_idl.h>
#include <linux/lustre_ha.h>
#include <linux/lustre_import.h>

/* The following constants determine how much memory is devoted to
 * buffering in the lustre services.
 *
 * ?_NEVENTS            # event queue entries
 *
 * ?_NBUFS              # request buffers
 * ?_BUFSIZE            # bytes in a single request buffer
 * total memory = ?_NBUFS * ?_BUFSIZE
 *
 * ?_MAXREQSIZE         # maximum request service will receive
 * larger messages will get dropped.
 * request buffers are auto-unlinked when less than ?_MAXREQSIZE
 * is left in them.
 */

#define LDLM_NUM_THREADS        4
#define LDLM_NEVENTS    1024
#define LDLM_NBUFS      10
#define LDLM_BUFSIZE    (64 * 1024)
#define LDLM_MAXREQSIZE 1024

#define MDT_NUM_THREADS 8
#define MDS_NEVENTS     1024
#define MDS_NBUFS       10
#define MDS_BUFSIZE     (64 * 1024)
#define MDS_MAXREQSIZE  1024

#define OST_NUM_THREADS 6
#define OST_NEVENTS     min(num_physpages / 16, 32768UL)
#define OST_NBUFS       min(OST_NEVENTS / 128, 256UL)
#define OST_BUFSIZE     ((OST_NEVENTS > 4096UL ? 128 : 64) * 1024)
#define OST_MAXREQSIZE  (8 * 1024)

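/* Worked example of the sizing rules above (illustrative only): the LDLM
 * and MDS services each devote LDLM_NBUFS * LDLM_BUFSIZE = 10 * 64 KB =
 * 640 KB to request buffers.  OST buffering scales with memory: assuming
 * 4 KB pages, a 128 MB node has num_physpages = 32768, so OST_NEVENTS =
 * 2048, OST_NBUFS = 16 and OST_BUFSIZE = 64 KB, i.e. 1 MB of buffers. */
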
#define CONN_INVALID 1

struct ptlrpc_connection {
        struct list_head        c_link;
        struct lustre_peer      c_peer;
        __u8                    c_local_uuid[37];  /* XXX do we need this? */
        __u8                    c_remote_uuid[37];

        __u32                   c_generation;  /* changes upon new connection */
        __u32                   c_epoch;       /* changes when peer changes */
        __u32                   c_bootcount;   /* peer's boot count */

        spinlock_t              c_lock;        /* also protects req->rq_list */

        atomic_t                c_refcount;
        __u64                   c_token;
        __u64                   c_remote_conn;
        __u64                   c_remote_token;

        struct list_head        c_delayed_head;/* delayed until post-recovery XXX imp? */
        struct recovd_data      c_recovd_data;

        struct list_head        c_imports;
        struct list_head        c_exports;
        struct list_head        c_sb_chain;
        __u32                   c_flags; /* can we indicate INVALID elsewhere? */
};

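/* Connections are shared between users and reference counted via
 * c_refcount.  A minimal usage sketch, assuming the caller already has a
 * filled-in struct lustre_peer "peer" and a peer "uuid" (both hypothetical
 * names here), using the helpers declared later in this header:
 *
 *      struct ptlrpc_connection *conn;
 *
 *      conn = ptlrpc_get_connection(&peer, uuid);  // find or create, takes a ref
 *      if (conn == NULL)
 *              return -ENOMEM;
 *      ptlrpc_connection_addref(conn);             // extra ref for a second user
 *      ...
 *      ptlrpc_put_connection(conn);                // each user drops its ref
 *      ptlrpc_put_connection(conn);
 */
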
struct ptlrpc_client {
        __u32                     cli_request_portal;
        __u32                     cli_reply_portal;

        __u32                     cli_target_devno;

        void                     *cli_data;
        // struct semaphore          cli_rpc_sem; /* limits outstanding requests */

        char                     *cli_name;
};

/* state flags of requests */
#define PTL_RPC_FL_INTR      (1 << 0)
#define PTL_RPC_FL_REPLIED   (1 << 1)  /* reply was received */
#define PTL_RPC_FL_SENT      (1 << 2)
#define PTL_BULK_FL_SENT     (1 << 3)
#define PTL_BULK_FL_RCVD     (1 << 4)
#define PTL_RPC_FL_ERR       (1 << 5)
#define PTL_RPC_FL_TIMEOUT   (1 << 6)
#define PTL_RPC_FL_RESEND    (1 << 7)
#define PTL_RPC_FL_RESTART   (1 << 8)  /* operation must be restarted */
#define PTL_RPC_FL_FINISHED  (1 << 9)
#define PTL_RPC_FL_RETAIN    (1 << 10) /* retain for replay after reply */
#define PTL_RPC_FL_REPLAY    (1 << 11) /* replay upon recovery */
#define PTL_RPC_FL_ALLOCREP  (1 << 12) /* reply buffer allocated */

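/* The flags above live in rq_flags of struct ptlrpc_request (below) and are
 * tested with plain bitwise checks; an illustrative sketch:
 *
 *      if (req->rq_flags & PTL_RPC_FL_REPLIED)
 *              ...                     // a reply has been received
 *      if (req->rq_flags & (PTL_RPC_FL_ERR | PTL_RPC_FL_TIMEOUT))
 *              ...                     // the request failed or timed out
 */
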
struct ptlrpc_request {
        int rq_type; /* one of PTL_RPC_MSG_* */
        struct list_head rq_list;
        struct obd_device *rq_obd;
        int rq_status;
        int rq_flags;
        atomic_t rq_refcount;

        int rq_request_portal; /* XXX FIXME bug 625069 */
        int rq_reply_portal; /* XXX FIXME bug 625069 */

        int rq_reqlen;
        struct lustre_msg *rq_reqmsg;

        int rq_replen;
        struct lustre_msg *rq_repmsg;
        __u64 rq_transno;
        __u64 rq_xid;

        int rq_level;
        time_t rq_timeout;
        //        void * rq_reply_handle;
        wait_queue_head_t rq_wait_for_rep;

        /* incoming reply */
        ptl_md_t rq_reply_md;
        ptl_handle_me_t rq_reply_me_h;

        /* outgoing req/rep */
        ptl_md_t rq_req_md;

        struct lustre_peer rq_peer; /* XXX see service.c can this be factored away? */
        struct obd_export *rq_export;
        struct ptlrpc_connection *rq_connection;
        struct obd_import *rq_import;
        struct ptlrpc_service *rq_svc;

        void (*rq_replay_cb)(struct ptlrpc_request *);
};

#define DEBUG_REQ(level, req, fmt, args...)                                    \
do {                                                                           \
CDEBUG(level,                                                                  \
       "@@@ " fmt " req x"LPD64"/t"LPD64" o%d->%s:%d lens %d/%d ref %d fl "    \
       "%x\n" ,  ## args, req->rq_xid, req->rq_transno,                        \
       req->rq_reqmsg ? req->rq_reqmsg->opc : -1,                              \
       req->rq_connection ? (char *)req->rq_connection->c_remote_uuid : "<?>", \
       (req->rq_import && req->rq_import->imp_client) ?                        \
           req->rq_import->imp_client->cli_request_portal : -1,                \
       req->rq_reqlen, req->rq_replen,                                         \
       atomic_read(&req->rq_refcount), req->rq_flags);                         \
} while (0)

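/* DEBUG_REQ is the usual way to log per-request state; a typical call
 * (D_ERROR is one of the portals debug masks from <linux/kp30.h>) would be:
 *
 *      DEBUG_REQ(D_ERROR, req, "timed out waiting for reply");
 */
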
struct ptlrpc_bulk_page {
        struct ptlrpc_bulk_desc *bp_desc;
        struct list_head bp_link;
        void *bp_buf;
        int bp_buflen;
        struct page *bp_page;
        __u32 bp_xid;
        __u32 bp_flags;
        struct dentry *bp_dentry;
        int (*bp_cb)(struct ptlrpc_bulk_page *);
};


struct ptlrpc_bulk_desc {
        struct list_head bd_set_chain; /* entry in obd_brw_set */
        struct obd_brw_set *bd_brw_set;
        int bd_flags;
        struct ptlrpc_connection *bd_connection;
        struct ptlrpc_client *bd_client;
        __u32 bd_portal;
        struct lustre_handle bd_conn;
        void (*bd_ptl_ev_hdlr)(struct ptlrpc_bulk_desc *);

        wait_queue_head_t bd_waitq;
        struct list_head bd_page_list;
        __u32 bd_page_count;
        atomic_t bd_refcount;
        void *bd_desc_private;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
        struct work_struct bd_queue;
#else
        struct tq_struct bd_queue;
#endif

        ptl_md_t bd_md;
        ptl_handle_md_t bd_md_h;
        ptl_handle_me_t bd_me_h;

        atomic_t bd_source_callback_count;

        struct iovec bd_iov[16];    /* self-sized pre-allocated iov */
};

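/* A bulk descriptor collects the pages of one bulk transfer and is reference
 * counted via bd_refcount.  Illustrative sketch only, assuming "conn" is an
 * existing ptlrpc_connection and "buf"/"len" are a hypothetical buffer and
 * length, using the helpers declared later in this file:
 *
 *      struct ptlrpc_bulk_desc *desc;
 *      struct ptlrpc_bulk_page *bulk;
 *
 *      desc = ptlrpc_prep_bulk(conn);
 *      bulk = ptlrpc_prep_bulk_page(desc);       // one descriptor, many pages
 *      bulk->bp_buf = buf;
 *      bulk->bp_buflen = len;
 *      ptlrpc_bulk_addref(desc);                 // hold a ref while in use
 *      ...
 *      ptlrpc_bulk_decref(desc);                 // frees the desc on last ref
 */
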
struct ptlrpc_thread {
        struct list_head t_link;

        __u32 t_flags;
        wait_queue_head_t t_ctl_waitq;
};

struct ptlrpc_request_buffer_desc {
        struct list_head       rqbd_list;
        struct ptlrpc_service *rqbd_service;
        ptl_handle_me_t        rqbd_me_h;
        atomic_t               rqbd_refcount;
        char                  *rqbd_buffer;
};

struct ptlrpc_service {
        time_t srv_time;
        time_t srv_timeout;

        /* incoming request buffers */
        /* FIXME: perhaps a list of EQs, if multiple NIs are used? */

        __u32            srv_max_req_size;     /* biggest request to receive */
        __u32            srv_buf_size;         /* # bytes in a request buffer */
        struct list_head srv_rqbds;            /* all the request buffer descriptors */
        __u32            srv_nrqbds;           /* # request buffers */
        atomic_t         srv_nrqbds_receiving; /* # request buffers posted for input */

        __u32 srv_req_portal;
        __u32 srv_rep_portal;

        __u32 srv_xid;

        /* event queue */
        ptl_handle_eq_t srv_eq_h;

        struct lustre_peer srv_self;

        wait_queue_head_t srv_waitq; /* all threads sleep on this */

        spinlock_t srv_lock;
        struct list_head srv_threads;
        int (*srv_handler)(struct ptlrpc_request *req);
        char *srv_name;  /* only statically allocated strings here; we don't clean them */
};

static inline void ptlrpc_hdl2req(struct ptlrpc_request *req,
                                  struct lustre_handle *h)
{
        req->rq_reqmsg->addr = h->addr;
        req->rq_reqmsg->cookie = h->cookie;
}

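/* Example (sketch): copying a previously obtained object handle into the
 * outgoing request message, assuming "lockh" is a struct lustre_handle
 * returned by an earlier call:
 *
 *      ptlrpc_hdl2req(req, &lockh);
 */
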
typedef void (*bulk_callback_t)(struct ptlrpc_bulk_desc *, void *);

typedef int (*svc_handler_t)(struct ptlrpc_request *req);

/* rpc/connection.c */
void ptlrpc_readdress_connection(struct ptlrpc_connection *, obd_uuid_t uuid);
struct ptlrpc_connection *ptlrpc_get_connection(struct lustre_peer *peer,
                                                obd_uuid_t uuid);
int ptlrpc_put_connection(struct ptlrpc_connection *c);
struct ptlrpc_connection *ptlrpc_connection_addref(struct ptlrpc_connection *);
void ptlrpc_init_connection(void);
void ptlrpc_cleanup_connection(void);

/* rpc/niobuf.c */
int ptlrpc_check_bulk_sent(struct ptlrpc_bulk_desc *bulk);
int ptlrpc_check_bulk_received(struct ptlrpc_bulk_desc *bulk);
int ptlrpc_send_bulk(struct ptlrpc_bulk_desc *);
int ptlrpc_register_bulk(struct ptlrpc_bulk_desc *);
int ptlrpc_abort_bulk(struct ptlrpc_bulk_desc *bulk);
struct obd_brw_set *obd_brw_set_new(void);
void obd_brw_set_add(struct obd_brw_set *, struct ptlrpc_bulk_desc *);
void obd_brw_set_free(struct obd_brw_set *);

int ptlrpc_reply(struct ptlrpc_service *svc, struct ptlrpc_request *req);
int ptlrpc_error(struct ptlrpc_service *svc, struct ptlrpc_request *req);
void ptlrpc_resend_req(struct ptlrpc_request *request);
int ptl_send_rpc(struct ptlrpc_request *request);
void ptlrpc_link_svc_me(struct ptlrpc_request_buffer_desc *rqbd);

/* rpc/client.c */
void ptlrpc_init_client(int req_portal, int rep_portal, char *name,
                        struct ptlrpc_client *);
void ptlrpc_cleanup_client(struct obd_import *imp);
__u8 *ptlrpc_req_to_uuid(struct ptlrpc_request *req);
struct ptlrpc_connection *ptlrpc_uuid_to_connection(obd_uuid_t uuid);

int ll_brw_sync_wait(struct obd_brw_set *, int phase);

int ptlrpc_queue_wait(struct ptlrpc_request *req);
void ptlrpc_continue_req(struct ptlrpc_request *req);
int ptlrpc_replay_req(struct ptlrpc_request *req);
void ptlrpc_restart_req(struct ptlrpc_request *req);

struct ptlrpc_request *ptlrpc_prep_req(struct obd_import *imp, int opcode,
                                       int count, int *lengths, char **bufs);
void ptlrpc_free_req(struct ptlrpc_request *request);
void ptlrpc_req_finished(struct ptlrpc_request *request);
struct ptlrpc_bulk_desc *ptlrpc_prep_bulk(struct ptlrpc_connection *);
void ptlrpc_free_bulk(struct ptlrpc_bulk_desc *bulk);
struct ptlrpc_bulk_page *ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc);
void ptlrpc_free_bulk_page(struct ptlrpc_bulk_page *page);

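/* Typical client-side request lifecycle (illustrative sketch; the opcode,
 * import "imp" and body size below are assumed, not prescribed):
 *
 *      int size = sizeof(struct mds_body);    // hypothetical request body
 *      struct ptlrpc_request *req;
 *      int rc;
 *
 *      req = ptlrpc_prep_req(imp, MDS_GETATTR, 1, &size, NULL);
 *      if (req == NULL)
 *              return -ENOMEM;
 *      req->rq_replen = lustre_msg_size(1, &size);
 *      rc = ptlrpc_queue_wait(req);           // send and wait for the reply
 *      ...
 *      ptlrpc_req_finished(req);              // drop the request reference
 */
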
/* rpc/service.c */
struct ptlrpc_service *
ptlrpc_init_svc(__u32 nevents, __u32 nbufs, __u32 bufsize, __u32 max_req_size,
                int req_portal, int rep_portal,
                obd_uuid_t uuid, svc_handler_t, char *name);
void ptlrpc_stop_all_threads(struct ptlrpc_service *svc);
int ptlrpc_start_thread(struct obd_device *dev, struct ptlrpc_service *svc,
                        char *name);
int ptlrpc_unregister_service(struct ptlrpc_service *service);

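/* Sketch of bringing up a service with the sizing constants defined above.
 * The handler, uuid, obddev and portal constants are placeholders (the
 * LDLM_*_PORTAL values are assumed to come from lustre_idl.h):
 *
 *      struct ptlrpc_service *svc;
 *      int i;
 *
 *      svc = ptlrpc_init_svc(LDLM_NEVENTS, LDLM_NBUFS, LDLM_BUFSIZE,
 *                            LDLM_MAXREQSIZE, LDLM_REQUEST_PORTAL,
 *                            LDLM_REPLY_PORTAL, uuid, handler, "ldlm");
 *      for (i = 0; i < LDLM_NUM_THREADS; i++)
 *              ptlrpc_start_thread(obddev, svc, "ldlm");
 */
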
struct ptlrpc_svc_data {
        char *name;
        struct ptlrpc_service *svc;
        struct ptlrpc_thread *thread;
        struct obd_device *dev;
};

/* rpc/pack_generic.c */
int lustre_pack_msg(int count, int *lens, char **bufs, int *len,
                    struct lustre_msg **msg);
int lustre_msg_size(int count, int *lengths);
int lustre_unpack_msg(struct lustre_msg *m, int len);
void *lustre_msg_buf(struct lustre_msg *m, int n);

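/* The pack_generic helpers marshal a lustre_msg with "count" buffers of the
 * given lengths.  Illustrative sketch (the body struct is an assumption):
 *
 *      int size = sizeof(struct mds_body);
 *      struct lustre_msg *msg;
 *      struct mds_body *body;
 *      int len, rc;
 *
 *      rc = lustre_pack_msg(1, &size, NULL, &len, &msg);  // allocate + pack
 *      body = lustre_msg_buf(msg, 0);                     // buffer 0 of 1
 */
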
static inline void ptlrpc_bulk_decref(struct ptlrpc_bulk_desc *desc)
{
        if (atomic_dec_and_test(&desc->bd_refcount)) {
                CDEBUG(D_PAGE, "Released last ref on %p, freeing\n", desc);
                ptlrpc_free_bulk(desc);
        } else {
                CDEBUG(D_PAGE, "%p -> %d\n", desc,
                       atomic_read(&desc->bd_refcount));
        }
}

static inline void ptlrpc_bulk_addref(struct ptlrpc_bulk_desc *desc)
{
        atomic_inc(&desc->bd_refcount);
        CDEBUG(D_PAGE, "Set refcount of %p to %d\n", desc,
               atomic_read(&desc->bd_refcount));
}

#endif