1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (C) 2002 Cluster File Systems, Inc.
6 * This file is part of Lustre, http://www.lustre.org.
8 * Lustre is free software; you can redistribute it and/or
9 * modify it under the terms of version 2 of the GNU General Public
10 * License as published by the Free Software Foundation.
12 * Lustre is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with Lustre; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
26 #include <linux/tqueue.h>
27 #include <linux/kp30.h>
28 // #include <linux/obd.h>
29 #include <portals/p30.h>
30 #include <linux/lustre_idl.h>
31 #include <linux/lustre_ha.h>
32 #include <linux/lustre_import.h>
34 /* The following constants determine how much memory is devoted to
35 * buffering in the lustre services.
37 * ?_NEVENTS # event queue entries
39 * ?_NBUFS # request buffers
40 * ?_BUFSIZE # bytes in a single request buffer
41 * total memory = ?_NBUFS * ?_BUFSIZE
43 * ?_MAXREQSIZE # maximum request service will receive
44 * larger messages will get dropped.
45 * request buffers are auto-unlinked when less than ?_MAXREQSIZE is left in them
/* LDLM (lock manager) service tuning.
 * NEVENTS and NBUFS scale with physical memory (num_physpages) and are
 * capped by the corresponding *_MAX values; see the memory notes above. */
#define LDLM_NUM_THREADS 4
#define LDLM_NEVENT_MAX 8192UL
#define LDLM_NEVENTS min(num_physpages / 64, LDLM_NEVENT_MAX)
#define LDLM_NBUF_MAX 256UL
#define LDLM_NBUFS min(LDLM_NEVENTS / 16, LDLM_NBUF_MAX)
#define LDLM_BUFSIZE (8 * 1024)
#define LDLM_MAXREQSIZE 1024
/* MDS (metadata) service tuning; same scaling scheme as the LDLM values. */
#define MDT_NUM_THREADS 8
#define MDS_NEVENT_MAX 8192UL
#define MDS_NEVENTS min(num_physpages / 64, MDS_NEVENT_MAX)
#define MDS_NBUF_MAX 512UL
#define MDS_NBUFS min(MDS_NEVENTS / 16, MDS_NBUF_MAX)
#define MDS_BUFSIZE (8 * 1024)
/* Assume file name length = FNAME_MAX = 256 (true for extN).
 *        path name length = PATH_MAX  = 4096
 *        LOV MD size max  = EA_MAX    = 4000
 * symlink:  FNAME_MAX + PATH_MAX  <- largest
 * link:     FNAME_MAX + PATH_MAX  (mds_rec_link < mds_rec_create)
 * rename:   FNAME_MAX + FNAME_MAX
 * open:     FNAME_MAX + EA_MAX
 *
 * MDS_MAXREQSIZE ~= 4736 bytes =
 * lustre_msg + ldlm_request + mds_body + mds_rec_create + FNAME_MAX + PATH_MAX
 *
 * Realistic size is about 512 bytes (20 character name + 128 char symlink),
 * except in the open case where there are a large number of OSTs in a LOV.
 */
#define MDS_MAXREQSIZE (5 * 1024)
/* OST (object storage) service tuning: more events and buffers than the
 * metadata services because bulk I/O dominates here. */
#define OST_NUM_THREADS 6
#define OST_NEVENT_MAX 32768UL
#define OST_NEVENTS min(num_physpages / 16, OST_NEVENT_MAX)
#define OST_NBUF_MAX 1280UL
#define OST_NBUFS min(OST_NEVENTS / 64, OST_NBUF_MAX)
#define OST_BUFSIZE (8 * 1024)
/* OST_MAXREQSIZE ~= 1896 bytes =
 * lustre_msg + obdo + 16 * obd_ioobj + 64 * niobuf_remote
 *
 * - single object with 16 pages is 576 bytes
 */
#define OST_MAXREQSIZE (2 * 1024)
/* Portals block-device service tuning.  Fixed sizes, unlike the services
 * above — these do not scale with num_physpages. */
#define PTLBD_NUM_THREADS 4
#define PTLBD_NEVENTS 1024
#define PTLBD_NBUFS 20
#define PTLBD_BUFSIZE (32 * 1024)
#define PTLBD_MAXREQSIZE 1024
/* Value for c_flags below: marks a connection as no longer usable. */
98 #define CONN_INVALID 1
/* Per-peer connection state shared by the imports/exports that talk to the
 * same remote node.  NOTE(review): this region is a line-numbered listing
 * with several member lines and the closing brace elided (gaps in the
 * embedded numbering) — it is not the complete struct. */
100 struct ptlrpc_connection {
101 struct list_head c_link;
102 struct lustre_peer c_peer;
103 struct obd_uuid c_local_uuid; /* XXX do we need this? */
104 struct obd_uuid c_remote_uuid;
106 __u32 c_generation; /* changes upon new connection */
107 __u32 c_epoch; /* changes when peer changes */
108 __u32 c_bootcount; /* peer's boot count */
110 spinlock_t c_lock; /* also protects req->rq_list */
115 __u64 c_remote_token;
117 struct list_head c_delayed_head;/* delayed until post-recovery XXX imp? */
118 struct recovd_data c_recovd_data;
120 struct list_head c_imports;
121 struct list_head c_exports;
122 struct list_head c_sb_chain;
123 __u32 c_flags; // can we indicate INVALID elsewhere?
/* Client stub: the request/reply portals used to reach one service type on
 * a target (see cli_request_portal use in DEBUG_REQ).  NOTE(review): the
 * closing brace and any further members are elided in this listing. */
126 struct ptlrpc_client {
127 __u32 cli_request_portal;
128 __u32 cli_reply_portal;
130 __u32 cli_target_devno;
/* state flags of requests (kept in rq_flags; logged by DEBUG_REQ) */
#define PTL_RPC_FL_INTR     (1 << 0)  /* NOTE(review): presumably "wait was
                                       * interrupted" — confirm at use sites */
#define PTL_RPC_FL_REPLIED  (1 << 1)  /* reply was received */
#define PTL_RPC_FL_SENT     (1 << 2)
#define PTL_BULK_FL_SENT    (1 << 3)
#define PTL_BULK_FL_RCVD    (1 << 4)
#define PTL_RPC_FL_ERR      (1 << 5)
#define PTL_RPC_FL_TIMEOUT  (1 << 6)
#define PTL_RPC_FL_RESEND   (1 << 7)
#define PTL_RPC_FL_RESTART  (1 << 8)  /* operation must be restarted */
#define PTL_RPC_FL_FINISHED (1 << 9)
#define PTL_RPC_FL_RETAIN   (1 << 10) /* retain for replay after reply */
#define PTL_RPC_FL_REPLAY   (1 << 11) /* replay upon recovery */
#define PTL_RPC_FL_ALLOCREP (1 << 12) /* reply buffer allocated */
/* One RPC in flight (used on both client and server sides): request and
 * reply message buffers, Portals handles for the reply, and back-pointers
 * to the import/export/connection/service it belongs to.  NOTE(review):
 * this listing elides several member lines (gaps in the embedded
 * numbering, e.g. 155-156, 158, 161-162) and the closing brace — do not
 * treat it as the complete struct. */
151 struct ptlrpc_request {
152 int rq_type; /* one of PTL_RPC_MSG_* */
153 struct list_head rq_list;
154 struct obd_device *rq_obd;
157 atomic_t rq_refcount;
159 int rq_request_portal; /* XXX FIXME bug 249 */
160 int rq_reply_portal; /* XXX FIXME bug 249 */
163 struct lustre_msg *rq_reqmsg;
166 struct lustre_msg *rq_repmsg;
171 // void * rq_reply_handle;
172 wait_queue_head_t rq_wait_for_rep;
175 ptl_md_t rq_reply_md;
176 ptl_handle_me_t rq_reply_me_h;
178 /* outgoing req/rep */
181 struct lustre_peer rq_peer; /* XXX see service.c can this be factored away? */
182 struct obd_export *rq_export;
183 struct ptlrpc_connection *rq_connection;
184 struct obd_import *rq_import;
185 struct ptlrpc_service *rq_svc;
187 void (*rq_replay_cb)(struct ptlrpc_request *);
188 void *rq_replay_data;
/* DEBUG_REQ(level, req, fmt, ...): log @fmt together with the request's
 * xid, transno, opcode, peer uuid, request portal, req/rep lengths,
 * refcount and the three flag words.  Tolerates NULL rq_reqmsg/rq_repmsg/
 * rq_connection/rq_import.  NOTE(review): this listing elides the macro's
 * leading CDEBUG(...) line(s) (numbering gap 192-193) and its trailing
 * continuation after line 205 — the expansion below is incomplete. */
191 #define DEBUG_REQ(level, req, fmt, args...) \
194 "@@@ " fmt " req@%p x"LPD64"/t"LPD64" o%d->%s:%d lens %d/%d ref %d fl " \
195 "%x/%x/%x rc %x\n" , ## args, req, req->rq_xid, \
196 req->rq_reqmsg ? req->rq_reqmsg->transno : -1, \
197 req->rq_reqmsg ? req->rq_reqmsg->opc : -1, \
198 req->rq_connection ? \
199 (char *)req->rq_connection->c_remote_uuid.uuid : "<?>", \
200 (req->rq_import && req->rq_import->imp_client) ? \
201 req->rq_import->imp_client->cli_request_portal : -1, \
202 req->rq_reqlen, req->rq_replen, \
203 atomic_read (&req->rq_refcount), req->rq_flags, \
204 req->rq_reqmsg ? req->rq_reqmsg->flags : 0, \
205 req->rq_repmsg ? req->rq_repmsg->flags : 0, \
/* One page of a bulk transfer; bp_link presumably chains it into the
 * descriptor's bd_page_list — verify at the list-manipulation sites.
 * NOTE(review): some member lines and the closing brace are elided in this
 * listing. */
209 struct ptlrpc_bulk_page {
210 struct ptlrpc_bulk_desc *bp_desc;
211 struct list_head bp_link;
214 struct page *bp_page;
217 struct dentry *bp_dentry;
218 int (*bp_cb)(struct ptlrpc_bulk_page *);
/* Descriptor for one bulk data transfer: a list of ptlrpc_bulk_page plus
 * the Portals MD/ME handles for it.  Refcounted via bd_refcount (see
 * ptlrpc_bulk_addref/ptlrpc_bulk_decref).  NOTE(review): several member
 * lines, the #else branch of the kernel-version conditional, and the
 * closing brace are elided in this listing. */
222 struct ptlrpc_bulk_desc {
223 struct list_head bd_set_chain; /* entry in obd_brw_set */
224 struct obd_brw_set *bd_brw_set;
226 struct ptlrpc_connection *bd_connection;
227 struct ptlrpc_client *bd_client;
229 struct lustre_handle bd_conn;
230 void (*bd_ptl_ev_hdlr)(struct ptlrpc_bulk_desc *);
232 wait_queue_head_t bd_waitq;
233 struct list_head bd_page_list;
235 atomic_t bd_refcount;
236 void *bd_desc_private;
238 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
239 struct work_struct bd_queue;
241 struct tq_struct bd_queue;
245 ptl_handle_md_t bd_md_h;
246 ptl_handle_me_t bd_me_h;
248 atomic_t bd_source_callback_count;
250 struct iovec bd_iov[16]; /* self-sized pre-allocated iov */
/* Control block for one service thread, chained on srv_threads.
 * NOTE(review): member lines and the closing brace are elided in this
 * listing. */
253 struct ptlrpc_thread {
254 struct list_head t_link;
257 wait_queue_head_t t_ctl_waitq;
/* A posted incoming-request buffer belonging to a service (see
 * ptlrpc_link_svc_me).  NOTE(review): the closing brace (and possibly
 * further members) are elided in this listing. */
260 struct ptlrpc_request_buffer_desc {
261 struct list_head rqbd_list;
262 struct ptlrpc_service *rqbd_service;
263 ptl_handle_me_t rqbd_me_h;
264 atomic_t rqbd_refcount;
/* Server-side service: its request buffers, Portals request/reply portals,
 * and the pool of threads that run srv_handler on each incoming request.
 * NOTE(review): many member lines and the closing brace are elided in this
 * listing (gaps in the embedded numbering) — not the complete struct. */
268 struct ptlrpc_service {
272 /* incoming request buffers */
273 /* FIXME: perhaps a list of EQs, if multiple NIs are used? */
275 __u32 srv_max_req_size; /* biggest request to receive */
276 __u32 srv_buf_size; /* # bytes in a request buffer */
277 struct list_head srv_rqbds; /* all the request buffer descriptors */
278 __u32 srv_nrqbds; /* # request buffers */
279 atomic_t srv_nrqbds_receiving; /* # request buffers posted for input */
281 __u32 srv_req_portal;
282 __u32 srv_rep_portal;
287 ptl_handle_eq_t srv_eq_h;
289 struct lustre_peer srv_self;
291 wait_queue_head_t srv_waitq; /* all threads sleep on this */
294 struct list_head srv_threads;
295 int (*srv_handler)(struct ptlrpc_request *req);
296 char *srv_name; /* only statically allocated strings here; we don't clean them */
299 static inline void ptlrpc_hdl2req(struct ptlrpc_request *req,
300 struct lustre_handle *h)
302 req->rq_reqmsg->addr = h->addr;
303 req->rq_reqmsg->cookie = h->cookie;
/* Callback invoked when a bulk transfer completes. */
typedef void (*bulk_callback_t)(struct ptlrpc_bulk_desc *, void *);
/* Per-service request handler; returns 0 or a negative errno. */
typedef int (*svc_handler_t)(struct ptlrpc_request *req);

/* rpc/connection.c */
void ptlrpc_readdress_connection(struct ptlrpc_connection *, struct obd_uuid *uuid);
struct ptlrpc_connection *ptlrpc_get_connection(struct lustre_peer *peer,
                                                struct obd_uuid *uuid);
int ptlrpc_put_connection(struct ptlrpc_connection *c);
struct ptlrpc_connection *ptlrpc_connection_addref(struct ptlrpc_connection *);
void ptlrpc_init_connection(void);
void ptlrpc_cleanup_connection(void);
/* Bulk transfer and low-level send/receive helpers. */
int ptlrpc_check_bulk_sent(struct ptlrpc_bulk_desc *bulk);
int ptlrpc_check_bulk_received(struct ptlrpc_bulk_desc *bulk);
int ptlrpc_bulk_put(struct ptlrpc_bulk_desc *);
int ptlrpc_bulk_get(struct ptlrpc_bulk_desc *);
int ptlrpc_register_bulk_put(struct ptlrpc_bulk_desc *);
int ptlrpc_register_bulk_get(struct ptlrpc_bulk_desc *);
int ptlrpc_abort_bulk(struct ptlrpc_bulk_desc *bulk);
struct obd_brw_set *obd_brw_set_new(void);
void obd_brw_set_add(struct obd_brw_set *, struct ptlrpc_bulk_desc *);
void obd_brw_set_free(struct obd_brw_set *);

int ptlrpc_reply(struct ptlrpc_service *svc, struct ptlrpc_request *req);
int ptlrpc_error(struct ptlrpc_service *svc, struct ptlrpc_request *req);
void ptlrpc_resend_req(struct ptlrpc_request *request);
int ptl_send_rpc(struct ptlrpc_request *request);
void ptlrpc_link_svc_me(struct ptlrpc_request_buffer_desc *rqbd);
/* Client-side request lifecycle: setup, queueing, replay and teardown. */
void ptlrpc_init_client(int req_portal, int rep_portal, char *name,
                        struct ptlrpc_client *);
void ptlrpc_cleanup_client(struct obd_import *imp);
struct obd_uuid *ptlrpc_req_to_uuid(struct ptlrpc_request *req);
struct ptlrpc_connection *ptlrpc_uuid_to_connection(struct obd_uuid *uuid);

int ll_brw_sync_wait(struct obd_brw_set *, int phase);

int ptlrpc_queue_wait(struct ptlrpc_request *req);
void ptlrpc_continue_req(struct ptlrpc_request *req);
int ptlrpc_replay_req(struct ptlrpc_request *req);
void ptlrpc_restart_req(struct ptlrpc_request *req);
void ptlrpc_abort_inflight(struct obd_import *imp, int dying_import);

struct ptlrpc_request *ptlrpc_prep_req(struct obd_import *imp, int opcode,
                                       int count, int *lengths, char **bufs);
void ptlrpc_free_req(struct ptlrpc_request *request);
void ptlrpc_req_finished(struct ptlrpc_request *request);
struct ptlrpc_request *ptlrpc_request_addref(struct ptlrpc_request *req);
struct ptlrpc_bulk_desc *ptlrpc_prep_bulk(struct ptlrpc_connection *);
void ptlrpc_free_bulk(struct ptlrpc_bulk_desc *bulk);
struct ptlrpc_bulk_page *ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc);
void ptlrpc_free_bulk_page(struct ptlrpc_bulk_page *page);
void ptlrpc_retain_replayable_request(struct ptlrpc_request *req,
                                      struct obd_import *imp);
/* Service setup/teardown.  NOTE(review): the ptlrpc_start_thread prototype
 * below is missing its continuation line in this listing (numbering gap at
 * 371) — restore the missing parameter line before compiling. */
365 struct ptlrpc_service *
366 ptlrpc_init_svc(__u32 nevents, __u32 nbufs, __u32 bufsize, __u32 max_req_size,
367 int req_portal, int rep_portal,
368 struct obd_uuid *uuid, svc_handler_t, char *name);
369 void ptlrpc_stop_all_threads(struct ptlrpc_service *svc);
370 int ptlrpc_start_thread(struct obd_device *dev, struct ptlrpc_service *svc,
372 int ptlrpc_unregister_service(struct ptlrpc_service *service);
/* Argument bundle handed to a newly started service thread.
 * NOTE(review): at least one interior line (numbering gap at 375) and the
 * closing brace are elided in this listing. */
374 struct ptlrpc_svc_data {
376 struct ptlrpc_service *svc;
377 struct ptlrpc_thread *thread;
378 struct obd_device *dev;
/* rpc/pack_generic.c — wire-format (un)packing of lustre_msg buffers. */
int lustre_pack_msg(int count, int *lens, char **bufs, int *len,
                    struct lustre_msg **msg);
int lustre_msg_size(int count, int *lengths);
int lustre_unpack_msg(struct lustre_msg *m, int len);
void *lustre_msg_buf(struct lustre_msg *m, int n);
388 static inline void ptlrpc_bulk_decref(struct ptlrpc_bulk_desc *desc)
390 CDEBUG(D_PAGE, "%p -> %d\n", desc, atomic_read(&desc->bd_refcount) - 1);
392 if (atomic_dec_and_test(&desc->bd_refcount)) {
393 CDEBUG(D_PAGE, "Released last ref on %p, freeing\n", desc);
394 ptlrpc_free_bulk(desc);
398 static inline void ptlrpc_bulk_addref(struct ptlrpc_bulk_desc *desc)
400 atomic_inc(&desc->bd_refcount);
401 CDEBUG(D_PAGE, "Set refcount of %p to %d\n", desc,
402 atomic_read(&desc->bd_refcount));