1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (C) 2002 Cluster File Systems, Inc.
6 * This file is part of Lustre, http://www.lustre.org.
8 * Lustre is free software; you can redistribute it and/or
9 * modify it under the terms of version 2 of the GNU General Public
10 * License as published by the Free Software Foundation.
12 * Lustre is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with Lustre; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
26 #include <linux/tqueue.h>
27 #include <linux/kp30.h>
28 // #include <linux/obd.h>
29 #include <portals/p30.h>
30 #include <linux/lustre_idl.h>
31 #include <linux/lustre_ha.h>
32 #include <linux/lustre_import.h>
34 /* The following constants determine how much memory is devoted to
35 * buffering in the lustre services.
37 * ?_NEVENTS # event queue entries
39 * ?_NBUFS # request buffers
40 * ?_BUFSIZE # bytes in a single request buffer
41 * total memory = ?_NBUFS * ?_BUFSIZE
43 * ?_MAXREQSIZE # maximum request service will receive
44 * larger messages will get dropped.
45 * request buffers are auto-unlinked when less than ?_MAXREQSIZE is left in them.
/* Per-service buffering tuning constants (see the comment block above):
 *   ?_NEVENTS    - number of event queue entries
 *   ?_NBUFS      - number of request buffers
 *   ?_BUFSIZE    - bytes in a single request buffer
 *   ?_MAXREQSIZE - largest request the service will accept; larger
 *                  messages are dropped
 */
#define LDLM_NEVENTS    1024
#define LDLM_BUFSIZE    (64 * 1024)
#define LDLM_MAXREQSIZE 1024

#define MDS_NEVENTS     1024
#define MDS_BUFSIZE     (64 * 1024)
#define MDS_MAXREQSIZE  1024

/* NOTE(review): the OST constants were previously defined twice in a row
 * (first 1024 / 64KB / 8KB, then 4096 / 128KB / 8KB).  Redefining a macro
 * with a different replacement list requires a compiler diagnostic, so
 * only the later, larger set is kept here. */
#define OST_NEVENTS     4096
#define OST_BUFSIZE     (128 * 1024)
#define OST_MAXREQSIZE  (8 * 1024)
/* State shared with a single remote peer.  Connections are kept on a
 * global list (c_link) and hold per-peer recovery bookkeeping.
 * NOTE(review): this listing carries embedded source line numbers and
 * has gaps (e.g. original lines 76-77, 81-90 and the closing brace are
 * not visible), so additional fields likely exist. */
71 struct ptlrpc_connection {
72 struct list_head c_link;
/* identity of the remote peer this connection talks to */
73 struct lustre_peer c_peer;
74 __u8 c_local_uuid[37]; /* XXX do we need this? */
75 __u8 c_remote_uuid[37];
78 __u32 c_generation; /* changes upon new connection */
79 __u32 c_epoch; /* changes when peer changes */
80 __u32 c_bootcount; /* peer's boot count */
82 spinlock_t c_lock; /* also protects req->rq_list */
91 __u64 c_last_xid; /* protected by c_lock */
92 __u64 c_last_committed;/* protected by c_lock */
93 struct list_head c_delayed_head;/* delayed until post-recovery */
94 struct list_head c_sending_head;/* protected by c_lock */
95 struct list_head c_dying_head; /* protected by c_lock */
96 struct recovd_data c_recovd_data;
/* imports/exports hanging off this connection, and the superblock chain */
98 struct list_head c_imports;
99 struct list_head c_exports;
100 struct list_head c_sb_chain;
/* Client-side description of which portals a service is addressed on.
 * NOTE(review): closing brace and any intervening fields (original lines
 * 106, 108-109, 111+) are missing from this listing. */
103 struct ptlrpc_client {
104 __u32 cli_request_portal;
105 __u32 cli_reply_portal;
/* device number of the target — presumably used to address the remote
 * obd device; confirm against callers (not visible in this chunk) */
107 __u32 cli_target_devno;
110 // struct semaphore cli_rpc_sem; /* limits outstanding requests */
115 /* state flags of requests */
/* NOTE(review): uncommented flags below are documented only by their
 * names — confirm semantics against the code that sets/tests them
 * (not visible in this chunk). */
116 #define PTL_RPC_FL_INTR (1 << 0) /* presumably: request interrupted (signal) */
117 #define PTL_RPC_FL_REPLIED (1 << 1) /* reply was received */
118 #define PTL_RPC_FL_SENT (1 << 2) /* presumably: request has been sent */
119 #define PTL_BULK_FL_SENT (1 << 3) /* presumably: bulk data sent */
120 #define PTL_BULK_FL_RCVD (1 << 4) /* presumably: bulk data received */
121 #define PTL_RPC_FL_ERR (1 << 5) /* presumably: error occurred */
122 #define PTL_RPC_FL_TIMEOUT (1 << 6) /* presumably: request timed out */
123 #define PTL_RPC_FL_RESEND (1 << 7) /* presumably: request must be resent */
124 #define PTL_RPC_FL_RECOVERY (1 << 8) /* retransmission for recovery */
125 #define PTL_RPC_FL_FINISHED (1 << 9) /* presumably: processing complete */
126 #define PTL_RPC_FL_RETAIN (1 << 10) /* retain for replay after reply */
127 #define PTL_RPC_FL_REPLAY (1 << 11) /* replay upon recovery */
128 #define PTL_RPC_FL_ALLOCREP (1 << 12) /* reply buffer allocated */
/* One in-flight RPC: the request/reply message buffers, the Portals
 * handles used to send the request and receive the reply, and links to
 * the import/export/connection it travels over.
 * NOTE(review): the embedded line numbers show gaps (135-137, 139-140,
 * 142-143, 145-153, etc.) — several fields and the closing brace are not
 * visible in this listing. */
130 struct ptlrpc_request {
131 int rq_type; /* one of PTL_RPC_MSG_* */
132 struct list_head rq_list;
133 struct list_head rq_multi;
134 struct obd_device *rq_obd;
138 atomic_t rq_refcount;
/* outgoing request message buffer */
141 struct lustre_msg *rq_reqmsg;
/* incoming reply message buffer */
144 struct lustre_msg *rq_repmsg;
154 // void * rq_reply_handle;
/* waiters blocked until the reply arrives */
155 wait_queue_head_t rq_wait_for_rep;
/* Portals descriptors/handles for the posted reply buffer */
158 ptl_md_t rq_reply_md;
159 ptl_handle_md_t rq_reply_md_h;
160 ptl_handle_me_t rq_reply_me_h;
162 /* outgoing req/rep */
164 ptl_handle_md_t rq_req_md_h;
166 struct lustre_peer rq_peer; /* XXX see service.c can this be factored away? */
167 struct obd_export *rq_export;
168 struct ptlrpc_connection *rq_connection;
169 struct obd_import *rq_import;
170 struct ptlrpc_service *rq_svc;
/* callback invoked on replay — presumably during recovery; see
 * ptlrpc_replay_req (declaration below) */
172 void (*rq_replay_cb)(struct ptlrpc_request *, void *);
173 void *rq_replay_cb_data;
/* A single page belonging to a bulk transfer descriptor; kept on the
 * descriptor's bd_page_list via bp_link.
 * NOTE(review): original lines 179-180, 182-183 and the closing brace
 * are missing from this listing. */
176 struct ptlrpc_bulk_page {
177 struct ptlrpc_bulk_desc *bp_desc;
178 struct list_head bp_link;
181 struct page *bp_page;
/* dentry associated with this page — purpose not visible here; confirm */
184 struct dentry *bp_dentry;
/* per-page completion callback */
185 int (*bp_cb)(struct ptlrpc_bulk_page *);
/* Descriptor for a bulk (page-based) data transfer: the pages to move,
 * the Portals handles for the transfer, and completion callbacks.
 * Reference-counted via bd_refcount (see ptlrpc_bulk_addref/decref at
 * the bottom of this file).
 * NOTE(review): gaps in the embedded numbering (189, 192, 195-196, 199,
 * 203-204, 207, 209, closing brace) mean fields are missing here. */
188 struct ptlrpc_bulk_desc {
190 struct ptlrpc_connection *bd_connection;
191 struct ptlrpc_client *bd_client;
193 struct lustre_handle bd_conn;
/* completion callback, invoked with bd_cb_data — confirm against rpc.c */
194 void (*bd_cb)(struct ptlrpc_bulk_desc *, void *);
197 wait_queue_head_t bd_waitq;
/* list of ptlrpc_bulk_page entries in this transfer */
198 struct list_head bd_page_list;
200 atomic_t bd_refcount;
201 void *bd_desc_private;
/* task-queue entry — presumably for deferred completion work */
202 struct tq_struct bd_queue;
/* Portals MD/ME handles for the bulk buffers */
205 ptl_handle_md_t bd_md_h;
206 ptl_handle_me_t bd_me_h;
208 atomic_t bd_source_callback_count;
210 struct iovec bd_iov[16]; /* self-sized pre-allocated iov */
/* One service thread; linked on the service's srv_threads list.
 * NOTE(review): original lines 215-216, 218+ and the closing brace are
 * missing from this listing. */
213 struct ptlrpc_thread {
214 struct list_head t_link;
/* waitqueue used to start/stop the thread (see ptlrpc_start_thread /
 * ptlrpc_stop_all_threads below) */
217 wait_queue_head_t t_ctl_waitq;
/* One posted incoming-request buffer for a service; kept on the
 * service's srv_rqbds list and re-linked via ptlrpc_link_svc_me().
 * NOTE(review): lines after 224 (including the closing brace) are not
 * visible in this listing. */
220 struct ptlrpc_request_buffer_desc {
221 struct list_head rqbd_list;
222 struct ptlrpc_service *rqbd_service;
/* Portals match-entry handle for the posted buffer */
223 ptl_handle_me_t rqbd_me_h;
224 atomic_t rqbd_refcount;
/* Server-side service: the pool of posted request buffers, the portals
 * it listens/replies on, its event queue, and its worker threads.
 * NOTE(review): embedded numbering shows gaps (229-231, 234, 240,
 * 243-246, 248, 250, 252-253, 257+, closing brace) — fields missing. */
228 struct ptlrpc_service {
232 /* incoming request buffers */
233 /* FIXME: perhaps a list of EQs, if multiple NIs are used? */
235 __u32 srv_max_req_size; /* biggest request to receive */
236 __u32 srv_buf_size; /* # bytes in a request buffer */
237 struct list_head srv_rqbds; /* all the request buffer descriptors */
238 __u32 srv_nrqbds; /* # request buffers */
239 atomic_t srv_nrqbds_receiving; /* # request buffers posted for input */
/* portals for incoming requests and outgoing replies */
241 __u32 srv_req_portal;
242 __u32 srv_rep_portal;
/* event queue handle for request arrival events */
247 ptl_handle_eq_t srv_eq_h;
/* this node's own peer identity */
249 struct lustre_peer srv_self;
251 wait_queue_head_t srv_waitq; /* all threads sleep on this */
/* list of ptlrpc_thread workers, and the request handler they run */
254 struct list_head srv_threads;
255 int (*srv_handler)(struct ptlrpc_request *req);
256 char *srv_name; /* only statically allocated strings here; we don't clean them */
/* Copy a lustre_handle's addr/cookie pair into the outgoing request
 * message (req->rq_reqmsg must already be allocated).
 * NOTE(review): the function's opening/closing braces fall on original
 * lines 260/263, which are missing from this listing. */
259 static inline void ptlrpc_hdl2req(struct ptlrpc_request *req, struct lustre_handle *h)
261 req->rq_reqmsg->addr = h->addr;
262 req->rq_reqmsg->cookie = h->cookie;
/* Callback invoked when a bulk transfer completes. */
265 typedef void (*bulk_callback_t)(struct ptlrpc_bulk_desc *, void *);
/* Service request handler; return value convention not visible here. */
267 typedef int (*svc_handler_t)(struct ptlrpc_request *req);
269 /* rpc/connection.c */
270 void ptlrpc_readdress_connection(struct ptlrpc_connection *conn, obd_uuid_t uuid);
/* NOTE(review): second parameter of ptlrpc_get_connection (original line
 * 272) is missing from this listing. */
271 struct ptlrpc_connection *ptlrpc_get_connection(struct lustre_peer *peer,
273 int ptlrpc_put_connection(struct ptlrpc_connection *c);
274 struct ptlrpc_connection *ptlrpc_connection_addref(struct ptlrpc_connection *);
275 void ptlrpc_init_connection(void);
276 void ptlrpc_cleanup_connection(void);
/* rpc/rpc.c — bulk transfer and request send/reply primitives
 * (section comment on original line 278 not visible here) */
279 int ptlrpc_check_bulk_sent(struct ptlrpc_bulk_desc *bulk);
280 int ptlrpc_check_bulk_received(struct ptlrpc_bulk_desc *bulk);
281 int ptlrpc_send_bulk(struct ptlrpc_bulk_desc *);
282 int ptlrpc_register_bulk(struct ptlrpc_bulk_desc *);
283 int ptlrpc_abort_bulk(struct ptlrpc_bulk_desc *bulk);
284 int ptlrpc_reply(struct ptlrpc_service *svc, struct ptlrpc_request *req);
285 int ptlrpc_error(struct ptlrpc_service *svc, struct ptlrpc_request *req);
286 void ptlrpc_resend_req(struct ptlrpc_request *request);
287 int ptl_send_rpc(struct ptlrpc_request *request);
288 void ptlrpc_link_svc_me(struct ptlrpc_request_buffer_desc *rqbd);
/* rpc/client.c — client setup, request lifecycle, bulk prep */
291 void ptlrpc_init_client(int req_portal, int rep_portal, char *name,
292 struct ptlrpc_client *);
293 void ptlrpc_cleanup_client(struct obd_import *imp);
294 __u8 *ptlrpc_req_to_uuid(struct ptlrpc_request *req);
295 struct ptlrpc_connection *ptlrpc_uuid_to_connection(obd_uuid_t uuid);
297 int ptlrpc_queue_wait(struct ptlrpc_request *req);
298 void ptlrpc_continue_req(struct ptlrpc_request *req);
299 int ptlrpc_replay_req(struct ptlrpc_request *req);
300 void ptlrpc_restart_req(struct ptlrpc_request *req);
302 struct ptlrpc_request *ptlrpc_prep_req(struct obd_import *imp, int opcode,
303 int count, int *lengths, char **bufs);
304 void ptlrpc_free_req(struct ptlrpc_request *request);
305 void ptlrpc_req_finished(struct ptlrpc_request *request);
306 struct ptlrpc_bulk_desc *ptlrpc_prep_bulk(struct ptlrpc_connection *);
307 void ptlrpc_free_bulk(struct ptlrpc_bulk_desc *bulk);
308 struct ptlrpc_bulk_page *ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc);
309 void ptlrpc_free_bulk_page(struct ptlrpc_bulk_page *page);
310 int ptlrpc_check_status(struct ptlrpc_request *req, int err);
/* rpc/service.c — service creation and thread management
 * (section comment, if any, falls in lines not visible here) */
313 struct ptlrpc_service *
314 ptlrpc_init_svc(__u32 nevents, __u32 nbufs, __u32 bufsize, __u32 max_req_size,
315 int req_portal, int rep_portal,
316 obd_uuid_t uuid, svc_handler_t, char *name);
317 void ptlrpc_stop_all_threads(struct ptlrpc_service *svc);
/* NOTE(review): trailing parameter(s) of ptlrpc_start_thread (original
 * line 319) are missing from this listing. */
318 int ptlrpc_start_thread(struct obd_device *dev, struct ptlrpc_service *svc,
320 int ptlrpc_unregister_service(struct ptlrpc_service *service);
/* Arguments handed to a newly started service thread.
 * NOTE(review): original lines 323, 327+ (closing brace) not visible. */
322 struct ptlrpc_svc_data {
324 struct ptlrpc_service *svc;
325 struct ptlrpc_thread *thread;
326 struct obd_device *dev;
329 /* rpc/pack_generic.c */
330 int lustre_pack_msg(int count, int *lens, char **bufs, int *len,
331 struct lustre_msg **msg);
332 int lustre_msg_size(int count, int *lengths);
333 int lustre_unpack_msg(struct lustre_msg *m, int len);
334 void *lustre_msg_buf(struct lustre_msg *m, int n);
/* Drop one reference on a bulk descriptor; frees it via
 * ptlrpc_free_bulk() when the count reaches zero, otherwise logs the
 * remaining count.
 * NOTE(review): the opening brace, the else-branch line (original 341)
 * and the closing braces are missing from this listing. */
336 static inline void ptlrpc_bulk_decref(struct ptlrpc_bulk_desc *desc)
338 if (atomic_dec_and_test(&desc->bd_refcount)) {
339 CDEBUG(D_PAGE, "Released last ref on %p, freeing\n", desc);
340 ptlrpc_free_bulk(desc);
342 CDEBUG(D_PAGE, "%p -> %d\n", desc,
343 atomic_read(&desc->bd_refcount));
/* Take one reference on a bulk descriptor and log the new count.
 * Pairs with ptlrpc_bulk_decref() above.
 * NOTE(review): the function's braces (original lines 348, 352) are
 * missing from this listing. */
347 static inline void ptlrpc_bulk_addref(struct ptlrpc_bulk_desc *desc)
349 atomic_inc(&desc->bd_refcount);
350 CDEBUG(D_PAGE, "Set refcount of %p to %d\n", desc,
351 atomic_read(&desc->bd_refcount));