/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (C) 2002, 2003 Cluster File Systems, Inc.
 *
 * This file is part of Lustre, http://www.lustre.org.
 *
 * Lustre is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Lustre is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Lustre; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/version.h>
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
#include <linux/tqueue.h>
#else
#include <linux/workqueue.h>
#endif
#include <libcfs/kp30.h>
// #include <linux/obd.h>
#include <lnet/lnet.h>
#include <linux/lustre_idl.h>
#include <linux/lustre_ha.h>
#include <linux/lustre_import.h>
#include <linux/lprocfs_status.h>
/* MD flags we _always_ use */
#define PTLRPC_MD_OPTIONS  0

/* Define maxima for bulk I/O
 * CAVEAT EMPTOR, with multinet (i.e. routers forwarding between networks)
 * these limits are system wide and not interface-local. */
#define PTLRPC_MAX_BRW_SIZE     LNET_MTU
#define PTLRPC_MAX_BRW_PAGES    (PTLRPC_MAX_BRW_SIZE/PAGE_SIZE)
/* When PAGE_SIZE is a constant, we can check our arithmetic here with cpp! */
#ifdef __KERNEL__
# if ((PTLRPC_MAX_BRW_PAGES & (PTLRPC_MAX_BRW_PAGES - 1)) != 0)
#  error "PTLRPC_MAX_BRW_PAGES isn't a power of two"
# endif
# if (PTLRPC_MAX_BRW_SIZE != (PTLRPC_MAX_BRW_PAGES * PAGE_SIZE))
#  error "PTLRPC_MAX_BRW_SIZE isn't PTLRPC_MAX_BRW_PAGES * PAGE_SIZE"
# endif
# if (PTLRPC_MAX_BRW_SIZE > LNET_MTU)
#  error "PTLRPC_MAX_BRW_SIZE too big"
# endif
# if (PTLRPC_MAX_BRW_PAGES > LNET_MAX_IOV)
#  error "PTLRPC_MAX_BRW_PAGES too big"
# endif
#endif /* __KERNEL__ */
/* Size over which to OBD_VMALLOC() rather than OBD_ALLOC() service request
 * buffers */
#define SVC_BUF_VMALLOC_THRESHOLD (2 * PAGE_SIZE)
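/*
 * Illustrative sketch of how a service might use the threshold above to pick
 * an allocator for a request buffer.  The helper name svc_buf_alloc() is
 * hypothetical; OBD_ALLOC()/OBD_VMALLOC() are the usual obd_support.h macros.
 */
#if 0
static void *svc_buf_alloc(int size)
{
        void *buf;

        if (size > SVC_BUF_VMALLOC_THRESHOLD)
                OBD_VMALLOC(buf, size);   /* large buffer: vmalloc-backed */
        else
                OBD_ALLOC(buf, size);     /* small buffer: kmalloc-backed */
        return buf;
}
#endif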
/* The following constants determine how memory is used to buffer incoming
 * service requests.
 *
 * ?_NBUFS        # buffers to allocate when growing the pool
 * ?_BUFSIZE      # bytes in a single request buffer
 * ?_MAXREQSIZE   # maximum request service will receive
 *
 * When fewer than ?_NBUFS/2 buffers are posted for receive, another chunk
 * of ?_NBUFS is added to the pool.
 *
 * Messages larger than ?_MAXREQSIZE are dropped.  Request buffers are
 * considered full when less than ?_MAXREQSIZE is left in them.
 */
#define LDLM_NUM_THREADS min((int)(smp_num_cpus * smp_num_cpus * 8), 64)
#define LDLM_BUFSIZE    (8 * 1024)
#define LDLM_MAXREQSIZE (5 * 1024)
#define LDLM_MAXREPSIZE (1024)
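/* MDT_NUM_THREADS below works out to roughly one service thread per 32MB of
 * physical memory (num_physpages >> (25 - PAGE_SHIFT)), clamped to the range
 * [2, MDT_MAX_THREADS]. */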
#define MDT_MAX_THREADS 32UL
#define MDT_NUM_THREADS max(min_t(unsigned long, MDT_MAX_THREADS, \
                            num_physpages >> (25 - PAGE_SHIFT)), 2UL)
#define MDS_NBUFS       (64 * smp_num_cpus)
#define MDS_BUFSIZE     (8 * 1024)
/* Assume file name length = FNAME_MAX = 256 (true for ext3).
 *        path name length = PATH_MAX = 4096
 *        LOV MD size max  = EA_MAX = 4000
 * symlink:  FNAME_MAX + PATH_MAX  <- largest
 * link:     FNAME_MAX + PATH_MAX  (mds_rec_link < mds_rec_create)
 * rename:   FNAME_MAX + FNAME_MAX
 * open:     FNAME_MAX + EA_MAX
 *
 * MDS_MAXREQSIZE ~= 4736 bytes =
 * lustre_msg + ldlm_request + mds_body + mds_rec_create + FNAME_MAX + PATH_MAX
 * MDS_MAXREPSIZE ~= 8300 bytes = lustre_msg + llog_header
 * or, for mds_close() and mds_reint_unlink() on a many-OST filesystem:
 *                = 9210 bytes = lustre_msg + mds_body + 160 * (easize + cookiesize)
 *
 * Realistic size is about 512 bytes (20 character name + 128 char symlink),
 * except in the open case where there are a large number of OSTs in a LOV.
 */
#define MDS_MAXREQSIZE  (5 * 1024)
#define MDS_MAXREPSIZE  max(9 * 1024, 280 + LOV_MAX_STRIPE_COUNT * 56)
/* FIXME fix all constants here */
#define MGS_MAX_THREADS 32UL
#define MGS_NUM_THREADS max(min_t(unsigned long, num_physpages / 8192, \
                            MGS_MAX_THREADS), 2UL)
#define MGS_NBUFS       (64 * smp_num_cpus)
#define MGS_BUFSIZE     (8 * 1024)
#define MGS_MAXREQSIZE  (5 * 1024)
#define MGS_MAXREPSIZE  (9 * 1024)
#define OST_MAX_THREADS 512UL
#define OST_DEF_THREADS max_t(unsigned long, 2, \
                              (num_physpages >> (26 - PAGE_SHIFT)) * smp_num_cpus)
#define OST_NBUFS       (64 * smp_num_cpus)
#define OST_BUFSIZE     (8 * 1024)

/* OST_MAXREQSIZE ~= 4768 bytes =
 * lustre_msg + obdo + 16 * obd_ioobj + 256 * niobuf_remote
 *
 * - single object with 16 pages is 512 bytes
 * - OST_MAXREQSIZE must be at least 1 page of cookies plus some spillover */
#define OST_MAXREQSIZE  (5 * 1024)
#define OST_MAXREPSIZE  (9 * 1024)
struct ptlrpc_connection {
        struct list_head      c_link;
        lnet_process_id_t     c_peer;
        struct obd_uuid       c_remote_uuid;
};

struct ptlrpc_client {
        __u32                 cli_request_portal;
        __u32                 cli_reply_portal;
};
/* state flags of requests */
/* XXX only ones left are those used by the bulk descs as well! */
#define PTL_RPC_FL_INTR      (1 << 0)  /* reply wait was interrupted by user */
#define PTL_RPC_FL_TIMEOUT   (1 << 7)  /* request timed out waiting for reply */

#define REQ_MAX_ACK_LOCKS 8
#define SWAB_PARANOIA 1

#if SWAB_PARANOIA
/* unpacking: assert idx not unpacked already */
#define LASSERT_REQSWAB(rq, idx)                                \
do {                                                            \
        LASSERT ((idx) < sizeof ((rq)->rq_req_swab_mask) * 8);  \
        LASSERT (((rq)->rq_req_swab_mask & (1 << (idx))) == 0); \
        (rq)->rq_req_swab_mask |= (1 << (idx));                 \
} while (0)

#define LASSERT_REPSWAB(rq, idx)                                \
do {                                                            \
        LASSERT ((idx) < sizeof ((rq)->rq_rep_swab_mask) * 8);  \
        LASSERT (((rq)->rq_rep_swab_mask & (1 << (idx))) == 0); \
        (rq)->rq_rep_swab_mask |= (1 << (idx));                 \
} while (0)

/* just looking: assert idx already unpacked */
#define LASSERT_REQSWABBED(rq, idx)                             \
        LASSERT ((idx) < sizeof ((rq)->rq_req_swab_mask) * 8 && \
                 ((rq)->rq_req_swab_mask & (1 << (idx))) != 0)

#define LASSERT_REPSWABBED(rq, idx)                             \
        LASSERT ((idx) < sizeof ((rq)->rq_rep_swab_mask) * 8 && \
                 ((rq)->rq_rep_swab_mask & (1 << (idx))) != 0)
#else
/* paranoia compiled out: the checks become no-ops */
#define LASSERT_REQSWAB(rq, idx)
#define LASSERT_REPSWAB(rq, idx)
#define LASSERT_REQSWABBED(rq, idx)
#define LASSERT_REPSWABBED(rq, idx)
#endif
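/*
 * Illustrative sketch of how the paranoia macros above pair up; the function
 * name and the "idx" argument are hypothetical, everything else follows
 * directly from the macro definitions.
 */
#if 0
static void my_unpack_example(struct ptlrpc_request *req, int idx)
{
        LASSERT_REQSWAB(req, idx);      /* asserts buffer idx not unpacked yet,
                                         * then marks it in rq_req_swab_mask */
        /* ... byte-swap / validate request buffer idx here ... */

        LASSERT_REQSWABBED(req, idx);   /* later readers: assert already unpacked */
}
#endif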
union ptlrpc_async_args {
        /* Scratchpad for passing args to completion interpreter. Users
         * cast to the struct of their choosing, and LASSERT that this is
         * big enough.  For _tons_ of context, OBD_ALLOC a struct and store
         * a pointer to it here.  The pointer_arg ensures this struct is at
         * least big enough for that. */
        void *pointer_arg[9];
};
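/*
 * Illustrative sketch of the cast-and-LASSERT pattern described above.  The
 * struct, field and callback names are hypothetical, and the interpreter
 * signature used here is an assumption (the header stores rq_interpret_reply
 * as a bare void *).
 */
#if 0
struct my_async_args {
        struct obd_import *aa_imp;
        int                aa_flags;
};

static int my_interpret(struct ptlrpc_request *req, void *data, int rc)
{
        struct my_async_args *aa = (void *)&req->rq_async_args;

        LASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
        /* finish processing using aa->aa_imp / aa->aa_flags */
        return rc;
}
#endif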
struct ptlrpc_request_set;
typedef int (*set_interpreter_func)(struct ptlrpc_request_set *, void *, int);

struct ptlrpc_request_set {
        int                   set_remaining;  /* # uncompleted requests */
        wait_queue_head_t     set_waitq;
        wait_queue_head_t    *set_wakeup_ptr;
        struct list_head      set_requests;
        set_interpreter_func  set_interpret;  /* completion callback */
        void                 *set_arg;        /* completion context */
        /* locked so that any old caller can communicate requests to
         * the set holder who can then fold them into the lock-free set */
        spinlock_t            set_new_req_lock;
        struct list_head      set_new_requests;
};
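/*
 * Illustrative sketch: driving a request through a request set and collecting
 * the completion via an interpret callback.  my_interpret() and the prepared
 * request are assumptions; the ptlrpc_set_* calls are the ones declared later
 * in this header.
 */
#if 0
static int my_send_batch(struct ptlrpc_request *req)
{
        struct ptlrpc_request_set *set;
        int rc;

        set = ptlrpc_prep_set();
        if (set == NULL)
                return -ENOMEM;
        req->rq_interpret_reply = my_interpret;  /* per-request completion hook */
        ptlrpc_set_add_req(set, req);            /* may be repeated for many reqs */
        rc = ptlrpc_set_wait(set);               /* send everything, wait for all */
        ptlrpc_set_destroy(set);
        return rc;
}
#endif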
struct ptlrpc_bulk_desc;

/*
 * ptlrpc callback & work item stuff
 */
struct ptlrpc_cb_id {
        void (*cbid_fn)(lnet_event_t *ev);  /* specific callback fn */
        void  *cbid_arg;                    /* additional arg */
};

#define RS_MAX_LOCKS 4
struct ptlrpc_reply_state {
        struct ptlrpc_cb_id    rs_cb_id;
        struct list_head       rs_list;
        struct list_head       rs_exp_list;
        struct list_head       rs_obd_list;
        struct list_head       rs_debug_list;

        /* updates to following flag serialised by srv_request_lock */
        unsigned int           rs_difficult:1;      /* ACK/commit stuff */
        unsigned int           rs_scheduled:1;      /* being handled? */
        unsigned int           rs_scheduled_ever:1; /* any schedule attempts? */
        unsigned int           rs_handled:1;        /* been handled yet? */
        unsigned int           rs_on_net:1;         /* reply_out_callback pending? */
        unsigned int           rs_prealloc:1;       /* rs from prealloc list */

        struct obd_export     *rs_export;
        struct ptlrpc_service *rs_service;
        lnet_handle_md_t       rs_md_h;
        atomic_t               rs_refcount;

        /* locks awaiting client reply ACK */
        struct lustre_handle   rs_locks[RS_MAX_LOCKS];
        ldlm_mode_t            rs_modes[RS_MAX_LOCKS];
        /* last member: variable sized reply message */
        struct lustre_msg      rs_msg;
};
struct ptlrpc_thread;

enum rq_phase {
        RQ_PHASE_NEW        = 0xebc0de00,
        RQ_PHASE_RPC        = 0xebc0de01,
        RQ_PHASE_BULK       = 0xebc0de02,
        RQ_PHASE_INTERPRET  = 0xebc0de03,
        RQ_PHASE_COMPLETE   = 0xebc0de04,
};
struct ptlrpc_request_pool {
        struct list_head prp_req_list;  /* list of ptlrpc_request structs */
        void (*prp_populate)(struct ptlrpc_request_pool *, int);
};
struct ptlrpc_request {
        int rq_type;                      /* one of PTL_RPC_MSG_* */
        struct list_head rq_list;
        struct list_head rq_history_list; /* server-side history */
        __u64            rq_history_seq;  /* history sequence # */

        spinlock_t rq_lock;

        /* client-side flags */
        unsigned int rq_intr:1, rq_replied:1, rq_err:1,
                rq_timedout:1, rq_resend:1, rq_restart:1,
                /*
                 * when ->rq_replay is set, request is kept by the client even
                 * after server commits corresponding transaction. This is
                 * used for operations that require sequence of multiple
                 * requests to be replayed. The only example currently is file
                 * open/close. When last request in such a sequence is
                 * committed, ->rq_replay is cleared on all requests in the
                 * whole sequence.
                 */
                rq_replay:1,
                rq_no_resend:1, rq_waiting:1, rq_receiving_reply:1,
                rq_no_delay:1, rq_net_err:1;
        enum rq_phase rq_phase;           /* one of RQ_PHASE_* */
        atomic_t      rq_refcount;        /* client-side refcount for SENT race */

        struct ptlrpc_thread *rq_svc_thread; /* initial thread servicing req */

        int rq_request_portal;            /* XXX FIXME bug 249 */
        int rq_reply_portal;              /* XXX FIXME bug 249 */

        int rq_nob_received; /* client-side # reply bytes actually received */

        struct lustre_msg *rq_reqmsg;

        int rq_timeout;                   /* time to wait for reply (seconds) */

        struct lustre_msg *rq_repmsg;

        struct list_head rq_replay_list;

        __u32 rq_req_swab_mask;
        __u32 rq_rep_swab_mask;

        int rq_import_generation;
        enum lustre_imp_state rq_send_state;

        /* client+server request */
        lnet_handle_md_t     rq_req_md_h;
        struct ptlrpc_cb_id  rq_req_cbid;

        struct timeval       rq_arrival_time;        /* request arrival time */
        struct ptlrpc_reply_state *rq_reply_state;   /* separated reply state */
        struct ptlrpc_request_buffer_desc *rq_rqbd;  /* incoming request buffer*/

        __u32 rq_uid;                     /* peer uid, used in MDS only */

        /* client-only incoming reply */
        lnet_handle_md_t     rq_reply_md_h;
        wait_queue_head_t    rq_reply_waitq;
        struct ptlrpc_cb_id  rq_reply_cbid;

        lnet_process_id_t    rq_peer;
        struct obd_export   *rq_export;
        struct obd_import   *rq_import;

        void (*rq_replay_cb)(struct ptlrpc_request *);
        void (*rq_commit_cb)(struct ptlrpc_request *);

        struct ptlrpc_bulk_desc *rq_bulk; /* client side bulk */
        time_t rq_sent;                   /* when request sent, seconds */

        struct list_head rq_set_chain;
        struct ptlrpc_request_set *rq_set;
        void *rq_interpret_reply;               /* Async completion handler */
        union ptlrpc_async_args rq_async_args;  /* Async completion context */
        void *rq_ptlrpcd_data;
        struct ptlrpc_request_pool *rq_pool;    /* Pool if request from
                                                   preallocated list */
};
static inline const char *
ptlrpc_rqphase2str(struct ptlrpc_request *req)
{
        switch (req->rq_phase) {
        case RQ_PHASE_NEW:        return "New";
        case RQ_PHASE_RPC:        return "Rpc";
        case RQ_PHASE_BULK:       return "Bulk";
        case RQ_PHASE_INTERPRET:  return "Interpret";
        case RQ_PHASE_COMPLETE:   return "Complete";
        default:                  return "?Phase?";
        }
}
/* Spare the preprocessor, spoil the bugs. */
#define FLAG(field, str) (field ? str : "")

#define DEBUG_REQ_FLAGS(req)                                                   \
        ptlrpc_rqphase2str(req),                                               \
        FLAG(req->rq_intr, "I"), FLAG(req->rq_replied, "R"),                   \
        FLAG(req->rq_err, "E"),                                                \
        FLAG(req->rq_timedout, "X") /* eXpired */, FLAG(req->rq_resend, "S"),  \
        FLAG(req->rq_restart, "T"), FLAG(req->rq_replay, "P"),                 \
        FLAG(req->rq_no_resend, "N"),                                          \
        FLAG(req->rq_waiting, "W")

#define REQ_FLAGS_FMT "%s:%s%s%s%s%s%s%s%s%s"
#define __DEBUG_REQ(CDEB_TYPE, level, req, fmt, args...)                       \
CDEB_TYPE(level, "@@@ " fmt                                                    \
       " req@%p x"LPD64"/t"LPD64" o%d->%s@%s:%d lens %d/%d ref %d fl "         \
       REQ_FLAGS_FMT"/%x/%x rc %d/%d\n" , ## args, req, req->rq_xid,           \
       req->rq_transno,                                                        \
       req->rq_reqmsg ? req->rq_reqmsg->opc : -1,                              \
       req->rq_import ? (char *)req->rq_import->imp_target_uuid.uuid : "<?>",  \
       (req->rq_import && req->rq_import->imp_connection) ?                    \
       (char *)req->rq_import->imp_connection->c_remote_uuid.uuid : "<?>",     \
       (req->rq_import && req->rq_import->imp_client) ?                        \
       req->rq_import->imp_client->cli_request_portal : -1,                    \
       req->rq_reqlen, req->rq_replen,                                         \
       atomic_read(&req->rq_refcount),                                         \
       DEBUG_REQ_FLAGS(req),                                                   \
       req->rq_reqmsg ? req->rq_reqmsg->flags : 0,                             \
       req->rq_repmsg ? req->rq_repmsg->flags : 0,                             \
       req->rq_status, req->rq_repmsg ? req->rq_repmsg->status : 0)
/* for most callers (level is a constant) this is resolved at compile time */
#define DEBUG_REQ(level, req, fmt, args...)                                    \
do {                                                                           \
        if ((level) & (D_ERROR | D_WARNING))                                   \
                __DEBUG_REQ(CDEBUG_LIMIT, level, req, fmt, ## args);           \
        else                                                                   \
                __DEBUG_REQ(CDEBUG, level, req, fmt, ## args);                 \
} while (0)
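/*
 * Typical call site (illustrative), e.g. from a service handler:
 *
 *      DEBUG_REQ(D_ERROR, req, "unpack failed: rc %d", rc);
 *
 * D_ERROR comes from libcfs; "rc" is whatever error the caller is reporting.
 */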
struct ptlrpc_bulk_page {
        struct list_head bp_link;
        int              bp_pageoffset;  /* offset within a page */
        struct page     *bp_page;
};

#define BULK_GET_SOURCE   0
#define BULK_PUT_SINK     1
#define BULK_GET_SINK     2
#define BULK_PUT_SOURCE   3
struct ptlrpc_bulk_desc {
        unsigned int bd_success:1;       /* completed successfully */
        unsigned int bd_network_rw:1;    /* accessible to the network */
        unsigned int bd_type:2;          /* {put,get}{source,sink} */
        unsigned int bd_registered:1;    /* client side */
        spinlock_t   bd_lock;            /* serialise with callback */
        int bd_import_generation;
        struct obd_export *bd_export;
        struct obd_import *bd_import;

        struct ptlrpc_request *bd_req;   /* associated request */
        wait_queue_head_t      bd_waitq; /* server side only WQ */
        int bd_iov_count;                /* # entries in bd_iov */
        int bd_max_iov;                  /* allocated size of bd_iov */
        int bd_nob;                      /* # bytes covered */
        int bd_nob_transferred;          /* # bytes GOT/PUT */

        struct ptlrpc_cb_id bd_cbid;     /* network callback info */
        lnet_handle_md_t    bd_md_h;     /* associated MD */

#if defined(__KERNEL__)
        lnet_kiov_t      bd_iov[0];
#else
        lnet_md_iovec_t  bd_iov[0];
#endif
};
struct ptlrpc_thread {
        struct list_head  t_link; /* active threads for service, from svc->srv_threads */
        void             *t_data; /* thread-private data (preallocated memory) */
        unsigned int      t_id;   /* service thread index, from ptlrpc_start_threads */
        wait_queue_head_t t_ctl_waitq;
};
struct ptlrpc_request_buffer_desc {
        struct list_head       rqbd_list;
        struct list_head       rqbd_reqs;
        struct ptlrpc_service *rqbd_service;
        lnet_handle_md_t       rqbd_md_h;
        struct ptlrpc_cb_id    rqbd_cbid;
        struct ptlrpc_request  rqbd_req;
};
typedef int (*svc_handler_t)(struct ptlrpc_request *req);
typedef void (*svcreq_printfn_t)(void *, struct ptlrpc_request *);
struct ptlrpc_service {
        struct list_head srv_list;            /* chain thru all services */
        int              srv_max_req_size;    /* biggest request to receive */
        int              srv_max_reply_size;  /* biggest reply to send */
        int              srv_buf_size;        /* size of individual buffers */
        int              srv_nbuf_per_group;  /* # buffers to allocate in 1 group */
        int              srv_nbufs;           /* total # req buffer descs allocated */
        int              srv_nthreads;        /* # running threads */
        int              srv_n_difficult_replies; /* # 'difficult' replies */
        int              srv_n_active_reqs;   /* # reqs being served */
        int              srv_rqbd_timeout;    /* timeout before re-posting reqs */
        int              srv_watchdog_timeout;/* soft watchdog timeout, in ms */
        int              srv_num_threads;     /* # threads to start/started */
        unsigned         srv_cpu_affinity:1;  /* bind threads to CPUs */

        __u32 srv_req_portal;
        __u32 srv_rep_portal;

        int              srv_n_queued_reqs;   /* # reqs waiting to be served */
        struct list_head srv_request_queue;   /* reqs waiting for service */

        struct list_head srv_request_history; /* request history */
        __u64            srv_request_seq;     /* next request sequence # */
        __u64            srv_request_max_cull_seq; /* highest seq culled from history */
        svcreq_printfn_t srv_request_history_print_fn; /* service-specific print fn */

        struct list_head srv_idle_rqbds;      /* request buffers to be reposted */
        struct list_head srv_active_rqbds;    /* req buffers receiving */
        struct list_head srv_history_rqbds;   /* request buffer history */
        int              srv_nrqbd_receiving; /* # posted request buffers */
        int              srv_n_history_rqbds; /* # request buffers in history */
        int              srv_max_history_rqbds; /* max # request buffers in history */

        atomic_t         srv_outstanding_replies;
        struct list_head srv_active_replies;  /* all the active replies */
        struct list_head srv_reply_queue;     /* replies waiting for service */

        wait_queue_head_t srv_waitq;          /* all threads sleep on this. This
                                               * wait-queue is signalled when new
                                               * incoming request arrives and when
                                               * difficult reply has to be handled. */

        struct list_head  srv_threads;
        svc_handler_t     srv_handler;

        char *srv_name; /* only statically allocated strings here; we don't clean them */

        struct proc_dir_entry *srv_procroot;
        struct lprocfs_stats  *srv_stats;

        /* List of free reply_states */
        struct list_head  srv_free_rs_list;
        /* waitq to run, when adding stuff to srv_free_rs_list */
        wait_queue_head_t srv_free_rs_waitq;

        /*
         * if non-NULL called during thread creation (ptlrpc_start_thread())
         * to initialize service specific per-thread state.
         */
        int (*srv_init)(struct ptlrpc_thread *thread);
        /*
         * if non-NULL called during thread shutdown (ptlrpc_main()) to
         * destruct state created by ->srv_init().
         */
        void (*srv_done)(struct ptlrpc_thread *thread);

        //struct ptlrpc_srv_ni srv_interfaces[0];
};
/* ptlrpc/events.c */
extern lnet_handle_eq_t ptlrpc_eq_h;
extern int ptlrpc_uuid_to_peer(struct obd_uuid *uuid,
                               lnet_process_id_t *peer, lnet_nid_t *self);
extern void request_out_callback (lnet_event_t *ev);
extern void reply_in_callback(lnet_event_t *ev);
extern void client_bulk_callback (lnet_event_t *ev);
extern void request_in_callback(lnet_event_t *ev);
extern void reply_out_callback(lnet_event_t *ev);
extern void server_bulk_callback (lnet_event_t *ev);

/* ptlrpc/connection.c */
void ptlrpc_dump_connections(void);
void ptlrpc_readdress_connection(struct ptlrpc_connection *, struct obd_uuid *);
struct ptlrpc_connection *ptlrpc_get_connection(lnet_process_id_t peer,
                                                lnet_nid_t self, struct obd_uuid *uuid);
int ptlrpc_put_connection(struct ptlrpc_connection *c);
struct ptlrpc_connection *ptlrpc_connection_addref(struct ptlrpc_connection *);
void ptlrpc_init_connection(void);
void ptlrpc_cleanup_connection(void);
extern lnet_pid_t ptl_get_pid(void);
/* ptlrpc/niobuf.c */
int ptlrpc_start_bulk_transfer(struct ptlrpc_bulk_desc *desc);
void ptlrpc_abort_bulk(struct ptlrpc_bulk_desc *desc);
int ptlrpc_register_bulk(struct ptlrpc_request *req);
void ptlrpc_unregister_bulk (struct ptlrpc_request *req);

static inline int ptlrpc_bulk_active (struct ptlrpc_bulk_desc *desc)
{
        unsigned long flags;
        int           rc;

        spin_lock_irqsave (&desc->bd_lock, flags);
        rc = desc->bd_network_rw;
        spin_unlock_irqrestore (&desc->bd_lock, flags);
        return rc;
}

int ptlrpc_send_reply(struct ptlrpc_request *req, int);
int ptlrpc_reply(struct ptlrpc_request *req);
int ptlrpc_error(struct ptlrpc_request *req);
void ptlrpc_resend_req(struct ptlrpc_request *request);
int ptl_send_rpc(struct ptlrpc_request *request, int noreply);
int ptlrpc_register_rqbd (struct ptlrpc_request_buffer_desc *rqbd);
/* ptlrpc/client.c */
void ptlrpc_init_client(int req_portal, int rep_portal, char *name,
                        struct ptlrpc_client *);
void ptlrpc_cleanup_client(struct obd_import *imp);
struct ptlrpc_connection *ptlrpc_uuid_to_connection(struct obd_uuid *uuid);

static inline int
ptlrpc_client_receiving_reply (struct ptlrpc_request *req)
{
        unsigned long flags;
        int           rc;

        spin_lock_irqsave(&req->rq_lock, flags);
        rc = req->rq_receiving_reply;
        spin_unlock_irqrestore(&req->rq_lock, flags);
        return rc;
}

static inline int
ptlrpc_client_replied (struct ptlrpc_request *req)
{
        unsigned long flags;
        int           rc;

        spin_lock_irqsave(&req->rq_lock, flags);
        rc = req->rq_replied;
        spin_unlock_irqrestore(&req->rq_lock, flags);
        return rc;
}

static inline void
ptlrpc_wake_client_req (struct ptlrpc_request *req)
{
        if (req->rq_set == NULL)
                wake_up(&req->rq_reply_waitq);
        else
                wake_up(&req->rq_set->set_waitq);
}
int ptlrpc_queue_wait(struct ptlrpc_request *req);
int ptlrpc_replay_req(struct ptlrpc_request *req);
void ptlrpc_unregister_reply(struct ptlrpc_request *req);
void ptlrpc_restart_req(struct ptlrpc_request *req);
void ptlrpc_abort_inflight(struct obd_import *imp);

struct ptlrpc_request_set *ptlrpc_prep_set(void);
int ptlrpc_set_next_timeout(struct ptlrpc_request_set *);
int ptlrpc_check_set(struct ptlrpc_request_set *set);
int ptlrpc_set_wait(struct ptlrpc_request_set *);
int ptlrpc_expired_set(void *data);
void ptlrpc_interrupted_set(void *data);
void ptlrpc_mark_interrupted(struct ptlrpc_request *req);
void ptlrpc_set_destroy(struct ptlrpc_request_set *);
void ptlrpc_set_add_req(struct ptlrpc_request_set *, struct ptlrpc_request *);
void ptlrpc_set_add_new_req(struct ptlrpc_request_set *,
                            struct ptlrpc_request *);

void ptlrpc_free_rq_pool(struct ptlrpc_request_pool *pool);
void ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq);
struct ptlrpc_request_pool *ptlrpc_init_rq_pool(int, int,
                void (*populate_pool)(struct ptlrpc_request_pool *, int));
struct ptlrpc_request *ptlrpc_prep_req(struct obd_import *imp, __u32 version,
                                       int opcode, int count, int *lengths,
                                       char **bufs);
struct ptlrpc_request *ptlrpc_prep_req_pool(struct obd_import *imp, __u32 version,
                                            int opcode, int count, int *lengths,
                                            char **bufs,
                                            struct ptlrpc_request_pool *pool);
void ptlrpc_free_req(struct ptlrpc_request *request);
void ptlrpc_req_finished(struct ptlrpc_request *request);
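/*
 * Illustrative sketch: a minimal synchronous client RPC built with the
 * helpers above.  The opcode, version constant and body sizing shown here
 * are assumptions for the example (a real caller would also fill in the
 * request body and set the expected reply length).
 */
#if 0
static int my_sync_getattr(struct obd_import *imp)
{
        struct ptlrpc_request *req;
        int size = sizeof(struct mds_body);
        int rc;

        req = ptlrpc_prep_req(imp, LUSTRE_MDS_VERSION, MDS_GETATTR,
                              1, &size, NULL);
        if (req == NULL)
                return -ENOMEM;
        rc = ptlrpc_queue_wait(req);   /* send and block for the reply */
        ptlrpc_req_finished(req);      /* drop our reference */
        return rc;
}
#endif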
void ptlrpc_req_finished_with_imp_lock(struct ptlrpc_request *request);
struct ptlrpc_request *ptlrpc_request_addref(struct ptlrpc_request *req);
struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp (struct ptlrpc_request *req,
                                               int npages, int type, int portal);
struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_exp(struct ptlrpc_request *req,
                                              int npages, int type, int portal);
void ptlrpc_free_bulk(struct ptlrpc_bulk_desc *bulk);
void ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
                           struct page *page, int pageoffset, int len);
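/*
 * Illustrative sketch: a client setting up a bulk read (the server PUTs into
 * our sink pages).  The function name, "pages[]"/"npages" and the use of
 * OST_BULK_PORTAL are assumptions taken from typical OSC-style usage.
 */
#if 0
static int my_prep_bulk_read(struct ptlrpc_request *req, struct page **pages,
                             int npages)
{
        struct ptlrpc_bulk_desc *desc;
        int i;

        desc = ptlrpc_prep_bulk_imp(req, npages, BULK_PUT_SINK, OST_BULK_PORTAL);
        if (desc == NULL)
                return -ENOMEM;
        for (i = 0; i < npages; i++)
                ptlrpc_prep_bulk_page(desc, pages[i], 0, PAGE_SIZE);
        return 0;
}
#endif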
void ptlrpc_retain_replayable_request(struct ptlrpc_request *req,
                                      struct obd_import *imp);
__u64 ptlrpc_next_xid(void);
__u64 ptlrpc_sample_next_xid(void);
__u64 ptlrpc_req_xid(struct ptlrpc_request *request);
/* ptlrpc/service.c */
void ptlrpc_save_lock (struct ptlrpc_request *req,
                       struct lustre_handle *lock, int mode);
void ptlrpc_commit_replies (struct obd_device *obd);
void ptlrpc_schedule_difficult_reply (struct ptlrpc_reply_state *rs);
struct ptlrpc_service *ptlrpc_init_svc(int nbufs, int bufsize, int max_req_size,
                                       int req_portal, int rep_portal,
                                       int watchdog_timeout, /* in ms */
                                       svc_handler_t, char *name,
                                       struct proc_dir_entry *proc_entry,
                                       svcreq_printfn_t, int num_threads);
void ptlrpc_stop_all_threads(struct ptlrpc_service *svc);
int ptlrpc_start_threads(struct obd_device *dev, struct ptlrpc_service *svc,
int ptlrpc_start_thread(struct obd_device *dev, struct ptlrpc_service *svc,
int ptlrpc_unregister_service(struct ptlrpc_service *service);
int liblustre_check_services (void *arg);
void ptlrpc_daemonize(void);
int ptlrpc_service_health_check(struct ptlrpc_service *);
struct ptlrpc_svc_data {
        struct ptlrpc_service *svc;
        struct ptlrpc_thread *thread;
        struct obd_device *dev;
};

/* ptlrpc/import.c */
int ptlrpc_connect_import(struct obd_import *imp, char *new_uuid);
int ptlrpc_init_import(struct obd_import *imp);
int ptlrpc_disconnect_import(struct obd_import *imp);
int ptlrpc_import_recovery_state_machine(struct obd_import *imp);
/* ptlrpc/pack_generic.c */
int lustre_msg_swabbed(struct lustre_msg *msg);
int lustre_msg_check_version(struct lustre_msg *msg, __u32 version);
int lustre_pack_request(struct ptlrpc_request *, int count, const int *lens,
                        char **bufs);
int lustre_pack_reply(struct ptlrpc_request *, int count, const int *lens,
                      char **bufs);
void lustre_shrink_reply(struct ptlrpc_request *req,
                         int segment, unsigned int newlen, int move_data);
void lustre_free_reply_state(struct ptlrpc_reply_state *rs);
int lustre_msg_size(int count, const int *lengths);
int lustre_unpack_msg(struct lustre_msg *m, int len);
void *lustre_msg_buf(struct lustre_msg *m, int n, int minlen);
int lustre_msg_buflen(struct lustre_msg *m, int n);
char *lustre_msg_string (struct lustre_msg *m, int n, int max_len);
void *lustre_swab_buf(struct lustre_msg *, int n, int minlen, void *swabber);
void *lustre_swab_reqbuf (struct ptlrpc_request *req, int n, int minlen,
                          void *swabber);
void *lustre_swab_repbuf (struct ptlrpc_request *req, int n, int minlen,
                          void *swabber);
static inline void
ptlrpc_rs_addref(struct ptlrpc_reply_state *rs)
{
        LASSERT(atomic_read(&rs->rs_refcount) > 0);
        atomic_inc(&rs->rs_refcount);
}

static inline void
ptlrpc_rs_decref(struct ptlrpc_reply_state *rs)
{
        LASSERT(atomic_read(&rs->rs_refcount) > 0);
        if (atomic_dec_and_test(&rs->rs_refcount))
                lustre_free_reply_state(rs);
}
/* ldlm/ldlm_lib.c */
int client_obd_setup(struct obd_device *obddev, obd_count len, void *buf);
int client_obd_cleanup(struct obd_device *obddev);
int client_connect_import(struct lustre_handle *conn, struct obd_device *obd,
                          struct obd_uuid *cluuid, struct obd_connect_data *);
int client_disconnect_export(struct obd_export *exp);
int client_import_add_conn(struct obd_import *imp, struct obd_uuid *uuid,
int client_import_del_conn(struct obd_import *imp, struct obd_uuid *uuid);
int import_set_conn_priority(struct obd_import *imp, struct obd_uuid *uuid);
/* ptlrpc/pinger.c */
int ptlrpc_pinger_add_import(struct obd_import *imp);
int ptlrpc_pinger_del_import(struct obd_import *imp);

/* ptlrpc/ptlrpcd.c */
void ptlrpcd_wake(struct ptlrpc_request *req);
void ptlrpcd_add_req(struct ptlrpc_request *req);
int ptlrpcd_addref(void);
void ptlrpcd_decref(void);
/* ptlrpc/lproc_ptlrpc.c */
#ifdef LPROCFS
void ptlrpc_lprocfs_register_obd(struct obd_device *obd);
void ptlrpc_lprocfs_unregister_obd(struct obd_device *obd);
#else
static inline void ptlrpc_lprocfs_register_obd(struct obd_device *obd) {}
static inline void ptlrpc_lprocfs_unregister_obd(struct obd_device *obd) {}
#endif
/* ptlrpc/llog_server.c */
int llog_origin_handle_create(struct ptlrpc_request *req);
int llog_origin_handle_destroy(struct ptlrpc_request *req);
int llog_origin_handle_prev_block(struct ptlrpc_request *req);
int llog_origin_handle_next_block(struct ptlrpc_request *req);
int llog_origin_handle_read_header(struct ptlrpc_request *req);
int llog_origin_handle_close(struct ptlrpc_request *req);
int llog_origin_handle_cancel(struct ptlrpc_request *req);
int llog_catinfo(struct ptlrpc_request *req);

/* ptlrpc/llog_client.c */
extern struct llog_operations llog_client_ops;