1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 only,
10 * as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License version 2 for more details (a copy is included
16 * in the LICENSE file that accompanied this code).
18 * You should have received a copy of the GNU General Public License
19 * version 2 along with this program; If not, see
20 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23 * CA 95054 USA or visit www.sun.com if you need additional information or
29 * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
30 * Use is subject to license terms.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
40 #if defined(__linux__)
41 #include <linux/lustre_net.h>
42 #elif defined(__APPLE__)
43 #include <darwin/lustre_net.h>
44 #elif defined(__WINNT__)
45 #include <winnt/lustre_net.h>
#else
#error Unsupported operating system.
#endif
50 #include <libcfs/kp30.h>
52 #include <lnet/lnet.h>
53 #include <lustre/lustre_idl.h>
54 #include <lustre_ha.h>
55 #include <lustre_import.h>
56 #include <lprocfs_status.h>
58 #include <obd_support.h>
60 /* MD flags we _always_ use */
61 #define PTLRPC_MD_OPTIONS 0
63 /* Define maxima for bulk I/O
64 * CAVEAT EMPTOR, with multinet (i.e. routers forwarding between networks)
65 * these limits are system wide and not interface-local. */
66 #define PTLRPC_MAX_BRW_BITS LNET_MTU_BITS
67 #define PTLRPC_MAX_BRW_SIZE (1<<LNET_MTU_BITS)
68 #define PTLRPC_MAX_BRW_PAGES (PTLRPC_MAX_BRW_SIZE >> CFS_PAGE_SHIFT)
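/*
 * Worked example of the arithmetic above, assuming LNET_MTU_BITS = 20
 * (1MB LNET MTU) and CFS_PAGE_SHIFT = 12 (4KB pages); both values come from
 * the platform headers and may differ:
 *
 *   PTLRPC_MAX_BRW_SIZE  = 1 << 20       = 1048576 bytes
 *   PTLRPC_MAX_BRW_PAGES = 1048576 >> 12 = 256 pages
 *
 * i.e. a single bulk RPC moves at most 256 pages (1MB) of data.
 */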
/* When PAGE_SIZE is a constant, we can check our arithmetic here with cpp! */
#ifdef __KERNEL__
# if ((PTLRPC_MAX_BRW_PAGES & (PTLRPC_MAX_BRW_PAGES - 1)) != 0)
# error "PTLRPC_MAX_BRW_PAGES isn't a power of two"
# endif
# if (PTLRPC_MAX_BRW_SIZE != (PTLRPC_MAX_BRW_PAGES * CFS_PAGE_SIZE))
# error "PTLRPC_MAX_BRW_SIZE isn't PTLRPC_MAX_BRW_PAGES * CFS_PAGE_SIZE"
# endif
# if (PTLRPC_MAX_BRW_SIZE > LNET_MTU)
# error "PTLRPC_MAX_BRW_SIZE too big"
# endif
# if (PTLRPC_MAX_BRW_PAGES > LNET_MAX_IOV)
# error "PTLRPC_MAX_BRW_PAGES too big"
# endif
#endif /* __KERNEL__ */
/* Size over which to OBD_VMALLOC() rather than OBD_ALLOC() service request
 * buffers */
#define SVC_BUF_VMALLOC_THRESHOLD (2 * CFS_PAGE_SIZE)
/* The following constants determine how memory is used to buffer incoming
 * service requests.
 *
 * ?_NBUFS       # buffers to allocate when growing the pool
 * ?_BUFSIZE     # bytes in a single request buffer
 * ?_MAXREQSIZE  # maximum request service will receive
 *
 * When fewer than ?_NBUFS/2 buffers are posted for receive, another chunk
 * of ?_NBUFS is added to the pool.
 *
 * Messages larger than ?_MAXREQSIZE are dropped. Request buffers are
 * considered full when less than ?_MAXREQSIZE is left in them.
 */
104 #define LDLM_THREADS_AUTO_MIN (2)
105 #define LDLM_THREADS_AUTO_MAX min_t(unsigned, num_online_cpus()*num_online_cpus()*32, 128)
106 #define LDLM_BL_THREADS LDLM_THREADS_AUTO_MIN
107 #define LDLM_NBUFS (64 * num_online_cpus())
108 #define LDLM_BUFSIZE (8 * 1024)
109 #define LDLM_MAXREQSIZE (5 * 1024)
110 #define LDLM_MAXREPSIZE (1024)
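/*
 * Worked example of the ?_NBUFS/?_BUFSIZE/?_MAXREQSIZE scheme for the LDLM
 * service (illustrative only, assuming 4 online CPUs):
 *
 *   LDLM_NBUFS   = 64 * 4 = 256 request buffers per chunk
 *   LDLM_BUFSIZE = 8KB    -> 2MB of receive buffers per chunk
 *
 * Once fewer than 128 (= LDLM_NBUFS/2) buffers remain posted for receive,
 * another chunk of 256 is allocated; any request over LDLM_MAXREQSIZE (5KB)
 * is dropped.
 */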
112 /* Absolute limits */
113 #define MDS_THREADS_MIN 2
114 #define MDS_THREADS_MAX 512
115 #define MDS_THREADS_MIN_READPAGE 2
116 #define MDS_NBUFS (64 * num_online_cpus())
117 #define MDS_BUFSIZE (8 * 1024)
118 /* Assume file name length = FNAME_MAX = 256 (true for ext3).
119 * path name length = PATH_MAX = 4096
120 * LOV MD size max = EA_MAX = 4000
121 * symlink: FNAME_MAX + PATH_MAX <- largest
122 * link: FNAME_MAX + PATH_MAX (mds_rec_link < mds_rec_create)
123 * rename: FNAME_MAX + FNAME_MAX
124 * open: FNAME_MAX + EA_MAX
126 * MDS_MAXREQSIZE ~= 4736 bytes =
127 * lustre_msg + ldlm_request + mds_body + mds_rec_create + FNAME_MAX + PATH_MAX
128 * MDS_MAXREPSIZE ~= 8300 bytes = lustre_msg + llog_header
129 * or, for mds_close() and mds_reint_unlink() on a many-OST filesystem:
130 * = 9210 bytes = lustre_msg + mds_body + 160 * (easize + cookiesize)
132 * Realistic size is about 512 bytes (20 character name + 128 char symlink),
* except in the open case where there are a large number of OSTs in a LOV.
*/
135 #define MDS_MAXREQSIZE (5 * 1024)
136 #define MDS_MAXREPSIZE max(9 * 1024, 362 + LOV_MAX_STRIPE_COUNT * 56)
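/*
 * Worked example for MDS_MAXREPSIZE, using LOV_MAX_STRIPE_COUNT = 160 (the
 * stripe count implied by the "160 * (easize + cookiesize)" estimate above):
 *
 *   362 + 160 * 56 = 9322 bytes  >  9 * 1024 = 9216 bytes
 *
 * so on a many-OST filesystem the reply buffer is sized by the striping
 * term rather than the 9KB floor.
 */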
138 #define MGS_THREADS_AUTO_MIN 2
139 #define MGS_THREADS_AUTO_MAX 32
140 #define MGS_NBUFS (64 * num_online_cpus())
141 #define MGS_BUFSIZE (8 * 1024)
142 #define MGS_MAXREQSIZE (8 * 1024)
143 #define MGS_MAXREPSIZE (9 * 1024)
145 /* Absolute limits */
146 #define OSS_THREADS_MIN 3 /* difficult replies, HPQ, others */
147 #define OSS_THREADS_MAX 512
148 #define OST_NBUFS (64 * num_online_cpus())
149 #define OST_BUFSIZE (8 * 1024)
150 /* OST_MAXREQSIZE ~= 4768 bytes =
151 * lustre_msg + obdo + 16 * obd_ioobj + 256 * niobuf_remote
153 * - single object with 16 pages is 512 bytes
* - OST_MAXREQSIZE must be at least 1 page of cookies plus some spillover
*/
156 #define OST_MAXREQSIZE (5 * 1024)
157 #define OST_MAXREPSIZE (9 * 1024)
159 /* Macro to hide a typecast. */
160 #define ptlrpc_req_async_args(req) ((void *)&req->rq_async_args)
162 struct ptlrpc_connection {
163 struct hlist_node c_hash;
165 lnet_process_id_t c_peer;
166 struct obd_uuid c_remote_uuid;
170 struct ptlrpc_client {
171 __u32 cli_request_portal;
172 __u32 cli_reply_portal;
176 /* state flags of requests */
177 /* XXX only ones left are those used by the bulk descs as well! */
178 #define PTL_RPC_FL_INTR (1 << 0) /* reply wait was interrupted by user */
179 #define PTL_RPC_FL_TIMEOUT (1 << 7) /* request timed out waiting for reply */
181 #define REQ_MAX_ACK_LOCKS 8
183 union ptlrpc_async_args {
184 /* Scratchpad for passing args to completion interpreter. Users
185 * cast to the struct of their choosing, and LASSERT that this is
186 * big enough. For _tons_ of context, OBD_ALLOC a struct and store
187 * a pointer to it here. The pointer_arg ensures this struct is at
188 * least big enough for that. */
189 void *pointer_arg[9];
193 struct ptlrpc_request_set;
194 typedef int (*set_interpreter_func)(struct ptlrpc_request_set *, void *, int);
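/*
 * Illustrative sketch of how an async caller typically uses rq_async_args
 * together with a reply interpreter (hypothetical caller code, not part of
 * this header; names like my_async_args and do_something are made up):
 *
 *   struct my_async_args {
 *           struct obd_export *aa_exp;
 *           int                aa_flags;
 *   };
 *
 *   static int my_interpret(struct ptlrpc_request *req, void *data, int rc)
 *   {
 *           struct my_async_args *aa = ptlrpc_req_async_args(req);
 *
 *           LASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
 *           if (rc == 0)
 *                   do_something(aa->aa_exp, aa->aa_flags);
 *           return rc;
 *   }
 *
 * and at send time:
 *
 *   struct my_async_args *aa = ptlrpc_req_async_args(req);
 *
 *   req->rq_interpret_reply = my_interpret;
 *   aa->aa_exp   = exp;
 *   aa->aa_flags = 0;
 *   ptlrpcd_add_req(req);
 */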
196 struct ptlrpc_request_set {
197 atomic_t set_remaining; /* # uncompleted requests */
198 cfs_waitq_t set_waitq;
199 cfs_waitq_t *set_wakeup_ptr;
200 struct list_head set_requests;
201 struct list_head set_cblist; /* list of completion callbacks */
202 set_interpreter_func set_interpret; /* completion callback */
203 void *set_arg; /* completion context */
204 /* locked so that any old caller can communicate requests to
205 * the set holder who can then fold them into the lock-free set */
206 spinlock_t set_new_req_lock;
struct list_head set_new_requests;
};
210 struct ptlrpc_set_cbdata {
211 struct list_head psc_item;
212 set_interpreter_func psc_interpret;
216 struct ptlrpc_bulk_desc;
/*
* ptlrpc callback & work item stuff
*/
221 struct ptlrpc_cb_id {
222 void (*cbid_fn)(lnet_event_t *ev); /* specific callback fn */
void *cbid_arg; /* additional arg */
};
226 #define RS_MAX_LOCKS 4
229 struct ptlrpc_reply_state {
230 struct ptlrpc_cb_id rs_cb_id;
231 struct list_head rs_list;
232 struct list_head rs_exp_list;
233 struct list_head rs_obd_list;
235 struct list_head rs_debug_list;
237 /* updates to following flag serialised by srv_request_lock */
238 unsigned long rs_difficult:1; /* ACK/commit stuff */
239 unsigned long rs_scheduled:1; /* being handled? */
240 unsigned long rs_scheduled_ever:1;/* any schedule attempts? */
241 unsigned long rs_handled:1; /* been handled yet? */
242 unsigned long rs_on_net:1; /* reply_out_callback pending? */
243 unsigned long rs_prealloc:1; /* rs from prealloc list */
248 struct obd_export *rs_export;
249 struct ptlrpc_service *rs_service;
250 lnet_handle_md_t rs_md_h;
251 atomic_t rs_refcount;
253 /* locks awaiting client reply ACK */
255 struct lustre_handle rs_locks[RS_MAX_LOCKS];
256 ldlm_mode_t rs_modes[RS_MAX_LOCKS];
257 /* last member: variable sized reply message */
struct lustre_msg *rs_msg;
};
261 struct ptlrpc_thread;
enum rq_phase {
RQ_PHASE_NEW = 0xebc0de00,
265 RQ_PHASE_RPC = 0xebc0de01,
266 RQ_PHASE_BULK = 0xebc0de02,
267 RQ_PHASE_INTERPRET = 0xebc0de03,
268 RQ_PHASE_COMPLETE = 0xebc0de04,
269 RQ_PHASE_UNREGISTERING = 0xebc0de05,
RQ_PHASE_UNDEFINED = 0xebc0de06
};
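/*
 * Informal life cycle of a client request in terms of these phases:
 * New -> Rpc -> (Bulk ->) Interpret -> Complete, with Unregistering entered
 * transiently while reply/bulk MDs are being unlinked from the network
 * (rq_next_phase remembers the phase to resume afterwards).
 */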
273 struct ptlrpc_request_pool {
275 struct list_head prp_req_list; /* list of ptlrpc_request structs */
277 void (*prp_populate)(struct ptlrpc_request_pool *, int);
struct ptlrpc_hpreq_ops {
/*
* Check if the lock handle of the given lock is the same as
* taken from the request.
*/
int (*hpreq_lock_match)(struct ptlrpc_request *, struct ldlm_lock *);
/*
* Check if the request is a high priority one.
*/
int (*hpreq_check)(struct ptlrpc_request *);
};
294 struct ptlrpc_request {
295 int rq_type; /* one of PTL_RPC_MSG_* */
296 struct list_head rq_list;
297 struct list_head rq_timed_list; /* server-side early replies */
298 struct list_head rq_history_list; /* server-side history */
299 struct list_head rq_exp_list; /* server-side per-export list */
300 struct ptlrpc_hpreq_ops *rq_ops; /* server-side hp handlers */
302 __u64 rq_history_seq; /* history sequence # */
303 /* the index of service's srv_at_array into which request is linked */
307 /* client-side flags are serialized by rq_lock */
308 unsigned long rq_intr:1, rq_replied:1, rq_err:1,
309 rq_timedout:1, rq_resend:1, rq_restart:1,
/* when ->rq_replay is set, request is kept by the client even
* after server commits corresponding transaction. This is
* used for operations that require sequence of multiple
* requests to be replayed. The only example currently is file
* open/close. When last request in such a sequence is
* committed, ->rq_replay is cleared on all requests in the
* sequence. */
rq_replay:1,
320 rq_no_resend:1, rq_waiting:1, rq_receiving_reply:1,
321 rq_no_delay:1, rq_net_err:1, rq_early:1, rq_must_unlink:1,
322 /* server-side flags */
323 rq_packed_final:1, /* packed final reply */
324 rq_hp:1, /* high priority RPC */
325 rq_at_linked:1, /* link into service's srv_at_array */
326 rq_reply_truncate:1, /* reply is truncated */
327 rq_fake:1, /* fake request - just for timeout only */
328 /* the request is queued to replay during recovery */
330 /* whether the "rq_set" is a valid one */
332 enum rq_phase rq_phase; /* one of RQ_PHASE_* */
333 enum rq_phase rq_next_phase; /* one of RQ_PHASE_* to be used next */
334 atomic_t rq_refcount; /* client-side refcount for SENT race,
server-side refcount for multiple replies */
337 struct ptlrpc_thread *rq_svc_thread; /* initial thread servicing req */
339 int rq_request_portal; /* XXX FIXME bug 249 */
340 int rq_reply_portal; /* XXX FIXME bug 249 */
342 int rq_nob_received; /* client-side:
343 * !rq_truncate : # reply bytes actually received,
344 * rq_truncate : required repbuf_len for resend */
346 struct lustre_msg *rq_reqmsg;
349 struct lustre_msg *rq_repbuf; /* client only, buf may be bigger than msg */
350 struct lustre_msg *rq_repmsg;
353 struct list_head rq_replay_list;
355 __u32 rq_req_swab_mask;
356 __u32 rq_rep_swab_mask;
358 int rq_import_generation;
359 enum lustre_imp_state rq_send_state;
361 int rq_early_count; /* how many early replies (for stats) */
363 /* client+server request */
364 lnet_handle_md_t rq_req_md_h;
365 struct ptlrpc_cb_id rq_req_cbid;
368 struct timeval rq_arrival_time; /* request arrival time */
369 struct ptlrpc_reply_state *rq_reply_state; /* separated reply state */
370 struct ptlrpc_request_buffer_desc *rq_rqbd; /* incoming request buffer*/
372 __u32 rq_uid; /* peer uid, used in MDS only */
375 /* client-only incoming reply */
376 lnet_handle_md_t rq_reply_md_h;
377 cfs_waitq_t rq_reply_waitq;
378 struct ptlrpc_cb_id rq_reply_cbid;
381 lnet_process_id_t rq_peer;
382 struct obd_export *rq_export;
383 struct obd_import *rq_import;
385 void (*rq_replay_cb)(struct ptlrpc_request *);
386 void (*rq_commit_cb)(struct ptlrpc_request *);
389 struct ptlrpc_bulk_desc *rq_bulk; /* client side bulk */
390 /* client outgoing req */
391 time_t rq_sent; /* when request sent, seconds,
* or time when request should
* be sent */
394 volatile time_t rq_deadline; /* when request must finish. volatile
395 so that servers' early reply updates to the deadline aren't
396 kept in per-cpu cache */
397 time_t rq_reply_deadline; /* when req reply unlink must finish. */
398 time_t rq_bulk_deadline; /* when req bulk unlink must finish. */
399 int rq_timeout; /* service time estimate (secs) */
402 struct list_head rq_set_chain;
403 cfs_waitq_t rq_set_waitq;
404 struct ptlrpc_request_set *rq_set;
405 int (*rq_interpret_reply)(struct ptlrpc_request *req, void *data,
406 int rc); /* async interpret handler */
407 union ptlrpc_async_args rq_async_args; /* Async completion context */
struct ptlrpc_request_pool *rq_pool; /* Pool if request from
preallocated list */
};
static inline int ptlrpc_req_interpret(struct ptlrpc_request *req, int rc)
{
if (req->rq_interpret_reply != NULL) {
int (*interpreter)(struct ptlrpc_request *, void *, int) =
req->rq_interpret_reply;

req->rq_status = interpreter(req, &req->rq_async_args, rc);
return req->rq_status;
}
return rc;
}
static inline void lustre_set_req_swabbed(struct ptlrpc_request *req, int index)
{
LASSERT(index < sizeof(req->rq_req_swab_mask) * 8);
LASSERT((req->rq_req_swab_mask & (1 << index)) == 0);
req->rq_req_swab_mask |= 1 << index;
}

static inline void lustre_set_rep_swabbed(struct ptlrpc_request *req, int index)
{
LASSERT(index < sizeof(req->rq_rep_swab_mask) * 8);
LASSERT((req->rq_rep_swab_mask & (1 << index)) == 0);
req->rq_rep_swab_mask |= 1 << index;
}

static inline int lustre_req_swabbed(struct ptlrpc_request *req, int index)
{
LASSERT(index < sizeof(req->rq_req_swab_mask) * 8);
return req->rq_req_swab_mask & (1 << index);
}

static inline int lustre_rep_swabbed(struct ptlrpc_request *req, int index)
{
LASSERT(index < sizeof(req->rq_rep_swab_mask) * 8);
return req->rq_rep_swab_mask & (1 << index);
}

static inline int lustre_req_need_swab(struct ptlrpc_request *req)
{
return req->rq_req_swab_mask & (1 << MSG_PTLRPC_HEADER_OFF);
}

static inline int lustre_rep_need_swab(struct ptlrpc_request *req)
{
return req->rq_rep_swab_mask & (1 << MSG_PTLRPC_HEADER_OFF);
}
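/*
 * The swab masks above record, per message buffer index, whether a buffer
 * has already been byte-swapped, so each buffer is converted at most once.
 * Callers normally go through the unpack helpers rather than the raw masks,
 * e.g. (illustrative sketch):
 *
 *   struct ost_body *body;
 *
 *   body = lustre_swab_repbuf(req, REPLY_REC_OFF, sizeof(*body),
 *                             lustre_swab_ost_body);
 *
 * which consults lustre_rep_need_swab()/lustre_rep_swabbed() and marks the
 * buffer with lustre_set_rep_swabbed() after converting it.
 */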
460 static inline const char *
461 ptlrpc_phase2str(enum rq_phase phase)
470 case RQ_PHASE_INTERPRET:
472 case RQ_PHASE_COMPLETE:
474 case RQ_PHASE_UNREGISTERING:
475 return "Unregistering";
static inline const char *
ptlrpc_rqphase2str(struct ptlrpc_request *req)
{
return ptlrpc_phase2str(req->rq_phase);
}
487 /* Spare the preprocessor, spoil the bugs. */
488 #define FLAG(field, str) (field ? str : "")
490 #define DEBUG_REQ_FLAGS(req) \
491 ptlrpc_rqphase2str(req), \
492 FLAG(req->rq_intr, "I"), FLAG(req->rq_replied, "R"), \
493 FLAG(req->rq_err, "E"), \
494 FLAG(req->rq_timedout, "X") /* eXpired */, FLAG(req->rq_resend, "S"), \
495 FLAG(req->rq_restart, "T"), FLAG(req->rq_replay, "P"), \
496 FLAG(req->rq_no_resend, "N"), \
497 FLAG(req->rq_waiting, "W"), FLAG(req->rq_hp, "H")
499 #define REQ_FLAGS_FMT "%s:%s%s%s%s%s%s%s%s%s%s"
501 void _debug_req(struct ptlrpc_request *req, __u32 mask,
502 struct libcfs_debug_msg_data *data, const char *fmt, ...)
503 __attribute__ ((format (printf, 4, 5)));
#define debug_req(cdls, level, req, file, func, line, fmt, a...) \
do { \
509 if (((level) & D_CANTMASK) != 0 || \
510 ((libcfs_debug & (level)) != 0 && \
511 (libcfs_subsystem_debug & DEBUG_SUBSYSTEM) != 0)) { \
512 static struct libcfs_debug_msg_data _req_dbg_data = \
513 DEBUG_MSG_DATA_INIT(cdls, DEBUG_SUBSYSTEM, file, func, line); \
_debug_req((req), (level), &_req_dbg_data, fmt, ##a); \
} \
} while(0)
518 /* for most callers (level is a constant) this is resolved at compile time */
#define DEBUG_REQ(level, req, fmt, args...) \
do { \
if ((level) & (D_ERROR | D_WARNING)) { \
static cfs_debug_limit_state_t cdls; \
debug_req(&cdls, level, req, __FILE__, __func__, __LINE__, \
"@@@ "fmt" ", ## args); \
} else \
debug_req(NULL, level, req, __FILE__, __func__, __LINE__, \
"@@@ "fmt" ", ## args); \
} while (0)
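/*
 * Example use (illustrative; 'rc' is whatever status the caller wants to
 * report):
 *
 *   DEBUG_REQ(D_ERROR, req, "processing failed: rc = %d", rc);
 *
 * The macro prepends "@@@" and _debug_req() fills in the request state
 * (the phase and the flag letters listed in DEBUG_REQ_FLAGS above).
 */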
530 struct ptlrpc_bulk_page {
531 struct list_head bp_link;
533 int bp_pageoffset; /* offset within a page */
struct page *bp_page;
};
537 #define BULK_GET_SOURCE 0
538 #define BULK_PUT_SINK 1
539 #define BULK_GET_SINK 2
540 #define BULK_PUT_SOURCE 3
542 struct ptlrpc_bulk_desc {
543 unsigned long bd_success:1; /* completed successfully */
544 unsigned long bd_network_rw:1; /* accessible to the network */
545 unsigned long bd_type:2; /* {put,get}{source,sink} */
546 unsigned long bd_registered:1; /* client side */
547 spinlock_t bd_lock; /* serialise with callback */
548 int bd_import_generation;
549 struct obd_export *bd_export;
550 struct obd_import *bd_import;
552 struct ptlrpc_request *bd_req; /* associated request */
553 cfs_waitq_t bd_waitq; /* server side only WQ */
554 int bd_iov_count; /* # entries in bd_iov */
555 int bd_max_iov; /* allocated size of bd_iov */
556 int bd_nob; /* # bytes covered */
557 int bd_nob_transferred; /* # bytes GOT/PUT */
561 struct ptlrpc_cb_id bd_cbid; /* network callback info */
562 lnet_handle_md_t bd_md_h; /* associated MD */
563 lnet_nid_t bd_sender; /* stash event::sender */
565 #if defined(__KERNEL__)
566 lnet_kiov_t bd_iov[0];
#else
lnet_md_iovec_t bd_iov[0];
#endif
};
572 struct ptlrpc_thread {
574 struct list_head t_link; /* active threads in svc->srv_threads */
576 void *t_data; /* thread-private data (preallocated memory) */
579 unsigned int t_id; /* service thread index, from ptlrpc_start_threads */
struct lc_watchdog *t_watchdog; /* put watchdog in the structure per
* thread */
struct ptlrpc_service *t_svc; /* the svc this thread belonged to */
584 cfs_waitq_t t_ctl_waitq;
587 struct ptlrpc_request_buffer_desc {
588 struct list_head rqbd_list;
589 struct list_head rqbd_reqs;
590 struct ptlrpc_service *rqbd_service;
591 lnet_handle_md_t rqbd_md_h;
594 struct ptlrpc_cb_id rqbd_cbid;
595 struct ptlrpc_request rqbd_req;
598 typedef int (*svc_handler_t)(struct ptlrpc_request *req);
599 typedef void (*svcreq_printfn_t)(void *, struct ptlrpc_request *);
600 typedef int (*svc_hpreq_handler_t)(struct ptlrpc_request *);
602 #define PTLRPC_SVC_HP_RATIO 10
604 struct ptlrpc_service {
605 struct list_head srv_list; /* chain thru all services */
606 int srv_max_req_size; /* biggest request to receive */
607 int srv_max_reply_size; /* biggest reply to send */
608 int srv_buf_size; /* size of individual buffers */
609 int srv_nbuf_per_group; /* # buffers to allocate in 1 group */
610 int srv_nbufs; /* total # req buffer descs allocated */
611 int srv_threads_min; /* threads to start at SOW */
612 int srv_threads_max; /* thread upper limit */
613 int srv_threads_started; /* index of last started thread */
614 int srv_threads_running; /* # running threads */
615 int srv_n_difficult_replies; /* # 'difficult' replies */
616 int srv_n_active_reqs; /* # reqs being served */
617 int srv_n_hpreq; /* # HPreqs being served */
618 cfs_duration_t srv_rqbd_timeout; /* timeout before re-posting reqs, in tick */
int srv_watchdog_factor; /* soft watchdog timeout multiplier */
620 unsigned srv_cpu_affinity:1; /* bind threads to CPUs */
621 unsigned srv_at_check:1; /* check early replies */
622 cfs_time_t srv_at_checktime; /* debug */
624 __u32 srv_req_portal;
625 __u32 srv_rep_portal;
628 struct adaptive_timeout srv_at_estimate;/* estimated rpc service time */
629 spinlock_t srv_at_lock;
630 struct ptlrpc_at_array srv_at_array; /* reqs waiting for replies */
631 cfs_timer_t srv_at_timer; /* early reply timer */
633 int srv_n_queued_reqs; /* # reqs in either of the queues below */
634 int srv_hpreq_count; /* # hp requests handled */
635 int srv_hpreq_ratio; /* # hp per lp reqs to handle */
636 struct list_head srv_req_in_queue; /* incoming reqs */
637 struct list_head srv_request_queue; /* reqs waiting for service */
638 struct list_head srv_request_hpq; /* high priority queue */
640 struct list_head srv_request_history; /* request history */
641 __u64 srv_request_seq; /* next request sequence # */
642 __u64 srv_request_max_cull_seq; /* highest seq culled from history */
643 svcreq_printfn_t srv_request_history_print_fn; /* service-specific print fn */
645 struct list_head srv_idle_rqbds; /* request buffers to be reposted */
646 struct list_head srv_active_rqbds; /* req buffers receiving */
647 struct list_head srv_history_rqbds; /* request buffer history */
648 int srv_nrqbd_receiving; /* # posted request buffers */
649 int srv_n_history_rqbds; /* # request buffers in history */
650 int srv_max_history_rqbds;/* max # request buffers in history */
652 atomic_t srv_outstanding_replies;
653 struct list_head srv_active_replies; /* all the active replies */
654 struct list_head srv_reply_queue; /* replies waiting for service */
656 cfs_waitq_t srv_waitq; /* all threads sleep on this. This
657 * wait-queue is signalled when new
658 * incoming request arrives and when
659 * difficult reply has to be handled. */
661 struct list_head srv_threads; /* service thread list */
662 svc_handler_t srv_handler;
663 svc_hpreq_handler_t srv_hpreq_handler; /* hp request handler */
665 char *srv_name; /* only statically allocated strings here; we don't clean them */
666 char *srv_thread_name; /* only statically allocated strings here; we don't clean them */
670 cfs_proc_dir_entry_t *srv_procroot;
671 struct lprocfs_stats *srv_stats;
673 /* List of free reply_states */
674 struct list_head srv_free_rs_list;
675 /* waitq to run, when adding stuff to srv_free_rs_list */
676 cfs_waitq_t srv_free_rs_waitq;
679 * if non-NULL called during thread creation (ptlrpc_start_thread())
680 * to initialize service specific per-thread state.
682 int (*srv_init)(struct ptlrpc_thread *thread);
684 * if non-NULL called during thread shutdown (ptlrpc_main()) to
685 * destruct state created by ->srv_init().
687 void (*srv_done)(struct ptlrpc_thread *thread);
//struct ptlrpc_srv_ni srv_interfaces[0];
};
694 * Ptlrpc thread control flags (LIOD_START, LIOD_STOP, LIOD_FORCE)
696 unsigned long pc_flags;
698 * Thread lock protecting structure fields.
704 struct completion pc_starting;
708 struct completion pc_finishing;
710 * Thread requests set.
712 struct ptlrpc_request_set *pc_set;
714 * Thread name used in cfs_daemonize()
719 * Async rpcs flag to make sure that ptlrpcd_check() is called only
724 * Currently not used.
728 * User-space async rpcs callback.
730 void *pc_wait_callback;
732 * User-space check idle rpcs callback.
734 void *pc_idle_callback;
738 /* Bits for pc_flags */
739 enum ptlrpcd_ctl_flags {
741 * Ptlrpc thread start flag.
745 * Ptlrpc thread stop flag.
749 * Ptlrpc thread force flag (only stop force so far).
750 * This will cause aborting any inflight rpcs handled
751 * by thread if LIOD_STOP is specified.
756 /* ptlrpc/events.c */
757 extern lnet_handle_eq_t ptlrpc_eq_h;
758 extern int ptlrpc_uuid_to_peer(struct obd_uuid *uuid,
759 lnet_process_id_t *peer, lnet_nid_t *self);
760 extern void request_out_callback (lnet_event_t *ev);
761 extern void reply_in_callback(lnet_event_t *ev);
762 extern void client_bulk_callback (lnet_event_t *ev);
763 extern void request_in_callback(lnet_event_t *ev);
764 extern void reply_out_callback(lnet_event_t *ev);
765 extern void server_bulk_callback (lnet_event_t *ev);
767 /* ptlrpc/connection.c */
768 struct ptlrpc_connection *ptlrpc_connection_get(lnet_process_id_t peer,
770 struct obd_uuid *uuid);
771 int ptlrpc_connection_put(struct ptlrpc_connection *c);
772 struct ptlrpc_connection *ptlrpc_connection_addref(struct ptlrpc_connection *);
773 int ptlrpc_connection_init(void);
774 void ptlrpc_connection_fini(void);
775 extern lnet_pid_t ptl_get_pid(void);
777 /* ptlrpc/niobuf.c */
778 int ptlrpc_start_bulk_transfer(struct ptlrpc_bulk_desc *desc);
779 void ptlrpc_abort_bulk(struct ptlrpc_bulk_desc *desc);
780 int ptlrpc_register_bulk(struct ptlrpc_request *req);
781 int ptlrpc_unregister_bulk(struct ptlrpc_request *req, int async);
static inline int ptlrpc_server_bulk_active(struct ptlrpc_bulk_desc *desc)
{
int rc;
LASSERT(desc != NULL);
spin_lock(&desc->bd_lock);
rc = desc->bd_network_rw;
spin_unlock(&desc->bd_lock);
return rc;
}
static inline int ptlrpc_client_bulk_active(struct ptlrpc_request *req)
{
struct ptlrpc_bulk_desc *desc = req->rq_bulk;
int rc;
LASSERT(req != NULL);
if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK) &&
req->rq_bulk_deadline > cfs_time_current_sec())
return 1;
if (desc == NULL)
return 0;
spin_lock(&desc->bd_lock);
rc = desc->bd_network_rw;
spin_unlock(&desc->bd_lock);
return rc;
}
815 #define PTLRPC_REPLY_MAYBE_DIFFICULT 0x01
816 #define PTLRPC_REPLY_EARLY 0x02
817 int ptlrpc_send_reply(struct ptlrpc_request *req, int flags);
818 int ptlrpc_reply(struct ptlrpc_request *req);
819 int ptlrpc_send_error(struct ptlrpc_request *req, int difficult);
820 int ptlrpc_error(struct ptlrpc_request *req);
821 void ptlrpc_resend_req(struct ptlrpc_request *request);
822 int ptlrpc_at_get_net_latency(struct ptlrpc_request *req);
823 int ptl_send_rpc(struct ptlrpc_request *request, int noreply);
824 int ptlrpc_register_rqbd (struct ptlrpc_request_buffer_desc *rqbd);
826 /* ptlrpc/client.c */
827 void ptlrpc_init_client(int req_portal, int rep_portal, char *name,
828 struct ptlrpc_client *);
829 void ptlrpc_cleanup_client(struct obd_import *imp);
830 struct ptlrpc_connection *ptlrpc_uuid_to_connection(struct obd_uuid *uuid);
832 int ptlrpc_queue_wait(struct ptlrpc_request *req);
833 int ptlrpc_replay_req(struct ptlrpc_request *req);
834 int ptlrpc_unregister_reply(struct ptlrpc_request *req, int async);
835 void ptlrpc_restart_req(struct ptlrpc_request *req);
836 void ptlrpc_abort_inflight(struct obd_import *imp);
837 void ptlrpc_cleanup_imp(struct obd_import *imp);
838 void ptlrpc_evict_imp(struct obd_import *imp);
839 void ptlrpc_abort_set(struct ptlrpc_request_set *set);
841 struct ptlrpc_request_set *ptlrpc_prep_set(void);
842 int ptlrpc_set_add_cb(struct ptlrpc_request_set *set,
843 set_interpreter_func fn, void *data);
844 int ptlrpc_set_next_timeout(struct ptlrpc_request_set *);
845 int ptlrpc_check_set(struct ptlrpc_request_set *set);
846 int ptlrpc_set_wait(struct ptlrpc_request_set *);
847 int ptlrpc_expired_set(void *data);
848 void ptlrpc_interrupted_set(void *data);
849 void ptlrpc_mark_interrupted(struct ptlrpc_request *req);
850 void ptlrpc_set_destroy(struct ptlrpc_request_set *);
851 void ptlrpc_set_add_req(struct ptlrpc_request_set *, struct ptlrpc_request *);
852 int ptlrpc_set_add_new_req(struct ptlrpcd_ctl *pc,
853 struct ptlrpc_request *req);
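/*
 * Typical multi-RPC pattern with a request set (illustrative sketch of
 * caller code; error handling omitted):
 *
 *   struct ptlrpc_request_set *set = ptlrpc_prep_set();
 *
 *   ptlrpc_set_add_req(set, req1);
 *   ptlrpc_set_add_req(set, req2);
 *   rc = ptlrpc_set_wait(set);
 *   ptlrpc_set_destroy(set);
 *
 * ptlrpc_set_wait() sends any unsent requests in the set and waits for them
 * all to complete; ptlrpc_set_destroy() then drops the set's references on
 * the requests.
 */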
855 void ptlrpc_free_rq_pool(struct ptlrpc_request_pool *pool);
856 void ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq);
858 struct ptlrpc_request_pool *
859 ptlrpc_init_rq_pool(int, int,
860 void (*populate_pool)(struct ptlrpc_request_pool *, int));
862 void ptlrpc_at_set_req_timeout(struct ptlrpc_request *req);
863 struct ptlrpc_request *ptlrpc_prep_fakereq(struct obd_import *imp,
864 unsigned int timeout,
865 int (*interpreter)(struct ptlrpc_request *,
867 void ptlrpc_fakereq_finished(struct ptlrpc_request *req);
869 struct ptlrpc_request *ptlrpc_prep_req(struct obd_import *imp, __u32 version,
870 int opcode, int count, __u32 *lengths,
872 struct ptlrpc_request *ptlrpc_prep_req_pool(struct obd_import *imp,
873 __u32 version, int opcode,
874 int count, __u32 *lengths, char **bufs,
875 struct ptlrpc_request_pool *pool);
876 void ptlrpc_req_finished(struct ptlrpc_request *request);
877 void ptlrpc_req_finished_with_imp_lock(struct ptlrpc_request *request);
878 struct ptlrpc_request *ptlrpc_request_addref(struct ptlrpc_request *req);
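/*
 * Typical synchronous request sequence built from the calls above
 * (illustrative sketch; error handling and body fields omitted):
 *
 *   __u32 size[2] = { sizeof(struct ptlrpc_body), sizeof(struct ost_body) };
 *   struct ptlrpc_request *req;
 *   struct ost_body *body;
 *   int rc;
 *
 *   req = ptlrpc_prep_req(imp, LUSTRE_OST_VERSION, OST_GETATTR,
 *                         2, size, NULL);
 *   body = lustre_msg_buf(req->rq_reqmsg, REQ_REC_OFF, sizeof(*body));
 *   ptlrpc_req_set_repsize(req, 2, size);
 *   rc = ptlrpc_queue_wait(req);
 *   ptlrpc_req_finished(req);
 */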
879 struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp (struct ptlrpc_request *req,
880 int npages, int type, int portal);
881 struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_exp(struct ptlrpc_request *req,
882 int npages, int type, int portal);
883 void ptlrpc_free_bulk(struct ptlrpc_bulk_desc *bulk);
884 void ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
885 cfs_page_t *page, int pageoffset, int len);
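/*
 * Illustrative client-side bulk setup (sketch; a real caller checks the
 * return values and sizes the final page correctly):
 *
 *   desc = ptlrpc_prep_bulk_imp(req, npages, BULK_PUT_SINK, OST_BULK_PORTAL);
 *   for (i = 0; i < npages; i++)
 *           ptlrpc_prep_bulk_page(desc, pages[i], 0, CFS_PAGE_SIZE);
 *
 * BULK_PUT_SINK means the peer will PUT data into these pages (a bulk read
 * from the client's point of view); a bulk write would use BULK_GET_SOURCE.
 */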
886 void ptlrpc_retain_replayable_request(struct ptlrpc_request *req,
887 struct obd_import *imp);
888 __u64 ptlrpc_next_xid(void);
889 __u64 ptlrpc_sample_next_xid(void);
890 __u64 ptlrpc_req_xid(struct ptlrpc_request *request);
892 /* ptlrpc/service.c */
893 void ptlrpc_save_lock (struct ptlrpc_request *req,
894 struct lustre_handle *lock, int mode);
895 void ptlrpc_commit_replies (struct obd_export *exp);
896 void ptlrpc_schedule_difficult_reply (struct ptlrpc_reply_state *rs);
897 struct ptlrpc_service *ptlrpc_init_svc(int nbufs, int bufsize, int max_req_size,
899 int req_portal, int rep_portal,
901 svc_handler_t, char *name,
902 cfs_proc_dir_entry_t *proc_entry,
904 int min_threads, int max_threads,
905 char *threadname, svc_hpreq_handler_t);
906 void ptlrpc_stop_all_threads(struct ptlrpc_service *svc);
908 int ptlrpc_start_threads(struct obd_device *dev, struct ptlrpc_service *svc);
909 int ptlrpc_start_thread(struct obd_device *dev, struct ptlrpc_service *svc);
910 int ptlrpc_unregister_service(struct ptlrpc_service *service);
911 int liblustre_check_services (void *arg);
912 void ptlrpc_daemonize(char *name);
913 int ptlrpc_service_health_check(struct ptlrpc_service *);
914 void ptlrpc_hpreq_reorder(struct ptlrpc_request *req);
915 void ptlrpc_server_active_request_inc(struct ptlrpc_request *req);
916 void ptlrpc_server_active_request_dec(struct ptlrpc_request *req);
917 void ptlrpc_server_drop_request(struct ptlrpc_request *req);
920 struct ptlrpc_svc_data {
922 struct ptlrpc_service *svc;
923 struct ptlrpc_thread *thread;
924 struct obd_device *dev;
927 /* ptlrpc/import.c */
928 int ptlrpc_connect_import(struct obd_import *imp, char * new_uuid);
929 int ptlrpc_init_import(struct obd_import *imp);
930 int ptlrpc_disconnect_import(struct obd_import *imp, int noclose);
931 int ptlrpc_import_recovery_state_machine(struct obd_import *imp);
932 void ptlrpc_import_setasync(struct obd_import *imp, int count);
933 int ptlrpc_reconnect_import(struct obd_import *imp);
935 /* ptlrpc/pack_generic.c */
936 int lustre_msg_swabbed(struct lustre_msg *msg);
937 int lustre_msg_check_version(struct lustre_msg *msg, __u32 version);
938 int lustre_pack_request(struct ptlrpc_request *, __u32 magic, int count,
939 __u32 *lens, char **bufs);
940 int lustre_pack_reply(struct ptlrpc_request *, int count, __u32 *lens,
942 #define LPRFL_EARLY_REPLY 1
943 int lustre_pack_reply_flags(struct ptlrpc_request *, int count, __u32 *lens,
944 char **bufs, int flags);
945 void lustre_shrink_reply(struct ptlrpc_request *req, int segment,
946 unsigned int newlen, int move_data);
947 void lustre_free_reply_state(struct ptlrpc_reply_state *rs);
948 int lustre_msg_size(__u32 magic, int count, __u32 *lengths);
949 int lustre_packed_msg_size(struct lustre_msg *msg);
950 int lustre_msg_early_size(struct ptlrpc_request *req);
951 int lustre_unpack_msg(struct lustre_msg *m, int len);
952 void *lustre_msg_buf(struct lustre_msg *m, int n, int minlen);
953 int lustre_msg_buflen(struct lustre_msg *m, int n);
954 void lustre_msg_set_buflen(struct lustre_msg *m, int n, int len);
955 int lustre_msg_bufcount(struct lustre_msg *m);
956 char *lustre_msg_string (struct lustre_msg *m, int n, int max_len);
957 void *lustre_swab_reqbuf(struct ptlrpc_request *req, int n, int minlen,
959 void *lustre_swab_repbuf(struct ptlrpc_request *req, int n, int minlen,
961 __u32 lustre_msghdr_get_flags(struct lustre_msg *msg);
962 void lustre_msghdr_set_flags(struct lustre_msg *msg, __u32 flags);
963 __u32 lustre_msg_get_flags(struct lustre_msg *msg);
964 void lustre_msg_add_flags(struct lustre_msg *msg, int flags);
965 void lustre_msg_set_flags(struct lustre_msg *msg, int flags);
966 void lustre_msg_clear_flags(struct lustre_msg *msg, int flags);
967 __u32 lustre_msg_get_op_flags(struct lustre_msg *msg);
968 void lustre_msg_add_op_flags(struct lustre_msg *msg, int flags);
969 void lustre_msg_set_op_flags(struct lustre_msg *msg, int flags);
970 struct lustre_handle *lustre_msg_get_handle(struct lustre_msg *msg);
971 __u32 lustre_msg_get_type(struct lustre_msg *msg);
972 __u32 lustre_msg_get_version(struct lustre_msg *msg);
973 void lustre_msg_add_version(struct lustre_msg *msg, int version);
974 __u32 lustre_msg_get_opc(struct lustre_msg *msg);
975 __u64 lustre_msg_get_last_xid(struct lustre_msg *msg);
976 __u64 lustre_msg_get_last_committed(struct lustre_msg *msg);
977 __u64 *lustre_msg_get_versions(struct lustre_msg *msg);
978 __u64 lustre_msg_get_transno(struct lustre_msg *msg);
979 __u64 lustre_msg_get_slv(struct lustre_msg *msg);
980 __u32 lustre_msg_get_limit(struct lustre_msg *msg);
981 void lustre_msg_set_slv(struct lustre_msg *msg, __u64 slv);
982 void lustre_msg_set_limit(struct lustre_msg *msg, __u64 limit);
983 int lustre_msg_get_status(struct lustre_msg *msg);
984 __u32 lustre_msg_get_conn_cnt(struct lustre_msg *msg);
985 int lustre_msg_is_v1(struct lustre_msg *msg);
986 __u32 lustre_msg_get_magic(struct lustre_msg *msg);
987 __u32 lustre_msg_get_timeout(struct lustre_msg *msg);
988 __u32 lustre_msg_get_service_time(struct lustre_msg *msg);
989 __u32 lustre_msg_get_cksum(struct lustre_msg *msg);
990 __u32 lustre_msg_calc_cksum(struct lustre_msg *msg);
991 void lustre_msg_set_handle(struct lustre_msg *msg,struct lustre_handle *handle);
992 void lustre_msg_set_type(struct lustre_msg *msg, __u32 type);
993 void lustre_msg_set_opc(struct lustre_msg *msg, __u32 opc);
994 void lustre_msg_set_last_xid(struct lustre_msg *msg, __u64 last_xid);
995 void lustre_msg_set_last_committed(struct lustre_msg *msg,__u64 last_committed);
996 void lustre_msg_set_versions(struct lustre_msg *msg, __u64 *versions);
997 void lustre_msg_set_transno(struct lustre_msg *msg, __u64 transno);
998 void lustre_msg_set_status(struct lustre_msg *msg, __u32 status);
999 void lustre_msg_set_conn_cnt(struct lustre_msg *msg, __u32 conn_cnt);
1000 void lustre_msg_set_timeout(struct lustre_msg *msg, __u32 timeout);
1001 void lustre_msg_set_service_time(struct lustre_msg *msg, __u32 service_time);
1002 void lustre_msg_set_cksum(struct lustre_msg *msg, __u32 cksum);
static inline void
ptlrpc_rqphase_move(struct ptlrpc_request *req, enum rq_phase new_phase)
{
if (req->rq_phase == new_phase)
return;
if (new_phase == RQ_PHASE_UNREGISTERING) {
req->rq_next_phase = req->rq_phase;
if (req->rq_import != NULL)
atomic_inc(&req->rq_import->imp_unregistering);
}
if (req->rq_phase == RQ_PHASE_UNREGISTERING) {
if (req->rq_import != NULL)
atomic_dec(&req->rq_import->imp_unregistering);
}
DEBUG_REQ(D_INFO, req, "move req \"%s\" -> \"%s\"",
ptlrpc_rqphase2str(req), ptlrpc_phase2str(new_phase));
req->rq_phase = new_phase;
}
static inline int ptlrpc_client_early(struct ptlrpc_request *req)
{
if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
req->rq_reply_deadline > cfs_time_current_sec())
return 0;
return req->rq_early;
}

static inline int ptlrpc_client_replied(struct ptlrpc_request *req)
{
if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
req->rq_reply_deadline > cfs_time_current_sec())
return 0;
return req->rq_replied;
}

static inline int ptlrpc_client_recv(struct ptlrpc_request *req)
{
if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
req->rq_reply_deadline > cfs_time_current_sec())
return 1;
return req->rq_receiving_reply;
}
static inline int ptlrpc_client_recv_or_unlink(struct ptlrpc_request *req)
{
int rc;
spin_lock(&req->rq_lock);
if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
req->rq_reply_deadline > cfs_time_current_sec()) {
spin_unlock(&req->rq_lock);
return 1;
}
rc = req->rq_receiving_reply || req->rq_must_unlink;
spin_unlock(&req->rq_lock);
return rc;
}
static inline void ptlrpc_client_wake_req(struct ptlrpc_request *req)
{
if (req->rq_set == NULL)
cfs_waitq_signal(&req->rq_reply_waitq);
else
cfs_waitq_signal(&req->rq_set->set_waitq);
}
static inline void ptlrpc_rs_addref(struct ptlrpc_reply_state *rs)
{
LASSERT(atomic_read(&rs->rs_refcount) > 0);
atomic_inc(&rs->rs_refcount);
}

static inline void ptlrpc_rs_decref(struct ptlrpc_reply_state *rs)
{
LASSERT(atomic_read(&rs->rs_refcount) > 0);
if (atomic_dec_and_test(&rs->rs_refcount))
lustre_free_reply_state(rs);
}
1094 /* Should only be called once per req */
static inline void ptlrpc_req_drop_rs(struct ptlrpc_request *req)
{
if (req->rq_reply_state == NULL)
return; /* shouldn't occur */
ptlrpc_rs_decref(req->rq_reply_state);
req->rq_reply_state = NULL;
req->rq_repmsg = NULL;
}
static inline __u32 lustre_request_magic(struct ptlrpc_request *req)
{
return lustre_msg_get_magic(req->rq_reqmsg);
}
static inline int ptlrpc_req_get_repsize(struct ptlrpc_request *req)
{
switch (req->rq_reqmsg->lm_magic) {
case LUSTRE_MSG_MAGIC_V1:
CERROR("function not supported for lustre_msg V1!\n");
return -ENOTSUPP;
case LUSTRE_MSG_MAGIC_V2:
return req->rq_reqmsg->lm_repsize;
default:
LASSERTF(0, "incorrect message magic: %08x\n",
req->rq_reqmsg->lm_magic);
return -EFAULT;
}
}
static inline void
ptlrpc_req_set_repsize(struct ptlrpc_request *req, int count, __u32 *lens)
{
int size = lustre_msg_size(req->rq_reqmsg->lm_magic, count, lens);

req->rq_replen = size + lustre_msg_early_size(req);
if (req->rq_reqmsg->lm_magic == LUSTRE_MSG_MAGIC_V2)
req->rq_reqmsg->lm_repsize = size;
}
1134 /* ldlm/ldlm_lib.c */
1135 int client_obd_setup(struct obd_device *obddev, obd_count len, void *buf);
1136 int client_obd_cleanup(struct obd_device * obddev);
1137 int client_connect_import(struct lustre_handle *conn, struct obd_device *obd,
1138 struct obd_uuid *cluuid, struct obd_connect_data *,
1140 int client_disconnect_export(struct obd_export *exp);
1141 int client_import_add_conn(struct obd_import *imp, struct obd_uuid *uuid,
1143 int client_import_del_conn(struct obd_import *imp, struct obd_uuid *uuid);
1144 int import_set_conn_priority(struct obd_import *imp, struct obd_uuid *uuid);
1145 int server_disconnect_export(struct obd_export *exp);
1147 /* ptlrpc/pinger.c */
1148 enum timeout_event {
1151 struct timeout_item;
1152 typedef int (*timeout_cb_t)(struct timeout_item *, void *);
1153 int ptlrpc_pinger_add_import(struct obd_import *imp);
1154 int ptlrpc_pinger_del_import(struct obd_import *imp);
1155 int ptlrpc_add_timeout_client(int time, enum timeout_event event,
1156 timeout_cb_t cb, void *data,
1157 struct list_head *obd_list);
1158 int ptlrpc_del_timeout_client(struct list_head *obd_list,
1159 enum timeout_event event);
1160 struct ptlrpc_request * ptlrpc_prep_ping(struct obd_import *imp);
1161 int ptlrpc_obd_ping(struct obd_device *obd);
#ifdef __KERNEL__
void ping_evictor_start(void);
void ping_evictor_stop(void);
int ping_evictor_wake(struct obd_export *exp);
#else
#define ping_evictor_start() do {} while (0)
#define ping_evictor_stop() do {} while (0)
static inline int ping_evictor_wake(struct obd_export *exp)
{
return 1;
}
#endif
1175 /* ptlrpc/ptlrpcd.c */
1176 int ptlrpcd_start(char *name, struct ptlrpcd_ctl *pc);
1177 void ptlrpcd_stop(struct ptlrpcd_ctl *pc, int force);
1178 void ptlrpcd_wake(struct ptlrpc_request *req);
1179 int ptlrpcd_add_req(struct ptlrpc_request *req);
1180 void ptlrpcd_add_rqset(struct ptlrpc_request_set *set);
1181 int ptlrpcd_addref(void);
1182 void ptlrpcd_decref(void);
1184 /* ptlrpc/lproc_ptlrpc.c */
1185 const char* ll_opcode2str(__u32 opcode);
#ifdef LPROCFS
void ptlrpc_lprocfs_register_obd(struct obd_device *obd);
void ptlrpc_lprocfs_unregister_obd(struct obd_device *obd);
void ptlrpc_lprocfs_brw(struct ptlrpc_request *req, int bytes);
#else
static inline void ptlrpc_lprocfs_register_obd(struct obd_device *obd) {}
static inline void ptlrpc_lprocfs_unregister_obd(struct obd_device *obd) {}
static inline void ptlrpc_lprocfs_brw(struct ptlrpc_request *req, int bytes) {}
#endif
1196 /* ptlrpc/llog_server.c */
1197 int llog_origin_handle_create(struct ptlrpc_request *req);
1198 int llog_origin_handle_destroy(struct ptlrpc_request *req);
1199 int llog_origin_handle_prev_block(struct ptlrpc_request *req);
1200 int llog_origin_handle_next_block(struct ptlrpc_request *req);
1201 int llog_origin_handle_read_header(struct ptlrpc_request *req);
1202 int llog_origin_handle_close(struct ptlrpc_request *req);
1203 int llog_origin_handle_cancel(struct ptlrpc_request *req);
1204 int llog_catinfo(struct ptlrpc_request *req);
1206 /* ptlrpc/llog_client.c */
1207 extern struct llog_operations llog_client_ops;