4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
27 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
30 * Copyright (c) 2011, 2012, Whamcloud, Inc.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 /** \defgroup PtlRPC Portal RPC and networking module.
 * PortalRPC is the layer used by the rest of the Lustre code to achieve network
39 * communications: establish connections with corresponding export and import
40 * states, listen for a service, send and receive RPCs.
 * PortalRPC also includes the base recovery framework: packet resending and
 * replaying, reconnections, and the pinger.
44 * PortalRPC utilizes LNet as its transport layer.
58 #if defined(__linux__)
59 #include <linux/lustre_net.h>
60 #elif defined(__APPLE__)
61 #include <darwin/lustre_net.h>
62 #elif defined(__WINNT__)
63 #include <winnt/lustre_net.h>
65 #error Unsupported operating system.
68 #include <libcfs/libcfs.h>
70 #include <lnet/lnet.h>
71 #include <lustre/lustre_idl.h>
72 #include <lustre_ha.h>
73 #include <lustre_sec.h>
74 #include <lustre_import.h>
75 #include <lprocfs_status.h>
76 #include <lu_object.h>
77 #include <lustre_req_layout.h>
79 #include <obd_support.h>
80 #include <lustre_ver.h>
82 /* MD flags we _always_ use */
83 #define PTLRPC_MD_OPTIONS 0
86 * Define maxima for bulk I/O
87 * CAVEAT EMPTOR, with multinet (i.e. routers forwarding between networks)
88 * these limits are system wide and not interface-local. */
89 #define PTLRPC_MAX_BRW_BITS LNET_MTU_BITS
90 #define PTLRPC_MAX_BRW_SIZE (1<<LNET_MTU_BITS)
91 #define PTLRPC_MAX_BRW_PAGES (PTLRPC_MAX_BRW_SIZE >> CFS_PAGE_SHIFT)
93 /* When PAGE_SIZE is a constant, we can check our arithmetic here with cpp! */
95 # if ((PTLRPC_MAX_BRW_PAGES & (PTLRPC_MAX_BRW_PAGES - 1)) != 0)
96 # error "PTLRPC_MAX_BRW_PAGES isn't a power of two"
98 # if (PTLRPC_MAX_BRW_SIZE != (PTLRPC_MAX_BRW_PAGES * CFS_PAGE_SIZE))
99 # error "PTLRPC_MAX_BRW_SIZE isn't PTLRPC_MAX_BRW_PAGES * CFS_PAGE_SIZE"
101 # if (PTLRPC_MAX_BRW_SIZE > LNET_MTU)
102 # error "PTLRPC_MAX_BRW_SIZE too big"
104 # if (PTLRPC_MAX_BRW_PAGES > LNET_MAX_IOV)
105 # error "PTLRPC_MAX_BRW_PAGES too big"
107 #endif /* __KERNEL__ */
109 #define PTLRPC_NTHRS_MIN 2
 * The following constants determine how memory is used to buffer incoming
 * service requests.
115 * ?_NBUFS # buffers to allocate when growing the pool
116 * ?_BUFSIZE # bytes in a single request buffer
117 * ?_MAXREQSIZE # maximum request service will receive
119 * When fewer than ?_NBUFS/2 buffers are posted for receive, another chunk
120 * of ?_NBUFS is added to the pool.
122 * Messages larger than ?_MAXREQSIZE are dropped. Request buffers are
123 * considered full when less than ?_MAXREQSIZE is left in them.
125 #define LDLM_THREADS_AUTO_MIN (2)
126 #define LDLM_THREADS_AUTO_MAX min_t(unsigned, cfs_num_online_cpus() * \
127 cfs_num_online_cpus() * 32, 128)
128 #define LDLM_BL_THREADS LDLM_THREADS_AUTO_MIN
129 #define LDLM_NBUFS (64 * cfs_num_online_cpus())
130 #define LDLM_BUFSIZE (8 * 1024)
131 #define LDLM_MAXREQSIZE (5 * 1024)
132 #define LDLM_MAXREPSIZE (1024)
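/*
 * Worked example (illustrative, assuming 4 online CPUs): LDLM would post
 * LDLM_NBUFS = 64 * 4 = 256 request buffers of LDLM_BUFSIZE = 8 KB each,
 * and grow the pool by another 256 buffers whenever fewer than
 * LDLM_NBUFS / 2 = 128 buffers remain posted for receive.
 */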
134 /** Absolute limits */
135 #ifndef MDT_MAX_THREADS
136 #define MDT_MIN_THREADS PTLRPC_NTHRS_MIN
137 #define MDT_MAX_THREADS 512UL
139 #define MDS_NBUFS (64 * cfs_num_online_cpus())
141 * Assume file name length = FNAME_MAX = 256 (true for ext3).
142 * path name length = PATH_MAX = 4096
143 * LOV MD size max = EA_MAX = 48000 (2000 stripes)
144 * symlink: FNAME_MAX + PATH_MAX <- largest
145 * link: FNAME_MAX + PATH_MAX (mds_rec_link < mds_rec_create)
146 * rename: FNAME_MAX + FNAME_MAX
147 * open: FNAME_MAX + EA_MAX
149 * MDS_MAXREQSIZE ~= 4736 bytes =
150 * lustre_msg + ldlm_request + mdt_body + mds_rec_create + FNAME_MAX + PATH_MAX
151 * MDS_MAXREPSIZE ~= 8300 bytes = lustre_msg + llog_header
152 * or, for mds_close() and mds_reint_unlink() on a many-OST filesystem:
153 * = 9210 bytes = lustre_msg + mdt_body + 160 * (easize + cookiesize)
155 * Realistic size is about 512 bytes (20 character name + 128 char symlink),
156 * except in the open case where there are a large number of OSTs in a LOV.
158 #define MDS_MAXREPSIZE max(10 * 1024, 362 + LOV_MAX_STRIPE_COUNT * 56)
159 #define MDS_MAXREQSIZE MDS_MAXREPSIZE
161 /** MDS_BUFSIZE = max_reqsize + max sptlrpc payload size */
162 #define MDS_BUFSIZE (MDS_MAXREQSIZE + 1024)
164 /** FLD_MAXREQSIZE == lustre_msg + __u32 padding + ptlrpc_body + opc */
165 #define FLD_MAXREQSIZE (160)
167 /** FLD_MAXREPSIZE == lustre_msg + ptlrpc_body */
168 #define FLD_MAXREPSIZE (152)
171 * SEQ_MAXREQSIZE == lustre_msg + __u32 padding + ptlrpc_body + opc + lu_range +
173 #define SEQ_MAXREQSIZE (160)
175 /** SEQ_MAXREPSIZE == lustre_msg + ptlrpc_body + lu_range */
176 #define SEQ_MAXREPSIZE (152)
178 /** MGS threads must be >= 3, see bug 22458 comment #28 */
179 #define MGS_THREADS_AUTO_MIN 3
180 #define MGS_THREADS_AUTO_MAX 32
181 #define MGS_NBUFS (64 * cfs_num_online_cpus())
182 #define MGS_BUFSIZE (8 * 1024)
183 #define MGS_MAXREQSIZE (7 * 1024)
184 #define MGS_MAXREPSIZE (9 * 1024)
186 /** Absolute OSS limits */
187 #define OSS_THREADS_MIN 3 /* difficult replies, HPQ, others */
188 #define OSS_THREADS_MAX 512
189 #define OST_NBUFS (64 * cfs_num_online_cpus())
190 #define OST_BUFSIZE (8 * 1024)
193 * OST_MAXREQSIZE ~= 4768 bytes =
194 * lustre_msg + obdo + 16 * obd_ioobj + 256 * niobuf_remote
196 * - single object with 16 pages is 512 bytes
197 * - OST_MAXREQSIZE must be at least 1 page of cookies plus some spillover
199 #define OST_MAXREQSIZE (5 * 1024)
200 #define OST_MAXREPSIZE (9 * 1024)
202 /* Macro to hide a typecast. */
203 #define ptlrpc_req_async_args(req) ((void *)&req->rq_async_args)
 * Structure defining a single portal connection.
208 struct ptlrpc_connection {
209 /** linkage for connections hash table */
210 cfs_hlist_node_t c_hash;
211 /** Our own lnet nid for this connection */
213 /** Remote side nid for this connection */
214 lnet_process_id_t c_peer;
215 /** UUID of the other side */
216 struct obd_uuid c_remote_uuid;
217 /** reference counter for this connection */
218 cfs_atomic_t c_refcount;
221 /** Client definition for PortalRPC */
222 struct ptlrpc_client {
223 /** What lnet portal does this client send messages to by default */
224 __u32 cli_request_portal;
225 /** What portal do we expect replies on */
226 __u32 cli_reply_portal;
227 /** Name of the client */
231 /** state flags of requests */
232 /* XXX only ones left are those used by the bulk descs as well! */
233 #define PTL_RPC_FL_INTR (1 << 0) /* reply wait was interrupted by user */
234 #define PTL_RPC_FL_TIMEOUT (1 << 7) /* request timed out waiting for reply */
236 #define REQ_MAX_ACK_LOCKS 8
238 union ptlrpc_async_args {
240 * Scratchpad for passing args to completion interpreter. Users
241 * cast to the struct of their choosing, and CLASSERT that this is
242 * big enough. For _tons_ of context, OBD_ALLOC a struct and store
243 * a pointer to it here. The pointer_arg ensures this struct is at
244 * least big enough for that.
246 void *pointer_arg[11];
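/*
 * A minimal usage sketch (the argument struct and field names below are
 * hypothetical): a caller defines its own argument structure, CLASSERTs
 * that it fits into the union, and accesses it through
 * ptlrpc_req_async_args():
 *
 *      struct my_async_args {
 *              struct obd_import *aa_imp;
 *              int                aa_flags;
 *      };
 *
 *      CLASSERT(sizeof(struct my_async_args) <=
 *               sizeof(union ptlrpc_async_args));
 *      struct my_async_args *aa = ptlrpc_req_async_args(req);
 *      aa->aa_imp = imp;
 */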
250 struct ptlrpc_request_set;
251 typedef int (*set_interpreter_func)(struct ptlrpc_request_set *, void *, int);
252 typedef int (*set_producer_func)(struct ptlrpc_request_set *, void *);
255 * Definition of request set structure.
 * A request set is a list of requests (not necessarily to the same target) that
257 * once populated with RPCs could be sent in parallel.
258 * There are two kinds of request sets. General purpose and with dedicated
259 * serving thread. Example of the latter is ptlrpcd set.
 * For general purpose sets, once the set has started sending it is impossible
 * to add new requests to it.
 * Provides a way to call "completion callbacks" when all requests in the set
 * have completed.
265 struct ptlrpc_request_set {
266 cfs_atomic_t set_refcount;
267 /** number of in queue requests */
268 cfs_atomic_t set_new_count;
269 /** number of uncompleted requests */
270 cfs_atomic_t set_remaining;
271 /** wait queue to wait on for request events */
272 cfs_waitq_t set_waitq;
273 cfs_waitq_t *set_wakeup_ptr;
274 /** List of requests in the set */
275 cfs_list_t set_requests;
 * List of completion callbacks to be called when the set is completed.
278 * This is only used if \a set_interpret is NULL.
279 * Links struct ptlrpc_set_cbdata.
281 cfs_list_t set_cblist;
282 /** Completion callback, if only one. */
283 set_interpreter_func set_interpret;
/** opaque argument passed to the completion \a set_interpret callback. */
 * Lock for \a set_new_requests manipulations;
 * locked so that any caller can communicate requests to
 * the set holder, who can then fold them into the lock-free set
291 cfs_spinlock_t set_new_req_lock;
292 /** List of new yet unsent requests. Only used with ptlrpcd now. */
293 cfs_list_t set_new_requests;
295 /** rq_status of requests that have been freed already */
297 /** Additional fields used by the flow control extension */
298 /** Maximum number of RPCs in flight */
299 int set_max_inflight;
300 /** Callback function used to generate RPCs */
301 set_producer_func set_producer;
/** opaque argument passed to the producer callback */
303 void *set_producer_arg;
 * Description of a single ptlrpc_set callback
309 struct ptlrpc_set_cbdata {
310 /** List linkage item */
312 /** Pointer to interpreting function */
313 set_interpreter_func psc_interpret;
/** Opaque argument to pass to the callback */
318 struct ptlrpc_bulk_desc;
319 struct ptlrpc_service_part;
322 * ptlrpc callback & work item stuff
324 struct ptlrpc_cb_id {
325 void (*cbid_fn)(lnet_event_t *ev); /* specific callback fn */
326 void *cbid_arg; /* additional arg */
329 /** Maximum number of locks to fit into reply state */
330 #define RS_MAX_LOCKS 8
334 * Structure to define reply state on the server
335 * Reply state holds various reply message information. Also for "difficult"
 * replies (rep-ack case) we store the state after sending the reply and wait
337 * for the client to acknowledge the reception. In these cases locks could be
338 * added to the state for replay/failover consistency guarantees.
340 struct ptlrpc_reply_state {
341 /** Callback description */
342 struct ptlrpc_cb_id rs_cb_id;
343 /** Linkage for list of all reply states in a system */
345 /** Linkage for list of all reply states on same export */
346 cfs_list_t rs_exp_list;
347 /** Linkage for list of all reply states for same obd */
348 cfs_list_t rs_obd_list;
350 cfs_list_t rs_debug_list;
352 /** A spinlock to protect the reply state flags */
353 cfs_spinlock_t rs_lock;
354 /** Reply state flags */
355 unsigned long rs_difficult:1; /* ACK/commit stuff */
356 unsigned long rs_no_ack:1; /* no ACK, even for
357 difficult requests */
358 unsigned long rs_scheduled:1; /* being handled? */
359 unsigned long rs_scheduled_ever:1;/* any schedule attempts? */
360 unsigned long rs_handled:1; /* been handled yet? */
361 unsigned long rs_on_net:1; /* reply_out_callback pending? */
362 unsigned long rs_prealloc:1; /* rs from prealloc list */
363 unsigned long rs_committed:1;/* the transaction was committed
364 and the rs was dispatched
365 by ptlrpc_commit_replies */
366 /** Size of the state */
370 /** Transaction number */
374 struct obd_export *rs_export;
375 struct ptlrpc_service_part *rs_svcpt;
376 /** Lnet metadata handle for the reply */
377 lnet_handle_md_t rs_md_h;
378 cfs_atomic_t rs_refcount;
/** Context for the service thread */
381 struct ptlrpc_svc_ctx *rs_svc_ctx;
382 /** Reply buffer (actually sent to the client), encoded if needed */
383 struct lustre_msg *rs_repbuf; /* wrapper */
384 /** Size of the reply buffer */
385 int rs_repbuf_len; /* wrapper buf length */
386 /** Size of the reply message */
387 int rs_repdata_len; /* wrapper msg length */
 * Actual reply message. Its content is encrypted (if needed) to
 * produce the reply buffer for actual sending. In the simple case
 * of no network encryption we just set \a rs_repbuf to \a rs_msg
393 struct lustre_msg *rs_msg; /* reply message */
395 /** Number of locks awaiting client ACK */
397 /** Handles of locks awaiting client reply ACK */
398 struct lustre_handle rs_locks[RS_MAX_LOCKS];
399 /** Lock modes of locks in \a rs_locks */
400 ldlm_mode_t rs_modes[RS_MAX_LOCKS];
403 struct ptlrpc_thread;
407 RQ_PHASE_NEW = 0xebc0de00,
408 RQ_PHASE_RPC = 0xebc0de01,
409 RQ_PHASE_BULK = 0xebc0de02,
410 RQ_PHASE_INTERPRET = 0xebc0de03,
411 RQ_PHASE_COMPLETE = 0xebc0de04,
412 RQ_PHASE_UNREGISTERING = 0xebc0de05,
413 RQ_PHASE_UNDEFINED = 0xebc0de06
416 /** Type of request interpreter call-back */
417 typedef int (*ptlrpc_interpterer_t)(const struct lu_env *env,
418 struct ptlrpc_request *req,
422 * Definition of request pool structure.
423 * The pool is used to store empty preallocated requests for the case
424 * when we would actually need to send something without performing
425 * any allocations (to avoid e.g. OOM).
427 struct ptlrpc_request_pool {
428 /** Locks the list */
429 cfs_spinlock_t prp_lock;
430 /** list of ptlrpc_request structs */
431 cfs_list_t prp_req_list;
/** Maximum message size that would fit into a request from this pool */
434 /** Function to allocate more requests for this pool */
435 void (*prp_populate)(struct ptlrpc_request_pool *, int);
444 * Basic request prioritization operations structure.
445 * The whole idea is centered around locks and RPCs that might affect locks.
446 * When a lock is contended we try to give priority to RPCs that might lead
447 * to fastest release of that lock.
 * Currently this is implemented only for OSTs, in a way that gives all
 * IO and truncate RPCs coming from a region covered by a contended lock
 * priority over other requests.
452 struct ptlrpc_hpreq_ops {
454 * Check if the lock handle of the given lock is the same as
 * the one taken from the request.
457 int (*hpreq_lock_match)(struct ptlrpc_request *, struct ldlm_lock *);
459 * Check if the request is a high priority one.
461 int (*hpreq_check)(struct ptlrpc_request *);
463 * Called after the request has been handled.
465 void (*hpreq_fini)(struct ptlrpc_request *);
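/*
 * Sketch of a service-specific table (the handler names are hypothetical);
 * the server attaches it to an incoming request so that the scheduling code
 * can query and finalize its priority:
 *
 *      static struct ptlrpc_hpreq_ops my_hpreq_ops = {
 *              .hpreq_lock_match = my_hpreq_lock_match,
 *              .hpreq_check      = my_hpreq_check,
 *              .hpreq_fini       = my_hpreq_fini,
 *      };
 *
 *      req->rq_ops = &my_hpreq_ops;
 */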
 * Represents a remote procedure call.
 * This is a staple structure used by everybody wanting to send a request
 * in Lustre.
474 struct ptlrpc_request {
475 /* Request type: one of PTL_RPC_MSG_* */
478 * Linkage item through which this request is included into
479 * sending/delayed lists on client and into rqbd list on server
 * Server-side list of incoming unserved requests sorted by arrival
 * time. Traversed from time to time to notice about-to-expire
 * requests and send back "early replies" to clients to let them
 * know the server is alive and well, just too busy to service their
 * request in time.
489 cfs_list_t rq_timed_list;
/** server-side history, used for debugging purposes. */
491 cfs_list_t rq_history_list;
492 /** server-side per-export list */
493 cfs_list_t rq_exp_list;
494 /** server-side hp handlers */
495 struct ptlrpc_hpreq_ops *rq_ops;
496 /** history sequence # */
497 __u64 rq_history_seq;
498 /** the index of service's srv_at_array into which request is linked */
500 /** Result of request processing */
502 /** Lock to protect request flags and some other important bits, like
505 cfs_spinlock_t rq_lock;
506 /** client-side flags are serialized by rq_lock */
507 unsigned long rq_intr:1, rq_replied:1, rq_err:1,
508 rq_timedout:1, rq_resend:1, rq_restart:1,
 * when ->rq_replay is set, the request is kept by the client even
 * after the server commits the corresponding transaction. This is
 * used for operations that require a sequence of multiple
 * requests to be replayed. The only example currently is file
 * open/close. When the last request in such a sequence is
 * committed, ->rq_replay is cleared on all requests in the
 * sequence.
519 rq_no_resend:1, rq_waiting:1, rq_receiving_reply:1,
520 rq_no_delay:1, rq_net_err:1, rq_wait_ctx:1,
521 rq_early:1, rq_must_unlink:1,
522 rq_fake:1, /* this fake req */
523 rq_memalloc:1, /* req originated from "kswapd" */
524 /* server-side flags */
525 rq_packed_final:1, /* packed final reply */
526 rq_hp:1, /* high priority RPC */
527 rq_at_linked:1, /* link into service's srv_at_array */
530 /* whether the "rq_set" is a valid one */
534 enum rq_phase rq_phase; /* one of RQ_PHASE_* */
535 enum rq_phase rq_next_phase; /* one of RQ_PHASE_* to be used next */
536 cfs_atomic_t rq_refcount;/* client-side refcount for SENT race,
                                   server-side refcount for multiple replies */
539 /** initial thread servicing this request */
540 struct ptlrpc_thread *rq_svc_thread;
542 /** Portal to which this request would be sent */
543 int rq_request_portal; /* XXX FIXME bug 249 */
544 /** Portal where to wait for reply and where reply would be sent */
545 int rq_reply_portal; /* XXX FIXME bug 249 */
549 * !rq_truncate : # reply bytes actually received,
550 * rq_truncate : required repbuf_len for resend
553 /** Request length */
555 /** Request message - what client sent */
556 struct lustre_msg *rq_reqmsg;
560 /** Reply message - server response */
561 struct lustre_msg *rq_repmsg;
562 /** Transaction number */
 * List item for the replay list. Not-yet-committed requests get linked
 * there.
569 * Also see \a rq_replay comment above.
571 cfs_list_t rq_replay_list;
574 * security and encryption data
576 struct ptlrpc_cli_ctx *rq_cli_ctx; /**< client's half ctx */
577 struct ptlrpc_svc_ctx *rq_svc_ctx; /**< server's half ctx */
578 cfs_list_t rq_ctx_chain; /**< link to waited ctx */
580 struct sptlrpc_flavor rq_flvr; /**< for client & server */
581 enum lustre_sec_part rq_sp_from;
583 unsigned long /* client/server security flags */
584 rq_ctx_init:1, /* context initiation */
585 rq_ctx_fini:1, /* context destroy */
586 rq_bulk_read:1, /* request bulk read */
587 rq_bulk_write:1, /* request bulk write */
588 /* server authentication flags */
589 rq_auth_gss:1, /* authenticated by gss */
590 rq_auth_remote:1, /* authed as remote user */
591 rq_auth_usr_root:1, /* authed as root */
592 rq_auth_usr_mdt:1, /* authed as mdt */
593 rq_auth_usr_ost:1, /* authed as ost */
594 /* security tfm flags */
597 /* doesn't expect reply FIXME */
599 rq_pill_init:1; /* pill initialized */
601 uid_t rq_auth_uid; /* authed uid */
602 uid_t rq_auth_mapped_uid; /* authed uid mapped to */
604 /* (server side), pointed directly into req buffer */
605 struct ptlrpc_user_desc *rq_user_desc;
607 /** early replies go to offset 0, regular replies go after that */
608 unsigned int rq_reply_off;
610 /* various buffer pointers */
611 struct lustre_msg *rq_reqbuf; /* req wrapper */
612 int rq_reqbuf_len; /* req wrapper buf len */
613 int rq_reqdata_len; /* req wrapper msg len */
614 char *rq_repbuf; /* rep buffer */
615 int rq_repbuf_len; /* rep buffer len */
616 struct lustre_msg *rq_repdata; /* rep wrapper msg */
617 int rq_repdata_len; /* rep wrapper msg len */
618 struct lustre_msg *rq_clrbuf; /* only in priv mode */
619 int rq_clrbuf_len; /* only in priv mode */
620 int rq_clrdata_len; /* only in priv mode */
624 /** Fields that help to see if request and reply were swabbed or not */
625 __u32 rq_req_swab_mask;
626 __u32 rq_rep_swab_mask;
628 /** What was import generation when this request was sent */
629 int rq_import_generation;
630 enum lustre_imp_state rq_send_state;
632 /** how many early replies (for stats) */
635 /** client+server request */
636 lnet_handle_md_t rq_req_md_h;
637 struct ptlrpc_cb_id rq_req_cbid;
638 /** optional time limit for send attempts */
639 cfs_duration_t rq_delay_limit;
640 /** time request was first queued */
641 cfs_time_t rq_queued_time;
644 /** request arrival time */
645 struct timeval rq_arrival_time;
646 /** separated reply state */
647 struct ptlrpc_reply_state *rq_reply_state;
648 /** incoming request buffer */
649 struct ptlrpc_request_buffer_desc *rq_rqbd;
651 __u32 rq_uid; /* peer uid, used in MDS only */
654 /** client-only incoming reply */
655 lnet_handle_md_t rq_reply_md_h;
656 cfs_waitq_t rq_reply_waitq;
657 struct ptlrpc_cb_id rq_reply_cbid;
661 /** Peer description (the other side) */
662 lnet_process_id_t rq_peer;
663 /** Server-side, export on which request was received */
664 struct obd_export *rq_export;
665 /** Client side, import where request is being sent */
666 struct obd_import *rq_import;
668 /** Replay callback, called after request is replayed at recovery */
669 void (*rq_replay_cb)(struct ptlrpc_request *);
671 * Commit callback, called when request is committed and about to be
674 void (*rq_commit_cb)(struct ptlrpc_request *);
675 /** Opaq data for replay and commit callbacks. */
678 /** For bulk requests on client only: bulk descriptor */
679 struct ptlrpc_bulk_desc *rq_bulk;
681 /** client outgoing req */
683 * when request/reply sent (secs), or time when request should be sent
686 /** time for request really sent out */
689 /** when request must finish. volatile
690 * so that servers' early reply updates to the deadline aren't
691 * kept in per-cpu cache */
692 volatile time_t rq_deadline;
693 /** when req reply unlink must finish. */
694 time_t rq_reply_deadline;
695 /** when req bulk unlink must finish. */
696 time_t rq_bulk_deadline;
698 * service time estimate (secs)
 * If the request is not served by this time, it is marked as timed out.
703 /** Multi-rpc bits */
704 /** Link item for request set lists */
705 cfs_list_t rq_set_chain;
706 /** Per-request waitq introduced by bug 21938 for recovery waiting */
707 cfs_waitq_t rq_set_waitq;
708 /** Link back to the request set */
709 struct ptlrpc_request_set *rq_set;
710 /** Async completion handler, called when reply is received */
711 ptlrpc_interpterer_t rq_interpret_reply;
712 /** Async completion context */
713 union ptlrpc_async_args rq_async_args;
715 /** Pool if request is from preallocated list */
716 struct ptlrpc_request_pool *rq_pool;
718 struct lu_context rq_session;
719 struct lu_context rq_recov_session;
721 /** request format description */
722 struct req_capsule rq_pill;
 * Call the completion handler for the rpc if any, return its status or the original
727 * rc if there was no handler defined for this request.
729 static inline int ptlrpc_req_interpret(const struct lu_env *env,
730 struct ptlrpc_request *req, int rc)
732 if (req->rq_interpret_reply != NULL) {
                req->rq_status = req->rq_interpret_reply(env, req,
                                                         &req->rq_async_args,
                                                         rc);
736 return req->rq_status;
742 * Returns 1 if request buffer at offset \a index was already swabbed
744 static inline int lustre_req_swabbed(struct ptlrpc_request *req, int index)
746 LASSERT(index < sizeof(req->rq_req_swab_mask) * 8);
747 return req->rq_req_swab_mask & (1 << index);
751 * Returns 1 if request reply buffer at offset \a index was already swabbed
753 static inline int lustre_rep_swabbed(struct ptlrpc_request *req, int index)
755 LASSERT(index < sizeof(req->rq_rep_swab_mask) * 8);
756 return req->rq_rep_swab_mask & (1 << index);
760 * Returns 1 if request needs to be swabbed into local cpu byteorder
762 static inline int ptlrpc_req_need_swab(struct ptlrpc_request *req)
764 return lustre_req_swabbed(req, MSG_PTLRPC_HEADER_OFF);
768 * Returns 1 if request reply needs to be swabbed into local cpu byteorder
770 static inline int ptlrpc_rep_need_swab(struct ptlrpc_request *req)
772 return lustre_rep_swabbed(req, MSG_PTLRPC_HEADER_OFF);
776 * Mark request buffer at offset \a index that it was already swabbed
778 static inline void lustre_set_req_swabbed(struct ptlrpc_request *req, int index)
780 LASSERT(index < sizeof(req->rq_req_swab_mask) * 8);
781 LASSERT((req->rq_req_swab_mask & (1 << index)) == 0);
782 req->rq_req_swab_mask |= 1 << index;
786 * Mark request reply buffer at offset \a index that it was already swabbed
788 static inline void lustre_set_rep_swabbed(struct ptlrpc_request *req, int index)
790 LASSERT(index < sizeof(req->rq_rep_swab_mask) * 8);
791 LASSERT((req->rq_rep_swab_mask & (1 << index)) == 0);
792 req->rq_rep_swab_mask |= 1 << index;
796 * Convert numerical request phase value \a phase into text string description
798 static inline const char *
799 ptlrpc_phase2str(enum rq_phase phase)
808 case RQ_PHASE_INTERPRET:
810 case RQ_PHASE_COMPLETE:
812 case RQ_PHASE_UNREGISTERING:
813 return "Unregistering";
 * Convert numerical request phase of the request \a req into a text string description
823 static inline const char *
824 ptlrpc_rqphase2str(struct ptlrpc_request *req)
826 return ptlrpc_phase2str(req->rq_phase);
830 * Debugging functions and helpers to print request structure into debug log
833 /* Spare the preprocessor, spoil the bugs. */
834 #define FLAG(field, str) (field ? str : "")
836 /** Convert bit flags into a string */
837 #define DEBUG_REQ_FLAGS(req) \
838 ptlrpc_rqphase2str(req), \
839 FLAG(req->rq_intr, "I"), FLAG(req->rq_replied, "R"), \
840 FLAG(req->rq_err, "E"), \
841 FLAG(req->rq_timedout, "X") /* eXpired */, FLAG(req->rq_resend, "S"), \
842 FLAG(req->rq_restart, "T"), FLAG(req->rq_replay, "P"), \
843 FLAG(req->rq_no_resend, "N"), \
844 FLAG(req->rq_waiting, "W"), \
845 FLAG(req->rq_wait_ctx, "C"), FLAG(req->rq_hp, "H"), \
846 FLAG(req->rq_committed, "M")
848 #define REQ_FLAGS_FMT "%s:%s%s%s%s%s%s%s%s%s%s%s%s"
850 void _debug_req(struct ptlrpc_request *req,
851 struct libcfs_debug_msg_data *data, const char *fmt, ...)
852 __attribute__ ((format (printf, 3, 4)));
 * Helper that decides if we need to print the request according to the current
 * debug level mask
858 #define debug_req(msgdata, mask, cdls, req, fmt, a...) \
860 CFS_CHECK_STACK(msgdata, mask, cdls); \
862 if (((mask) & D_CANTMASK) != 0 || \
863 ((libcfs_debug & (mask)) != 0 && \
864 (libcfs_subsystem_debug & DEBUG_SUBSYSTEM) != 0)) \
865 _debug_req((req), msgdata, fmt, ##a); \
 * This is the debug print function you need to use to print request structure
 * content into the lustre debug log.
 * For most callers (level is a constant) this is resolved at compile time */
872 #define DEBUG_REQ(level, req, fmt, args...) \
874 if ((level) & (D_ERROR | D_WARNING)) { \
875 static cfs_debug_limit_state_t cdls; \
876 LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, level, &cdls); \
877 debug_req(&msgdata, level, &cdls, req, "@@@ "fmt" ", ## args);\
879 LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, level, NULL); \
880 debug_req(&msgdata, level, NULL, req, "@@@ "fmt" ", ## args); \
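/*
 * Typical call site sketch (the message text and rc are illustrative):
 *
 *      DEBUG_REQ(D_ERROR, req, "processing failed: rc = %d", rc);
 */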
886 * Structure that defines a single page of a bulk transfer
888 struct ptlrpc_bulk_page {
889 /** Linkage to list of pages in a bulk */
892 * Number of bytes in a page to transfer starting from \a bp_pageoffset
895 /** offset within a page */
897 /** The page itself */
898 struct page *bp_page;
901 #define BULK_GET_SOURCE 0
902 #define BULK_PUT_SINK 1
903 #define BULK_GET_SINK 2
904 #define BULK_PUT_SOURCE 3
 * Definition of bulk descriptor.
 * Bulks are special "two phase" RPCs where the initial request message
 * is sent first and is followed by a transfer (or receiving) of a large
 * amount of data to be settled into pages referenced from the bulk descriptors.
 * Bulk transfers (the actual data following the small requests) are done
912 * on separate LNet portals.
913 * In lustre we use bulk transfers for READ and WRITE transfers from/to OSTs.
914 * Another user is readpage for MDT.
916 struct ptlrpc_bulk_desc {
917 /** completed successfully */
918 unsigned long bd_success:1;
919 /** accessible to the network (network io potentially in progress) */
920 unsigned long bd_network_rw:1;
921 /** {put,get}{source,sink} */
922 unsigned long bd_type:2;
924 unsigned long bd_registered:1;
925 /** For serialization with callback */
926 cfs_spinlock_t bd_lock;
927 /** Import generation when request for this bulk was sent */
928 int bd_import_generation;
929 /** Server side - export this bulk created for */
930 struct obd_export *bd_export;
931 /** Client side - import this bulk was sent on */
932 struct obd_import *bd_import;
933 /** LNet portal for this bulk */
935 /** Back pointer to the request */
936 struct ptlrpc_request *bd_req;
937 cfs_waitq_t bd_waitq; /* server side only WQ */
938 int bd_iov_count; /* # entries in bd_iov */
939 int bd_max_iov; /* allocated size of bd_iov */
940 int bd_nob; /* # bytes covered */
941 int bd_nob_transferred; /* # bytes GOT/PUT */
945 struct ptlrpc_cb_id bd_cbid; /* network callback info */
946 lnet_handle_md_t bd_md_h; /* associated MD */
947 lnet_nid_t bd_sender; /* stash event::sender */
949 #if defined(__KERNEL__)
951 * encrypt iov, size is either 0 or bd_iov_count.
953 lnet_kiov_t *bd_enc_iov;
955 lnet_kiov_t bd_iov[0];
957 lnet_md_iovec_t bd_iov[0];
962 SVC_STOPPED = 1 << 0,
963 SVC_STOPPING = 1 << 1,
964 SVC_STARTING = 1 << 2,
965 SVC_RUNNING = 1 << 3,
971 * Definition of server service thread structure
973 struct ptlrpc_thread {
975 * List of active threads in svc->srv_threads
979 * thread-private data (preallocated memory)
984 * service thread index, from ptlrpc_start_threads
992 * put watchdog in the structure per thread b=14840
994 struct lc_watchdog *t_watchdog;
996 * the svc this thread belonged to b=18582
998 struct ptlrpc_service_part *t_svcpt;
999 cfs_waitq_t t_ctl_waitq;
1000 struct lu_env *t_env;
1003 static inline int thread_is_init(struct ptlrpc_thread *thread)
1005 return thread->t_flags == 0;
1008 static inline int thread_is_stopped(struct ptlrpc_thread *thread)
1010 return !!(thread->t_flags & SVC_STOPPED);
1013 static inline int thread_is_stopping(struct ptlrpc_thread *thread)
1015 return !!(thread->t_flags & SVC_STOPPING);
1018 static inline int thread_is_starting(struct ptlrpc_thread *thread)
1020 return !!(thread->t_flags & SVC_STARTING);
1023 static inline int thread_is_running(struct ptlrpc_thread *thread)
1025 return !!(thread->t_flags & SVC_RUNNING);
1028 static inline int thread_is_event(struct ptlrpc_thread *thread)
1030 return !!(thread->t_flags & SVC_EVENT);
1033 static inline int thread_is_signal(struct ptlrpc_thread *thread)
1035 return !!(thread->t_flags & SVC_SIGNAL);
1038 static inline void thread_clear_flags(struct ptlrpc_thread *thread, __u32 flags)
1040 thread->t_flags &= ~flags;
1043 static inline void thread_set_flags(struct ptlrpc_thread *thread, __u32 flags)
1045 thread->t_flags = flags;
1048 static inline void thread_add_flags(struct ptlrpc_thread *thread, __u32 flags)
1050 thread->t_flags |= flags;
1053 static inline int thread_test_and_clear_flags(struct ptlrpc_thread *thread,
1056 if (thread->t_flags & flags) {
1057 thread->t_flags &= ~flags;
1064 * Request buffer descriptor structure.
1065 * This is a structure that contains one posted request buffer for service.
 * Once data lands in a buffer, the event callback creates the actual request
 * and wakes one of the service threads to process the new incoming request.
1068 * More than one request can fit into the buffer.
1070 struct ptlrpc_request_buffer_desc {
1071 /** Link item for rqbds on a service */
1072 cfs_list_t rqbd_list;
1073 /** History of requests for this buffer */
1074 cfs_list_t rqbd_reqs;
1075 /** Back pointer to service for which this buffer is registered */
1076 struct ptlrpc_service_part *rqbd_svcpt;
1077 /** LNet descriptor */
1078 lnet_handle_md_t rqbd_md_h;
1080 /** The buffer itself */
1082 struct ptlrpc_cb_id rqbd_cbid;
1084 * This "embedded" request structure is only used for the
1085 * last request to fit into the buffer
1087 struct ptlrpc_request rqbd_req;
1090 typedef int (*svc_handler_t)(struct ptlrpc_request *req);
1092 struct ptlrpc_service_ops {
 * if non-NULL, called during thread creation (ptlrpc_start_thread())
 * to initialize service-specific per-thread state.
1097 int (*so_thr_init)(struct ptlrpc_thread *thr);
 * if non-NULL, called during thread shutdown (ptlrpc_main()) to
 * destroy the state created by ->so_thr_init().
1102 void (*so_thr_done)(struct ptlrpc_thread *thr);
1104 * Handler function for incoming requests for this service
1106 int (*so_req_handler)(struct ptlrpc_request *req);
 * function to determine the priority of the request; it is called
 * on every new request
1111 int (*so_hpreq_handler)(struct ptlrpc_request *);
1113 * service-specific print fn
1115 void (*so_req_printer)(void *, struct ptlrpc_request *);
1118 #ifndef __cfs_cacheline_aligned
/* NB: put it here to reduce patch dependence */
1120 # define __cfs_cacheline_aligned
 * How many high priority requests to serve before serving one normal
 * request.
1127 #define PTLRPC_SVC_HP_RATIO 10
1130 * Definition of PortalRPC service.
 * The service listens on a particular portal (like a tcp port)
 * and performs actions for a specific server, like IO service for OST
1133 * or general metadata service for MDS.
1135 struct ptlrpc_service {
1136 /** serialize /proc operations */
1137 cfs_spinlock_t srv_lock;
1138 /** most often accessed fields */
1139 /** chain thru all services */
1140 cfs_list_t srv_list;
1141 /** service operations table */
1142 struct ptlrpc_service_ops srv_ops;
1143 /** only statically allocated strings here; we don't clean them */
1145 /** only statically allocated strings here; we don't clean them */
1146 char *srv_thread_name;
1147 /** service thread list */
1148 cfs_list_t srv_threads;
1149 /** threads to start at beginning of service */
1150 int srv_threads_min;
1151 /** thread upper limit */
1152 int srv_threads_max;
1153 /** Root of /proc dir tree for this service */
1154 cfs_proc_dir_entry_t *srv_procroot;
1155 /** Pointer to statistic data for this service */
1156 struct lprocfs_stats *srv_stats;
1157 /** # hp per lp reqs to handle */
1158 int srv_hpreq_ratio;
1159 /** biggest request to receive */
1160 int srv_max_req_size;
1161 /** biggest reply to send */
1162 int srv_max_reply_size;
1163 /** size of individual buffers */
1165 /** # buffers to allocate in 1 group */
1166 int srv_nbuf_per_group;
1167 /** Local portal on which to receive requests */
1168 __u32 srv_req_portal;
1169 /** Portal on the client to send replies to */
1170 __u32 srv_rep_portal;
1172 * Tags for lu_context associated with this thread, see struct
1176 /** soft watchdog timeout multiplier */
1177 int srv_watchdog_factor;
1178 /** bind threads to CPUs */
1179 unsigned srv_cpu_affinity:1;
1180 /** under unregister_service */
1181 unsigned srv_is_stopping:1;
 * max # request buffers in history; it needs to be converted into a
 * per-partition value when we have multiple partitions
1187 int srv_max_history_rqbds;
1189 * partition data for ptlrpc service, only one instance so far,
1190 * instance per CPT will come soon
1192 struct ptlrpc_service_part *srv_part;
1196 * Definition of PortalRPC service partition data.
 * Although a service only has one instance of it right now, we
 * will have multiple instances very soon (one instance per CPT).
 *
 * it has four locks:
 * \a scp_lock
 *    serialize operations on rqbd and requests waiting for preprocess
 * \a scp_req_lock
 *    serialize operations on active requests sent to this portal
 * \a scp_at_lock
 *    serialize adaptive timeout stuff
 * \a scp_rep_lock
 *    serialize operations on the RS list (reply states)
1210 * We don't have any use-case to take two or more locks at the same time
1211 * for now, so there is no lock order issue.
1213 struct ptlrpc_service_part {
1214 /** back reference to owner */
1215 struct ptlrpc_service *scp_service __cfs_cacheline_aligned;
1216 /* CPT id, reserved */
1218 /** always increasing number */
1220 /** # of starting threads */
1221 int scp_nthrs_starting;
1222 /** # of stopping threads, reserved for shrinking threads */
1223 int scp_nthrs_stopping;
1224 /** # running threads */
1225 int scp_nthrs_running;
1226 /** service threads list */
1227 cfs_list_t scp_threads;
1230 * serialize the following fields, used for protecting
1231 * rqbd list and incoming requests waiting for preprocess,
1232 * threads starting & stopping are also protected by this lock.
1234 cfs_spinlock_t scp_lock __cfs_cacheline_aligned;
1235 /** total # req buffer descs allocated */
1236 int scp_nrqbds_total;
1237 /** # posted request buffers for receiving */
1238 int scp_nrqbds_posted;
1239 /** # incoming reqs */
1240 int scp_nreqs_incoming;
1241 /** request buffers to be reposted */
1242 cfs_list_t scp_rqbd_idle;
1243 /** req buffers receiving */
1244 cfs_list_t scp_rqbd_posted;
1245 /** incoming reqs */
1246 cfs_list_t scp_req_incoming;
1247 /** timeout before re-posting reqs, in tick */
1248 cfs_duration_t scp_rqbd_timeout;
 * all threads sleep on this. This wait-queue is signalled when a new
 * incoming request arrives and when a difficult reply has to be handled.
1253 cfs_waitq_t scp_waitq;
1255 /** request history */
1256 cfs_list_t scp_hist_reqs;
1257 /** request buffer history */
1258 cfs_list_t scp_hist_rqbds;
1259 /** # request buffers in history */
1260 int scp_hist_nrqbds;
1261 /** sequence number for request */
1263 /** highest seq culled from history */
1264 __u64 scp_hist_seq_culled;
1267 * serialize the following fields, used for processing requests
1268 * sent to this portal
1270 cfs_spinlock_t scp_req_lock __cfs_cacheline_aligned;
1271 /** # reqs in either of the queues below */
1272 /** reqs waiting for service */
1273 cfs_list_t scp_req_pending;
1274 /** high priority queue */
1275 cfs_list_t scp_hreq_pending;
1276 /** # reqs being served */
1277 int scp_nreqs_active;
1278 /** # HPreqs being served */
1279 int scp_nhreqs_active;
1280 /** # hp requests handled */
1286 * serialize the following fields, used for changes on
1289 cfs_spinlock_t scp_at_lock __cfs_cacheline_aligned;
1290 /** estimated rpc service time */
1291 struct adaptive_timeout scp_at_estimate;
1292 /** reqs waiting for replies */
1293 struct ptlrpc_at_array scp_at_array;
1294 /** early reply timer */
1295 cfs_timer_t scp_at_timer;
1297 cfs_time_t scp_at_checktime;
1298 /** check early replies */
1299 unsigned scp_at_check;
1303 * serialize the following fields, used for processing
1304 * replies for this portal
1306 cfs_spinlock_t scp_rep_lock __cfs_cacheline_aligned;
1307 /** all the active replies */
1308 cfs_list_t scp_rep_active;
1310 /** replies waiting for service */
1311 cfs_list_t scp_rep_queue;
1313 /** List of free reply_states */
1314 cfs_list_t scp_rep_idle;
1315 /** waitq to run, when adding stuff to srv_free_rs_list */
1316 cfs_waitq_t scp_rep_waitq;
1317 /** # 'difficult' replies */
1318 cfs_atomic_t scp_nreps_difficult;
1322 * Declaration of ptlrpcd control structure
1324 struct ptlrpcd_ctl {
1326 * Ptlrpc thread control flags (LIOD_START, LIOD_STOP, LIOD_FORCE)
1328 unsigned long pc_flags;
1330 * Thread lock protecting structure fields.
1332 cfs_spinlock_t pc_lock;
1336 cfs_completion_t pc_starting;
1340 cfs_completion_t pc_finishing;
1342 * Thread requests set.
1344 struct ptlrpc_request_set *pc_set;
1346 * Thread name used in cfs_daemonize()
1350 * Environment for request interpreters to run in.
1352 struct lu_env pc_env;
1354 * Index of ptlrpcd thread in the array.
1358 * Number of the ptlrpcd's partners.
1362 * Pointer to the array of partners' ptlrpcd_ctl structure.
1364 struct ptlrpcd_ctl **pc_partners;
1366 * Record the partner index to be processed next.
1371 * Async rpcs flag to make sure that ptlrpcd_check() is called only
1376 * Currently not used.
1380 * User-space async rpcs callback.
1382 void *pc_wait_callback;
1384 * User-space check idle rpcs callback.
1386 void *pc_idle_callback;
1390 /* Bits for pc_flags */
1391 enum ptlrpcd_ctl_flags {
1393 * Ptlrpc thread start flag.
1395 LIOD_START = 1 << 0,
1397 * Ptlrpc thread stop flag.
1401 * Ptlrpc thread force flag (only stop force so far).
1402 * This will cause aborting any inflight rpcs handled
1403 * by thread if LIOD_STOP is specified.
1405 LIOD_FORCE = 1 << 2,
1407 * This is a recovery ptlrpc thread.
1409 LIOD_RECOVERY = 1 << 3,
1411 * The ptlrpcd is bound to some CPU core.
1416 /* ptlrpc/events.c */
1417 extern lnet_handle_eq_t ptlrpc_eq_h;
1418 extern int ptlrpc_uuid_to_peer(struct obd_uuid *uuid,
1419 lnet_process_id_t *peer, lnet_nid_t *self);
 * These callbacks are invoked by LNet when something happened to
 * the underlying buffer.
1425 extern void request_out_callback(lnet_event_t *ev);
1426 extern void reply_in_callback(lnet_event_t *ev);
1427 extern void client_bulk_callback(lnet_event_t *ev);
1428 extern void request_in_callback(lnet_event_t *ev);
1429 extern void reply_out_callback(lnet_event_t *ev);
1430 #ifdef HAVE_SERVER_SUPPORT
1431 extern void server_bulk_callback(lnet_event_t *ev);
1435 /* ptlrpc/connection.c */
1436 struct ptlrpc_connection *ptlrpc_connection_get(lnet_process_id_t peer,
1438 struct obd_uuid *uuid);
1439 int ptlrpc_connection_put(struct ptlrpc_connection *c);
1440 struct ptlrpc_connection *ptlrpc_connection_addref(struct ptlrpc_connection *);
1441 int ptlrpc_connection_init(void);
1442 void ptlrpc_connection_fini(void);
1443 extern lnet_pid_t ptl_get_pid(void);
1445 /* ptlrpc/niobuf.c */
1447 * Actual interfacing with LNet to put/get/register/unregister stuff
1450 #ifdef HAVE_SERVER_SUPPORT
1451 struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_exp(struct ptlrpc_request *req,
1452 int npages, int type, int portal);
1453 int ptlrpc_start_bulk_transfer(struct ptlrpc_bulk_desc *desc);
1454 void ptlrpc_abort_bulk(struct ptlrpc_bulk_desc *desc);
1456 static inline int ptlrpc_server_bulk_active(struct ptlrpc_bulk_desc *desc)
1460 LASSERT(desc != NULL);
1462 cfs_spin_lock(&desc->bd_lock);
1463 rc = desc->bd_network_rw;
1464 cfs_spin_unlock(&desc->bd_lock);
1469 int ptlrpc_register_bulk(struct ptlrpc_request *req);
1470 int ptlrpc_unregister_bulk(struct ptlrpc_request *req, int async);
1472 static inline int ptlrpc_client_bulk_active(struct ptlrpc_request *req)
1474 struct ptlrpc_bulk_desc *desc = req->rq_bulk;
1477 LASSERT(req != NULL);
1479 if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK) &&
1480 req->rq_bulk_deadline > cfs_time_current_sec())
1486 cfs_spin_lock(&desc->bd_lock);
1487 rc = desc->bd_network_rw;
1488 cfs_spin_unlock(&desc->bd_lock);
1492 #define PTLRPC_REPLY_MAYBE_DIFFICULT 0x01
1493 #define PTLRPC_REPLY_EARLY 0x02
1494 int ptlrpc_send_reply(struct ptlrpc_request *req, int flags);
1495 int ptlrpc_reply(struct ptlrpc_request *req);
1496 int ptlrpc_send_error(struct ptlrpc_request *req, int difficult);
1497 int ptlrpc_error(struct ptlrpc_request *req);
1498 void ptlrpc_resend_req(struct ptlrpc_request *request);
1499 int ptlrpc_at_get_net_latency(struct ptlrpc_request *req);
1500 int ptl_send_rpc(struct ptlrpc_request *request, int noreply);
1501 int ptlrpc_register_rqbd(struct ptlrpc_request_buffer_desc *rqbd);
1504 /* ptlrpc/client.c */
1506 * Client-side portals API. Everything to send requests, receive replies,
1507 * request queues, request management, etc.
1510 void ptlrpc_init_client(int req_portal, int rep_portal, char *name,
1511 struct ptlrpc_client *);
1512 void ptlrpc_cleanup_client(struct obd_import *imp);
1513 struct ptlrpc_connection *ptlrpc_uuid_to_connection(struct obd_uuid *uuid);
1515 int ptlrpc_queue_wait(struct ptlrpc_request *req);
1516 int ptlrpc_replay_req(struct ptlrpc_request *req);
1517 int ptlrpc_unregister_reply(struct ptlrpc_request *req, int async);
1518 void ptlrpc_restart_req(struct ptlrpc_request *req);
1519 void ptlrpc_abort_inflight(struct obd_import *imp);
1520 void ptlrpc_cleanup_imp(struct obd_import *imp);
1521 void ptlrpc_abort_set(struct ptlrpc_request_set *set);
1523 struct ptlrpc_request_set *ptlrpc_prep_set(void);
1524 struct ptlrpc_request_set *ptlrpc_prep_fcset(int max, set_producer_func func,
1526 int ptlrpc_set_add_cb(struct ptlrpc_request_set *set,
1527 set_interpreter_func fn, void *data);
1528 int ptlrpc_set_next_timeout(struct ptlrpc_request_set *);
1529 int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set);
1530 int ptlrpc_set_wait(struct ptlrpc_request_set *);
1531 int ptlrpc_expired_set(void *data);
1532 void ptlrpc_interrupted_set(void *data);
1533 void ptlrpc_mark_interrupted(struct ptlrpc_request *req);
1534 void ptlrpc_set_destroy(struct ptlrpc_request_set *);
1535 void ptlrpc_set_add_req(struct ptlrpc_request_set *, struct ptlrpc_request *);
1536 void ptlrpc_set_add_new_req(struct ptlrpcd_ctl *pc,
1537 struct ptlrpc_request *req);
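/*
 * A minimal request-set usage sketch (error handling trimmed; assumes the
 * requests "req1" and "req2" were allocated and packed elsewhere).
 * ptlrpc_set_wait() sends the queued requests in parallel and waits for all
 * of them to complete:
 *
 *      struct ptlrpc_request_set *set;
 *      int rc;
 *
 *      set = ptlrpc_prep_set();
 *      if (set == NULL)
 *              return -ENOMEM;
 *      ptlrpc_set_add_req(set, req1);
 *      ptlrpc_set_add_req(set, req2);
 *      rc = ptlrpc_set_wait(set);
 *      ptlrpc_set_destroy(set);
 */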
1539 void ptlrpc_free_rq_pool(struct ptlrpc_request_pool *pool);
1540 void ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq);
1542 struct ptlrpc_request_pool *
1543 ptlrpc_init_rq_pool(int, int,
1544 void (*populate_pool)(struct ptlrpc_request_pool *, int));
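/*
 * A minimal pool usage sketch (the pool size is illustrative; the populate
 * callback here is ptlrpc_add_rqs_to_pool, which matches the expected
 * signature):
 *
 *      struct ptlrpc_request_pool *pool;
 *
 *      pool = ptlrpc_init_rq_pool(4, OST_MAXREQSIZE, ptlrpc_add_rqs_to_pool);
 *      ...
 *      req = ptlrpc_request_alloc_pool(imp, pool, format);
 *      ...
 *      ptlrpc_free_rq_pool(pool);
 */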
1546 void ptlrpc_at_set_req_timeout(struct ptlrpc_request *req);
1547 struct ptlrpc_request *ptlrpc_request_alloc(struct obd_import *imp,
1548 const struct req_format *format);
1549 struct ptlrpc_request *ptlrpc_request_alloc_pool(struct obd_import *imp,
1550 struct ptlrpc_request_pool *,
1551 const struct req_format *format);
1552 void ptlrpc_request_free(struct ptlrpc_request *request);
1553 int ptlrpc_request_pack(struct ptlrpc_request *request,
1554 __u32 version, int opcode);
1555 struct ptlrpc_request *ptlrpc_request_alloc_pack(struct obd_import *imp,
1556 const struct req_format *format,
1557 __u32 version, int opcode);
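/*
 * A typical synchronous client RPC sketch (the request format and opcode are
 * illustrative, error handling trimmed):
 *
 *      struct ptlrpc_request *req;
 *      int rc;
 *
 *      req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_GETSTATUS,
 *                                      LUSTRE_MDS_VERSION, MDS_GETSTATUS);
 *      if (req == NULL)
 *              return -ENOMEM;
 *      ptlrpc_request_set_replen(req);
 *      rc = ptlrpc_queue_wait(req);
 *      ptlrpc_req_finished(req);
 */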
1558 int ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
1559 __u32 version, int opcode, char **bufs,
1560 struct ptlrpc_cli_ctx *ctx);
1561 struct ptlrpc_request *ptlrpc_prep_fakereq(struct obd_import *imp,
1562 unsigned int timeout,
1563 ptlrpc_interpterer_t interpreter);
1564 void ptlrpc_fakereq_finished(struct ptlrpc_request *req);
1566 struct ptlrpc_request *ptlrpc_prep_req(struct obd_import *imp, __u32 version,
1567 int opcode, int count, __u32 *lengths,
1569 struct ptlrpc_request *ptlrpc_prep_req_pool(struct obd_import *imp,
1570 __u32 version, int opcode,
1571 int count, __u32 *lengths, char **bufs,
1572 struct ptlrpc_request_pool *pool);
1573 void ptlrpc_req_finished(struct ptlrpc_request *request);
1574 void ptlrpc_req_finished_with_imp_lock(struct ptlrpc_request *request);
1575 struct ptlrpc_request *ptlrpc_request_addref(struct ptlrpc_request *req);
1576 struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req,
1577 int npages, int type, int portal);
1578 void ptlrpc_free_bulk(struct ptlrpc_bulk_desc *bulk);
1579 void ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
1580 cfs_page_t *page, int pageoffset, int len);
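/*
 * A minimal client-side bulk read setup sketch (npages, the portal and the
 * page array are illustrative; the descriptor is registered and sent together
 * with the owning request):
 *
 *      struct ptlrpc_bulk_desc *desc;
 *      int i;
 *
 *      desc = ptlrpc_prep_bulk_imp(req, npages, BULK_PUT_SINK,
 *                                  OST_BULK_PORTAL);
 *      if (desc == NULL)
 *              return -ENOMEM;
 *      for (i = 0; i < npages; i++)
 *              ptlrpc_prep_bulk_page(desc, pages[i], 0, CFS_PAGE_SIZE);
 */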
1581 void ptlrpc_retain_replayable_request(struct ptlrpc_request *req,
1582 struct obd_import *imp);
1583 __u64 ptlrpc_next_xid(void);
1584 __u64 ptlrpc_sample_next_xid(void);
1585 __u64 ptlrpc_req_xid(struct ptlrpc_request *request);
1587 /* Set of routines to run a function in ptlrpcd context */
1588 void *ptlrpcd_alloc_work(struct obd_import *imp,
1589 int (*cb)(const struct lu_env *, void *), void *data);
1590 void ptlrpcd_destroy_work(void *handler);
1591 int ptlrpcd_queue_work(void *handler);
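/*
 * Sketch of running a callback in ptlrpcd context (the callback "my_cb" and
 * its data are hypothetical; error handling trimmed):
 *
 *      void *handler;
 *
 *      handler = ptlrpcd_alloc_work(imp, my_cb, my_data);
 *      ptlrpcd_queue_work(handler);
 *      ...
 *      ptlrpcd_destroy_work(handler);
 */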
1594 struct ptlrpc_service_buf_conf {
1595 /* nbufs is how many buffers to post */
1596 unsigned int bc_nbufs;
1597 /* buffer size to post */
1598 unsigned int bc_buf_size;
/* portal to listen for requests on */
1600 unsigned int bc_req_portal;
/* portal to send replies to */
1602 unsigned int bc_rep_portal;
1603 /* maximum request size to be accepted for this service */
1604 unsigned int bc_req_max_size;
1605 /* maximum reply size this service can ever send */
1606 unsigned int bc_rep_max_size;
1609 struct ptlrpc_service_thr_conf {
1610 /* threadname should be 8 characters or less - 6 will be added on */
1612 /* min number of service threads to start */
1613 unsigned int tc_nthrs_min;
1614 /* max number of service threads to start */
1615 unsigned int tc_nthrs_max;
/* user-specified number of threads; it will be validated against
 * other members of this structure. */
1618 unsigned int tc_nthrs_user;
1619 /* set NUMA node affinity for service threads */
1620 unsigned int tc_cpu_affinity;
1621 /* Tags for lu_context associated with service thread */
1625 struct ptlrpc_service_conf {
/* soft watchdog timeout multiplier to print stuck service traces */
1629 unsigned int psc_watchdog_factor;
1630 /* buffer information */
1631 struct ptlrpc_service_buf_conf psc_buf;
1632 /* thread information */
1633 struct ptlrpc_service_thr_conf psc_thr;
1634 /* function table */
1635 struct ptlrpc_service_ops psc_ops;
1638 /* ptlrpc/service.c */
1640 * Server-side services API. Register/unregister service, request state
1641 * management, service thread management
1645 void ptlrpc_save_lock(struct ptlrpc_request *req,
1646 struct lustre_handle *lock, int mode, int no_ack);
1647 void ptlrpc_commit_replies(struct obd_export *exp);
1648 void ptlrpc_dispatch_difficult_reply(struct ptlrpc_reply_state *rs);
1649 void ptlrpc_schedule_difficult_reply(struct ptlrpc_reply_state *rs);
1650 struct ptlrpc_service *ptlrpc_register_service(
1651 struct ptlrpc_service_conf *conf,
1652 struct proc_dir_entry *proc_entry);
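/*
 * A minimal registration sketch (the handler and the buffer/thread values are
 * illustrative; compare with the MGS_* constants defined above, the portal
 * numbers come from lustre_idl.h):
 *
 *      static struct ptlrpc_service_conf conf = {
 *              .psc_watchdog_factor    = 1,
 *              .psc_buf = {
 *                      .bc_nbufs               = MGS_NBUFS,
 *                      .bc_buf_size            = MGS_BUFSIZE,
 *                      .bc_req_max_size        = MGS_MAXREQSIZE,
 *                      .bc_rep_max_size        = MGS_MAXREPSIZE,
 *                      .bc_req_portal          = MGS_REQUEST_PORTAL,
 *                      .bc_rep_portal          = MGC_REPLY_PORTAL,
 *              },
 *              .psc_thr = {
 *                      .tc_nthrs_min           = MGS_THREADS_AUTO_MIN,
 *                      .tc_nthrs_max           = MGS_THREADS_AUTO_MAX,
 *              },
 *              .psc_ops = {
 *                      .so_req_handler         = my_handler,
 *              },
 *      };
 *
 *      svc = ptlrpc_register_service(&conf, proc_entry);
 */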
1653 void ptlrpc_stop_all_threads(struct ptlrpc_service *svc);
1655 int ptlrpc_start_threads(struct ptlrpc_service *svc);
1656 int ptlrpc_unregister_service(struct ptlrpc_service *service);
1657 int liblustre_check_services(void *arg);
1658 void ptlrpc_daemonize(char *name);
1659 int ptlrpc_service_health_check(struct ptlrpc_service *);
1660 void ptlrpc_hpreq_reorder(struct ptlrpc_request *req);
1661 void ptlrpc_server_drop_request(struct ptlrpc_request *req);
1664 int ptlrpc_hr_init(void);
1665 void ptlrpc_hr_fini(void);
1667 # define ptlrpc_hr_init() (0)
1668 # define ptlrpc_hr_fini() do {} while(0)
1671 struct ptlrpc_svc_data {
1673 struct ptlrpc_service *svc;
1674 struct ptlrpc_thread *thread;
1678 /* ptlrpc/import.c */
1683 int ptlrpc_connect_import(struct obd_import *imp);
1684 int ptlrpc_init_import(struct obd_import *imp);
1685 int ptlrpc_disconnect_import(struct obd_import *imp, int noclose);
1686 int ptlrpc_import_recovery_state_machine(struct obd_import *imp);
1687 void deuuidify(char *uuid, const char *prefix, char **uuid_start,
1690 /* ptlrpc/pack_generic.c */
1691 int ptlrpc_reconnect_import(struct obd_import *imp);
1695 * ptlrpc msg buffer and swab interface
1699 int ptlrpc_buf_need_swab(struct ptlrpc_request *req, const int inout,
1701 void ptlrpc_buf_set_swabbed(struct ptlrpc_request *req, const int inout,
1703 int ptlrpc_unpack_rep_msg(struct ptlrpc_request *req, int len);
1704 int ptlrpc_unpack_req_msg(struct ptlrpc_request *req, int len);
1706 int lustre_msg_check_version(struct lustre_msg *msg, __u32 version);
1707 void lustre_init_msg_v2(struct lustre_msg_v2 *msg, int count, __u32 *lens,
1709 int lustre_pack_request(struct ptlrpc_request *, __u32 magic, int count,
1710 __u32 *lens, char **bufs);
1711 int lustre_pack_reply(struct ptlrpc_request *, int count, __u32 *lens,
1713 int lustre_pack_reply_v2(struct ptlrpc_request *req, int count,
1714 __u32 *lens, char **bufs, int flags);
1715 #define LPRFL_EARLY_REPLY 1
1716 int lustre_pack_reply_flags(struct ptlrpc_request *, int count, __u32 *lens,
1717 char **bufs, int flags);
1718 int lustre_shrink_msg(struct lustre_msg *msg, int segment,
1719 unsigned int newlen, int move_data);
1720 void lustre_free_reply_state(struct ptlrpc_reply_state *rs);
1721 int __lustre_unpack_msg(struct lustre_msg *m, int len);
1722 int lustre_msg_hdr_size(__u32 magic, int count);
1723 int lustre_msg_size(__u32 magic, int count, __u32 *lengths);
1724 int lustre_msg_size_v2(int count, __u32 *lengths);
1725 int lustre_packed_msg_size(struct lustre_msg *msg);
1726 int lustre_msg_early_size(void);
1727 void *lustre_msg_buf_v2(struct lustre_msg_v2 *m, int n, int min_size);
1728 void *lustre_msg_buf(struct lustre_msg *m, int n, int minlen);
1729 int lustre_msg_buflen(struct lustre_msg *m, int n);
1730 void lustre_msg_set_buflen(struct lustre_msg *m, int n, int len);
1731 int lustre_msg_bufcount(struct lustre_msg *m);
1732 char *lustre_msg_string(struct lustre_msg *m, int n, int max_len);
1733 __u32 lustre_msghdr_get_flags(struct lustre_msg *msg);
1734 void lustre_msghdr_set_flags(struct lustre_msg *msg, __u32 flags);
1735 __u32 lustre_msg_get_flags(struct lustre_msg *msg);
1736 void lustre_msg_add_flags(struct lustre_msg *msg, int flags);
1737 void lustre_msg_set_flags(struct lustre_msg *msg, int flags);
1738 void lustre_msg_clear_flags(struct lustre_msg *msg, int flags);
1739 __u32 lustre_msg_get_op_flags(struct lustre_msg *msg);
1740 void lustre_msg_add_op_flags(struct lustre_msg *msg, int flags);
1741 void lustre_msg_set_op_flags(struct lustre_msg *msg, int flags);
1742 struct lustre_handle *lustre_msg_get_handle(struct lustre_msg *msg);
1743 __u32 lustre_msg_get_type(struct lustre_msg *msg);
1744 __u32 lustre_msg_get_version(struct lustre_msg *msg);
1745 void lustre_msg_add_version(struct lustre_msg *msg, int version);
1746 __u32 lustre_msg_get_opc(struct lustre_msg *msg);
1747 __u64 lustre_msg_get_last_xid(struct lustre_msg *msg);
1748 __u64 lustre_msg_get_last_committed(struct lustre_msg *msg);
1749 __u64 *lustre_msg_get_versions(struct lustre_msg *msg);
1750 __u64 lustre_msg_get_transno(struct lustre_msg *msg);
1751 __u64 lustre_msg_get_slv(struct lustre_msg *msg);
1752 __u32 lustre_msg_get_limit(struct lustre_msg *msg);
1753 void lustre_msg_set_slv(struct lustre_msg *msg, __u64 slv);
1754 void lustre_msg_set_limit(struct lustre_msg *msg, __u64 limit);
1755 int lustre_msg_get_status(struct lustre_msg *msg);
1756 __u32 lustre_msg_get_conn_cnt(struct lustre_msg *msg);
1757 int lustre_msg_is_v1(struct lustre_msg *msg);
1758 __u32 lustre_msg_get_magic(struct lustre_msg *msg);
1759 __u32 lustre_msg_get_timeout(struct lustre_msg *msg);
1760 __u32 lustre_msg_get_service_time(struct lustre_msg *msg);
1761 char *lustre_msg_get_jobid(struct lustre_msg *msg);
1762 __u32 lustre_msg_get_cksum(struct lustre_msg *msg);
1763 #if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 9, 0, 0)
1764 __u32 lustre_msg_calc_cksum(struct lustre_msg *msg, int compat18);
1766 # warning "remove checksum compatibility support for b1_8"
1767 __u32 lustre_msg_calc_cksum(struct lustre_msg *msg);
1769 void lustre_msg_set_handle(struct lustre_msg *msg,struct lustre_handle *handle);
1770 void lustre_msg_set_type(struct lustre_msg *msg, __u32 type);
1771 void lustre_msg_set_opc(struct lustre_msg *msg, __u32 opc);
1772 void lustre_msg_set_last_xid(struct lustre_msg *msg, __u64 last_xid);
1773 void lustre_msg_set_last_committed(struct lustre_msg *msg,__u64 last_committed);
1774 void lustre_msg_set_versions(struct lustre_msg *msg, __u64 *versions);
1775 void lustre_msg_set_transno(struct lustre_msg *msg, __u64 transno);
1776 void lustre_msg_set_status(struct lustre_msg *msg, __u32 status);
1777 void lustre_msg_set_conn_cnt(struct lustre_msg *msg, __u32 conn_cnt);
1778 void ptlrpc_req_set_repsize(struct ptlrpc_request *req, int count, __u32 *sizes);
1779 void ptlrpc_request_set_replen(struct ptlrpc_request *req);
1780 void lustre_msg_set_timeout(struct lustre_msg *msg, __u32 timeout);
1781 void lustre_msg_set_service_time(struct lustre_msg *msg, __u32 service_time);
1782 void lustre_msg_set_jobid(struct lustre_msg *msg, char *jobid);
1783 void lustre_msg_set_cksum(struct lustre_msg *msg, __u32 cksum);
1786 lustre_shrink_reply(struct ptlrpc_request *req, int segment,
1787 unsigned int newlen, int move_data)
1789 LASSERT(req->rq_reply_state);
1790 LASSERT(req->rq_repmsg);
1791 req->rq_replen = lustre_shrink_msg(req->rq_repmsg, segment,
1796 /** Change request phase of \a req to \a new_phase */
1798 ptlrpc_rqphase_move(struct ptlrpc_request *req, enum rq_phase new_phase)
1800 if (req->rq_phase == new_phase)
1803 if (new_phase == RQ_PHASE_UNREGISTERING) {
1804 req->rq_next_phase = req->rq_phase;
1806 cfs_atomic_inc(&req->rq_import->imp_unregistering);
1809 if (req->rq_phase == RQ_PHASE_UNREGISTERING) {
1811 cfs_atomic_dec(&req->rq_import->imp_unregistering);
1814 DEBUG_REQ(D_INFO, req, "move req \"%s\" -> \"%s\"",
1815 ptlrpc_rqphase2str(req), ptlrpc_phase2str(new_phase));
1817 req->rq_phase = new_phase;

/** Returns true if request \a req got early reply and hard deadline is not met */
static inline int
ptlrpc_client_early(struct ptlrpc_request *req)
{
        if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
            req->rq_reply_deadline > cfs_time_current_sec())
                return 0;
        return req->rq_early;
}

/** Returns true if we got real reply from server for this request */
static inline int
ptlrpc_client_replied(struct ptlrpc_request *req)
{
        if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
            req->rq_reply_deadline > cfs_time_current_sec())
                return 0;
        return req->rq_replied;
}

/** Returns true if request \a req is in process of receiving server reply */
static inline int
ptlrpc_client_recv(struct ptlrpc_request *req)
{
        if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
            req->rq_reply_deadline > cfs_time_current_sec())
                return 1;
        return req->rq_receiving_reply;
}

static inline int
ptlrpc_client_recv_or_unlink(struct ptlrpc_request *req)
{
        int rc;

        cfs_spin_lock(&req->rq_lock);
        if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
            req->rq_reply_deadline > cfs_time_current_sec()) {
                cfs_spin_unlock(&req->rq_lock);
                return 1;
        }
        rc = req->rq_receiving_reply || req->rq_must_unlink;
        cfs_spin_unlock(&req->rq_lock);
        return rc;
}

static inline void
ptlrpc_client_wake_req(struct ptlrpc_request *req)
{
        if (req->rq_set == NULL)
                cfs_waitq_signal(&req->rq_reply_waitq);
        else
                cfs_waitq_signal(&req->rq_set->set_waitq);
}

static inline void
ptlrpc_rs_addref(struct ptlrpc_reply_state *rs)
{
        LASSERT(cfs_atomic_read(&rs->rs_refcount) > 0);
        cfs_atomic_inc(&rs->rs_refcount);
}

static inline void
ptlrpc_rs_decref(struct ptlrpc_reply_state *rs)
{
        LASSERT(cfs_atomic_read(&rs->rs_refcount) > 0);
        if (cfs_atomic_dec_and_test(&rs->rs_refcount))
                lustre_free_reply_state(rs);
}

/* Should only be called once per req */
static inline void ptlrpc_req_drop_rs(struct ptlrpc_request *req)
{
        if (req->rq_reply_state == NULL)
                return; /* shouldn't occur */
        ptlrpc_rs_decref(req->rq_reply_state);
        req->rq_reply_state = NULL;
        req->rq_repmsg = NULL;
}

static inline __u32 lustre_request_magic(struct ptlrpc_request *req)
{
        return lustre_msg_get_magic(req->rq_reqmsg);
}

static inline int ptlrpc_req_get_repsize(struct ptlrpc_request *req)
{
        switch (req->rq_reqmsg->lm_magic) {
        case LUSTRE_MSG_MAGIC_V2:
                return req->rq_reqmsg->lm_repsize;
        default:
                LASSERTF(0, "incorrect message magic: %08x\n",
                         req->rq_reqmsg->lm_magic);
                return -EFAULT;
        }
}

static inline int ptlrpc_send_limit_expired(struct ptlrpc_request *req)
{
        if (req->rq_delay_limit != 0 &&
            cfs_time_before(cfs_time_add(req->rq_queued_time,
                                         cfs_time_seconds(req->rq_delay_limit)),
                            cfs_time_current())) {
                return 1;
        }
        return 0;
}

static inline int ptlrpc_no_resend(struct ptlrpc_request *req)
{
        if (!req->rq_no_resend && ptlrpc_send_limit_expired(req)) {
                cfs_spin_lock(&req->rq_lock);
                req->rq_no_resend = 1;
                cfs_spin_unlock(&req->rq_lock);
        }
        return req->rq_no_resend;
}

static inline int
ptlrpc_server_get_timeout(struct ptlrpc_service_part *svcpt)
{
        int at = AT_OFF ? 0 : at_get(&svcpt->scp_at_estimate);

        return svcpt->scp_service->srv_watchdog_factor *
               max_t(int, at, obd_timeout);
}

static inline struct ptlrpc_service *
ptlrpc_req2svc(struct ptlrpc_request *req)
{
        LASSERT(req->rq_rqbd != NULL);
        return req->rq_rqbd->rqbd_svcpt->scp_service;
}

/* ldlm/ldlm_lib.c */
/**
 * Target client logic
 * @{
 */
int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg);
int client_obd_cleanup(struct obd_device *obddev);
int client_connect_import(const struct lu_env *env,
                          struct obd_export **exp, struct obd_device *obd,
                          struct obd_uuid *cluuid, struct obd_connect_data *,
                          void *localdata);
int client_disconnect_export(struct obd_export *exp);
int client_import_add_conn(struct obd_import *imp, struct obd_uuid *uuid,
                           int priority);
int client_import_del_conn(struct obd_import *imp, struct obd_uuid *uuid);
int client_import_find_conn(struct obd_import *imp, lnet_nid_t peer,
                            struct obd_uuid *uuid);
int import_set_conn_priority(struct obd_import *imp, struct obd_uuid *uuid);
void client_destroy_import(struct obd_import *imp);
/** @} */
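
/*
 * Usage sketch (illustrative only; the function name, UUID argument and
 * priority value are hypothetical, and the trailing client_import_add_conn()
 * parameter is reconstructed here): an OBD client can register an extra
 * failover NID on its import so the connection list has more than one
 * candidate to try on reconnect.
 *
 * \code
 * static int example_add_failover_conn(struct obd_device *obd,
 *                                      struct obd_uuid *failover_uuid)
 * {
 *         struct obd_import *imp = obd->u.cli.cl_import;
 *
 *         // non-zero priority asks that this connection be tried first
 *         return client_import_add_conn(imp, failover_uuid, 1);
 * }
 * \endcode
 */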

#ifdef HAVE_SERVER_SUPPORT
int server_disconnect_export(struct obd_export *exp);
#endif

/* ptlrpc/pinger.c */
/**
 * Pinger API (client side only)
 * @{
 */
enum timeout_event {
        TIMEOUT_GRANT = 1
};
struct timeout_item;
typedef int (*timeout_cb_t)(struct timeout_item *, void *);
int ptlrpc_pinger_add_import(struct obd_import *imp);
int ptlrpc_pinger_del_import(struct obd_import *imp);
int ptlrpc_add_timeout_client(int time, enum timeout_event event,
                              timeout_cb_t cb, void *data,
                              cfs_list_t *obd_list);
int ptlrpc_del_timeout_client(cfs_list_t *obd_list,
                              enum timeout_event event);
struct ptlrpc_request *ptlrpc_prep_ping(struct obd_import *imp);
int ptlrpc_obd_ping(struct obd_device *obd);
cfs_time_t ptlrpc_suspend_wakeup_time(void);
#ifdef __KERNEL__
void ping_evictor_start(void);
void ping_evictor_stop(void);
#else
#define ping_evictor_start()    do {} while (0)
#define ping_evictor_stop()     do {} while (0)
#endif
int ptlrpc_check_and_wait_suspend(struct ptlrpc_request *req);
/** @} */
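
/*
 * Usage sketch (illustrative only; the callback, interval and list head are
 * hypothetical): a client registers its import with the pinger and may also
 * hook a periodic timeout callback through ptlrpc_add_timeout_client().
 *
 * \code
 * static int example_timeout_cb(struct timeout_item *item, void *data)
 * {
 *         // invoked when the registered interval expires
 *         return 0;
 * }
 *
 * static int example_enable_pinger(struct obd_import *imp,
 *                                  cfs_list_t *timeout_list)
 * {
 *         int rc = ptlrpc_pinger_add_import(imp);
 *
 *         if (rc != 0)
 *                 return rc;
 *         return ptlrpc_add_timeout_client(10, TIMEOUT_GRANT,
 *                                          example_timeout_cb, NULL,
 *                                          timeout_list);
 * }
 * \endcode
 */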

/* ptlrpc daemon bind policy */
typedef enum {
        /* all ptlrpcd threads are free mode */
        PDB_POLICY_NONE          = 1,
        /* all ptlrpcd threads are bound mode */
        PDB_POLICY_FULL          = 2,
        /* <free1 bound1> <free2 bound2> ... <freeN boundN> */
        PDB_POLICY_PAIR          = 3,
        /* <free1 bound1> <bound1 free2> ... <freeN boundN> <boundN free1>,
         * means each ptlrpcd[X] has two partners: thread[X-1] and thread[X+1].
         * If the kernel supports NUMA, ptlrpcd threads are bound and grouped
         * by NUMA node. */
        PDB_POLICY_NEIGHBOR      = 4,
} pdb_policy_t;

/* ptlrpc daemon load policy
 * It is the caller's duty to specify how to push an async RPC into some
 * ptlrpcd queue, but this is not enforced and is affected by
 * "ptlrpcd_bind_policy": if that is "PDB_POLICY_FULL", the RPC is processed
 * by the selected ptlrpcd; otherwise it may be processed by the selected
 * ptlrpcd or its partner, whichever is scheduled first, to accelerate RPC
 * processing.  A usage sketch follows the ptlrpcd prototypes below. */
typedef enum {
        /* on the same CPU core as the caller */
        PDL_POLICY_SAME         = 1,
        /* within the same CPU partition, but not the same core as the caller */
        PDL_POLICY_LOCAL        = 2,
        /* round-robin on all CPU cores, but not the same core as the caller */
        PDL_POLICY_ROUND        = 3,
        /* the specified CPU core is preferred, but not enforced */
        PDL_POLICY_PREFERRED    = 4,
} pdl_policy_t;

/* ptlrpc/ptlrpcd.c */
void ptlrpcd_stop(struct ptlrpcd_ctl *pc, int force);
void ptlrpcd_wake(struct ptlrpc_request *req);
void ptlrpcd_add_req(struct ptlrpc_request *req, pdl_policy_t policy, int idx);
void ptlrpcd_add_rqset(struct ptlrpc_request_set *set);
int ptlrpcd_addref(void);
void ptlrpcd_decref(void);
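
/*
 * Usage sketch (illustrative only; request preparation and the interpret
 * callback are elided): a caller that does not want to wait for completion
 * hands a prepared request to a ptlrpcd thread.  The load policy is only a
 * hint about which ptlrpcd instance should pick the request up.
 *
 * \code
 * static void example_send_async(struct ptlrpc_request *req)
 * {
 *         // round-robin across CPU cores; -1 means no preferred index
 *         ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
 * }
 * \endcode
 */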

/* ptlrpc/lproc_ptlrpc.c */
/**
 * procfs output related functions
 * @{
 */
const char *ll_opcode2str(__u32 opcode);
#ifdef LPROCFS
void ptlrpc_lprocfs_register_obd(struct obd_device *obd);
void ptlrpc_lprocfs_unregister_obd(struct obd_device *obd);
void ptlrpc_lprocfs_brw(struct ptlrpc_request *req, int bytes);
#else
static inline void ptlrpc_lprocfs_register_obd(struct obd_device *obd) {}
static inline void ptlrpc_lprocfs_unregister_obd(struct obd_device *obd) {}
static inline void ptlrpc_lprocfs_brw(struct ptlrpc_request *req, int bytes) {}
#endif
/** @} */

/* ptlrpc/llog_server.c */
int llog_origin_handle_create(struct ptlrpc_request *req);
int llog_origin_handle_destroy(struct ptlrpc_request *req);
int llog_origin_handle_prev_block(struct ptlrpc_request *req);
int llog_origin_handle_next_block(struct ptlrpc_request *req);
int llog_origin_handle_read_header(struct ptlrpc_request *req);
int llog_origin_handle_close(struct ptlrpc_request *req);
int llog_origin_handle_cancel(struct ptlrpc_request *req);
int llog_catinfo(struct ptlrpc_request *req);

/* ptlrpc/llog_client.c */
extern struct llog_operations llog_client_ops;