1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 only,
10 * as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License version 2 for more details (a copy is included
16 * in the LICENSE file that accompanied this code).
18 * You should have received a copy of the GNU General Public License
19 * version 2 along with this program; If not, see
20 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23 * CA 95054 USA or visit www.sun.com if you need additional information or
29 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
30 * Use is subject to license terms.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 /** \defgroup PtlRPC Portal RPC and networking module.
 * PortalRPC is the layer used by the rest of the Lustre code to achieve network
39 * communications: establish connections with corresponding export and import
40 * states, listen for a service, send and receive RPCs.
41 * PortalRPC also includes base recovery framework: packet resending and
42 * replaying, reconnections, pinger.
44 * PortalRPC utilizes LNet as its transport layer.
58 #if defined(__linux__)
59 #include <linux/lustre_net.h>
60 #elif defined(__APPLE__)
61 #include <darwin/lustre_net.h>
62 #elif defined(__WINNT__)
63 #include <winnt/lustre_net.h>
65 #error Unsupported operating system.
68 #include <libcfs/libcfs.h>
70 #include <lnet/lnet.h>
71 #include <lustre/lustre_idl.h>
72 #include <lustre_ha.h>
73 #include <lustre_sec.h>
74 #include <lustre_import.h>
75 #include <lprocfs_status.h>
76 #include <lu_object.h>
77 #include <lustre_req_layout.h>
79 #include <obd_support.h>
80 #include <lustre_ver.h>
82 /* MD flags we _always_ use */
83 #define PTLRPC_MD_OPTIONS 0
86 * Define maxima for bulk I/O
87 * CAVEAT EMPTOR, with multinet (i.e. routers forwarding between networks)
88 * these limits are system wide and not interface-local. */
89 #define PTLRPC_MAX_BRW_BITS LNET_MTU_BITS
90 #define PTLRPC_MAX_BRW_SIZE (1<<LNET_MTU_BITS)
91 #define PTLRPC_MAX_BRW_PAGES (PTLRPC_MAX_BRW_SIZE >> CFS_PAGE_SHIFT)
93 /* When PAGE_SIZE is a constant, we can check our arithmetic here with cpp! */
95 # if ((PTLRPC_MAX_BRW_PAGES & (PTLRPC_MAX_BRW_PAGES - 1)) != 0)
96 # error "PTLRPC_MAX_BRW_PAGES isn't a power of two"
98 # if (PTLRPC_MAX_BRW_SIZE != (PTLRPC_MAX_BRW_PAGES * CFS_PAGE_SIZE))
99 # error "PTLRPC_MAX_BRW_SIZE isn't PTLRPC_MAX_BRW_PAGES * CFS_PAGE_SIZE"
101 # if (PTLRPC_MAX_BRW_SIZE > LNET_MTU)
102 # error "PTLRPC_MAX_BRW_SIZE too big"
104 # if (PTLRPC_MAX_BRW_PAGES > LNET_MAX_IOV)
105 # error "PTLRPC_MAX_BRW_PAGES too big"
107 #endif /* __KERNEL__ */
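/*
 * For illustration only, with the common values LNET_MTU_BITS == 20 (1 MB LNET
 * MTU) and CFS_PAGE_SHIFT == 12 (4 KB pages), the limits above work out to:
 *
 *      PTLRPC_MAX_BRW_SIZE  = 1 << 20       = 1048576 bytes
 *      PTLRPC_MAX_BRW_PAGES = 1048576 >> 12 = 256 pages
 *
 * Other page sizes change PTLRPC_MAX_BRW_PAGES accordingly.
 */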
109 /* Size over which to OBD_VMALLOC() rather than OBD_ALLOC() service request
111 #define SVC_BUF_VMALLOC_THRESHOLD (2 * CFS_PAGE_SIZE)
114 * The following constants determine how memory is used to buffer incoming
117 * ?_NBUFS # buffers to allocate when growing the pool
118 * ?_BUFSIZE # bytes in a single request buffer
119 * ?_MAXREQSIZE # maximum request service will receive
121 * When fewer than ?_NBUFS/2 buffers are posted for receive, another chunk
122 * of ?_NBUFS is added to the pool.
124 * Messages larger than ?_MAXREQSIZE are dropped. Request buffers are
125 * considered full when less than ?_MAXREQSIZE is left in them.
127 #define LDLM_THREADS_AUTO_MIN (2)
128 #define LDLM_THREADS_AUTO_MAX min_t(unsigned, cfs_num_online_cpus() * \
129 cfs_num_online_cpus() * 32, 128)
130 #define LDLM_BL_THREADS LDLM_THREADS_AUTO_MIN
131 #define LDLM_NBUFS (64 * cfs_num_online_cpus())
132 #define LDLM_BUFSIZE (8 * 1024)
133 #define LDLM_MAXREQSIZE (5 * 1024)
134 #define LDLM_MAXREPSIZE (1024)
136 #define MDT_MIN_THREADS 2UL
137 #define MDT_MAX_THREADS 512UL
138 #define MDT_NUM_THREADS max(min_t(unsigned long, MDT_MAX_THREADS, \
139 cfs_num_physpages >> (25 - CFS_PAGE_SHIFT)), \
142 /** Absolute limits */
143 #define MDS_THREADS_MIN 2
144 #define MDS_THREADS_MAX 512
145 #define MDS_THREADS_MIN_READPAGE 2
146 #define MDS_NBUFS (64 * cfs_num_online_cpus())
147 #define MDS_BUFSIZE (8 * 1024)
149 * Assume file name length = FNAME_MAX = 256 (true for ext3).
150 * path name length = PATH_MAX = 4096
151 * LOV MD size max = EA_MAX = 4000
152 * symlink: FNAME_MAX + PATH_MAX <- largest
153 * link: FNAME_MAX + PATH_MAX (mds_rec_link < mds_rec_create)
154 * rename: FNAME_MAX + FNAME_MAX
155 * open: FNAME_MAX + EA_MAX
157 * MDS_MAXREQSIZE ~= 4736 bytes =
158 * lustre_msg + ldlm_request + mds_body + mds_rec_create + FNAME_MAX + PATH_MAX
159 * MDS_MAXREPSIZE ~= 8300 bytes = lustre_msg + llog_header
160 * or, for mds_close() and mds_reint_unlink() on a many-OST filesystem:
161 * = 9210 bytes = lustre_msg + mds_body + 160 * (easize + cookiesize)
163 * Realistic size is about 512 bytes (20 character name + 128 char symlink),
164 * except in the open case where there are a large number of OSTs in a LOV.
166 #define MDS_MAXREQSIZE (5 * 1024)
167 #define MDS_MAXREPSIZE max(9 * 1024, 362 + LOV_MAX_STRIPE_COUNT * 56)
169 /** FLD_MAXREQSIZE == lustre_msg + __u32 padding + ptlrpc_body + opc + md_fld */
170 #define FLD_MAXREQSIZE (160)
172 /** FLD_MAXREPSIZE == lustre_msg + ptlrpc_body + md_fld */
173 #define FLD_MAXREPSIZE (152)
176 * SEQ_MAXREQSIZE == lustre_msg + __u32 padding + ptlrpc_body + opc + lu_range +
178 #define SEQ_MAXREQSIZE (160)
180 /** SEQ_MAXREPSIZE == lustre_msg + ptlrpc_body + lu_range */
181 #define SEQ_MAXREPSIZE (152)
183 /** MGS threads must be >= 3, see bug 22458 comment #28 */
184 #define MGS_THREADS_AUTO_MIN 3
185 #define MGS_THREADS_AUTO_MAX 32
186 #define MGS_NBUFS (64 * cfs_num_online_cpus())
187 #define MGS_BUFSIZE (8 * 1024)
188 #define MGS_MAXREQSIZE (7 * 1024)
189 #define MGS_MAXREPSIZE (9 * 1024)
191 /** Absolute OSS limits */
192 #define OSS_THREADS_MIN 3 /* difficult replies, HPQ, others */
193 #define OSS_THREADS_MAX 512
194 #define OST_NBUFS (64 * cfs_num_online_cpus())
195 #define OST_BUFSIZE (8 * 1024)
198 * OST_MAXREQSIZE ~= 4768 bytes =
199 * lustre_msg + obdo + 16 * obd_ioobj + 256 * niobuf_remote
201 * - single object with 16 pages is 512 bytes
202 * - OST_MAXREQSIZE must be at least 1 page of cookies plus some spillover
204 #define OST_MAXREQSIZE (5 * 1024)
205 #define OST_MAXREPSIZE (9 * 1024)
207 /* Macro to hide a typecast. */
208 #define ptlrpc_req_async_args(req) ((void *)&req->rq_async_args)
 * Structure defining a single portal connection.
213 struct ptlrpc_connection {
214 /** linkage for connections hash table */
215 cfs_hlist_node_t c_hash;
216 /** Our own lnet nid for this connection */
218 /** Remote side nid for this connection */
219 lnet_process_id_t c_peer;
220 /** UUID of the other side */
221 struct obd_uuid c_remote_uuid;
222 /** reference counter for this connection */
223 cfs_atomic_t c_refcount;
226 /** Client definition for PortalRPC */
227 struct ptlrpc_client {
228 /** What lnet portal does this client send messages to by default */
229 __u32 cli_request_portal;
230 /** What portal do we expect replies on */
231 __u32 cli_reply_portal;
232 /** Name of the client */
236 /** state flags of requests */
237 /* XXX only ones left are those used by the bulk descs as well! */
238 #define PTL_RPC_FL_INTR (1 << 0) /* reply wait was interrupted by user */
239 #define PTL_RPC_FL_TIMEOUT (1 << 7) /* request timed out waiting for reply */
241 #define REQ_MAX_ACK_LOCKS 8
243 union ptlrpc_async_args {
245 * Scratchpad for passing args to completion interpreter. Users
246 * cast to the struct of their choosing, and LASSERT that this is
247 * big enough. For _tons_ of context, OBD_ALLOC a struct and store
248 * a pointer to it here. The pointer_arg ensures this struct is at
249 * least big enough for that.
251 void *pointer_arg[11];
255 struct ptlrpc_request_set;
256 typedef int (*set_interpreter_func)(struct ptlrpc_request_set *, void *, int);
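/*
 * Illustrative sketch only (foo_* names are hypothetical): the usual client
 * pattern for \a rq_async_args is to declare a small per-request argument
 * struct, check at compile time that it fits into the union, and fill it in
 * before queueing the request together with the reply interpreter:
 *
 *      struct foo_async_args {
 *              struct obd_export *aa_exp;
 *      };
 *
 *      struct foo_async_args *aa;
 *
 *      CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
 *      aa = ptlrpc_req_async_args(req);
 *      aa->aa_exp = exp;
 *      req->rq_interpret_reply = foo_interpret;
 */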
259 * Definition of request set structure.
 * A request set is a list of requests (not necessarily to the same target)
 * that, once populated with RPCs, can be sent in parallel.
 * There are two kinds of request sets: general-purpose sets and sets with a
 * dedicated serving thread; an example of the latter is the ptlrpcd set.
 * For general-purpose sets, once the set has started sending it is impossible
 * to add new requests to it.
 * Provides a way to call "completion callbacks" when all requests in the set
 * have completed (see the usage sketch after the structure below).
269 struct ptlrpc_request_set {
270 /** number of uncompleted requests */
271 cfs_atomic_t set_remaining;
272 /** wait queue to wait on for request events */
273 cfs_waitq_t set_waitq;
274 cfs_waitq_t *set_wakeup_ptr;
275 /** List of requests in the set */
276 cfs_list_t set_requests;
278 * List of completion callbacks to be called when the set is completed
279 * This is only used if \a set_interpret is NULL.
280 * Links struct ptlrpc_set_cbdata.
282 cfs_list_t set_cblist;
283 /** Completion callback, if only one. */
284 set_interpreter_func set_interpret;
285 /** opaq argument passed to completion \a set_interpret callback. */
288 * Lock for \a set_new_requests manipulations
289 * locked so that any old caller can communicate requests to
290 * the set holder who can then fold them into the lock-free set
292 cfs_spinlock_t set_new_req_lock;
293 /** List of new yet unsent requests. Only used with ptlrpcd now. */
294 cfs_list_t set_new_requests;
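/*
 * Usage sketch for a general-purpose set (illustrative only; error handling
 * trimmed, \a req is an already-prepared request). ptlrpc_set_wait() sends
 * every request in the set and waits until all of them have completed:
 *
 *      struct ptlrpc_request_set *set;
 *      int rc;
 *
 *      set = ptlrpc_prep_set();
 *      if (set == NULL)
 *              return -ENOMEM;
 *      ptlrpc_set_add_req(set, req);
 *      rc = ptlrpc_set_wait(set);
 *      ptlrpc_set_destroy(set);
 */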
 * Description of a single ptlrpc_set callback
300 struct ptlrpc_set_cbdata {
301 /** List linkage item */
303 /** Pointer to interpreting function */
304 set_interpreter_func psc_interpret;
305 /** Opaq argument to pass to the callback */
309 struct ptlrpc_bulk_desc;
312 * ptlrpc callback & work item stuff
314 struct ptlrpc_cb_id {
315 void (*cbid_fn)(lnet_event_t *ev); /* specific callback fn */
316 void *cbid_arg; /* additional arg */
319 /** Maximum number of locks to fit into reply state */
320 #define RS_MAX_LOCKS 8
324 * Structure to define reply state on the server
325 * Reply state holds various reply message information. Also for "difficult"
326 * replies (rep-ack case) we store the state after sending reply and wait
327 * for the client to acknowledge the reception. In these cases locks could be
328 * added to the state for replay/failover consistency guarantees.
330 struct ptlrpc_reply_state {
331 /** Callback description */
332 struct ptlrpc_cb_id rs_cb_id;
333 /** Linkage for list of all reply states in a system */
335 /** Linkage for list of all reply states on same export */
336 cfs_list_t rs_exp_list;
337 /** Linkage for list of all reply states for same obd */
338 cfs_list_t rs_obd_list;
340 cfs_list_t rs_debug_list;
342 /** A spinlock to protect the reply state flags */
343 cfs_spinlock_t rs_lock;
344 /** Reply state flags */
345 unsigned long rs_difficult:1; /* ACK/commit stuff */
346 unsigned long rs_no_ack:1; /* no ACK, even for
347 difficult requests */
348 unsigned long rs_scheduled:1; /* being handled? */
349 unsigned long rs_scheduled_ever:1;/* any schedule attempts? */
350 unsigned long rs_handled:1; /* been handled yet? */
351 unsigned long rs_on_net:1; /* reply_out_callback pending? */
352 unsigned long rs_prealloc:1; /* rs from prealloc list */
353 unsigned long rs_committed:1;/* the transaction was committed
354 and the rs was dispatched
355 by ptlrpc_commit_replies */
356 /** Size of the state */
360 /** Transaction number */
364 struct obd_export *rs_export;
365 struct ptlrpc_service *rs_service;
366 /** Lnet metadata handle for the reply */
367 lnet_handle_md_t rs_md_h;
368 cfs_atomic_t rs_refcount;
/** Context for the service thread */
371 struct ptlrpc_svc_ctx *rs_svc_ctx;
372 /** Reply buffer (actually sent to the client), encoded if needed */
373 struct lustre_msg *rs_repbuf; /* wrapper */
374 /** Size of the reply buffer */
375 int rs_repbuf_len; /* wrapper buf length */
376 /** Size of the reply message */
377 int rs_repdata_len; /* wrapper msg length */
 * Actual reply message. Its content is encrypted (if needed) to
 * produce the reply buffer for actual sending. In the simple case
 * of no network encryption we just set \a rs_repbuf to \a rs_msg
383 struct lustre_msg *rs_msg; /* reply message */
385 /** Number of locks awaiting client ACK */
387 /** Handles of locks awaiting client reply ACK */
388 struct lustre_handle rs_locks[RS_MAX_LOCKS];
389 /** Lock modes of locks in \a rs_locks */
390 ldlm_mode_t rs_modes[RS_MAX_LOCKS];
393 struct ptlrpc_thread;
397 RQ_PHASE_NEW = 0xebc0de00,
398 RQ_PHASE_RPC = 0xebc0de01,
399 RQ_PHASE_BULK = 0xebc0de02,
400 RQ_PHASE_INTERPRET = 0xebc0de03,
401 RQ_PHASE_COMPLETE = 0xebc0de04,
402 RQ_PHASE_UNREGISTERING = 0xebc0de05,
403 RQ_PHASE_UNDEFINED = 0xebc0de06
406 /** Type of request interpreter call-back */
407 typedef int (*ptlrpc_interpterer_t)(const struct lu_env *env,
408 struct ptlrpc_request *req,
412 * Definition of request pool structure.
413 * The pool is used to store empty preallocated requests for the case
414 * when we would actually need to send something without performing
415 * any allocations (to avoid e.g. OOM).
417 struct ptlrpc_request_pool {
418 /** Locks the list */
419 cfs_spinlock_t prp_lock;
420 /** list of ptlrpc_request structs */
421 cfs_list_t prp_req_list;
/** Maximum message size that would fit into a request from this pool */
424 /** Function to allocate more requests for this pool */
425 void (*prp_populate)(struct ptlrpc_request_pool *, int);
434 * Basic request prioritization operations structure.
435 * The whole idea is centered around locks and RPCs that might affect locks.
436 * When a lock is contended we try to give priority to RPCs that might lead
 * to the fastest release of that lock.
 * Currently implemented only for OSTs, in a way that gives IO and truncate
 * RPCs coming from a region covered by a contended lock priority over
 * other requests.
442 struct ptlrpc_hpreq_ops {
444 * Check if the lock handle of the given lock is the same as
445 * taken from the request.
447 int (*hpreq_lock_match)(struct ptlrpc_request *, struct ldlm_lock *);
449 * Check if the request is a high priority one.
451 int (*hpreq_check)(struct ptlrpc_request *);
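/*
 * Illustrative sketch only (foo_* names are hypothetical): a server installs
 * these callbacks from its svc_hpreq_handler_t by pointing \a rq_ops of the
 * incoming request at a static instance:
 *
 *      static struct ptlrpc_hpreq_ops foo_hpreq_ops = {
 *              .hpreq_lock_match = foo_hpreq_lock_match,
 *              .hpreq_check      = foo_hpreq_check,
 *      };
 *
 *      static int foo_hpreq_handler(struct ptlrpc_request *req)
 *      {
 *              req->rq_ops = &foo_hpreq_ops;
 *              return 0;
 *      }
 */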
455 * Represents remote procedure call.
457 * This is a staple structure used by everybody wanting to send a request
460 struct ptlrpc_request {
461 /* Request type: one of PTL_RPC_MSG_* */
464 * Linkage item through which this request is included into
465 * sending/delayed lists on client and into rqbd list on server
 * Server-side list of incoming unserved requests sorted by arrival
 * time. Traversed from time to time to notice about-to-expire
 * requests and send back "early replies" to clients to let them
 * know the server is alive and well, just too busy to service their
475 cfs_list_t rq_timed_list;
/** server-side history, used for debugging purposes. */
477 cfs_list_t rq_history_list;
478 /** server-side per-export list */
479 cfs_list_t rq_exp_list;
480 /** server-side hp handlers */
481 struct ptlrpc_hpreq_ops *rq_ops;
482 /** history sequence # */
483 __u64 rq_history_seq;
484 /** the index of service's srv_at_array into which request is linked */
486 /** Result of request processing */
488 /** Lock to protect request flags and some other important bits, like
491 cfs_spinlock_t rq_lock;
492 /** client-side flags are serialized by rq_lock */
493 unsigned long rq_intr:1, rq_replied:1, rq_err:1,
494 rq_timedout:1, rq_resend:1, rq_restart:1,
496 * when ->rq_replay is set, request is kept by the client even
497 * after server commits corresponding transaction. This is
498 * used for operations that require sequence of multiple
499 * requests to be replayed. The only example currently is file
500 * open/close. When last request in such a sequence is
501 * committed, ->rq_replay is cleared on all requests in the
505 rq_no_resend:1, rq_waiting:1, rq_receiving_reply:1,
506 rq_no_delay:1, rq_net_err:1, rq_wait_ctx:1,
507 rq_early:1, rq_must_unlink:1,
508 rq_fake:1, /* this fake req */
509 rq_memalloc:1, /* req originated from "kswapd" */
510 /* server-side flags */
511 rq_packed_final:1, /* packed final reply */
512 rq_hp:1, /* high priority RPC */
513 rq_at_linked:1, /* link into service's srv_at_array */
516 /* whether the "rq_set" is a valid one */
519 enum rq_phase rq_phase; /* one of RQ_PHASE_* */
520 enum rq_phase rq_next_phase; /* one of RQ_PHASE_* to be used next */
521 cfs_atomic_t rq_refcount;/* client-side refcount for SENT race,
server-side refcount for multiple replies */
524 /** initial thread servicing this request */
525 struct ptlrpc_thread *rq_svc_thread;
527 /** Portal to which this request would be sent */
528 int rq_request_portal; /* XXX FIXME bug 249 */
529 /** Portal where to wait for reply and where reply would be sent */
530 int rq_reply_portal; /* XXX FIXME bug 249 */
534 * !rq_truncate : # reply bytes actually received,
535 * rq_truncate : required repbuf_len for resend
538 /** Request length */
540 /** Request message - what client sent */
541 struct lustre_msg *rq_reqmsg;
545 /** Reply message - server response */
546 struct lustre_msg *rq_repmsg;
547 /** Transaction number */
 * List item for the replay list. Not yet committed requests get linked
554 * Also see \a rq_replay comment above.
556 cfs_list_t rq_replay_list;
559 * security and encryption data
561 struct ptlrpc_cli_ctx *rq_cli_ctx; /**< client's half ctx */
562 struct ptlrpc_svc_ctx *rq_svc_ctx; /**< server's half ctx */
563 cfs_list_t rq_ctx_chain; /**< link to waited ctx */
565 struct sptlrpc_flavor rq_flvr; /**< for client & server */
566 enum lustre_sec_part rq_sp_from;
568 unsigned long /* client/server security flags */
569 rq_ctx_init:1, /* context initiation */
570 rq_ctx_fini:1, /* context destroy */
571 rq_bulk_read:1, /* request bulk read */
572 rq_bulk_write:1, /* request bulk write */
573 /* server authentication flags */
574 rq_auth_gss:1, /* authenticated by gss */
575 rq_auth_remote:1, /* authed as remote user */
576 rq_auth_usr_root:1, /* authed as root */
577 rq_auth_usr_mdt:1, /* authed as mdt */
578 rq_auth_usr_ost:1, /* authed as ost */
579 /* security tfm flags */
582 /* doesn't expect reply FIXME */
584 rq_pill_init:1; /* pill initialized */
586 uid_t rq_auth_uid; /* authed uid */
587 uid_t rq_auth_mapped_uid; /* authed uid mapped to */
589 /* (server side), pointed directly into req buffer */
590 struct ptlrpc_user_desc *rq_user_desc;
592 /** early replies go to offset 0, regular replies go after that */
593 unsigned int rq_reply_off;
595 /* various buffer pointers */
596 struct lustre_msg *rq_reqbuf; /* req wrapper */
597 int rq_reqbuf_len; /* req wrapper buf len */
598 int rq_reqdata_len; /* req wrapper msg len */
599 char *rq_repbuf; /* rep buffer */
600 int rq_repbuf_len; /* rep buffer len */
601 struct lustre_msg *rq_repdata; /* rep wrapper msg */
602 int rq_repdata_len; /* rep wrapper msg len */
603 struct lustre_msg *rq_clrbuf; /* only in priv mode */
604 int rq_clrbuf_len; /* only in priv mode */
605 int rq_clrdata_len; /* only in priv mode */
609 /** Fields that help to see if request and reply were swabbed or not */
610 __u32 rq_req_swab_mask;
611 __u32 rq_rep_swab_mask;
613 /** What was import generation when this request was sent */
614 int rq_import_generation;
615 enum lustre_imp_state rq_send_state;
617 /** how many early replies (for stats) */
620 /** client+server request */
621 lnet_handle_md_t rq_req_md_h;
622 struct ptlrpc_cb_id rq_req_cbid;
623 /** optional time limit for send attempts */
624 cfs_duration_t rq_delay_limit;
625 /** time request was first queued */
626 cfs_time_t rq_queued_time;
629 /** request arrival time */
630 struct timeval rq_arrival_time;
631 /** separated reply state */
632 struct ptlrpc_reply_state *rq_reply_state;
633 /** incoming request buffer */
634 struct ptlrpc_request_buffer_desc *rq_rqbd;
636 __u32 rq_uid; /* peer uid, used in MDS only */
639 /** client-only incoming reply */
640 lnet_handle_md_t rq_reply_md_h;
641 cfs_waitq_t rq_reply_waitq;
642 struct ptlrpc_cb_id rq_reply_cbid;
646 /** Peer description (the other side) */
647 lnet_process_id_t rq_peer;
648 /** Server-side, export on which request was received */
649 struct obd_export *rq_export;
650 /** Client side, import where request is being sent */
651 struct obd_import *rq_import;
653 /** Replay callback, called after request is replayed at recovery */
654 void (*rq_replay_cb)(struct ptlrpc_request *);
656 * Commit callback, called when request is committed and about to be
659 void (*rq_commit_cb)(struct ptlrpc_request *);
660 /** Opaq data for replay and commit callbacks. */
663 /** For bulk requests on client only: bulk descriptor */
664 struct ptlrpc_bulk_desc *rq_bulk;
666 /** client outgoing req */
668 * when request/reply sent (secs), or time when request should be sent
671 /** time for request really sent out */
674 /** when request must finish. volatile
675 * so that servers' early reply updates to the deadline aren't
676 * kept in per-cpu cache */
677 volatile time_t rq_deadline;
678 /** when req reply unlink must finish. */
679 time_t rq_reply_deadline;
680 /** when req bulk unlink must finish. */
681 time_t rq_bulk_deadline;
683 * service time estimate (secs)
 * If the request is not served by this time, it is marked as timed out.
688 /** Multi-rpc bits */
689 /** Link item for request set lists */
690 cfs_list_t rq_set_chain;
691 /** Per-request waitq introduced by bug 21938 for recovery waiting */
692 cfs_waitq_t rq_set_waitq;
693 /** Link back to the request set */
694 struct ptlrpc_request_set *rq_set;
695 /** Async completion handler, called when reply is received */
696 ptlrpc_interpterer_t rq_interpret_reply;
697 /** Async completion context */
698 union ptlrpc_async_args rq_async_args;
700 /** Pool if request is from preallocated list */
701 struct ptlrpc_request_pool *rq_pool;
703 struct lu_context rq_session;
704 struct lu_context rq_recov_session;
706 /** request format description */
707 struct req_capsule rq_pill;
 * Call the completion handler for the rpc, if any; return its status or the
 * original rc if there was no handler defined for this request.
714 static inline int ptlrpc_req_interpret(const struct lu_env *env,
715 struct ptlrpc_request *req, int rc)
717 if (req->rq_interpret_reply != NULL) {
718 req->rq_status = req->rq_interpret_reply(env, req,
721 return req->rq_status;
727 * Returns 1 if request buffer at offset \a index was already swabbed
729 static inline int lustre_req_swabbed(struct ptlrpc_request *req, int index)
731 LASSERT(index < sizeof(req->rq_req_swab_mask) * 8);
732 return req->rq_req_swab_mask & (1 << index);
736 * Returns 1 if request reply buffer at offset \a index was already swabbed
738 static inline int lustre_rep_swabbed(struct ptlrpc_request *req, int index)
740 LASSERT(index < sizeof(req->rq_rep_swab_mask) * 8);
741 return req->rq_rep_swab_mask & (1 << index);
745 * Returns 1 if request needs to be swabbed into local cpu byteorder
747 static inline int ptlrpc_req_need_swab(struct ptlrpc_request *req)
749 return lustre_req_swabbed(req, MSG_PTLRPC_HEADER_OFF);
753 * Returns 1 if request reply needs to be swabbed into local cpu byteorder
755 static inline int ptlrpc_rep_need_swab(struct ptlrpc_request *req)
757 return lustre_rep_swabbed(req, MSG_PTLRPC_HEADER_OFF);
761 * Mark request buffer at offset \a index that it was already swabbed
763 static inline void lustre_set_req_swabbed(struct ptlrpc_request *req, int index)
765 LASSERT(index < sizeof(req->rq_req_swab_mask) * 8);
766 LASSERT((req->rq_req_swab_mask & (1 << index)) == 0);
767 req->rq_req_swab_mask |= 1 << index;
771 * Mark request reply buffer at offset \a index that it was already swabbed
773 static inline void lustre_set_rep_swabbed(struct ptlrpc_request *req, int index)
775 LASSERT(index < sizeof(req->rq_rep_swab_mask) * 8);
776 LASSERT((req->rq_rep_swab_mask & (1 << index)) == 0);
777 req->rq_rep_swab_mask |= 1 << index;
781 * Convert numerical request phase value \a phase into text string description
783 static inline const char *
784 ptlrpc_phase2str(enum rq_phase phase)
793 case RQ_PHASE_INTERPRET:
795 case RQ_PHASE_COMPLETE:
797 case RQ_PHASE_UNREGISTERING:
798 return "Unregistering";
 * Convert numerical request phase of the request \a req into a text string
808 static inline const char *
809 ptlrpc_rqphase2str(struct ptlrpc_request *req)
811 return ptlrpc_phase2str(req->rq_phase);
815 * Debugging functions and helpers to print request structure into debug log
818 /* Spare the preprocessor, spoil the bugs. */
819 #define FLAG(field, str) (field ? str : "")
821 /** Convert bit flags into a string */
822 #define DEBUG_REQ_FLAGS(req) \
823 ptlrpc_rqphase2str(req), \
824 FLAG(req->rq_intr, "I"), FLAG(req->rq_replied, "R"), \
825 FLAG(req->rq_err, "E"), \
826 FLAG(req->rq_timedout, "X") /* eXpired */, FLAG(req->rq_resend, "S"), \
827 FLAG(req->rq_restart, "T"), FLAG(req->rq_replay, "P"), \
828 FLAG(req->rq_no_resend, "N"), \
829 FLAG(req->rq_waiting, "W"), \
830 FLAG(req->rq_wait_ctx, "C"), FLAG(req->rq_hp, "H"), \
831 FLAG(req->rq_committed, "M")
833 #define REQ_FLAGS_FMT "%s:%s%s%s%s%s%s%s%s%s%s%s%s"
835 void _debug_req(struct ptlrpc_request *req, __u32 mask,
836 struct libcfs_debug_msg_data *data, const char *fmt, ...)
837 __attribute__ ((format (printf, 4, 5)));
 * Helper that decides if we need to print the request according to the current debug
843 #define debug_req(cdls, level, req, file, func, line, fmt, a...) \
847 if (((level) & D_CANTMASK) != 0 || \
848 ((libcfs_debug & (level)) != 0 && \
849 (libcfs_subsystem_debug & DEBUG_SUBSYSTEM) != 0)) { \
850 static struct libcfs_debug_msg_data _req_dbg_data = \
851 DEBUG_MSG_DATA_INIT(cdls, DEBUG_SUBSYSTEM, file, func, line); \
852 _debug_req((req), (level), &_req_dbg_data, fmt, ##a); \
 * This is the debug print function you need to use to print request structure
 * content into the Lustre debug log.
 * For most callers (level is a constant) this is resolved at compile time */
860 #define DEBUG_REQ(level, req, fmt, args...) \
862 if ((level) & (D_ERROR | D_WARNING)) { \
863 static cfs_debug_limit_state_t cdls; \
864 debug_req(&cdls, level, req, __FILE__, __func__, __LINE__, \
865 "@@@ "fmt" ", ## args); \
867 debug_req(NULL, level, req, __FILE__, __func__, __LINE__, \
868 "@@@ "fmt" ", ## args); \
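/*
 * Typical use of DEBUG_REQ() (illustrative only; the request state, phase and
 * flags are printed by _debug_req() itself):
 *
 *      DEBUG_REQ(D_WARNING, req, "processing took %ld seconds", secs);
 */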
873 * Structure that defines a single page of a bulk transfer
875 struct ptlrpc_bulk_page {
876 /** Linkage to list of pages in a bulk */
879 * Number of bytes in a page to transfer starting from \a bp_pageoffset
882 /** offset within a page */
884 /** The page itself */
885 struct page *bp_page;
888 #define BULK_GET_SOURCE 0
889 #define BULK_PUT_SINK 1
890 #define BULK_GET_SINK 2
891 #define BULK_PUT_SOURCE 3
 * Definition of bulk descriptor.
 * Bulks are special "two-phase" RPCs where the initial request message
 * is sent first and is followed by a transfer (or receipt) of a large
 * amount of data to be settled into pages referenced from the bulk descriptors.
 * Bulk transfers (the actual data following the small requests) are done
 * on separate LNet portals.
 * In Lustre we use bulk transfers for READ and WRITE transfers from/to OSTs.
 * Another user is readpage for the MDT.
903 struct ptlrpc_bulk_desc {
904 /** completed successfully */
905 unsigned long bd_success:1;
906 /** accessible to the network (network io potentially in progress) */
907 unsigned long bd_network_rw:1;
908 /** {put,get}{source,sink} */
909 unsigned long bd_type:2;
911 unsigned long bd_registered:1;
912 /** For serialization with callback */
913 cfs_spinlock_t bd_lock;
914 /** Import generation when request for this bulk was sent */
915 int bd_import_generation;
916 /** Server side - export this bulk created for */
917 struct obd_export *bd_export;
918 /** Client side - import this bulk was sent on */
919 struct obd_import *bd_import;
920 /** LNet portal for this bulk */
922 /** Back pointer to the request */
923 struct ptlrpc_request *bd_req;
924 cfs_waitq_t bd_waitq; /* server side only WQ */
925 int bd_iov_count; /* # entries in bd_iov */
926 int bd_max_iov; /* allocated size of bd_iov */
927 int bd_nob; /* # bytes covered */
928 int bd_nob_transferred; /* # bytes GOT/PUT */
932 struct ptlrpc_cb_id bd_cbid; /* network callback info */
933 lnet_handle_md_t bd_md_h; /* associated MD */
934 lnet_nid_t bd_sender; /* stash event::sender */
936 #if defined(__KERNEL__)
938 * encrypt iov, size is either 0 or bd_iov_count.
940 lnet_kiov_t *bd_enc_iov;
942 lnet_kiov_t bd_iov[0];
944 lnet_md_iovec_t bd_iov[0];
949 SVC_STOPPED = 1 << 0,
950 SVC_STOPPING = 1 << 1,
951 SVC_STARTING = 1 << 2,
952 SVC_RUNNING = 1 << 3,
958 * Definition of server service thread structure
960 struct ptlrpc_thread {
962 * List of active threads in svc->srv_threads
966 * thread-private data (preallocated memory)
971 * service thread index, from ptlrpc_start_threads
979 * put watchdog in the structure per thread b=14840
981 struct lc_watchdog *t_watchdog;
983 * the svc this thread belonged to b=18582
985 struct ptlrpc_service *t_svc;
986 cfs_waitq_t t_ctl_waitq;
987 struct lu_env *t_env;
991 * Request buffer descriptor structure.
992 * This is a structure that contains one posted request buffer for service.
 * Once data lands in a buffer, the event callback creates an actual request
 * and wakes one of the service threads to process the new incoming request.
995 * More than one request can fit into the buffer.
997 struct ptlrpc_request_buffer_desc {
998 /** Link item for rqbds on a service */
999 cfs_list_t rqbd_list;
1000 /** History of requests for this buffer */
1001 cfs_list_t rqbd_reqs;
1002 /** Back pointer to service for which this buffer is registered */
1003 struct ptlrpc_service *rqbd_service;
1004 /** LNet descriptor */
1005 lnet_handle_md_t rqbd_md_h;
1007 /** The buffer itself */
1009 struct ptlrpc_cb_id rqbd_cbid;
1011 * This "embedded" request structure is only used for the
1012 * last request to fit into the buffer
1014 struct ptlrpc_request rqbd_req;
1017 typedef int (*svc_thr_init_t)(struct ptlrpc_thread *thread);
1018 typedef void (*svc_thr_done_t)(struct ptlrpc_thread *thread);
1019 typedef int (*svc_handler_t)(struct ptlrpc_request *req);
1020 typedef int (*svc_hpreq_handler_t)(struct ptlrpc_request *);
1021 typedef void (*svc_req_printfn_t)(void *, struct ptlrpc_request *);
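/*
 * Illustrative sketch only of a minimal svc_handler_t (foo_handler is a
 * hypothetical name): pack a reply, fill it in as needed, and send it back:
 *
 *      static int foo_handler(struct ptlrpc_request *req)
 *      {
 *              int rc;
 *
 *              rc = lustre_pack_reply(req, 1, NULL, NULL);
 *              if (rc)
 *                      return rc;
 *              return ptlrpc_reply(req);
 *      }
 */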
1023 #ifndef __cfs_cacheline_aligned
/* NB: put it here to reduce patch dependence */
1025 # define __cfs_cacheline_aligned
1029 * How many high priority requests to serve before serving one normal
1032 #define PTLRPC_SVC_HP_RATIO 10
1035 * Definition of PortalRPC service.
 * The service listens on a particular portal (like a tcp port)
 * and performs actions for a specific server, like IO service for an OST
 * or general metadata service for an MDS.
1040 * ptlrpc service has four locks:
1042 * serialize operations on rqbd and requests waiting for preprocess
 * serialize operations on active requests sent to this portal
1046 * serialize adaptive timeout stuff
1048 * serialize operations on RS list (reply states)
1050 * We don't have any use-case to take two or more locks at the same time
1051 * for now, so there is no lock order issue.
1053 struct ptlrpc_service {
1054 /** most often accessed fields */
1055 /** chain thru all services */
1056 cfs_list_t srv_list;
1057 /** only statically allocated strings here; we don't clean them */
1059 /** only statically allocated strings here; we don't clean them */
1060 char *srv_thread_name;
1061 /** service thread list */
1062 cfs_list_t srv_threads;
1063 /** threads to start at beginning of service */
1064 int srv_threads_min;
1065 /** thread upper limit */
1066 int srv_threads_max;
1067 /** always increasing number */
1068 unsigned srv_threads_next_id;
1069 /** # of starting threads */
1070 int srv_threads_starting;
1071 /** # running threads */
1072 int srv_threads_running;
1074 /** service operations, move to ptlrpc_svc_ops_t in the future */
1077 * if non-NULL called during thread creation (ptlrpc_start_thread())
1078 * to initialize service specific per-thread state.
1080 svc_thr_init_t srv_init;
1082 * if non-NULL called during thread shutdown (ptlrpc_main()) to
1083 * destruct state created by ->srv_init().
1085 svc_thr_done_t srv_done;
1086 /** Handler function for incoming requests for this service */
1087 svc_handler_t srv_handler;
1088 /** hp request handler */
1089 svc_hpreq_handler_t srv_hpreq_handler;
1090 /** service-specific print fn */
1091 svc_req_printfn_t srv_req_printfn;
1094 /** Root of /proc dir tree for this service */
1095 cfs_proc_dir_entry_t *srv_procroot;
1096 /** Pointer to statistic data for this service */
1097 struct lprocfs_stats *srv_stats;
1098 /** # hp per lp reqs to handle */
1099 int srv_hpreq_ratio;
1100 /** biggest request to receive */
1101 int srv_max_req_size;
1102 /** biggest reply to send */
1103 int srv_max_reply_size;
1104 /** size of individual buffers */
1106 /** # buffers to allocate in 1 group */
1107 int srv_nbuf_per_group;
1108 /** Local portal on which to receive requests */
1109 __u32 srv_req_portal;
1110 /** Portal on the client to send replies to */
1111 __u32 srv_rep_portal;
1113 * Tags for lu_context associated with this thread, see struct
1117 /** soft watchdog timeout multiplier */
1118 int srv_watchdog_factor;
1119 /** bind threads to CPUs */
1120 unsigned srv_cpu_affinity:1;
1121 /** under unregister_service */
1122 unsigned srv_is_stopping:1;
1125 * serialize the following fields, used for protecting
1126 * rqbd list and incoming requests waiting for preprocess
1128 cfs_spinlock_t srv_lock __cfs_cacheline_aligned;
1129 /** incoming reqs */
1130 cfs_list_t srv_req_in_queue;
1131 /** total # req buffer descs allocated */
1133 /** # posted request buffers */
1134 int srv_nrqbd_receiving;
1135 /** timeout before re-posting reqs, in tick */
1136 cfs_duration_t srv_rqbd_timeout;
1137 /** request buffers to be reposted */
1138 cfs_list_t srv_idle_rqbds;
1139 /** req buffers receiving */
1140 cfs_list_t srv_active_rqbds;
1141 /** request buffer history */
1142 cfs_list_t srv_history_rqbds;
1143 /** # request buffers in history */
1144 int srv_n_history_rqbds;
1145 /** max # request buffers in history */
1146 int srv_max_history_rqbds;
1147 /** request history */
1148 cfs_list_t srv_request_history;
1149 /** next request sequence # */
1150 __u64 srv_request_seq;
1151 /** highest seq culled from history */
1152 __u64 srv_request_max_cull_seq;
 * all threads sleep on this. This wait-queue is signalled when a new
 * incoming request arrives and when a difficult reply has to be handled.
1157 cfs_waitq_t srv_waitq;
1160 * serialize the following fields, used for processing requests
1161 * sent to this portal
1163 cfs_spinlock_t srv_rq_lock __cfs_cacheline_aligned;
1164 /** # reqs in either of the queues below */
1165 /** reqs waiting for service */
1166 cfs_list_t srv_request_queue;
1167 /** high priority queue */
1168 cfs_list_t srv_request_hpq;
1169 /** # incoming reqs */
1170 int srv_n_queued_reqs;
1171 /** # reqs being served */
1172 int srv_n_active_reqs;
1173 /** # HPreqs being served */
1174 int srv_n_active_hpreq;
1175 /** # hp requests handled */
1176 int srv_hpreq_count;
1181 * serialize the following fields, used for changes on
1184 cfs_spinlock_t srv_at_lock __cfs_cacheline_aligned;
1185 /** estimated rpc service time */
1186 struct adaptive_timeout srv_at_estimate;
1187 /** reqs waiting for replies */
1188 struct ptlrpc_at_array srv_at_array;
1189 /** early reply timer */
1190 cfs_timer_t srv_at_timer;
1191 /** check early replies */
1192 unsigned srv_at_check;
1194 cfs_time_t srv_at_checktime;
1198 * serialize the following fields, used for processing
1199 * replies for this portal
1201 cfs_spinlock_t srv_rs_lock __cfs_cacheline_aligned;
1202 /** all the active replies */
1203 cfs_list_t srv_active_replies;
1205 /** replies waiting for service */
1206 cfs_list_t srv_reply_queue;
1208 /** List of free reply_states */
1209 cfs_list_t srv_free_rs_list;
1210 /** waitq to run, when adding stuff to srv_free_rs_list */
1211 cfs_waitq_t srv_free_rs_waitq;
1212 /** # 'difficult' replies */
1213 cfs_atomic_t srv_n_difficult_replies;
1214 //struct ptlrpc_srv_ni srv_interfaces[0];
1218 * Declaration of ptlrpcd control structure
1220 struct ptlrpcd_ctl {
1222 * Ptlrpc thread control flags (LIOD_START, LIOD_STOP, LIOD_FORCE)
1224 unsigned long pc_flags;
1226 * Thread lock protecting structure fields.
1228 cfs_spinlock_t pc_lock;
1232 cfs_completion_t pc_starting;
1236 cfs_completion_t pc_finishing;
1238 * Thread requests set.
1240 struct ptlrpc_request_set *pc_set;
1242 * Thread name used in cfs_daemonize()
1246 * Environment for request interpreters to run in.
1248 struct lu_env pc_env;
1251 * Async rpcs flag to make sure that ptlrpcd_check() is called only
1256 * Currently not used.
1260 * User-space async rpcs callback.
1262 void *pc_wait_callback;
1264 * User-space check idle rpcs callback.
1266 void *pc_idle_callback;
1270 /* Bits for pc_flags */
1271 enum ptlrpcd_ctl_flags {
1273 * Ptlrpc thread start flag.
1275 LIOD_START = 1 << 0,
1277 * Ptlrpc thread stop flag.
1281 * Ptlrpc thread force flag (only stop force so far).
 * This will cause any inflight rpcs handled by the thread
 * to be aborted if LIOD_STOP is specified.
1285 LIOD_FORCE = 1 << 2,
1287 * This is a recovery ptlrpc thread.
1289 LIOD_RECOVERY = 1 << 3
1292 /* ptlrpc/events.c */
1293 extern lnet_handle_eq_t ptlrpc_eq_h;
1294 extern int ptlrpc_uuid_to_peer(struct obd_uuid *uuid,
1295 lnet_process_id_t *peer, lnet_nid_t *self);
1297 * These callbacks are invoked by LNet when something happened to
1301 extern void request_out_callback (lnet_event_t *ev);
1302 extern void reply_in_callback(lnet_event_t *ev);
1303 extern void client_bulk_callback (lnet_event_t *ev);
1304 extern void request_in_callback(lnet_event_t *ev);
1305 extern void reply_out_callback(lnet_event_t *ev);
1306 extern void server_bulk_callback (lnet_event_t *ev);
1309 /* ptlrpc/connection.c */
1310 struct ptlrpc_connection *ptlrpc_connection_get(lnet_process_id_t peer,
1312 struct obd_uuid *uuid);
1313 int ptlrpc_connection_put(struct ptlrpc_connection *c);
1314 struct ptlrpc_connection *ptlrpc_connection_addref(struct ptlrpc_connection *);
1315 int ptlrpc_connection_init(void);
1316 void ptlrpc_connection_fini(void);
1317 extern lnet_pid_t ptl_get_pid(void);
1319 /* ptlrpc/niobuf.c */
1321 * Actual interfacing with LNet to put/get/register/unregister stuff
1324 int ptlrpc_start_bulk_transfer(struct ptlrpc_bulk_desc *desc);
1325 void ptlrpc_abort_bulk(struct ptlrpc_bulk_desc *desc);
1326 int ptlrpc_register_bulk(struct ptlrpc_request *req);
1327 int ptlrpc_unregister_bulk(struct ptlrpc_request *req, int async);
1329 static inline int ptlrpc_server_bulk_active(struct ptlrpc_bulk_desc *desc)
1333 LASSERT(desc != NULL);
1335 cfs_spin_lock(&desc->bd_lock);
1336 rc = desc->bd_network_rw;
1337 cfs_spin_unlock(&desc->bd_lock);
1341 static inline int ptlrpc_client_bulk_active(struct ptlrpc_request *req)
1343 struct ptlrpc_bulk_desc *desc = req->rq_bulk;
1346 LASSERT(req != NULL);
1348 if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK) &&
1349 req->rq_bulk_deadline > cfs_time_current_sec())
1355 cfs_spin_lock(&desc->bd_lock);
1356 rc = desc->bd_network_rw;
1357 cfs_spin_unlock(&desc->bd_lock);
1361 #define PTLRPC_REPLY_MAYBE_DIFFICULT 0x01
1362 #define PTLRPC_REPLY_EARLY 0x02
1363 int ptlrpc_send_reply(struct ptlrpc_request *req, int flags);
1364 int ptlrpc_reply(struct ptlrpc_request *req);
1365 int ptlrpc_send_error(struct ptlrpc_request *req, int difficult);
1366 int ptlrpc_error(struct ptlrpc_request *req);
1367 void ptlrpc_resend_req(struct ptlrpc_request *request);
1368 int ptlrpc_at_get_net_latency(struct ptlrpc_request *req);
1369 int ptl_send_rpc(struct ptlrpc_request *request, int noreply);
1370 int ptlrpc_register_rqbd (struct ptlrpc_request_buffer_desc *rqbd);
1373 /* ptlrpc/client.c */
1375 * Client-side portals API. Everything to send requests, receive replies,
1376 * request queues, request management, etc.
1379 void ptlrpc_init_client(int req_portal, int rep_portal, char *name,
1380 struct ptlrpc_client *);
1381 void ptlrpc_cleanup_client(struct obd_import *imp);
1382 struct ptlrpc_connection *ptlrpc_uuid_to_connection(struct obd_uuid *uuid);
1384 int ptlrpc_queue_wait(struct ptlrpc_request *req);
1385 int ptlrpc_replay_req(struct ptlrpc_request *req);
1386 int ptlrpc_unregister_reply(struct ptlrpc_request *req, int async);
1387 void ptlrpc_restart_req(struct ptlrpc_request *req);
1388 void ptlrpc_abort_inflight(struct obd_import *imp);
1389 void ptlrpc_cleanup_imp(struct obd_import *imp);
1390 void ptlrpc_abort_set(struct ptlrpc_request_set *set);
1392 struct ptlrpc_request_set *ptlrpc_prep_set(void);
1393 int ptlrpc_set_add_cb(struct ptlrpc_request_set *set,
1394 set_interpreter_func fn, void *data);
1395 int ptlrpc_set_next_timeout(struct ptlrpc_request_set *);
1396 int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set);
1397 int ptlrpc_set_wait(struct ptlrpc_request_set *);
1398 int ptlrpc_expired_set(void *data);
1399 void ptlrpc_interrupted_set(void *data);
1400 void ptlrpc_mark_interrupted(struct ptlrpc_request *req);
1401 void ptlrpc_set_destroy(struct ptlrpc_request_set *);
1402 void ptlrpc_set_add_req(struct ptlrpc_request_set *, struct ptlrpc_request *);
1403 int ptlrpc_set_add_new_req(struct ptlrpcd_ctl *pc,
1404 struct ptlrpc_request *req);
1406 void ptlrpc_free_rq_pool(struct ptlrpc_request_pool *pool);
1407 void ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq);
1409 struct ptlrpc_request_pool *
1410 ptlrpc_init_rq_pool(int, int,
1411 void (*populate_pool)(struct ptlrpc_request_pool *, int));
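/*
 * Illustrative sketch only: a client creates an emergency request pool at
 * setup time so that an RPC can still be built under memory pressure, then
 * allocates from it with ptlrpc_request_alloc_pool() below (OST_MAXREQSIZE is
 * defined above, RQF_OBD_PING in lustre_req_layout.h):
 *
 *      struct ptlrpc_request_pool *pool;
 *      struct ptlrpc_request *req;
 *
 *      pool = ptlrpc_init_rq_pool(0, OST_MAXREQSIZE, ptlrpc_add_rqs_to_pool);
 *      ...
 *      req = ptlrpc_request_alloc_pool(imp, pool, &RQF_OBD_PING);
 */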
1413 void ptlrpc_at_set_req_timeout(struct ptlrpc_request *req);
1414 struct ptlrpc_request *ptlrpc_request_alloc(struct obd_import *imp,
1415 const struct req_format *format);
1416 struct ptlrpc_request *ptlrpc_request_alloc_pool(struct obd_import *imp,
1417 struct ptlrpc_request_pool *,
1418 const struct req_format *format);
1419 void ptlrpc_request_free(struct ptlrpc_request *request);
1420 int ptlrpc_request_pack(struct ptlrpc_request *request,
1421 __u32 version, int opcode);
1422 struct ptlrpc_request *ptlrpc_request_alloc_pack(struct obd_import *imp,
1423 const struct req_format *format,
1424 __u32 version, int opcode);
1425 int ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
1426 __u32 version, int opcode, char **bufs,
1427 struct ptlrpc_cli_ctx *ctx);
1428 struct ptlrpc_request *ptlrpc_prep_fakereq(struct obd_import *imp,
1429 unsigned int timeout,
1430 ptlrpc_interpterer_t interpreter);
1431 void ptlrpc_fakereq_finished(struct ptlrpc_request *req);
1433 struct ptlrpc_request *ptlrpc_prep_req(struct obd_import *imp, __u32 version,
1434 int opcode, int count, __u32 *lengths,
1436 struct ptlrpc_request *ptlrpc_prep_req_pool(struct obd_import *imp,
1437 __u32 version, int opcode,
1438 int count, __u32 *lengths, char **bufs,
1439 struct ptlrpc_request_pool *pool);
1440 void ptlrpc_req_finished(struct ptlrpc_request *request);
1441 void ptlrpc_req_finished_with_imp_lock(struct ptlrpc_request *request);
1442 struct ptlrpc_request *ptlrpc_request_addref(struct ptlrpc_request *req);
1443 struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp (struct ptlrpc_request *req,
1444 int npages, int type, int portal);
1445 struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_exp(struct ptlrpc_request *req,
1446 int npages, int type, int portal);
1447 void ptlrpc_free_bulk(struct ptlrpc_bulk_desc *bulk);
1448 void ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
1449 cfs_page_t *page, int pageoffset, int len);
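/*
 * Illustrative sketch only: a client preparing a bulk WRITE, where the server
 * will GET the data from the client's pages (OST_BULK_PORTAL comes from
 * lustre_idl.h; \a pages is a caller-provided cfs_page_t array; error
 * handling trimmed):
 *
 *      struct ptlrpc_bulk_desc *desc;
 *      int i;
 *
 *      desc = ptlrpc_prep_bulk_imp(req, npages, BULK_GET_SOURCE,
 *                                  OST_BULK_PORTAL);
 *      if (desc == NULL)
 *              return -ENOMEM;
 *      for (i = 0; i < npages; i++)
 *              ptlrpc_prep_bulk_page(desc, pages[i], 0, CFS_PAGE_SIZE);
 */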
1450 void ptlrpc_retain_replayable_request(struct ptlrpc_request *req,
1451 struct obd_import *imp);
1452 __u64 ptlrpc_next_xid(void);
1453 __u64 ptlrpc_sample_next_xid(void);
1454 __u64 ptlrpc_req_xid(struct ptlrpc_request *request);
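/*
 * Illustrative sketch only: the simplest synchronous RPC, modelled on an
 * OBD_PING (LUSTRE_OBD_VERSION, OBD_PING and RQF_OBD_PING come from other
 * headers):
 *
 *      struct ptlrpc_request *req;
 *      int rc;
 *
 *      req = ptlrpc_request_alloc_pack(imp, &RQF_OBD_PING,
 *                                      LUSTRE_OBD_VERSION, OBD_PING);
 *      if (req == NULL)
 *              return -ENOMEM;
 *      ptlrpc_request_set_replen(req);
 *      rc = ptlrpc_queue_wait(req);
 *      ptlrpc_req_finished(req);
 */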
1458 struct ptlrpc_service_conf {
1461 int psc_max_req_size;
1462 int psc_max_reply_size;
1465 int psc_watchdog_factor;
1466 int psc_min_threads;
1467 int psc_max_threads;
1471 /* ptlrpc/service.c */
1473 * Server-side services API. Register/unregister service, request state
1474 * management, service thread management
1478 void ptlrpc_save_lock (struct ptlrpc_request *req,
1479 struct lustre_handle *lock, int mode, int no_ack);
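/*
 * Illustrative sketch only: a server-side handler that granted a lock for a
 * "difficult" reply stashes it in the reply state instead of dropping it
 * before the reply is sent (\a lockh and \a mode come from the handler's own
 * lock acquisition):
 *
 *      ptlrpc_save_lock(req, &lockh, mode, 0);
 */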
1480 void ptlrpc_commit_replies(struct obd_export *exp);
1481 void ptlrpc_dispatch_difficult_reply (struct ptlrpc_reply_state *rs);
1482 void ptlrpc_schedule_difficult_reply (struct ptlrpc_reply_state *rs);
1483 struct ptlrpc_service *ptlrpc_init_svc_conf(struct ptlrpc_service_conf *c,
1484 svc_handler_t h, char *name,
1485 struct proc_dir_entry *proc_entry,
1486 svc_req_printfn_t prntfn,
1489 struct ptlrpc_service *ptlrpc_init_svc(int nbufs, int bufsize, int max_req_size,
1491 int req_portal, int rep_portal,
1492 int watchdog_factor,
1493 svc_handler_t, char *name,
1494 cfs_proc_dir_entry_t *proc_entry,
1496 int min_threads, int max_threads,
1497 char *threadname, __u32 ctx_tags,
1498 svc_hpreq_handler_t);
1499 void ptlrpc_stop_all_threads(struct ptlrpc_service *svc);
1501 int ptlrpc_start_threads(struct ptlrpc_service *svc);
1502 int ptlrpc_start_thread(struct ptlrpc_service *svc);
1503 int ptlrpc_unregister_service(struct ptlrpc_service *service);
1504 int liblustre_check_services (void *arg);
1505 void ptlrpc_daemonize(char *name);
1506 int ptlrpc_service_health_check(struct ptlrpc_service *);
1507 void ptlrpc_hpreq_reorder(struct ptlrpc_request *req);
1508 void ptlrpc_server_active_request_inc(struct ptlrpc_request *req);
1509 void ptlrpc_server_active_request_dec(struct ptlrpc_request *req);
1510 void ptlrpc_server_drop_request(struct ptlrpc_request *req);
1513 int ptlrpc_hr_init(void);
1514 void ptlrpc_hr_fini(void);
1516 # define ptlrpc_hr_init() (0)
1517 # define ptlrpc_hr_fini() do {} while(0)
1520 struct ptlrpc_svc_data {
1522 struct ptlrpc_service *svc;
1523 struct ptlrpc_thread *thread;
1527 /* ptlrpc/import.c */
1532 int ptlrpc_connect_import(struct obd_import *imp, char * new_uuid);
1533 int ptlrpc_init_import(struct obd_import *imp);
1534 int ptlrpc_disconnect_import(struct obd_import *imp, int noclose);
1535 int ptlrpc_import_recovery_state_machine(struct obd_import *imp);
1537 /* ptlrpc/pack_generic.c */
1538 int ptlrpc_reconnect_import(struct obd_import *imp);
1542 * ptlrpc msg buffer and swab interface
1546 int ptlrpc_buf_need_swab(struct ptlrpc_request *req, const int inout,
1548 void ptlrpc_buf_set_swabbed(struct ptlrpc_request *req, const int inout,
1550 int ptlrpc_unpack_rep_msg(struct ptlrpc_request *req, int len);
1551 int ptlrpc_unpack_req_msg(struct ptlrpc_request *req, int len);
1553 int lustre_msg_check_version(struct lustre_msg *msg, __u32 version);
1554 void lustre_init_msg_v2(struct lustre_msg_v2 *msg, int count, __u32 *lens,
1556 int lustre_pack_request(struct ptlrpc_request *, __u32 magic, int count,
1557 __u32 *lens, char **bufs);
1558 int lustre_pack_reply(struct ptlrpc_request *, int count, __u32 *lens,
1560 int lustre_pack_reply_v2(struct ptlrpc_request *req, int count,
1561 __u32 *lens, char **bufs, int flags);
1562 #define LPRFL_EARLY_REPLY 1
1563 int lustre_pack_reply_flags(struct ptlrpc_request *, int count, __u32 *lens,
1564 char **bufs, int flags);
1565 int lustre_shrink_msg(struct lustre_msg *msg, int segment,
1566 unsigned int newlen, int move_data);
1567 void lustre_free_reply_state(struct ptlrpc_reply_state *rs);
1568 int __lustre_unpack_msg(struct lustre_msg *m, int len);
1569 int lustre_msg_hdr_size(__u32 magic, int count);
1570 int lustre_msg_size(__u32 magic, int count, __u32 *lengths);
1571 int lustre_msg_size_v2(int count, __u32 *lengths);
1572 int lustre_packed_msg_size(struct lustre_msg *msg);
1573 int lustre_msg_early_size(void);
1574 void *lustre_msg_buf_v2(struct lustre_msg_v2 *m, int n, int min_size);
1575 void *lustre_msg_buf(struct lustre_msg *m, int n, int minlen);
1576 int lustre_msg_buflen(struct lustre_msg *m, int n);
1577 void lustre_msg_set_buflen(struct lustre_msg *m, int n, int len);
1578 int lustre_msg_bufcount(struct lustre_msg *m);
1579 char *lustre_msg_string (struct lustre_msg *m, int n, int max_len);
1580 __u32 lustre_msghdr_get_flags(struct lustre_msg *msg);
1581 void lustre_msghdr_set_flags(struct lustre_msg *msg, __u32 flags);
1582 __u32 lustre_msg_get_flags(struct lustre_msg *msg);
1583 void lustre_msg_add_flags(struct lustre_msg *msg, int flags);
1584 void lustre_msg_set_flags(struct lustre_msg *msg, int flags);
1585 void lustre_msg_clear_flags(struct lustre_msg *msg, int flags);
1586 __u32 lustre_msg_get_op_flags(struct lustre_msg *msg);
1587 void lustre_msg_add_op_flags(struct lustre_msg *msg, int flags);
1588 void lustre_msg_set_op_flags(struct lustre_msg *msg, int flags);
1589 struct lustre_handle *lustre_msg_get_handle(struct lustre_msg *msg);
1590 __u32 lustre_msg_get_type(struct lustre_msg *msg);
1591 __u32 lustre_msg_get_version(struct lustre_msg *msg);
1592 void lustre_msg_add_version(struct lustre_msg *msg, int version);
1593 __u32 lustre_msg_get_opc(struct lustre_msg *msg);
1594 __u64 lustre_msg_get_last_xid(struct lustre_msg *msg);
1595 __u64 lustre_msg_get_last_committed(struct lustre_msg *msg);
1596 __u64 *lustre_msg_get_versions(struct lustre_msg *msg);
1597 __u64 lustre_msg_get_transno(struct lustre_msg *msg);
1598 __u64 lustre_msg_get_slv(struct lustre_msg *msg);
1599 __u32 lustre_msg_get_limit(struct lustre_msg *msg);
1600 void lustre_msg_set_slv(struct lustre_msg *msg, __u64 slv);
1601 void lustre_msg_set_limit(struct lustre_msg *msg, __u64 limit);
1602 int lustre_msg_get_status(struct lustre_msg *msg);
1603 __u32 lustre_msg_get_conn_cnt(struct lustre_msg *msg);
1604 int lustre_msg_is_v1(struct lustre_msg *msg);
1605 __u32 lustre_msg_get_magic(struct lustre_msg *msg);
1606 __u32 lustre_msg_get_timeout(struct lustre_msg *msg);
1607 __u32 lustre_msg_get_service_time(struct lustre_msg *msg);
1608 __u32 lustre_msg_get_cksum(struct lustre_msg *msg);
1609 #if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 9, 0, 0)
1610 __u32 lustre_msg_calc_cksum(struct lustre_msg *msg, int compat18);
1612 # warning "remove checksum compatibility support for b1_8"
1613 __u32 lustre_msg_calc_cksum(struct lustre_msg *msg);
1615 void lustre_msg_set_handle(struct lustre_msg *msg,struct lustre_handle *handle);
1616 void lustre_msg_set_type(struct lustre_msg *msg, __u32 type);
1617 void lustre_msg_set_opc(struct lustre_msg *msg, __u32 opc);
1618 void lustre_msg_set_last_xid(struct lustre_msg *msg, __u64 last_xid);
1619 void lustre_msg_set_last_committed(struct lustre_msg *msg,__u64 last_committed);
1620 void lustre_msg_set_versions(struct lustre_msg *msg, __u64 *versions);
1621 void lustre_msg_set_transno(struct lustre_msg *msg, __u64 transno);
1622 void lustre_msg_set_status(struct lustre_msg *msg, __u32 status);
1623 void lustre_msg_set_conn_cnt(struct lustre_msg *msg, __u32 conn_cnt);
1624 void ptlrpc_req_set_repsize(struct ptlrpc_request *req, int count, __u32 *sizes);
1625 void ptlrpc_request_set_replen(struct ptlrpc_request *req);
1626 void lustre_msg_set_timeout(struct lustre_msg *msg, __u32 timeout);
1627 void lustre_msg_set_service_time(struct lustre_msg *msg, __u32 service_time);
1628 void lustre_msg_set_cksum(struct lustre_msg *msg, __u32 cksum);
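/*
 * Illustrative sketch only: typical accessor usage in a server-side handler
 * (OBD_PING and MSG_RESENT are defined in lustre_idl.h):
 *
 *      if (lustre_msg_get_opc(req->rq_reqmsg) == OBD_PING &&
 *          !(lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT))
 *              lustre_msg_set_status(req->rq_repmsg, 0);
 */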
1631 lustre_shrink_reply(struct ptlrpc_request *req, int segment,
1632 unsigned int newlen, int move_data)
1634 LASSERT(req->rq_reply_state);
1635 LASSERT(req->rq_repmsg);
1636 req->rq_replen = lustre_shrink_msg(req->rq_repmsg, segment,
1641 /** Change request phase of \a req to \a new_phase */
1643 ptlrpc_rqphase_move(struct ptlrpc_request *req, enum rq_phase new_phase)
1645 if (req->rq_phase == new_phase)
1648 if (new_phase == RQ_PHASE_UNREGISTERING) {
1649 req->rq_next_phase = req->rq_phase;
1651 cfs_atomic_inc(&req->rq_import->imp_unregistering);
1654 if (req->rq_phase == RQ_PHASE_UNREGISTERING) {
1656 cfs_atomic_dec(&req->rq_import->imp_unregistering);
1659 DEBUG_REQ(D_INFO, req, "move req \"%s\" -> \"%s\"",
1660 ptlrpc_rqphase2str(req), ptlrpc_phase2str(new_phase));
1662 req->rq_phase = new_phase;
1666 * Returns true if request \a req got early reply and hard deadline is not met
1669 ptlrpc_client_early(struct ptlrpc_request *req)
1671 if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
1672 req->rq_reply_deadline > cfs_time_current_sec())
1674 return req->rq_early;
1678 * Returns true if we got real reply from server for this request
1681 ptlrpc_client_replied(struct ptlrpc_request *req)
1683 if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
1684 req->rq_reply_deadline > cfs_time_current_sec())
1686 return req->rq_replied;
1689 /** Returns true if request \a req is in process of receiving server reply */
1691 ptlrpc_client_recv(struct ptlrpc_request *req)
1693 if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
1694 req->rq_reply_deadline > cfs_time_current_sec())
1696 return req->rq_receiving_reply;
1700 ptlrpc_client_recv_or_unlink(struct ptlrpc_request *req)
1704 cfs_spin_lock(&req->rq_lock);
1705 if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
1706 req->rq_reply_deadline > cfs_time_current_sec()) {
1707 cfs_spin_unlock(&req->rq_lock);
1710 rc = req->rq_receiving_reply || req->rq_must_unlink;
1711 cfs_spin_unlock(&req->rq_lock);
1716 ptlrpc_client_wake_req(struct ptlrpc_request *req)
1718 if (req->rq_set == NULL)
1719 cfs_waitq_signal(&req->rq_reply_waitq);
1721 cfs_waitq_signal(&req->rq_set->set_waitq);
1725 ptlrpc_rs_addref(struct ptlrpc_reply_state *rs)
1727 LASSERT(cfs_atomic_read(&rs->rs_refcount) > 0);
1728 cfs_atomic_inc(&rs->rs_refcount);
1732 ptlrpc_rs_decref(struct ptlrpc_reply_state *rs)
1734 LASSERT(cfs_atomic_read(&rs->rs_refcount) > 0);
1735 if (cfs_atomic_dec_and_test(&rs->rs_refcount))
1736 lustre_free_reply_state(rs);
1739 /* Should only be called once per req */
1740 static inline void ptlrpc_req_drop_rs(struct ptlrpc_request *req)
1742 if (req->rq_reply_state == NULL)
1743 return; /* shouldn't occur */
1744 ptlrpc_rs_decref(req->rq_reply_state);
1745 req->rq_reply_state = NULL;
1746 req->rq_repmsg = NULL;
1749 static inline __u32 lustre_request_magic(struct ptlrpc_request *req)
1751 return lustre_msg_get_magic(req->rq_reqmsg);
1754 static inline int ptlrpc_req_get_repsize(struct ptlrpc_request *req)
1756 switch (req->rq_reqmsg->lm_magic) {
1757 case LUSTRE_MSG_MAGIC_V2:
1758 return req->rq_reqmsg->lm_repsize;
1760 LASSERTF(0, "incorrect message magic: %08x\n",
1761 req->rq_reqmsg->lm_magic);
1766 static inline int ptlrpc_send_limit_expired(struct ptlrpc_request *req)
1768 if (req->rq_delay_limit != 0 &&
1769 cfs_time_before(cfs_time_add(req->rq_queued_time,
1770 cfs_time_seconds(req->rq_delay_limit)),
1771 cfs_time_current())) {
1777 static inline int ptlrpc_no_resend(struct ptlrpc_request *req)
1779 if (!req->rq_no_resend && ptlrpc_send_limit_expired(req)) {
1780 cfs_spin_lock(&req->rq_lock);
1781 req->rq_no_resend = 1;
1782 cfs_spin_unlock(&req->rq_lock);
1784 return req->rq_no_resend;
1787 /* ldlm/ldlm_lib.c */
1789 * Target client logic
1792 int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg);
1793 int client_obd_cleanup(struct obd_device *obddev);
1794 int client_connect_import(const struct lu_env *env,
1795 struct obd_export **exp, struct obd_device *obd,
1796 struct obd_uuid *cluuid, struct obd_connect_data *,
1798 int client_disconnect_export(struct obd_export *exp);
1799 int client_import_add_conn(struct obd_import *imp, struct obd_uuid *uuid,
1801 int client_import_del_conn(struct obd_import *imp, struct obd_uuid *uuid);
1802 int import_set_conn_priority(struct obd_import *imp, struct obd_uuid *uuid);
1803 void client_destroy_import(struct obd_import *imp);
1806 int server_disconnect_export(struct obd_export *exp);
1808 /* ptlrpc/pinger.c */
1810 * Pinger API (client side only)
1813 enum timeout_event {
1816 struct timeout_item;
1817 typedef int (*timeout_cb_t)(struct timeout_item *, void *);
1818 int ptlrpc_pinger_add_import(struct obd_import *imp);
1819 int ptlrpc_pinger_del_import(struct obd_import *imp);
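/*
 * Illustrative sketch only: a client keeps its import alive by registering it
 * with the pinger after a successful connect, and removes it before the
 * import is torn down:
 *
 *      rc = ptlrpc_pinger_add_import(imp);
 *      ...
 *      ptlrpc_pinger_del_import(imp);
 */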
1820 int ptlrpc_add_timeout_client(int time, enum timeout_event event,
1821 timeout_cb_t cb, void *data,
1822 cfs_list_t *obd_list);
1823 int ptlrpc_del_timeout_client(cfs_list_t *obd_list,
1824 enum timeout_event event);
1825 struct ptlrpc_request * ptlrpc_prep_ping(struct obd_import *imp);
1826 int ptlrpc_obd_ping(struct obd_device *obd);
1827 cfs_time_t ptlrpc_suspend_wakeup_time(void);
1829 void ping_evictor_start(void);
1830 void ping_evictor_stop(void);
1832 #define ping_evictor_start() do {} while (0)
1833 #define ping_evictor_stop() do {} while (0)
1835 int ptlrpc_check_and_wait_suspend(struct ptlrpc_request *req);
1838 /* ptlrpc/ptlrpcd.c */
 * A ptlrpcd scope is a set of two threads: ptlrpcd-foo and ptlrpcd-foo-rcv;
 * these threads are used to asynchronously send requests queued with
 * ptlrpcd_add_req(req, PSCOPE_FOO), and to handle completion call-backs for
 * such requests. Multiple scopes are needed to avoid dead-locks.
1846 enum ptlrpcd_scope {
1847 /** Scope of bulk read-write rpcs. */
1849 /** Everything else. */
1854 int ptlrpcd_start(const char *name, struct ptlrpcd_ctl *pc);
1855 void ptlrpcd_stop(struct ptlrpcd_ctl *pc, int force);
1856 void ptlrpcd_wake(struct ptlrpc_request *req);
1857 int ptlrpcd_add_req(struct ptlrpc_request *req, enum ptlrpcd_scope scope);
1858 void ptlrpcd_add_rqset(struct ptlrpc_request_set *set);
1859 int ptlrpcd_addref(void);
1860 void ptlrpcd_decref(void);
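/*
 * Illustrative sketch only: queueing a request for asynchronous processing by
 * ptlrpcd (foo_interpret is a hypothetical ptlrpc_interpterer_t; PSCOPE_OTHER
 * is assumed to be the "everything else" scope enumerator described above):
 *
 *      req->rq_interpret_reply = foo_interpret;
 *      rc = ptlrpcd_add_req(req, PSCOPE_OTHER);
 */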
1862 /* ptlrpc/lproc_ptlrpc.c */
1864 * procfs output related functions
1867 const char* ll_opcode2str(__u32 opcode);
1869 void ptlrpc_lprocfs_register_obd(struct obd_device *obd);
1870 void ptlrpc_lprocfs_unregister_obd(struct obd_device *obd);
1871 void ptlrpc_lprocfs_brw(struct ptlrpc_request *req, int bytes);
1873 static inline void ptlrpc_lprocfs_register_obd(struct obd_device *obd) {}
1874 static inline void ptlrpc_lprocfs_unregister_obd(struct obd_device *obd) {}
1875 static inline void ptlrpc_lprocfs_brw(struct ptlrpc_request *req, int bytes) {}
1879 /* ptlrpc/llog_server.c */
1880 int llog_origin_handle_create(struct ptlrpc_request *req);
1881 int llog_origin_handle_destroy(struct ptlrpc_request *req);
1882 int llog_origin_handle_prev_block(struct ptlrpc_request *req);
1883 int llog_origin_handle_next_block(struct ptlrpc_request *req);
1884 int llog_origin_handle_read_header(struct ptlrpc_request *req);
1885 int llog_origin_handle_close(struct ptlrpc_request *req);
1886 int llog_origin_handle_cancel(struct ptlrpc_request *req);
1887 int llog_catinfo(struct ptlrpc_request *req);
1889 /* ptlrpc/llog_client.c */
1890 extern struct llog_operations llog_client_ops;