1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 only,
10 * as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License version 2 for more details (a copy is included
16 * in the LICENSE file that accompanied this code).
18 * You should have received a copy of the GNU General Public License
19 * version 2 along with this program; If not, see
20 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23 * CA 95054 USA or visit www.sun.com if you need additional information or
29 * Copyright 2008 Sun Microsystems, Inc. All rights reserved
30 * Use is subject to license terms.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
45 #if defined(__linux__)
46 #include <linux/lustre_net.h>
47 #elif defined(__APPLE__)
48 #include <darwin/lustre_net.h>
49 #elif defined(__WINNT__)
50 #include <winnt/lustre_net.h>
52 #error Unsupported operating system.
55 #include <libcfs/libcfs.h>
57 #include <lnet/lnet.h>
58 #include <lustre/lustre_idl.h>
59 #include <lustre_ha.h>
60 #include <lustre_sec.h>
61 #include <lustre_import.h>
62 #include <lprocfs_status.h>
63 #include <lu_object.h>
64 #include <lustre_req_layout.h>
66 #include <obd_support.h>
67 #include <lustre_ver.h>
69 /* MD flags we _always_ use */
70 #define PTLRPC_MD_OPTIONS 0
72 /* Define maxima for bulk I/O
73 * CAVEAT EMPTOR, with multinet (i.e. routers forwarding between networks)
74 * these limits are system-wide and not interface-local. */
75 #define PTLRPC_MAX_BRW_BITS LNET_MTU_BITS
76 #define PTLRPC_MAX_BRW_SIZE (1<<LNET_MTU_BITS)
77 #define PTLRPC_MAX_BRW_PAGES (PTLRPC_MAX_BRW_SIZE >> CFS_PAGE_SHIFT)
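/* Worked example (illustrative, not from the original header): assuming the
 * common LNET_MTU_BITS = 20 and CFS_PAGE_SHIFT = 12 (4 KB pages), this gives
 *      PTLRPC_MAX_BRW_SIZE  = 1 << 20          = 1 MB per bulk RPC
 *      PTLRPC_MAX_BRW_PAGES = (1 << 20) >> 12  = 256 pages per bulk RPC */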
79 /* When PAGE_SIZE is a constant, we can check our arithmetic here with cpp! */
81 # if ((PTLRPC_MAX_BRW_PAGES & (PTLRPC_MAX_BRW_PAGES - 1)) != 0)
82 # error "PTLRPC_MAX_BRW_PAGES isn't a power of two"
84 # if (PTLRPC_MAX_BRW_SIZE != (PTLRPC_MAX_BRW_PAGES * CFS_PAGE_SIZE))
85 # error "PTLRPC_MAX_BRW_SIZE isn't PTLRPC_MAX_BRW_PAGES * CFS_PAGE_SIZE"
87 # if (PTLRPC_MAX_BRW_SIZE > LNET_MTU)
88 # error "PTLRPC_MAX_BRW_SIZE too big"
90 # if (PTLRPC_MAX_BRW_PAGES > LNET_MAX_IOV)
91 # error "PTLRPC_MAX_BRW_PAGES too big"
93 #endif /* __KERNEL__ */
95 /* Size over which to OBD_VMALLOC() rather than OBD_ALLOC() service request
97 #define SVC_BUF_VMALLOC_THRESHOLD (2 * CFS_PAGE_SIZE)
99 /* The following constants determine how memory is used to buffer incoming
102 * ?_NBUFS # buffers to allocate when growing the pool
103 * ?_BUFSIZE # bytes in a single request buffer
104 * ?_MAXREQSIZE # maximum request service will receive
106 * When fewer than ?_NBUFS/2 buffers are posted for receive, another chunk
107 * of ?_NBUFS is added to the pool.
109 * Messages larger than ?_MAXREQSIZE are dropped. Request buffers are
110 * considered full when less than ?_MAXREQSIZE is left in them.
113 #define LDLM_THREADS_AUTO_MIN (2)
114 #define LDLM_THREADS_AUTO_MAX min_t(unsigned, cfs_num_online_cpus() * \
115 cfs_num_online_cpus() * 32, 128)
116 #define LDLM_BL_THREADS LDLM_THREADS_AUTO_MIN
117 #define LDLM_NBUFS (64 * cfs_num_online_cpus())
118 #define LDLM_BUFSIZE (8 * 1024)
119 #define LDLM_MAXREQSIZE (5 * 1024)
120 #define LDLM_MAXREPSIZE (1024)
122 #define MDT_MIN_THREADS 2UL
123 #define MDT_MAX_THREADS 512UL
124 #define MDT_NUM_THREADS max(min_t(unsigned long, MDT_MAX_THREADS, \
125 cfs_num_physpages >> (25 - CFS_PAGE_SHIFT)), \
128 /* Absolute limits */
129 #define MDS_THREADS_MIN 2
130 #define MDS_THREADS_MAX 512
131 #define MDS_THREADS_MIN_READPAGE 2
132 #define MDS_NBUFS (64 * cfs_num_online_cpus())
133 #define MDS_BUFSIZE (8 * 1024)
134 /* Assume file name length = FNAME_MAX = 256 (true for ext3).
135 * path name length = PATH_MAX = 4096
136 * LOV MD size max = EA_MAX = 4000
137 * symlink: FNAME_MAX + PATH_MAX <- largest
138 * link: FNAME_MAX + PATH_MAX (mds_rec_link < mds_rec_create)
139 * rename: FNAME_MAX + FNAME_MAX
140 * open: FNAME_MAX + EA_MAX
142 * MDS_MAXREQSIZE ~= 4736 bytes =
143 * lustre_msg + ldlm_request + mds_body + mds_rec_create + FNAME_MAX + PATH_MAX
144 * MDS_MAXREPSIZE ~= 8300 bytes = lustre_msg + llog_header
145 * or, for mds_close() and mds_reint_unlink() on a many-OST filesystem:
146 * = 9210 bytes = lustre_msg + mds_body + 160 * (easize + cookiesize)
148 * Realistic size is about 512 bytes (20 character name + 128 char symlink),
149 * except in the open case where there are a large number of OSTs in a LOV.
151 #define MDS_MAXREQSIZE (5 * 1024)
152 #define MDS_MAXREPSIZE max(9 * 1024, 362 + LOV_MAX_STRIPE_COUNT * 56)
154 /* FLD_MAXREQSIZE == lustre_msg + __u32 padding + ptlrpc_body + opc + md_fld */
155 #define FLD_MAXREQSIZE (160)
157 /* FLD_MAXREPSIZE == lustre_msg + ptlrpc_body + md_fld */
158 #define FLD_MAXREPSIZE (152)
160 /* SEQ_MAXREQSIZE == lustre_msg + __u32 padding + ptlrpc_body + opc + lu_range +
162 #define SEQ_MAXREQSIZE (160)
164 /* SEQ_MAXREPSIZE == lustre_msg + ptlrpc_body + lu_range */
165 #define SEQ_MAXREPSIZE (152)
167 /* MGS threads must be >= 3, see bug 22458 comment #28 */
168 #define MGS_THREADS_AUTO_MIN 3
169 #define MGS_THREADS_AUTO_MAX 32
170 #define MGS_NBUFS (64 * cfs_num_online_cpus())
171 #define MGS_BUFSIZE (8 * 1024)
172 #define MGS_MAXREQSIZE (7 * 1024)
173 #define MGS_MAXREPSIZE (9 * 1024)
175 /* Absolute limits */
176 #define OSS_THREADS_MIN 3 /* difficult replies, HPQ, others */
177 #define OSS_THREADS_MAX 512
178 #define OST_NBUFS (64 * cfs_num_online_cpus())
179 #define OST_BUFSIZE (8 * 1024)
180 /* OST_MAXREQSIZE ~= 4768 bytes =
181 * lustre_msg + obdo + 16 * obd_ioobj + 256 * niobuf_remote
183 * - single object with 16 pages is 512 bytes
184 * - OST_MAXREQSIZE must be at least 1 page of cookies plus some spillover
186 #define OST_MAXREQSIZE (5 * 1024)
187 #define OST_MAXREPSIZE (9 * 1024)
189 /* Macro to hide a typecast. */
190 #define ptlrpc_req_async_args(req) ((void *)&req->rq_async_args)
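/* Illustrative sketch (not part of the original header): how a caller
 * typically stashes per-request context behind ptlrpc_req_async_args()
 * before sending.  "struct my_async_args", its fields and "my_interpret"
 * are hypothetical; CLASSERT is the libcfs compile-time assertion.
 *
 *      struct my_async_args {
 *              struct obd_device *aa_obd;
 *              int                aa_flags;
 *      } *aa;
 *
 *      CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
 *      aa = ptlrpc_req_async_args(req);
 *      aa->aa_obd   = obd;
 *      aa->aa_flags = 0;
 *      req->rq_interpret_reply = my_interpret;
 *
 * The interpret callback later recovers the same pointer with
 * ptlrpc_req_async_args(req) and casts it back. */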
192 struct ptlrpc_connection {
193 cfs_hlist_node_t c_hash;
195 lnet_process_id_t c_peer;
196 struct obd_uuid c_remote_uuid;
197 cfs_atomic_t c_refcount;
200 struct ptlrpc_client {
201 __u32 cli_request_portal;
202 __u32 cli_reply_portal;
206 /* state flags of requests */
207 /* XXX only ones left are those used by the bulk descs as well! */
208 #define PTL_RPC_FL_INTR (1 << 0) /* reply wait was interrupted by user */
209 #define PTL_RPC_FL_TIMEOUT (1 << 7) /* request timed out waiting for reply */
211 #define REQ_MAX_ACK_LOCKS 8
213 union ptlrpc_async_args {
214 /* Scratchpad for passing args to completion interpreter. Users
215 * cast to the struct of their choosing, and LASSERT that this is
216 * big enough. For _tons_ of context, OBD_ALLOC a struct and store
217 * a pointer to it here. The pointer_arg ensures this struct is at
218 * least big enough for that. */
219 void *pointer_arg[11];
223 struct ptlrpc_request_set;
224 typedef int (*set_interpreter_func)(struct ptlrpc_request_set *, void *, int);
226 struct ptlrpc_request_set {
227 cfs_atomic_t set_remaining; /* # uncompleted requests */
228 cfs_waitq_t set_waitq;
229 cfs_waitq_t *set_wakeup_ptr;
230 cfs_list_t set_requests;
231 cfs_list_t set_cblist; /* list of completion callbacks */
232 set_interpreter_func set_interpret; /* completion callback */
233 void *set_arg; /* completion context */
234 /* locked so that any old caller can communicate requests to
235 * the set holder who can then fold them into the lock-free set */
236 cfs_spinlock_t set_new_req_lock;
237 cfs_list_t set_new_requests;
240 struct ptlrpc_set_cbdata {
242 set_interpreter_func psc_interpret;
246 struct ptlrpc_bulk_desc;
249 * ptlrpc callback & work item stuff
251 struct ptlrpc_cb_id {
252 void (*cbid_fn)(lnet_event_t *ev); /* specific callback fn */
253 void *cbid_arg; /* additional arg */
256 #define RS_MAX_LOCKS 8
259 struct ptlrpc_reply_state {
260 struct ptlrpc_cb_id rs_cb_id;
262 cfs_list_t rs_exp_list;
263 cfs_list_t rs_obd_list;
265 cfs_list_t rs_debug_list;
267 /* A spinlock to protect the reply state flags */
268 cfs_spinlock_t rs_lock;
269 /* Reply state flags */
270 unsigned long rs_difficult:1; /* ACK/commit stuff */
271 unsigned long rs_no_ack:1; /* no ACK, even for
272 difficult requests */
273 unsigned long rs_scheduled:1; /* being handled? */
274 unsigned long rs_scheduled_ever:1;/* any schedule attempts? */
275 unsigned long rs_handled:1; /* been handled yet? */
276 unsigned long rs_on_net:1; /* reply_out_callback pending? */
277 unsigned long rs_prealloc:1; /* rs from prealloc list */
278 unsigned long rs_committed:1;/* the transaction was committed
279 and the rs was dispatched
280 by ptlrpc_commit_replies */
285 struct obd_export *rs_export;
286 struct ptlrpc_service *rs_service;
287 lnet_handle_md_t rs_md_h;
288 cfs_atomic_t rs_refcount;
290 struct ptlrpc_svc_ctx *rs_svc_ctx;
291 struct lustre_msg *rs_repbuf; /* wrapper */
292 int rs_repbuf_len; /* wrapper buf length */
293 int rs_repdata_len; /* wrapper msg length */
294 struct lustre_msg *rs_msg; /* reply message */
296 /* locks awaiting client reply ACK */
298 struct lustre_handle rs_locks[RS_MAX_LOCKS];
299 ldlm_mode_t rs_modes[RS_MAX_LOCKS];
302 struct ptlrpc_thread;
305 RQ_PHASE_NEW = 0xebc0de00,
306 RQ_PHASE_RPC = 0xebc0de01,
307 RQ_PHASE_BULK = 0xebc0de02,
308 RQ_PHASE_INTERPRET = 0xebc0de03,
309 RQ_PHASE_COMPLETE = 0xebc0de04,
310 RQ_PHASE_UNREGISTERING = 0xebc0de05,
311 RQ_PHASE_UNDEFINED = 0xebc0de06
314 /** Type of request interpreter call-back */
315 typedef int (*ptlrpc_interpterer_t)(const struct lu_env *env,
316 struct ptlrpc_request *req,
319 struct ptlrpc_request_pool {
320 cfs_spinlock_t prp_lock;
321 cfs_list_t prp_req_list; /* list of ptlrpc_request structs */
323 void (*prp_populate)(struct ptlrpc_request_pool *, int);
331 struct ptlrpc_hpreq_ops {
333 * Check if the lock handle of the given lock is the same as the one
334 * taken from the request.
336 int (*hpreq_lock_match)(struct ptlrpc_request *, struct ldlm_lock *);
338 * Check if the request is a high priority one.
340 int (*hpreq_check)(struct ptlrpc_request *);
344 * Represents a remote procedure call.
346 struct ptlrpc_request {
347 int rq_type; /* one of PTL_RPC_MSG_* */
349 cfs_list_t rq_timed_list; /* server-side early replies */
350 cfs_list_t rq_history_list; /* server-side history */
351 cfs_list_t rq_exp_list; /* server-side per-export list */
352 struct ptlrpc_hpreq_ops *rq_ops; /* server-side hp handlers */
353 __u64 rq_history_seq; /* history sequence # */
354 /* the index of service's srv_at_array into which request is linked */
357 cfs_spinlock_t rq_lock;
358 /* client-side flags are serialized by rq_lock */
359 unsigned long rq_intr:1, rq_replied:1, rq_err:1,
360 rq_timedout:1, rq_resend:1, rq_restart:1,
362 * when ->rq_replay is set, request is kept by the client even
363 * after server commits corresponding transaction. This is
364 * used for operations that require sequence of multiple
365 * requests to be replayed. The only example currently is file
366 * open/close. When last request in such a sequence is
367 * committed, ->rq_replay is cleared on all requests in the
371 rq_no_resend:1, rq_waiting:1, rq_receiving_reply:1,
372 rq_no_delay:1, rq_net_err:1, rq_wait_ctx:1,
373 rq_early:1, rq_must_unlink:1,
374 rq_fake:1, /* this is a fake request */
375 /* server-side flags */
376 rq_packed_final:1, /* packed final reply */
377 rq_hp:1, /* high priority RPC */
378 rq_at_linked:1, /* link into service's srv_at_array */
381 /* whether the "rq_set" is a valid one */
384 enum rq_phase rq_phase; /* one of RQ_PHASE_* */
385 enum rq_phase rq_next_phase; /* one of RQ_PHASE_* to be used next */
386 cfs_atomic_t rq_refcount;/* client-side refcount for SENT race,
387 server-side refcount for multiple replies */
389 struct ptlrpc_thread *rq_svc_thread; /* initial thread servicing req */
391 int rq_request_portal; /* XXX FIXME bug 249 */
392 int rq_reply_portal; /* XXX FIXME bug 249 */
394 int rq_nob_received; /* client-side:
395 * !rq_truncate : # reply bytes actually received,
396 * rq_truncate : required repbuf_len for resend */
398 struct lustre_msg *rq_reqmsg;
401 struct lustre_msg *rq_repmsg;
404 cfs_list_t rq_replay_list;
406 struct ptlrpc_cli_ctx *rq_cli_ctx; /* client's half ctx */
407 struct ptlrpc_svc_ctx *rq_svc_ctx; /* server's half ctx */
408 cfs_list_t rq_ctx_chain; /* link to waited ctx */
410 struct sptlrpc_flavor rq_flvr; /* client & server */
411 enum lustre_sec_part rq_sp_from;
413 unsigned long /* client/server security flags */
414 rq_ctx_init:1, /* context initiation */
415 rq_ctx_fini:1, /* context destroy */
416 rq_bulk_read:1, /* request bulk read */
417 rq_bulk_write:1, /* request bulk write */
418 /* server authentication flags */
419 rq_auth_gss:1, /* authenticated by gss */
420 rq_auth_remote:1, /* authed as remote user */
421 rq_auth_usr_root:1, /* authed as root */
422 rq_auth_usr_mdt:1, /* authed as mdt */
423 /* security tfm flags */
426 /* doesn't expect reply FIXME */
428 rq_pill_init:1; /* pill initialized */
430 uid_t rq_auth_uid; /* authed uid */
431 uid_t rq_auth_mapped_uid; /* authed uid mapped to */
433 /* (server side), pointed directly into req buffer */
434 struct ptlrpc_user_desc *rq_user_desc;
436 /* early replies go to offset 0, regular replies go after that */
437 unsigned int rq_reply_off;
439 /* various buffer pointers */
440 struct lustre_msg *rq_reqbuf; /* req wrapper */
441 int rq_reqbuf_len; /* req wrapper buf len */
442 int rq_reqdata_len; /* req wrapper msg len */
443 char *rq_repbuf; /* rep buffer */
444 int rq_repbuf_len; /* rep buffer len */
445 struct lustre_msg *rq_repdata; /* rep wrapper msg */
446 int rq_repdata_len; /* rep wrapper msg len */
447 struct lustre_msg *rq_clrbuf; /* only in priv mode */
448 int rq_clrbuf_len; /* only in priv mode */
449 int rq_clrdata_len; /* only in priv mode */
451 __u32 rq_req_swab_mask;
452 __u32 rq_rep_swab_mask;
454 int rq_import_generation;
455 enum lustre_imp_state rq_send_state;
457 int rq_early_count; /* how many early replies (for stats) */
459 /* client+server request */
460 lnet_handle_md_t rq_req_md_h;
461 struct ptlrpc_cb_id rq_req_cbid;
464 struct timeval rq_arrival_time; /* request arrival time */
465 struct ptlrpc_reply_state *rq_reply_state; /* separated reply state */
466 struct ptlrpc_request_buffer_desc *rq_rqbd; /* incoming request buffer*/
468 __u32 rq_uid; /* peer uid, used in MDS only */
471 /* client-only incoming reply */
472 lnet_handle_md_t rq_reply_md_h;
473 cfs_waitq_t rq_reply_waitq;
474 struct ptlrpc_cb_id rq_reply_cbid;
477 lnet_process_id_t rq_peer;
478 struct obd_export *rq_export;
479 struct obd_import *rq_import;
481 void (*rq_replay_cb)(struct ptlrpc_request *);
482 void (*rq_commit_cb)(struct ptlrpc_request *);
485 struct ptlrpc_bulk_desc *rq_bulk;/* client side bulk */
487 /* client outgoing req */
488 time_t rq_sent; /* when request/reply sent (secs), or
489 * time when request should be sent */
491 volatile time_t rq_deadline; /* when request must finish. volatile
492 so that servers' early reply updates to the deadline aren't
493 kept in per-cpu cache */
494 time_t rq_reply_deadline; /* when req reply unlink must finish. */
495 time_t rq_bulk_deadline; /* when req bulk unlink must finish. */
496 int rq_timeout; /* service time estimate (secs) */
499 cfs_list_t rq_set_chain;
500 cfs_waitq_t rq_set_waitq;
501 struct ptlrpc_request_set *rq_set;
502 /** Async completion handler */
503 ptlrpc_interpterer_t rq_interpret_reply;
504 union ptlrpc_async_args rq_async_args; /* Async completion context */
505 struct ptlrpc_request_pool *rq_pool; /* Pool if request from
507 struct lu_context rq_session;
508 struct lu_context rq_recov_session;
511 struct req_capsule rq_pill;
514 static inline int ptlrpc_req_interpret(const struct lu_env *env,
515 struct ptlrpc_request *req, int rc)
517 if (req->rq_interpret_reply != NULL) {
518 req->rq_status = req->rq_interpret_reply(env, req,
521 return req->rq_status;
526 static inline int lustre_req_swabbed(struct ptlrpc_request *req, int index)
528 LASSERT(index < sizeof(req->rq_req_swab_mask) * 8);
529 return req->rq_req_swab_mask & (1 << index);
532 static inline int lustre_rep_swabbed(struct ptlrpc_request *req, int index)
534 LASSERT(index < sizeof(req->rq_rep_swab_mask) * 8);
535 return req->rq_rep_swab_mask & (1 << index);
538 static inline int ptlrpc_req_need_swab(struct ptlrpc_request *req)
540 return lustre_req_swabbed(req, MSG_PTLRPC_HEADER_OFF);
543 static inline int ptlrpc_rep_need_swab(struct ptlrpc_request *req)
545 return lustre_rep_swabbed(req, MSG_PTLRPC_HEADER_OFF);
548 static inline void lustre_set_req_swabbed(struct ptlrpc_request *req, int index)
550 LASSERT(index < sizeof(req->rq_req_swab_mask) * 8);
551 LASSERT((req->rq_req_swab_mask & (1 << index)) == 0);
552 req->rq_req_swab_mask |= 1 << index;
555 static inline void lustre_set_rep_swabbed(struct ptlrpc_request *req, int index)
557 LASSERT(index < sizeof(req->rq_rep_swab_mask) * 8);
558 LASSERT((req->rq_rep_swab_mask & (1 << index)) == 0);
559 req->rq_rep_swab_mask |= 1 << index;
562 static inline const char *
563 ptlrpc_phase2str(enum rq_phase phase)
572 case RQ_PHASE_INTERPRET:
574 case RQ_PHASE_COMPLETE:
576 case RQ_PHASE_UNREGISTERING:
577 return "Unregistering";
583 static inline const char *
584 ptlrpc_rqphase2str(struct ptlrpc_request *req)
586 return ptlrpc_phase2str(req->rq_phase);
589 /* Spare the preprocessor, spoil the bugs. */
590 #define FLAG(field, str) (field ? str : "")
592 #define DEBUG_REQ_FLAGS(req) \
593 ptlrpc_rqphase2str(req), \
594 FLAG(req->rq_intr, "I"), FLAG(req->rq_replied, "R"), \
595 FLAG(req->rq_err, "E"), \
596 FLAG(req->rq_timedout, "X") /* eXpired */, FLAG(req->rq_resend, "S"), \
597 FLAG(req->rq_restart, "T"), FLAG(req->rq_replay, "P"), \
598 FLAG(req->rq_no_resend, "N"), \
599 FLAG(req->rq_waiting, "W"), \
600 FLAG(req->rq_wait_ctx, "C"), FLAG(req->rq_hp, "H"), \
601 FLAG(req->rq_committed, "M")
603 #define REQ_FLAGS_FMT "%s:%s%s%s%s%s%s%s%s%s%s%s%s"
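/* Illustrative sketch (not part of the original header): DEBUG_REQ (defined
 * below) pairs REQ_FLAGS_FMT/DEBUG_REQ_FLAGS with a caller-supplied format,
 * e.g.
 *
 *      DEBUG_REQ(D_ERROR, req, "send failed: rc = %d", rc);
 *
 * which prints the request phase and one letter per set flag in front of the
 * caller's message. */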
605 void _debug_req(struct ptlrpc_request *req, __u32 mask,
606 struct libcfs_debug_msg_data *data, const char *fmt, ...)
607 __attribute__ ((format (printf, 4, 5)));
609 #define debug_req(cdls, level, req, file, func, line, fmt, a...) \
613 if (((level) & D_CANTMASK) != 0 || \
614 ((libcfs_debug & (level)) != 0 && \
615 (libcfs_subsystem_debug & DEBUG_SUBSYSTEM) != 0)) { \
616 static struct libcfs_debug_msg_data _req_dbg_data = \
617 DEBUG_MSG_DATA_INIT(cdls, DEBUG_SUBSYSTEM, file, func, line); \
618 _debug_req((req), (level), &_req_dbg_data, fmt, ##a); \
622 /* for most callers (level is a constant) this is resolved at compile time */
623 #define DEBUG_REQ(level, req, fmt, args...) \
625 if ((level) & (D_ERROR | D_WARNING)) { \
626 static cfs_debug_limit_state_t cdls; \
627 debug_req(&cdls, level, req, __FILE__, __func__, __LINE__, \
628 "@@@ "fmt" ", ## args); \
630 debug_req(NULL, level, req, __FILE__, __func__, __LINE__, \
631 "@@@ "fmt" ", ## args); \
634 struct ptlrpc_bulk_page {
637 int bp_pageoffset; /* offset within a page */
638 struct page *bp_page;
641 #define BULK_GET_SOURCE 0
642 #define BULK_PUT_SINK 1
643 #define BULK_GET_SINK 2
644 #define BULK_PUT_SOURCE 3
646 struct ptlrpc_bulk_desc {
647 unsigned long bd_success:1; /* completed successfully */
648 unsigned long bd_network_rw:1; /* accessible to the network */
649 unsigned long bd_type:2; /* {put,get}{source,sink} */
650 unsigned long bd_registered:1; /* client side */
651 cfs_spinlock_t bd_lock; /* serialise with callback */
652 int bd_import_generation;
653 struct obd_export *bd_export;
654 struct obd_import *bd_import;
656 struct ptlrpc_request *bd_req; /* associated request */
657 cfs_waitq_t bd_waitq; /* server side only WQ */
658 int bd_iov_count; /* # entries in bd_iov */
659 int bd_max_iov; /* allocated size of bd_iov */
660 int bd_nob; /* # bytes covered */
661 int bd_nob_transferred; /* # bytes GOT/PUT */
665 struct ptlrpc_cb_id bd_cbid; /* network callback info */
666 lnet_handle_md_t bd_md_h; /* associated MD */
667 lnet_nid_t bd_sender; /* stash event::sender */
669 #if defined(__KERNEL__)
671 * iov for encrypted bulk data; its size is either 0 or bd_iov_count.
673 lnet_kiov_t *bd_enc_iov;
675 lnet_kiov_t bd_iov[0];
677 lnet_md_iovec_t bd_iov[0];
681 struct ptlrpc_thread {
683 * active threads in svc->srv_threads
687 * thread-private data (preallocated memory)
692 * service thread index, from ptlrpc_start_threads
700 * put watchdog in the structure per thread b=14840
702 struct lc_watchdog *t_watchdog;
704 * the svc this thread belongs to (b=18582)
706 struct ptlrpc_service *t_svc;
707 cfs_waitq_t t_ctl_waitq;
708 struct lu_env *t_env;
711 struct ptlrpc_request_buffer_desc {
712 cfs_list_t rqbd_list;
713 cfs_list_t rqbd_reqs;
714 struct ptlrpc_service *rqbd_service;
715 lnet_handle_md_t rqbd_md_h;
718 struct ptlrpc_cb_id rqbd_cbid;
719 struct ptlrpc_request rqbd_req;
722 typedef int (*svc_handler_t)(struct ptlrpc_request *req);
723 typedef void (*svcreq_printfn_t)(void *, struct ptlrpc_request *);
724 typedef int (*svc_hpreq_handler_t)(struct ptlrpc_request *);
726 #define PTLRPC_SVC_HP_RATIO 10
728 struct ptlrpc_service {
729 cfs_list_t srv_list; /* chain thru all services */
730 int srv_max_req_size; /* biggest request to receive */
731 int srv_max_reply_size; /* biggest reply to send */
732 int srv_buf_size; /* size of individual buffers */
733 int srv_nbuf_per_group; /* # buffers to allocate in 1 group */
734 int srv_nbufs; /* total # req buffer descs allocated */
735 int srv_threads_min; /* threads to start at SOW */
736 int srv_threads_max; /* thread upper limit */
737 int srv_threads_started; /* index of last started thread */
738 int srv_threads_running; /* # running threads */
739 cfs_atomic_t srv_n_difficult_replies; /* # 'difficult' replies */
740 int srv_n_active_reqs; /* # reqs being served */
741 int srv_n_hpreq; /* # HPreqs being served */
742 cfs_duration_t srv_rqbd_timeout; /* timeout before re-posting reqs, in ticks */
743 int srv_watchdog_factor; /* soft watchdog timeout multiplier */
744 unsigned srv_cpu_affinity:1; /* bind threads to CPUs */
745 unsigned srv_at_check:1; /* check early replies */
746 unsigned srv_is_stopping:1; /* under unregister_service */
747 cfs_time_t srv_at_checktime; /* debug */
749 __u32 srv_req_portal;
750 __u32 srv_rep_portal;
753 struct adaptive_timeout srv_at_estimate;/* estimated rpc service time */
754 cfs_spinlock_t srv_at_lock;
755 struct ptlrpc_at_array srv_at_array; /* reqs waiting for replies */
756 cfs_timer_t srv_at_timer; /* early reply timer */
758 int srv_n_queued_reqs; /* # reqs in either of the queues below */
759 int srv_hpreq_count; /* # hp requests handled */
760 int srv_hpreq_ratio; /* # hp per lp reqs to handle */
761 cfs_list_t srv_req_in_queue; /* incoming reqs */
762 cfs_list_t srv_request_queue; /* reqs waiting for service */
763 cfs_list_t srv_request_hpq; /* high priority queue */
765 cfs_list_t srv_request_history; /* request history */
766 __u64 srv_request_seq; /* next request sequence # */
767 __u64 srv_request_max_cull_seq; /* highest seq culled from history */
768 svcreq_printfn_t srv_request_history_print_fn; /* service-specific print fn */
770 cfs_list_t srv_idle_rqbds; /* request buffers to be reposted */
771 cfs_list_t srv_active_rqbds; /* req buffers receiving */
772 cfs_list_t srv_history_rqbds; /* request buffer history */
773 int srv_nrqbd_receiving; /* # posted request buffers */
774 int srv_n_history_rqbds; /* # request buffers in history */
775 int srv_max_history_rqbds;/* max # request buffers in history */
777 cfs_atomic_t srv_outstanding_replies;
778 cfs_list_t srv_active_replies; /* all the active replies */
780 cfs_list_t srv_reply_queue; /* replies waiting for service */
782 cfs_waitq_t srv_waitq; /* all threads sleep on this. This
783 * wait-queue is signalled when a new
784 * incoming request arrives and when a
785 * difficult reply has to be handled. */
787 cfs_list_t srv_threads; /* service thread list */
788 svc_handler_t srv_handler;
789 svc_hpreq_handler_t srv_hpreq_handler; /* hp request handler */
791 char *srv_name; /* only statically allocated strings here; we don't clean them */
792 char *srv_thread_name; /* only statically allocated strings here; we don't clean them */
794 cfs_spinlock_t srv_lock;
796 cfs_proc_dir_entry_t *srv_procroot;
797 struct lprocfs_stats *srv_stats;
799 /* List of free reply_states */
800 cfs_list_t srv_free_rs_list;
801 /* waitq to run, when adding stuff to srv_free_rs_list */
802 cfs_waitq_t srv_free_rs_waitq;
805 * Tags for lu_context associated with this thread, see struct
810 * if non-NULL, called during thread creation (ptlrpc_start_thread())
811 * to initialize service-specific per-thread state.
813 int (*srv_init)(struct ptlrpc_thread *thread);
815 * if non-NULL, called during thread shutdown (ptlrpc_main()) to
816 * destroy state created by ->srv_init().
818 void (*srv_done)(struct ptlrpc_thread *thread);
820 //struct ptlrpc_srv_ni srv_interfaces[0];
825 * Ptlrpc thread control flags (LIOD_START, LIOD_STOP, LIOD_FORCE)
827 unsigned long pc_flags;
829 * Thread lock protecting structure fields.
831 cfs_spinlock_t pc_lock;
835 cfs_completion_t pc_starting;
839 cfs_completion_t pc_finishing;
841 * Thread requests set.
843 struct ptlrpc_request_set *pc_set;
845 * Thread name used in cfs_daemonize()
849 * Environment for request interpreters to run in.
851 struct lu_env pc_env;
854 * Async rpcs flag to make sure that ptlrpcd_check() is called only
859 * Currently not used.
863 * User-space async rpcs callback.
865 void *pc_wait_callback;
867 * User-space check idle rpcs callback.
869 void *pc_idle_callback;
873 /* Bits for pc_flags */
874 enum ptlrpcd_ctl_flags {
876 * Ptlrpc thread start flag.
880 * Ptlrpc thread stop flag.
884 * Ptlrpc thread force flag (only stop force so far).
885 * If LIOD_STOP is also specified, any in-flight rpcs handled
886 * by the thread will be aborted.
890 * This is a recovery ptlrpc thread.
892 LIOD_RECOVERY = 1 << 3
895 /* ptlrpc/events.c */
896 extern lnet_handle_eq_t ptlrpc_eq_h;
897 extern int ptlrpc_uuid_to_peer(struct obd_uuid *uuid,
898 lnet_process_id_t *peer, lnet_nid_t *self);
899 extern void request_out_callback (lnet_event_t *ev);
900 extern void reply_in_callback(lnet_event_t *ev);
901 extern void client_bulk_callback (lnet_event_t *ev);
902 extern void request_in_callback(lnet_event_t *ev);
903 extern void reply_out_callback(lnet_event_t *ev);
904 extern void server_bulk_callback (lnet_event_t *ev);
906 /* ptlrpc/connection.c */
907 struct ptlrpc_connection *ptlrpc_connection_get(lnet_process_id_t peer,
909 struct obd_uuid *uuid);
910 int ptlrpc_connection_put(struct ptlrpc_connection *c);
911 struct ptlrpc_connection *ptlrpc_connection_addref(struct ptlrpc_connection *);
912 int ptlrpc_connection_init(void);
913 void ptlrpc_connection_fini(void);
914 extern lnet_pid_t ptl_get_pid(void);
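/* Illustrative sketch (not part of the original header): typical lifetime of
 * a ptlrpc_connection.  "peer", "self" and "uuid" are caller-provided; the
 * middle "self" nid argument is assumed from the elided part of the
 * ptlrpc_connection_get() prototype above.
 *
 *      struct ptlrpc_connection *conn;
 *
 *      conn = ptlrpc_connection_get(peer, self, uuid);
 *      if (conn == NULL)
 *              return -ENOMEM;
 *      ...
 *      ptlrpc_connection_put(conn);
 *
 * Additional holders take their own reference with
 * ptlrpc_connection_addref() and drop it with ptlrpc_connection_put(). */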
916 /* ptlrpc/niobuf.c */
917 int ptlrpc_start_bulk_transfer(struct ptlrpc_bulk_desc *desc);
918 void ptlrpc_abort_bulk(struct ptlrpc_bulk_desc *desc);
919 int ptlrpc_register_bulk(struct ptlrpc_request *req);
920 int ptlrpc_unregister_bulk(struct ptlrpc_request *req, int async);
922 static inline int ptlrpc_server_bulk_active(struct ptlrpc_bulk_desc *desc)
926 LASSERT(desc != NULL);
928 cfs_spin_lock(&desc->bd_lock);
929 rc = desc->bd_network_rw;
930 cfs_spin_unlock(&desc->bd_lock);
934 static inline int ptlrpc_client_bulk_active(struct ptlrpc_request *req)
936 struct ptlrpc_bulk_desc *desc = req->rq_bulk;
939 LASSERT(req != NULL);
941 if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK) &&
942 req->rq_bulk_deadline > cfs_time_current_sec())
948 cfs_spin_lock(&desc->bd_lock);
949 rc = desc->bd_network_rw;
950 cfs_spin_unlock(&desc->bd_lock);
954 #define PTLRPC_REPLY_MAYBE_DIFFICULT 0x01
955 #define PTLRPC_REPLY_EARLY 0x02
956 int ptlrpc_send_reply(struct ptlrpc_request *req, int flags);
957 int ptlrpc_reply(struct ptlrpc_request *req);
958 int ptlrpc_send_error(struct ptlrpc_request *req, int difficult);
959 int ptlrpc_error(struct ptlrpc_request *req);
960 void ptlrpc_resend_req(struct ptlrpc_request *request);
961 int ptlrpc_at_get_net_latency(struct ptlrpc_request *req);
962 int ptl_send_rpc(struct ptlrpc_request *request, int noreply);
963 int ptlrpc_register_rqbd (struct ptlrpc_request_buffer_desc *rqbd);
965 /* ptlrpc/client.c */
966 void ptlrpc_init_client(int req_portal, int rep_portal, char *name,
967 struct ptlrpc_client *);
968 void ptlrpc_cleanup_client(struct obd_import *imp);
969 struct ptlrpc_connection *ptlrpc_uuid_to_connection(struct obd_uuid *uuid);
971 int ptlrpc_queue_wait(struct ptlrpc_request *req);
972 int ptlrpc_replay_req(struct ptlrpc_request *req);
973 int ptlrpc_unregister_reply(struct ptlrpc_request *req, int async);
974 void ptlrpc_restart_req(struct ptlrpc_request *req);
975 void ptlrpc_abort_inflight(struct obd_import *imp);
976 void ptlrpc_cleanup_imp(struct obd_import *imp);
977 void ptlrpc_abort_set(struct ptlrpc_request_set *set);
979 struct ptlrpc_request_set *ptlrpc_prep_set(void);
980 int ptlrpc_set_add_cb(struct ptlrpc_request_set *set,
981 set_interpreter_func fn, void *data);
982 int ptlrpc_set_next_timeout(struct ptlrpc_request_set *);
983 int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set);
984 int ptlrpc_set_wait(struct ptlrpc_request_set *);
985 int ptlrpc_expired_set(void *data);
986 void ptlrpc_interrupted_set(void *data);
987 void ptlrpc_mark_interrupted(struct ptlrpc_request *req);
988 void ptlrpc_set_destroy(struct ptlrpc_request_set *);
989 void ptlrpc_set_add_req(struct ptlrpc_request_set *, struct ptlrpc_request *);
990 int ptlrpc_set_add_new_req(struct ptlrpcd_ctl *pc,
991 struct ptlrpc_request *req);
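/* Illustrative sketch (not part of the original header): issuing several
 * already-packed requests req[0..n-1] in parallel with a request set.
 *
 *      struct ptlrpc_request_set *set;
 *      int i, rc;
 *
 *      set = ptlrpc_prep_set();
 *      if (set == NULL)
 *              return -ENOMEM;
 *      for (i = 0; i < n; i++)
 *              ptlrpc_set_add_req(set, req[i]);
 *      rc = ptlrpc_set_wait(set);
 *      ptlrpc_set_destroy(set);
 *
 * ptlrpc_set_wait() sends everything in the set and returns once all replies
 * (or errors) have been processed; ptlrpc_set_destroy() releases the set and
 * the requests it still references. */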
993 void ptlrpc_free_rq_pool(struct ptlrpc_request_pool *pool);
994 void ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq);
996 struct ptlrpc_request_pool *
997 ptlrpc_init_rq_pool(int, int,
998 void (*populate_pool)(struct ptlrpc_request_pool *, int));
1000 void ptlrpc_at_set_req_timeout(struct ptlrpc_request *req);
1001 struct ptlrpc_request *ptlrpc_request_alloc(struct obd_import *imp,
1002 const struct req_format *format);
1003 struct ptlrpc_request *ptlrpc_request_alloc_pool(struct obd_import *imp,
1004 struct ptlrpc_request_pool *,
1005 const struct req_format *format);
1006 void ptlrpc_request_free(struct ptlrpc_request *request);
1007 int ptlrpc_request_pack(struct ptlrpc_request *request,
1008 __u32 version, int opcode);
1009 struct ptlrpc_request *ptlrpc_request_alloc_pack(struct obd_import *imp,
1010 const struct req_format *format,
1011 __u32 version, int opcode);
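/* Illustrative sketch (not part of the original header): the common
 * synchronous client pattern built from the helpers above.  RQF_OBD_PING,
 * LUSTRE_OBD_VERSION and OBD_PING are assumed from the request-layout and
 * protocol headers.
 *
 *      struct ptlrpc_request *req;
 *      int rc;
 *
 *      req = ptlrpc_request_alloc_pack(imp, &RQF_OBD_PING,
 *                                      LUSTRE_OBD_VERSION, OBD_PING);
 *      if (req == NULL)
 *              return -ENOMEM;
 *      ptlrpc_request_set_replen(req);
 *      rc = ptlrpc_queue_wait(req);
 *      ptlrpc_req_finished(req);
 *      return rc;
 */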
1012 int ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
1013 __u32 version, int opcode, char **bufs,
1014 struct ptlrpc_cli_ctx *ctx);
1015 struct ptlrpc_request *ptlrpc_prep_fakereq(struct obd_import *imp,
1016 unsigned int timeout,
1017 ptlrpc_interpterer_t interpreter);
1018 void ptlrpc_fakereq_finished(struct ptlrpc_request *req);
1020 struct ptlrpc_request *ptlrpc_prep_req(struct obd_import *imp, __u32 version,
1021 int opcode, int count, __u32 *lengths,
1023 struct ptlrpc_request *ptlrpc_prep_req_pool(struct obd_import *imp,
1024 __u32 version, int opcode,
1025 int count, __u32 *lengths, char **bufs,
1026 struct ptlrpc_request_pool *pool);
1027 void ptlrpc_req_finished(struct ptlrpc_request *request);
1028 void ptlrpc_req_finished_with_imp_lock(struct ptlrpc_request *request);
1029 struct ptlrpc_request *ptlrpc_request_addref(struct ptlrpc_request *req);
1030 struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp (struct ptlrpc_request *req,
1031 int npages, int type, int portal);
1032 struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_exp(struct ptlrpc_request *req,
1033 int npages, int type, int portal);
1034 void ptlrpc_free_bulk(struct ptlrpc_bulk_desc *bulk);
1035 void ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
1036 cfs_page_t *page, int pageoffset, int len);
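/* Illustrative sketch (not part of the original header): attaching a client
 * bulk descriptor to a read request.  BULK_PUT_SINK comes from this file;
 * OST_BULK_PORTAL and the "pages" array are assumed to be provided by the
 * caller's context.
 *
 *      struct ptlrpc_bulk_desc *desc;
 *      int i;
 *
 *      desc = ptlrpc_prep_bulk_imp(req, npages, BULK_PUT_SINK,
 *                                  OST_BULK_PORTAL);
 *      if (desc == NULL)
 *              return -ENOMEM;
 *      for (i = 0; i < npages; i++)
 *              ptlrpc_prep_bulk_page(desc, pages[i], 0, CFS_PAGE_SIZE);
 *
 * Registration and teardown happen as part of normal request processing; see
 * ptlrpc_register_bulk() and ptlrpc_free_bulk() above. */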
1037 void ptlrpc_retain_replayable_request(struct ptlrpc_request *req,
1038 struct obd_import *imp);
1039 __u64 ptlrpc_next_xid(void);
1040 __u64 ptlrpc_sample_next_xid(void);
1041 __u64 ptlrpc_req_xid(struct ptlrpc_request *request);
1043 struct ptlrpc_service_conf {
1046 int psc_max_req_size;
1047 int psc_max_reply_size;
1050 int psc_watchdog_factor;
1051 int psc_min_threads;
1052 int psc_max_threads;
1056 /* ptlrpc/service.c */
1057 void ptlrpc_save_lock (struct ptlrpc_request *req,
1058 struct lustre_handle *lock, int mode, int no_ack);
1059 void ptlrpc_commit_replies(struct obd_export *exp);
1060 void ptlrpc_dispatch_difficult_reply (struct ptlrpc_reply_state *rs);
1061 void ptlrpc_schedule_difficult_reply (struct ptlrpc_reply_state *rs);
1062 struct ptlrpc_service *ptlrpc_init_svc_conf(struct ptlrpc_service_conf *c,
1063 svc_handler_t h, char *name,
1064 struct proc_dir_entry *proc_entry,
1065 svcreq_printfn_t prntfn,
1068 struct ptlrpc_service *ptlrpc_init_svc(int nbufs, int bufsize, int max_req_size,
1070 int req_portal, int rep_portal,
1071 int watchdog_factor,
1072 svc_handler_t, char *name,
1073 cfs_proc_dir_entry_t *proc_entry,
1075 int min_threads, int max_threads,
1076 char *threadname, __u32 ctx_tags,
1077 svc_hpreq_handler_t);
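/* Illustrative sketch (not part of the original header): registering a
 * service through ptlrpc_init_svc_conf() with the LDLM buffer/thread
 * constants defined near the top of this file.  The handler, names and the
 * trailing thread-name argument are placeholders; conf fields elided from
 * the struct above (buffer counts, portals, context tags) are omitted here.
 *
 *      struct ptlrpc_service_conf conf = {
 *              .psc_max_req_size    = LDLM_MAXREQSIZE,
 *              .psc_max_reply_size  = LDLM_MAXREPSIZE,
 *              .psc_watchdog_factor = 2,
 *              .psc_min_threads     = LDLM_THREADS_AUTO_MIN,
 *              .psc_max_threads     = LDLM_THREADS_AUTO_MAX,
 *      };
 *      struct ptlrpc_service *svc;
 *      int rc;
 *
 *      svc = ptlrpc_init_svc_conf(&conf, my_handler, "my_svc",
 *                                 proc_entry, NULL, "my_svc_thread");
 *      if (svc == NULL)
 *              return -ENOMEM;
 *      rc = ptlrpc_start_threads(obddev, svc);
 */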
1078 void ptlrpc_stop_all_threads(struct ptlrpc_service *svc);
1080 int ptlrpc_start_threads(struct obd_device *dev, struct ptlrpc_service *svc);
1081 int ptlrpc_start_thread(struct obd_device *dev, struct ptlrpc_service *svc);
1082 int ptlrpc_unregister_service(struct ptlrpc_service *service);
1083 int liblustre_check_services (void *arg);
1084 void ptlrpc_daemonize(char *name);
1085 int ptlrpc_service_health_check(struct ptlrpc_service *);
1086 void ptlrpc_hpreq_reorder(struct ptlrpc_request *req);
1087 void ptlrpc_server_active_request_inc(struct ptlrpc_request *req);
1088 void ptlrpc_server_active_request_dec(struct ptlrpc_request *req);
1089 void ptlrpc_server_drop_request(struct ptlrpc_request *req);
1092 int ptlrpc_hr_init(void);
1093 void ptlrpc_hr_fini(void);
1095 # define ptlrpc_hr_init() (0)
1096 # define ptlrpc_hr_fini() do {} while(0)
1099 struct ptlrpc_svc_data {
1101 struct ptlrpc_service *svc;
1102 struct ptlrpc_thread *thread;
1103 struct obd_device *dev;
1106 /* ptlrpc/import.c */
1107 int ptlrpc_connect_import(struct obd_import *imp, char *new_uuid);
1108 int ptlrpc_init_import(struct obd_import *imp);
1109 int ptlrpc_disconnect_import(struct obd_import *imp, int noclose);
1110 int ptlrpc_import_recovery_state_machine(struct obd_import *imp);
1112 /* ptlrpc/pack_generic.c */
1113 int ptlrpc_reconnect_import(struct obd_import *imp);
1115 /** ptlrpc mgs buffer swab interface */
1116 int ptlrpc_buf_need_swab(struct ptlrpc_request *req, const int inout,
1118 void ptlrpc_buf_set_swabbed(struct ptlrpc_request *req, const int inout,
1120 int ptlrpc_unpack_rep_msg(struct ptlrpc_request *req, int len);
1121 int ptlrpc_unpack_req_msg(struct ptlrpc_request *req, int len);
1123 int lustre_msg_check_version(struct lustre_msg *msg, __u32 version);
1124 void lustre_init_msg_v2(struct lustre_msg_v2 *msg, int count, __u32 *lens,
1126 int lustre_pack_request(struct ptlrpc_request *, __u32 magic, int count,
1127 __u32 *lens, char **bufs);
1128 int lustre_pack_reply(struct ptlrpc_request *, int count, __u32 *lens,
1130 int lustre_pack_reply_v2(struct ptlrpc_request *req, int count,
1131 __u32 *lens, char **bufs, int flags);
1132 #define LPRFL_EARLY_REPLY 1
1133 int lustre_pack_reply_flags(struct ptlrpc_request *, int count, __u32 *lens,
1134 char **bufs, int flags);
1135 int lustre_shrink_msg(struct lustre_msg *msg, int segment,
1136 unsigned int newlen, int move_data);
1137 void lustre_free_reply_state(struct ptlrpc_reply_state *rs);
1138 int __lustre_unpack_msg(struct lustre_msg *m, int len);
1139 int lustre_msg_hdr_size(__u32 magic, int count);
1140 int lustre_msg_size(__u32 magic, int count, __u32 *lengths);
1141 int lustre_msg_size_v2(int count, __u32 *lengths);
1142 int lustre_packed_msg_size(struct lustre_msg *msg);
1143 int lustre_msg_early_size(void);
1144 void *lustre_msg_buf_v2(struct lustre_msg_v2 *m, int n, int min_size);
1145 void *lustre_msg_buf(struct lustre_msg *m, int n, int minlen);
1146 int lustre_msg_buflen(struct lustre_msg *m, int n);
1147 void lustre_msg_set_buflen(struct lustre_msg *m, int n, int len);
1148 int lustre_msg_bufcount(struct lustre_msg *m);
1149 char *lustre_msg_string (struct lustre_msg *m, int n, int max_len);
1150 __u32 lustre_msghdr_get_flags(struct lustre_msg *msg);
1151 void lustre_msghdr_set_flags(struct lustre_msg *msg, __u32 flags);
1152 __u32 lustre_msg_get_flags(struct lustre_msg *msg);
1153 void lustre_msg_add_flags(struct lustre_msg *msg, int flags);
1154 void lustre_msg_set_flags(struct lustre_msg *msg, int flags);
1155 void lustre_msg_clear_flags(struct lustre_msg *msg, int flags);
1156 __u32 lustre_msg_get_op_flags(struct lustre_msg *msg);
1157 void lustre_msg_add_op_flags(struct lustre_msg *msg, int flags);
1158 void lustre_msg_set_op_flags(struct lustre_msg *msg, int flags);
1159 struct lustre_handle *lustre_msg_get_handle(struct lustre_msg *msg);
1160 __u32 lustre_msg_get_type(struct lustre_msg *msg);
1161 __u32 lustre_msg_get_version(struct lustre_msg *msg);
1162 void lustre_msg_add_version(struct lustre_msg *msg, int version);
1163 __u32 lustre_msg_get_opc(struct lustre_msg *msg);
1164 __u64 lustre_msg_get_last_xid(struct lustre_msg *msg);
1165 __u64 lustre_msg_get_last_committed(struct lustre_msg *msg);
1166 __u64 *lustre_msg_get_versions(struct lustre_msg *msg);
1167 __u64 lustre_msg_get_transno(struct lustre_msg *msg);
1168 __u64 lustre_msg_get_slv(struct lustre_msg *msg);
1169 __u32 lustre_msg_get_limit(struct lustre_msg *msg);
1170 void lustre_msg_set_slv(struct lustre_msg *msg, __u64 slv);
1171 void lustre_msg_set_limit(struct lustre_msg *msg, __u64 limit);
1172 int lustre_msg_get_status(struct lustre_msg *msg);
1173 __u32 lustre_msg_get_conn_cnt(struct lustre_msg *msg);
1174 int lustre_msg_is_v1(struct lustre_msg *msg);
1175 __u32 lustre_msg_get_magic(struct lustre_msg *msg);
1176 __u32 lustre_msg_get_timeout(struct lustre_msg *msg);
1177 __u32 lustre_msg_get_service_time(struct lustre_msg *msg);
1178 __u32 lustre_msg_get_cksum(struct lustre_msg *msg);
1179 #if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 9, 0, 0)
1180 __u32 lustre_msg_calc_cksum(struct lustre_msg *msg, int compat18);
1182 # warning "remove checksum compatibility support for b1_8"
1183 __u32 lustre_msg_calc_cksum(struct lustre_msg *msg);
1185 void lustre_msg_set_handle(struct lustre_msg *msg,struct lustre_handle *handle);
1186 void lustre_msg_set_type(struct lustre_msg *msg, __u32 type);
1187 void lustre_msg_set_opc(struct lustre_msg *msg, __u32 opc);
1188 void lustre_msg_set_last_xid(struct lustre_msg *msg, __u64 last_xid);
1189 void lustre_msg_set_last_committed(struct lustre_msg *msg,__u64 last_committed);
1190 void lustre_msg_set_versions(struct lustre_msg *msg, __u64 *versions);
1191 void lustre_msg_set_transno(struct lustre_msg *msg, __u64 transno);
1192 void lustre_msg_set_status(struct lustre_msg *msg, __u32 status);
1193 void lustre_msg_set_conn_cnt(struct lustre_msg *msg, __u32 conn_cnt);
1194 void ptlrpc_req_set_repsize(struct ptlrpc_request *req, int count, __u32 *sizes);
1195 void ptlrpc_request_set_replen(struct ptlrpc_request *req);
1196 void lustre_msg_set_timeout(struct lustre_msg *msg, __u32 timeout);
1197 void lustre_msg_set_service_time(struct lustre_msg *msg, __u32 service_time);
1198 void lustre_msg_set_cksum(struct lustre_msg *msg, __u32 cksum);
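/* Illustrative sketch (not part of the original header): a server-side
 * handler packing its reply with the helpers above.  REPLY_REC_OFF,
 * struct ptlrpc_body and struct ost_body are assumed from the protocol
 * headers.
 *
 *      __u32 sizes[2] = { sizeof(struct ptlrpc_body),
 *                         sizeof(struct ost_body) };
 *      struct ost_body *body;
 *      int rc;
 *
 *      rc = lustre_pack_reply(req, 2, sizes, NULL);
 *      if (rc)
 *              return rc;
 *      body = lustre_msg_buf(req->rq_repmsg, REPLY_REC_OFF, sizeof(*body));
 *
 * The handler fills in *body and returns; ptlrpc sends the packed reply. */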
1201 lustre_shrink_reply(struct ptlrpc_request *req, int segment,
1202 unsigned int newlen, int move_data)
1204 LASSERT(req->rq_reply_state);
1205 LASSERT(req->rq_repmsg);
1206 req->rq_replen = lustre_shrink_msg(req->rq_repmsg, segment,
1211 ptlrpc_rqphase_move(struct ptlrpc_request *req, enum rq_phase new_phase)
1213 if (req->rq_phase == new_phase)
1216 if (new_phase == RQ_PHASE_UNREGISTERING) {
1217 req->rq_next_phase = req->rq_phase;
1219 cfs_atomic_inc(&req->rq_import->imp_unregistering);
1222 if (req->rq_phase == RQ_PHASE_UNREGISTERING) {
1224 cfs_atomic_dec(&req->rq_import->imp_unregistering);
1227 DEBUG_REQ(D_INFO, req, "move req \"%s\" -> \"%s\"",
1228 ptlrpc_rqphase2str(req), ptlrpc_phase2str(new_phase));
1230 req->rq_phase = new_phase;
1234 ptlrpc_client_early(struct ptlrpc_request *req)
1236 if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
1237 req->rq_reply_deadline > cfs_time_current_sec())
1239 return req->rq_early;
1243 ptlrpc_client_replied(struct ptlrpc_request *req)
1245 if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
1246 req->rq_reply_deadline > cfs_time_current_sec())
1248 return req->rq_replied;
1252 ptlrpc_client_recv(struct ptlrpc_request *req)
1254 if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
1255 req->rq_reply_deadline > cfs_time_current_sec())
1257 return req->rq_receiving_reply;
1261 ptlrpc_client_recv_or_unlink(struct ptlrpc_request *req)
1265 cfs_spin_lock(&req->rq_lock);
1266 if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
1267 req->rq_reply_deadline > cfs_time_current_sec()) {
1268 cfs_spin_unlock(&req->rq_lock);
1271 rc = req->rq_receiving_reply || req->rq_must_unlink;
1272 cfs_spin_unlock(&req->rq_lock);
1277 ptlrpc_client_wake_req(struct ptlrpc_request *req)
1279 if (req->rq_set == NULL)
1280 cfs_waitq_signal(&req->rq_reply_waitq);
1282 cfs_waitq_signal(&req->rq_set->set_waitq);
1286 ptlrpc_rs_addref(struct ptlrpc_reply_state *rs)
1288 LASSERT(cfs_atomic_read(&rs->rs_refcount) > 0);
1289 cfs_atomic_inc(&rs->rs_refcount);
1293 ptlrpc_rs_decref(struct ptlrpc_reply_state *rs)
1295 LASSERT(cfs_atomic_read(&rs->rs_refcount) > 0);
1296 if (cfs_atomic_dec_and_test(&rs->rs_refcount))
1297 lustre_free_reply_state(rs);
1300 /* Should only be called once per req */
1301 static inline void ptlrpc_req_drop_rs(struct ptlrpc_request *req)
1303 if (req->rq_reply_state == NULL)
1304 return; /* shouldn't occur */
1305 ptlrpc_rs_decref(req->rq_reply_state);
1306 req->rq_reply_state = NULL;
1307 req->rq_repmsg = NULL;
1310 static inline __u32 lustre_request_magic(struct ptlrpc_request *req)
1312 return lustre_msg_get_magic(req->rq_reqmsg);
1315 static inline int ptlrpc_req_get_repsize(struct ptlrpc_request *req)
1317 switch (req->rq_reqmsg->lm_magic) {
1318 case LUSTRE_MSG_MAGIC_V2:
1319 return req->rq_reqmsg->lm_repsize;
1321 LASSERTF(0, "incorrect message magic: %08x\n",
1322 req->rq_reqmsg->lm_magic);
1327 /* ldlm/ldlm_lib.c */
1328 int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg);
1329 int client_obd_cleanup(struct obd_device *obddev);
1330 int client_connect_import(const struct lu_env *env,
1331 struct obd_export **exp, struct obd_device *obd,
1332 struct obd_uuid *cluuid, struct obd_connect_data *,
1334 int client_disconnect_export(struct obd_export *exp);
1335 int client_import_add_conn(struct obd_import *imp, struct obd_uuid *uuid,
1337 int client_import_del_conn(struct obd_import *imp, struct obd_uuid *uuid);
1338 int import_set_conn_priority(struct obd_import *imp, struct obd_uuid *uuid);
1339 void client_destroy_import(struct obd_import *imp);
1341 int server_disconnect_export(struct obd_export *exp);
1343 /* ptlrpc/pinger.c */
1344 enum timeout_event {
1347 struct timeout_item;
1348 typedef int (*timeout_cb_t)(struct timeout_item *, void *);
1349 int ptlrpc_pinger_add_import(struct obd_import *imp);
1350 int ptlrpc_pinger_del_import(struct obd_import *imp);
1351 int ptlrpc_add_timeout_client(int time, enum timeout_event event,
1352 timeout_cb_t cb, void *data,
1353 cfs_list_t *obd_list);
1354 int ptlrpc_del_timeout_client(cfs_list_t *obd_list,
1355 enum timeout_event event);
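/* Illustrative sketch (not part of the original header): registering a pinger
 * timeout callback.  "my_timeout_event" stands for one of the timeout_event
 * values above, "my_obd_list" is a caller-owned list head, and obd_timeout is
 * the global timeout from obd_support.h.
 *
 *      static int my_timeout_cb(struct timeout_item *item, void *data)
 *      {
 *              ...
 *              return 0;
 *      }
 *
 *      rc = ptlrpc_add_timeout_client(obd_timeout, my_timeout_event,
 *                                     my_timeout_cb, data, &my_obd_list);
 *      ...
 *      ptlrpc_del_timeout_client(&my_obd_list, my_timeout_event);
 */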
1356 struct ptlrpc_request *ptlrpc_prep_ping(struct obd_import *imp);
1357 int ptlrpc_obd_ping(struct obd_device *obd);
1358 cfs_time_t ptlrpc_suspend_wakeup_time(void);
1360 void ping_evictor_start(void);
1361 void ping_evictor_stop(void);
1363 #define ping_evictor_start() do {} while (0)
1364 #define ping_evictor_stop() do {} while (0)
1366 int ptlrpc_check_and_wait_suspend(struct ptlrpc_request *req);
1368 /* ptlrpc/ptlrpcd.c */
1371 * Ptlrpcd scope is a set of two threads: ptlrpcd-foo and ptlrpcd-foo-rcv,
1372 * these threads are used to asynchronously send requests queued with
1373 * ptlrpcd_add_req(req, PSCOPE_FOO), and to handle completion call-backs for
1374 * such requests. Multiple scopes are needed to avoid deadlocks.
1376 enum ptlrpcd_scope {
1377 /** Scope of bulk read-write rpcs. */
1379 /** Everything else. */
1384 int ptlrpcd_start(const char *name, struct ptlrpcd_ctl *pc);
1385 void ptlrpcd_stop(struct ptlrpcd_ctl *pc, int force);
1386 void ptlrpcd_wake(struct ptlrpc_request *req);
1387 int ptlrpcd_add_req(struct ptlrpc_request *req, enum ptlrpcd_scope scope);
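/* Illustrative sketch (not part of the original header): sending a request
 * asynchronously through ptlrpcd instead of blocking in ptlrpc_queue_wait().
 * PSCOPE_OTHER is assumed to be one of the ptlrpcd_scope values above;
 * "my_interpret" is a hypothetical ptlrpc_interpterer_t.
 *
 *      req->rq_interpret_reply = my_interpret;
 *      rc = ptlrpcd_add_req(req, PSCOPE_OTHER);
 *
 * On success the request is owned by ptlrpcd; completion is reported through
 * the interpret callback, and the caller must not touch req afterwards. */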
1388 void ptlrpcd_add_rqset(struct ptlrpc_request_set *set);
1389 int ptlrpcd_addref(void);
1390 void ptlrpcd_decref(void);
1392 /* ptlrpc/lproc_ptlrpc.c */
1393 const char *ll_opcode2str(__u32 opcode);
1395 void ptlrpc_lprocfs_register_obd(struct obd_device *obd);
1396 void ptlrpc_lprocfs_unregister_obd(struct obd_device *obd);
1397 void ptlrpc_lprocfs_brw(struct ptlrpc_request *req, int bytes);
1399 static inline void ptlrpc_lprocfs_register_obd(struct obd_device *obd) {}
1400 static inline void ptlrpc_lprocfs_unregister_obd(struct obd_device *obd) {}
1401 static inline void ptlrpc_lprocfs_brw(struct ptlrpc_request *req, int bytes) {}
1404 /* ptlrpc/llog_server.c */
1405 int llog_origin_handle_create(struct ptlrpc_request *req);
1406 int llog_origin_handle_destroy(struct ptlrpc_request *req);
1407 int llog_origin_handle_prev_block(struct ptlrpc_request *req);
1408 int llog_origin_handle_next_block(struct ptlrpc_request *req);
1409 int llog_origin_handle_read_header(struct ptlrpc_request *req);
1410 int llog_origin_handle_close(struct ptlrpc_request *req);
1411 int llog_origin_handle_cancel(struct ptlrpc_request *req);
1412 int llog_catinfo(struct ptlrpc_request *req);
1414 /* ptlrpc/llog_client.c */
1415 extern struct llog_operations llog_client_ops;