1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 only,
10 * as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License version 2 for more details (a copy is included
16 * in the LICENSE file that accompanied this code).
18 * You should have received a copy of the GNU General Public License
19 * version 2 along with this program; If not, see
20 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23 * CA 95054 USA or visit www.sun.com if you need additional information or
29 * Copyright 2008 Sun Microsystems, Inc. All rights reserved
30 * Use is subject to license terms.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
45 #if defined(__linux__)
46 #include <linux/lustre_net.h>
47 #elif defined(__APPLE__)
48 #include <darwin/lustre_net.h>
49 #elif defined(__WINNT__)
50 #include <winnt/lustre_net.h>
52 #error Unsupported operating system.
55 #include <libcfs/libcfs.h>
57 #include <lnet/lnet.h>
58 #include <lustre/lustre_idl.h>
59 #include <lustre_ha.h>
60 #include <lustre_sec.h>
61 #include <lustre_import.h>
62 #include <lprocfs_status.h>
63 #include <lu_object.h>
64 #include <lustre_req_layout.h>
66 #include <obd_support.h>
68 /* MD flags we _always_ use */
69 #define PTLRPC_MD_OPTIONS 0
71 /* Define maxima for bulk I/O
72 * CAVEAT EMPTOR, with multinet (i.e. routers forwarding between networks)
73 * these limits are system wide and not interface-local. */
74 #define PTLRPC_MAX_BRW_BITS LNET_MTU_BITS
75 #define PTLRPC_MAX_BRW_SIZE (1<<LNET_MTU_BITS)
76 #define PTLRPC_MAX_BRW_PAGES (PTLRPC_MAX_BRW_SIZE >> CFS_PAGE_SHIFT)
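/*
 * For illustration (not part of the original header): assuming LNET_MTU_BITS
 * is 20 (a 1 MB LNET MTU) and CFS_PAGE_SHIFT is 12 (4 KB pages), the defines
 * above work out to PTLRPC_MAX_BRW_SIZE = 1 << 20 = 1 MB and
 * PTLRPC_MAX_BRW_PAGES = (1 MB >> 12) = 256 pages per bulk RPC.
 */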
78 /* When PAGE_SIZE is a constant, we can check our arithmetic here with cpp! */
80 # if ((PTLRPC_MAX_BRW_PAGES & (PTLRPC_MAX_BRW_PAGES - 1)) != 0)
81 # error "PTLRPC_MAX_BRW_PAGES isn't a power of two"
83 # if (PTLRPC_MAX_BRW_SIZE != (PTLRPC_MAX_BRW_PAGES * CFS_PAGE_SIZE))
84 # error "PTLRPC_MAX_BRW_SIZE isn't PTLRPC_MAX_BRW_PAGES * CFS_PAGE_SIZE"
86 # if (PTLRPC_MAX_BRW_SIZE > LNET_MTU)
87 # error "PTLRPC_MAX_BRW_SIZE too big"
89 # if (PTLRPC_MAX_BRW_PAGES > LNET_MAX_IOV)
90 # error "PTLRPC_MAX_BRW_PAGES too big"
92 #endif /* __KERNEL__ */
94 /* Size over which to OBD_VMALLOC() rather than OBD_ALLOC() service request buffers */
96 #define SVC_BUF_VMALLOC_THRESHOLD (2 * CFS_PAGE_SIZE)
98 /* The following constants determine how memory is used to buffer incoming
101 * ?_NBUFS # buffers to allocate when growing the pool
102 * ?_BUFSIZE # bytes in a single request buffer
103 * ?_MAXREQSIZE # maximum request service will receive
105 * When fewer than ?_NBUFS/2 buffers are posted for receive, another chunk
106 * of ?_NBUFS is added to the pool.
108 * Messages larger than ?_MAXREQSIZE are dropped. Request buffers are
109 * considered full when less than ?_MAXREQSIZE is left in them.
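 * For example, on a node with 4 online CPUs the LDLM service below gets
 * LDLM_NBUFS = 64 * 4 = 256 request buffers of LDLM_BUFSIZE = 8 KB each,
 * and another chunk of 256 buffers is allocated once fewer than 128 remain
 * posted for receive.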
112 #define LDLM_THREADS_AUTO_MIN (2)
113 #define LDLM_THREADS_AUTO_MAX min_t(unsigned, cfs_num_online_cpus() * \
114 cfs_num_online_cpus() * 32, 128)
115 #define LDLM_BL_THREADS LDLM_THREADS_AUTO_MIN
116 #define LDLM_NBUFS (64 * cfs_num_online_cpus())
117 #define LDLM_BUFSIZE (8 * 1024)
118 #define LDLM_MAXREQSIZE (5 * 1024)
119 #define LDLM_MAXREPSIZE (1024)
121 #define MDT_MIN_THREADS 2UL
122 #define MDT_MAX_THREADS 512UL
123 #define MDT_NUM_THREADS max(min_t(unsigned long, MDT_MAX_THREADS, \
124 cfs_num_physpages >> (25 - CFS_PAGE_SHIFT)), \
127 /* Absolute limits */
128 #define MDS_THREADS_MIN 2
129 #define MDS_THREADS_MAX 512
130 #define MDS_THREADS_MIN_READPAGE 2
131 #define MDS_NBUFS (64 * cfs_num_online_cpus())
132 #define MDS_BUFSIZE (8 * 1024)
133 /* Assume file name length = FNAME_MAX = 256 (true for ext3).
134 * path name length = PATH_MAX = 4096
135 * LOV MD size max = EA_MAX = 4000
136 * symlink: FNAME_MAX + PATH_MAX <- largest
137 * link: FNAME_MAX + PATH_MAX (mds_rec_link < mds_rec_create)
138 * rename: FNAME_MAX + FNAME_MAX
139 * open: FNAME_MAX + EA_MAX
141 * MDS_MAXREQSIZE ~= 4736 bytes =
142 * lustre_msg + ldlm_request + mds_body + mds_rec_create + FNAME_MAX + PATH_MAX
143 * MDS_MAXREPSIZE ~= 8300 bytes = lustre_msg + llog_header
144 * or, for mds_close() and mds_reint_unlink() on a many-OST filesystem:
145 * = 9210 bytes = lustre_msg + mds_body + 160 * (easize + cookiesize)
147 * Realistic size is about 512 bytes (20 character name + 128 char symlink),
148 * except in the open case where there are a large number of OSTs in a LOV.
150 #define MDS_MAXREQSIZE (5 * 1024)
151 #define MDS_MAXREPSIZE max(9 * 1024, 362 + LOV_MAX_STRIPE_COUNT * 56)
153 /* FLD_MAXREQSIZE == lustre_msg + __u32 padding + ptlrpc_body + opc + md_fld */
154 #define FLD_MAXREQSIZE (160)
156 /* FLD_MAXREPSIZE == lustre_msg + ptlrpc_body + md_fld */
157 #define FLD_MAXREPSIZE (152)
159 /* SEQ_MAXREQSIZE == lustre_msg + __u32 padding + ptlrpc_body + opc + lu_range +
161 #define SEQ_MAXREQSIZE (160)
163 /* SEQ_MAXREPSIZE == lustre_msg + ptlrpc_body + lu_range */
164 #define SEQ_MAXREPSIZE (152)
166 #define MGS_THREADS_AUTO_MIN 2
167 #define MGS_THREADS_AUTO_MAX 32
168 #define MGS_NBUFS (64 * cfs_num_online_cpus())
169 #define MGS_BUFSIZE (8 * 1024)
170 #define MGS_MAXREQSIZE (7 * 1024)
171 #define MGS_MAXREPSIZE (9 * 1024)
173 /* Absolute limits */
174 #define OSS_THREADS_MIN 3 /* difficult replies, HPQ, others */
175 #define OSS_THREADS_MAX 512
176 #define OST_NBUFS (64 * cfs_num_online_cpus())
177 #define OST_BUFSIZE (8 * 1024)
178 /* OST_MAXREQSIZE ~= 4768 bytes =
179 * lustre_msg + obdo + 16 * obd_ioobj + 256 * niobuf_remote
181 * - single object with 16 pages is 512 bytes
182 * - OST_MAXREQSIZE must be at least 1 page of cookies plus some spillover
184 #define OST_MAXREQSIZE (5 * 1024)
185 #define OST_MAXREPSIZE (9 * 1024)
187 /* Macro to hide a typecast. */
188 #define ptlrpc_req_async_args(req) ((void *)&req->rq_async_args)
190 struct ptlrpc_connection {
191 cfs_hlist_node_t c_hash;
193 lnet_process_id_t c_peer;
194 struct obd_uuid c_remote_uuid;
195 cfs_atomic_t c_refcount;
198 struct ptlrpc_client {
199 __u32 cli_request_portal;
200 __u32 cli_reply_portal;
204 /* state flags of requests */
205 /* XXX only ones left are those used by the bulk descs as well! */
206 #define PTL_RPC_FL_INTR (1 << 0) /* reply wait was interrupted by user */
207 #define PTL_RPC_FL_TIMEOUT (1 << 7) /* request timed out waiting for reply */
209 #define REQ_MAX_ACK_LOCKS 8
211 union ptlrpc_async_args {
212 /* Scratchpad for passing args to completion interpreter. Users
213 * cast to the struct of their choosing, and LASSERT that this is
214 * big enough. For _tons_ of context, OBD_ALLOC a struct and store
215 * a pointer to it here. The pointer_arg ensures this struct is at
216 * least big enough for that. */
217 void *pointer_arg[11];
221 struct ptlrpc_request_set;
222 typedef int (*set_interpreter_func)(struct ptlrpc_request_set *, void *, int);
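/*
 * Illustrative sketch (not part of the original header) of the scratchpad
 * pattern described above: the caller stashes a small per-request struct in
 * rq_async_args via ptlrpc_req_async_args() and reads it back in the
 * completion interpreter, which is invoked with &req->rq_async_args as its
 * "args" argument.  "my_async_args", "my_interpret" and "my_export" are
 * hypothetical names.
 *
 *	struct my_async_args {
 *		struct obd_export *aa_exp;
 *	};
 *
 *	static int my_interpret(const struct lu_env *env,
 *				struct ptlrpc_request *req, void *args, int rc)
 *	{
 *		struct my_async_args *aa = args;
 *
 *		// rc, req->rq_repmsg and aa->aa_exp are valid here
 *		return rc;
 *	}
 *
 *	// at request setup time, with req already allocated:
 *	struct my_async_args *aa;
 *
 *	CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
 *	aa = ptlrpc_req_async_args(req);
 *	aa->aa_exp = my_export;
 *	req->rq_interpret_reply = my_interpret;
 */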
224 struct ptlrpc_request_set {
225 int set_remaining; /* # uncompleted requests */
226 cfs_waitq_t set_waitq;
227 cfs_waitq_t *set_wakeup_ptr;
228 cfs_list_t set_requests;
229 cfs_list_t set_cblist; /* list of completion callbacks */
230 set_interpreter_func set_interpret; /* completion callback */
231 void *set_arg; /* completion context */
232 /* locked so that any old caller can communicate requests to
233 * the set holder who can then fold them into the lock-free set */
234 cfs_spinlock_t set_new_req_lock;
235 cfs_list_t set_new_requests;
238 struct ptlrpc_set_cbdata {
240 set_interpreter_func psc_interpret;
244 struct ptlrpc_bulk_desc;
247 * ptlrpc callback & work item stuff
249 struct ptlrpc_cb_id {
250 void (*cbid_fn)(lnet_event_t *ev); /* specific callback fn */
251 void *cbid_arg; /* additional arg */
254 #define RS_MAX_LOCKS 8
257 struct ptlrpc_reply_state {
258 struct ptlrpc_cb_id rs_cb_id;
260 cfs_list_t rs_exp_list;
261 cfs_list_t rs_obd_list;
263 cfs_list_t rs_debug_list;
265 /* A spinlock to protect the reply state flags */
266 cfs_spinlock_t rs_lock;
267 /* Reply state flags */
268 unsigned long rs_difficult:1; /* ACK/commit stuff */
269 unsigned long rs_no_ack:1; /* no ACK, even for
270 difficult requests */
271 unsigned long rs_scheduled:1; /* being handled? */
272 unsigned long rs_scheduled_ever:1;/* any schedule attempts? */
273 unsigned long rs_handled:1; /* been handled yet? */
274 unsigned long rs_on_net:1; /* reply_out_callback pending? */
275 unsigned long rs_prealloc:1; /* rs from prealloc list */
276 unsigned long rs_committed:1;/* the transaction was committed
277 and the rs was dispatched
278 by ptlrpc_commit_replies */
283 struct obd_export *rs_export;
284 struct ptlrpc_service *rs_service;
285 lnet_handle_md_t rs_md_h;
286 cfs_atomic_t rs_refcount;
288 struct ptlrpc_svc_ctx *rs_svc_ctx;
289 struct lustre_msg *rs_repbuf; /* wrapper */
290 int rs_repbuf_len; /* wrapper buf length */
291 int rs_repdata_len; /* wrapper msg length */
292 struct lustre_msg *rs_msg; /* reply message */
294 /* locks awaiting client reply ACK */
296 struct lustre_handle rs_locks[RS_MAX_LOCKS];
297 ldlm_mode_t rs_modes[RS_MAX_LOCKS];
300 struct ptlrpc_thread;
303 RQ_PHASE_NEW = 0xebc0de00,
304 RQ_PHASE_RPC = 0xebc0de01,
305 RQ_PHASE_BULK = 0xebc0de02,
306 RQ_PHASE_INTERPRET = 0xebc0de03,
307 RQ_PHASE_COMPLETE = 0xebc0de04,
308 RQ_PHASE_UNREGISTERING = 0xebc0de05,
309 RQ_PHASE_UNDEFINED = 0xebc0de06
312 /** Type of request interpreter call-back */
313 typedef int (*ptlrpc_interpterer_t)(const struct lu_env *env,
314 struct ptlrpc_request *req,
317 struct ptlrpc_request_pool {
318 cfs_spinlock_t prp_lock;
319 cfs_list_t prp_req_list; /* list of ptlrpc_request structs */
321 void (*prp_populate)(struct ptlrpc_request_pool *, int);
329 struct ptlrpc_hpreq_ops {
331 * Check if the lock handle of the given lock is the same as
332 * taken from the request.
334 int (*hpreq_lock_match)(struct ptlrpc_request *, struct ldlm_lock *);
336 * Check if the request is a high priority one.
338 int (*hpreq_check)(struct ptlrpc_request *);
342 * Represents remote procedure call.
344 struct ptlrpc_request {
345 int rq_type; /* one of PTL_RPC_MSG_* */
347 cfs_list_t rq_timed_list; /* server-side early replies */
348 cfs_list_t rq_history_list; /* server-side history */
349 cfs_list_t rq_exp_list; /* server-side per-export list */
350 struct ptlrpc_hpreq_ops *rq_ops; /* server-side hp handlers */
351 __u64 rq_history_seq; /* history sequence # */
352 /* the index of service's srv_at_array into which request is linked */
355 cfs_spinlock_t rq_lock;
356 /* client-side flags are serialized by rq_lock */
357 unsigned long rq_intr:1, rq_replied:1, rq_err:1,
358 rq_timedout:1, rq_resend:1, rq_restart:1,
360 * when ->rq_replay is set, request is kept by the client even
361 * after server commits corresponding transaction. This is
362 * used for operations that require sequence of multiple
363 * requests to be replayed. The only example currently is file
364 * open/close. When last request in such a sequence is
365 * committed, ->rq_replay is cleared on all requests in the
369 rq_no_resend:1, rq_waiting:1, rq_receiving_reply:1,
370 rq_no_delay:1, rq_net_err:1, rq_wait_ctx:1,
371 rq_early:1, rq_must_unlink:1,
372 rq_fake:1, /* this is a fake request */
373 /* server-side flags */
374 rq_packed_final:1, /* packed final reply */
375 rq_hp:1, /* high priority RPC */
376 rq_at_linked:1, /* link into service's srv_at_array */
380 enum rq_phase rq_phase; /* one of RQ_PHASE_* */
381 enum rq_phase rq_next_phase; /* one of RQ_PHASE_* to be used next */
382 cfs_atomic_t rq_refcount;/* client-side refcount for SENT race,
383 server-side refcount for multiple replies */
385 struct ptlrpc_thread *rq_svc_thread; /* initial thread servicing req */
387 int rq_request_portal; /* XXX FIXME bug 249 */
388 int rq_reply_portal; /* XXX FIXME bug 249 */
390 int rq_nob_received; /* client-side:
391 * !rq_truncate : # reply bytes actually received,
392 * rq_truncate : required repbuf_len for resend */
394 struct lustre_msg *rq_reqmsg;
397 struct lustre_msg *rq_repmsg;
400 cfs_list_t rq_replay_list;
402 struct ptlrpc_cli_ctx *rq_cli_ctx; /* client's half ctx */
403 struct ptlrpc_svc_ctx *rq_svc_ctx; /* server's half ctx */
404 cfs_list_t rq_ctx_chain; /* link to waited ctx */
406 struct sptlrpc_flavor rq_flvr; /* client & server */
407 enum lustre_sec_part rq_sp_from;
409 unsigned long /* client/server security flags */
410 rq_ctx_init:1, /* context initiation */
411 rq_ctx_fini:1, /* context destroy */
412 rq_bulk_read:1, /* request bulk read */
413 rq_bulk_write:1, /* request bulk write */
414 /* server authentication flags */
415 rq_auth_gss:1, /* authenticated by gss */
416 rq_auth_remote:1, /* authed as remote user */
417 rq_auth_usr_root:1, /* authed as root */
418 rq_auth_usr_mdt:1, /* authed as mdt */
419 /* security tfm flags */
422 /* doesn't expect reply FIXME */
424 rq_pill_init:1; /* pill initialized */
426 uid_t rq_auth_uid; /* authed uid */
427 uid_t rq_auth_mapped_uid; /* authed uid mapped to */
429 /* (server side), pointed directly into req buffer */
430 struct ptlrpc_user_desc *rq_user_desc;
432 /* early replies go to offset 0, regular replies go after that */
433 unsigned int rq_reply_off;
435 /* various buffer pointers */
436 struct lustre_msg *rq_reqbuf; /* req wrapper */
437 int rq_reqbuf_len; /* req wrapper buf len */
438 int rq_reqdata_len; /* req wrapper msg len */
439 char *rq_repbuf; /* rep buffer */
440 int rq_repbuf_len; /* rep buffer len */
441 struct lustre_msg *rq_repdata; /* rep wrapper msg */
442 int rq_repdata_len; /* rep wrapper msg len */
443 struct lustre_msg *rq_clrbuf; /* only in priv mode */
444 int rq_clrbuf_len; /* only in priv mode */
445 int rq_clrdata_len; /* only in priv mode */
447 __u32 rq_req_swab_mask;
448 __u32 rq_rep_swab_mask;
450 int rq_import_generation;
451 enum lustre_imp_state rq_send_state;
453 int rq_early_count; /* how many early replies (for stats) */
455 /* client+server request */
456 lnet_handle_md_t rq_req_md_h;
457 struct ptlrpc_cb_id rq_req_cbid;
460 struct timeval rq_arrival_time; /* request arrival time */
461 struct ptlrpc_reply_state *rq_reply_state; /* separated reply state */
462 struct ptlrpc_request_buffer_desc *rq_rqbd; /* incoming request buffer*/
464 __u32 rq_uid; /* peer uid, used in MDS only */
467 /* client-only incoming reply */
468 lnet_handle_md_t rq_reply_md_h;
469 cfs_waitq_t rq_reply_waitq;
470 struct ptlrpc_cb_id rq_reply_cbid;
473 lnet_process_id_t rq_peer;
474 struct obd_export *rq_export;
475 struct obd_import *rq_import;
477 void (*rq_replay_cb)(struct ptlrpc_request *);
478 void (*rq_commit_cb)(struct ptlrpc_request *);
481 struct ptlrpc_bulk_desc *rq_bulk;/* client side bulk */
483 /* client outgoing req */
484 time_t rq_sent; /* when request/reply sent (secs), or
485 * time when request should be sent */
487 volatile time_t rq_deadline; /* when request must finish. volatile
488 so that servers' early reply updates to the deadline aren't
489 kept in per-cpu cache */
490 time_t rq_reply_deadline; /* when req reply unlink must finish. */
491 time_t rq_bulk_deadline; /* when req bulk unlink must finish. */
492 int rq_timeout; /* service time estimate (secs) */
495 cfs_list_t rq_set_chain;
496 struct ptlrpc_request_set *rq_set;
497 /** Async completion handler */
498 ptlrpc_interpterer_t rq_interpret_reply;
499 union ptlrpc_async_args rq_async_args; /* Async completion context */
500 struct ptlrpc_request_pool *rq_pool; /* Pool if request from
502 struct lu_context rq_session;
503 struct lu_context rq_recov_session;
506 struct req_capsule rq_pill;
509 static inline int ptlrpc_req_interpret(const struct lu_env *env,
510 struct ptlrpc_request *req, int rc)
512 if (req->rq_interpret_reply != NULL) {
513 req->rq_status = req->rq_interpret_reply(env, req,
516 return req->rq_status;
521 static inline int lustre_req_swabbed(struct ptlrpc_request *req, int index)
523 LASSERT(index < sizeof(req->rq_req_swab_mask) * 8);
524 return req->rq_req_swab_mask & (1 << index);
527 static inline int lustre_rep_swabbed(struct ptlrpc_request *req, int index)
529 LASSERT(index < sizeof(req->rq_rep_swab_mask) * 8);
530 return req->rq_rep_swab_mask & (1 << index);
533 static inline int ptlrpc_req_need_swab(struct ptlrpc_request *req)
535 return lustre_req_swabbed(req, MSG_PTLRPC_HEADER_OFF);
538 static inline int ptlrpc_rep_need_swab(struct ptlrpc_request *req)
540 return lustre_rep_swabbed(req, MSG_PTLRPC_HEADER_OFF);
543 static inline void lustre_set_req_swabbed(struct ptlrpc_request *req, int index)
545 LASSERT(index < sizeof(req->rq_req_swab_mask) * 8);
546 LASSERT((req->rq_req_swab_mask & (1 << index)) == 0);
547 req->rq_req_swab_mask |= 1 << index;
550 static inline void lustre_set_rep_swabbed(struct ptlrpc_request *req, int index)
552 LASSERT(index < sizeof(req->rq_rep_swab_mask) * 8);
553 LASSERT((req->rq_rep_swab_mask & (1 << index)) == 0);
554 req->rq_rep_swab_mask |= 1 << index;
557 static inline const char *
558 ptlrpc_phase2str(enum rq_phase phase)
567 case RQ_PHASE_INTERPRET:
569 case RQ_PHASE_COMPLETE:
571 case RQ_PHASE_UNREGISTERING:
572 return "Unregistering";
578 static inline const char *
579 ptlrpc_rqphase2str(struct ptlrpc_request *req)
581 return ptlrpc_phase2str(req->rq_phase);
584 /* Spare the preprocessor, spoil the bugs. */
585 #define FLAG(field, str) (field ? str : "")
587 #define DEBUG_REQ_FLAGS(req) \
588 ptlrpc_rqphase2str(req), \
589 FLAG(req->rq_intr, "I"), FLAG(req->rq_replied, "R"), \
590 FLAG(req->rq_err, "E"), \
591 FLAG(req->rq_timedout, "X") /* eXpired */, FLAG(req->rq_resend, "S"), \
592 FLAG(req->rq_restart, "T"), FLAG(req->rq_replay, "P"), \
593 FLAG(req->rq_no_resend, "N"), \
594 FLAG(req->rq_waiting, "W"), \
595 FLAG(req->rq_wait_ctx, "C"), FLAG(req->rq_hp, "H"), \
596 FLAG(req->rq_committed, "M")
598 #define REQ_FLAGS_FMT "%s:%s%s%s%s%s%s%s%s%s%s%s%s"
600 void _debug_req(struct ptlrpc_request *req, __u32 mask,
601 struct libcfs_debug_msg_data *data, const char *fmt, ...)
602 __attribute__ ((format (printf, 4, 5)));
604 #define debug_req(cdls, level, req, file, func, line, fmt, a...) \
608 if (((level) & D_CANTMASK) != 0 || \
609 ((libcfs_debug & (level)) != 0 && \
610 (libcfs_subsystem_debug & DEBUG_SUBSYSTEM) != 0)) { \
611 static struct libcfs_debug_msg_data _req_dbg_data = \
612 DEBUG_MSG_DATA_INIT(cdls, DEBUG_SUBSYSTEM, file, func, line); \
613 _debug_req((req), (level), &_req_dbg_data, fmt, ##a); \
617 /* for most callers (level is a constant) this is resolved at compile time */
618 #define DEBUG_REQ(level, req, fmt, args...) \
620 if ((level) & (D_ERROR | D_WARNING)) { \
621 static cfs_debug_limit_state_t cdls; \
622 debug_req(&cdls, level, req, __FILE__, __func__, __LINE__, \
623 "@@@ "fmt" ", ## args); \
625 debug_req(NULL, level, req, __FILE__, __func__, __LINE__, \
626 "@@@ "fmt" ", ## args); \
629 struct ptlrpc_bulk_page {
632 int bp_pageoffset; /* offset within a page */
633 struct page *bp_page;
636 #define BULK_GET_SOURCE 0
637 #define BULK_PUT_SINK 1
638 #define BULK_GET_SINK 2
639 #define BULK_PUT_SOURCE 3
641 struct ptlrpc_bulk_desc {
642 unsigned long bd_success:1; /* completed successfully */
643 unsigned long bd_network_rw:1; /* accessible to the network */
644 unsigned long bd_type:2; /* {put,get}{source,sink} */
645 unsigned long bd_registered:1; /* client side */
646 cfs_spinlock_t bd_lock; /* serialise with callback */
647 int bd_import_generation;
648 struct obd_export *bd_export;
649 struct obd_import *bd_import;
651 struct ptlrpc_request *bd_req; /* associated request */
652 cfs_waitq_t bd_waitq; /* server side only WQ */
653 int bd_iov_count; /* # entries in bd_iov */
654 int bd_max_iov; /* allocated size of bd_iov */
655 int bd_nob; /* # bytes covered */
656 int bd_nob_transferred; /* # bytes GOT/PUT */
660 struct ptlrpc_cb_id bd_cbid; /* network callback info */
661 lnet_handle_md_t bd_md_h; /* associated MD */
662 lnet_nid_t bd_sender; /* stash event::sender */
664 #if defined(__KERNEL__)
666 * encrypt iov, size is either 0 or bd_iov_count.
668 lnet_kiov_t *bd_enc_iov;
670 lnet_kiov_t bd_iov[0];
672 lnet_md_iovec_t bd_iov[0];
676 struct ptlrpc_thread {
678 * active threads in svc->srv_threads
682 * thread-private data (preallocated memory)
687 * service thread index, from ptlrpc_start_threads
695 * put watchdog in the structure per thread b=14840
697 struct lc_watchdog *t_watchdog;
699 * the svc this thread belongs to b=18582
701 struct ptlrpc_service *t_svc;
702 cfs_waitq_t t_ctl_waitq;
703 struct lu_env *t_env;
706 struct ptlrpc_request_buffer_desc {
707 cfs_list_t rqbd_list;
708 cfs_list_t rqbd_reqs;
709 struct ptlrpc_service *rqbd_service;
710 lnet_handle_md_t rqbd_md_h;
713 struct ptlrpc_cb_id rqbd_cbid;
714 struct ptlrpc_request rqbd_req;
717 typedef int (*svc_handler_t)(struct ptlrpc_request *req);
718 typedef void (*svcreq_printfn_t)(void *, struct ptlrpc_request *);
719 typedef int (*svc_hpreq_handler_t)(struct ptlrpc_request *);
721 #define PTLRPC_SVC_HP_RATIO 10
723 struct ptlrpc_service {
724 cfs_list_t srv_list; /* chain thru all services */
725 int srv_max_req_size; /* biggest request to receive */
726 int srv_max_reply_size; /* biggest reply to send */
727 int srv_buf_size; /* size of individual buffers */
728 int srv_nbuf_per_group; /* # buffers to allocate in 1 group */
729 int srv_nbufs; /* total # req buffer descs allocated */
730 int srv_threads_min; /* threads to start at SOW */
731 int srv_threads_max; /* thread upper limit */
732 int srv_threads_started; /* index of last started thread */
733 int srv_threads_running; /* # running threads */
734 cfs_atomic_t srv_n_difficult_replies; /* # 'difficult' replies */
735 int srv_n_active_reqs; /* # reqs being served */
736 int srv_n_hpreq; /* # HPreqs being served */
737 cfs_duration_t srv_rqbd_timeout; /* timeout before re-posting reqs, in ticks */
738 int srv_watchdog_factor; /* soft watchdog timeout multiplier */
739 unsigned srv_cpu_affinity:1; /* bind threads to CPUs */
740 unsigned srv_at_check:1; /* check early replies */
741 unsigned srv_is_stopping:1; /* under unregister_service */
742 cfs_time_t srv_at_checktime; /* debug */
744 __u32 srv_req_portal;
745 __u32 srv_rep_portal;
748 struct adaptive_timeout srv_at_estimate;/* estimated rpc service time */
749 cfs_spinlock_t srv_at_lock;
750 struct ptlrpc_at_array srv_at_array; /* reqs waiting for replies */
751 cfs_timer_t srv_at_timer; /* early reply timer */
753 int srv_n_queued_reqs; /* # reqs in either of the queues below */
754 int srv_hpreq_count; /* # hp requests handled */
755 int srv_hpreq_ratio; /* # hp per lp reqs to handle */
756 cfs_list_t srv_req_in_queue; /* incoming reqs */
757 cfs_list_t srv_request_queue; /* reqs waiting for service */
758 cfs_list_t srv_request_hpq; /* high priority queue */
760 cfs_list_t srv_request_history; /* request history */
761 __u64 srv_request_seq; /* next request sequence # */
762 __u64 srv_request_max_cull_seq; /* highest seq culled from history */
763 svcreq_printfn_t srv_request_history_print_fn; /* service-specific print fn */
765 cfs_list_t srv_idle_rqbds; /* request buffers to be reposted */
766 cfs_list_t srv_active_rqbds; /* req buffers receiving */
767 cfs_list_t srv_history_rqbds; /* request buffer history */
768 int srv_nrqbd_receiving; /* # posted request buffers */
769 int srv_n_history_rqbds; /* # request buffers in history */
770 int srv_max_history_rqbds;/* max # request buffers in history */
772 cfs_atomic_t srv_outstanding_replies;
773 cfs_list_t srv_active_replies; /* all the active replies */
775 cfs_list_t srv_reply_queue; /* replies waiting for service */
777 cfs_waitq_t srv_waitq; /* all threads sleep on this. This
778 * wait-queue is signalled when a new
779 * incoming request arrives and when a
780 * difficult reply has to be handled. */
782 cfs_list_t srv_threads; /* service thread list */
783 svc_handler_t srv_handler;
784 svc_hpreq_handler_t srv_hpreq_handler; /* hp request handler */
786 char *srv_name; /* only statically allocated strings here; we don't clean them */
787 char *srv_thread_name; /* only statically allocated strings here; we don't clean them */
789 cfs_spinlock_t srv_lock;
791 cfs_proc_dir_entry_t *srv_procroot;
792 struct lprocfs_stats *srv_stats;
794 /* List of free reply_states */
795 cfs_list_t srv_free_rs_list;
796 /* waitq to run, when adding stuff to srv_free_rs_list */
797 cfs_waitq_t srv_free_rs_waitq;
800 * Tags for lu_context associated with this thread, see struct
805 * if non-NULL called during thread creation (ptlrpc_start_thread())
806 * to initialize service specific per-thread state.
808 int (*srv_init)(struct ptlrpc_thread *thread);
810 * if non-NULL called during thread shutdown (ptlrpc_main()) to
811 * destruct state created by ->srv_init().
813 void (*srv_done)(struct ptlrpc_thread *thread);
815 //struct ptlrpc_srv_ni srv_interfaces[0];
820 * Ptlrpc thread control flags (LIOD_START, LIOD_STOP, LIOD_FORCE)
822 unsigned long pc_flags;
824 * Thread lock protecting structure fields.
826 cfs_spinlock_t pc_lock;
830 cfs_completion_t pc_starting;
834 cfs_completion_t pc_finishing;
836 * Thread requests set.
838 struct ptlrpc_request_set *pc_set;
840 * Thread name used in cfs_daemonize()
844 * Environment for request interpreters to run in.
846 struct lu_env pc_env;
849 * Async rpcs flag to make sure that ptlrpcd_check() is called only once.
854 * Currently not used.
858 * User-space async rpcs callback.
860 void *pc_wait_callback;
862 * User-space check idle rpcs callback.
864 void *pc_idle_callback;
868 /* Bits for pc_flags */
869 enum ptlrpcd_ctl_flags {
871 * Ptlrpc thread start flag.
875 * Ptlrpc thread stop flag.
879 * Ptlrpc thread force flag (only stop force so far).
880 * When combined with LIOD_STOP, this causes any in-flight rpcs
881 * handled by the thread to be aborted.
885 * This is a recovery ptlrpc thread.
887 LIOD_RECOVERY = 1 << 3
890 /* ptlrpc/events.c */
891 extern lnet_handle_eq_t ptlrpc_eq_h;
892 extern int ptlrpc_uuid_to_peer(struct obd_uuid *uuid,
893 lnet_process_id_t *peer, lnet_nid_t *self);
894 extern void request_out_callback (lnet_event_t *ev);
895 extern void reply_in_callback(lnet_event_t *ev);
896 extern void client_bulk_callback (lnet_event_t *ev);
897 extern void request_in_callback(lnet_event_t *ev);
898 extern void reply_out_callback(lnet_event_t *ev);
899 extern void server_bulk_callback (lnet_event_t *ev);
901 /* ptlrpc/connection.c */
902 struct ptlrpc_connection *ptlrpc_connection_get(lnet_process_id_t peer,
904 struct obd_uuid *uuid);
905 int ptlrpc_connection_put(struct ptlrpc_connection *c);
906 struct ptlrpc_connection *ptlrpc_connection_addref(struct ptlrpc_connection *);
907 int ptlrpc_connection_init(void);
908 void ptlrpc_connection_fini(void);
909 extern lnet_pid_t ptl_get_pid(void);
911 /* ptlrpc/niobuf.c */
912 int ptlrpc_start_bulk_transfer(struct ptlrpc_bulk_desc *desc);
913 void ptlrpc_abort_bulk(struct ptlrpc_bulk_desc *desc);
914 int ptlrpc_register_bulk(struct ptlrpc_request *req);
915 int ptlrpc_unregister_bulk(struct ptlrpc_request *req, int async);
917 static inline int ptlrpc_server_bulk_active(struct ptlrpc_bulk_desc *desc)
921 LASSERT(desc != NULL);
923 cfs_spin_lock(&desc->bd_lock);
924 rc = desc->bd_network_rw;
925 cfs_spin_unlock(&desc->bd_lock);
929 static inline int ptlrpc_client_bulk_active(struct ptlrpc_request *req)
931 struct ptlrpc_bulk_desc *desc = req->rq_bulk;
934 LASSERT(req != NULL);
936 if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK) &&
937 req->rq_bulk_deadline > cfs_time_current_sec())
943 cfs_spin_lock(&desc->bd_lock);
944 rc = desc->bd_network_rw;
945 cfs_spin_unlock(&desc->bd_lock);
949 #define PTLRPC_REPLY_MAYBE_DIFFICULT 0x01
950 #define PTLRPC_REPLY_EARLY 0x02
951 int ptlrpc_send_reply(struct ptlrpc_request *req, int flags);
952 int ptlrpc_reply(struct ptlrpc_request *req);
953 int ptlrpc_send_error(struct ptlrpc_request *req, int difficult);
954 int ptlrpc_error(struct ptlrpc_request *req);
955 void ptlrpc_resend_req(struct ptlrpc_request *request);
956 int ptlrpc_at_get_net_latency(struct ptlrpc_request *req);
957 int ptl_send_rpc(struct ptlrpc_request *request, int noreply);
958 int ptlrpc_register_rqbd (struct ptlrpc_request_buffer_desc *rqbd);
960 /* ptlrpc/client.c */
961 void ptlrpc_init_client(int req_portal, int rep_portal, char *name,
962 struct ptlrpc_client *);
963 void ptlrpc_cleanup_client(struct obd_import *imp);
964 struct ptlrpc_connection *ptlrpc_uuid_to_connection(struct obd_uuid *uuid);
966 int ptlrpc_queue_wait(struct ptlrpc_request *req);
967 int ptlrpc_replay_req(struct ptlrpc_request *req);
968 int ptlrpc_unregister_reply(struct ptlrpc_request *req, int async);
969 void ptlrpc_restart_req(struct ptlrpc_request *req);
970 void ptlrpc_abort_inflight(struct obd_import *imp);
971 void ptlrpc_cleanup_imp(struct obd_import *imp);
972 void ptlrpc_abort_set(struct ptlrpc_request_set *set);
974 struct ptlrpc_request_set *ptlrpc_prep_set(void);
975 int ptlrpc_set_add_cb(struct ptlrpc_request_set *set,
976 set_interpreter_func fn, void *data);
977 int ptlrpc_set_next_timeout(struct ptlrpc_request_set *);
978 int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set);
979 int ptlrpc_set_wait(struct ptlrpc_request_set *);
980 int ptlrpc_expired_set(void *data);
981 void ptlrpc_interrupted_set(void *data);
982 void ptlrpc_mark_interrupted(struct ptlrpc_request *req);
983 void ptlrpc_set_destroy(struct ptlrpc_request_set *);
984 void ptlrpc_set_add_req(struct ptlrpc_request_set *, struct ptlrpc_request *);
985 int ptlrpc_set_add_new_req(struct ptlrpcd_ctl *pc,
986 struct ptlrpc_request *req);
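/*
 * Typical request-set usage, sketched for illustration (assuming the client
 * API declared in this file; error handling and request allocation omitted):
 *
 *	struct ptlrpc_request_set *set;
 *	int rc;
 *
 *	set = ptlrpc_prep_set();
 *	if (set == NULL)
 *		return -ENOMEM;
 *	ptlrpc_set_add_req(set, req1);
 *	ptlrpc_set_add_req(set, req2);
 *	rc = ptlrpc_set_wait(set);	// sends and waits for all requests
 *	ptlrpc_set_destroy(set);
 */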
988 void ptlrpc_free_rq_pool(struct ptlrpc_request_pool *pool);
989 void ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq);
991 struct ptlrpc_request_pool *
992 ptlrpc_init_rq_pool(int, int,
993 void (*populate_pool)(struct ptlrpc_request_pool *, int));
995 void ptlrpc_at_set_req_timeout(struct ptlrpc_request *req);
996 struct ptlrpc_request *ptlrpc_request_alloc(struct obd_import *imp,
997 const struct req_format *format);
998 struct ptlrpc_request *ptlrpc_request_alloc_pool(struct obd_import *imp,
999 struct ptlrpc_request_pool *,
1000 const struct req_format *format);
1001 void ptlrpc_request_free(struct ptlrpc_request *request);
1002 int ptlrpc_request_pack(struct ptlrpc_request *request,
1003 __u32 version, int opcode);
1004 struct ptlrpc_request *ptlrpc_request_alloc_pack(struct obd_import *imp,
1005 const struct req_format *format,
1006 __u32 version, int opcode);
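/*
 * A minimal synchronous RPC sketch using the helpers above (illustrative
 * only; RQF_OBD_PING, LUSTRE_OBD_VERSION and OBD_PING are assumed to come
 * from other Lustre headers):
 *
 *	struct ptlrpc_request *req;
 *	int rc;
 *
 *	req = ptlrpc_request_alloc_pack(imp, &RQF_OBD_PING,
 *					LUSTRE_OBD_VERSION, OBD_PING);
 *	if (req == NULL)
 *		return -ENOMEM;
 *	ptlrpc_request_set_replen(req);
 *	rc = ptlrpc_queue_wait(req);
 *	ptlrpc_req_finished(req);
 *	return rc;
 */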
1007 int ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
1008 __u32 version, int opcode, char **bufs,
1009 struct ptlrpc_cli_ctx *ctx);
1010 struct ptlrpc_request *ptlrpc_prep_fakereq(struct obd_import *imp,
1011 unsigned int timeout,
1012 ptlrpc_interpterer_t interpreter);
1013 void ptlrpc_fakereq_finished(struct ptlrpc_request *req);
1015 struct ptlrpc_request *ptlrpc_prep_req(struct obd_import *imp, __u32 version,
1016 int opcode, int count, __u32 *lengths,
1018 struct ptlrpc_request *ptlrpc_prep_req_pool(struct obd_import *imp,
1019 __u32 version, int opcode,
1020 int count, __u32 *lengths, char **bufs,
1021 struct ptlrpc_request_pool *pool);
1022 void ptlrpc_req_finished(struct ptlrpc_request *request);
1023 void ptlrpc_req_finished_with_imp_lock(struct ptlrpc_request *request);
1024 struct ptlrpc_request *ptlrpc_request_addref(struct ptlrpc_request *req);
1025 struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp (struct ptlrpc_request *req,
1026 int npages, int type, int portal);
1027 struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_exp(struct ptlrpc_request *req,
1028 int npages, int type, int portal);
1029 void ptlrpc_free_bulk(struct ptlrpc_bulk_desc *bulk);
1030 void ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
1031 cfs_page_t *page, int pageoffset, int len);
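/*
 * Client-side bulk setup, sketched for illustration ("npages", "pages" and
 * the OST_BULK_PORTAL value are assumptions from the caller's context;
 * BULK_PUT_SINK is the client-side type for a bulk read):
 *
 *	struct ptlrpc_bulk_desc *desc;
 *	int i;
 *
 *	desc = ptlrpc_prep_bulk_imp(req, npages, BULK_PUT_SINK,
 *				    OST_BULK_PORTAL);
 *	if (desc == NULL)
 *		return -ENOMEM;
 *	for (i = 0; i < npages; i++)
 *		ptlrpc_prep_bulk_page(desc, pages[i], 0, CFS_PAGE_SIZE);
 *	// desc is attached to req->rq_bulk and released with the request
 */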
1032 void ptlrpc_retain_replayable_request(struct ptlrpc_request *req,
1033 struct obd_import *imp);
1034 __u64 ptlrpc_next_xid(void);
1035 __u64 ptlrpc_sample_next_xid(void);
1036 __u64 ptlrpc_req_xid(struct ptlrpc_request *request);
1038 struct ptlrpc_service_conf {
1041 int psc_max_req_size;
1042 int psc_max_reply_size;
1045 int psc_watchdog_factor;
1046 int psc_min_threads;
1047 int psc_max_threads;
1051 /* ptlrpc/service.c */
1052 void ptlrpc_save_lock (struct ptlrpc_request *req,
1053 struct lustre_handle *lock, int mode, int no_ack);
1054 void ptlrpc_commit_replies(struct obd_export *exp);
1055 void ptlrpc_dispatch_difficult_reply (struct ptlrpc_reply_state *rs);
1056 void ptlrpc_schedule_difficult_reply (struct ptlrpc_reply_state *rs);
1057 struct ptlrpc_service *ptlrpc_init_svc_conf(struct ptlrpc_service_conf *c,
1058 svc_handler_t h, char *name,
1059 struct proc_dir_entry *proc_entry,
1060 svcreq_printfn_t prntfn,
1063 struct ptlrpc_service *ptlrpc_init_svc(int nbufs, int bufsize, int max_req_size,
1065 int req_portal, int rep_portal,
1066 int watchdog_factor,
1067 svc_handler_t, char *name,
1068 cfs_proc_dir_entry_t *proc_entry,
1070 int min_threads, int max_threads,
1071 char *threadname, __u32 ctx_tags,
1072 svc_hpreq_handler_t);
1073 void ptlrpc_stop_all_threads(struct ptlrpc_service *svc);
1075 int ptlrpc_start_threads(struct obd_device *dev, struct ptlrpc_service *svc);
1076 int ptlrpc_start_thread(struct obd_device *dev, struct ptlrpc_service *svc);
1077 int ptlrpc_unregister_service(struct ptlrpc_service *service);
1078 int liblustre_check_services (void *arg);
1079 void ptlrpc_daemonize(char *name);
1080 int ptlrpc_service_health_check(struct ptlrpc_service *);
1081 void ptlrpc_hpreq_reorder(struct ptlrpc_request *req);
1082 void ptlrpc_server_active_request_inc(struct ptlrpc_request *req);
1083 void ptlrpc_server_active_request_dec(struct ptlrpc_request *req);
1084 void ptlrpc_server_drop_request(struct ptlrpc_request *req);
1087 int ptlrpc_hr_init(void);
1088 void ptlrpc_hr_fini(void);
1090 # define ptlrpc_hr_init() (0)
1091 # define ptlrpc_hr_fini() do {} while(0)
1094 struct ptlrpc_svc_data {
1096 struct ptlrpc_service *svc;
1097 struct ptlrpc_thread *thread;
1098 struct obd_device *dev;
1101 /* ptlrpc/import.c */
1102 int ptlrpc_connect_import(struct obd_import *imp, char * new_uuid);
1103 int ptlrpc_init_import(struct obd_import *imp);
1104 int ptlrpc_disconnect_import(struct obd_import *imp, int noclose);
1105 int ptlrpc_import_recovery_state_machine(struct obd_import *imp);
1107 /* ptlrpc/pack_generic.c */
1108 int ptlrpc_reconnect_import(struct obd_import *imp);
1110 /** ptlrpc mgs buffer swab interface */
1111 int ptlrpc_buf_need_swab(struct ptlrpc_request *req, const int inout,
1113 void ptlrpc_buf_set_swabbed(struct ptlrpc_request *req, const int inout,
1115 int ptlrpc_unpack_rep_msg(struct ptlrpc_request *req, int len);
1116 int ptlrpc_unpack_req_msg(struct ptlrpc_request *req, int len);
1118 int lustre_msg_check_version(struct lustre_msg *msg, __u32 version);
1119 void lustre_init_msg_v2(struct lustre_msg_v2 *msg, int count, __u32 *lens,
1121 int lustre_pack_request(struct ptlrpc_request *, __u32 magic, int count,
1122 __u32 *lens, char **bufs);
1123 int lustre_pack_reply(struct ptlrpc_request *, int count, __u32 *lens,
1125 int lustre_pack_reply_v2(struct ptlrpc_request *req, int count,
1126 __u32 *lens, char **bufs, int flags);
1127 #define LPRFL_EARLY_REPLY 1
1128 int lustre_pack_reply_flags(struct ptlrpc_request *, int count, __u32 *lens,
1129 char **bufs, int flags);
1130 int lustre_shrink_msg(struct lustre_msg *msg, int segment,
1131 unsigned int newlen, int move_data);
1132 void lustre_free_reply_state(struct ptlrpc_reply_state *rs);
1133 int __lustre_unpack_msg(struct lustre_msg *m, int len);
1134 int lustre_msg_hdr_size(__u32 magic, int count);
1135 int lustre_msg_size(__u32 magic, int count, __u32 *lengths);
1136 int lustre_msg_size_v2(int count, __u32 *lengths);
1137 int lustre_packed_msg_size(struct lustre_msg *msg);
1138 int lustre_msg_early_size(void);
1139 void *lustre_msg_buf_v2(struct lustre_msg_v2 *m, int n, int min_size);
1140 void *lustre_msg_buf(struct lustre_msg *m, int n, int minlen);
1141 int lustre_msg_buflen(struct lustre_msg *m, int n);
1142 void lustre_msg_set_buflen(struct lustre_msg *m, int n, int len);
1143 int lustre_msg_bufcount(struct lustre_msg *m);
1144 char *lustre_msg_string (struct lustre_msg *m, int n, int max_len);
1145 __u32 lustre_msghdr_get_flags(struct lustre_msg *msg);
1146 void lustre_msghdr_set_flags(struct lustre_msg *msg, __u32 flags);
1147 __u32 lustre_msg_get_flags(struct lustre_msg *msg);
1148 void lustre_msg_add_flags(struct lustre_msg *msg, int flags);
1149 void lustre_msg_set_flags(struct lustre_msg *msg, int flags);
1150 void lustre_msg_clear_flags(struct lustre_msg *msg, int flags);
1151 __u32 lustre_msg_get_op_flags(struct lustre_msg *msg);
1152 void lustre_msg_add_op_flags(struct lustre_msg *msg, int flags);
1153 void lustre_msg_set_op_flags(struct lustre_msg *msg, int flags);
1154 struct lustre_handle *lustre_msg_get_handle(struct lustre_msg *msg);
1155 __u32 lustre_msg_get_type(struct lustre_msg *msg);
1156 __u32 lustre_msg_get_version(struct lustre_msg *msg);
1157 void lustre_msg_add_version(struct lustre_msg *msg, int version);
1158 __u32 lustre_msg_get_opc(struct lustre_msg *msg);
1159 __u64 lustre_msg_get_last_xid(struct lustre_msg *msg);
1160 __u64 lustre_msg_get_last_committed(struct lustre_msg *msg);
1161 __u64 *lustre_msg_get_versions(struct lustre_msg *msg);
1162 __u64 lustre_msg_get_transno(struct lustre_msg *msg);
1163 __u64 lustre_msg_get_slv(struct lustre_msg *msg);
1164 __u32 lustre_msg_get_limit(struct lustre_msg *msg);
1165 void lustre_msg_set_slv(struct lustre_msg *msg, __u64 slv);
1166 void lustre_msg_set_limit(struct lustre_msg *msg, __u64 limit);
1167 int lustre_msg_get_status(struct lustre_msg *msg);
1168 __u32 lustre_msg_get_conn_cnt(struct lustre_msg *msg);
1169 int lustre_msg_is_v1(struct lustre_msg *msg);
1170 __u32 lustre_msg_get_magic(struct lustre_msg *msg);
1171 __u32 lustre_msg_get_timeout(struct lustre_msg *msg);
1172 __u32 lustre_msg_get_service_time(struct lustre_msg *msg);
1173 __u32 lustre_msg_get_cksum(struct lustre_msg *msg);
1174 __u32 lustre_msg_calc_cksum(struct lustre_msg *msg);
1175 void lustre_msg_set_handle(struct lustre_msg *msg,struct lustre_handle *handle);
1176 void lustre_msg_set_type(struct lustre_msg *msg, __u32 type);
1177 void lustre_msg_set_opc(struct lustre_msg *msg, __u32 opc);
1178 void lustre_msg_set_last_xid(struct lustre_msg *msg, __u64 last_xid);
1179 void lustre_msg_set_last_committed(struct lustre_msg *msg,__u64 last_committed);
1180 void lustre_msg_set_versions(struct lustre_msg *msg, __u64 *versions);
1181 void lustre_msg_set_transno(struct lustre_msg *msg, __u64 transno);
1182 void lustre_msg_set_status(struct lustre_msg *msg, __u32 status);
1183 void lustre_msg_set_conn_cnt(struct lustre_msg *msg, __u32 conn_cnt);
1184 void ptlrpc_req_set_repsize(struct ptlrpc_request *req, int count, __u32 *sizes);
1185 void ptlrpc_request_set_replen(struct ptlrpc_request *req);
1186 void lustre_msg_set_timeout(struct lustre_msg *msg, __u32 timeout);
1187 void lustre_msg_set_service_time(struct lustre_msg *msg, __u32 service_time);
1188 void lustre_msg_set_cksum(struct lustre_msg *msg, __u32 cksum);
1191 lustre_shrink_reply(struct ptlrpc_request *req, int segment,
1192 unsigned int newlen, int move_data)
1194 LASSERT(req->rq_reply_state);
1195 LASSERT(req->rq_repmsg);
1196 req->rq_replen = lustre_shrink_msg(req->rq_repmsg, segment,
1201 ptlrpc_rqphase_move(struct ptlrpc_request *req, enum rq_phase new_phase)
1203 if (req->rq_phase == new_phase)
1206 if (new_phase == RQ_PHASE_UNREGISTERING) {
1207 req->rq_next_phase = req->rq_phase;
1209 cfs_atomic_inc(&req->rq_import->imp_unregistering);
1212 if (req->rq_phase == RQ_PHASE_UNREGISTERING) {
1214 cfs_atomic_dec(&req->rq_import->imp_unregistering);
1217 DEBUG_REQ(D_INFO, req, "move req \"%s\" -> \"%s\"",
1218 ptlrpc_rqphase2str(req), ptlrpc_phase2str(new_phase));
1220 req->rq_phase = new_phase;
1224 ptlrpc_client_early(struct ptlrpc_request *req)
1226 if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
1227 req->rq_reply_deadline > cfs_time_current_sec())
1229 return req->rq_early;
1233 ptlrpc_client_replied(struct ptlrpc_request *req)
1235 if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
1236 req->rq_reply_deadline > cfs_time_current_sec())
1238 return req->rq_replied;
1242 ptlrpc_client_recv(struct ptlrpc_request *req)
1244 if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
1245 req->rq_reply_deadline > cfs_time_current_sec())
1247 return req->rq_receiving_reply;
1251 ptlrpc_client_recv_or_unlink(struct ptlrpc_request *req)
1255 cfs_spin_lock(&req->rq_lock);
1256 if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
1257 req->rq_reply_deadline > cfs_time_current_sec()) {
1258 cfs_spin_unlock(&req->rq_lock);
1261 rc = req->rq_receiving_reply || req->rq_must_unlink;
1262 cfs_spin_unlock(&req->rq_lock);
1267 ptlrpc_client_wake_req(struct ptlrpc_request *req)
1269 if (req->rq_set == NULL)
1270 cfs_waitq_signal(&req->rq_reply_waitq);
1272 cfs_waitq_signal(&req->rq_set->set_waitq);
1276 ptlrpc_rs_addref(struct ptlrpc_reply_state *rs)
1278 LASSERT(cfs_atomic_read(&rs->rs_refcount) > 0);
1279 cfs_atomic_inc(&rs->rs_refcount);
1283 ptlrpc_rs_decref(struct ptlrpc_reply_state *rs)
1285 LASSERT(cfs_atomic_read(&rs->rs_refcount) > 0);
1286 if (cfs_atomic_dec_and_test(&rs->rs_refcount))
1287 lustre_free_reply_state(rs);
1290 /* Should only be called once per req */
1291 static inline void ptlrpc_req_drop_rs(struct ptlrpc_request *req)
1293 if (req->rq_reply_state == NULL)
1294 return; /* shouldn't occur */
1295 ptlrpc_rs_decref(req->rq_reply_state);
1296 req->rq_reply_state = NULL;
1297 req->rq_repmsg = NULL;
1300 static inline __u32 lustre_request_magic(struct ptlrpc_request *req)
1302 return lustre_msg_get_magic(req->rq_reqmsg);
1305 static inline int ptlrpc_req_get_repsize(struct ptlrpc_request *req)
1307 switch (req->rq_reqmsg->lm_magic) {
1308 case LUSTRE_MSG_MAGIC_V2:
1309 return req->rq_reqmsg->lm_repsize;
1311 LASSERTF(0, "incorrect message magic: %08x\n",
1312 req->rq_reqmsg->lm_magic);
1317 /* ldlm/ldlm_lib.c */
1318 int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg);
1319 int client_obd_cleanup(struct obd_device *obddev);
1320 int client_connect_import(const struct lu_env *env,
1321 struct obd_export **exp, struct obd_device *obd,
1322 struct obd_uuid *cluuid, struct obd_connect_data *,
1324 int client_disconnect_export(struct obd_export *exp);
1325 int client_import_add_conn(struct obd_import *imp, struct obd_uuid *uuid,
1327 int client_import_del_conn(struct obd_import *imp, struct obd_uuid *uuid);
1328 int import_set_conn_priority(struct obd_import *imp, struct obd_uuid *uuid);
1329 void client_destroy_import(struct obd_import *imp);
1331 int server_disconnect_export(struct obd_export *exp);
1333 /* ptlrpc/pinger.c */
1334 enum timeout_event {
1337 struct timeout_item;
1338 typedef int (*timeout_cb_t)(struct timeout_item *, void *);
1339 int ptlrpc_pinger_add_import(struct obd_import *imp);
1340 int ptlrpc_pinger_del_import(struct obd_import *imp);
1341 int ptlrpc_add_timeout_client(int time, enum timeout_event event,
1342 timeout_cb_t cb, void *data,
1343 cfs_list_t *obd_list);
1344 int ptlrpc_del_timeout_client(cfs_list_t *obd_list,
1345 enum timeout_event event);
1346 struct ptlrpc_request * ptlrpc_prep_ping(struct obd_import *imp);
1347 int ptlrpc_obd_ping(struct obd_device *obd);
1348 cfs_time_t ptlrpc_suspend_wakeup_time(void);
1350 void ping_evictor_start(void);
1351 void ping_evictor_stop(void);
1353 #define ping_evictor_start() do {} while (0)
1354 #define ping_evictor_stop() do {} while (0)
1356 int ptlrpc_check_and_wait_suspend(struct ptlrpc_request *req);
1358 /* ptlrpc/ptlrpcd.c */
1361 * Ptlrpcd scope is a set of two threads: ptlrpcd-foo and ptlrpcd-foo-rcv,
1362 * these threads are used to asynchronously send requests queued with
1363 * ptlrpcd_add_req(req, PSCOPE_FOO), and to handle completion call-backs for
1364 * such requests. Multiple scopes are needed to avoid dead-locks.
1366 enum ptlrpcd_scope {
1367 /** Scope of bulk read-write rpcs. */
1369 /** Everything else. */
1374 int ptlrpcd_start(const char *name, struct ptlrpcd_ctl *pc);
1375 void ptlrpcd_stop(struct ptlrpcd_ctl *pc, int force);
1376 void ptlrpcd_wake(struct ptlrpc_request *req);
1377 int ptlrpcd_add_req(struct ptlrpc_request *req, enum ptlrpcd_scope scope);
1378 void ptlrpcd_add_rqset(struct ptlrpc_request_set *set);
1379 int ptlrpcd_addref(void);
1380 void ptlrpcd_decref(void);
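/*
 * Asynchronous sending through ptlrpcd, sketched for illustration: a request
 * with rq_interpret_reply set is handed to the ptlrpcd threads instead of
 * being waited on synchronously.  PSCOPE_OTHER is assumed to be one of the
 * ptlrpcd_scope values elided above; my_interpret is the hypothetical
 * interpreter from the ptlrpc_async_args sketch.
 *
 *	req->rq_interpret_reply = my_interpret;
 *	rc = ptlrpcd_add_req(req, PSCOPE_OTHER);
 */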
1382 /* ptlrpc/lproc_ptlrpc.c */
1383 const char* ll_opcode2str(__u32 opcode);
1385 void ptlrpc_lprocfs_register_obd(struct obd_device *obd);
1386 void ptlrpc_lprocfs_unregister_obd(struct obd_device *obd);
1387 void ptlrpc_lprocfs_brw(struct ptlrpc_request *req, int bytes);
1389 static inline void ptlrpc_lprocfs_register_obd(struct obd_device *obd) {}
1390 static inline void ptlrpc_lprocfs_unregister_obd(struct obd_device *obd) {}
1391 static inline void ptlrpc_lprocfs_brw(struct ptlrpc_request *req, int bytes) {}
1394 /* ptlrpc/llog_server.c */
1395 int llog_origin_handle_create(struct ptlrpc_request *req);
1396 int llog_origin_handle_destroy(struct ptlrpc_request *req);
1397 int llog_origin_handle_prev_block(struct ptlrpc_request *req);
1398 int llog_origin_handle_next_block(struct ptlrpc_request *req);
1399 int llog_origin_handle_read_header(struct ptlrpc_request *req);
1400 int llog_origin_handle_close(struct ptlrpc_request *req);
1401 int llog_origin_handle_cancel(struct ptlrpc_request *req);
1402 int llog_catinfo(struct ptlrpc_request *req);
1404 /* ptlrpc/llog_client.c */
1405 extern struct llog_operations llog_client_ops;