1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 only,
10 * as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License version 2 for more details (a copy is included
16 * in the LICENSE file that accompanied this code).
18 * You should have received a copy of the GNU General Public License
19 * version 2 along with this program; If not, see
20 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23 * CA 95054 USA or visit www.sun.com if you need additional information or
29 * Copyright 2008 Sun Microsystems, Inc. All rights reserved
30 * Use is subject to license terms.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
40 #if defined(__linux__)
41 #include <linux/lustre_net.h>
42 #elif defined(__APPLE__)
43 #include <darwin/lustre_net.h>
44 #elif defined(__WINNT__)
45 #include <winnt/lustre_net.h>
47 #error Unsupported operating system.
50 #include <libcfs/libcfs.h>
52 #include <lnet/lnet.h>
53 #include <lustre/lustre_idl.h>
54 #include <lustre_ha.h>
55 #include <lustre_sec.h>
56 #include <lustre_import.h>
57 #include <lprocfs_status.h>
58 #include <lu_object.h>
59 #include <lustre_req_layout.h>
61 #include <obd_support.h>
63 /* MD flags we _always_ use */
64 #define PTLRPC_MD_OPTIONS 0
66 /* Define maxima for bulk I/O
67 * CAVEAT EMPTOR, with multinet (i.e. routers forwarding between networks)
68 * these limits are system wide and not interface-local. */
69 #define PTLRPC_MAX_BRW_BITS LNET_MTU_BITS
70 #define PTLRPC_MAX_BRW_SIZE (1<<LNET_MTU_BITS)
71 #define PTLRPC_MAX_BRW_PAGES (PTLRPC_MAX_BRW_SIZE >> CFS_PAGE_SHIFT)
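/* For illustration (assuming LNET_MTU_BITS == 20, i.e. a 1 MB LNET MTU, and
 * 4 KB pages, so CFS_PAGE_SHIFT == 12):
 *      PTLRPC_MAX_BRW_SIZE  = 1 << 20       = 1 MB per bulk RPC
 *      PTLRPC_MAX_BRW_PAGES = 1 MB >> 12    = 256 pages per bulk RPC
 * A larger page size reduces PTLRPC_MAX_BRW_PAGES but not PTLRPC_MAX_BRW_SIZE. */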
73 /* When PAGE_SIZE is a constant, we can check our arithmetic here with cpp! */
75 # if ((PTLRPC_MAX_BRW_PAGES & (PTLRPC_MAX_BRW_PAGES - 1)) != 0)
76 # error "PTLRPC_MAX_BRW_PAGES isn't a power of two"
78 # if (PTLRPC_MAX_BRW_SIZE != (PTLRPC_MAX_BRW_PAGES * CFS_PAGE_SIZE))
79 # error "PTLRPC_MAX_BRW_SIZE isn't PTLRPC_MAX_BRW_PAGES * CFS_PAGE_SIZE"
81 # if (PTLRPC_MAX_BRW_SIZE > LNET_MTU)
82 # error "PTLRPC_MAX_BRW_SIZE too big"
84 # if (PTLRPC_MAX_BRW_PAGES > LNET_MAX_IOV)
85 # error "PTLRPC_MAX_BRW_PAGES too big"
87 #endif /* __KERNEL__ */
89 /* Size over which to OBD_VMALLOC() rather than OBD_ALLOC() service request buffers */
91 #define SVC_BUF_VMALLOC_THRESHOLD (2 * CFS_PAGE_SIZE)
93 /* The following constants determine how memory is used to buffer incoming service requests.
96 * ?_NBUFS # buffers to allocate when growing the pool
97 * ?_BUFSIZE # bytes in a single request buffer
98 * ?_MAXREQSIZE # maximum request service will receive
100 * When fewer than ?_NBUFS/2 buffers are posted for receive, another chunk
101 * of ?_NBUFS is added to the pool.
103 * Messages larger than ?_MAXREQSIZE are dropped. Request buffers are
104 * considered full when less than ?_MAXREQSIZE is left in them.
107 #define LDLM_THREADS_AUTO_MIN (2)
108 #define LDLM_THREADS_AUTO_MAX min(num_online_cpus()*num_online_cpus()*32, 128)
109 #define LDLM_BL_THREADS LDLM_THREADS_AUTO_MIN
110 #define LDLM_NBUFS (64 * num_online_cpus())
111 #define LDLM_BUFSIZE (8 * 1024)
112 #define LDLM_MAXREQSIZE (5 * 1024)
113 #define LDLM_MAXREPSIZE (1024)
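/* A minimal sanity sketch of the buffer/request size relationship described
 * above, assuming the CLASSERT() compile-time assertion from libcfs; the
 * check itself is illustrative and not part of the original header:
 *
 *      CLASSERT(LDLM_MAXREQSIZE <= LDLM_BUFSIZE);
 *
 * i.e. the largest accepted request must fit in one request buffer, and a
 * buffer is retired once less than LDLM_MAXREQSIZE bytes remain in it. */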
115 #define MDT_MIN_THREADS 2UL
116 #define MDT_MAX_THREADS 512UL
117 #define MDT_NUM_THREADS max(min_t(unsigned long, MDT_MAX_THREADS, \
118 num_physpages >> (25 - CFS_PAGE_SHIFT)), 2UL)
119 #define FLD_NUM_THREADS max(min_t(unsigned long, MDT_MAX_THREADS, \
120 num_physpages >> (25 - CFS_PAGE_SHIFT)), 2UL)
121 #define SEQ_NUM_THREADS max(min_t(unsigned long, MDT_MAX_THREADS, \
122 num_physpages >> (25 - CFS_PAGE_SHIFT)), 2UL)
124 /* Absolute limits */
125 #define MDS_THREADS_MIN 2
126 #define MDS_THREADS_MAX 512
127 #define MDS_THREADS_MIN_READPAGE 2
128 #define MDS_NBUFS (64 * num_online_cpus())
129 #define MDS_BUFSIZE (8 * 1024)
130 /* Assume file name length = FNAME_MAX = 256 (true for ext3).
131 * path name length = PATH_MAX = 4096
132 * LOV MD size max = EA_MAX = 4000
133 * symlink: FNAME_MAX + PATH_MAX <- largest
134 * link: FNAME_MAX + PATH_MAX (mds_rec_link < mds_rec_create)
135 * rename: FNAME_MAX + FNAME_MAX
136 * open: FNAME_MAX + EA_MAX
138 * MDS_MAXREQSIZE ~= 4736 bytes =
139 * lustre_msg + ldlm_request + mds_body + mds_rec_create + FNAME_MAX + PATH_MAX
140 * MDS_MAXREPSIZE ~= 8300 bytes = lustre_msg + llog_header
141 * or, for mds_close() and mds_reint_unlink() on a many-OST filesystem:
142 * = 9210 bytes = lustre_msg + mds_body + 160 * (easize + cookiesize)
144 * Realistic size is about 512 bytes (20 character name + 128 char symlink),
145 * except in the open case where there are a large number of OSTs in a LOV.
147 #define MDS_MAXREQSIZE (5 * 1024)
148 #define MDS_MAXREPSIZE max(9 * 1024, 362 + LOV_MAX_STRIPE_COUNT * 56)
150 /* FLD_MAXREQSIZE == lustre_msg + __u32 padding + ptlrpc_body + opc + md_fld */
151 #define FLD_MAXREQSIZE (160)
153 /* FLD_MAXREPSIZE == lustre_msg + ptlrpc_body + md_fld */
154 #define FLD_MAXREPSIZE (152)
156 /* SEQ_MAXREQSIZE == lustre_msg + __u32 padding + ptlrpc_body + opc + lu_range +
158 #define SEQ_MAXREQSIZE (160)
160 /* SEQ_MAXREPSIZE == lustre_msg + ptlrpc_body + lu_range */
161 #define SEQ_MAXREPSIZE (152)
163 #define MGS_THREADS_AUTO_MIN 2
164 #define MGS_THREADS_AUTO_MAX 32
165 #define MGS_NBUFS (64 * num_online_cpus())
166 #define MGS_BUFSIZE (8 * 1024)
167 #define MGS_MAXREQSIZE (7 * 1024)
168 #define MGS_MAXREPSIZE (9 * 1024)
170 /* Absolute limits */
171 #define OSS_THREADS_MIN 3 /* difficult replies, HPQ, others */
172 #define OSS_THREADS_MAX 512
173 #define OST_NBUFS (64 * num_online_cpus())
174 #define OST_BUFSIZE (8 * 1024)
175 /* OST_MAXREQSIZE ~= 4768 bytes =
176 * lustre_msg + obdo + 16 * obd_ioobj + 256 * niobuf_remote
178 * - single object with 16 pages is 512 bytes
179 * - OST_MAXREQSIZE must be at least 1 page of cookies plus some spillover
181 #define OST_MAXREQSIZE (5 * 1024)
182 #define OST_MAXREPSIZE (9 * 1024)
184 /* Macro to hide a typecast. */
185 #define ptlrpc_req_async_args(req) ((void *)&req->rq_async_args)
187 struct ptlrpc_connection {
188 struct hlist_node c_hash;
190 lnet_process_id_t c_peer;
191 struct obd_uuid c_remote_uuid;
195 struct ptlrpc_client {
196 __u32 cli_request_portal;
197 __u32 cli_reply_portal;
201 /* state flags of requests */
202 /* XXX only ones left are those used by the bulk descs as well! */
203 #define PTL_RPC_FL_INTR (1 << 0) /* reply wait was interrupted by user */
204 #define PTL_RPC_FL_TIMEOUT (1 << 7) /* request timed out waiting for reply */
206 #define REQ_MAX_ACK_LOCKS 8
208 union ptlrpc_async_args {
209 /* Scratchpad for passing args to completion interpreter. Users
210 * cast to the struct of their choosing, and LASSERT that this is
211 * big enough. For _tons_ of context, OBD_ALLOC a struct and store
212 * a pointer to it here. The pointer_arg ensures this struct is at
213 * least big enough for that. */
214 void *pointer_arg[11];
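        /* A minimal sketch of the pattern described above; the struct and
         * field names here are illustrative, not taken from this header:
         *
         *      struct my_async_args {
         *              struct obd_export *aa_exp;
         *              int                aa_flags;
         *      };
         *      CLASSERT(sizeof(struct my_async_args) <=
         *               sizeof(union ptlrpc_async_args));
         *
         *      struct my_async_args *aa = ptlrpc_req_async_args(req);
         *      aa->aa_exp = my_exp;
         */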
218 struct ptlrpc_request_set;
219 typedef int (*set_interpreter_func)(struct ptlrpc_request_set *, void *, int);
221 struct ptlrpc_request_set {
222 int set_remaining; /* # uncompleted requests */
223 cfs_waitq_t set_waitq;
224 cfs_waitq_t *set_wakeup_ptr;
225 struct list_head set_requests;
226 struct list_head set_cblist; /* list of completion callbacks */
227 set_interpreter_func set_interpret; /* completion callback */
228 void *set_arg; /* completion context */
229 /* locked so that any old caller can communicate requests to
230 * the set holder who can then fold them into the lock-free set */
231 spinlock_t set_new_req_lock;
232 struct list_head set_new_requests;
235 struct ptlrpc_set_cbdata {
236 struct list_head psc_item;
237 set_interpreter_func psc_interpret;
241 struct ptlrpc_bulk_desc;
244 * ptlrpc callback & work item stuff
246 struct ptlrpc_cb_id {
247 void (*cbid_fn)(lnet_event_t *ev); /* specific callback fn */
248 void *cbid_arg; /* additional arg */
251 #define RS_MAX_LOCKS 8
254 struct ptlrpc_reply_state {
255 struct ptlrpc_cb_id rs_cb_id;
256 struct list_head rs_list;
257 struct list_head rs_exp_list;
258 struct list_head rs_obd_list;
260 struct list_head rs_debug_list;
262 /* A spinlock to protect the reply state flags */
264 /* Reply state flags */
265 unsigned long rs_difficult:1; /* ACK/commit stuff */
266 unsigned long rs_no_ack:1; /* no ACK, even for
267 difficult requests */
268 unsigned long rs_scheduled:1; /* being handled? */
269 unsigned long rs_scheduled_ever:1;/* any schedule attempts? */
270 unsigned long rs_handled:1; /* been handled yet? */
271 unsigned long rs_on_net:1; /* reply_out_callback pending? */
272 unsigned long rs_prealloc:1; /* rs from prealloc list */
277 struct obd_export *rs_export;
278 struct ptlrpc_service *rs_service;
279 lnet_handle_md_t rs_md_h;
280 atomic_t rs_refcount;
282 struct ptlrpc_svc_ctx *rs_svc_ctx;
283 struct lustre_msg *rs_repbuf; /* wrapper */
284 int rs_repbuf_len; /* wrapper buf length */
285 int rs_repdata_len; /* wrapper msg length */
286 struct lustre_msg *rs_msg; /* reply message */
288 /* locks awaiting client reply ACK */
290 struct lustre_handle rs_locks[RS_MAX_LOCKS];
291 ldlm_mode_t rs_modes[RS_MAX_LOCKS];
294 struct ptlrpc_thread;
297 RQ_PHASE_NEW = 0xebc0de00,
298 RQ_PHASE_RPC = 0xebc0de01,
299 RQ_PHASE_BULK = 0xebc0de02,
300 RQ_PHASE_INTERPRET = 0xebc0de03,
301 RQ_PHASE_COMPLETE = 0xebc0de04,
302 RQ_PHASE_UNREGISTERING = 0xebc0de05,
303 RQ_PHASE_UNDEFINED = 0xebc0de06
306 /** Type of request interpreter call-back */
307 typedef int (*ptlrpc_interpterer_t)(const struct lu_env *env,
308 struct ptlrpc_request *req,
311 struct ptlrpc_request_pool {
313 struct list_head prp_req_list; /* list of ptlrpc_request structs */
315 void (*prp_populate)(struct ptlrpc_request_pool *, int);
323 struct ptlrpc_hpreq_ops {
325 * Check if the lock handle of the given lock is the same as
326 * taken from the request.
328 int (*hpreq_lock_match)(struct ptlrpc_request *, struct ldlm_lock *);
330 * Check if the request is a high priority one.
332 int (*hpreq_check)(struct ptlrpc_request *);
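        /* A sketch of how a service might wire these up (the names are
         * illustrative, not from this header):
         *
         *      static struct ptlrpc_hpreq_ops my_hpreq_ops = {
         *              .hpreq_lock_match = my_hpreq_lock_match,
         *              .hpreq_check      = my_hpreq_check,
         *      };
         *      req->rq_ops = &my_hpreq_ops;
         *
         * rq_ops is consulted when deciding whether a request should be
         * queued on the high priority queue (srv_request_hpq). */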
336 * Represents remote procedure call.
338 struct ptlrpc_request {
339 int rq_type; /* one of PTL_RPC_MSG_* */
340 struct list_head rq_list;
341 struct list_head rq_timed_list; /* server-side early replies */
342 struct list_head rq_history_list; /* server-side history */
343 struct list_head rq_exp_list; /* server-side per-export list */
344 struct ptlrpc_hpreq_ops *rq_ops; /* server-side hp handlers */
345 __u64 rq_history_seq; /* history sequence # */
346 /* the index of service's srv_at_array into which request is linked */
350 /* client-side flags are serialized by rq_lock */
351 unsigned long rq_intr:1, rq_replied:1, rq_err:1,
352 rq_timedout:1, rq_resend:1, rq_restart:1,
354 * when ->rq_replay is set, request is kept by the client even
355 * after server commits corresponding transaction. This is
356 * used for operations that require sequence of multiple
357 * requests to be replayed. The only example currently is file
358 * open/close. When last request in such a sequence is
359 * committed, ->rq_replay is cleared on all requests in the
363 rq_no_resend:1, rq_waiting:1, rq_receiving_reply:1,
364 rq_no_delay:1, rq_net_err:1, rq_wait_ctx:1,
365 rq_early:1, rq_must_unlink:1,
366 /* server-side flags */
367 rq_packed_final:1, /* packed final reply */
368 rq_sent_final:1, /* stop sending early replies */
369 rq_hp:1, /* high priority RPC */
370 rq_at_linked:1; /* link into service's srv_at_array */
372 enum rq_phase rq_phase; /* one of RQ_PHASE_* */
373 enum rq_phase rq_next_phase; /* one of RQ_PHASE_* to be used next */
374 atomic_t rq_refcount; /* client-side refcount for SENT race,
375                                        server-side refcount for multiple replies */
377 struct ptlrpc_thread *rq_svc_thread; /* initial thread servicing req */
379 int rq_request_portal; /* XXX FIXME bug 249 */
380 int rq_reply_portal; /* XXX FIXME bug 249 */
382 int rq_nob_received; /* client-side # reply bytes actually received */
385 struct lustre_msg *rq_reqmsg;
388 struct lustre_msg *rq_repmsg;
391 struct list_head rq_replay_list;
393 struct ptlrpc_cli_ctx *rq_cli_ctx; /* client's half ctx */
394 struct ptlrpc_svc_ctx *rq_svc_ctx; /* server's half ctx */
395 struct list_head rq_ctx_chain; /* link to waited ctx */
397 struct sptlrpc_flavor rq_flvr; /* client & server */
398 enum lustre_sec_part rq_sp_from;
400 unsigned long /* client/server security flags */
401 rq_ctx_init:1, /* context initiation */
402 rq_ctx_fini:1, /* context destroy */
403 rq_bulk_read:1, /* request bulk read */
404 rq_bulk_write:1, /* request bulk write */
405 /* server authentication flags */
406 rq_auth_gss:1, /* authenticated by gss */
407 rq_auth_remote:1, /* authed as remote user */
408 rq_auth_usr_root:1, /* authed as root */
409 rq_auth_usr_mdt:1, /* authed as mdt */
410 /* security tfm flags */
413 /* doesn't expect reply FIXME */
416 uid_t rq_auth_uid; /* authed uid */
417 uid_t rq_auth_mapped_uid; /* authed uid mapped to */
419 /* (server side), pointed directly into req buffer */
420 struct ptlrpc_user_desc *rq_user_desc;
422 /* early replies go to offset 0, regular replies go after that */
423 unsigned int rq_reply_off;
425 /* various buffer pointers */
426 struct lustre_msg *rq_reqbuf; /* req wrapper */
427 int rq_reqbuf_len; /* req wrapper buf len */
428 int rq_reqdata_len; /* req wrapper msg len */
429 char *rq_repbuf; /* rep buffer */
430 int rq_repbuf_len; /* rep buffer len */
431 struct lustre_msg *rq_repdata; /* rep wrapper msg */
432 int rq_repdata_len; /* rep wrapper msg len */
433 struct lustre_msg *rq_clrbuf; /* only in priv mode */
434 int rq_clrbuf_len; /* only in priv mode */
435 int rq_clrdata_len; /* only in priv mode */
437 __u32 rq_req_swab_mask;
438 __u32 rq_rep_swab_mask;
440 int rq_import_generation;
441 enum lustre_imp_state rq_send_state;
443 int rq_early_count; /* how many early replies (for stats) */
445 /* client+server request */
446 lnet_handle_md_t rq_req_md_h;
447 struct ptlrpc_cb_id rq_req_cbid;
450 struct timeval rq_arrival_time; /* request arrival time */
451 struct ptlrpc_reply_state *rq_reply_state; /* separated reply state */
452 struct ptlrpc_request_buffer_desc *rq_rqbd; /* incoming request buffer*/
454 __u32 rq_uid; /* peer uid, used in MDS only */
457 /* client-only incoming reply */
458 lnet_handle_md_t rq_reply_md_h;
459 cfs_waitq_t rq_reply_waitq;
460 struct ptlrpc_cb_id rq_reply_cbid;
463 lnet_process_id_t rq_peer;
464 struct obd_export *rq_export;
465 struct obd_import *rq_import;
467 void (*rq_replay_cb)(struct ptlrpc_request *);
468 void (*rq_commit_cb)(struct ptlrpc_request *);
471 struct ptlrpc_bulk_desc *rq_bulk;/* client side bulk */
473 /* client outgoing req */
474 time_t rq_sent; /* when request/reply sent (secs), or
475 * time when request should be sent */
477 volatile time_t rq_deadline; /* when request must finish. volatile
478 so that servers' early reply updates to the deadline aren't
479 kept in per-cpu cache */
480 time_t rq_reply_deadline; /* when req reply unlink must finish. */
481 time_t rq_bulk_deadline; /* when req bulk unlink must finish. */
482 int rq_timeout; /* service time estimate (secs) */
485 struct list_head rq_set_chain;
486 struct ptlrpc_request_set *rq_set;
487 /** Async completion handler */
488 ptlrpc_interpterer_t rq_interpret_reply;
489 union ptlrpc_async_args rq_async_args; /* Async completion context */
490 struct ptlrpc_request_pool *rq_pool; /* Pool if request is from a preallocated list */
492 struct lu_context rq_session;
495 struct req_capsule rq_pill;
498 static inline void lustre_set_req_swabbed(struct ptlrpc_request *req, int index)
500 LASSERT(index < sizeof(req->rq_req_swab_mask) * 8);
501 LASSERT((req->rq_req_swab_mask & (1 << index)) == 0);
502 req->rq_req_swab_mask |= 1 << index;
505 static inline void lustre_set_rep_swabbed(struct ptlrpc_request *req, int index)
507 LASSERT(index < sizeof(req->rq_rep_swab_mask) * 8);
508 LASSERT((req->rq_rep_swab_mask & (1 << index)) == 0);
509 req->rq_rep_swab_mask |= 1 << index;
512 static inline int lustre_req_swabbed(struct ptlrpc_request *req, int index)
514 LASSERT(index < sizeof(req->rq_req_swab_mask) * 8);
515 return req->rq_req_swab_mask & (1 << index);
518 static inline int lustre_rep_swabbed(struct ptlrpc_request *req, int index)
520 LASSERT(index < sizeof(req->rq_rep_swab_mask) * 8);
521 return req->rq_rep_swab_mask & (1 << index);
524 static inline const char *
525 ptlrpc_phase2str(enum rq_phase phase)
534 case RQ_PHASE_INTERPRET:
536 case RQ_PHASE_COMPLETE:
538 case RQ_PHASE_UNREGISTERING:
539 return "Unregistering";
545 static inline const char *
546 ptlrpc_rqphase2str(struct ptlrpc_request *req)
548 return ptlrpc_phase2str(req->rq_phase);
551 /* Spare the preprocessor, spoil the bugs. */
552 #define FLAG(field, str) (field ? str : "")
554 #define DEBUG_REQ_FLAGS(req) \
555 ptlrpc_rqphase2str(req), \
556 FLAG(req->rq_intr, "I"), FLAG(req->rq_replied, "R"), \
557 FLAG(req->rq_err, "E"), \
558 FLAG(req->rq_timedout, "X") /* eXpired */, FLAG(req->rq_resend, "S"), \
559 FLAG(req->rq_restart, "T"), FLAG(req->rq_replay, "P"), \
560 FLAG(req->rq_no_resend, "N"), \
561 FLAG(req->rq_waiting, "W"), \
562 FLAG(req->rq_wait_ctx, "C"), FLAG(req->rq_hp, "H")
564 #define REQ_FLAGS_FMT "%s:%s%s%s%s%s%s%s%s%s%s%s"
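/* These flag strings feed the DEBUG_REQ() macro defined below; a typical
 * (illustrative) call site looks like:
 *
 *      DEBUG_REQ(D_ERROR, req, "bulk transfer failed: rc = %d", rc);
 *
 * which logs the request phase and state flags alongside the message. */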
566 void _debug_req(struct ptlrpc_request *req, __u32 mask,
567 struct libcfs_debug_msg_data *data, const char *fmt, ...)
568 __attribute__ ((format (printf, 4, 5)));
570 #define debug_req(cdls, level, req, file, func, line, fmt, a...) \
574 if (((level) & D_CANTMASK) != 0 || \
575 ((libcfs_debug & (level)) != 0 && \
576 (libcfs_subsystem_debug & DEBUG_SUBSYSTEM) != 0)) { \
577 static struct libcfs_debug_msg_data _req_dbg_data = \
578 DEBUG_MSG_DATA_INIT(cdls, DEBUG_SUBSYSTEM, file, func, line); \
579 _debug_req((req), (level), &_req_dbg_data, fmt, ##a); \
583 /* for most callers (level is a constant) this is resolved at compile time */
584 #define DEBUG_REQ(level, req, fmt, args...) \
586 if ((level) & (D_ERROR | D_WARNING)) { \
587 static cfs_debug_limit_state_t cdls; \
588 debug_req(&cdls, level, req, __FILE__, __func__, __LINE__, \
589 "@@@ "fmt" ", ## args); \
591 debug_req(NULL, level, req, __FILE__, __func__, __LINE__, \
592 "@@@ "fmt" ", ## args); \
595 struct ptlrpc_bulk_page {
596 struct list_head bp_link;
598 int bp_pageoffset; /* offset within a page */
599 struct page *bp_page;
602 #define BULK_GET_SOURCE 0
603 #define BULK_PUT_SINK 1
604 #define BULK_GET_SINK 2
605 #define BULK_PUT_SOURCE 3
607 struct ptlrpc_bulk_desc {
608 unsigned long bd_success:1; /* completed successfully */
609 unsigned long bd_network_rw:1; /* accessible to the network */
610 unsigned long bd_type:2; /* {put,get}{source,sink} */
611 unsigned long bd_registered:1; /* client side */
612 spinlock_t bd_lock; /* serialise with callback */
613 int bd_import_generation;
614 struct obd_export *bd_export;
615 struct obd_import *bd_import;
617 struct ptlrpc_request *bd_req; /* associated request */
618 cfs_waitq_t bd_waitq; /* server side only WQ */
619 int bd_iov_count; /* # entries in bd_iov */
620 int bd_max_iov; /* allocated size of bd_iov */
621 int bd_nob; /* # bytes covered */
622 int bd_nob_transferred; /* # bytes GOT/PUT */
626 struct ptlrpc_cb_id bd_cbid; /* network callback info */
627 lnet_handle_md_t bd_md_h; /* associated MD */
628 lnet_nid_t bd_sender; /* stash event::sender */
630 #if defined(__KERNEL__)
632 * encrypt iov, size is either 0 or bd_iov_count.
634 lnet_kiov_t *bd_enc_iov;
636 lnet_kiov_t bd_iov[0];
638 lnet_md_iovec_t bd_iov[0];
642 struct ptlrpc_thread {
644 * active threads in svc->srv_threads
646 struct list_head t_link;
648 * thread-private data (preallocated memory)
653 * service thread index, from ptlrpc_start_threads
657 * put watchdog in the structure per thread b=14840
659 struct lc_watchdog *t_watchdog;
661 * the svc this thread belongs to b=18582
663 struct ptlrpc_service *t_svc;
664 cfs_waitq_t t_ctl_waitq;
665 struct lu_env *t_env;
668 struct ptlrpc_request_buffer_desc {
669 struct list_head rqbd_list;
670 struct list_head rqbd_reqs;
671 struct ptlrpc_service *rqbd_service;
672 lnet_handle_md_t rqbd_md_h;
675 struct ptlrpc_cb_id rqbd_cbid;
676 struct ptlrpc_request rqbd_req;
679 typedef int (*svc_handler_t)(struct ptlrpc_request *req);
680 typedef void (*svcreq_printfn_t)(void *, struct ptlrpc_request *);
681 typedef int (*svc_hpreq_handler_t)(struct ptlrpc_request *);
683 #define PTLRPC_SVC_HP_RATIO 10
685 struct ptlrpc_service {
686 struct list_head srv_list; /* chain thru all services */
687 int srv_max_req_size; /* biggest request to receive */
688 int srv_max_reply_size; /* biggest reply to send */
689 int srv_buf_size; /* size of individual buffers */
690 int srv_nbuf_per_group; /* # buffers to allocate in 1 group */
691 int srv_nbufs; /* total # req buffer descs allocated */
692 int srv_threads_min; /* threads to start at SOW */
693 int srv_threads_max; /* thread upper limit */
694 int srv_threads_started; /* index of last started thread */
695 int srv_threads_running; /* # running threads */
696 atomic_t srv_n_difficult_replies; /* # 'difficult' replies */
697 int srv_n_active_reqs; /* # reqs being served */
698 int srv_n_hpreq; /* # HPreqs being served */
699 cfs_duration_t srv_rqbd_timeout; /* timeout before re-posting reqs, in ticks */
700 int srv_watchdog_factor; /* soft watchdog timeout multiplier */
701 unsigned srv_cpu_affinity:1; /* bind threads to CPUs */
702 unsigned srv_at_check:1; /* check early replies */
703 unsigned srv_is_stopping:1; /* under unregister_service */
704 cfs_time_t srv_at_checktime; /* debug */
706 __u32 srv_req_portal;
707 __u32 srv_rep_portal;
710 struct adaptive_timeout srv_at_estimate;/* estimated rpc service time */
711 spinlock_t srv_at_lock;
712 struct ptlrpc_at_array srv_at_array; /* reqs waiting for replies */
713 cfs_timer_t srv_at_timer; /* early reply timer */
715 int srv_n_queued_reqs; /* # reqs in either of the queues below */
716 int srv_hpreq_count; /* # hp requests handled */
717 int srv_hpreq_ratio; /* # hp per lp reqs to handle */
718 struct list_head srv_req_in_queue; /* incoming reqs */
719 struct list_head srv_request_queue; /* reqs waiting for service */
720 struct list_head srv_request_hpq; /* high priority queue */
722 struct list_head srv_request_history; /* request history */
723 __u64 srv_request_seq; /* next request sequence # */
724 __u64 srv_request_max_cull_seq; /* highest seq culled from history */
725 svcreq_printfn_t srv_request_history_print_fn; /* service-specific print fn */
727 struct list_head srv_idle_rqbds; /* request buffers to be reposted */
728 struct list_head srv_active_rqbds; /* req buffers receiving */
729 struct list_head srv_history_rqbds; /* request buffer history */
730 int srv_nrqbd_receiving; /* # posted request buffers */
731 int srv_n_history_rqbds; /* # request buffers in history */
732 int srv_max_history_rqbds;/* max # request buffers in history */
734 atomic_t srv_outstanding_replies;
735 struct list_head srv_active_replies; /* all the active replies */
737 struct list_head srv_reply_queue; /* replies waiting for service */
739 cfs_waitq_t srv_waitq; /* all threads sleep on this. This
740 * wait-queue is signalled when new
741 * incoming request arrives and when
742 * difficult reply has to be handled. */
744 struct list_head srv_threads; /* service thread list */
745 svc_handler_t srv_handler;
746 svc_hpreq_handler_t srv_hpreq_handler; /* hp request handler */
748 char *srv_name; /* only statically allocated strings here; we don't clean them */
749 char *srv_thread_name; /* only statically allocated strings here; we don't clean them */
753 cfs_proc_dir_entry_t *srv_procroot;
754 struct lprocfs_stats *srv_stats;
756 /* List of free reply_states */
757 struct list_head srv_free_rs_list;
758 /* waitq to run, when adding stuff to srv_free_rs_list */
759 cfs_waitq_t srv_free_rs_waitq;
762 * Tags for lu_context associated with this thread, see struct lu_context.
767 * if non-NULL called during thread creation (ptlrpc_start_thread())
768 * to initialize service specific per-thread state.
770 int (*srv_init)(struct ptlrpc_thread *thread);
772 * if non-NULL called during thread shutdown (ptlrpc_main()) to
773 * destruct state created by ->srv_init().
775 void (*srv_done)(struct ptlrpc_thread *thread);
777 //struct ptlrpc_srv_ni srv_interfaces[0];
782 * Ptlrpc thread control flags (LIOD_START, LIOD_STOP, LIOD_FORCE)
784 unsigned long pc_flags;
786 * Thread lock protecting structure fields.
792 struct completion pc_starting;
796 struct completion pc_finishing;
798 * Thread requests set.
800 struct ptlrpc_request_set *pc_set;
802 * Thread name used in cfs_daemonize()
806 * Environment for request interpreters to run in.
808 struct lu_env pc_env;
811 * Async rpcs flag to make sure that ptlrpcd_check() is called only
816 * Currently not used.
820 * User-space async rpcs callback.
822 void *pc_wait_callback;
824 * User-space check idle rpcs callback.
826 void *pc_idle_callback;
830 /* Bits for pc_flags */
831 enum ptlrpcd_ctl_flags {
833 * Ptlrpc thread start flag.
837 * Ptlrpc thread stop flag.
841 * Ptlrpc thread force flag (currently only a forced stop is supported).
842 * When set together with LIOD_STOP, this aborts any RPCs still in flight
843 * on the thread.
847 * This is a recovery ptlrpc thread.
849 LIOD_RECOVERY = 1 << 3
852 /* ptlrpc/events.c */
853 extern lnet_handle_eq_t ptlrpc_eq_h;
854 extern int ptlrpc_uuid_to_peer(struct obd_uuid *uuid,
855 lnet_process_id_t *peer, lnet_nid_t *self);
856 extern void request_out_callback (lnet_event_t *ev);
857 extern void reply_in_callback(lnet_event_t *ev);
858 extern void client_bulk_callback (lnet_event_t *ev);
859 extern void request_in_callback(lnet_event_t *ev);
860 extern void reply_out_callback(lnet_event_t *ev);
861 extern void server_bulk_callback (lnet_event_t *ev);
863 /* ptlrpc/connection.c */
864 struct ptlrpc_connection *ptlrpc_connection_get(lnet_process_id_t peer,
866 struct obd_uuid *uuid);
867 int ptlrpc_connection_put(struct ptlrpc_connection *c);
868 struct ptlrpc_connection *ptlrpc_connection_addref(struct ptlrpc_connection *);
869 int ptlrpc_connection_init(void);
870 void ptlrpc_connection_fini(void);
871 extern lnet_pid_t ptl_get_pid(void);
873 /* ptlrpc/niobuf.c */
874 int ptlrpc_start_bulk_transfer(struct ptlrpc_bulk_desc *desc);
875 void ptlrpc_abort_bulk(struct ptlrpc_bulk_desc *desc);
876 int ptlrpc_register_bulk(struct ptlrpc_request *req);
877 int ptlrpc_unregister_bulk(struct ptlrpc_request *req, int async);
879 static inline int ptlrpc_server_bulk_active(struct ptlrpc_bulk_desc *desc)
883 LASSERT(desc != NULL);
885 spin_lock(&desc->bd_lock);
886 rc = desc->bd_network_rw;
887 spin_unlock(&desc->bd_lock);
891 static inline int ptlrpc_client_bulk_active(struct ptlrpc_request *req)
893 struct ptlrpc_bulk_desc *desc = req->rq_bulk;
896 LASSERT(req != NULL);
898 if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK) &&
899 req->rq_bulk_deadline > cfs_time_current_sec())
905 spin_lock(&desc->bd_lock);
906 rc = desc->bd_network_rw;
907 spin_unlock(&desc->bd_lock);
911 #define PTLRPC_REPLY_MAYBE_DIFFICULT 0x01
912 #define PTLRPC_REPLY_EARLY 0x02
913 int ptlrpc_send_reply(struct ptlrpc_request *req, int flags);
914 int ptlrpc_reply(struct ptlrpc_request *req);
915 int ptlrpc_send_error(struct ptlrpc_request *req, int difficult);
916 int ptlrpc_error(struct ptlrpc_request *req);
917 void ptlrpc_resend_req(struct ptlrpc_request *request);
918 int ptlrpc_at_get_net_latency(struct ptlrpc_request *req);
919 int ptl_send_rpc(struct ptlrpc_request *request, int noreply);
920 int ptlrpc_register_rqbd (struct ptlrpc_request_buffer_desc *rqbd);
922 /* ptlrpc/client.c */
923 void ptlrpc_init_client(int req_portal, int rep_portal, char *name,
924 struct ptlrpc_client *);
925 void ptlrpc_cleanup_client(struct obd_import *imp);
926 struct ptlrpc_connection *ptlrpc_uuid_to_connection(struct obd_uuid *uuid);
928 int ptlrpc_queue_wait(struct ptlrpc_request *req);
929 int ptlrpc_replay_req(struct ptlrpc_request *req);
930 int ptlrpc_unregister_reply(struct ptlrpc_request *req, int async);
931 void ptlrpc_restart_req(struct ptlrpc_request *req);
932 void ptlrpc_abort_inflight(struct obd_import *imp);
933 void ptlrpc_cleanup_imp(struct obd_import *imp);
934 void ptlrpc_abort_set(struct ptlrpc_request_set *set);
936 struct ptlrpc_request_set *ptlrpc_prep_set(void);
937 int ptlrpc_set_add_cb(struct ptlrpc_request_set *set,
938 set_interpreter_func fn, void *data);
939 int ptlrpc_set_next_timeout(struct ptlrpc_request_set *);
940 int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set);
941 int ptlrpc_set_wait(struct ptlrpc_request_set *);
942 int ptlrpc_expired_set(void *data);
943 void ptlrpc_interrupted_set(void *data);
944 void ptlrpc_mark_interrupted(struct ptlrpc_request *req);
945 void ptlrpc_set_destroy(struct ptlrpc_request_set *);
946 void ptlrpc_set_add_req(struct ptlrpc_request_set *, struct ptlrpc_request *);
947 int ptlrpc_set_add_new_req(struct ptlrpcd_ctl *pc,
948 struct ptlrpc_request *req);
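/* A minimal sketch of the usual request-set pattern built from the calls
 * above (error handling omitted; preparing "req" is up to the caller):
 *
 *      struct ptlrpc_request_set *set = ptlrpc_prep_set();
 *      ptlrpc_set_add_req(set, req);
 *      rc = ptlrpc_set_wait(set);      - sends all requests and waits
 *      ptlrpc_set_destroy(set);        - releases the set and its requests
 */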
950 void ptlrpc_free_rq_pool(struct ptlrpc_request_pool *pool);
951 void ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq);
953 struct ptlrpc_request_pool *
954 ptlrpc_init_rq_pool(int, int,
955 void (*populate_pool)(struct ptlrpc_request_pool *, int));
957 void ptlrpc_at_set_req_timeout(struct ptlrpc_request *req);
958 struct ptlrpc_request *ptlrpc_request_alloc(struct obd_import *imp,
959 const struct req_format *format);
960 struct ptlrpc_request *ptlrpc_request_alloc_pool(struct obd_import *imp,
961 struct ptlrpc_request_pool *,
962 const struct req_format *format);
963 void ptlrpc_request_free(struct ptlrpc_request *request);
964 int ptlrpc_request_pack(struct ptlrpc_request *request,
965 __u32 version, int opcode);
966 struct ptlrpc_request *ptlrpc_request_alloc_pack(struct obd_import *imp,
967 const struct req_format *format,
968 __u32 version, int opcode);
969 int ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
970 __u32 version, int opcode, char **bufs,
971 struct ptlrpc_cli_ctx *ctx);
972 struct ptlrpc_request *ptlrpc_prep_req(struct obd_import *imp, __u32 version,
973 int opcode, int count, __u32 *lengths,
975 struct ptlrpc_request *ptlrpc_prep_req_pool(struct obd_import *imp,
976 __u32 version, int opcode,
977 int count, __u32 *lengths, char **bufs,
978 struct ptlrpc_request_pool *pool);
979 void ptlrpc_req_finished(struct ptlrpc_request *request);
980 void ptlrpc_req_finished_with_imp_lock(struct ptlrpc_request *request);
981 struct ptlrpc_request *ptlrpc_request_addref(struct ptlrpc_request *req);
982 struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp (struct ptlrpc_request *req,
983 int npages, int type, int portal);
984 struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_exp(struct ptlrpc_request *req,
985 int npages, int type, int portal);
986 void ptlrpc_free_bulk(struct ptlrpc_bulk_desc *bulk);
987 void ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
988 cfs_page_t *page, int pageoffset, int len);
989 void ptlrpc_retain_replayable_request(struct ptlrpc_request *req,
990 struct obd_import *imp);
991 __u64 ptlrpc_next_xid(void);
992 __u64 ptlrpc_sample_next_xid(void);
993 __u64 ptlrpc_req_xid(struct ptlrpc_request *request);
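/* A hedged sketch of the simplest synchronous client RPC built from the
 * calls above. RQF_OBD_PING, LUSTRE_OBD_VERSION and OBD_PING come from the
 * included layout/idl headers and are used here purely as an example:
 *
 *      req = ptlrpc_request_alloc_pack(imp, &RQF_OBD_PING,
 *                                      LUSTRE_OBD_VERSION, OBD_PING);
 *      if (req == NULL)
 *              return -ENOMEM;
 *      ptlrpc_request_set_replen(req);
 *      rc = ptlrpc_queue_wait(req);
 *      ptlrpc_req_finished(req);
 */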
995 struct ptlrpc_service_conf {
998 int psc_max_req_size;
999 int psc_max_reply_size;
1002 int psc_watchdog_factor;
1003 int psc_min_threads;
1004 int psc_max_threads;
1008 /* ptlrpc/service.c */
1009 void ptlrpc_save_lock (struct ptlrpc_request *req,
1010 struct lustre_handle *lock, int mode, int no_ack);
1011 void ptlrpc_commit_replies(struct obd_export *exp);
1012 void ptlrpc_dispatch_difficult_reply (struct ptlrpc_reply_state *rs);
1013 void ptlrpc_schedule_difficult_reply (struct ptlrpc_reply_state *rs);
1014 struct ptlrpc_service *ptlrpc_init_svc_conf(struct ptlrpc_service_conf *c,
1015 svc_handler_t h, char *name,
1016 struct proc_dir_entry *proc_entry,
1017 svcreq_printfn_t prntfn,
1020 struct ptlrpc_service *ptlrpc_init_svc(int nbufs, int bufsize, int max_req_size,
1022 int req_portal, int rep_portal,
1023 int watchdog_factor,
1024 svc_handler_t, char *name,
1025 cfs_proc_dir_entry_t *proc_entry,
1027 int min_threads, int max_threads,
1028 char *threadname, __u32 ctx_tags,
1029 svc_hpreq_handler_t);
1030 void ptlrpc_stop_all_threads(struct ptlrpc_service *svc);
1032 int ptlrpc_start_threads(struct obd_device *dev, struct ptlrpc_service *svc);
1033 int ptlrpc_start_thread(struct obd_device *dev, struct ptlrpc_service *svc);
1034 int ptlrpc_unregister_service(struct ptlrpc_service *service);
1035 int liblustre_check_services (void *arg);
1036 void ptlrpc_daemonize(char *name);
1037 int ptlrpc_service_health_check(struct ptlrpc_service *);
1038 void ptlrpc_hpreq_reorder(struct ptlrpc_request *req);
1041 int ptlrpc_hr_init(void);
1042 void ptlrpc_hr_fini(void);
1044 # define ptlrpc_hr_init() (0)
1045 # define ptlrpc_hr_fini() do {} while(0)
1048 struct ptlrpc_svc_data {
1050 struct ptlrpc_service *svc;
1051 struct ptlrpc_thread *thread;
1052 struct obd_device *dev;
1055 /* ptlrpc/import.c */
1056 int ptlrpc_connect_import(struct obd_import *imp, char * new_uuid);
1057 int ptlrpc_init_import(struct obd_import *imp);
1058 int ptlrpc_disconnect_import(struct obd_import *imp, int noclose);
1059 int ptlrpc_import_recovery_state_machine(struct obd_import *imp);
1061 /* ptlrpc/pack_generic.c */
1062 int ptlrpc_reconnect_import(struct obd_import *imp);
1063 int lustre_msg_swabbed(struct lustre_msg *msg);
1064 int lustre_msg_check_version(struct lustre_msg *msg, __u32 version);
1065 void lustre_init_msg_v2(struct lustre_msg_v2 *msg, int count, __u32 *lens,
1067 int lustre_pack_request(struct ptlrpc_request *, __u32 magic, int count,
1068 __u32 *lens, char **bufs);
1069 int lustre_pack_reply(struct ptlrpc_request *, int count, __u32 *lens,
1071 int lustre_pack_reply_v2(struct ptlrpc_request *req, int count,
1072 __u32 *lens, char **bufs, int flags);
1073 #define LPRFL_EARLY_REPLY 1
1074 int lustre_pack_reply_flags(struct ptlrpc_request *, int count, __u32 *lens,
1075 char **bufs, int flags);
1076 int lustre_shrink_msg(struct lustre_msg *msg, int segment,
1077 unsigned int newlen, int move_data);
1078 void lustre_free_reply_state(struct ptlrpc_reply_state *rs);
1079 int lustre_msg_hdr_size(__u32 magic, int count);
1080 int lustre_msg_size(__u32 magic, int count, __u32 *lengths);
1081 int lustre_msg_size_v2(int count, __u32 *lengths);
1082 int lustre_packed_msg_size(struct lustre_msg *msg);
1083 int lustre_msg_early_size(void);
1084 int lustre_unpack_msg(struct lustre_msg *m, int len);
1085 void *lustre_msg_buf_v2(struct lustre_msg_v2 *m, int n, int min_size);
1086 void *lustre_msg_buf(struct lustre_msg *m, int n, int minlen);
1087 int lustre_msg_buflen(struct lustre_msg *m, int n);
1088 void lustre_msg_set_buflen(struct lustre_msg *m, int n, int len);
1089 int lustre_msg_bufcount(struct lustre_msg *m);
1090 char *lustre_msg_string (struct lustre_msg *m, int n, int max_len);
1091 void *lustre_swab_buf(struct lustre_msg *, int n, int minlen, void *swabber);
1092 void *lustre_swab_reqbuf(struct ptlrpc_request *req, int n, int minlen,
1094 void *lustre_swab_repbuf(struct ptlrpc_request *req, int n, int minlen,
1096 __u32 lustre_msghdr_get_flags(struct lustre_msg *msg);
1097 void lustre_msghdr_set_flags(struct lustre_msg *msg, __u32 flags);
1098 __u32 lustre_msg_get_flags(struct lustre_msg *msg);
1099 void lustre_msg_add_flags(struct lustre_msg *msg, int flags);
1100 void lustre_msg_set_flags(struct lustre_msg *msg, int flags);
1101 void lustre_msg_clear_flags(struct lustre_msg *msg, int flags);
1102 __u32 lustre_msg_get_op_flags(struct lustre_msg *msg);
1103 void lustre_msg_add_op_flags(struct lustre_msg *msg, int flags);
1104 void lustre_msg_set_op_flags(struct lustre_msg *msg, int flags);
1105 struct lustre_handle *lustre_msg_get_handle(struct lustre_msg *msg);
1106 __u32 lustre_msg_get_type(struct lustre_msg *msg);
1107 __u32 lustre_msg_get_version(struct lustre_msg *msg);
1108 void lustre_msg_add_version(struct lustre_msg *msg, int version);
1109 __u32 lustre_msg_get_opc(struct lustre_msg *msg);
1110 __u64 lustre_msg_get_last_xid(struct lustre_msg *msg);
1111 __u64 lustre_msg_get_last_committed(struct lustre_msg *msg);
1112 __u64 *lustre_msg_get_versions(struct lustre_msg *msg);
1113 __u64 lustre_msg_get_transno(struct lustre_msg *msg);
1114 __u64 lustre_msg_get_slv(struct lustre_msg *msg);
1115 __u32 lustre_msg_get_limit(struct lustre_msg *msg);
1116 void lustre_msg_set_slv(struct lustre_msg *msg, __u64 slv);
1117 void lustre_msg_set_limit(struct lustre_msg *msg, __u64 limit);
1118 int lustre_msg_get_status(struct lustre_msg *msg);
1119 __u32 lustre_msg_get_conn_cnt(struct lustre_msg *msg);
1120 int lustre_msg_is_v1(struct lustre_msg *msg);
1121 __u32 lustre_msg_get_magic(struct lustre_msg *msg);
1122 __u32 lustre_msg_get_timeout(struct lustre_msg *msg);
1123 __u32 lustre_msg_get_service_time(struct lustre_msg *msg);
1124 __u32 lustre_msg_get_cksum(struct lustre_msg *msg);
1125 __u32 lustre_msg_calc_cksum(struct lustre_msg *msg);
1126 void lustre_msg_set_handle(struct lustre_msg *msg,struct lustre_handle *handle);
1127 void lustre_msg_set_type(struct lustre_msg *msg, __u32 type);
1128 void lustre_msg_set_opc(struct lustre_msg *msg, __u32 opc);
1129 void lustre_msg_set_last_xid(struct lustre_msg *msg, __u64 last_xid);
1130 void lustre_msg_set_last_committed(struct lustre_msg *msg,__u64 last_committed);
1131 void lustre_msg_set_versions(struct lustre_msg *msg, __u64 *versions);
1132 void lustre_msg_set_transno(struct lustre_msg *msg, __u64 transno);
1133 void lustre_msg_set_status(struct lustre_msg *msg, __u32 status);
1134 void lustre_msg_set_conn_cnt(struct lustre_msg *msg, __u32 conn_cnt);
1135 void ptlrpc_req_set_repsize(struct ptlrpc_request *req, int count, __u32 *sizes);
1136 void ptlrpc_request_set_replen(struct ptlrpc_request *req);
1137 void lustre_msg_set_timeout(struct lustre_msg *msg, __u32 timeout);
1138 void lustre_msg_set_service_time(struct lustre_msg *msg, __u32 service_time);
1139 void lustre_msg_set_cksum(struct lustre_msg *msg, __u32 cksum);
1142 lustre_shrink_reply(struct ptlrpc_request *req, int segment,
1143 unsigned int newlen, int move_data)
1145 LASSERT(req->rq_reply_state);
1146 LASSERT(req->rq_repmsg);
1147 req->rq_replen = lustre_shrink_msg(req->rq_repmsg, segment,
1152 ptlrpc_rqphase_move(struct ptlrpc_request *req, enum rq_phase new_phase)
1154 if (req->rq_phase == new_phase)
1157 if (new_phase == RQ_PHASE_UNREGISTERING) {
1158 req->rq_next_phase = req->rq_phase;
1160 atomic_inc(&req->rq_import->imp_unregistering);
1163 if (req->rq_phase == RQ_PHASE_UNREGISTERING) {
1165 atomic_dec(&req->rq_import->imp_unregistering);
1168 DEBUG_REQ(D_RPCTRACE, req, "move req \"%s\" -> \"%s\"",
1169 ptlrpc_rqphase2str(req), ptlrpc_phase2str(new_phase));
1171 req->rq_phase = new_phase;
1175 ptlrpc_client_early(struct ptlrpc_request *req)
1177 if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
1178 req->rq_reply_deadline > cfs_time_current_sec())
1180 return req->rq_early;
1184 ptlrpc_client_replied(struct ptlrpc_request *req)
1186 if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
1187 req->rq_reply_deadline > cfs_time_current_sec())
1189 return req->rq_replied;
1193 ptlrpc_client_recv(struct ptlrpc_request *req)
1195 if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
1196 req->rq_reply_deadline > cfs_time_current_sec())
1198 return req->rq_receiving_reply;
1202 ptlrpc_client_recv_or_unlink(struct ptlrpc_request *req)
1206 spin_lock(&req->rq_lock);
1207 if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
1208 req->rq_reply_deadline > cfs_time_current_sec()) {
1209 spin_unlock(&req->rq_lock);
1212 rc = req->rq_receiving_reply || req->rq_must_unlink;
1213 spin_unlock(&req->rq_lock);
1218 ptlrpc_client_wake_req(struct ptlrpc_request *req)
1220 if (req->rq_set == NULL)
1221 cfs_waitq_signal(&req->rq_reply_waitq);
1223 cfs_waitq_signal(&req->rq_set->set_waitq);
1227 ptlrpc_rs_addref(struct ptlrpc_reply_state *rs)
1229 LASSERT(atomic_read(&rs->rs_refcount) > 0);
1230 atomic_inc(&rs->rs_refcount);
1234 ptlrpc_rs_decref(struct ptlrpc_reply_state *rs)
1236 LASSERT(atomic_read(&rs->rs_refcount) > 0);
1237 if (atomic_dec_and_test(&rs->rs_refcount))
1238 lustre_free_reply_state(rs);
1241 /* Should only be called once per req */
1242 static inline void ptlrpc_req_drop_rs(struct ptlrpc_request *req)
1244 if (req->rq_reply_state == NULL)
1245 return; /* shouldn't occur */
1246 ptlrpc_rs_decref(req->rq_reply_state);
1247 req->rq_reply_state = NULL;
1248 req->rq_repmsg = NULL;
1251 static inline __u32 lustre_request_magic(struct ptlrpc_request *req)
1253 return lustre_msg_get_magic(req->rq_reqmsg);
1256 static inline int ptlrpc_req_get_repsize(struct ptlrpc_request *req)
1258 switch (req->rq_reqmsg->lm_magic) {
1259 case LUSTRE_MSG_MAGIC_V2:
1260 return req->rq_reqmsg->lm_repsize;
1262 LASSERTF(0, "incorrect message magic: %08x\n",
1263 req->rq_reqmsg->lm_magic);
1268 /* ldlm/ldlm_lib.c */
1269 int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg);
1270 int client_obd_cleanup(struct obd_device *obddev);
1271 int client_connect_import(const struct lu_env *env,
1272 struct obd_export **exp, struct obd_device *obd,
1273 struct obd_uuid *cluuid, struct obd_connect_data *,
1275 int client_disconnect_export(struct obd_export *exp);
1276 int client_import_add_conn(struct obd_import *imp, struct obd_uuid *uuid,
1278 int client_import_del_conn(struct obd_import *imp, struct obd_uuid *uuid);
1279 int import_set_conn_priority(struct obd_import *imp, struct obd_uuid *uuid);
1280 void client_destroy_import(struct obd_import *imp);
1282 /* ptlrpc/pinger.c */
1283 enum timeout_event {
1286 struct timeout_item;
1287 typedef int (*timeout_cb_t)(struct timeout_item *, void *);
1288 int ptlrpc_pinger_add_import(struct obd_import *imp);
1289 int ptlrpc_pinger_del_import(struct obd_import *imp);
1290 int ptlrpc_add_timeout_client(int time, enum timeout_event event,
1291 timeout_cb_t cb, void *data,
1292 struct list_head *obd_list);
1293 int ptlrpc_del_timeout_client(struct list_head *obd_list,
1294 enum timeout_event event);
1295 struct ptlrpc_request * ptlrpc_prep_ping(struct obd_import *imp);
1296 int ptlrpc_obd_ping(struct obd_device *obd);
1297 cfs_time_t ptlrpc_suspend_wakeup_time(void);
1299 void ping_evictor_start(void);
1300 void ping_evictor_stop(void);
1302 #define ping_evictor_start() do {} while (0)
1303 #define ping_evictor_stop() do {} while (0)
1305 int ptlrpc_check_and_wait_suspend(struct ptlrpc_request *req);
1307 /* ptlrpc/ptlrpcd.c */
1310 * Ptlrpcd scope is a set of two threads: ptlrpcd-foo and ptlrpcd-foo-rcv,
1311 * these threads are used to asynchronously send requests queued with
1312 * ptlrpcd_add_req(req, PSCOPE_FOO), and to handle completion callbacks for
1313 * such requests. Multiple scopes are needed to avoid deadlocks.
1315 enum ptlrpcd_scope {
1316 /** Scope of bulk read-write rpcs. */
1318 /** Everything else. */
1323 int ptlrpcd_start(const char *name, struct ptlrpcd_ctl *pc);
1324 void ptlrpcd_stop(struct ptlrpcd_ctl *pc, int force);
1325 void ptlrpcd_wake(struct ptlrpc_request *req);
1326 void ptlrpcd_add_req(struct ptlrpc_request *req, enum ptlrpcd_scope scope);
1327 void ptlrpcd_add_rqset(struct ptlrpc_request_set *set);
1328 int ptlrpcd_addref(void);
1329 void ptlrpcd_decref(void);
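/* A minimal sketch of queueing an asynchronous request through ptlrpcd.
 * The scope enumerator and the interpreter callback below are assumptions
 * for illustration (the actual enumerators are defined in the elided part
 * of enum ptlrpcd_scope above):
 *
 *      req->rq_interpret_reply = my_interpret_cb;
 *      ptlrpcd_add_req(req, PSCOPE_OTHER);
 *
 * The request is then sent and completed by the ptlrpcd threads; the caller
 * must not wait on it synchronously. */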
1331 /* ptlrpc/lproc_ptlrpc.c */
1332 const char* ll_opcode2str(__u32 opcode);
1334 void ptlrpc_lprocfs_register_obd(struct obd_device *obd);
1335 void ptlrpc_lprocfs_unregister_obd(struct obd_device *obd);
1336 void ptlrpc_lprocfs_brw(struct ptlrpc_request *req, int bytes);
1338 static inline void ptlrpc_lprocfs_register_obd(struct obd_device *obd) {}
1339 static inline void ptlrpc_lprocfs_unregister_obd(struct obd_device *obd) {}
1340 static inline void ptlrpc_lprocfs_brw(struct ptlrpc_request *req, int bytes) {}
1343 /* ptlrpc/llog_server.c */
1344 int llog_origin_handle_create(struct ptlrpc_request *req);
1345 int llog_origin_handle_destroy(struct ptlrpc_request *req);
1346 int llog_origin_handle_prev_block(struct ptlrpc_request *req);
1347 int llog_origin_handle_next_block(struct ptlrpc_request *req);
1348 int llog_origin_handle_read_header(struct ptlrpc_request *req);
1349 int llog_origin_handle_close(struct ptlrpc_request *req);
1350 int llog_origin_handle_cancel(struct ptlrpc_request *req);
1351 int llog_catinfo(struct ptlrpc_request *req);
1353 /* ptlrpc/llog_client.c */
1354 extern struct llog_operations llog_client_ops;