4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
22 * have any questions.
27 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
30 * Copyright (c) 2010, 2012, Intel Corporation.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 /** \defgroup PtlRPC Portal RPC and networking module.
38 * PortalRPC is the layer used by the rest of the Lustre code to achieve network
39 * communication: establishing connections with corresponding export and import
40 * states, listening for a service, and sending and receiving RPCs.
41 * PortalRPC also includes the base recovery framework: packet resending and
42 * replaying, reconnections, and the pinger.
44 * PortalRPC utilizes LNet as its transport layer.
58 #if defined(__linux__)
59 #include <linux/lustre_net.h>
60 #elif defined(__APPLE__)
61 #include <darwin/lustre_net.h>
62 #elif defined(__WINNT__)
63 #include <winnt/lustre_net.h>
64 #else
65 #error Unsupported operating system.
66 #endif
68 #include <libcfs/libcfs.h>
70 #include <lnet/lnet.h>
71 #include <lustre/lustre_idl.h>
72 #include <lustre_ha.h>
73 #include <lustre_sec.h>
74 #include <lustre_import.h>
75 #include <lprocfs_status.h>
76 #include <lu_object.h>
77 #include <lustre_req_layout.h>
79 #include <obd_support.h>
80 #include <lustre_ver.h>
82 /* MD flags we _always_ use */
83 #define PTLRPC_MD_OPTIONS 0
86 * Max # of bulk operations in one request.
87 * In order for the client and server to properly negotiate the maximum
88 * possible transfer size, PTLRPC_BULK_OPS_COUNT must be a power-of-two
89 * value. The client is free to limit the actual RPC size for any bulk
90 * transfer via cl_max_pages_per_rpc to some non-power-of-two value. */
91 #define PTLRPC_BULK_OPS_BITS 2
92 #define PTLRPC_BULK_OPS_COUNT (1U << PTLRPC_BULK_OPS_BITS)
94 * PTLRPC_BULK_OPS_MASK is for the convenience of the client only, and
95 * should not be used on the server at all. Otherwise, it imposes a
96 * protocol limitation on the maximum RPC size that can be used by any
97 * RPC sent to that server in the future. Instead, the server should
98 * use the negotiated per-client ocd_brw_size to determine the bulk RPC count.
100 #define PTLRPC_BULK_OPS_MASK (~((__u64)PTLRPC_BULK_OPS_COUNT - 1))
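/*
 * For the values above (PTLRPC_BULK_OPS_BITS == 2) this works out to:
 *	PTLRPC_BULK_OPS_COUNT = 1U << 2 = 4
 *	PTLRPC_BULK_OPS_MASK  = ~((__u64)4 - 1) = ~0x3ULL
 * i.e. the mask clears the low two bits, aligning a client-side size/count
 * down to a multiple of PTLRPC_BULK_OPS_COUNT.
 */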
103 * Define maxima for bulk I/O.
105 * A single PTLRPC BRW request is sent via up to PTLRPC_BULK_OPS_COUNT
106 * of LNET_MTU sized RDMA transfers. Clients and servers negotiate the
107 * currently supported maximum between peers at connect via ocd_brw_size.
109 #define PTLRPC_MAX_BRW_BITS (LNET_MTU_BITS + PTLRPC_BULK_OPS_BITS)
110 #define PTLRPC_MAX_BRW_SIZE (1 << PTLRPC_MAX_BRW_BITS)
111 #define PTLRPC_MAX_BRW_PAGES (PTLRPC_MAX_BRW_SIZE >> CFS_PAGE_SHIFT)
113 #define ONE_MB_BRW_SIZE (1 << LNET_MTU_BITS)
114 #define MD_MAX_BRW_SIZE (1 << LNET_MTU_BITS)
115 #define MD_MAX_BRW_PAGES (MD_MAX_BRW_SIZE >> CFS_PAGE_SHIFT)
116 #define DT_MAX_BRW_SIZE PTLRPC_MAX_BRW_SIZE
117 #define DT_MAX_BRW_PAGES (DT_MAX_BRW_SIZE >> CFS_PAGE_SHIFT)
118 #define OFD_MAX_BRW_SIZE (1 << LNET_MTU_BITS)
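/*
 * Worked example, assuming LNET_MTU is 1MB (LNET_MTU_BITS == 20) and 4K
 * pages (CFS_PAGE_SHIFT == 12):
 *	PTLRPC_MAX_BRW_BITS  = 20 + 2 = 22
 *	PTLRPC_MAX_BRW_SIZE  = 1 << 22 = 4MB (up to four 1MB RDMA transfers)
 *	PTLRPC_MAX_BRW_PAGES = 4MB >> 12 = 1024 pages
 * The 4K page size here is only an assumption for illustration; platforms
 * with larger pages get proportionally fewer pages per RPC.
 */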
120 /* When PAGE_SIZE is a constant, we can check our arithmetic here with cpp! */
121 #ifdef __KERNEL__
122 # if ((PTLRPC_MAX_BRW_PAGES & (PTLRPC_MAX_BRW_PAGES - 1)) != 0)
123 # error "PTLRPC_MAX_BRW_PAGES isn't a power of two"
124 # endif
125 # if (PTLRPC_MAX_BRW_SIZE != (PTLRPC_MAX_BRW_PAGES * CFS_PAGE_SIZE))
126 # error "PTLRPC_MAX_BRW_SIZE isn't PTLRPC_MAX_BRW_PAGES * CFS_PAGE_SIZE"
127 # endif
128 # if (PTLRPC_MAX_BRW_SIZE > LNET_MTU * PTLRPC_BULK_OPS_COUNT)
129 # error "PTLRPC_MAX_BRW_SIZE too big"
130 # endif
131 # if (PTLRPC_MAX_BRW_PAGES > LNET_MAX_IOV * PTLRPC_BULK_OPS_COUNT)
132 # error "PTLRPC_MAX_BRW_PAGES too big"
133 # endif
134 #endif /* __KERNEL__ */
136 #define PTLRPC_NTHRS_INIT 2
141 * Constants determine how memory is used to buffer incoming service requests.
143 * ?_NBUFS # buffers to allocate when growing the pool
144 * ?_BUFSIZE # bytes in a single request buffer
145 * ?_MAXREQSIZE # maximum request service will receive
147 * When fewer than ?_NBUFS/2 buffers are posted for receive, another chunk
148 * of ?_NBUFS is added to the pool.
150 * Messages larger than ?_MAXREQSIZE are dropped. Request buffers are
151 * considered full when less than ?_MAXREQSIZE is left in them.
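 *
 * For example, with the LDLM values defined below (LDLM_SERVER_NBUFS = 64,
 * LDLM_BUFSIZE = 8K, LDLM_MAXREQSIZE = 5K), the server grows the incoming
 * buffer pool by 64 buffers (64 * 8K = 512K) whenever fewer than 32 buffers
 * remain posted, and drops any message larger than 5K.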
156 * These constants determine how threads are created for a ptlrpc service.
158 * ?_NTHRS_INIT # threads to create for each service partition on
159 * initialization. If it is a non-affinity service and
160 * there is only one partition, this is the overall #
161 * of threads for the service while initializing.
162 * ?_NTHRS_BASE # minimum threads that should be created for each
163 * ptlrpc partition to keep the service healthy.
164 * It is the low-water mark of the per-partition
165 * thread upper-limit.
166 * ?_THR_FACTOR # threads that can be added to the thread upper-limit
167 * for each CPU core. This factor is only a reference;
168 * we may decrease it if the number of cores
169 * per CPT is above a limit.
170 * ?_NTHRS_MAX # overall threads that can be created for a service;
171 * it is a soft limit, because if the service runs
172 * on a machine with hundreds of cores and tens of
173 * CPU partitions, we need to guarantee each partition
174 * has ?_NTHRS_BASE threads, which means the total thread
175 * count will be ?_NTHRS_BASE * number_of_cpts and can
176 * exceed ?_NTHRS_MAX.
180 * #define MDS_NTHRS_INIT 2
181 * #define MDS_NTHRS_BASE 64
182 * #define MDS_NTHRS_FACTOR 8
183 * #define MDS_NTHRS_MAX 1024
186 * ---------------------------------------------------------------------
187 * Server(A) has 16 cores, user configured it to 4 partitions so each
188 * partition has 4 cores, then the actual number of service threads on each
189 * partition is:
190 * MDS_NTHRS_BASE(64) + cores(4) * MDS_NTHRS_FACTOR(8) = 96
192 * Total number of threads for the service is:
193 * 96 * partitions(4) = 384
196 * ---------------------------------------------------------------------
197 * Server(B) has 32 cores, user configured it to 4 partitions so each
198 * partition has 8 cores, then the actual number of service threads on each
199 * partition is:
200 * MDS_NTHRS_BASE(64) + cores(8) * MDS_NTHRS_FACTOR(8) = 128
202 * Total number of threads for the service is:
203 * 128 * partitions(4) = 512
206 * ---------------------------------------------------------------------
207 * Server(B) has 96 cores, user configured it to 8 partitions so each
208 * partition has 12 cores, then the actual number of service threads on each
209 * partition is:
210 * MDS_NTHRS_BASE(64) + cores(12) * MDS_NTHRS_FACTOR(8) = 160
212 * Total number of threads for the service is:
213 * 160 * partitions(8) = 1280
215 * However, this is above the soft limit MDS_NTHRS_MAX, so we instead choose
216 * this number as the upper limit of threads for each partition:
217 * MDS_NTHRS_MAX(1024) / partitions(8) = 128
220 * ---------------------------------------------------------------------
221 * Server(C) has a thousand cores and the user configured it to 32 partitions:
222 * MDS_NTHRS_BASE(64) * 32 = 2048
224 * which is already above the soft limit MDS_NTHRS_MAX(1024), but we still need
225 * to guarantee that each partition has at least MDS_NTHRS_BASE(64) threads
226 * to keep the service healthy, so the total number of threads will just be 2048.
228 * NB: we don't suggest choosing a server with that many cores, because the
229 * backend filesystem itself, the buffer cache, or the underlying network
230 * stack might have SMP scalability issues at that scale.
232 * If the user already has a fat machine with hundreds or thousands of cores,
233 * there are two configuration choices:
234 * a) create a CPU table from a subset of all CPUs and run Lustre on
235 * top of this subset
236 * b) bind service threads on a few partitions, see modparameters of
237 * MDS and OSS for details
239 * NB: these calculations (and the examples above) are simplified to aid
240 * understanding; the real implementation is a little more complex,
241 * please see ptlrpc_server_nthreads_check() for details.
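 *
 * A simplified sketch of the per-partition calculation illustrated by the
 * examples above (the real logic handles more corner cases, e.g. reducing
 * the factor for very wide CPU partitions):
 *
 *	nthrs = ?_NTHRS_BASE + cores_per_partition * ?_THR_FACTOR;
 *	nthrs = min(nthrs, ?_NTHRS_MAX / number_of_partitions);
 *	nthrs = max(nthrs, ?_NTHRS_BASE);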
246 * LDLM threads constants:
248 * Given 8 as factor and 24 as base threads number
251 * On a 4-core machine we will have 24 + 8 * 4 = 56 threads.
254 * On an 8-core machine with 2 partitions we will have 24 + 4 * 8 = 56
255 * threads for each partition, and the total thread count will be 112.
258 * On a 64-core machine with 8 partitions we will need LDLM_NTHRS_BASE(24)
259 * threads for each partition to keep the service healthy, so the total
260 * thread count should be 24 * 8 = 192.
262 * So with these constants, the thread count will be at a similar level
263 * to that of old versions, unless the target machine has over a hundred cores.
265 #define LDLM_THR_FACTOR 8
266 #define LDLM_NTHRS_INIT PTLRPC_NTHRS_INIT
267 #define LDLM_NTHRS_BASE 24
268 #define LDLM_NTHRS_MAX (cfs_num_online_cpus() == 1 ? 64 : 128)
270 #define LDLM_BL_THREADS LDLM_NTHRS_AUTO_INIT
271 #define LDLM_CLIENT_NBUFS 1
272 #define LDLM_SERVER_NBUFS 64
273 #define LDLM_BUFSIZE (8 * 1024)
274 #define LDLM_MAXREQSIZE (5 * 1024)
275 #define LDLM_MAXREPSIZE (1024)
278 * MDS threads constants:
280 * Please see the examples in "Thread Constants"; the MDS thread count will be
281 * comparable to that of old versions, unless the server has many cores.
283 #ifndef MDS_MAX_THREADS
284 #define MDS_MAX_THREADS 1024
285 #define MDS_MAX_OTHR_THREADS 256
287 #else /* MDS_MAX_THREADS */
288 #if MDS_MAX_THREADS < PTLRPC_NTHRS_INIT
289 #undef MDS_MAX_THREADS
290 #define MDS_MAX_THREADS PTLRPC_NTHRS_INIT
291 #endif
292 #define MDS_MAX_OTHR_THREADS max(PTLRPC_NTHRS_INIT, MDS_MAX_THREADS / 2)
293 #endif /* MDS_MAX_THREADS */
295 /* default service */
296 #define MDS_THR_FACTOR 8
297 #define MDS_NTHRS_INIT PTLRPC_NTHRS_INIT
298 #define MDS_NTHRS_MAX MDS_MAX_THREADS
299 #define MDS_NTHRS_BASE min(64, MDS_NTHRS_MAX)
301 /* read-page service */
302 #define MDS_RDPG_THR_FACTOR 4
303 #define MDS_RDPG_NTHRS_INIT PTLRPC_NTHRS_INIT
304 #define MDS_RDPG_NTHRS_MAX MDS_MAX_OTHR_THREADS
305 #define MDS_RDPG_NTHRS_BASE min(48, MDS_RDPG_NTHRS_MAX)
307 /* these should be removed when we remove setattr service in the future */
308 #define MDS_SETA_THR_FACTOR 4
309 #define MDS_SETA_NTHRS_INIT PTLRPC_NTHRS_INIT
310 #define MDS_SETA_NTHRS_MAX MDS_MAX_OTHR_THREADS
311 #define MDS_SETA_NTHRS_BASE min(48, MDS_SETA_NTHRS_MAX)
313 /* non-affinity threads */
314 #define MDS_OTHR_NTHRS_INIT PTLRPC_NTHRS_INIT
315 #define MDS_OTHR_NTHRS_MAX MDS_MAX_OTHR_THREADS
319 * Assume file name length = FNAME_MAX = 256 (true for ext3).
320 * path name length = PATH_MAX = 4096
321 * LOV MD size max = EA_MAX = 24 * 2000
322 * (NB: 24 is size of lov_ost_data)
323 * LOV LOGCOOKIE size max = 32 * 2000
324 * (NB: 32 is size of llog_cookie)
325 * symlink: FNAME_MAX + PATH_MAX <- largest
326 * link: FNAME_MAX + PATH_MAX (mds_rec_link < mds_rec_create)
327 * rename: FNAME_MAX + FNAME_MAX
328 * open: FNAME_MAX + EA_MAX
330 * MDS_MAXREQSIZE ~= 4736 bytes =
331 * lustre_msg + ldlm_request + mdt_body + mds_rec_create + FNAME_MAX + PATH_MAX
332 * MDS_MAXREPSIZE ~= 8300 bytes = lustre_msg + llog_header
334 * Realistic size is about 512 bytes (20 character name + 128 char symlink),
335 * except in the open case where there are a large number of OSTs in a LOV.
337 #define MDS_MAXREQSIZE (5 * 1024) /* >= 4736 */
338 #define MDS_MAXREPSIZE (9 * 1024) /* >= 8300 */
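/*
 * Illustrative decomposition of the 4736-byte estimate above: PATH_MAX (4096)
 * + FNAME_MAX (256) leaves roughly 384 bytes for lustre_msg + ldlm_request +
 * mdt_body + mds_rec_create combined.
 */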
341 * MDS incoming request with LOV EA
342 * 24 = sizeof(struct lov_ost_data), i.e: replay of opencreate
344 #define MDS_LOV_MAXREQSIZE max(MDS_MAXREQSIZE, \
345 362 + LOV_MAX_STRIPE_COUNT * 24)
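/*
 * Illustrative arithmetic: with the LOV_MAX_STRIPE_COUNT of 2000 assumed in
 * the EA size estimates above, MDS_LOV_MAXREQSIZE works out to
 * 362 + 2000 * 24 = 48362 bytes, i.e. roughly the 48K referred to below.
 */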
347 * MDS outgoing reply with LOV EA
349 * NB: the max reply size a Lustre 2.4+ client can get from an old MDS is:
350 * LOV_MAX_STRIPE_COUNT * (llog_cookie + lov_ost_data) + extra bytes
352 * but a 2.4 or later MDS will never send a reply with llog_cookie to any
353 * client version. This macro is defined for the server-side reply buffer size.
355 #define MDS_LOV_MAXREPSIZE MDS_LOV_MAXREQSIZE
358 * The update request includes all of the updates from the create, which might
359 * include linkea (4K maximum), together with other updates, so we set it to 9K:
360 * lustre_msg + ptlrpc_body + UPDATE_BUF_SIZE (8K)
362 #define MDS_OUT_MAXREQSIZE (9 * 1024)
363 #define MDS_OUT_MAXREPSIZE MDS_MAXREPSIZE
365 /** MDS_BUFSIZE = max_reqsize (w/o LOV EA) + max sptlrpc payload size */
366 #define MDS_BUFSIZE max_t(int, MDS_MAXREQSIZE + 1024, 8 * 1024)
369 * MDS_LOV_BUFSIZE should be at least max_reqsize (with LOV EA) +
370 * max sptlrpc payload size; however, we need to allocate a much larger buffer
371 * because LNet requires each MD (rqbd) to have at least MDS_LOV_MAXREQSIZE
372 * bytes left in order not to drop a maximum-sized incoming request.
373 * So if MDS_LOV_BUFSIZE were only a little larger than MDS_LOV_MAXREQSIZE,
374 * it could only fit one request even with 48K bytes left in
375 * the rqbd, and memory utilization would be very low.
377 * Meanwhile, the rqbd size can't be too large, because an rqbd can't be
378 * reused until all requests that fit in it have been processed and released,
379 * which means one long-blocked request can prevent the rqbd from being reused.
380 * Now we set the request buffer size to 128K, so even if each rqbd is unlinked
381 * from LNet with 48K unused, buffer utilization will be about 62%.
382 * Please check LU-2432 for details.
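 *
 * Worked example of the utilization figure: with a 128K request buffer and
 * MDS_LOV_MAXREQSIZE of roughly 48K, LNet unlinks the rqbd once fewer than
 * 48K remain, so at least 128K - 48K = 80K of every buffer is consumed
 * before it is retired, giving 80/128 = ~62% utilization.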
384 /** MDS_LOV_BUFSIZE = max_reqsize (w/ LOV EA) + max sptlrpc payload size */
385 #define MDS_LOV_BUFSIZE max_t(int, MDS_LOV_MAXREQSIZE + 1024, \
389 * MDS_OUT_BUFSIZE = max_out_reqsize + max sptlrpc payload (~1K), which is
390 * about 10K. For the same reason as MDS_LOV_BUFSIZE, we also give some
391 * extra bytes to each request buffer to improve the buffer utilization rate.
393 #define MDS_OUT_BUFSIZE max_t(int, MDS_OUT_MAXREQSIZE + 1024, \
396 /** FLD_MAXREQSIZE == lustre_msg + __u32 padding + ptlrpc_body + opc */
397 #define FLD_MAXREQSIZE (160)
399 /** FLD_MAXREPSIZE == lustre_msg + ptlrpc_body */
400 #define FLD_MAXREPSIZE (152)
401 #define FLD_BUFSIZE (1 << 12)
404 * SEQ_MAXREQSIZE == lustre_msg + __u32 padding + ptlrpc_body + opc + lu_range +
406 #define SEQ_MAXREQSIZE (160)
408 /** SEQ_MAXREPSIZE == lustre_msg + ptlrpc_body + lu_range */
409 #define SEQ_MAXREPSIZE (152)
410 #define SEQ_BUFSIZE (1 << 12)
412 /** MGS threads must be >= 3, see bug 22458 comment #28 */
413 #define MGS_NTHRS_INIT (PTLRPC_NTHRS_INIT + 1)
414 #define MGS_NTHRS_MAX 32
417 #define MGS_BUFSIZE (8 * 1024)
418 #define MGS_MAXREQSIZE (7 * 1024)
419 #define MGS_MAXREPSIZE (9 * 1024)
422 * OSS threads constants:
424 * Given 8 as factor and 64 as base threads number
427 * On 8-core server configured to 2 partitions, we will have
428 * 64 + 8 * 4 = 96 threads for each partition, 192 total threads.
431 * On 32-core machine configured to 4 partitions, we will have
432 * 64 + 8 * 8 = 128 threads for each partition, so total threads number
433 * will be 128 * 4 = 512.
436 * On 64-core machine configured to 4 partitions, we will have
437 * 64 + 16 * 8 = 192 threads for each partition, so total threads number
438 * will be 192 * 4 = 768 which is above limit OSS_NTHRS_MAX(512), so we
439 * cut off the value to OSS_NTHRS_MAX(512) / 4 which is 128 threads
440 * for each partition.
442 * So we can see that with these constants, the thread count will be at a
443 * similar level to that of old versions, unless the server has many cores.
445 /* depress threads factor for VM with small memory size */
446 #define OSS_THR_FACTOR min_t(int, 8, \
447 CFS_NUM_CACHEPAGES >> (28 - CFS_PAGE_SHIFT))
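/*
 * Illustrative arithmetic (assuming 4K pages, i.e. CFS_PAGE_SHIFT == 12):
 * CFS_NUM_CACHEPAGES >> (28 - 12) is roughly total_memory / 256MB, so a VM
 * with 1GB of memory gets a factor of 4, while any host with 2GB or more is
 * capped at the maximum factor of 8.
 */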
448 #define OSS_NTHRS_INIT (PTLRPC_NTHRS_INIT + 1)
449 #define OSS_NTHRS_BASE 64
450 #define OSS_NTHRS_MAX 512
452 /* threads for handling "create" request */
453 #define OSS_CR_THR_FACTOR 1
454 #define OSS_CR_NTHRS_INIT PTLRPC_NTHRS_INIT
455 #define OSS_CR_NTHRS_BASE 8
456 #define OSS_CR_NTHRS_MAX 64
459 * OST_IO_MAXREQSIZE ~=
460 * lustre_msg + ptlrpc_body + obdo + obd_ioobj +
461 * DT_MAX_BRW_PAGES * niobuf_remote
463 * - single object with 16 pages is 512 bytes
464 * - OST_IO_MAXREQSIZE must be at least 1 page of cookies plus some spillover
465 * - Must be a multiple of 1024
466 * - actual size is about 18K
468 #define _OST_MAXREQSIZE_SUM (sizeof(struct lustre_msg) + \
469 sizeof(struct ptlrpc_body) + \
470 sizeof(struct obdo) + \
471 sizeof(struct obd_ioobj) + \
472 sizeof(struct niobuf_remote) * DT_MAX_BRW_PAGES)
474 * FIEMAP request can be 4K+ for now
476 #define OST_MAXREQSIZE (5 * 1024)
477 #define OST_IO_MAXREQSIZE max_t(int, OST_MAXREQSIZE, \
478 (((_OST_MAXREQSIZE_SUM - 1) | (1024 - 1)) + 1))
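/*
 * The (((x - 1) | (1024 - 1)) + 1) expression above rounds x up to the next
 * multiple of 1024; e.g. a hypothetical _OST_MAXREQSIZE_SUM of 17000 bytes
 * would yield an OST_IO_MAXREQSIZE of 17408 (17 * 1024).
 */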
480 #define OST_MAXREPSIZE (9 * 1024)
481 #define OST_IO_MAXREPSIZE OST_MAXREPSIZE
484 /** OST_BUFSIZE = max_reqsize + max sptlrpc payload size */
485 #define OST_BUFSIZE max_t(int, OST_MAXREQSIZE + 1024, 16 * 1024)
487 * OST_IO_MAXREQSIZE is about 18K; giving an extra 46K increases the buffer
488 * utilization rate of the request buffer, see the comment of MDS_LOV_BUFSIZE for details.
490 #define OST_IO_BUFSIZE max_t(int, OST_IO_MAXREQSIZE + 1024, 64 * 1024)
492 /* Macro to hide a typecast. */
493 #define ptlrpc_req_async_args(req) ((void *)&req->rq_async_args)
496 * Structure defining a single portal connection.
498 struct ptlrpc_connection {
499 /** linkage for connections hash table */
500 cfs_hlist_node_t c_hash;
501 /** Our own lnet nid for this connection */
503 /** Remote side nid for this connection */
504 lnet_process_id_t c_peer;
505 /** UUID of the other side */
506 struct obd_uuid c_remote_uuid;
507 /** reference counter for this connection */
508 cfs_atomic_t c_refcount;
511 /** Client definition for PortalRPC */
512 struct ptlrpc_client {
513 /** What lnet portal does this client send messages to by default */
514 __u32 cli_request_portal;
515 /** What portal do we expect replies on */
516 __u32 cli_reply_portal;
517 /** Name of the client */
521 /** state flags of requests */
522 /* XXX only ones left are those used by the bulk descs as well! */
523 #define PTL_RPC_FL_INTR (1 << 0) /* reply wait was interrupted by user */
524 #define PTL_RPC_FL_TIMEOUT (1 << 7) /* request timed out waiting for reply */
526 #define REQ_MAX_ACK_LOCKS 8
528 union ptlrpc_async_args {
530 * Scratchpad for passing args to completion interpreter. Users
531 * cast to the struct of their choosing, and CLASSERT that this is
532 * big enough. For _tons_ of context, OBD_ALLOC a struct and store
533 * a pointer to it here. The pointer_arg ensures this struct is at
534 * least big enough for that.
536 void *pointer_arg[11];
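	/*
	 * Illustrative (hypothetical) usage sketch: a caller declares its own
	 * argument struct, asserts at compile time that it fits here, and then
	 * accesses it through ptlrpc_req_async_args():
	 *
	 *	struct my_async_args {
	 *		struct obd_export *aa_exp;
	 *		int                aa_flags;
	 *	};
	 *
	 *	CLASSERT(sizeof(struct my_async_args) <=
	 *		 sizeof(req->rq_async_args));
	 *	struct my_async_args *aa = ptlrpc_req_async_args(req);
	 *
	 * "my_async_args", "aa_exp" and "aa_flags" are made-up names; real
	 * callers define their own argument struct.
	 */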
540 struct ptlrpc_request_set;
541 typedef int (*set_interpreter_func)(struct ptlrpc_request_set *, void *, int);
542 typedef int (*set_producer_func)(struct ptlrpc_request_set *, void *);
545 * Definition of request set structure.
546 * A request set is a list of requests (not necessarily to the same target)
547 * that, once populated with RPCs, could be sent in parallel.
548 * There are two kinds of request sets: general purpose, and with a dedicated
549 * serving thread. An example of the latter is the ptlrpcd set.
550 * For general purpose sets, once the set has started sending it is impossible
551 * to add new requests to it.
552 * Provides a way to call "completion callbacks" when all requests in the set
553 * have returned; see the usage sketch below.
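 *
 * Illustrative usage sketch (simplified, error handling omitted; the helpers
 * ptlrpc_prep_set(), ptlrpc_set_add_req(), ptlrpc_set_wait() and
 * ptlrpc_set_destroy() are declared elsewhere in the ptlrpc headers):
 *
 *	struct ptlrpc_request_set *set = ptlrpc_prep_set();
 *
 *	ptlrpc_set_add_req(set, req1);
 *	ptlrpc_set_add_req(set, req2);
 *	rc = ptlrpc_set_wait(set);	(sends all RPCs and waits for replies)
 *	ptlrpc_set_destroy(set);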
555 struct ptlrpc_request_set {
556 cfs_atomic_t set_refcount;
557 /** number of in queue requests */
558 cfs_atomic_t set_new_count;
559 /** number of uncompleted requests */
560 cfs_atomic_t set_remaining;
561 /** wait queue to wait on for request events */
562 cfs_waitq_t set_waitq;
563 cfs_waitq_t *set_wakeup_ptr;
564 /** List of requests in the set */
565 cfs_list_t set_requests;
567 * List of completion callbacks to be called when the set is completed
568 * This is only used if \a set_interpret is NULL.
569 * Links struct ptlrpc_set_cbdata.
571 cfs_list_t set_cblist;
572 /** Completion callback, if only one. */
573 set_interpreter_func set_interpret;
574 /** opaque argument passed to completion \a set_interpret callback. */
577 * Lock for \a set_new_requests manipulations
578 * locked so that any old caller can communicate requests to
579 * the set holder who can then fold them into the lock-free set
581 spinlock_t set_new_req_lock;
582 /** List of new yet unsent requests. Only used with ptlrpcd now. */
583 cfs_list_t set_new_requests;
585 /** rq_status of requests that have been freed already */
587 /** Additional fields used by the flow control extension */
588 /** Maximum number of RPCs in flight */
589 int set_max_inflight;
590 /** Callback function used to generate RPCs */
591 set_producer_func set_producer;
592 /** opaque argument passed to the producer callback */
593 void *set_producer_arg;
597 * Description of a single ptlrpc_set callback
599 struct ptlrpc_set_cbdata {
600 /** List linkage item */
602 /** Pointer to interpreting function */
603 set_interpreter_func psc_interpret;
604 /** Opaque argument to pass to the callback */
608 struct ptlrpc_bulk_desc;
609 struct ptlrpc_service_part;
610 struct ptlrpc_service;
613 * ptlrpc callback & work item stuff
615 struct ptlrpc_cb_id {
616 void (*cbid_fn)(lnet_event_t *ev); /* specific callback fn */
617 void *cbid_arg; /* additional arg */
620 /** Maximum number of locks to fit into reply state */
621 #define RS_MAX_LOCKS 8
625 * Structure to define reply state on the server
626 * Reply state holds various reply message information. Also for "difficult"
627 * replies (rep-ack case) we store the state after sending reply and wait
628 * for the client to acknowledge the reception. In these cases locks could be
629 * added to the state for replay/failover consistency guarantees.
631 struct ptlrpc_reply_state {
632 /** Callback description */
633 struct ptlrpc_cb_id rs_cb_id;
634 /** Linkage for list of all reply states in a system */
636 /** Linkage for list of all reply states on same export */
637 cfs_list_t rs_exp_list;
638 /** Linkage for list of all reply states for same obd */
639 cfs_list_t rs_obd_list;
641 cfs_list_t rs_debug_list;
643 /** A spinlock to protect the reply state flags */
645 /** Reply state flags */
646 unsigned long rs_difficult:1; /* ACK/commit stuff */
647 unsigned long rs_no_ack:1; /* no ACK, even for
648 difficult requests */
649 unsigned long rs_scheduled:1; /* being handled? */
650 unsigned long rs_scheduled_ever:1;/* any schedule attempts? */
651 unsigned long rs_handled:1; /* been handled yet? */
652 unsigned long rs_on_net:1; /* reply_out_callback pending? */
653 unsigned long rs_prealloc:1; /* rs from prealloc list */
654 unsigned long rs_committed:1;/* the transaction was committed
655 and the rs was dispatched
656 by ptlrpc_commit_replies */
657 /** Size of the state */
661 /** Transaction number */
665 struct obd_export *rs_export;
666 struct ptlrpc_service_part *rs_svcpt;
667 /** Lnet metadata handle for the reply */
668 lnet_handle_md_t rs_md_h;
669 cfs_atomic_t rs_refcount;
671 /** Context for the service thread */
672 struct ptlrpc_svc_ctx *rs_svc_ctx;
673 /** Reply buffer (actually sent to the client), encoded if needed */
674 struct lustre_msg *rs_repbuf; /* wrapper */
675 /** Size of the reply buffer */
676 int rs_repbuf_len; /* wrapper buf length */
677 /** Size of the reply message */
678 int rs_repdata_len; /* wrapper msg length */
680 * Actual reply message. Its content is encrypted (if needed) to
681 * produce the reply buffer for actual sending. In the simple case
682 * of no network encryption we just set \a rs_repbuf to \a rs_msg
684 struct lustre_msg *rs_msg; /* reply message */
686 /** Number of locks awaiting client ACK */
688 /** Handles of locks awaiting client reply ACK */
689 struct lustre_handle rs_locks[RS_MAX_LOCKS];
690 /** Lock modes of locks in \a rs_locks */
691 ldlm_mode_t rs_modes[RS_MAX_LOCKS];
694 struct ptlrpc_thread;
698 RQ_PHASE_NEW = 0xebc0de00,
699 RQ_PHASE_RPC = 0xebc0de01,
700 RQ_PHASE_BULK = 0xebc0de02,
701 RQ_PHASE_INTERPRET = 0xebc0de03,
702 RQ_PHASE_COMPLETE = 0xebc0de04,
703 RQ_PHASE_UNREGISTERING = 0xebc0de05,
704 RQ_PHASE_UNDEFINED = 0xebc0de06
707 /** Type of request interpreter call-back */
708 typedef int (*ptlrpc_interpterer_t)(const struct lu_env *env,
709 struct ptlrpc_request *req,
713 * Definition of request pool structure.
714 * The pool is used to store empty preallocated requests for the case
715 * when we would actually need to send something without performing
716 * any allocations (to avoid e.g. OOM).
718 struct ptlrpc_request_pool {
719 /** Locks the list */
721 /** list of ptlrpc_request structs */
722 cfs_list_t prp_req_list;
723 /** Maximum message size that would fit into a request from this pool */
725 /** Function to allocate more requests for this pool */
726 void (*prp_populate)(struct ptlrpc_request_pool *, int);
735 * \defgroup nrs Network Request Scheduler
738 struct ptlrpc_nrs_policy;
739 struct ptlrpc_nrs_resource;
740 struct ptlrpc_nrs_request;
743 * NRS control operations.
745 * These are common for all policies.
747 enum ptlrpc_nrs_ctl {
749 * Activate the policy.
751 PTLRPC_NRS_CTL_START,
753 * Reserved for multiple primary policies, which may be a possibility
758 * Recycle resources for inactive policies.
760 PTLRPC_NRS_CTL_SHRINK,
762 * Not a valid opcode.
764 PTLRPC_NRS_CTL_INVALID,
766 * Policies can start using opcodes from this value and onwards for
767 * their own purposes; the assigned value itself is arbitrary.
769 PTLRPC_NRS_CTL_1ST_POL_SPEC = 0x20,
773 * NRS policy operations.
775 * These determine the behaviour of a policy, and are called in response to
776 * NRS core events.
778 struct ptlrpc_nrs_pol_ops {
780 * Called during policy registration; this operation is optional.
782 * \param[in] policy The policy being initialized
784 int (*op_policy_init) (struct ptlrpc_nrs_policy *policy);
786 * Called during policy unregistration; this operation is optional.
788 * \param[in] policy The policy being unregistered/finalized
790 void (*op_policy_fini) (struct ptlrpc_nrs_policy *policy);
792 * Called when activating a policy via lprocfs; policies allocate and
793 * initialize their resources here; this operation is optional.
795 * \param[in] policy The policy being started
797 * \see nrs_policy_start_locked()
799 int (*op_policy_start) (struct ptlrpc_nrs_policy *policy);
801 * Called when deactivating a policy via lprocfs; policies deallocate
802 * their resources here; this operation is optional
804 * \param[in] policy The policy being stopped
806 * \see nrs_policy_stop_final()
808 void (*op_policy_stop) (struct ptlrpc_nrs_policy *policy);
810 * Used for policy-specific operations; i.e. not generic ones like
811 * \e PTLRPC_NRS_CTL_START and \e PTLRPC_NRS_CTL_GET_INFO; analogous
812 * to an ioctl; this operation is optional.
814 * \param[in] policy The policy carrying out operation \a opc
815 * \param[in] opc The command operation being carried out
816 * \param[in,out] arg A generic buffer for communication between the
817 * user and the control operation
822 * \see ptlrpc_nrs_policy_control()
824 int (*op_policy_ctl) (struct ptlrpc_nrs_policy *policy,
825 enum ptlrpc_nrs_ctl opc, void *arg);
828 * Called when obtaining references to the resources of the resource
829 * hierarchy for a request that has arrived for handling at the PTLRPC
830 * service. Policies should return -ve for requests they do not wish
831 * to handle. This operation is mandatory.
833 * \param[in] policy The policy we're getting resources for.
834 * \param[in] nrq The request we are getting resources for.
835 * \param[in] parent The parent resource of the resource being
836 * requested; set to NULL if none.
837 * \param[out] resp The resource is to be returned here; the
838 * fallback policy in an NRS head should
839 * \e always return a non-NULL pointer value.
840 * \param[in] moving_req When set, signifies that this is an attempt
841 * to obtain resources for a request being moved
842 * to the high-priority NRS head by
843 * ldlm_lock_reorder_req().
844 * This implies two things:
845 * 1. We are under obd_export::exp_rpc_lock and
846 * so should not sleep.
847 * 2. We should not perform non-idempotent operations, and
848 * we can skip performing idempotent operations that
849 * were already carried out when resources were first
850 * taken for the request, when it was initialized
851 * in ptlrpc_nrs_req_initialize().
853 * \retval 0, +ve The level of the returned resource in the resource
854 * hierarchy; currently only 0 (for a non-leaf resource)
855 * and 1 (for a leaf resource) are supported by the framework.
859 * \see ptlrpc_nrs_req_initialize()
860 * \see ptlrpc_nrs_hpreq_add_nolock()
861 * \see ptlrpc_nrs_req_hp_move()
863 int (*op_res_get) (struct ptlrpc_nrs_policy *policy,
864 struct ptlrpc_nrs_request *nrq,
865 struct ptlrpc_nrs_resource *parent,
866 struct ptlrpc_nrs_resource **resp,
869 * Called when releasing references taken for resources in the resource
870 * hierarchy for the request; this operation is optional.
872 * \param[in] policy The policy the resource belongs to
873 * \param[in] res The resource to be freed
875 * \see ptlrpc_nrs_req_finalize()
876 * \see ptlrpc_nrs_hpreq_add_nolock()
877 * \see ptlrpc_nrs_req_hp_move()
879 void (*op_res_put) (struct ptlrpc_nrs_policy *policy,
880 struct ptlrpc_nrs_resource *res);
883 * Obtain a request for handling from the policy via polling; this
884 * operation is mandatory.
886 * \param[in] policy The policy to poll
888 * \retval NULL No request available for handling
889 * \retval valid-pointer The request polled for handling
891 * \see ptlrpc_nrs_req_poll_nolock()
893 struct ptlrpc_nrs_request *
894 (*op_req_poll) (struct ptlrpc_nrs_policy *policy);
896 * Called when attempting to add a request to a policy for later
897 * handling; this operation is mandatory.
899 * \param[in] policy The policy on which to enqueue \a nrq
900 * \param[in] nrq The request to enqueue
905 * \see ptlrpc_nrs_req_add_nolock()
907 int (*op_req_enqueue) (struct ptlrpc_nrs_policy *policy,
908 struct ptlrpc_nrs_request *nrq);
910 * Removes a request from the policy's set of pending requests. Normally
911 * called after a request has been polled successfully from the policy
912 * for handling; this operation is mandatory.
914 * \param[in] policy The policy the request \a nrq belongs to
915 * \param[in] nrq The request to dequeue
917 * \see ptlrpc_nrs_req_del_nolock()
919 void (*op_req_dequeue) (struct ptlrpc_nrs_policy *policy,
920 struct ptlrpc_nrs_request *nrq);
922 * Called before carrying out the request; should not block. Could be
923 * used for job/resource control; this operation is optional.
925 * \param[in] policy The policy which is starting to handle request
927 * \param[in] nrq The request
929 * \pre spin_is_locked(&svcpt->scp_req_lock)
931 * \see ptlrpc_nrs_req_start_nolock()
933 void (*op_req_start) (struct ptlrpc_nrs_policy *policy,
934 struct ptlrpc_nrs_request *nrq);
936 * Called after the request has been carried out. Could be used for
937 * job/resource control; this operation is optional.
939 * \param[in] policy The policy which is stopping to handle request
941 * \param[in] nrq The request
943 * \pre spin_is_locked(&svcpt->scp_req_lock)
945 * \see ptlrpc_nrs_req_stop_nolock()
947 void (*op_req_stop) (struct ptlrpc_nrs_policy *policy,
948 struct ptlrpc_nrs_request *nrq);
950 * Registers the policy's lprocfs interface with a PTLRPC service.
952 * \param[in] svc The service
957 int (*op_lprocfs_init) (struct ptlrpc_service *svc);
959 * Unregisters the policy's lprocfs interface from a PTLRPC service.
961 * \param[in] svc The service
963 void (*op_lprocfs_fini) (struct ptlrpc_service *svc);
969 enum nrs_policy_flags {
971 * Fallback policy, use this flag only on a single supported policy per
972 * service. Do not use this flag for policies registering using
973 * ptlrpc_nrs_policy_register() (i.e. ones that are not in
974 * \e nrs_pols_builtin).
976 PTLRPC_NRS_FL_FALLBACK = (1 << 0),
978 * Start policy immediately after registering.
980 PTLRPC_NRS_FL_REG_START = (1 << 1),
982 * This is a policy registering externally with NRS core, via
983 * ptlrpc_nrs_policy_register() (i.e. one that is not in
984 * \e nrs_pols_builtin). Used to avoid ptlrpc_nrs_policy_register()
985 * racing with a policy start operation issued by the user via lprocfs.
987 PTLRPC_NRS_FL_REG_EXTERN = (1 << 2),
993 * Denotes whether an NRS instance is for handling normal or high-priority
994 * RPCs, or whether an operation pertains to one or both of the NRS instances
997 enum ptlrpc_nrs_queue_type {
998 PTLRPC_NRS_QUEUE_REG,
1000 PTLRPC_NRS_QUEUE_BOTH,
1006 * A PTLRPC service has at least one NRS head instance for handling normal
1007 * priority RPCs, and may optionally have a second NRS head instance for
1008 * handling high-priority RPCs. Each NRS head maintains a list of available
1009 * policies, of which one and only one policy is acting as the fallback policy,
1010 * and optionally a different policy may be acting as the primary policy. For
1011 * all RPCs handled by this NRS head instance, NRS core will first attempt to
1012 * enqueue the RPC using the primary policy (if any). The fallback policy is
1013 * used in the following cases:
1014 * - when there was no primary policy in the
1015 * ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED state at the time the request
1016 * was initialized;
1017 * - when the primary policy that was at the
1018 * ptlrpc_nrs_pol_state::PTLRPC_NRS_POL_STATE_STARTED state at the time the
1019 * RPC was initialized denoted that it did not wish, or for some other reason
1020 * was not able, to handle the request, by returning a non-valid NRS resource
1021 * reference;
1022 * - when the primary policy that was at the
1023 * ptlrpc_nrs_pol_state::PTLRPC_NRS_POL_STATE_STARTED state at the time the
1024 * RPC was initialized, fails later during the request enqueueing stage.
1026 * \see nrs_resource_get_safe()
1027 * \see nrs_request_enqueue()
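 *
 * In simplified pseudo-code, the enqueueing decision described above is:
 *
 *	if (nrs_policy_primary != NULL &&
 *	    nrs_policy_primary is in the STARTED state &&
 *	    the primary policy accepts the request (resource get/enqueue OK))
 *		the request is handled by nrs_policy_primary;
 *	else
 *		the request is handled by nrs_policy_fallback;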
1030 spinlock_t nrs_lock;
1031 /** XXX Possibly replace svcpt->scp_req_lock with another lock here. */
1033 * Linkage into nrs_core_heads_list
1035 cfs_list_t nrs_heads;
1037 * List of registered policies
1039 cfs_list_t nrs_policy_list;
1041 * List of policies with queued requests. Policies that have any
1042 * outstanding requests are queued here, and this list is queried
1043 * in a round-robin manner from NRS core when obtaining a request
1044 * for handling. This ensures that requests from policies that at some
1045 * point transition away from the
1046 * ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED state are drained.
1048 cfs_list_t nrs_policy_queued;
1050 * Service partition for this NRS head
1052 struct ptlrpc_service_part *nrs_svcpt;
1054 * Primary policy, which is the preferred policy for handling RPCs
1056 struct ptlrpc_nrs_policy *nrs_policy_primary;
1058 * Fallback policy, which is the backup policy for handling RPCs
1060 struct ptlrpc_nrs_policy *nrs_policy_fallback;
1062 * This NRS head handles either HP or regular requests
1064 enum ptlrpc_nrs_queue_type nrs_queue_type;
1066 * # queued requests from all policies in this NRS head
1068 unsigned long nrs_req_queued;
1070 * # scheduled requests from all policies in this NRS head
1072 unsigned long nrs_req_started;
1074 * # policies on this NRS
1075 * TODO: Can we avoid having this?
1077 unsigned nrs_num_pols;
1079 * This NRS head is in progress of starting a policy
1081 unsigned nrs_policy_starting:1;
1083 * In progress of shutting down the whole NRS head; used during
1086 unsigned nrs_stopping:1;
1089 #define NRS_POL_NAME_MAX 16
1092 * NRS policy registering descriptor
1094 * Is used to hold a description of a policy that can be passed to NRS core in
1095 * order to register the policy with NRS heads in different PTLRPC services.
1097 struct ptlrpc_nrs_pol_desc {
1099 * Human-readable policy name
1101 char pd_name[NRS_POL_NAME_MAX];
1103 * NRS operations for this policy
1105 struct ptlrpc_nrs_pol_ops *pd_ops;
1107 * Service Compatibility function; this determines whether a policy is
1108 * adequate for handling RPCs of a particular PTLRPC service.
1110 * XXX: This should give the same result during policy
1111 * registration and unregistration, and for all partitions of a
1112 * service; so the result should not depend on temporal service
1113 * or other properties that may influence the result.
1115 bool (*pd_compat) (struct ptlrpc_service *svc,
1116 const struct ptlrpc_nrs_pol_desc *desc);
1118 * Optionally set for policies that support a single ptlrpc service,
1119 * i.e. ones that have \a pd_compat set to nrs_policy_compat_one()
1121 char *pd_compat_svc_name;
1123 * Bitmask of nrs_policy_flags
1127 * Link into nrs_core::nrs_policies
1135 * Policies transition from one state to the other during their lifetime
1137 enum ptlrpc_nrs_pol_state {
1139 * Not a valid policy state.
1141 NRS_POL_STATE_INVALID,
1143 * For now, this state is used exclusively for policies that register
1144 * externally to NRS core, i.e. ones that do so via
1145 * ptlrpc_nrs_policy_register() and are not part of nrs_pols_builtin;
1146 * it is used to prevent a race condition between the policy registering
1147 * with more than one service partition while service is operational,
1148 * and the user starting the policy via lprocfs.
1150 * \see nrs_pol_make_avail()
1152 NRS_POL_STATE_UNAVAIL,
1154 * Policies are in this state either at the start of their life, or
1155 * transition here when the user selects a different policy to act
1156 * as the primary one.
1158 NRS_POL_STATE_STOPPED,
1160 * Policy is in the process of stopping
1162 NRS_POL_STATE_STOPPING,
1164 * Policy is in the process of starting
1166 NRS_POL_STATE_STARTING,
1168 * A policy is in this state in two cases:
1169 * - it is the fallback policy, which is always in this state.
1170 * - it has been activated by the user; i.e. it is the primary policy,
1172 NRS_POL_STATE_STARTED,
1176 * NRS policy information
1178 * Used for obtaining information for the status of a policy via lprocfs
1180 struct ptlrpc_nrs_pol_info {
1184 char pi_name[NRS_POL_NAME_MAX];
1186 * Current policy state
1188 enum ptlrpc_nrs_pol_state pi_state;
1190 * # RPCs enqueued for later dispatching by the policy
1194 * # RPCs started for dispatch by the policy
1196 long pi_req_started;
1198 * Is this a fallback policy?
1200 unsigned pi_fallback:1;
1206 * There is one instance of this for each policy in each NRS head of each
1207 * PTLRPC service partition.
1209 struct ptlrpc_nrs_policy {
1211 * Linkage into the NRS head's list of policies,
1212 * ptlrpc_nrs:nrs_policy_list
1214 cfs_list_t pol_list;
1216 * Linkage into the NRS head's list of policies with enqueued
1217 * requests ptlrpc_nrs:nrs_policy_queued
1219 cfs_list_t pol_list_queued;
1221 * Current state of this policy
1223 enum ptlrpc_nrs_pol_state pol_state;
1225 * Bitmask of nrs_policy_flags
1229 * # RPCs enqueued for later dispatching by the policy
1231 long pol_req_queued;
1233 * # RPCs started for dispatch by the policy
1235 long pol_req_started;
1237 * Usage reference count taken on the policy instance
1241 * The NRS head this policy has been created at
1243 struct ptlrpc_nrs *pol_nrs;
1245 * NRS operations for this policy; points to ptlrpc_nrs_pol_desc::pd_ops
1247 struct ptlrpc_nrs_pol_ops *pol_ops;
1249 * Private policy data; varies by policy type
1253 * Human-readable policy name; points to ptlrpc_nrs_pol_desc::pd_name
1261 * Resources are embedded into two types of NRS entities:
1262 * - Inside NRS policies, in the policy's private data in
1263 * ptlrpc_nrs_policy::pol_private
1264 * - In objects that act as prime-level scheduling entities in different NRS
1265 * policies; e.g. on a policy that performs round robin or similar order
1266 * scheduling across client NIDs, there would be one NRS resource per unique
1267 * client NID. On a policy which performs round robin scheduling across
1268 * backend filesystem objects, there would be one resource associated with
1269 * each of the backend filesystem objects partaking in the scheduling
1270 * performed by the policy.
1272 * NRS resources share a parent-child relationship, in which resources embedded
1273 * in policy instances are the parent entities, with all scheduling entities
1274 * a policy schedules across being the children, thus forming a simple resource
1275 * hierarchy. This hierarchy may be extended with one or more levels in the
1276 * future if the ability to have more than one primary policy is added.
1278 * Upon request initialization, references to the then active NRS policies are
1279 * taken and used to later handle the dispatching of the request with one of
1280 * these policies.
1282 * \see nrs_resource_get_safe()
1283 * \see ptlrpc_nrs_req_add()
1285 struct ptlrpc_nrs_resource {
1287 * This NRS resource's parent; is NULL for resources embedded in NRS
1288 * policy instances; i.e. those are top-level ones.
1290 struct ptlrpc_nrs_resource *res_parent;
1292 * The policy associated with this resource.
1294 struct ptlrpc_nrs_policy *res_policy;
1307 * This policy is a logical wrapper around previous, non-NRS functionality.
1308 * It dispatches RPCs in the same order as they arrive from the network. This
1309 * policy is currently used as the fallback policy, and the only enabled policy
1310 * on all NRS heads of all PTLRPC service partitions.
1315 * Private data structure for the FIFO policy
1317 struct nrs_fifo_head {
1319 * Resource object for policy instance.
1321 struct ptlrpc_nrs_resource fh_res;
1323 * List of queued requests.
1327 * For debugging purposes.
1332 struct nrs_fifo_req {
1333 /** request header, must be the first member of structure */
1343 * Instances of this object exist embedded within ptlrpc_request; the main
1344 * purpose of this object is to hold references to the request's resources
1345 * for the lifetime of the request, and to hold properties that policies
1346 * use for determining the request's scheduling priority.
1348 struct ptlrpc_nrs_request {
1350 * The request's resource hierarchy.
1352 struct ptlrpc_nrs_resource *nr_res_ptrs[NRS_RES_MAX];
1354 * Index into ptlrpc_nrs_request::nr_res_ptrs of the resource of the
1355 * policy that was used to enqueue the request.
1357 * \see nrs_request_enqueue()
1359 unsigned nr_res_idx;
1360 unsigned nr_initialized:1;
1361 unsigned nr_enqueued:1;
1362 unsigned nr_dequeued:1;
1363 unsigned nr_started:1;
1364 unsigned nr_finalized:1;
1365 cfs_binheap_node_t nr_node;
1368 * Policy-specific fields, used for determining a request's scheduling
1369 * priority, and other supporting functionality.
1373 * Fields for the FIFO policy
1375 struct nrs_fifo_req fifo;
1378 * Externally-registering policies may want to use this to allocate
1379 * their own request properties.
1387 * Basic request prioritization operations structure.
1388 * The whole idea is centered around locks and RPCs that might affect locks.
1389 * When a lock is contended we try to give priority to RPCs that might lead
1390 * to the fastest release of that lock.
1391 * Currently this is implemented only for OSTs, in a way that gives all
1392 * IO and truncate RPCs coming from a locked region where a lock is
1393 * contended priority over other requests.
1395 struct ptlrpc_hpreq_ops {
1397 * Check if the lock handle of the given lock is the same as
1398 * taken from the request.
1400 int (*hpreq_lock_match)(struct ptlrpc_request *, struct ldlm_lock *);
1402 * Check if the request is a high priority one.
1404 int (*hpreq_check)(struct ptlrpc_request *);
1406 * Called after the request has been handled.
1408 void (*hpreq_fini)(struct ptlrpc_request *);
1412 * Represents a remote procedure call.
1414 * This is a staple structure used by everybody wanting to send a request
1415 * in Lustre.
1417 struct ptlrpc_request {
1418 /* Request type: one of PTL_RPC_MSG_* */
1420 /** Result of request processing */
1423 * Linkage item through which this request is included into
1424 * sending/delayed lists on client and into rqbd list on server
1428 * Server-side list of incoming unserved requests sorted by arrival
1429 * time. Traversed from time to time to notice about-to-expire
1430 * requests and send back "early replies" to clients to let them
1431 * know the server is alive and well, just too busy to service their
1432 * request yet.
1434 cfs_list_t rq_timed_list;
1435 /** server-side history, used for debugging purposes. */
1436 cfs_list_t rq_history_list;
1437 /** server-side per-export list */
1438 cfs_list_t rq_exp_list;
1439 /** server-side hp handlers */
1440 struct ptlrpc_hpreq_ops *rq_ops;
1442 /** initial thread servicing this request */
1443 struct ptlrpc_thread *rq_svc_thread;
1445 /** history sequence # */
1446 __u64 rq_history_seq;
1450 /** stub for NRS request */
1451 struct ptlrpc_nrs_request rq_nrq;
1453 /** the index of service's srv_at_array into which request is linked */
1455 /** Lock to protect request flags and some other important bits, like
1459 /** client-side flags are serialized by rq_lock */
1460 unsigned int rq_intr:1, rq_replied:1, rq_err:1,
1461 rq_timedout:1, rq_resend:1, rq_restart:1,
1463 * when ->rq_replay is set, the request is kept by the client even
1464 * after the server commits the corresponding transaction. This is
1465 * used for operations that require a sequence of multiple
1466 * requests to be replayed. The only example currently is file
1467 * open/close. When the last request in such a sequence is
1468 * committed, ->rq_replay is cleared on all requests in the
1469 * sequence.
1472 rq_no_resend:1, rq_waiting:1, rq_receiving_reply:1,
1473 rq_no_delay:1, rq_net_err:1, rq_wait_ctx:1,
1474 rq_early:1, rq_must_unlink:1,
1475 rq_memalloc:1, /* req originated from "kswapd" */
1476 /* server-side flags */
1477 rq_packed_final:1, /* packed final reply */
1478 rq_hp:1, /* high priority RPC */
1479 rq_at_linked:1, /* link into service's srv_at_array */
1480 rq_reply_truncate:1,
1482 /* whether the "rq_set" is a valid one */
1484 rq_generation_set:1,
1485 /* do not resend request on -EINPROGRESS */
1486 rq_no_retry_einprogress:1,
1487 /* allow the req to be sent if the import is in recovery
1491 unsigned int rq_nr_resend;
1493 enum rq_phase rq_phase; /* one of RQ_PHASE_* */
1494 enum rq_phase rq_next_phase; /* one of RQ_PHASE_* to be used next */
1495 cfs_atomic_t rq_refcount;/* client-side refcount for SENT race,
1496 server-side refcount for multiple replies */
1498 /** Portal to which this request would be sent */
1499 short rq_request_portal; /* XXX FIXME bug 249 */
1500 /** Portal where to wait for reply and where reply would be sent */
1501 short rq_reply_portal; /* XXX FIXME bug 249 */
1505 * !rq_truncate : # reply bytes actually received,
1506 * rq_truncate : required repbuf_len for resend
1508 int rq_nob_received;
1509 /** Request length */
1513 /** Request message - what client sent */
1514 struct lustre_msg *rq_reqmsg;
1515 /** Reply message - server response */
1516 struct lustre_msg *rq_repmsg;
1517 /** Transaction number */
1522 * List item for the replay list. Not yet committed requests get linked here.
1524 * Also see \a rq_replay comment above.
1526 cfs_list_t rq_replay_list;
1529 * security and encryption data
1531 struct ptlrpc_cli_ctx *rq_cli_ctx; /**< client's half ctx */
1532 struct ptlrpc_svc_ctx *rq_svc_ctx; /**< server's half ctx */
1533 cfs_list_t rq_ctx_chain; /**< link to waited ctx */
1535 struct sptlrpc_flavor rq_flvr; /**< for client & server */
1536 enum lustre_sec_part rq_sp_from;
1538 /* client/server security flags */
1540 rq_ctx_init:1, /* context initiation */
1541 rq_ctx_fini:1, /* context destroy */
1542 rq_bulk_read:1, /* request bulk read */
1543 rq_bulk_write:1, /* request bulk write */
1544 /* server authentication flags */
1545 rq_auth_gss:1, /* authenticated by gss */
1546 rq_auth_remote:1, /* authed as remote user */
1547 rq_auth_usr_root:1, /* authed as root */
1548 rq_auth_usr_mdt:1, /* authed as mdt */
1549 rq_auth_usr_ost:1, /* authed as ost */
1550 /* security tfm flags */
1553 /* doesn't expect reply FIXME */
1555 rq_pill_init:1; /* pill initialized */
1557 uid_t rq_auth_uid; /* authed uid */
1558 uid_t rq_auth_mapped_uid; /* authed uid mapped to */
1560 /* (server side), pointed directly into req buffer */
1561 struct ptlrpc_user_desc *rq_user_desc;
1563 /* various buffer pointers */
1564 struct lustre_msg *rq_reqbuf; /* req wrapper */
1565 char *rq_repbuf; /* rep buffer */
1566 struct lustre_msg *rq_repdata; /* rep wrapper msg */
1567 struct lustre_msg *rq_clrbuf; /* only in priv mode */
1568 int rq_reqbuf_len; /* req wrapper buf len */
1569 int rq_reqdata_len; /* req wrapper msg len */
1570 int rq_repbuf_len; /* rep buffer len */
1571 int rq_repdata_len; /* rep wrapper msg len */
1572 int rq_clrbuf_len; /* only in priv mode */
1573 int rq_clrdata_len; /* only in priv mode */
1575 /** early replies go to offset 0, regular replies go after that */
1576 unsigned int rq_reply_off;
1580 /** Fields that help to see if request and reply were swabbed or not */
1581 __u32 rq_req_swab_mask;
1582 __u32 rq_rep_swab_mask;
1584 /** What was import generation when this request was sent */
1585 int rq_import_generation;
1586 enum lustre_imp_state rq_send_state;
1588 /** how many early replies (for stats) */
1591 /** client+server request */
1592 lnet_handle_md_t rq_req_md_h;
1593 struct ptlrpc_cb_id rq_req_cbid;
1594 /** optional time limit for send attempts */
1595 cfs_duration_t rq_delay_limit;
1596 /** time request was first queued */
1597 cfs_time_t rq_queued_time;
1599 /* server-side... */
1600 /** request arrival time */
1601 struct timeval rq_arrival_time;
1602 /** separated reply state */
1603 struct ptlrpc_reply_state *rq_reply_state;
1604 /** incoming request buffer */
1605 struct ptlrpc_request_buffer_desc *rq_rqbd;
1607 /** client-only incoming reply */
1608 lnet_handle_md_t rq_reply_md_h;
1609 cfs_waitq_t rq_reply_waitq;
1610 struct ptlrpc_cb_id rq_reply_cbid;
1614 /** Peer description (the other side) */
1615 lnet_process_id_t rq_peer;
1616 /** Server-side, export on which request was received */
1617 struct obd_export *rq_export;
1618 /** Client side, import where request is being sent */
1619 struct obd_import *rq_import;
1621 /** Replay callback, called after request is replayed at recovery */
1622 void (*rq_replay_cb)(struct ptlrpc_request *);
1624 * Commit callback, called when request is committed and about to be
1627 void (*rq_commit_cb)(struct ptlrpc_request *);
1628 /** Opaq data for replay and commit callbacks. */
1631 /** For bulk requests on client only: bulk descriptor */
1632 struct ptlrpc_bulk_desc *rq_bulk;
1634 /** client outgoing req */
1636 * when request/reply sent (secs), or time when request should be sent
1639 /** time for request really sent out */
1640 time_t rq_real_sent;
1642 /** when request must finish. volatile
1643 * so that servers' early reply updates to the deadline aren't
1644 * kept in per-cpu cache */
1645 volatile time_t rq_deadline;
1646 /** when req reply unlink must finish. */
1647 time_t rq_reply_deadline;
1648 /** when req bulk unlink must finish. */
1649 time_t rq_bulk_deadline;
1651 * service time estimate (secs)
1652 * If the request is not served by this time, it is marked as timed out.
1656 /** Multi-rpc bits */
1657 /** Per-request waitq introduced by bug 21938 for recovery waiting */
1658 cfs_waitq_t rq_set_waitq;
1659 /** Link item for request set lists */
1660 cfs_list_t rq_set_chain;
1661 /** Link back to the request set */
1662 struct ptlrpc_request_set *rq_set;
1663 /** Async completion handler, called when reply is received */
1664 ptlrpc_interpterer_t rq_interpret_reply;
1665 /** Async completion context */
1666 union ptlrpc_async_args rq_async_args;
1668 /** Pool if request is from preallocated list */
1669 struct ptlrpc_request_pool *rq_pool;
1671 struct lu_context rq_session;
1672 struct lu_context rq_recov_session;
1674 /** request format description */
1675 struct req_capsule rq_pill;
1679 * Call the completion handler for the rpc, if any; return its status or the
1680 * original rc if there was no handler defined for this request.
1682 static inline int ptlrpc_req_interpret(const struct lu_env *env,
1683 struct ptlrpc_request *req, int rc)
1685 if (req->rq_interpret_reply != NULL) {
1686 req->rq_status = req->rq_interpret_reply(env, req,
1687 &req->rq_async_args,
1688 rc);
1689 return req->rq_status;
1690 }
1691 return rc;
1692 }
1697 int ptlrpc_nrs_policy_register(struct ptlrpc_nrs_pol_desc *desc);
1698 int ptlrpc_nrs_policy_unregister(struct ptlrpc_nrs_pol_desc *desc);
1699 void ptlrpc_nrs_req_hp_move(struct ptlrpc_request *req);
1700 void nrs_policy_get_info_locked(struct ptlrpc_nrs_policy *policy,
1701 struct ptlrpc_nrs_pol_info *info);
1704 * Can the request be moved from the regular NRS head to the high-priority NRS
1705 * head (of the same PTLRPC service partition), if any?
1707 * For a reliable result, this should be checked under svcpt->scp_req_lock.
1710 ptlrpc_nrs_req_can_move(struct ptlrpc_request *req)
1712 struct ptlrpc_nrs_request *nrq = &req->rq_nrq;
1715 * LU-898: Check ptlrpc_nrs_request::nr_enqueued to make sure the
1716 * request has been enqueued first, and ptlrpc_nrs_request::nr_started
1717 * to make sure it has not been scheduled yet (analogous to previous
1718 * (non-NRS) checking of !list_empty(&ptlrpc_request::rq_list)).
1720 return nrq->nr_enqueued && !nrq->nr_started && !req->rq_hp;
1725 * Returns 1 if request buffer at offset \a index was already swabbed
1727 static inline int lustre_req_swabbed(struct ptlrpc_request *req, int index)
1729 LASSERT(index < sizeof(req->rq_req_swab_mask) * 8);
1730 return req->rq_req_swab_mask & (1 << index);
1734 * Returns 1 if request reply buffer at offset \a index was already swabbed
1736 static inline int lustre_rep_swabbed(struct ptlrpc_request *req, int index)
1738 LASSERT(index < sizeof(req->rq_rep_swab_mask) * 8);
1739 return req->rq_rep_swab_mask & (1 << index);
1743 * Returns 1 if request needs to be swabbed into local cpu byteorder
1745 static inline int ptlrpc_req_need_swab(struct ptlrpc_request *req)
1747 return lustre_req_swabbed(req, MSG_PTLRPC_HEADER_OFF);
1751 * Returns 1 if request reply needs to be swabbed into local cpu byteorder
1753 static inline int ptlrpc_rep_need_swab(struct ptlrpc_request *req)
1755 return lustre_rep_swabbed(req, MSG_PTLRPC_HEADER_OFF);
1759 * Mark request buffer at offset \a index that it was already swabbed
1761 static inline void lustre_set_req_swabbed(struct ptlrpc_request *req, int index)
1763 LASSERT(index < sizeof(req->rq_req_swab_mask) * 8);
1764 LASSERT((req->rq_req_swab_mask & (1 << index)) == 0);
1765 req->rq_req_swab_mask |= 1 << index;
1769 * Mark request reply buffer at offset \a index that it was already swabbed
1771 static inline void lustre_set_rep_swabbed(struct ptlrpc_request *req, int index)
1773 LASSERT(index < sizeof(req->rq_rep_swab_mask) * 8);
1774 LASSERT((req->rq_rep_swab_mask & (1 << index)) == 0);
1775 req->rq_rep_swab_mask |= 1 << index;
1779 * Convert numerical request phase value \a phase into text string description
1781 static inline const char *
1782 ptlrpc_phase2str(enum rq_phase phase)
1791 case RQ_PHASE_INTERPRET:
1793 case RQ_PHASE_COMPLETE:
1795 case RQ_PHASE_UNREGISTERING:
1796 return "Unregistering";
1803 * Convert numerical request phase of the request \a req into text string
1804 * description
1806 static inline const char *
1807 ptlrpc_rqphase2str(struct ptlrpc_request *req)
1809 return ptlrpc_phase2str(req->rq_phase);
1813 * Debugging functions and helpers to print request structure into debug log
1816 /* Spare the preprocessor, spoil the bugs. */
1817 #define FLAG(field, str) (field ? str : "")
1819 /** Convert bit flags into a string */
1820 #define DEBUG_REQ_FLAGS(req) \
1821 ptlrpc_rqphase2str(req), \
1822 FLAG(req->rq_intr, "I"), FLAG(req->rq_replied, "R"), \
1823 FLAG(req->rq_err, "E"), \
1824 FLAG(req->rq_timedout, "X") /* eXpired */, FLAG(req->rq_resend, "S"), \
1825 FLAG(req->rq_restart, "T"), FLAG(req->rq_replay, "P"), \
1826 FLAG(req->rq_no_resend, "N"), \
1827 FLAG(req->rq_waiting, "W"), \
1828 FLAG(req->rq_wait_ctx, "C"), FLAG(req->rq_hp, "H"), \
1829 FLAG(req->rq_committed, "M")
1831 #define REQ_FLAGS_FMT "%s:%s%s%s%s%s%s%s%s%s%s%s%s"
1833 void _debug_req(struct ptlrpc_request *req,
1834 struct libcfs_debug_msg_data *data, const char *fmt, ...)
1835 __attribute__ ((format (printf, 3, 4)));
1838 * Helper that decides if we need to print the request according to the current
1839 * debug level settings
1841 #define debug_req(msgdata, mask, cdls, req, fmt, a...) \
1843 CFS_CHECK_STACK(msgdata, mask, cdls); \
1845 if (((mask) & D_CANTMASK) != 0 || \
1846 ((libcfs_debug & (mask)) != 0 && \
1847 (libcfs_subsystem_debug & DEBUG_SUBSYSTEM) != 0)) \
1848 _debug_req((req), msgdata, fmt, ##a); \
1852 * This is the debug print function you need to use to print request structure
1853 * content into the lustre debug log.
1854 * for most callers (level is a constant) this is resolved at compile time */
1855 #define DEBUG_REQ(level, req, fmt, args...) \
1857 if ((level) & (D_ERROR | D_WARNING)) { \
1858 static cfs_debug_limit_state_t cdls; \
1859 LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, level, &cdls); \
1860 debug_req(&msgdata, level, &cdls, req, "@@@ "fmt" ", ## args);\
1862 LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, level, NULL); \
1863 debug_req(&msgdata, level, NULL, req, "@@@ "fmt" ", ## args); \
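/**
 * Example (illustrative sketch only; the request pointer and return code are
 * hypothetical, but this is the usual call pattern for the macro above):
 * \code
 *	int rc = -ETIMEDOUT;
 *
 *	DEBUG_REQ(D_ERROR, req, "processing failed: rc = %d", rc);
 * \endcode
 * Because D_ERROR is a compile-time constant here, the rate-limited (cdls)
 * branch of the macro is selected at compile time.
 */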
1869 * Structure that defines a single page of a bulk transfer
1871 struct ptlrpc_bulk_page {
1872 /** Linkage to list of pages in a bulk */
1875 * Number of bytes in a page to transfer starting from \a bp_pageoffset
1878 /** offset within a page */
1880 /** The page itself */
1881 struct page *bp_page;
1884 #define BULK_GET_SOURCE 0
1885 #define BULK_PUT_SINK 1
1886 #define BULK_GET_SINK 2
1887 #define BULK_PUT_SOURCE 3
1890 * Definition of bulk descriptor.
1891 * Bulks are special "two phase" RPCs where the initial request message
1892 * is sent first and is followed by a transfer (or receiving) of a large
1893 * amount of data to be settled into pages referenced from the bulk descriptors.
1894 * Bulk transfers (the actual data following the small requests) are done
1895 * on separate LNet portals.
1896 * In Lustre we use bulk transfers for READ and WRITE transfers from/to OSTs.
1897 * Another user is readpage for the MDT.
1899 struct ptlrpc_bulk_desc {
1900 /** completed with failure */
1901 unsigned long bd_failure:1;
1902 /** {put,get}{source,sink} */
1903 unsigned long bd_type:2;
1905 unsigned long bd_registered:1;
1906 /** For serialization with callback */
1908 /** Import generation when request for this bulk was sent */
1909 int bd_import_generation;
1910 /** LNet portal for this bulk */
1912 /** Server side - export this bulk created for */
1913 struct obd_export *bd_export;
1914 /** Client side - import this bulk was sent on */
1915 struct obd_import *bd_import;
1916 /** Back pointer to the request */
1917 struct ptlrpc_request *bd_req;
1918 cfs_waitq_t bd_waitq; /* server side only WQ */
1919 int bd_iov_count; /* # entries in bd_iov */
1920 int bd_max_iov; /* allocated size of bd_iov */
1921 int bd_nob; /* # bytes covered */
1922 int bd_nob_transferred; /* # bytes GOT/PUT */
1926 struct ptlrpc_cb_id bd_cbid; /* network callback info */
1927 lnet_nid_t bd_sender; /* stash event::sender */
1928 int bd_md_count; /* # valid entries in bd_mds */
1929 int bd_md_max_brw; /* max entries in bd_mds */
1930 /** array of associated MDs */
1931 lnet_handle_md_t bd_mds[PTLRPC_BULK_OPS_COUNT];
1933 #if defined(__KERNEL__)
1935 * encrypt iov, size is either 0 or bd_iov_count.
1937 lnet_kiov_t *bd_enc_iov;
1939 lnet_kiov_t bd_iov[0];
1941 lnet_md_iovec_t bd_iov[0];
1946 SVC_STOPPED = 1 << 0,
1947 SVC_STOPPING = 1 << 1,
1948 SVC_STARTING = 1 << 2,
1949 SVC_RUNNING = 1 << 3,
1951 SVC_SIGNAL = 1 << 5,
1954 #define PTLRPC_THR_NAME_LEN 32
1956 * Definition of server service thread structure
1958 struct ptlrpc_thread {
1960 * List of active threads in svc->srv_threads
1964 * thread-private data (preallocated memory)
1969 * service thread index, from ptlrpc_start_threads
1973 * service thread pid
1977 * put watchdog in the structure per thread b=14840
1979 struct lc_watchdog *t_watchdog;
1981 * the svc this thread belonged to b=18582
1983 struct ptlrpc_service_part *t_svcpt;
1984 cfs_waitq_t t_ctl_waitq;
1985 struct lu_env *t_env;
1986 char t_name[PTLRPC_THR_NAME_LEN];
1989 static inline int thread_is_init(struct ptlrpc_thread *thread)
1991 return thread->t_flags == 0;
1994 static inline int thread_is_stopped(struct ptlrpc_thread *thread)
1996 return !!(thread->t_flags & SVC_STOPPED);
1999 static inline int thread_is_stopping(struct ptlrpc_thread *thread)
2001 return !!(thread->t_flags & SVC_STOPPING);
2004 static inline int thread_is_starting(struct ptlrpc_thread *thread)
2006 return !!(thread->t_flags & SVC_STARTING);
2009 static inline int thread_is_running(struct ptlrpc_thread *thread)
2011 return !!(thread->t_flags & SVC_RUNNING);
2014 static inline int thread_is_event(struct ptlrpc_thread *thread)
2016 return !!(thread->t_flags & SVC_EVENT);
2019 static inline int thread_is_signal(struct ptlrpc_thread *thread)
2021 return !!(thread->t_flags & SVC_SIGNAL);
2024 static inline void thread_clear_flags(struct ptlrpc_thread *thread, __u32 flags)
2026 thread->t_flags &= ~flags;
2029 static inline void thread_set_flags(struct ptlrpc_thread *thread, __u32 flags)
2031 thread->t_flags = flags;
2034 static inline void thread_add_flags(struct ptlrpc_thread *thread, __u32 flags)
2036 thread->t_flags |= flags;
2039 static inline int thread_test_and_clear_flags(struct ptlrpc_thread *thread,
2042 if (thread->t_flags & flags) {
2043 thread->t_flags &= ~flags;
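/**
 * Example (hedged sketch, not taken from the service code): a service thread
 * main loop typically marks itself running and polls the stop flag using the
 * helpers above. svc_handle_one_request() is a hypothetical placeholder.
 * \code
 *	thread_add_flags(thread, SVC_RUNNING);
 *	while (!thread_is_stopping(thread) && !thread_is_stopped(thread))
 *		svc_handle_one_request(thread);
 *	thread_clear_flags(thread, SVC_RUNNING);
 *	thread_add_flags(thread, SVC_STOPPED);
 * \endcode
 */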
2050 * Request buffer descriptor structure.
2051 * This is a structure that contains one posted request buffer for service.
2052 * Once data lands in a buffer, the event callback creates an actual request and
2053 * wakes one of the service threads to process the new incoming request.
2054 * More than one request can fit into the buffer.
2056 struct ptlrpc_request_buffer_desc {
2057 /** Link item for rqbds on a service */
2058 cfs_list_t rqbd_list;
2059 /** History of requests for this buffer */
2060 cfs_list_t rqbd_reqs;
2061 /** Back pointer to service for which this buffer is registered */
2062 struct ptlrpc_service_part *rqbd_svcpt;
2063 /** LNet descriptor */
2064 lnet_handle_md_t rqbd_md_h;
2066 /** The buffer itself */
2068 struct ptlrpc_cb_id rqbd_cbid;
2070 * This "embedded" request structure is only used for the
2071 * last request to fit into the buffer
2073 struct ptlrpc_request rqbd_req;
2076 typedef int (*svc_handler_t)(struct ptlrpc_request *req);
2078 struct ptlrpc_service_ops {
2080 * if non-NULL, called during thread creation (ptlrpc_start_thread())
2081 * to initialize service-specific per-thread state.
2083 int (*so_thr_init)(struct ptlrpc_thread *thr);
2085 * if non-NULL, called during thread shutdown (ptlrpc_main()) to
2086 * destroy state created by ->so_thr_init().
2088 void (*so_thr_done)(struct ptlrpc_thread *thr);
2090 * Handler function for incoming requests for this service
2092 int (*so_req_handler)(struct ptlrpc_request *req);
2094 * Function to determine the priority of the request; it is called
2095 * for every new request.
2097 int (*so_hpreq_handler)(struct ptlrpc_request *);
2099 * service-specific print fn
2101 void (*so_req_printer)(void *, struct ptlrpc_request *);
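/**
 * Example (illustrative sketch): a server typically fills in only the handlers
 * it needs. The mysvc_req_handler name below is a hypothetical placeholder;
 * ptlrpc_hpreq_handler() is the generic high-priority handler declared later
 * in this header.
 * \code
 *	static struct ptlrpc_service_ops mysvc_ops = {
 *		.so_req_handler		= mysvc_req_handler,
 *		.so_hpreq_handler	= ptlrpc_hpreq_handler,
 *		.so_req_printer		= NULL,
 *	};
 * \endcode
 */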
2104 #ifndef __cfs_cacheline_aligned
2105 /* NB: put it here for reducing patch dependence */
2106 # define __cfs_cacheline_aligned
2110 * How many high priority requests to serve before serving one normal
2113 #define PTLRPC_SVC_HP_RATIO 10
2116 * Definition of PortalRPC service.
2117 * The service listens on a particular portal (like a TCP port)
2118 * and performs actions for a specific server, like the IO service for an OST
2119 * or the general metadata service for the MDS.
2121 struct ptlrpc_service {
2122 /** serialize /proc operations */
2123 spinlock_t srv_lock;
2124 /** most often accessed fields */
2125 /** chain thru all services */
2126 cfs_list_t srv_list;
2127 /** service operations table */
2128 struct ptlrpc_service_ops srv_ops;
2129 /** only statically allocated strings here; we don't clean them */
2131 /** only statically allocated strings here; we don't clean them */
2132 char *srv_thread_name;
2133 /** service thread list */
2134 cfs_list_t srv_threads;
2135 /** # of threads to create for each partition at initialization */
2136 int srv_nthrs_cpt_init;
2137 /** limit on the number of threads for each partition */
2138 int srv_nthrs_cpt_limit;
2139 /** Root of /proc dir tree for this service */
2140 cfs_proc_dir_entry_t *srv_procroot;
2141 /** Pointer to statistic data for this service */
2142 struct lprocfs_stats *srv_stats;
2143 /** # hp per lp reqs to handle */
2144 int srv_hpreq_ratio;
2145 /** biggest request to receive */
2146 int srv_max_req_size;
2147 /** biggest reply to send */
2148 int srv_max_reply_size;
2149 /** size of individual buffers */
2151 /** # buffers to allocate in 1 group */
2152 int srv_nbuf_per_group;
2153 /** Local portal on which to receive requests */
2154 __u32 srv_req_portal;
2155 /** Portal on the client to send replies to */
2156 __u32 srv_rep_portal;
2158 * Tags for lu_context associated with this thread, see struct
2162 /** soft watchdog timeout multiplier */
2163 int srv_watchdog_factor;
2164 /** under unregister_service */
2165 unsigned srv_is_stopping:1;
2167 /** max # request buffers in history per partition */
2168 int srv_hist_nrqbds_cpt_max;
2169 /** number of CPTs this service bound on */
2171 /** CPTs array this service bound on */
2173 /** 2^srv_cptab_bits >= cfs_cpt_number(srv_cptable) */
2175 /** CPT table this service is running over */
2176 struct cfs_cpt_table *srv_cptable;
2178 * partition data for ptlrpc service
2180 struct ptlrpc_service_part *srv_parts[0];
2184 * Definition of PortalRPC service partition data.
2185 * Although a service has only one instance of it right now, we
2186 * will have multiple instances very soon (one instance per CPT).
2188 * It has four locks:
2190 * serialize operations on rqbd and requests waiting for preprocess
2192 * serialize operations on active requests sent to this portal
2194 * serialize adaptive timeout stuff
2196 * serialize operations on RS list (reply states)
2198 * We don't have any use-case to take two or more locks at the same time
2199 * for now, so there is no lock order issue.
2201 struct ptlrpc_service_part {
2202 /** back reference to owner */
2203 struct ptlrpc_service *scp_service __cfs_cacheline_aligned;
2204 /* CPT id, reserved */
2206 /** always increasing number */
2208 /** # of starting threads */
2209 int scp_nthrs_starting;
2210 /** # of stopping threads, reserved for shrinking threads */
2211 int scp_nthrs_stopping;
2212 /** # running threads */
2213 int scp_nthrs_running;
2214 /** service threads list */
2215 cfs_list_t scp_threads;
2218 * serialize the following fields, used for protecting the
2219 * rqbd list and incoming requests waiting for preprocess;
2220 * threads starting & stopping are also protected by this lock.
2222 spinlock_t scp_lock __cfs_cacheline_aligned;
2223 /** total # req buffer descs allocated */
2224 int scp_nrqbds_total;
2225 /** # posted request buffers for receiving */
2226 int scp_nrqbds_posted;
2227 /** in progress of allocating rqbd */
2228 int scp_rqbd_allocating;
2229 /** # incoming reqs */
2230 int scp_nreqs_incoming;
2231 /** request buffers to be reposted */
2232 cfs_list_t scp_rqbd_idle;
2233 /** req buffers receiving */
2234 cfs_list_t scp_rqbd_posted;
2235 /** incoming reqs */
2236 cfs_list_t scp_req_incoming;
2237 /** timeout before re-posting reqs, in ticks */
2238 cfs_duration_t scp_rqbd_timeout;
2240 * all threads sleep on this. This wait-queue is signalled when a new
2241 * incoming request arrives and when a difficult reply has to be handled.
2243 cfs_waitq_t scp_waitq;
2245 /** request history */
2246 cfs_list_t scp_hist_reqs;
2247 /** request buffer history */
2248 cfs_list_t scp_hist_rqbds;
2249 /** # request buffers in history */
2250 int scp_hist_nrqbds;
2251 /** sequence number for request */
2253 /** highest seq culled from history */
2254 __u64 scp_hist_seq_culled;
2257 * serialize the following fields, used for processing requests
2258 * sent to this portal
2260 spinlock_t scp_req_lock __cfs_cacheline_aligned;
2261 /** # reqs in either of the NRS heads below */
2262 /** # reqs being served */
2263 int scp_nreqs_active;
2264 /** # HPreqs being served */
2265 int scp_nhreqs_active;
2266 /** # hp requests handled */
2269 /** NRS head for regular requests */
2270 struct ptlrpc_nrs scp_nrs_reg;
2271 /** NRS head for HP requests; this is only valid for services that can
2272 * handle HP requests */
2273 struct ptlrpc_nrs *scp_nrs_hp;
2278 * serialize the following fields, used for changes on
2281 spinlock_t scp_at_lock __cfs_cacheline_aligned;
2282 /** estimated rpc service time */
2283 struct adaptive_timeout scp_at_estimate;
2284 /** reqs waiting for replies */
2285 struct ptlrpc_at_array scp_at_array;
2286 /** early reply timer */
2287 cfs_timer_t scp_at_timer;
2289 cfs_time_t scp_at_checktime;
2290 /** check early replies */
2291 unsigned scp_at_check;
2295 * serialize the following fields, used for processing
2296 * replies for this portal
2298 spinlock_t scp_rep_lock __cfs_cacheline_aligned;
2299 /** all the active replies */
2300 cfs_list_t scp_rep_active;
2302 /** replies waiting for service */
2303 cfs_list_t scp_rep_queue;
2305 /** List of free reply_states */
2306 cfs_list_t scp_rep_idle;
2307 /** waitq to wake up when entries are added to scp_rep_idle */
2308 cfs_waitq_t scp_rep_waitq;
2309 /** # 'difficult' replies */
2310 cfs_atomic_t scp_nreps_difficult;
2313 #define ptlrpc_service_for_each_part(part, i, svc) \
2315 i < (svc)->srv_ncpts && \
2316 (svc)->srv_parts != NULL && \
2317 ((part) = (svc)->srv_parts[i]) != NULL; i++)
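/**
 * Example (hedged sketch): iterating all partitions of a service, e.g. to sum
 * a per-partition counter. The variable names here are hypothetical.
 * \code
 *	struct ptlrpc_service_part *svcpt;
 *	int active = 0;
 *	int i;
 *
 *	ptlrpc_service_for_each_part(svcpt, i, svc)
 *		active += svcpt->scp_nreqs_active;
 * \endcode
 */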
2320 * Declaration of ptlrpcd control structure
2322 struct ptlrpcd_ctl {
2324 * Ptlrpc thread control flags (LIOD_START, LIOD_STOP, LIOD_FORCE)
2326 unsigned long pc_flags;
2328 * Thread lock protecting structure fields.
2334 struct completion pc_starting;
2338 struct completion pc_finishing;
2340 * Thread requests set.
2342 struct ptlrpc_request_set *pc_set;
2344 * Thread name used in cfs_daemonize()
2348 * Environment for request interpreters to run in.
2350 struct lu_env pc_env;
2352 * Index of ptlrpcd thread in the array.
2356 * Number of the ptlrpcd's partners.
2360 * Pointer to the array of partners' ptlrpcd_ctl structure.
2362 struct ptlrpcd_ctl **pc_partners;
2364 * Record the partner index to be processed next.
2369 * Async rpcs flag to make sure that ptlrpcd_check() is called only
2374 * Currently not used.
2378 * User-space async rpcs callback.
2380 void *pc_wait_callback;
2382 * User-space check idle rpcs callback.
2384 void *pc_idle_callback;
2388 /* Bits for pc_flags */
2389 enum ptlrpcd_ctl_flags {
2391 * Ptlrpc thread start flag.
2393 LIOD_START = 1 << 0,
2395 * Ptlrpc thread stop flag.
2399 * Ptlrpc thread force flag (only stop force so far).
2400 * This will cause any inflight rpcs handled by the thread
2401 * to be aborted if LIOD_STOP is specified.
2403 LIOD_FORCE = 1 << 2,
2405 * This is a recovery ptlrpc thread.
2407 LIOD_RECOVERY = 1 << 3,
2409 * The ptlrpcd is bound to some CPU core.
2418 * Service compatibility function; policy is compatible with all services.
2420 * \param[in] svc The service the policy is attempting to register with.
2421 * \param[in] desc The policy descriptor
2423 * \retval true The policy is compatible with the NRS head
2425 * \see ptlrpc_nrs_pol_desc::pd_compat()
2428 nrs_policy_compat_all(struct ptlrpc_service *svc,
2429 const struct ptlrpc_nrs_pol_desc *desc)
2435 * Service compatibility function; policy is compatible with only a specific
2436 * service which is identified by its human-readable name at
2437 * ptlrpc_service::srv_name.
2439 * \param[in] svc The service the policy is attempting to register with.
2440 * \param[in] desc The policy descriptor
2442 * \retval false The policy is not compatible with the NRS head
2443 * \retval true The policy is compatible with the NRS head
2445 * \see ptlrpc_nrs_pol_desc::pd_compat()
2448 nrs_policy_compat_one(struct ptlrpc_service *svc,
2449 const struct ptlrpc_nrs_pol_desc *desc)
2451 LASSERT(desc->pd_compat_svc_name != NULL);
2452 return strcmp(svc->srv_name, desc->pd_compat_svc_name) == 0;
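/**
 * Example (purely illustrative sketch): a policy meant for only one service
 * pairs nrs_policy_compat_one() with a matching compatible-service name in
 * its descriptor. The "ost_io" string is only an illustration, and how the
 * descriptor is actually populated is outside the scope of this header.
 * \code
 *	desc->pd_compat		 = nrs_policy_compat_one;
 *	desc->pd_compat_svc_name = "ost_io";
 * \endcode
 */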
2457 /* ptlrpc/events.c */
2458 extern lnet_handle_eq_t ptlrpc_eq_h;
2459 extern int ptlrpc_uuid_to_peer(struct obd_uuid *uuid,
2460 lnet_process_id_t *peer, lnet_nid_t *self);
2462 * These callbacks are invoked by LNet when something happened to
2466 extern void request_out_callback(lnet_event_t *ev);
2467 extern void reply_in_callback(lnet_event_t *ev);
2468 extern void client_bulk_callback(lnet_event_t *ev);
2469 extern void request_in_callback(lnet_event_t *ev);
2470 extern void reply_out_callback(lnet_event_t *ev);
2471 #ifdef HAVE_SERVER_SUPPORT
2472 extern void server_bulk_callback(lnet_event_t *ev);
2476 /* ptlrpc/connection.c */
2477 struct ptlrpc_connection *ptlrpc_connection_get(lnet_process_id_t peer,
2479 struct obd_uuid *uuid);
2480 int ptlrpc_connection_put(struct ptlrpc_connection *c);
2481 struct ptlrpc_connection *ptlrpc_connection_addref(struct ptlrpc_connection *);
2482 int ptlrpc_connection_init(void);
2483 void ptlrpc_connection_fini(void);
2484 extern lnet_pid_t ptl_get_pid(void);
2486 /* ptlrpc/niobuf.c */
2488 * Actual interfacing with LNet to put/get/register/unregister stuff
2491 #ifdef HAVE_SERVER_SUPPORT
2492 struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_exp(struct ptlrpc_request *req,
2493 unsigned npages, unsigned max_brw,
2494 unsigned type, unsigned portal);
2495 int ptlrpc_start_bulk_transfer(struct ptlrpc_bulk_desc *desc);
2496 void ptlrpc_abort_bulk(struct ptlrpc_bulk_desc *desc);
2498 static inline int ptlrpc_server_bulk_active(struct ptlrpc_bulk_desc *desc)
2502 LASSERT(desc != NULL);
2504 spin_lock(&desc->bd_lock);
2505 rc = desc->bd_md_count;
2506 spin_unlock(&desc->bd_lock);
2511 int ptlrpc_register_bulk(struct ptlrpc_request *req);
2512 int ptlrpc_unregister_bulk(struct ptlrpc_request *req, int async);
2514 static inline int ptlrpc_client_bulk_active(struct ptlrpc_request *req)
2516 struct ptlrpc_bulk_desc *desc;
2519 LASSERT(req != NULL);
2520 desc = req->rq_bulk;
2522 if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK) &&
2523 req->rq_bulk_deadline > cfs_time_current_sec())
2529 spin_lock(&desc->bd_lock);
2530 rc = desc->bd_md_count;
2531 spin_unlock(&desc->bd_lock);
2535 #define PTLRPC_REPLY_MAYBE_DIFFICULT 0x01
2536 #define PTLRPC_REPLY_EARLY 0x02
2537 int ptlrpc_send_reply(struct ptlrpc_request *req, int flags);
2538 int ptlrpc_reply(struct ptlrpc_request *req);
2539 int ptlrpc_send_error(struct ptlrpc_request *req, int difficult);
2540 int ptlrpc_error(struct ptlrpc_request *req);
2541 void ptlrpc_resend_req(struct ptlrpc_request *request);
2542 int ptlrpc_at_get_net_latency(struct ptlrpc_request *req);
2543 int ptl_send_rpc(struct ptlrpc_request *request, int noreply);
2544 int ptlrpc_register_rqbd(struct ptlrpc_request_buffer_desc *rqbd);
2547 /* ptlrpc/client.c */
2549 * Client-side portals API. Everything to send requests, receive replies,
2550 * request queues, request management, etc.
2553 void ptlrpc_init_client(int req_portal, int rep_portal, char *name,
2554 struct ptlrpc_client *);
2555 void ptlrpc_cleanup_client(struct obd_import *imp);
2556 struct ptlrpc_connection *ptlrpc_uuid_to_connection(struct obd_uuid *uuid);
2558 int ptlrpc_queue_wait(struct ptlrpc_request *req);
2559 int ptlrpc_replay_req(struct ptlrpc_request *req);
2560 int ptlrpc_unregister_reply(struct ptlrpc_request *req, int async);
2561 void ptlrpc_restart_req(struct ptlrpc_request *req);
2562 void ptlrpc_abort_inflight(struct obd_import *imp);
2563 void ptlrpc_cleanup_imp(struct obd_import *imp);
2564 void ptlrpc_abort_set(struct ptlrpc_request_set *set);
2566 struct ptlrpc_request_set *ptlrpc_prep_set(void);
2567 struct ptlrpc_request_set *ptlrpc_prep_fcset(int max, set_producer_func func,
2569 int ptlrpc_set_add_cb(struct ptlrpc_request_set *set,
2570 set_interpreter_func fn, void *data);
2571 int ptlrpc_set_next_timeout(struct ptlrpc_request_set *);
2572 int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set);
2573 int ptlrpc_set_wait(struct ptlrpc_request_set *);
2574 int ptlrpc_expired_set(void *data);
2575 void ptlrpc_interrupted_set(void *data);
2576 void ptlrpc_mark_interrupted(struct ptlrpc_request *req);
2577 void ptlrpc_set_destroy(struct ptlrpc_request_set *);
2578 void ptlrpc_set_add_req(struct ptlrpc_request_set *, struct ptlrpc_request *);
2579 void ptlrpc_set_add_new_req(struct ptlrpcd_ctl *pc,
2580 struct ptlrpc_request *req);
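/**
 * Example (hedged sketch): issuing several already-prepared requests through
 * a request set and waiting for all of them. Request preparation and error
 * handling of individual requests are elided.
 * \code
 *	struct ptlrpc_request_set *set;
 *	int rc;
 *
 *	set = ptlrpc_prep_set();
 *	if (set == NULL)
 *		return -ENOMEM;
 *	ptlrpc_set_add_req(set, req1);
 *	ptlrpc_set_add_req(set, req2);
 *	rc = ptlrpc_set_wait(set);
 *	ptlrpc_set_destroy(set);
 * \endcode
 */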
2582 void ptlrpc_free_rq_pool(struct ptlrpc_request_pool *pool);
2583 void ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq);
2585 struct ptlrpc_request_pool *
2586 ptlrpc_init_rq_pool(int, int,
2587 void (*populate_pool)(struct ptlrpc_request_pool *, int));
2589 void ptlrpc_at_set_req_timeout(struct ptlrpc_request *req);
2590 struct ptlrpc_request *ptlrpc_request_alloc(struct obd_import *imp,
2591 const struct req_format *format);
2592 struct ptlrpc_request *ptlrpc_request_alloc_pool(struct obd_import *imp,
2593 struct ptlrpc_request_pool *,
2594 const struct req_format *format);
2595 void ptlrpc_request_free(struct ptlrpc_request *request);
2596 int ptlrpc_request_pack(struct ptlrpc_request *request,
2597 __u32 version, int opcode);
2598 struct ptlrpc_request *ptlrpc_request_alloc_pack(struct obd_import *imp,
2599 const struct req_format *format,
2600 __u32 version, int opcode);
2601 int ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
2602 __u32 version, int opcode, char **bufs,
2603 struct ptlrpc_cli_ctx *ctx);
2604 struct ptlrpc_request *ptlrpc_prep_req(struct obd_import *imp, __u32 version,
2605 int opcode, int count, __u32 *lengths,
2607 struct ptlrpc_request *ptlrpc_prep_req_pool(struct obd_import *imp,
2608 __u32 version, int opcode,
2609 int count, __u32 *lengths, char **bufs,
2610 struct ptlrpc_request_pool *pool);
2611 void ptlrpc_req_finished(struct ptlrpc_request *request);
2612 void ptlrpc_req_finished_with_imp_lock(struct ptlrpc_request *request);
2613 struct ptlrpc_request *ptlrpc_request_addref(struct ptlrpc_request *req);
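/**
 * Example (hedged sketch): the common synchronous client pattern of
 * allocate-and-pack, set the expected reply length, send, and release.
 * RQF_MY_FORMAT and MY_OPCODE are hypothetical placeholders for whatever
 * the caller's protocol defines.
 * \code
 *	struct ptlrpc_request *req;
 *	int rc;
 *
 *	req = ptlrpc_request_alloc_pack(imp, &RQF_MY_FORMAT,
 *					LUSTRE_OBD_VERSION, MY_OPCODE);
 *	if (req == NULL)
 *		return -ENOMEM;
 *	ptlrpc_request_set_replen(req);
 *	rc = ptlrpc_queue_wait(req);
 *	ptlrpc_req_finished(req);
 *	return rc;
 * \endcode
 */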
2614 struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req,
2615 unsigned npages, unsigned max_brw,
2616 unsigned type, unsigned portal);
2617 void __ptlrpc_free_bulk(struct ptlrpc_bulk_desc *bulk, int pin);
2618 static inline void ptlrpc_free_bulk_pin(struct ptlrpc_bulk_desc *bulk)
2620 __ptlrpc_free_bulk(bulk, 1);
2622 static inline void ptlrpc_free_bulk_nopin(struct ptlrpc_bulk_desc *bulk)
2624 __ptlrpc_free_bulk(bulk, 0);
2626 void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
2627 cfs_page_t *page, int pageoffset, int len, int);
2628 static inline void ptlrpc_prep_bulk_page_pin(struct ptlrpc_bulk_desc *desc,
2629 cfs_page_t *page, int pageoffset,
2632 __ptlrpc_prep_bulk_page(desc, page, pageoffset, len, 1);
2635 static inline void ptlrpc_prep_bulk_page_nopin(struct ptlrpc_bulk_desc *desc,
2636 cfs_page_t *page, int pageoffset,
2639 __ptlrpc_prep_bulk_page(desc, page, pageoffset, len, 0);
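/**
 * Example (hedged sketch): a client preparing a single-MD read bulk and
 * attaching pages to it. The page array, page count and portal value are
 * hypothetical, and CFS_PAGE_SIZE is assumed to be available from libcfs;
 * real callers use the portal defined for their target.
 * \code
 *	struct ptlrpc_bulk_desc *desc;
 *	int i;
 *
 *	desc = ptlrpc_prep_bulk_imp(req, npages, 1, BULK_PUT_SINK, portal);
 *	if (desc == NULL)
 *		return -ENOMEM;
 *	for (i = 0; i < npages; i++)
 *		ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, CFS_PAGE_SIZE);
 * \endcode
 * Pages attached with the _pin variant have a matching _pin free helper
 * declared above; the _nopin variants skip the extra page reference.
 */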
2642 void ptlrpc_retain_replayable_request(struct ptlrpc_request *req,
2643 struct obd_import *imp);
2644 __u64 ptlrpc_next_xid(void);
2645 __u64 ptlrpc_sample_next_xid(void);
2646 __u64 ptlrpc_req_xid(struct ptlrpc_request *request);
2648 /* Set of routines to run a function in ptlrpcd context */
2649 void *ptlrpcd_alloc_work(struct obd_import *imp,
2650 int (*cb)(const struct lu_env *, void *), void *data);
2651 void ptlrpcd_destroy_work(void *handler);
2652 int ptlrpcd_queue_work(void *handler);
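/**
 * Example (hedged sketch): running a callback in ptlrpcd context. The
 * my_async_cb() callback and my_data pointer are hypothetical placeholders,
 * and the error handling assumes an ERR_PTR-style return from
 * ptlrpcd_alloc_work().
 * \code
 *	void *handler;
 *	int rc;
 *
 *	handler = ptlrpcd_alloc_work(imp, my_async_cb, my_data);
 *	if (IS_ERR(handler))
 *		return PTR_ERR(handler);
 *	rc = ptlrpcd_queue_work(handler);
 *	...
 *	ptlrpcd_destroy_work(handler);
 * \endcode
 */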
2655 struct ptlrpc_service_buf_conf {
2656 /* nbufs is the number of buffers to allocate when growing the pool */
2657 unsigned int bc_nbufs;
2658 /* buffer size to post */
2659 unsigned int bc_buf_size;
2660 /* portal to listen for requests on */
2661 unsigned int bc_req_portal;
2662 /* portal to send replies to */
2663 unsigned int bc_rep_portal;
2664 /* maximum request size to be accepted for this service */
2665 unsigned int bc_req_max_size;
2666 /* maximum reply size this service can ever send */
2667 unsigned int bc_rep_max_size;
2670 struct ptlrpc_service_thr_conf {
2671 /* threadname should be 8 characters or less - 6 will be added on */
2673 /* factor by which the thread count increases per CPU */
2674 unsigned int tc_thr_factor;
2675 /* # of service threads to start on each partition during initialization */
2676 unsigned int tc_nthrs_init;
2678 * low-water mark of the per-partition thread-count upper limit while running;
2679 * service availability may be impacted if the number of threads is lower
2680 * than this value. It can be ZERO if the service doesn't require
2681 * CPU affinity or there is only one partition.
2683 unsigned int tc_nthrs_base;
2684 /* "soft" limit for total threads number */
2685 unsigned int tc_nthrs_max;
2686 /* user-specified number of threads; it will be validated against the
2687 * other members of this structure. */
2688 unsigned int tc_nthrs_user;
2689 /* set NUMA node affinity for service threads */
2690 unsigned int tc_cpu_affinity;
2691 /* Tags for lu_context associated with service thread */
2695 struct ptlrpc_service_cpt_conf {
2696 struct cfs_cpt_table *cc_cptable;
2697 /* string pattern to describe CPTs for a service */
2701 struct ptlrpc_service_conf {
2704 /* soft watchdog timeout multiplier to print stuck service traces */
2705 unsigned int psc_watchdog_factor;
2706 /* buffer information */
2707 struct ptlrpc_service_buf_conf psc_buf;
2708 /* thread information */
2709 struct ptlrpc_service_thr_conf psc_thr;
2710 /* CPU partition information */
2711 struct ptlrpc_service_cpt_conf psc_cpt;
2712 /* function table */
2713 struct ptlrpc_service_ops psc_ops;
2716 /* ptlrpc/service.c */
2718 * Server-side services API. Register/unregister service, request state
2719 * management, service thread management
2723 void ptlrpc_save_lock(struct ptlrpc_request *req,
2724 struct lustre_handle *lock, int mode, int no_ack);
2725 void ptlrpc_commit_replies(struct obd_export *exp);
2726 void ptlrpc_dispatch_difficult_reply(struct ptlrpc_reply_state *rs);
2727 void ptlrpc_schedule_difficult_reply(struct ptlrpc_reply_state *rs);
2728 int ptlrpc_hpreq_handler(struct ptlrpc_request *req);
2729 struct ptlrpc_service *ptlrpc_register_service(
2730 struct ptlrpc_service_conf *conf,
2731 struct proc_dir_entry *proc_entry);
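/**
 * Example (hedged sketch): registering a simple service. All numeric values,
 * the mysvc_* names and MYSVC_* portal constants are hypothetical; only
 * configuration members shown in this header are used, and a real caller
 * fills in additional members (such as the service name) not shown in this
 * excerpt. Error handling assumes an ERR_PTR-style return.
 * \code
 *	static struct ptlrpc_service_conf conf = {
 *		.psc_watchdog_factor	= 1,
 *		.psc_buf = {
 *			.bc_nbufs		= 64,
 *			.bc_buf_size		= 4096,
 *			.bc_req_max_size	= 4096,
 *			.bc_rep_max_size	= 4096,
 *			.bc_req_portal		= MYSVC_REQUEST_PORTAL,
 *			.bc_rep_portal		= MYSVC_REPLY_PORTAL,
 *		},
 *		.psc_thr = {
 *			.tc_nthrs_init		= 2,
 *			.tc_nthrs_max		= 8,
 *		},
 *		.psc_ops = {
 *			.so_req_handler		= mysvc_req_handler,
 *		},
 *	};
 *	struct ptlrpc_service *svc;
 *
 *	svc = ptlrpc_register_service(&conf, proc_entry);
 *	if (IS_ERR(svc))
 *		return PTR_ERR(svc);
 * \endcode
 */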
2732 void ptlrpc_stop_all_threads(struct ptlrpc_service *svc);
2734 int ptlrpc_start_threads(struct ptlrpc_service *svc);
2735 int ptlrpc_unregister_service(struct ptlrpc_service *service);
2736 int liblustre_check_services(void *arg);
2737 void ptlrpc_daemonize(char *name);
2738 int ptlrpc_service_health_check(struct ptlrpc_service *);
2739 void ptlrpc_server_drop_request(struct ptlrpc_request *req);
2742 int ptlrpc_hr_init(void);
2743 void ptlrpc_hr_fini(void);
2745 # define ptlrpc_hr_init() (0)
2746 # define ptlrpc_hr_fini() do {} while(0)
2751 /* ptlrpc/import.c */
2756 int ptlrpc_connect_import(struct obd_import *imp);
2757 int ptlrpc_init_import(struct obd_import *imp);
2758 int ptlrpc_disconnect_import(struct obd_import *imp, int noclose);
2759 int ptlrpc_import_recovery_state_machine(struct obd_import *imp);
2760 void deuuidify(char *uuid, const char *prefix, char **uuid_start,
2763 /* ptlrpc/pack_generic.c */
2764 int ptlrpc_reconnect_import(struct obd_import *imp);
2768 * ptlrpc msg buffer and swab interface
2772 int ptlrpc_buf_need_swab(struct ptlrpc_request *req, const int inout,
2774 void ptlrpc_buf_set_swabbed(struct ptlrpc_request *req, const int inout,
2776 int ptlrpc_unpack_rep_msg(struct ptlrpc_request *req, int len);
2777 int ptlrpc_unpack_req_msg(struct ptlrpc_request *req, int len);
2779 int lustre_msg_check_version(struct lustre_msg *msg, __u32 version);
2780 void lustre_init_msg_v2(struct lustre_msg_v2 *msg, int count, __u32 *lens,
2782 int lustre_pack_request(struct ptlrpc_request *, __u32 magic, int count,
2783 __u32 *lens, char **bufs);
2784 int lustre_pack_reply(struct ptlrpc_request *, int count, __u32 *lens,
2786 int lustre_pack_reply_v2(struct ptlrpc_request *req, int count,
2787 __u32 *lens, char **bufs, int flags);
2788 #define LPRFL_EARLY_REPLY 1
2789 int lustre_pack_reply_flags(struct ptlrpc_request *, int count, __u32 *lens,
2790 char **bufs, int flags);
2791 int lustre_shrink_msg(struct lustre_msg *msg, int segment,
2792 unsigned int newlen, int move_data);
2793 void lustre_free_reply_state(struct ptlrpc_reply_state *rs);
2794 int __lustre_unpack_msg(struct lustre_msg *m, int len);
2795 int lustre_msg_hdr_size(__u32 magic, int count);
2796 int lustre_msg_size(__u32 magic, int count, __u32 *lengths);
2797 int lustre_msg_size_v2(int count, __u32 *lengths);
2798 int lustre_packed_msg_size(struct lustre_msg *msg);
2799 int lustre_msg_early_size(void);
2800 void *lustre_msg_buf_v2(struct lustre_msg_v2 *m, int n, int min_size);
2801 void *lustre_msg_buf(struct lustre_msg *m, int n, int minlen);
2802 int lustre_msg_buflen(struct lustre_msg *m, int n);
2803 void lustre_msg_set_buflen(struct lustre_msg *m, int n, int len);
2804 int lustre_msg_bufcount(struct lustre_msg *m);
2805 char *lustre_msg_string(struct lustre_msg *m, int n, int max_len);
2806 __u32 lustre_msghdr_get_flags(struct lustre_msg *msg);
2807 void lustre_msghdr_set_flags(struct lustre_msg *msg, __u32 flags);
2808 __u32 lustre_msg_get_flags(struct lustre_msg *msg);
2809 void lustre_msg_add_flags(struct lustre_msg *msg, int flags);
2810 void lustre_msg_set_flags(struct lustre_msg *msg, int flags);
2811 void lustre_msg_clear_flags(struct lustre_msg *msg, int flags);
2812 __u32 lustre_msg_get_op_flags(struct lustre_msg *msg);
2813 void lustre_msg_add_op_flags(struct lustre_msg *msg, int flags);
2814 void lustre_msg_set_op_flags(struct lustre_msg *msg, int flags);
2815 struct lustre_handle *lustre_msg_get_handle(struct lustre_msg *msg);
2816 __u32 lustre_msg_get_type(struct lustre_msg *msg);
2817 __u32 lustre_msg_get_version(struct lustre_msg *msg);
2818 void lustre_msg_add_version(struct lustre_msg *msg, int version);
2819 __u32 lustre_msg_get_opc(struct lustre_msg *msg);
2820 __u64 lustre_msg_get_last_xid(struct lustre_msg *msg);
2821 __u64 lustre_msg_get_last_committed(struct lustre_msg *msg);
2822 __u64 *lustre_msg_get_versions(struct lustre_msg *msg);
2823 __u64 lustre_msg_get_transno(struct lustre_msg *msg);
2824 __u64 lustre_msg_get_slv(struct lustre_msg *msg);
2825 __u32 lustre_msg_get_limit(struct lustre_msg *msg);
2826 void lustre_msg_set_slv(struct lustre_msg *msg, __u64 slv);
2827 void lustre_msg_set_limit(struct lustre_msg *msg, __u64 limit);
2828 int lustre_msg_get_status(struct lustre_msg *msg);
2829 __u32 lustre_msg_get_conn_cnt(struct lustre_msg *msg);
2830 int lustre_msg_is_v1(struct lustre_msg *msg);
2831 __u32 lustre_msg_get_magic(struct lustre_msg *msg);
2832 __u32 lustre_msg_get_timeout(struct lustre_msg *msg);
2833 __u32 lustre_msg_get_service_time(struct lustre_msg *msg);
2834 char *lustre_msg_get_jobid(struct lustre_msg *msg);
2835 __u32 lustre_msg_get_cksum(struct lustre_msg *msg);
2836 #if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 7, 50, 0)
2837 __u32 lustre_msg_calc_cksum(struct lustre_msg *msg, int compat18);
2839 # warning "remove checksum compatibility support for b1_8"
2840 __u32 lustre_msg_calc_cksum(struct lustre_msg *msg);
2842 void lustre_msg_set_handle(struct lustre_msg *msg,struct lustre_handle *handle);
2843 void lustre_msg_set_type(struct lustre_msg *msg, __u32 type);
2844 void lustre_msg_set_opc(struct lustre_msg *msg, __u32 opc);
2845 void lustre_msg_set_last_xid(struct lustre_msg *msg, __u64 last_xid);
2846 void lustre_msg_set_last_committed(struct lustre_msg *msg,__u64 last_committed);
2847 void lustre_msg_set_versions(struct lustre_msg *msg, __u64 *versions);
2848 void lustre_msg_set_transno(struct lustre_msg *msg, __u64 transno);
2849 void lustre_msg_set_status(struct lustre_msg *msg, __u32 status);
2850 void lustre_msg_set_conn_cnt(struct lustre_msg *msg, __u32 conn_cnt);
2851 void ptlrpc_req_set_repsize(struct ptlrpc_request *req, int count, __u32 *sizes);
2852 void ptlrpc_request_set_replen(struct ptlrpc_request *req);
2853 void lustre_msg_set_timeout(struct lustre_msg *msg, __u32 timeout);
2854 void lustre_msg_set_service_time(struct lustre_msg *msg, __u32 service_time);
2855 void lustre_msg_set_jobid(struct lustre_msg *msg, char *jobid);
2856 void lustre_msg_set_cksum(struct lustre_msg *msg, __u32 cksum);
2859 lustre_shrink_reply(struct ptlrpc_request *req, int segment,
2860 unsigned int newlen, int move_data)
2862 LASSERT(req->rq_reply_state);
2863 LASSERT(req->rq_repmsg);
2864 req->rq_replen = lustre_shrink_msg(req->rq_repmsg, segment,
2869 /** Change request phase of \a req to \a new_phase */
2871 ptlrpc_rqphase_move(struct ptlrpc_request *req, enum rq_phase new_phase)
2873 if (req->rq_phase == new_phase)
2876 if (new_phase == RQ_PHASE_UNREGISTERING) {
2877 req->rq_next_phase = req->rq_phase;
2879 cfs_atomic_inc(&req->rq_import->imp_unregistering);
2882 if (req->rq_phase == RQ_PHASE_UNREGISTERING) {
2884 cfs_atomic_dec(&req->rq_import->imp_unregistering);
2887 DEBUG_REQ(D_INFO, req, "move req \"%s\" -> \"%s\"",
2888 ptlrpc_rqphase2str(req), ptlrpc_phase2str(new_phase));
2890 req->rq_phase = new_phase;
2894 * Returns true if request \a req got early reply and hard deadline is not met
2897 ptlrpc_client_early(struct ptlrpc_request *req)
2899 if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
2900 req->rq_reply_deadline > cfs_time_current_sec())
2902 return req->rq_early;
2906 * Returns true if we got real reply from server for this request
2909 ptlrpc_client_replied(struct ptlrpc_request *req)
2911 if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
2912 req->rq_reply_deadline > cfs_time_current_sec())
2914 return req->rq_replied;
2917 /** Returns true if request \a req is in process of receiving server reply */
2919 ptlrpc_client_recv(struct ptlrpc_request *req)
2921 if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
2922 req->rq_reply_deadline > cfs_time_current_sec())
2924 return req->rq_receiving_reply;
2928 ptlrpc_client_recv_or_unlink(struct ptlrpc_request *req)
2932 spin_lock(&req->rq_lock);
2933 if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
2934 req->rq_reply_deadline > cfs_time_current_sec()) {
2935 spin_unlock(&req->rq_lock);
2938 rc = req->rq_receiving_reply || req->rq_must_unlink;
2939 spin_unlock(&req->rq_lock);
2944 ptlrpc_client_wake_req(struct ptlrpc_request *req)
2946 if (req->rq_set == NULL)
2947 cfs_waitq_signal(&req->rq_reply_waitq);
2949 cfs_waitq_signal(&req->rq_set->set_waitq);
2953 ptlrpc_rs_addref(struct ptlrpc_reply_state *rs)
2955 LASSERT(cfs_atomic_read(&rs->rs_refcount) > 0);
2956 cfs_atomic_inc(&rs->rs_refcount);
2960 ptlrpc_rs_decref(struct ptlrpc_reply_state *rs)
2962 LASSERT(cfs_atomic_read(&rs->rs_refcount) > 0);
2963 if (cfs_atomic_dec_and_test(&rs->rs_refcount))
2964 lustre_free_reply_state(rs);
2967 /* Should only be called once per req */
2968 static inline void ptlrpc_req_drop_rs(struct ptlrpc_request *req)
2970 if (req->rq_reply_state == NULL)
2971 return; /* shouldn't occur */
2972 ptlrpc_rs_decref(req->rq_reply_state);
2973 req->rq_reply_state = NULL;
2974 req->rq_repmsg = NULL;
2977 static inline __u32 lustre_request_magic(struct ptlrpc_request *req)
2979 return lustre_msg_get_magic(req->rq_reqmsg);
2982 static inline int ptlrpc_req_get_repsize(struct ptlrpc_request *req)
2984 switch (req->rq_reqmsg->lm_magic) {
2985 case LUSTRE_MSG_MAGIC_V2:
2986 return req->rq_reqmsg->lm_repsize;
2988 LASSERTF(0, "incorrect message magic: %08x\n",
2989 req->rq_reqmsg->lm_magic);
2994 static inline int ptlrpc_send_limit_expired(struct ptlrpc_request *req)
2996 if (req->rq_delay_limit != 0 &&
2997 cfs_time_before(cfs_time_add(req->rq_queued_time,
2998 cfs_time_seconds(req->rq_delay_limit)),
2999 cfs_time_current())) {
3005 static inline int ptlrpc_no_resend(struct ptlrpc_request *req)
3007 if (!req->rq_no_resend && ptlrpc_send_limit_expired(req)) {
3008 spin_lock(&req->rq_lock);
3009 req->rq_no_resend = 1;
3010 spin_unlock(&req->rq_lock);
3012 return req->rq_no_resend;
3016 ptlrpc_server_get_timeout(struct ptlrpc_service_part *svcpt)
3018 int at = AT_OFF ? 0 : at_get(&svcpt->scp_at_estimate);
3020 return svcpt->scp_service->srv_watchdog_factor *
3021 max_t(int, at, obd_timeout);
3024 static inline struct ptlrpc_service *
3025 ptlrpc_req2svc(struct ptlrpc_request *req)
3027 LASSERT(req->rq_rqbd != NULL);
3028 return req->rq_rqbd->rqbd_svcpt->scp_service;
3031 /* ldlm/ldlm_lib.c */
3033 * Target client logic
3036 int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg);
3037 int client_obd_cleanup(struct obd_device *obddev);
3038 int client_connect_import(const struct lu_env *env,
3039 struct obd_export **exp, struct obd_device *obd,
3040 struct obd_uuid *cluuid, struct obd_connect_data *,
3042 int client_disconnect_export(struct obd_export *exp);
3043 int client_import_add_conn(struct obd_import *imp, struct obd_uuid *uuid,
3045 int client_import_del_conn(struct obd_import *imp, struct obd_uuid *uuid);
3046 int client_import_find_conn(struct obd_import *imp, lnet_nid_t peer,
3047 struct obd_uuid *uuid);
3048 int import_set_conn_priority(struct obd_import *imp, struct obd_uuid *uuid);
3049 void client_destroy_import(struct obd_import *imp);
3052 #ifdef HAVE_SERVER_SUPPORT
3053 int server_disconnect_export(struct obd_export *exp);
3056 /* ptlrpc/pinger.c */
3058 * Pinger API (client side only)
3061 extern int suppress_pings;
3062 enum timeout_event {
3065 struct timeout_item;
3066 typedef int (*timeout_cb_t)(struct timeout_item *, void *);
3067 int ptlrpc_pinger_add_import(struct obd_import *imp);
3068 int ptlrpc_pinger_del_import(struct obd_import *imp);
3069 int ptlrpc_add_timeout_client(int time, enum timeout_event event,
3070 timeout_cb_t cb, void *data,
3071 cfs_list_t *obd_list);
3072 int ptlrpc_del_timeout_client(cfs_list_t *obd_list,
3073 enum timeout_event event);
3074 struct ptlrpc_request * ptlrpc_prep_ping(struct obd_import *imp);
3075 int ptlrpc_obd_ping(struct obd_device *obd);
3076 cfs_time_t ptlrpc_suspend_wakeup_time(void);
3078 void ping_evictor_start(void);
3079 void ping_evictor_stop(void);
3081 #define ping_evictor_start() do {} while (0)
3082 #define ping_evictor_stop() do {} while (0)
3084 int ptlrpc_check_and_wait_suspend(struct ptlrpc_request *req);
3087 /* ptlrpc daemon bind policy */
3089 /* all ptlrpcd threads are free mode */
3090 PDB_POLICY_NONE = 1,
3091 /* all ptlrpcd threads are bound mode */
3092 PDB_POLICY_FULL = 2,
3093 /* <free1 bound1> <free2 bound2> ... <freeN boundN> */
3094 PDB_POLICY_PAIR = 3,
3095 /* <free1 bound1> <bound1 free2> ... <freeN boundN> <boundN free1>,
3096 * means each ptlrpcd[X] has two partners: thread[X-1] and thread[X+1].
3097 * If the kernel supports NUMA, ptlrpcd threads are bound and
3098 * grouped by NUMA node */
3099 PDB_POLICY_NEIGHBOR = 4,
3102 /* ptlrpc daemon load policy
3103 * It is the caller's duty to specify how to push the async RPC into some ptlrpcd
3104 * queue, but this is not enforced and is affected by "ptlrpcd_bind_policy". If it is
3105 * "PDB_POLICY_FULL", then the RPC will be processed by the selected ptlrpcd;
3106 * otherwise, the RPC may be processed by the selected ptlrpcd or its partner,
3107 * depending on which is scheduled first, to accelerate RPC processing. */
3109 /* on the same CPU core as the caller */
3110 PDL_POLICY_SAME = 1,
3111 /* within the same CPU partition, but not the same core as the caller */
3112 PDL_POLICY_LOCAL = 2,
3113 /* round-robin on all CPU cores, but not the same core as the caller */
3114 PDL_POLICY_ROUND = 3,
3115 /* the specified CPU core is preferred, but not enforced */
3116 PDL_POLICY_PREFERRED = 4,
3119 /* ptlrpc/ptlrpcd.c */
3120 void ptlrpcd_stop(struct ptlrpcd_ctl *pc, int force);
3121 void ptlrpcd_free(struct ptlrpcd_ctl *pc);
3122 void ptlrpcd_wake(struct ptlrpc_request *req);
3123 void ptlrpcd_add_req(struct ptlrpc_request *req, pdl_policy_t policy, int idx);
3124 void ptlrpcd_add_rqset(struct ptlrpc_request_set *set);
3125 int ptlrpcd_addref(void);
3126 void ptlrpcd_decref(void);
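/**
 * Example (hedged sketch): handing an already-packed asynchronous request off
 * to a ptlrpcd thread, letting the daemon pick a core in round-robin fashion.
 * The my_interpret_cb() assignment is only illustrative, and -1 is used as a
 * "no preferred index" value.
 * \code
 *	req->rq_interpret_reply = my_interpret_cb;
 *	ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
 * \endcode
 */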
3128 /* ptlrpc/lproc_ptlrpc.c */
3130 * procfs output related functions
3133 const char* ll_opcode2str(__u32 opcode);
3135 void ptlrpc_lprocfs_register_obd(struct obd_device *obd);
3136 void ptlrpc_lprocfs_unregister_obd(struct obd_device *obd);
3137 void ptlrpc_lprocfs_brw(struct ptlrpc_request *req, int bytes);
3139 static inline void ptlrpc_lprocfs_register_obd(struct obd_device *obd) {}
3140 static inline void ptlrpc_lprocfs_unregister_obd(struct obd_device *obd) {}
3141 static inline void ptlrpc_lprocfs_brw(struct ptlrpc_request *req, int bytes) {}
3145 /* ptlrpc/llog_server.c */
3146 int llog_origin_handle_open(struct ptlrpc_request *req);
3147 int llog_origin_handle_destroy(struct ptlrpc_request *req);
3148 int llog_origin_handle_prev_block(struct ptlrpc_request *req);
3149 int llog_origin_handle_next_block(struct ptlrpc_request *req);
3150 int llog_origin_handle_read_header(struct ptlrpc_request *req);
3151 int llog_origin_handle_close(struct ptlrpc_request *req);
3152 int llog_origin_handle_cancel(struct ptlrpc_request *req);
3154 /* ptlrpc/llog_client.c */
3155 extern struct llog_operations llog_client_ops;