4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
27 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
30 * Copyright (c) 2010, 2013, Intel Corporation.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 /** \defgroup PtlRPC Portal RPC and networking module.
38 * PortalRPC is the layer used by the rest of the Lustre code to achieve network
39 * communications: establish connections with corresponding export and import
40 * states, listen for a service, send and receive RPCs.
41 * PortalRPC also includes the base recovery framework: packet resending and
42 * replaying, reconnections, and the pinger.
44 * PortalRPC utilizes LNet as its transport layer.
58 #if defined(__linux__)
59 #include <linux/lustre_net.h>
61 #error Unsupported operating system.
64 #include <libcfs/libcfs.h>
66 #include <lnet/lnet.h>
67 #include <lustre/lustre_idl.h>
68 #include <lustre_ha.h>
69 #include <lustre_sec.h>
70 #include <lustre_import.h>
71 #include <lprocfs_status.h>
72 #include <lu_object.h>
73 #include <lustre_req_layout.h>
75 #include <obd_support.h>
76 #include <lustre_ver.h>
78 /* MD flags we _always_ use */
79 #define PTLRPC_MD_OPTIONS 0
82 * Max # of bulk operations in one request.
83 * In order for the client and server to properly negotiate the maximum
84 * possible transfer size, PTLRPC_BULK_OPS_COUNT must be a power-of-two
85 * value. The client is free to limit the actual RPC size for any bulk
86 * transfer via cl_max_pages_per_rpc to some non-power-of-two value. */
87 #define PTLRPC_BULK_OPS_BITS 2
88 #define PTLRPC_BULK_OPS_COUNT (1U << PTLRPC_BULK_OPS_BITS)
90 * PTLRPC_BULK_OPS_MASK is for the convenience of the client only, and
91 * should not be used on the server at all. Otherwise, it imposes a
92 * protocol limitation on the maximum RPC size that can be used by any
93 * RPC sent to that server in the future. Instead, the server should
94 * use the negotiated per-client ocd_brw_size to determine the bulk
96 #define PTLRPC_BULK_OPS_MASK (~((__u64)PTLRPC_BULK_OPS_COUNT - 1))
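/**
 * Example (an illustrative sketch, not part of the original header): since
 * PTLRPC_BULK_OPS_COUNT is a power of two, the mask above rounds a value
 * down to the largest multiple of PTLRPC_BULK_OPS_COUNT that does not
 * exceed it. The helper name below is hypothetical.
 *
 * \code
 * static inline __u64 ptlrpc_bulk_ops_align(__u64 count)
 * {
 *	return count & PTLRPC_BULK_OPS_MASK;
 * }
 * \endcode
 */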
99 * Define maxima for bulk I/O.
101 * A single PTLRPC BRW request is sent via up to PTLRPC_BULK_OPS_COUNT
102 * of LNET_MTU sized RDMA transfers. Clients and servers negotiate the
103 * currently supported maximum between peers at connect via ocd_brw_size.
105 #define PTLRPC_MAX_BRW_BITS (LNET_MTU_BITS + PTLRPC_BULK_OPS_BITS)
106 #define PTLRPC_MAX_BRW_SIZE (1 << PTLRPC_MAX_BRW_BITS)
107 #define PTLRPC_MAX_BRW_PAGES (PTLRPC_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT)
109 #define ONE_MB_BRW_SIZE (1 << LNET_MTU_BITS)
110 #define MD_MAX_BRW_SIZE (1 << LNET_MTU_BITS)
111 #define MD_MAX_BRW_PAGES (MD_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT)
112 #define DT_MAX_BRW_SIZE PTLRPC_MAX_BRW_SIZE
113 #define DT_MAX_BRW_PAGES (DT_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT)
114 #define OFD_MAX_BRW_SIZE (1 << LNET_MTU_BITS)
116 /* When PAGE_SIZE is a constant, we can check our arithmetic here with cpp! */
118 # if ((PTLRPC_MAX_BRW_PAGES & (PTLRPC_MAX_BRW_PAGES - 1)) != 0)
119 # error "PTLRPC_MAX_BRW_PAGES isn't a power of two"
121 # if (PTLRPC_MAX_BRW_SIZE != (PTLRPC_MAX_BRW_PAGES * PAGE_CACHE_SIZE))
122 # error "PTLRPC_MAX_BRW_SIZE isn't PTLRPC_MAX_BRW_PAGES * PAGE_CACHE_SIZE"
124 # if (PTLRPC_MAX_BRW_SIZE > LNET_MTU * PTLRPC_BULK_OPS_COUNT)
125 # error "PTLRPC_MAX_BRW_SIZE too big"
127 # if (PTLRPC_MAX_BRW_PAGES > LNET_MAX_IOV * PTLRPC_BULK_OPS_COUNT)
128 # error "PTLRPC_MAX_BRW_PAGES too big"
130 #endif /* __KERNEL__ */
132 #define PTLRPC_NTHRS_INIT 2
137 * These constants determine how memory is used to buffer incoming service requests.
139 * ?_NBUFS # buffers to allocate when growing the pool
140 * ?_BUFSIZE # bytes in a single request buffer
141 * ?_MAXREQSIZE # maximum request service will receive
143 * When fewer than ?_NBUFS/2 buffers are posted for receive, another chunk
144 * of ?_NBUFS is added to the pool.
146 * Messages larger than ?_MAXREQSIZE are dropped. Request buffers are
147 * considered full when less than ?_MAXREQSIZE is left in them.
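/**
 * A minimal sketch of the growth rule described above; the real logic lives
 * in the ptlrpc service code, and the names used here are hypothetical:
 *
 * \code
 * static void rqbd_pool_check(int nbufs_posted, int nbufs)
 * {
 *	if (nbufs_posted < nbufs / 2)
 *		post_more_rqbds(nbufs);	  // post another chunk of ?_NBUFS
 * }
 * \endcode
 */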
152 * These constants determine how threads are created for a ptlrpc service.
154 * ?_NTHRS_INIT # of threads to create for each service partition at
155 * initialization time. For a non-affinity service with
156 * only one partition, this is the overall # of threads
157 * for the service at initialization time.
158 * ?_NTHRS_BASE minimum # of threads that must be created for each
159 * ptlrpc partition to keep the service healthy.
160 * It is the low-water mark of the thread upper limit
161 * for each partition.
162 * ?_THR_FACTOR # of threads that can be added to the thread upper
163 * limit for each CPU core. This factor is only a
164 * reference; we might decrease it if the number of cores
165 * per CPT is above a limit.
166 * ?_NTHRS_MAX overall # of threads that can be created for a service.
167 * It is a soft limit, because if a service is running
168 * on a machine with hundreds of cores and tens of
169 * CPU partitions, we still need to guarantee that each
170 * partition has ?_NTHRS_BASE threads, so the total,
171 * ?_NTHRS_BASE * number_of_cpts, can
172 * exceed ?_NTHRS_MAX.
176 * #define MDS_NTHRS_INIT 2
177 * #define MDS_NTHRS_BASE 64
178 * #define MDS_NTHRS_FACTOR 8
179 * #define MDS_NTHRS_MAX 1024
182 * ---------------------------------------------------------------------
183 * Server(A) has 16 cores and the user configured it with 4 partitions, so
184 * each partition has 4 cores; the actual number of service threads on
185 * each partition is:
186 * MDS_NTHRS_BASE(64) + cores(4) * MDS_NTHRS_FACTOR(8) = 96
188 * Total number of threads for the service is:
189 * 96 * partitions(4) = 384
192 * ---------------------------------------------------------------------
193 * Server(B) has 32 cores and the user configured it with 4 partitions, so
194 * each partition has 8 cores; the actual number of service threads on
195 * each partition is:
196 * MDS_NTHRS_BASE(64) + cores(8) * MDS_NTHRS_FACTOR(8) = 128
198 * Total number of threads for the service is:
199 * 128 * partitions(4) = 512
202 * ---------------------------------------------------------------------
203 * Server(C) has 96 cores and the user configured it with 8 partitions, so
204 * each partition has 12 cores; the actual number of service threads on
205 * each partition is:
206 * MDS_NTHRS_BASE(64) + cores(12) * MDS_NTHRS_FACTOR(8) = 160
208 * Total number of threads for the service is:
209 * 160 * partitions(8) = 1280
211 * However, this is above the soft limit MDS_NTHRS_MAX, so we instead choose
212 * the following number as the upper limit of threads for each partition:
213 * MDS_NTHRS_MAX(1024) / partitions(8) = 128
216 * ---------------------------------------------------------------------
217 * Server(D) has a thousand cores and the user configured it with 32 partitions:
218 * MDS_NTHRS_BASE(64) * 32 = 2048
220 * which is already above the soft limit MDS_NTHRS_MAX(1024), but we still need
221 * to guarantee that each partition has at least MDS_NTHRS_BASE(64) threads
222 * to keep the service healthy, so the total number of threads will be 2048.
224 * NB: we don't suggest choosing a server with that many cores, because the
225 * backend filesystem itself, the buffer cache, or the underlying network
226 * stack might have SMP scalability issues at that large a scale.
228 * If the user already has a fat machine with hundreds or thousands of cores,
229 * there are two configuration choices:
230 * a) create a CPU table from a subset of all CPUs and run Lustre on
232 * b) bind service threads to a few partitions; see the module parameters of
233 * MDS and OSS for details
235 * NB: these calculations (and the examples above) are simplified to aid
236 * understanding; the real implementation is a little more complex, so
237 * please see ptlrpc_server_nthreads_check() for details.
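/**
 * A condensed sketch of the sizing rule walked through in the examples
 * above; this helper is hypothetical and ignores the extra adjustments that
 * ptlrpc_server_nthreads_check() performs:
 *
 * \code
 * static int nthrs_per_partition(int base, int factor, int soft_max,
 *				  int cores_per_cpt, int ncpts)
 * {
 *	int nthrs = base + cores_per_cpt * factor;
 *
 *	if (nthrs * ncpts > soft_max)	  // apply the soft limit...
 *		nthrs = soft_max / ncpts;
 *	if (nthrs < base)		  // ...but never drop below base
 *		nthrs = base;
 *	return nthrs;
 * }
 * \endcode
 *
 * With the MDS constants this reproduces the examples: 64 + 4 * 8 = 96 for
 * Server(A), and 1024 / 8 = 128 for the 96-core, 8-partition case.
 */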
242 * LDLM threads constants:
244 * Given 8 as the factor and 24 as the base thread count:
247 * On a 4-core machine we will have 24 + 8 * 4 = 56 threads.
250 * On an 8-core machine with 2 partitions we will have 24 + 4 * 8 = 56
251 * threads for each partition, and the total thread count will be 112.
254 * On a 64-core machine with 8 partitions we will need LDLM_NTHRS_BASE(24)
255 * threads for each partition to keep the service healthy, so the total
256 * thread count should be at least 24 * 8 = 192.
258 * So with these constants, thread counts will be at a similar level to
259 * old versions, unless the target machine has over a hundred cores.
261 #define LDLM_THR_FACTOR 8
262 #define LDLM_NTHRS_INIT PTLRPC_NTHRS_INIT
263 #define LDLM_NTHRS_BASE 24
264 #define LDLM_NTHRS_MAX (num_online_cpus() == 1 ? 64 : 128)
266 #define LDLM_BL_THREADS LDLM_NTHRS_AUTO_INIT
267 #define LDLM_CLIENT_NBUFS 1
268 #define LDLM_SERVER_NBUFS 64
269 #define LDLM_BUFSIZE (8 * 1024)
270 #define LDLM_MAXREQSIZE (5 * 1024)
271 #define LDLM_MAXREPSIZE (1024)
274 * MDS threads constants:
276 * Please see the examples in "Thread Constants"; MDS thread counts will be
277 * comparable to those of old versions, unless the server has many cores.
279 #ifndef MDS_MAX_THREADS
280 #define MDS_MAX_THREADS 1024
281 #define MDS_MAX_OTHR_THREADS 256
283 #else /* MDS_MAX_THREADS */
284 #if MDS_MAX_THREADS < PTLRPC_NTHRS_INIT
285 #undef MDS_MAX_THREADS
286 #define MDS_MAX_THREADS PTLRPC_NTHRS_INIT
288 #define MDS_MAX_OTHR_THREADS max(PTLRPC_NTHRS_INIT, MDS_MAX_THREADS / 2)
291 /* default service */
292 #define MDS_THR_FACTOR 8
293 #define MDS_NTHRS_INIT PTLRPC_NTHRS_INIT
294 #define MDS_NTHRS_MAX MDS_MAX_THREADS
295 #define MDS_NTHRS_BASE min(64, MDS_NTHRS_MAX)
297 /* read-page service */
298 #define MDS_RDPG_THR_FACTOR 4
299 #define MDS_RDPG_NTHRS_INIT PTLRPC_NTHRS_INIT
300 #define MDS_RDPG_NTHRS_MAX MDS_MAX_OTHR_THREADS
301 #define MDS_RDPG_NTHRS_BASE min(48, MDS_RDPG_NTHRS_MAX)
303 /* these should be removed when we remove setattr service in the future */
304 #define MDS_SETA_THR_FACTOR 4
305 #define MDS_SETA_NTHRS_INIT PTLRPC_NTHRS_INIT
306 #define MDS_SETA_NTHRS_MAX MDS_MAX_OTHR_THREADS
307 #define MDS_SETA_NTHRS_BASE min(48, MDS_SETA_NTHRS_MAX)
309 /* non-affinity threads */
310 #define MDS_OTHR_NTHRS_INIT PTLRPC_NTHRS_INIT
311 #define MDS_OTHR_NTHRS_MAX MDS_MAX_OTHR_THREADS
316 * Assume file name length = FNAME_MAX = 256 (true for ext3).
317 * path name length = PATH_MAX = 4096
318 * LOV MD size max = EA_MAX = 24 * 2000
319 * (NB: 24 is size of lov_ost_data)
320 * LOV LOGCOOKIE size max = 32 * 2000
321 * (NB: 32 is size of llog_cookie)
322 * symlink: FNAME_MAX + PATH_MAX <- largest
323 * link: FNAME_MAX + PATH_MAX (mds_rec_link < mds_rec_create)
324 * rename: FNAME_MAX + FNAME_MAX
325 * open: FNAME_MAX + EA_MAX
327 * MDS_MAXREQSIZE ~= 4736 bytes =
328 * lustre_msg + ldlm_request + mdt_body + mds_rec_create + FNAME_MAX + PATH_MAX
329 * MDS_MAXREPSIZE ~= 8300 bytes = lustre_msg + llog_header
331 * Realistic size is about 512 bytes (20 character name + 128 char symlink),
332 * except in the open case where there are a large number of OSTs in a LOV.
334 #define MDS_MAXREQSIZE (5 * 1024) /* >= 4736 */
335 #define MDS_MAXREPSIZE (9 * 1024) /* >= 8300 */
338 * MDS incoming request with LOV EA
339 * 24 = sizeof(struct lov_ost_data), i.e. replay of opencreate
341 #define MDS_LOV_MAXREQSIZE max(MDS_MAXREQSIZE, \
342 362 + LOV_MAX_STRIPE_COUNT * 24)
344 * MDS outgoing reply with LOV EA
346 * NB: the max reply size a Lustre 2.4+ client can get from an old MDS is:
347 * LOV_MAX_STRIPE_COUNT * (llog_cookie + lov_ost_data) + extra bytes
349 * but a 2.4 or later MDS will never send a reply with llog_cookie to any
350 * client version. This macro is defined for the server-side reply buffer size.
352 #define MDS_LOV_MAXREPSIZE MDS_LOV_MAXREQSIZE
355 * This is the size of a maximum REINT_SETXATTR request:
357 * lustre_msg 56 (32 + 4 x 5 + 4)
359 * mdt_rec_setxattr 136
361 * name 256 (XATTR_NAME_MAX)
362 * value 65536 (XATTR_SIZE_MAX)
364 #define MDS_EA_MAXREQSIZE 66288
367 * These are the maximum request and reply sizes (rounded up to 1 KB
368 * boundaries) for the "regular" MDS_REQUEST_PORTAL and MDS_REPLY_PORTAL.
370 #define MDS_REG_MAXREQSIZE (((max(MDS_EA_MAXREQSIZE, \
371 MDS_LOV_MAXREQSIZE) + 1023) >> 10) << 10)
372 #define MDS_REG_MAXREPSIZE MDS_REG_MAXREQSIZE
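/*
 * Worked example of the rounding above: ((x + 1023) >> 10) << 10 yields the
 * smallest multiple of 1024 that is >= x. With the usual stripe limits,
 * MDS_EA_MAXREQSIZE (66288) is the larger operand, and
 * (66288 + 1023) >> 10 = 65, so MDS_REG_MAXREQSIZE is 65 << 10 = 66560
 * bytes (65 KB).
 */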
375 * The update request includes all of the updates from the create, which
376 * might include linkea (4K maximum), together with other updates; we set
377 * it to 9K: lustre_msg + ptlrpc_body + UPDATE_BUF_SIZE (8K)
379 #define OUT_MAXREQSIZE (9 * 1024)
380 #define OUT_MAXREPSIZE MDS_MAXREPSIZE
382 /** MDS_BUFSIZE = max_reqsize (w/o LOV EA) + max sptlrpc payload size */
383 #define MDS_BUFSIZE max(MDS_MAXREQSIZE + SPTLRPC_MAX_PAYLOAD, \
387 * MDS_REG_BUFSIZE should at least be MDS_REG_MAXREQSIZE + SPTLRPC_MAX_PAYLOAD.
388 * However, we need to allocate a much larger buffer for it because LNet
389 * requires each MD(rqbd) to have at least MDS_REG_MAXREQSIZE bytes left to
390 * avoid dropping a maximum-sized incoming request. So if MDS_REG_BUFSIZE is
391 * only a little larger than MDS_REG_MAXREQSIZE, it can fit only one request,
392 * even though about MDS_REG_MAXREQSIZE bytes are left unused in the rqbd,
393 * and memory utilization is very low.
395 * Meanwhile, the size of an rqbd can't be too large, because an rqbd can't
396 * be reused until all requests that fit in it have been processed and
397 * released; one long-blocked request can thus prevent the rqbd from being
398 * reused. Now we set the request buffer size to 160 KB, so even when each
399 * rqbd is unlinked from LNet with 65 KB unused, buffer utilization will be
400 * about (160 - 65) / 160 = 59%. Please check LU-2432 for details.
402 #define MDS_REG_BUFSIZE max(MDS_REG_MAXREQSIZE + SPTLRPC_MAX_PAYLOAD, \
406 * OUT_BUFSIZE = max_out_reqsize + max sptlrpc payload (~1K), which is
407 * about 10K. For the same reason as MDS_REG_BUFSIZE, we also give some
408 * extra bytes to each request buffer to improve the buffer utilization rate.
410 #define OUT_BUFSIZE max(OUT_MAXREQSIZE + SPTLRPC_MAX_PAYLOAD, \
413 /** FLD_MAXREQSIZE == lustre_msg + __u32 padding + ptlrpc_body + opc */
414 #define FLD_MAXREQSIZE (160)
416 /** FLD_MAXREPSIZE == lustre_msg + ptlrpc_body */
417 #define FLD_MAXREPSIZE (152)
418 #define FLD_BUFSIZE (1 << 12)
421 * SEQ_MAXREQSIZE == lustre_msg + __u32 padding + ptlrpc_body + opc + lu_range +
423 #define SEQ_MAXREQSIZE (160)
425 /** SEQ_MAXREPSIZE == lustre_msg + ptlrpc_body + lu_range */
426 #define SEQ_MAXREPSIZE (152)
427 #define SEQ_BUFSIZE (1 << 12)
429 /** MGS threads must be >= 3, see bug 22458 comment #28 */
430 #define MGS_NTHRS_INIT (PTLRPC_NTHRS_INIT + 1)
431 #define MGS_NTHRS_MAX 32
434 #define MGS_BUFSIZE (8 * 1024)
435 #define MGS_MAXREQSIZE (7 * 1024)
436 #define MGS_MAXREPSIZE (9 * 1024)
439 * OSS threads constants:
441 * Given 8 as factor and 64 as base threads number
444 * On 8-core server configured to 2 partitions, we will have
445 * 64 + 8 * 4 = 96 threads for each partition, 192 total threads.
448 * On 32-core machine configured to 4 partitions, we will have
449 * 64 + 8 * 8 = 112 threads for each partition, so total threads number
450 * will be 112 * 4 = 448.
453 * On 64-core machine configured to 4 partitions, we will have
454 * 64 + 16 * 8 = 192 threads for each partition, so total threads number
455 * will be 192 * 4 = 768 which is above limit OSS_NTHRS_MAX(512), so we
456 * cut off the value to OSS_NTHRS_MAX(512) / 4 which is 128 threads
457 * for each partition.
459 * So we can see that with these constants, thread counts will be at a
460 * similar level to old versions, unless the server has many cores.
462 /* depress threads factor for VM with small memory size */
463 #define OSS_THR_FACTOR min_t(int, 8, \
464 NUM_CACHEPAGES >> (28 - PAGE_CACHE_SHIFT))
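/*
 * NUM_CACHEPAGES >> (28 - PAGE_CACHE_SHIFT) is the amount of cacheable
 * memory expressed in 256 MB units, so OSS_THR_FACTOR scales down below 8
 * on nodes with less than 8 * 256 MB = 2 GB of cache pages.
 */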
465 #define OSS_NTHRS_INIT (PTLRPC_NTHRS_INIT + 1)
466 #define OSS_NTHRS_BASE 64
467 #define OSS_NTHRS_MAX 512
469 /* threads for handling "create" request */
470 #define OSS_CR_THR_FACTOR 1
471 #define OSS_CR_NTHRS_INIT PTLRPC_NTHRS_INIT
472 #define OSS_CR_NTHRS_BASE 8
473 #define OSS_CR_NTHRS_MAX 64
476 * OST_IO_MAXREQSIZE ~=
477 * lustre_msg + ptlrpc_body + obdo + obd_ioobj +
478 * DT_MAX_BRW_PAGES * niobuf_remote
480 * - single object with 16 pages is 512 bytes
481 * - OST_IO_MAXREQSIZE must be at least 1 page of cookies plus some spillover
482 * - Must be a multiple of 1024
483 * - actual size is about 18K
485 #define _OST_MAXREQSIZE_SUM (sizeof(struct lustre_msg) + \
486 sizeof(struct ptlrpc_body) + \
487 sizeof(struct obdo) + \
488 sizeof(struct obd_ioobj) + \
489 sizeof(struct niobuf_remote) * DT_MAX_BRW_PAGES)
491 * FIEMAP request can be 4K+ for now
493 #define OST_MAXREQSIZE (16 * 1024)
494 #define OST_IO_MAXREQSIZE max_t(int, OST_MAXREQSIZE, \
495 (((_OST_MAXREQSIZE_SUM - 1) | (1024 - 1)) + 1))
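/*
 * The expression ((x - 1) | (1024 - 1)) + 1 above rounds x up to the next
 * multiple of 1024 without a division: setting the low ten bits of (x - 1)
 * and adding one produces the smallest multiple of 1024 that is >= x,
 * satisfying the "multiple of 1024" requirement noted above.
 */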
497 #define OST_MAXREPSIZE (9 * 1024)
498 #define OST_IO_MAXREPSIZE OST_MAXREPSIZE
501 /** OST_BUFSIZE = max_reqsize + max sptlrpc payload size */
502 #define OST_BUFSIZE max_t(int, OST_MAXREQSIZE + 1024, 16 * 1024)
504 * OST_IO_MAXREQSIZE is about 18K; giving an extra 46K increases the buffer
505 * utilization rate of the request buffer; see the MDS_REG_BUFSIZE comment.
507 #define OST_IO_BUFSIZE max_t(int, OST_IO_MAXREQSIZE + 1024, 64 * 1024)
509 /* Macro to hide a typecast. */
510 #define ptlrpc_req_async_args(req) ((void *)&req->rq_async_args)
513 * Structure defining a single portal connection.
515 struct ptlrpc_connection {
516 /** linkage for connections hash table */
517 struct hlist_node c_hash;
518 /** Our own lnet nid for this connection */
520 /** Remote side nid for this connection */
521 lnet_process_id_t c_peer;
522 /** UUID of the other side */
523 struct obd_uuid c_remote_uuid;
524 /** reference counter for this connection */
528 /** Client definition for PortalRPC */
529 struct ptlrpc_client {
530 /** What lnet portal does this client send messages to by default */
531 __u32 cli_request_portal;
532 /** What portal do we expect replies on */
533 __u32 cli_reply_portal;
534 /** Name of the client */
538 /** state flags of requests */
539 /* XXX only ones left are those used by the bulk descs as well! */
540 #define PTL_RPC_FL_INTR (1 << 0) /* reply wait was interrupted by user */
541 #define PTL_RPC_FL_TIMEOUT (1 << 7) /* request timed out waiting for reply */
543 #define REQ_MAX_ACK_LOCKS 8
545 union ptlrpc_async_args {
547 * Scratchpad for passing args to completion interpreter. Users
548 * cast to the struct of their choosing, and CLASSERT that this is
549 * big enough. For _tons_ of context, OBD_ALLOC a struct and store
550 * a pointer to it here. The pointer_arg ensures this struct is at
551 * least big enough for that.
553 void *pointer_arg[11];
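/**
 * Typical usage sketch, following the pattern described above (the struct
 * and request variable here are hypothetical):
 *
 * \code
 * struct my_interpret_args {
 *	struct obd_export *ma_exp;
 *	int		   ma_flags;
 * };
 *
 * CLASSERT(sizeof(struct my_interpret_args) <=
 *	    sizeof(union ptlrpc_async_args));
 *
 * struct my_interpret_args *args = ptlrpc_req_async_args(req);
 * \endcode
 */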
557 struct ptlrpc_request_set;
558 typedef int (*set_interpreter_func)(struct ptlrpc_request_set *, void *, int);
559 typedef int (*set_producer_func)(struct ptlrpc_request_set *, void *);
562 * Definition of the request set structure.
563 * A request set is a list of requests (not necessarily to the same target)
564 * that, once populated with RPCs, can be sent in parallel.
565 * There are two kinds of request sets: general purpose, and those with a
566 * dedicated serving thread; an example of the latter is the ptlrpcd set.
567 * For general purpose sets, once the set has started sending it is
568 * impossible to add new requests to it.
569 * Provides a way to call "completion callbacks" when all requests in the set
572 struct ptlrpc_request_set {
573 atomic_t set_refcount;
574 /** number of in queue requests */
575 atomic_t set_new_count;
576 /** number of uncompleted requests */
577 atomic_t set_remaining;
578 /** wait queue to wait on for request events */
579 wait_queue_head_t set_waitq;
580 wait_queue_head_t *set_wakeup_ptr;
581 /** List of requests in the set */
582 struct list_head set_requests;
584 * List of completion callbacks to be called when the set is completed
585 * This is only used if \a set_interpret is NULL.
586 * Links struct ptlrpc_set_cbdata.
588 struct list_head set_cblist;
589 /** Completion callback, if only one. */
590 set_interpreter_func set_interpret;
591 /** opaque argument passed to the completion \a set_interpret callback. */
594 * Lock for \a set_new_requests manipulations
595 * locked so that any old caller can communicate requests to
596 * the set holder who can then fold them into the lock-free set
598 spinlock_t set_new_req_lock;
599 /** List of new yet unsent requests. Only used with ptlrpcd now. */
600 struct list_head set_new_requests;
602 /** rq_status of requests that have been freed already */
604 /** Additional fields used by the flow control extension */
605 /** Maximum number of RPCs in flight */
606 int set_max_inflight;
607 /** Callback function used to generate RPCs */
608 set_producer_func set_producer;
609 /** opaque argument passed to the producer callback */
610 void *set_producer_arg;
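/**
 * Typical client-side usage sketch for a general purpose set; see the
 * prototypes elsewhere in this header and in client.c for the
 * authoritative signatures:
 *
 * \code
 * struct ptlrpc_request_set *set = ptlrpc_prep_set();
 *
 * if (set != NULL) {
 *	ptlrpc_set_add_req(set, req1);
 *	ptlrpc_set_add_req(set, req2);
 *	rc = ptlrpc_set_wait(set);	// send in parallel, wait for all
 *	ptlrpc_set_destroy(set);
 * }
 * \endcode
 */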
614 * Description of a single ptlrpc_set callback
616 struct ptlrpc_set_cbdata {
617 /** List linkage item */
618 struct list_head psc_item;
619 /** Pointer to interpreting function */
620 set_interpreter_func psc_interpret;
621 /** Opaque argument to pass to the callback */
625 struct ptlrpc_bulk_desc;
626 struct ptlrpc_service_part;
627 struct ptlrpc_service;
630 * ptlrpc callback & work item stuff
632 struct ptlrpc_cb_id {
633 void (*cbid_fn)(lnet_event_t *ev); /* specific callback fn */
634 void *cbid_arg; /* additional arg */
637 /** Maximum number of locks to fit into reply state */
638 #define RS_MAX_LOCKS 8
642 * Structure to define reply state on the server
643 * Reply state holds various reply message information. Also, for "difficult"
644 * replies (the rep-ack case) we store the state after sending the reply and
645 * wait for the client to acknowledge its reception. In these cases locks can
646 * be added to the state for replay/failover consistency guarantees.
648 struct ptlrpc_reply_state {
649 /** Callback description */
650 struct ptlrpc_cb_id rs_cb_id;
651 /** Linkage for list of all reply states in a system */
652 struct list_head rs_list;
653 /** Linkage for list of all reply states on same export */
654 struct list_head rs_exp_list;
655 /** Linkage for list of all reply states for same obd */
656 struct list_head rs_obd_list;
658 struct list_head rs_debug_list;
660 /** A spinlock to protect the reply state flags */
662 /** Reply state flags */
663 unsigned long rs_difficult:1; /* ACK/commit stuff */
664 unsigned long rs_no_ack:1; /* no ACK, even for
665 difficult requests */
666 unsigned long rs_scheduled:1; /* being handled? */
667 unsigned long rs_scheduled_ever:1;/* any schedule attempts? */
668 unsigned long rs_handled:1; /* been handled yet? */
669 unsigned long rs_on_net:1; /* reply_out_callback pending? */
670 unsigned long rs_prealloc:1; /* rs from prealloc list */
671 unsigned long rs_committed:1;/* the transaction was committed
672 and the rs was dispatched
673 by ptlrpc_commit_replies */
674 /** Size of the state */
678 /** Transaction number */
682 struct obd_export *rs_export;
683 struct ptlrpc_service_part *rs_svcpt;
684 /** Lnet metadata handle for the reply */
685 lnet_handle_md_t rs_md_h;
686 atomic_t rs_refcount;
688 /** Context for the service thread */
689 struct ptlrpc_svc_ctx *rs_svc_ctx;
690 /** Reply buffer (actually sent to the client), encoded if needed */
691 struct lustre_msg *rs_repbuf; /* wrapper */
692 /** Size of the reply buffer */
693 int rs_repbuf_len; /* wrapper buf length */
694 /** Size of the reply message */
695 int rs_repdata_len; /* wrapper msg length */
697 * Actual reply message. Its content is encrypted (if needed) to
698 * produce the reply buffer for actual sending. In the simple case
699 * of no network encryption we just set \a rs_repbuf to \a rs_msg
701 struct lustre_msg *rs_msg; /* reply message */
703 /** Number of locks awaiting client ACK */
705 /** Handles of locks awaiting client reply ACK */
706 struct lustre_handle rs_locks[RS_MAX_LOCKS];
707 /** Lock modes of locks in \a rs_locks */
708 ldlm_mode_t rs_modes[RS_MAX_LOCKS];
711 struct ptlrpc_thread;
715 RQ_PHASE_NEW = 0xebc0de00,
716 RQ_PHASE_RPC = 0xebc0de01,
717 RQ_PHASE_BULK = 0xebc0de02,
718 RQ_PHASE_INTERPRET = 0xebc0de03,
719 RQ_PHASE_COMPLETE = 0xebc0de04,
720 RQ_PHASE_UNREGISTERING = 0xebc0de05,
721 RQ_PHASE_UNDEFINED = 0xebc0de06
724 /** Type of request interpreter call-back */
725 typedef int (*ptlrpc_interpterer_t)(const struct lu_env *env,
726 struct ptlrpc_request *req,
730 * Definition of request pool structure.
731 * The pool is used to store empty preallocated requests for the case
732 * when we would actually need to send something without performing
733 * any allocations (to avoid e.g. OOM).
735 struct ptlrpc_request_pool {
736 /** Locks the list */
738 /** list of ptlrpc_request structs */
739 struct list_head prp_req_list;
740 /** Maximum message size that would fit into a request from this pool */
742 /** Function to allocate more requests for this pool */
743 void (*prp_populate)(struct ptlrpc_request_pool *, int);
752 * \defgroup nrs Network Request Scheduler
755 struct ptlrpc_nrs_policy;
756 struct ptlrpc_nrs_resource;
757 struct ptlrpc_nrs_request;
760 * NRS control operations.
762 * These are common for all policies.
764 enum ptlrpc_nrs_ctl {
766 * Not a valid opcode.
768 PTLRPC_NRS_CTL_INVALID,
770 * Activate the policy.
772 PTLRPC_NRS_CTL_START,
774 * Reserved for multiple primary policies, which may be a possibility
779 * Policies can start using opcodes from this value and onwards for
780 * their own purposes; the assigned value itself is arbitrary.
782 PTLRPC_NRS_CTL_1ST_POL_SPEC = 0x20,
786 * ORR policy operations
789 NRS_CTL_ORR_RD_QUANTUM = PTLRPC_NRS_CTL_1ST_POL_SPEC,
790 NRS_CTL_ORR_WR_QUANTUM,
791 NRS_CTL_ORR_RD_OFF_TYPE,
792 NRS_CTL_ORR_WR_OFF_TYPE,
793 NRS_CTL_ORR_RD_SUPP_REQ,
794 NRS_CTL_ORR_WR_SUPP_REQ,
798 * NRS policy operations.
800 * These determine the behaviour of a policy, and are called in response to
803 struct ptlrpc_nrs_pol_ops {
805 * Called during policy registration; this operation is optional.
807 * \param[in,out] policy The policy being initialized
809 int (*op_policy_init) (struct ptlrpc_nrs_policy *policy);
811 * Called during policy unregistration; this operation is optional.
813 * \param[in,out] policy The policy being unregistered/finalized
815 void (*op_policy_fini) (struct ptlrpc_nrs_policy *policy);
817 * Called when activating a policy via lprocfs; policies allocate and
818 * initialize their resources here; this operation is optional.
820 * \param[in,out] policy The policy being started
821 * \param[in,out] arg A generic char buffer
823 * \see nrs_policy_start_locked()
825 int (*op_policy_start) (struct ptlrpc_nrs_policy *policy,
828 * Called when deactivating a policy via lprocfs; policies deallocate
829 * their resources here; this operation is optional
831 * \param[in,out] policy The policy being stopped
833 * \see nrs_policy_stop0()
835 void (*op_policy_stop) (struct ptlrpc_nrs_policy *policy);
837 * Used for policy-specific operations; i.e. not generic ones like
838 * \e PTLRPC_NRS_CTL_START and \e PTLRPC_NRS_CTL_GET_INFO; analogous
839 * to an ioctl; this operation is optional.
841 * \param[in,out] policy The policy carrying out operation \a opc
842 * \param[in] opc The command operation being carried out
843 * \param[in,out] arg A generic buffer for communication between the
844 * user and the control operation
849 * \see ptlrpc_nrs_policy_control()
851 int (*op_policy_ctl) (struct ptlrpc_nrs_policy *policy,
852 enum ptlrpc_nrs_ctl opc, void *arg);
855 * Called when obtaining references to the resources of the resource
856 * hierarchy for a request that has arrived for handling at the PTLRPC
857 * service. Policies should return -ve for requests they do not wish
858 * to handle. This operation is mandatory.
860 * \param[in,out] policy The policy we're getting resources for.
861 * \param[in,out] nrq The request we are getting resources for.
862 * \param[in] parent The parent resource of the resource being
863 * requested; set to NULL if none.
864 * \param[out] resp The resource is to be returned here; the
865 * fallback policy in an NRS head should
866 * \e always return a non-NULL pointer value.
867 * \param[in] moving_req When set, signifies that this is an attempt
868 * to obtain resources for a request being moved
869 * to the high-priority NRS head by
870 * ldlm_lock_reorder_req().
871 * This implies two things:
872 * 1. We are under obd_export::exp_rpc_lock and
873 * so should not sleep.
874 * 2. We should not perform non-idempotent operations,
875 * and may skip idempotent operations that were
876 * already carried out when resources were first
877 * taken for the request when it was initialized
878 * in ptlrpc_nrs_req_initialize().
880 * \retval 0, +ve The level of the returned resource in the resource
881 * hierarchy; currently only 0 (for a non-leaf resource)
882 * and 1 (for a leaf resource) are supported by the
886 * \see ptlrpc_nrs_req_initialize()
887 * \see ptlrpc_nrs_hpreq_add_nolock()
888 * \see ptlrpc_nrs_req_hp_move()
890 int (*op_res_get) (struct ptlrpc_nrs_policy *policy,
891 struct ptlrpc_nrs_request *nrq,
892 const struct ptlrpc_nrs_resource *parent,
893 struct ptlrpc_nrs_resource **resp,
896 * Called when releasing references taken for resources in the resource
897 * hierarchy for the request; this operation is optional.
899 * \param[in,out] policy The policy the resource belongs to
900 * \param[in] res The resource to be freed
902 * \see ptlrpc_nrs_req_finalize()
903 * \see ptlrpc_nrs_hpreq_add_nolock()
904 * \see ptlrpc_nrs_req_hp_move()
906 void (*op_res_put) (struct ptlrpc_nrs_policy *policy,
907 const struct ptlrpc_nrs_resource *res);
910 * Obtains a request for handling from the policy, and optionally
911 * removes the request from the policy; this operation is mandatory.
913 * \param[in,out] policy The policy to poll
914 * \param[in] peek When set, signifies that we just want to
915 * examine the request, and not handle it, so the
916 * request is not removed from the policy.
917 * \param[in] force When set, it will force a policy to return a
918 * request if it has one queued.
920 * \retval NULL No request available for handling
921 * \retval valid-pointer The request polled for handling
923 * \see ptlrpc_nrs_req_get_nolock()
925 struct ptlrpc_nrs_request *
926 (*op_req_get) (struct ptlrpc_nrs_policy *policy, bool peek,
929 * Called when attempting to add a request to a policy for later
930 * handling; this operation is mandatory.
932 * \param[in,out] policy The policy on which to enqueue \a nrq
933 * \param[in,out] nrq The request to enqueue
938 * \see ptlrpc_nrs_req_add_nolock()
940 int (*op_req_enqueue) (struct ptlrpc_nrs_policy *policy,
941 struct ptlrpc_nrs_request *nrq);
943 * Removes a request from the policy's set of pending requests. Normally
944 * called after a request has been polled successfully from the policy
945 * for handling; this operation is mandatory.
947 * \param[in,out] policy The policy the request \a nrq belongs to
948 * \param[in,out] nrq The request to dequeue
950 * \see ptlrpc_nrs_req_del_nolock()
952 void (*op_req_dequeue) (struct ptlrpc_nrs_policy *policy,
953 struct ptlrpc_nrs_request *nrq);
955 * Called after the request has been carried out. Can be used for
956 * job/resource control; this operation is optional.
958 * \param[in,out] policy The policy that is finishing handling of the request
960 * \param[in,out] nrq The request
962 * \pre assert_spin_locked(&svcpt->scp_req_lock)
964 * \see ptlrpc_nrs_req_stop_nolock()
966 void (*op_req_stop) (struct ptlrpc_nrs_policy *policy,
967 struct ptlrpc_nrs_request *nrq);
969 * Registers the policy's lprocfs interface with a PTLRPC service.
971 * \param[in] svc The service
976 int (*op_lprocfs_init) (struct ptlrpc_service *svc);
978 * Unregisters the policy's lprocfs interface from a PTLRPC service.
980 * In cases of failed policy registration in
981 * \e ptlrpc_nrs_policy_register(), this function may be called for a
982 * service which has not registered the policy successfully, so
983 * implementations of this method should make sure their operations are
984 * safe in such cases.
986 * \param[in] svc The service
988 void (*op_lprocfs_fini) (struct ptlrpc_service *svc);
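/**
 * A hypothetical sketch of how the mandatory handlers are wired up; the
 * in-tree FIFO policy is the simplest real reference:
 *
 * \code
 * static const struct ptlrpc_nrs_pol_ops my_pol_ops = {
 *	.op_res_get	= my_pol_res_get,
 *	.op_res_put	= my_pol_res_put,
 *	.op_req_get	= my_pol_req_get,
 *	.op_req_enqueue	= my_pol_req_enqueue,
 *	.op_req_dequeue	= my_pol_req_dequeue,
 * };
 * \endcode
 */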
994 enum nrs_policy_flags {
996 * Fallback policy, use this flag only on a single supported policy per
997 * service. The flag cannot be used on policies that use
998 * \e PTLRPC_NRS_FL_REG_EXTERN
1000 PTLRPC_NRS_FL_FALLBACK = (1 << 0),
1002 * Start policy immediately after registering.
1004 PTLRPC_NRS_FL_REG_START = (1 << 1),
1006 * This is a policy registering from a module different from the one the
1007 * NRS core ships in (currently ptlrpc).
1009 PTLRPC_NRS_FL_REG_EXTERN = (1 << 2),
1015 * Denotes whether an NRS instance is for handling normal or high-priority
1016 * RPCs, or whether an operation pertains to one or both of the NRS instances
1019 enum ptlrpc_nrs_queue_type {
1020 PTLRPC_NRS_QUEUE_REG = (1 << 0),
1021 PTLRPC_NRS_QUEUE_HP = (1 << 1),
1022 PTLRPC_NRS_QUEUE_BOTH = (PTLRPC_NRS_QUEUE_REG | PTLRPC_NRS_QUEUE_HP)
1028 * A PTLRPC service has at least one NRS head instance for handling normal
1029 * priority RPCs, and may optionally have a second NRS head instance for
1030 * handling high-priority RPCs. Each NRS head maintains a list of available
1031 * policies, of which one and only one policy is acting as the fallback policy,
1032 * and optionally a different policy may be acting as the primary policy. For
1033 * all RPCs handled by this NRS head instance, NRS core will first attempt to
1034 * enqueue the RPC using the primary policy (if any). The fallback policy is
1035 * used in the following cases:
1036 * - when there was no primary policy in the
1037 * ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED state at the time the request
1039 * - when the primary policy that was at the
1040 * ptlrpc_nrs_pol_state::PTLRPC_NRS_POL_STATE_STARTED state at the time the
1041 * RPC was initialized, indicated it did not wish, or for some other reason was
1042 * not able to handle the request, by returning a non-valid NRS resource
1044 * - when the primary policy that was at the
1045 * ptlrpc_nrs_pol_state::PTLRPC_NRS_POL_STATE_STARTED state at the time the
1046 * RPC was initialized, fails later during the request enqueueing stage.
1048 * \see nrs_resource_get_safe()
1049 * \see nrs_request_enqueue()
1052 spinlock_t nrs_lock;
1053 /** XXX Possibly replace svcpt->scp_req_lock with another lock here. */
1055 * List of registered policies
1057 struct list_head nrs_policy_list;
1059 * List of policies with queued requests. Policies that have any
1060 * outstanding requests are queued here, and this list is queried
1061 * in a round-robin manner from NRS core when obtaining a request
1062 * for handling. This ensures that requests from policies that at some
1063 * point transition away from the
1064 * ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED state are drained.
1066 struct list_head nrs_policy_queued;
1068 * Service partition for this NRS head
1070 struct ptlrpc_service_part *nrs_svcpt;
1072 * Primary policy, which is the preferred policy for handling RPCs
1074 struct ptlrpc_nrs_policy *nrs_policy_primary;
1076 * Fallback policy, which is the backup policy for handling RPCs
1078 struct ptlrpc_nrs_policy *nrs_policy_fallback;
1080 * This NRS head handles either HP or regular requests
1082 enum ptlrpc_nrs_queue_type nrs_queue_type;
1084 * # queued requests from all policies in this NRS head
1086 unsigned long nrs_req_queued;
1088 * # scheduled requests from all policies in this NRS head
1090 unsigned long nrs_req_started;
1092 * # policies on this NRS
1094 unsigned nrs_num_pols;
1096 * This NRS head is in the process of starting a policy
1098 unsigned nrs_policy_starting:1;
1100 * In progress of shutting down the whole NRS head; used during
1103 unsigned nrs_stopping:1;
1105 * NRS policy is throttling requests
1107 unsigned nrs_throttling:1;
1110 #define NRS_POL_NAME_MAX 16
1112 struct ptlrpc_nrs_pol_desc;
1115 * Service compatibility predicate; this determines whether a policy is adequate
1116 * for handling RPCs of a particular PTLRPC service.
1118 * XXX: This should give the same result during policy registration and
1119 * unregistration, and for all partitions of a service; so the result should not
1120 * depend on temporal service or other properties that may influence the
1123 typedef bool (*nrs_pol_desc_compat_t) (const struct ptlrpc_service *svc,
1124 const struct ptlrpc_nrs_pol_desc *desc);
1126 struct ptlrpc_nrs_pol_conf {
1128 * Human-readable policy name
1130 char nc_name[NRS_POL_NAME_MAX];
1132 * NRS operations for this policy
1134 const struct ptlrpc_nrs_pol_ops *nc_ops;
1136 * Service compatibility predicate
1138 nrs_pol_desc_compat_t nc_compat;
1140 * Set for policies that support a single ptlrpc service, i.e. ones that
1141 * have \a pd_compat set to nrs_policy_compat_one(). The variable value
1142 * depicts the name of the single service that such policies are
1145 const char *nc_compat_svc_name;
1147 * Owner module for this policy descriptor; policies registering from a
1148 * different module to the one the NRS framework is held within
1149 * (currently ptlrpc), should set this field to THIS_MODULE.
1151 struct module *nc_owner;
1153 * Policy registration flags; a bitmask of \e nrs_policy_flags
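/**
 * Registration sketch for an external policy module; the conf and ops
 * names here are hypothetical, and nrs_policy_compat_all() is one of the
 * stock compatibility predicates:
 *
 * \code
 * static struct ptlrpc_nrs_pol_conf my_pol_conf = {
 *	.nc_name	= "mypolicy",
 *	.nc_ops		= &my_pol_ops,
 *	.nc_compat	= nrs_policy_compat_all,
 *	.nc_owner	= THIS_MODULE,
 *	.nc_flags	= PTLRPC_NRS_FL_REG_EXTERN,
 * };
 *
 * rc = ptlrpc_nrs_policy_register(&my_pol_conf);
 * \endcode
 */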
1159 * NRS policy registering descriptor
1161 * Is used to hold a description of a policy that can be passed to NRS core in
1162 * order to register the policy with NRS heads in different PTLRPC services.
1164 struct ptlrpc_nrs_pol_desc {
1166 * Human-readable policy name
1168 char pd_name[NRS_POL_NAME_MAX];
1170 * Link into nrs_core::nrs_policies
1172 struct list_head pd_list;
1174 * NRS operations for this policy
1176 const struct ptlrpc_nrs_pol_ops *pd_ops;
1178 * Service compatibility predicate
1180 nrs_pol_desc_compat_t pd_compat;
1182 * Set for policies that are compatible with only one PTLRPC service.
1184 * \see ptlrpc_nrs_pol_conf::nc_compat_svc_name
1186 const char *pd_compat_svc_name;
1188 * Owner module for this policy descriptor.
1190 * We need to hold a reference to the module whenever we might make use
1191 * of any of the module's contents, i.e.
1192 * - If one or more instances of the policy are at a state where they
1193 * might be handling a request, i.e.
1194 * ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED or
1195 * ptlrpc_nrs_pol_state::NRS_POL_STATE_STOPPING as we will have to
1196 * call into the policy's ptlrpc_nrs_pol_ops() handlers. A reference
1197 * is taken on the module when
1198 * \e ptlrpc_nrs_pol_desc::pd_refs becomes 1, and released when it
1199 * becomes 0, so that we hold only one reference to the module maximum
1202 * We do not need to hold a reference to the module, even though we
1203 * might use code and data from the module, in the following cases:
1204 * - During external policy registration, because this should happen in
1205 * the module's init() function, in which case the module is safe from
1206 * removal because a reference is being held on the module by the
1207 * kernel, and iirc kmod (and I guess module-init-tools also) will
1208 * serialize any racing processes properly anyway.
1209 * - During external policy unregistration, because this should happen
1210 * in a module's exit() function, and any attempts to start a policy
1211 * instance would need to take a reference on the module, and this is
1212 * not possible once we have reached the point where the exit()
1213 * handler is called.
1214 * - During service registration and unregistration, as service setup
1215 * and cleanup, and policy registration, unregistration and policy
1216 * instance starting, are serialized by \e nrs_core::nrs_mutex, so
1217 * as long as users adhere to the convention of registering policies
1218 * in init() and unregistering them in module exit() functions, there
1219 * should not be a race between these operations.
1220 * - During any policy-specific lprocfs operations, because a reference
1221 * is held by the kernel on a proc entry that has been entered by a
1222 * syscall, so as long as proc entries are removed at unregistration time,
1223 * unregistration and lprocfs operations will be properly
1226 struct module *pd_owner;
1228 * Bitmask of \e nrs_policy_flags
1232 * # of references on this descriptor
1240 * Policies transition from one state to another during their lifetime
1242 enum ptlrpc_nrs_pol_state {
1244 * Not a valid policy state.
1246 NRS_POL_STATE_INVALID,
1248 * Policies are at this state either at the start of their life, or
1249 * transition here when the user selects a different policy to act
1250 * as the primary one.
1252 NRS_POL_STATE_STOPPED,
1254 * Policy is in the process of stopping
1256 NRS_POL_STATE_STOPPING,
1258 * Policy is in the process of starting
1260 NRS_POL_STATE_STARTING,
1262 * A policy is in this state in two cases:
1263 * - it is the fallback policy, which is always in this state.
1264 * - it has been activated by the user; i.e. it is the primary policy,
1266 NRS_POL_STATE_STARTED,
1270 * NRS policy information
1272 * Used for obtaining information on the status of a policy via lprocfs
1274 struct ptlrpc_nrs_pol_info {
1278 char pi_name[NRS_POL_NAME_MAX];
1280 * Current policy state
1282 enum ptlrpc_nrs_pol_state pi_state;
1284 * # RPCs enqueued for later dispatching by the policy
1288 * # RPCs started for dispatch by the policy
1290 long pi_req_started;
1292 * Is this a fallback policy?
1294 unsigned pi_fallback:1;
1300 * There is one instance of this for each policy in each NRS head of each
1301 * PTLRPC service partition.
1303 struct ptlrpc_nrs_policy {
1305 * Linkage into the NRS head's list of policies,
1306 * ptlrpc_nrs::nrs_policy_list
1308 struct list_head pol_list;
1310 * Linkage into the NRS head's list of policies with enqueued
1311 * requests, ptlrpc_nrs::nrs_policy_queued
1313 struct list_head pol_list_queued;
1315 * Current state of this policy
1317 enum ptlrpc_nrs_pol_state pol_state;
1319 * Bitmask of nrs_policy_flags
1323 * # RPCs enqueued for later dispatching by the policy
1325 long pol_req_queued;
1327 * # RPCs started for dispatch by the policy
1329 long pol_req_started;
1331 * Usage reference count taken on the policy instance
1335 * The NRS head this policy has been created at
1337 struct ptlrpc_nrs *pol_nrs;
1339 * Private policy data; varies by policy type
1343 * Policy descriptor for this policy instance.
1345 struct ptlrpc_nrs_pol_desc *pol_desc;
1351 * Resources are embedded into two types of NRS entities:
1352 * - Inside NRS policies, in the policy's private data in
1353 * ptlrpc_nrs_policy::pol_private
1354 * - In objects that act as prime-level scheduling entities in different NRS
1355 * policies; e.g. on a policy that performs round robin or similar order
1356 * scheduling across client NIDs, there would be one NRS resource per unique
1357 * client NID. On a policy which performs round robin scheduling across
1358 * backend filesystem objects, there would be one resource associated with
1359 * each of the backend filesystem objects partaking in the scheduling
1360 * performed by the policy.
1362 * NRS resources share a parent-child relationship, in which resources embedded
1363 * in policy instances are the parent entities, with all scheduling entities
1364 * a policy schedules across being the children, thus forming a simple resource
1365 * hierarchy. This hierarchy may be extended with one or more levels in the
1366 * future if the ability to have more than one primary policy is added.
1368 * Upon request initialization, references to the then active NRS policies are
1369 * taken and used to later handle the dispatching of the request with one of
1372 * \see nrs_resource_get_safe()
1373 * \see ptlrpc_nrs_req_add()
1375 struct ptlrpc_nrs_resource {
1377 * This NRS resource's parent; is NULL for resources embedded in NRS
1378 * policy instances; i.e. those are top-level ones.
1380 struct ptlrpc_nrs_resource *res_parent;
1382 * The policy associated with this resource.
1384 struct ptlrpc_nrs_policy *res_policy;
1397 * This policy is a logical wrapper around previous, non-NRS functionality.
1398 * It dispatches RPCs in the same order as they arrive from the network. This
1399 * policy is currently used as the fallback policy, and the only enabled policy
1400 * on all NRS heads of all PTLRPC service partitions.
1405 * Private data structure for the FIFO policy
1407 struct nrs_fifo_head {
1409 * Resource object for policy instance.
1411 struct ptlrpc_nrs_resource fh_res;
1413 * List of queued requests.
1415 struct list_head fh_list;
1417 * For debugging purposes.
1422 struct nrs_fifo_req {
1423 struct list_head fr_list;
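/**
 * Enqueue and dequeue in this policy reduce to plain list operations,
 * roughly (simplified from the in-tree FIFO handlers):
 *
 * \code
 * list_add_tail(&nrq->nr_u.fifo.fr_list, &head->fh_list);  // enqueue
 * list_del_init(&nrq->nr_u.fifo.fr_list);		     // dequeue
 * \endcode
 */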
1432 * CRR-N, Client Round Robin over NIDs
1437 * private data structure for CRR-N NRS
1439 struct nrs_crrn_net {
1440 struct ptlrpc_nrs_resource cn_res;
1441 cfs_binheap_t *cn_binheap;
1442 cfs_hash_t *cn_cli_hash;
1444 * Used when a new scheduling round commences, in order to synchronize
1445 * all clients with the new round number.
1449 * Determines the relevant ordering amongst request batches within a
1454 * Round Robin quantum; the maximum number of RPCs that each request
1455 * batch for each client can have in a scheduling round.
1461 * Object representing a client in CRR-N, as identified by its NID
1463 struct nrs_crrn_client {
1464 struct ptlrpc_nrs_resource cc_res;
1465 struct hlist_node cc_hnode;
1468 * The round number against which this client is currently scheduling
1473 * The sequence number used for requests scheduled by this client during
1474 * the current round number.
1479 * Round Robin quantum; the maximum number of RPCs the client is allowed
1480 * to schedule in a single batch of each round.
1484 * # of pending requests for this client, on all existing rounds
1490 * CRR-N NRS request definition
1492 struct nrs_crrn_req {
1494 * Round number for this request; shared with all other requests in the
1499 * Sequence number for this request; shared with all other requests in
1506 * CRR-N policy operations.
1510 * Read the RR quantum size of a CRR-N policy.
1512 NRS_CTL_CRRN_RD_QUANTUM = PTLRPC_NRS_CTL_1ST_POL_SPEC,
1514 * Write the RR quantum size of a CRR-N policy.
1516 NRS_CTL_CRRN_WR_QUANTUM,
1524 * ORR/TRR (Object-based Round Robin/Target-based Round Robin) NRS policies
1529 * Lower and upper byte offsets of a brw RPC
1531 struct nrs_orr_req_range {
1537 * RPC types supported by the ORR/TRR policies
1540 NOS_OST_READ = (1 << 0),
1541 NOS_OST_WRITE = (1 << 1),
1542 NOS_OST_RW = (NOS_OST_READ | NOS_OST_WRITE),
1544 * Default value for policies.
1546 NOS_DFLT = NOS_OST_READ
1550 * As unique keys for grouping RPCs together, we use the object's OST FID for
1551 * the ORR policy, and the OST index for the TRR policy.
1553 * XXX: We waste some space for TRR policy instances by using a union, but it
1554 * allows us to consolidate some of the code between ORR and TRR, and these
1555 * policies will probably eventually merge into one anyway.
1557 struct nrs_orr_key {
1559 /** object FID for ORR */
1560 struct lu_fid ok_fid;
1561 /** OST index for TRR */
1567 * The largest base string for unique hash/slab object names is
1568 * "nrs_orr_reg_", so 13 characters. We add 3 to this to be used for the CPT
1569 * id number, so this _should_ be more than enough for the maximum number of
1570 * CPTs on any system. If it does happen that this statement is incorrect,
1571 * nrs_orr_genobjname() will inevitably yield a non-unique name and cause
1572 * kmem_cache_create() to complain (on Linux), so the erroneous situation
1573 * will hopefully not go unnoticed.
1575 #define NRS_ORR_OBJ_NAME_MAX (sizeof("nrs_orr_reg_") + 3)
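/**
 * A name within this bound can be generated along these lines (a sketch;
 * see nrs_orr_genobjname() for the real implementation):
 *
 * \code
 * snprintf(objname, NRS_ORR_OBJ_NAME_MAX, "nrs_orr_reg_%d", cptid);
 * \endcode
 */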
1578 * private data structure for ORR and TRR NRS
1580 struct nrs_orr_data {
1581 struct ptlrpc_nrs_resource od_res;
1582 cfs_binheap_t *od_binheap;
1583 cfs_hash_t *od_obj_hash;
1584 struct kmem_cache *od_cache;
1586 * Used when a new scheduling round commences, in order to synchronize
1587 * all object or OST batches with the new round number.
1591 * Determines the relevant ordering amongst request batches within a
1596 * RPC types that are currently supported.
1598 enum nrs_orr_supp od_supp;
1600 * Round Robin quantum; the maximum number of RPCs that each request
1601 * batch for each object or OST can have in a scheduling round.
1605 * Whether to use physical disk offsets or logical file offsets.
1609 * XXX: We need to provide a persistently allocated string to hold
1610 * unique object names for this policy, since in the Linux versions
1611 * currently supported by Lustre, kmem_cache_create() just sets a pointer
1612 * to the name string provided. kstrdup() is used in the version of
1613 * kmem_cache_create() in current Linux mainline, so we may be able to
1614 * remove this in the future.
1616 char od_objname[NRS_ORR_OBJ_NAME_MAX];
1620 * Represents a backend-fs object or OST in the ORR and TRR policies
1623 struct nrs_orr_object {
1624 struct ptlrpc_nrs_resource oo_res;
1625 struct hlist_node oo_hnode;
1627 * The round number against which requests are being scheduled for this
1632 * The sequence number used for requests scheduled for this object or
1633 * OST during the current round number.
1637 * The key of the object or OST for which this structure instance is
1640 struct nrs_orr_key oo_key;
1643 * Round Robin quantum; the maximum number of RPCs that are allowed to
1644 * be scheduled for the object or OST in a single batch of each round.
1648 * # of pending requests for this object or OST, on all existing rounds
1654 * ORR/TRR NRS request definition
1656 struct nrs_orr_req {
1658 * The offset range this request covers
1660 struct nrs_orr_req_range or_range;
1662 * Round number for this request; shared with all other requests in the
1667 * Sequence number for this request; shared with all other requests in
1672 * For debugging purposes.
1674 struct nrs_orr_key or_key;
1676 * An ORR policy instance has filled in request information while
1677 * enqueueing the request on the service partition's regular NRS head.
1679 unsigned int or_orr_set:1;
1681 * A TRR policy instance has filled in request information while
1682 * enqueueing the request on the service partition's regular NRS head.
1684 unsigned int or_trr_set:1;
1686 * Request offset ranges have been filled in with logical offset
1689 unsigned int or_logical_set:1;
1691 * Request offset ranges have been filled in with physical offset
1694 unsigned int or_physical_set:1;
1699 #include <lustre_nrs_tbf.h>
1704 * Instances of this object exist embedded within ptlrpc_request; the main
1705 * purpose of this object is to hold references to the request's resources
1706 * for the lifetime of the request, and to hold properties that policies
1707 * use for determining the request's scheduling priority.
1709 struct ptlrpc_nrs_request {
1711 * The request's resource hierarchy.
1713 struct ptlrpc_nrs_resource *nr_res_ptrs[NRS_RES_MAX];
1715 * Index into ptlrpc_nrs_request::nr_res_ptrs of the resource of the
1716 * policy that was used to enqueue the request.
1718 * \see nrs_request_enqueue()
1720 unsigned nr_res_idx;
1721 unsigned nr_initialized:1;
1722 unsigned nr_enqueued:1;
1723 unsigned nr_started:1;
1724 unsigned nr_finalized:1;
1725 cfs_binheap_node_t nr_node;
1728 * Policy-specific fields, used for determining a request's scheduling
1729 * priority, and other supporting functionality.
1733 * Fields for the FIFO policy
1735 struct nrs_fifo_req fifo;
1737 * CRR-N request definition
1739 struct nrs_crrn_req crr;
1740 /** ORR and TRR share the same request definition */
1741 struct nrs_orr_req orr;
1743 * TBF request definition
1745 struct nrs_tbf_req tbf;
1748 * Externally-registering policies may want to use this to allocate
1749 * their own request properties.
1757 * Basic request prioritization operations structure.
1758 * The whole idea is centered around locks and RPCs that might affect locks.
1759 * When a lock is contended we try to give priority to RPCs that might lead
1760 * to the fastest release of that lock.
1761 * Currently this is implemented only for OSTs, in a way that gives all IO
1762 * and truncate RPCs coming from a region covered by a contended lock
1763 * priority over other requests.
1765 struct ptlrpc_hpreq_ops {
1767 * Check if the lock handle of the given lock is the same as the one
1768 * taken from the request.
1770 int (*hpreq_lock_match)(struct ptlrpc_request *, struct ldlm_lock *);
1772 * Check if the request is a high priority one.
1774 int (*hpreq_check)(struct ptlrpc_request *);
1776 * Called after the request has been handled.
1778 void (*hpreq_fini)(struct ptlrpc_request *);
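/**
 * For example, a server-side I/O handler can mark RPCs that touch a
 * contended lock as high priority with an instance along these lines
 * (hypothetical callback names):
 *
 * \code
 * static struct ptlrpc_hpreq_ops my_hpreq_ops = {
 *	.hpreq_lock_match = my_rw_hpreq_lock_match,
 *	.hpreq_check	  = my_rw_hpreq_check,
 *	.hpreq_fini	  = my_rw_hpreq_fini,
 * };
 *
 * req->rq_ops = &my_hpreq_ops;
 * \endcode
 */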
1782 * Represents a remote procedure call.
1784 * This is a staple structure used by everybody wanting to send a request
1787 struct ptlrpc_request {
1788 /* Request type: one of PTL_RPC_MSG_* */
1790 /** Result of request processing */
1793 * Linkage item through which this request is included into
1794 * sending/delayed lists on client and into rqbd list on server
1796 struct list_head rq_list;
1798 * Server-side list of incoming unserved requests sorted by arrival
1799 * time. Traversed from time to time to notice about-to-expire
1800 * requests and send back "early replies" to clients to let them
1801 * know the server is alive and well, just too busy to service their
1804 struct list_head rq_timed_list;
1805 /** server-side history, used for debugging purposes. */
1806 struct list_head rq_history_list;
1807 /** server-side per-export list */
1808 struct list_head rq_exp_list;
1809 /** server-side hp handlers */
1810 struct ptlrpc_hpreq_ops *rq_ops;
1812 /** initial thread servicing this request */
1813 struct ptlrpc_thread *rq_svc_thread;
1815 /** history sequence # */
1816 __u64 rq_history_seq;
1820 /** stub for NRS request */
1821 struct ptlrpc_nrs_request rq_nrq;
1823 /** the index of service's srv_at_array into which request is linked */
1825 /** Lock to protect request flags and some other important bits, like
1829 /** client-side flags are serialized by rq_lock */
1830 unsigned int rq_intr:1, rq_replied:1, rq_err:1,
1831 rq_timedout:1, rq_resend:1, rq_restart:1,
1833 * when ->rq_replay is set, the request is kept by the client even
1834 * after the server commits the corresponding transaction. This is
1835 * used for operations that require a sequence of multiple
1836 * requests to be replayed. The only current example is file
1837 * open/close. When the last request in such a sequence is
1838 * committed, ->rq_replay is cleared on all requests in the
1842 rq_no_resend:1, rq_waiting:1, rq_receiving_reply:1,
1843 rq_no_delay:1, rq_net_err:1, rq_wait_ctx:1,
1845 rq_req_unlink:1, rq_reply_unlink:1,
1846 rq_memalloc:1, /* req originated from "kswapd" */
1847 /* server-side flags */
1848 rq_packed_final:1, /* packed final reply */
1849 rq_hp:1, /* high priority RPC */
1850 rq_at_linked:1, /* link into service's srv_at_array */
1851 rq_reply_truncate:1,
1853 /* whether the "rq_set" is a valid one */
1855 rq_generation_set:1,
1856 /* do not resend request on -EINPROGRESS */
1857 rq_no_retry_einprogress:1,
1858 /* allow the req to be sent if the import is in recovery
1861 /* bulk request, sent to server, but uncommitted */
1864 unsigned int rq_nr_resend;
1866 enum rq_phase rq_phase; /* one of RQ_PHASE_* */
1867 enum rq_phase rq_next_phase; /* one of RQ_PHASE_* to be used next */
1868 atomic_t rq_refcount;/* client-side refcount for SENT race,
1869 server-side refcount for multiple replies */
1871 /** Portal to which this request would be sent */
1872 short rq_request_portal; /* XXX FIXME bug 249 */
1873 /** Portal where to wait for reply and where reply would be sent */
1874 short rq_reply_portal; /* XXX FIXME bug 249 */
1878 * !rq_truncate : # reply bytes actually received,
1879 * rq_truncate : required repbuf_len for resend
1881 int rq_nob_received;
1882 /** Request length */
1886 /** Request message - what client sent */
1887 struct lustre_msg *rq_reqmsg;
1888 /** Reply message - server response */
1889 struct lustre_msg *rq_repmsg;
1890 /** Transaction number */
1895 * List item for the replay list. Not yet committed requests get linked
1897 * Also see \a rq_replay comment above.
1899 struct list_head rq_replay_list;
1902 * security and encryption data
1904 struct ptlrpc_cli_ctx *rq_cli_ctx; /**< client's half ctx */
1905 struct ptlrpc_svc_ctx *rq_svc_ctx; /**< server's half ctx */
1906 struct list_head rq_ctx_chain; /**< link to waited ctx */
1908 struct sptlrpc_flavor rq_flvr; /**< for client & server */
1909 enum lustre_sec_part rq_sp_from;
1911 /* client/server security flags */
1913 rq_ctx_init:1, /* context initiation */
1914 rq_ctx_fini:1, /* context destroy */
1915 rq_bulk_read:1, /* request bulk read */
1916 rq_bulk_write:1, /* request bulk write */
1917 /* server authentication flags */
1918 rq_auth_gss:1, /* authenticated by gss */
1919 rq_auth_remote:1, /* authed as remote user */
1920 rq_auth_usr_root:1, /* authed as root */
1921 rq_auth_usr_mdt:1, /* authed as mdt */
1922 rq_auth_usr_ost:1, /* authed as ost */
1923 /* security tfm flags */
1926 /* doesn't expect reply FIXME */
1928 rq_pill_init:1; /* pill initialized */
1930 uid_t rq_auth_uid; /* authed uid */
1931 uid_t rq_auth_mapped_uid; /* authed uid mapped to */
1933 /* (server side), pointed directly into req buffer */
1934 struct ptlrpc_user_desc *rq_user_desc;
1936 /* various buffer pointers */
1937 struct lustre_msg *rq_reqbuf; /* req wrapper */
1938 char *rq_repbuf; /* rep buffer */
1939 struct lustre_msg *rq_repdata; /* rep wrapper msg */
1940 struct lustre_msg *rq_clrbuf; /* only in priv mode */
1941 int rq_reqbuf_len; /* req wrapper buf len */
1942 int rq_reqdata_len; /* req wrapper msg len */
1943 int rq_repbuf_len; /* rep buffer len */
1944 int rq_repdata_len; /* rep wrapper msg len */
1945 int rq_clrbuf_len; /* only in priv mode */
1946 int rq_clrdata_len; /* only in priv mode */
1948 /** early replies go to offset 0, regular replies go after that */
1949 unsigned int rq_reply_off;
1953 /** Fields that help to see if request and reply were swabbed or not */
1954 __u32 rq_req_swab_mask;
1955 __u32 rq_rep_swab_mask;
1957 /** What was import generation when this request was sent */
1958 int rq_import_generation;
1959 enum lustre_imp_state rq_send_state;
1961 /** how many early replies (for stats) */
1964 /** client+server request */
1965 lnet_handle_md_t rq_req_md_h;
1966 struct ptlrpc_cb_id rq_req_cbid;
1967 /** optional time limit for send attempts */
1968 cfs_duration_t rq_delay_limit;
1969 /** time request was first queued */
1970 cfs_time_t rq_queued_time;
1972 /* server-side... */
1973 /** request arrival time */
1974 struct timeval rq_arrival_time;
1975 /** separated reply state */
1976 struct ptlrpc_reply_state *rq_reply_state;
1977 /** incoming request buffer */
1978 struct ptlrpc_request_buffer_desc *rq_rqbd;
1980 /** client-only incoming reply */
1981 lnet_handle_md_t rq_reply_md_h;
1982 wait_queue_head_t rq_reply_waitq;
1983 struct ptlrpc_cb_id rq_reply_cbid;
1987 /** Peer description (the other side) */
1988 lnet_process_id_t rq_peer;
1989 /** Server-side, export on which request was received */
1990 struct obd_export *rq_export;
1991 /** Client side, import where request is being sent */
1992 struct obd_import *rq_import;
1994 /** Replay callback, called after request is replayed at recovery */
1995 void (*rq_replay_cb)(struct ptlrpc_request *);
1997  * Commit callback, called when request is committed and about to be
1998  * freed.
2000 void (*rq_commit_cb)(struct ptlrpc_request *);
2001         /** Opaque data for replay and commit callbacks. */
2004 /** For bulk requests on client only: bulk descriptor */
2005 struct ptlrpc_bulk_desc *rq_bulk;
2007 /** client outgoing req */
2009 * when request/reply sent (secs), or time when request should be sent
2012 /** time for request really sent out */
2013 time_t rq_real_sent;
2015 /** when request must finish. volatile
2016 * so that servers' early reply updates to the deadline aren't
2017 * kept in per-cpu cache */
2018 volatile time_t rq_deadline;
2019 /** when req reply unlink must finish. */
2020 time_t rq_reply_deadline;
2021 /** when req bulk unlink must finish. */
2022 time_t rq_bulk_deadline;
2024 * service time estimate (secs)
2025  * If the request is not served by this time, it is marked as timed out.
2029 /** Multi-rpc bits */
2030 /** Per-request waitq introduced by bug 21938 for recovery waiting */
2031 wait_queue_head_t rq_set_waitq;
2032 /** Link item for request set lists */
2033 struct list_head rq_set_chain;
2034 /** Link back to the request set */
2035 struct ptlrpc_request_set *rq_set;
2036 /** Async completion handler, called when reply is received */
2037 ptlrpc_interpterer_t rq_interpret_reply;
2038 /** Async completion context */
2039 union ptlrpc_async_args rq_async_args;
2041 /** Pool if request is from preallocated list */
2042 struct ptlrpc_request_pool *rq_pool;
2044 struct lu_context rq_session;
2046 /** request format description */
2047 struct req_capsule rq_pill;
2051  * Call the completion handler for the rpc, if any; return its status or
2052  * the original rc if no handler was defined for this request.
2054 static inline int ptlrpc_req_interpret(const struct lu_env *env,
2055 struct ptlrpc_request *req, int rc)
2057 if (req->rq_interpret_reply != NULL) {
2058 req->rq_status = req->rq_interpret_reply(env, req,
2059                                                          &req->rq_async_args,
2060                                                          rc);
2061                 return req->rq_status;
2062         }
2063         return rc;
2064 }
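/*
 * A minimal usage sketch (editorial, hedged): installing an asynchronous
 * completion handler before the request is handed to a set or to ptlrpcd.
 * my_interpret, my_args and result are hypothetical; only
 * rq_interpret_reply and rq_async_args come from this header, and the args
 * struct must fit inside union ptlrpc_async_args.
 *
 * \code
 * struct my_args {
 *         int *ma_result;                     // caller-private cookie
 * };
 *
 * static int my_interpret(const struct lu_env *env,
 *                         struct ptlrpc_request *req, void *args, int rc)
 * {
 *         struct my_args *ma = args;
 *
 *         *ma->ma_result = rc;                // record completion status
 *         return rc;
 * }
 *
 * struct my_args *ma = (void *)&req->rq_async_args;
 *
 * ma->ma_result = &result;
 * req->rq_interpret_reply = my_interpret;     // runs when the reply arrives
 * \endcode
 */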
2069 int ptlrpc_nrs_policy_register(struct ptlrpc_nrs_pol_conf *conf);
2070 int ptlrpc_nrs_policy_unregister(struct ptlrpc_nrs_pol_conf *conf);
2071 void ptlrpc_nrs_req_hp_move(struct ptlrpc_request *req);
2072 void nrs_policy_get_info_locked(struct ptlrpc_nrs_policy *policy,
2073 struct ptlrpc_nrs_pol_info *info);
2076 * Can the request be moved from the regular NRS head to the high-priority NRS
2077 * head (of the same PTLRPC service partition), if any?
2079  * For a reliable result, this should be checked under svcpt->scp_req_lock.
2081 static inline bool ptlrpc_nrs_req_can_move(struct ptlrpc_request *req)
2083 struct ptlrpc_nrs_request *nrq = &req->rq_nrq;
2086 * LU-898: Check ptlrpc_nrs_request::nr_enqueued to make sure the
2087 * request has been enqueued first, and ptlrpc_nrs_request::nr_started
2088 * to make sure it has not been scheduled yet (analogous to previous
2089  * (non-NRS) checking of !list_empty(&ptlrpc_request::rq_list)).
2091 return nrq->nr_enqueued && !nrq->nr_started && !req->rq_hp;
2096 * Returns 1 if request buffer at offset \a index was already swabbed
2098 static inline int lustre_req_swabbed(struct ptlrpc_request *req, size_t index)
2100 LASSERT(index < sizeof(req->rq_req_swab_mask) * 8);
2101 return req->rq_req_swab_mask & (1 << index);
2105 * Returns 1 if request reply buffer at offset \a index was already swabbed
2107 static inline int lustre_rep_swabbed(struct ptlrpc_request *req, size_t index)
2109 LASSERT(index < sizeof(req->rq_rep_swab_mask) * 8);
2110 return req->rq_rep_swab_mask & (1 << index);
2114 * Returns 1 if request needs to be swabbed into local cpu byteorder
2116 static inline int ptlrpc_req_need_swab(struct ptlrpc_request *req)
2118 return lustre_req_swabbed(req, MSG_PTLRPC_HEADER_OFF);
2122 * Returns 1 if request reply needs to be swabbed into local cpu byteorder
2124 static inline int ptlrpc_rep_need_swab(struct ptlrpc_request *req)
2126 return lustre_rep_swabbed(req, MSG_PTLRPC_HEADER_OFF);
2130 * Mark request buffer at offset \a index that it was already swabbed
2132 static inline void lustre_set_req_swabbed(struct ptlrpc_request *req,
2133                                           size_t index)
2134 {
2135         LASSERT(index < sizeof(req->rq_req_swab_mask) * 8);
2136         LASSERT((req->rq_req_swab_mask & (1 << index)) == 0);
2137         req->rq_req_swab_mask |= 1 << index;
2138 }
2141 * Mark request reply buffer at offset \a index that it was already swabbed
2143 static inline void lustre_set_rep_swabbed(struct ptlrpc_request *req,
2144                                           size_t index)
2145 {
2146         LASSERT(index < sizeof(req->rq_rep_swab_mask) * 8);
2147         LASSERT((req->rq_rep_swab_mask & (1 << index)) == 0);
2148         req->rq_rep_swab_mask |= 1 << index;
2149 }
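/*
 * A usage sketch (editorial, hedged): the intended check-then-mark pattern
 * for the swab helpers above, shown for a hypothetical buffer at offset
 * `off` with a hypothetical byte-swapping routine my_swab_body(); only
 * ptlrpc_req_need_swab(), lustre_req_swabbed() and lustre_set_req_swabbed()
 * come from this header.
 *
 * \code
 * if (ptlrpc_req_need_swab(req) && !lustre_req_swabbed(req, off)) {
 *         my_swab_body(body);                 // swap to local byte order
 *         lustre_set_req_swabbed(req, off);   // record it; never swab twice
 * }
 * \endcode
 */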
2152 * Convert numerical request phase value \a phase into text string description
2154 static inline const char *
2155 ptlrpc_phase2str(enum rq_phase phase)
2164         case RQ_PHASE_INTERPRET:
2165                 return "Interpret";
2166         case RQ_PHASE_COMPLETE:
2167                 return "Complete";
2168         case RQ_PHASE_UNREGISTERING:
2169                 return "Unregistering";
2176  * Convert numerical request phase of the request \a req into a text
2177  * string description.
2179 static inline const char *
2180 ptlrpc_rqphase2str(struct ptlrpc_request *req)
2182 return ptlrpc_phase2str(req->rq_phase);
2186 * Debugging functions and helpers to print request structure into debug log
2189 /* Spare the preprocessor, spoil the bugs. */
2190 #define FLAG(field, str) (field ? str : "")
2192 /** Convert bit flags into a string */
2193 #define DEBUG_REQ_FLAGS(req) \
2194 ptlrpc_rqphase2str(req), \
2195 FLAG(req->rq_intr, "I"), FLAG(req->rq_replied, "R"), \
2196 FLAG(req->rq_err, "E"), \
2197 FLAG(req->rq_timedout, "X") /* eXpired */, FLAG(req->rq_resend, "S"), \
2198 FLAG(req->rq_restart, "T"), FLAG(req->rq_replay, "P"), \
2199 FLAG(req->rq_no_resend, "N"), \
2200 FLAG(req->rq_waiting, "W"), \
2201 FLAG(req->rq_wait_ctx, "C"), FLAG(req->rq_hp, "H"), \
2202 FLAG(req->rq_committed, "M")
2204 #define REQ_FLAGS_FMT "%s:%s%s%s%s%s%s%s%s%s%s%s%s"
2206 void _debug_req(struct ptlrpc_request *req,
2207 struct libcfs_debug_msg_data *data, const char *fmt, ...)
2208 __attribute__ ((format (printf, 3, 4)));
2211  * Helper that decides if we need to print request according to current
2212  * debug level \a mask
2214 #define debug_req(msgdata, mask, cdls, req, fmt, a...) \
2215 do {                                                                      \
2216         CFS_CHECK_STACK(msgdata, mask, cdls);                             \
2218 if (((mask) & D_CANTMASK) != 0 || \
2219 ((libcfs_debug & (mask)) != 0 && \
2220 (libcfs_subsystem_debug & DEBUG_SUBSYSTEM) != 0)) \
2221                 _debug_req((req), msgdata, fmt, ##a);                     \
2222 } while (0)
2225  * This is the debug print function you need to use to print request
2226  * structure content into the lustre debug log.
2227  * For most callers (level is a constant) this is resolved at compile time */
2228 #define DEBUG_REQ(level, req, fmt, args...) \
2229 do {                                                                      \
2230         if ((level) & (D_ERROR | D_WARNING)) {                            \
2231 static cfs_debug_limit_state_t cdls; \
2232 LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, level, &cdls); \
2233 debug_req(&msgdata, level, &cdls, req, "@@@ "fmt" ", ## args);\
2234         } else {                                                          \
2235                 LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, level, NULL);         \
2236                 debug_req(&msgdata, level, NULL, req, "@@@ "fmt" ", ## args); \
2237         }                                                                 \
2238 } while (0)
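/*
 * A usage sketch (editorial, hedged): DEBUG_REQ() takes printf-style
 * arguments; _debug_req() prints the request state (phase and flags, as
 * built by DEBUG_REQ_FLAGS above) along with the caller-supplied message.
 *
 * \code
 * DEBUG_REQ(D_ERROR, req, "processing failed: rc = %d", rc);
 * \endcode
 */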
2242 * Structure that defines a single page of a bulk transfer
2244 struct ptlrpc_bulk_page {
2245 /** Linkage to list of pages in a bulk */
2246 struct list_head bp_link;
2248 * Number of bytes in a page to transfer starting from \a bp_pageoffset
2251 /** offset within a page */
2253 /** The page itself */
2254 struct page *bp_page;
2257 #define BULK_GET_SOURCE 0
2258 #define BULK_PUT_SINK 1
2259 #define BULK_GET_SINK 2
2260 #define BULK_PUT_SOURCE 3
2263 * Definition of bulk descriptor.
2264 * Bulks are special "Two phase" RPCs where initial request message
2265  * is sent first and is followed by a transfer (or receipt) of a large
2266  * amount of data settled into pages referenced from the bulk descriptors.
2267  * Bulk transfers (the actual data following the small requests) are done
2268 * on separate LNet portals.
2269 * In lustre we use bulk transfers for READ and WRITE transfers from/to OSTs.
2270 * Another user is readpage for MDT.
2272 struct ptlrpc_bulk_desc {
2273 /** completed with failure */
2274 unsigned long bd_failure:1;
2275 /** {put,get}{source,sink} */
2276 unsigned long bd_type:2;
2278 unsigned long bd_registered:1;
2279         /** For serialization with callback */
2280         spinlock_t bd_lock;
2281 /** Import generation when request for this bulk was sent */
2282 int bd_import_generation;
2283 /** LNet portal for this bulk */
2285 /** Server side - export this bulk created for */
2286 struct obd_export *bd_export;
2287 /** Client side - import this bulk was sent on */
2288 struct obd_import *bd_import;
2289 /** Back pointer to the request */
2290 struct ptlrpc_request *bd_req;
2291 wait_queue_head_t bd_waitq; /* server side only WQ */
2292 int bd_iov_count; /* # entries in bd_iov */
2293 int bd_max_iov; /* allocated size of bd_iov */
2294 int bd_nob; /* # bytes covered */
2295 int bd_nob_transferred; /* # bytes GOT/PUT */
2299 struct ptlrpc_cb_id bd_cbid; /* network callback info */
2300 lnet_nid_t bd_sender; /* stash event::sender */
2301 int bd_md_count; /* # valid entries in bd_mds */
2302 int bd_md_max_brw; /* max entries in bd_mds */
2303 /** array of associated MDs */
2304 lnet_handle_md_t bd_mds[PTLRPC_BULK_OPS_COUNT];
2306 #if defined(__KERNEL__)
2308 * encrypt iov, size is either 0 or bd_iov_count.
2310 lnet_kiov_t *bd_enc_iov;
2312 lnet_kiov_t bd_iov[0];
2314 lnet_md_iovec_t bd_iov[0];
2319 SVC_STOPPED = 1 << 0,
2320 SVC_STOPPING = 1 << 1,
2321 SVC_STARTING = 1 << 2,
2322 SVC_RUNNING = 1 << 3,
2323         SVC_EVENT = 1 << 4,
2324         SVC_SIGNAL = 1 << 5,
2327 #define PTLRPC_THR_NAME_LEN 32
2329 * Definition of server service thread structure
2331 struct ptlrpc_thread {
2333 * List of active threads in svc->srv_threads
2335 struct list_head t_link;
2337 * thread-private data (preallocated memory)
2342 * service thread index, from ptlrpc_start_threads
2346 * service thread pid
2350 * put watchdog in the structure per thread b=14840
2352 struct lc_watchdog *t_watchdog;
2354 * the svc this thread belonged to b=18582
2356 struct ptlrpc_service_part *t_svcpt;
2357 wait_queue_head_t t_ctl_waitq;
2358 struct lu_env *t_env;
2359 char t_name[PTLRPC_THR_NAME_LEN];
2362 static inline int thread_is_init(struct ptlrpc_thread *thread)
2364 return thread->t_flags == 0;
2367 static inline int thread_is_stopped(struct ptlrpc_thread *thread)
2369 return !!(thread->t_flags & SVC_STOPPED);
2372 static inline int thread_is_stopping(struct ptlrpc_thread *thread)
2374 return !!(thread->t_flags & SVC_STOPPING);
2377 static inline int thread_is_starting(struct ptlrpc_thread *thread)
2379 return !!(thread->t_flags & SVC_STARTING);
2382 static inline int thread_is_running(struct ptlrpc_thread *thread)
2384 return !!(thread->t_flags & SVC_RUNNING);
2387 static inline int thread_is_event(struct ptlrpc_thread *thread)
2389 return !!(thread->t_flags & SVC_EVENT);
2392 static inline int thread_is_signal(struct ptlrpc_thread *thread)
2394 return !!(thread->t_flags & SVC_SIGNAL);
2397 static inline void thread_clear_flags(struct ptlrpc_thread *thread, __u32 flags)
2399 thread->t_flags &= ~flags;
2402 static inline void thread_set_flags(struct ptlrpc_thread *thread, __u32 flags)
2404 thread->t_flags = flags;
2407 static inline void thread_add_flags(struct ptlrpc_thread *thread, __u32 flags)
2409 thread->t_flags |= flags;
2412 static inline int thread_test_and_clear_flags(struct ptlrpc_thread *thread,
2415         if (thread->t_flags & flags) {
2416                 thread->t_flags &= ~flags;
2417                 return 1;
2418         }
2419         return 0;
2420 }
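/*
 * A usage sketch (editorial, hedged): a service thread advertises its state
 * transitions through these helpers and wakes waiters on t_ctl_waitq;
 * handle_event() is hypothetical and the surrounding locking is omitted.
 *
 * \code
 * thread_add_flags(thread, SVC_RUNNING);
 * wake_up(&thread->t_ctl_waitq);           // unblock the thread starter
 *
 * if (thread_test_and_clear_flags(thread, SVC_EVENT))
 *         handle_event(thread);            // hypothetical event handler
 * \endcode
 */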
2423 * Request buffer descriptor structure.
2424 * This is a structure that contains one posted request buffer for service.
2425  * Once data lands into a buffer, the event callback creates the actual
2426  * request and wakes one of the service threads to process it.
2427 * More than one request can fit into the buffer.
2429 struct ptlrpc_request_buffer_desc {
2430 /** Link item for rqbds on a service */
2431 struct list_head rqbd_list;
2432 /** History of requests for this buffer */
2433 struct list_head rqbd_reqs;
2434 /** Back pointer to service for which this buffer is registered */
2435 struct ptlrpc_service_part *rqbd_svcpt;
2436 /** LNet descriptor */
2437 lnet_handle_md_t rqbd_md_h;
2439 /** The buffer itself */
2441 struct ptlrpc_cb_id rqbd_cbid;
2443 * This "embedded" request structure is only used for the
2444 * last request to fit into the buffer
2446 struct ptlrpc_request rqbd_req;
2449 typedef int (*svc_handler_t)(struct ptlrpc_request *req);
2451 struct ptlrpc_service_ops {
2453 * if non-NULL called during thread creation (ptlrpc_start_thread())
2454 * to initialize service specific per-thread state.
2456 int (*so_thr_init)(struct ptlrpc_thread *thr);
2458 * if non-NULL called during thread shutdown (ptlrpc_main()) to
2459 * destruct state created by ->srv_init().
2461 void (*so_thr_done)(struct ptlrpc_thread *thr);
2463 * Handler function for incoming requests for this service
2465 int (*so_req_handler)(struct ptlrpc_request *req);
2467  * function to determine the priority of the request; called for
2468  * every new request
2470 int (*so_hpreq_handler)(struct ptlrpc_request *);
2472 * service-specific print fn
2474         void (*so_req_printer)(void *, struct ptlrpc_request *);
2475 };
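/*
 * A minimal sketch (editorial, hedged): an ops table for a service that only
 * supplies a request handler; my_req_handler is hypothetical and must match
 * svc_handler_t above.
 *
 * \code
 * static struct ptlrpc_service_ops my_svc_ops = {
 *         .so_req_handler = my_req_handler,
 * };
 * \endcode
 */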
2477 #ifndef __cfs_cacheline_aligned
2478 /* NB: put it here to reduce patch dependencies */
2479 # define __cfs_cacheline_aligned
2483  * How many high priority requests to serve before serving one normal
2484  * request.
2486 #define PTLRPC_SVC_HP_RATIO 10
2489 * Definition of PortalRPC service.
2490  * The service listens on a particular portal (like a TCP port)
2491  * and performs actions for a specific server, like the IO service for
2492  * an OST or the general metadata service for an MDS.
2494 struct ptlrpc_service {
2495 /** serialize /proc operations */
2496 spinlock_t srv_lock;
2497 /** most often accessed fields */
2498 /** chain thru all services */
2499 struct list_head srv_list;
2500 /** service operations table */
2501 struct ptlrpc_service_ops srv_ops;
2502         /** only statically allocated strings here; we don't clean them */
2503         char *srv_name;
2504 /** only statically allocated strings here; we don't clean them */
2505 char *srv_thread_name;
2506 /** service thread list */
2507 struct list_head srv_threads;
2508         /** # of threads to start on each partition when initializing */
2509 int srv_nthrs_cpt_init;
2510 /** limit of threads number for each partition */
2511 int srv_nthrs_cpt_limit;
2512 /** Root of /proc dir tree for this service */
2513 cfs_proc_dir_entry_t *srv_procroot;
2514 /** Pointer to statistic data for this service */
2515 struct lprocfs_stats *srv_stats;
2516 /** # hp per lp reqs to handle */
2517 int srv_hpreq_ratio;
2518 /** biggest request to receive */
2519 int srv_max_req_size;
2520 /** biggest reply to send */
2521 int srv_max_reply_size;
2522 /** size of individual buffers */
2524 /** # buffers to allocate in 1 group */
2525 int srv_nbuf_per_group;
2526 /** Local portal on which to receive requests */
2527 __u32 srv_req_portal;
2528 /** Portal on the client to send replies to */
2529 __u32 srv_rep_portal;
2531  * Tags for lu_context associated with this thread, see struct
2532  * lu_context.
2535 /** soft watchdog timeout multiplier */
2536 int srv_watchdog_factor;
2537 /** under unregister_service */
2538 unsigned srv_is_stopping:1;
2540 /** max # request buffers in history per partition */
2541 int srv_hist_nrqbds_cpt_max;
2542 /** number of CPTs this service bound on */
2544 /** CPTs array this service bound on */
2546         /** 2^srv_cptab_bits >= cfs_cpt_number(srv_cptable) */
2548 /** CPT table this service is running over */
2549 struct cfs_cpt_table *srv_cptable;
2551 * partition data for ptlrpc service
2553 struct ptlrpc_service_part *srv_parts[0];
2557 * Definition of PortalRPC service partition data.
2558  * Although a service has only one instance of it right now, we
2559 * will have multiple instances very soon (instance per CPT).
2561  * it has four locks:
2562  * \a scp_lock
2563  *    serialize operations on rqbd and requests waiting for preprocess
2564  * \a scp_req_lock
2565  *    serialize operations on active requests sent to this portal
2566  * \a scp_at_lock
2567  *    serialize adaptive timeout stuff
2568  * \a scp_rep_lock
2569  *    serialize operations on RS list (reply states)
2571 * We don't have any use-case to take two or more locks at the same time
2572 * for now, so there is no lock order issue.
2574 struct ptlrpc_service_part {
2575 /** back reference to owner */
2576 struct ptlrpc_service *scp_service __cfs_cacheline_aligned;
2577 /* CPT id, reserved */
2579 /** always increasing number */
2581 /** # of starting threads */
2582 int scp_nthrs_starting;
2583 /** # of stopping threads, reserved for shrinking threads */
2584 int scp_nthrs_stopping;
2585 /** # running threads */
2586 int scp_nthrs_running;
2587 /** service threads list */
2588 struct list_head scp_threads;
2591 * serialize the following fields, used for protecting
2592 * rqbd list and incoming requests waiting for preprocess,
2593 * threads starting & stopping are also protected by this lock.
2595 spinlock_t scp_lock __cfs_cacheline_aligned;
2596 /** total # req buffer descs allocated */
2597 int scp_nrqbds_total;
2598 /** # posted request buffers for receiving */
2599 int scp_nrqbds_posted;
2600 /** in progress of allocating rqbd */
2601 int scp_rqbd_allocating;
2602 /** # incoming reqs */
2603 int scp_nreqs_incoming;
2604 /** request buffers to be reposted */
2605 struct list_head scp_rqbd_idle;
2606 /** req buffers receiving */
2607 struct list_head scp_rqbd_posted;
2608 /** incoming reqs */
2609 struct list_head scp_req_incoming;
2610         /** timeout before re-posting reqs, in ticks */
2611 cfs_duration_t scp_rqbd_timeout;
2613  * all threads sleep on this. This wait queue is signalled when a new
2614  * incoming request arrives and when a difficult reply has to be handled.
2616 wait_queue_head_t scp_waitq;
2618 /** request history */
2619 struct list_head scp_hist_reqs;
2620 /** request buffer history */
2621 struct list_head scp_hist_rqbds;
2622 /** # request buffers in history */
2623 int scp_hist_nrqbds;
2624 /** sequence number for request */
2626 /** highest seq culled from history */
2627 __u64 scp_hist_seq_culled;
2630 * serialize the following fields, used for processing requests
2631 * sent to this portal
2633 spinlock_t scp_req_lock __cfs_cacheline_aligned;
2634 /** # reqs in either of the NRS heads below */
2635 /** # reqs being served */
2636 int scp_nreqs_active;
2637 /** # HPreqs being served */
2638 int scp_nhreqs_active;
2639 /** # hp requests handled */
2642 /** NRS head for regular requests */
2643 struct ptlrpc_nrs scp_nrs_reg;
2644 /** NRS head for HP requests; this is only valid for services that can
2645 * handle HP requests */
2646 struct ptlrpc_nrs *scp_nrs_hp;
2651  * serialize the following fields, used for changes on
2652  * adaptive timeout
2654 spinlock_t scp_at_lock __cfs_cacheline_aligned;
2655 /** estimated rpc service time */
2656 struct adaptive_timeout scp_at_estimate;
2657 /** reqs waiting for replies */
2658 struct ptlrpc_at_array scp_at_array;
2659 /** early reply timer */
2660 struct timer_list scp_at_timer;
2662 cfs_time_t scp_at_checktime;
2663 /** check early replies */
2664 unsigned scp_at_check;
2668 * serialize the following fields, used for processing
2669 * replies for this portal
2671 spinlock_t scp_rep_lock __cfs_cacheline_aligned;
2672 /** all the active replies */
2673 struct list_head scp_rep_active;
2675 /** replies waiting for service */
2676 struct list_head scp_rep_queue;
2678 /** List of free reply_states */
2679 struct list_head scp_rep_idle;
2680         /** waitq to wake when adding entries to scp_rep_idle */
2681 wait_queue_head_t scp_rep_waitq;
2682 /** # 'difficult' replies */
2683 atomic_t scp_nreps_difficult;
2686 #define ptlrpc_service_for_each_part(part, i, svc)                      \
2687         for (i = 0;                                                      \
2688              i < (svc)->srv_ncpts &&                                     \
2689              (svc)->srv_parts != NULL &&                                 \
2690              ((part) = (svc)->srv_parts[i]) != NULL; i++)
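/*
 * A usage sketch (editorial, hedged): iterating all partitions of a service,
 * e.g. to sum the number of active requests; `svc` is a struct
 * ptlrpc_service pointer and `i` a plain int.
 *
 * \code
 * struct ptlrpc_service_part *svcpt;
 * int i, active = 0;
 *
 * ptlrpc_service_for_each_part(svcpt, i, svc)
 *         active += svcpt->scp_nreqs_active;
 * \endcode
 */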
2693 * Declaration of ptlrpcd control structure
2695 struct ptlrpcd_ctl {
2697 * Ptlrpc thread control flags (LIOD_START, LIOD_STOP, LIOD_FORCE)
2699 unsigned long pc_flags;
2701 * Thread lock protecting structure fields.
2707 struct completion pc_starting;
2711 struct completion pc_finishing;
2713 * Thread requests set.
2715 struct ptlrpc_request_set *pc_set;
2717 * Thread name used in kthread_run()
2721 * Environment for request interpreters to run in.
2723 struct lu_env pc_env;
2725 * Index of ptlrpcd thread in the array.
2729 * Number of the ptlrpcd's partners.
2733 * Pointer to the array of partners' ptlrpcd_ctl structure.
2735 struct ptlrpcd_ctl **pc_partners;
2737 * Record the partner index to be processed next.
2742  * Async rpcs flag to make sure that ptlrpcd_check() is called only
2743  * once at a time.
2747 * Currently not used.
2751 * User-space async rpcs callback.
2753 void *pc_wait_callback;
2755 * User-space check idle rpcs callback.
2757 void *pc_idle_callback;
2761 /* Bits for pc_flags */
2762 enum ptlrpcd_ctl_flags {
2764 * Ptlrpc thread start flag.
2766 LIOD_START = 1 << 0,
2768  * Ptlrpc thread stop flag.
2770         LIOD_STOP = 1 << 1,
2772 * Ptlrpc thread force flag (only stop force so far).
2773  * This causes any in-flight RPCs handled by the thread to be
2774  * aborted if LIOD_STOP is also specified.
2776 LIOD_FORCE = 1 << 2,
2778 * This is a recovery ptlrpc thread.
2780 LIOD_RECOVERY = 1 << 3,
2782 * The ptlrpcd is bound to some CPU core.
2791 * Service compatibility function; the policy is compatible with all services.
2793 * \param[in] svc The service the policy is attempting to register with.
2794 * \param[in] desc The policy descriptor
2796 * \retval true The policy is compatible with the service
2798 * \see ptlrpc_nrs_pol_desc::pd_compat()
2800 static inline bool nrs_policy_compat_all(const struct ptlrpc_service *svc,
2801 const struct ptlrpc_nrs_pol_desc *desc)
2807 * Service compatibility function; the policy is compatible with only a specific
2808 * service which is identified by its human-readable name at
2809 * ptlrpc_service::srv_name.
2811 * \param[in] svc The service the policy is attempting to register with.
2812 * \param[in] desc The policy descriptor
2814 * \retval false The policy is not compatible with the service
2815 * \retval true The policy is compatible with the service
2817 * \see ptlrpc_nrs_pol_desc::pd_compat()
2819 static inline bool nrs_policy_compat_one(const struct ptlrpc_service *svc,
2820 const struct ptlrpc_nrs_pol_desc *desc)
2822 LASSERT(desc->pd_compat_svc_name != NULL);
2823 return strcmp(svc->srv_name, desc->pd_compat_svc_name) == 0;
2828 /* ptlrpc/events.c */
2829 extern lnet_handle_eq_t ptlrpc_eq_h;
2830 extern int ptlrpc_uuid_to_peer(struct obd_uuid *uuid,
2831 lnet_process_id_t *peer, lnet_nid_t *self);
2833  * These callbacks are invoked by LNet when something happens to the
2834  * underlying buffer.
2837 extern void request_out_callback(lnet_event_t *ev);
2838 extern void reply_in_callback(lnet_event_t *ev);
2839 extern void client_bulk_callback(lnet_event_t *ev);
2840 extern void request_in_callback(lnet_event_t *ev);
2841 extern void reply_out_callback(lnet_event_t *ev);
2842 #ifdef HAVE_SERVER_SUPPORT
2843 extern void server_bulk_callback(lnet_event_t *ev);
2847 /* ptlrpc/connection.c */
2848 struct ptlrpc_connection *ptlrpc_connection_get(lnet_process_id_t peer,
2850 struct obd_uuid *uuid);
2851 int ptlrpc_connection_put(struct ptlrpc_connection *c);
2852 struct ptlrpc_connection *ptlrpc_connection_addref(struct ptlrpc_connection *);
2853 int ptlrpc_connection_init(void);
2854 void ptlrpc_connection_fini(void);
2855 extern lnet_pid_t ptl_get_pid(void);
2857 /* ptlrpc/niobuf.c */
2859 * Actual interfacing with LNet to put/get/register/unregister stuff
2862 #ifdef HAVE_SERVER_SUPPORT
2863 struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_exp(struct ptlrpc_request *req,
2864 unsigned npages, unsigned max_brw,
2865 unsigned type, unsigned portal);
2866 int ptlrpc_start_bulk_transfer(struct ptlrpc_bulk_desc *desc);
2867 void ptlrpc_abort_bulk(struct ptlrpc_bulk_desc *desc);
2869 static inline int ptlrpc_server_bulk_active(struct ptlrpc_bulk_desc *desc)
2870 {
2871         int rc;
2873         LASSERT(desc != NULL);
2875         spin_lock(&desc->bd_lock);
2876         rc = desc->bd_md_count;
2877         spin_unlock(&desc->bd_lock);
2878         return rc;
2879 }
2882 int ptlrpc_register_bulk(struct ptlrpc_request *req);
2883 int ptlrpc_unregister_bulk(struct ptlrpc_request *req, int async);
2885 static inline int ptlrpc_client_bulk_active(struct ptlrpc_request *req)
2886 {
2887         struct ptlrpc_bulk_desc *desc;
2888         int rc;
2890         LASSERT(req != NULL);
2891         desc = req->rq_bulk;
2893         if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK) &&
2894             req->rq_bulk_deadline > cfs_time_current_sec())
2895                 return 1;
2896         if (desc == NULL)
2897                 return 0;
2900         spin_lock(&desc->bd_lock);
2901         rc = desc->bd_md_count;
2902         spin_unlock(&desc->bd_lock);
2903         return rc;
2904 }
2906 #define PTLRPC_REPLY_MAYBE_DIFFICULT 0x01
2907 #define PTLRPC_REPLY_EARLY 0x02
2908 int ptlrpc_send_reply(struct ptlrpc_request *req, int flags);
2909 int ptlrpc_reply(struct ptlrpc_request *req);
2910 int ptlrpc_send_error(struct ptlrpc_request *req, int difficult);
2911 int ptlrpc_error(struct ptlrpc_request *req);
2912 void ptlrpc_resend_req(struct ptlrpc_request *request);
2913 int ptlrpc_at_get_net_latency(struct ptlrpc_request *req);
2914 int ptl_send_rpc(struct ptlrpc_request *request, int noreply);
2915 int ptlrpc_register_rqbd(struct ptlrpc_request_buffer_desc *rqbd);
2918 /* ptlrpc/client.c */
2920 * Client-side portals API. Everything to send requests, receive replies,
2921 * request queues, request management, etc.
2924 void ptlrpc_request_committed(struct ptlrpc_request *req, int force);
2926 void ptlrpc_init_client(int req_portal, int rep_portal, char *name,
2927 struct ptlrpc_client *);
2928 void ptlrpc_cleanup_client(struct obd_import *imp);
2929 struct ptlrpc_connection *ptlrpc_uuid_to_connection(struct obd_uuid *uuid);
2931 int ptlrpc_queue_wait(struct ptlrpc_request *req);
2932 int ptlrpc_replay_req(struct ptlrpc_request *req);
2933 int ptlrpc_unregister_reply(struct ptlrpc_request *req, int async);
2934 void ptlrpc_restart_req(struct ptlrpc_request *req);
2935 void ptlrpc_abort_inflight(struct obd_import *imp);
2936 void ptlrpc_cleanup_imp(struct obd_import *imp);
2937 void ptlrpc_abort_set(struct ptlrpc_request_set *set);
2939 struct ptlrpc_request_set *ptlrpc_prep_set(void);
2940 struct ptlrpc_request_set *ptlrpc_prep_fcset(int max, set_producer_func func,
2942 int ptlrpc_set_add_cb(struct ptlrpc_request_set *set,
2943 set_interpreter_func fn, void *data);
2944 int ptlrpc_set_next_timeout(struct ptlrpc_request_set *);
2945 int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set);
2946 int ptlrpc_set_wait(struct ptlrpc_request_set *);
2947 int ptlrpc_expired_set(void *data);
2948 void ptlrpc_interrupted_set(void *data);
2949 void ptlrpc_mark_interrupted(struct ptlrpc_request *req);
2950 void ptlrpc_set_destroy(struct ptlrpc_request_set *);
2951 void ptlrpc_set_add_req(struct ptlrpc_request_set *, struct ptlrpc_request *);
2952 void ptlrpc_set_add_new_req(struct ptlrpcd_ctl *pc,
2953 struct ptlrpc_request *req);
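/*
 * A usage sketch (editorial, hedged): sending several requests in parallel
 * with a request set. Error handling is elided and build_req() is a
 * hypothetical helper that allocates and packs one request.
 *
 * \code
 * struct ptlrpc_request_set *set = ptlrpc_prep_set();
 * int i, rc;
 *
 * if (set == NULL)
 *         return -ENOMEM;
 * for (i = 0; i < nr; i++)
 *         ptlrpc_set_add_req(set, build_req(i));
 * rc = ptlrpc_set_wait(set);          // send all, wait for all replies
 * ptlrpc_set_destroy(set);            // drops the set's request references
 * \endcode
 */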
2955 void ptlrpc_free_rq_pool(struct ptlrpc_request_pool *pool);
2956 void ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq);
2958 struct ptlrpc_request_pool *
2959 ptlrpc_init_rq_pool(int, int,
2960 void (*populate_pool)(struct ptlrpc_request_pool *, int));
2962 void ptlrpc_at_set_req_timeout(struct ptlrpc_request *req);
2963 struct ptlrpc_request *ptlrpc_request_alloc(struct obd_import *imp,
2964 const struct req_format *format);
2965 struct ptlrpc_request *ptlrpc_request_alloc_pool(struct obd_import *imp,
2966 struct ptlrpc_request_pool *,
2967 const struct req_format *format);
2968 void ptlrpc_request_free(struct ptlrpc_request *request);
2969 int ptlrpc_request_pack(struct ptlrpc_request *request,
2970 __u32 version, int opcode);
2971 struct ptlrpc_request *ptlrpc_request_alloc_pack(struct obd_import *imp,
2972 const struct req_format *format,
2973 __u32 version, int opcode);
2974 int ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
2975 __u32 version, int opcode, char **bufs,
2976 struct ptlrpc_cli_ctx *ctx);
2977 struct ptlrpc_request *ptlrpc_prep_req(struct obd_import *imp, __u32 version,
2978 int opcode, int count, __u32 *lengths,
2980 struct ptlrpc_request *ptlrpc_prep_req_pool(struct obd_import *imp,
2981 __u32 version, int opcode,
2982 int count, __u32 *lengths, char **bufs,
2983 struct ptlrpc_request_pool *pool);
2984 void ptlrpc_req_finished(struct ptlrpc_request *request);
2985 void ptlrpc_req_finished_with_imp_lock(struct ptlrpc_request *request);
2986 struct ptlrpc_request *ptlrpc_request_addref(struct ptlrpc_request *req);
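/*
 * A life-cycle sketch (editorial, hedged): the common path for a synchronous
 * client RPC. RQF_MY_FORMAT, MY_VERSION and MY_OPC are hypothetical
 * placeholders for a real request format and opcode.
 *
 * \code
 * struct ptlrpc_request *req;
 * int rc;
 *
 * req = ptlrpc_request_alloc_pack(imp, &RQF_MY_FORMAT,
 *                                 MY_VERSION, MY_OPC);
 * if (req == NULL)
 *         return -ENOMEM;
 * ptlrpc_request_set_replen(req);     // size the reply from the format
 * rc = ptlrpc_queue_wait(req);        // send and wait for the reply
 * ptlrpc_req_finished(req);           // drop the request reference
 * return rc;
 * \endcode
 */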
2987 struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req,
2988 unsigned npages, unsigned max_brw,
2989 unsigned type, unsigned portal);
2990 void __ptlrpc_free_bulk(struct ptlrpc_bulk_desc *bulk, int pin);
2991 static inline void ptlrpc_free_bulk_pin(struct ptlrpc_bulk_desc *bulk)
2993 __ptlrpc_free_bulk(bulk, 1);
2995 static inline void ptlrpc_free_bulk_nopin(struct ptlrpc_bulk_desc *bulk)
2997 __ptlrpc_free_bulk(bulk, 0);
2999 void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
3000                              struct page *page, int pageoffset, int len, int pin);
3001 static inline void ptlrpc_prep_bulk_page_pin(struct ptlrpc_bulk_desc *desc,
3002 struct page *page, int pageoffset,
3005 __ptlrpc_prep_bulk_page(desc, page, pageoffset, len, 1);
3008 static inline void ptlrpc_prep_bulk_page_nopin(struct ptlrpc_bulk_desc *desc,
3009 struct page *page, int pageoffset,
3012 __ptlrpc_prep_bulk_page(desc, page, pageoffset, len, 0);
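/*
 * A usage sketch (editorial, hedged): attaching pages to a client bulk for
 * a read; BULK_PUT_SINK matches a server that pushes data into the client's
 * buffers. OST_BULK_PORTAL, pages[] and npages are assumed to be provided
 * by the caller, and PAGE_CACHE_SIZE by the kernel of this era.
 *
 * \code
 * struct ptlrpc_bulk_desc *desc;
 * int i;
 *
 * desc = ptlrpc_prep_bulk_imp(req, npages, 1, BULK_PUT_SINK,
 *                             OST_BULK_PORTAL);
 * if (desc == NULL)
 *         return -ENOMEM;
 * for (i = 0; i < npages; i++)
 *         ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_CACHE_SIZE);
 * \endcode
 */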
3015 void ptlrpc_retain_replayable_request(struct ptlrpc_request *req,
3016 struct obd_import *imp);
3017 __u64 ptlrpc_next_xid(void);
3018 __u64 ptlrpc_sample_next_xid(void);
3019 __u64 ptlrpc_req_xid(struct ptlrpc_request *request);
3021 /* Set of routines to run a function in ptlrpcd context */
3022 void *ptlrpcd_alloc_work(struct obd_import *imp,
3023 int (*cb)(const struct lu_env *, void *), void *data);
3024 void ptlrpcd_destroy_work(void *handler);
3025 int ptlrpcd_queue_work(void *handler);
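/*
 * A usage sketch (editorial, hedged): running a callback in ptlrpcd context.
 * my_work_cb and my_data are hypothetical; the ERR_PTR() convention on
 * allocation failure is assumed here.
 *
 * \code
 * void *handler = ptlrpcd_alloc_work(imp, my_work_cb, my_data);
 *
 * if (IS_ERR(handler))
 *         return PTR_ERR(handler);
 * ptlrpcd_queue_work(handler);        // schedule one execution
 *
 * ptlrpcd_destroy_work(handler);      // release when no longer needed
 * \endcode
 */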
3028 struct ptlrpc_service_buf_conf {
3029         /* nbufs is the # of buffers to allocate when growing the pool */
3030 unsigned int bc_nbufs;
3031 /* buffer size to post */
3032 unsigned int bc_buf_size;
3033         /* portal to listen for requests on */
3034 unsigned int bc_req_portal;
3035         /* portal where replies are sent */
3036 unsigned int bc_rep_portal;
3037 /* maximum request size to be accepted for this service */
3038 unsigned int bc_req_max_size;
3039 /* maximum reply size this service can ever send */
3040 unsigned int bc_rep_max_size;
3043 struct ptlrpc_service_thr_conf {
3044 /* threadname should be 8 characters or less - 6 will be added on */
3046 /* threads increasing factor for each CPU */
3047 unsigned int tc_thr_factor;
3048 /* service threads # to start on each partition while initializing */
3049 unsigned int tc_nthrs_init;
3051  * lower bound on the per-partition thread upper limit while running;
3052  * service availability may be impacted if the number of threads is
3053  * lower than this value. It can be ZERO if the service doesn't require
3054  * CPU affinity or there is only one partition.
3056 unsigned int tc_nthrs_base;
3057 /* "soft" limit for total threads number */
3058 unsigned int tc_nthrs_max;
3059         /* user-specified thread count; it is validated against the
3060          * other members of this structure. */
3061 unsigned int tc_nthrs_user;
3062 /* set NUMA node affinity for service threads */
3063 unsigned int tc_cpu_affinity;
3064 /* Tags for lu_context associated with service thread */
3068 struct ptlrpc_service_cpt_conf {
3069 struct cfs_cpt_table *cc_cptable;
3070 /* string pattern to describe CPTs for a service */
3074 struct ptlrpc_service_conf {
3077         /* soft watchdog timeout multiplier to print stuck service traces */
3078 unsigned int psc_watchdog_factor;
3079 /* buffer information */
3080 struct ptlrpc_service_buf_conf psc_buf;
3081 /* thread information */
3082 struct ptlrpc_service_thr_conf psc_thr;
3083 /* CPU partition information */
3084 struct ptlrpc_service_cpt_conf psc_cpt;
3085 /* function table */
3086 struct ptlrpc_service_ops psc_ops;
3089 /* ptlrpc/service.c */
3091 * Server-side services API. Register/unregister service, request state
3092 * management, service thread management
3096 void ptlrpc_save_lock(struct ptlrpc_request *req,
3097 struct lustre_handle *lock, int mode, int no_ack);
3098 void ptlrpc_commit_replies(struct obd_export *exp);
3099 void ptlrpc_dispatch_difficult_reply(struct ptlrpc_reply_state *rs);
3100 void ptlrpc_schedule_difficult_reply(struct ptlrpc_reply_state *rs);
3101 int ptlrpc_hpreq_handler(struct ptlrpc_request *req);
3102 struct ptlrpc_service *ptlrpc_register_service(
3103 struct ptlrpc_service_conf *conf,
3104 struct proc_dir_entry *proc_entry);
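/*
 * A registration sketch (editorial, hedged): filling a minimal conf and
 * registering the service. All MY_* constants, the psc_name and tc_thr_name
 * field names, my_req_handler and the ERR_PTR() return convention are
 * assumptions; only the conf layout above is authoritative.
 *
 * \code
 * static struct ptlrpc_service_conf conf = {
 *         .psc_name            = "my_svc",
 *         .psc_watchdog_factor = 2,
 *         .psc_buf = {
 *                 .bc_nbufs        = MY_NBUFS,
 *                 .bc_buf_size     = MY_BUFSIZE,
 *                 .bc_req_max_size = MY_MAXREQSIZE,
 *                 .bc_rep_max_size = MY_MAXREPSIZE,
 *                 .bc_req_portal   = MY_REQUEST_PORTAL,
 *                 .bc_rep_portal   = MY_REPLY_PORTAL,
 *         },
 *         .psc_thr = {
 *                 .tc_thr_name   = "my_thr",
 *                 .tc_nthrs_init = 2,
 *                 .tc_nthrs_max  = 8,
 *         },
 *         .psc_ops = {
 *                 .so_req_handler = my_req_handler,
 *         },
 * };
 *
 * svc = ptlrpc_register_service(&conf, proc_entry);
 * if (IS_ERR(svc))
 *         return PTR_ERR(svc);
 * \endcode
 */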
3105 void ptlrpc_stop_all_threads(struct ptlrpc_service *svc);
3107 int ptlrpc_start_threads(struct ptlrpc_service *svc);
3108 int ptlrpc_unregister_service(struct ptlrpc_service *service);
3109 int liblustre_check_services(void *arg);
3110 void ptlrpc_daemonize(char *name);
3111 int ptlrpc_service_health_check(struct ptlrpc_service *);
3112 void ptlrpc_server_drop_request(struct ptlrpc_request *req);
3113 void ptlrpc_request_change_export(struct ptlrpc_request *req,
3114 struct obd_export *export);
3117 int ptlrpc_hr_init(void);
3118 void ptlrpc_hr_fini(void);
3120 # define ptlrpc_hr_init() (0)
3121 # define ptlrpc_hr_fini() do {} while(0)
3126 /* ptlrpc/import.c */
3131 int ptlrpc_connect_import(struct obd_import *imp);
3132 int ptlrpc_init_import(struct obd_import *imp);
3133 int ptlrpc_disconnect_import(struct obd_import *imp, int noclose);
3134 int ptlrpc_import_recovery_state_machine(struct obd_import *imp);
3135 void deuuidify(char *uuid, const char *prefix, char **uuid_start,
3138 /* ptlrpc/pack_generic.c */
3139 int ptlrpc_reconnect_import(struct obd_import *imp);
3143 * ptlrpc msg buffer and swab interface
3147 int ptlrpc_buf_need_swab(struct ptlrpc_request *req, const int inout,
3149 void ptlrpc_buf_set_swabbed(struct ptlrpc_request *req, const int inout,
3151 int ptlrpc_unpack_rep_msg(struct ptlrpc_request *req, int len);
3152 int ptlrpc_unpack_req_msg(struct ptlrpc_request *req, int len);
3154 int lustre_msg_check_version(struct lustre_msg *msg, __u32 version);
3155 void lustre_init_msg_v2(struct lustre_msg_v2 *msg, int count, __u32 *lens,
3157 int lustre_pack_request(struct ptlrpc_request *, __u32 magic, int count,
3158 __u32 *lens, char **bufs);
3159 int lustre_pack_reply(struct ptlrpc_request *, int count, __u32 *lens,
3161 int lustre_pack_reply_v2(struct ptlrpc_request *req, int count,
3162 __u32 *lens, char **bufs, int flags);
3163 #define LPRFL_EARLY_REPLY 1
3164 int lustre_pack_reply_flags(struct ptlrpc_request *, int count, __u32 *lens,
3165 char **bufs, int flags);
3166 int lustre_shrink_msg(struct lustre_msg *msg, int segment,
3167 unsigned int newlen, int move_data);
3168 void lustre_free_reply_state(struct ptlrpc_reply_state *rs);
3169 int __lustre_unpack_msg(struct lustre_msg *m, int len);
3170 int lustre_msg_hdr_size(__u32 magic, int count);
3171 int lustre_msg_size(__u32 magic, int count, __u32 *lengths);
3172 int lustre_msg_size_v2(int count, __u32 *lengths);
3173 int lustre_packed_msg_size(struct lustre_msg *msg);
3174 int lustre_msg_early_size(void);
3175 void *lustre_msg_buf_v2(struct lustre_msg_v2 *m, int n, int min_size);
3176 void *lustre_msg_buf(struct lustre_msg *m, int n, int minlen);
3177 int lustre_msg_buflen(struct lustre_msg *m, int n);
3178 void lustre_msg_set_buflen(struct lustre_msg *m, int n, int len);
3179 int lustre_msg_bufcount(struct lustre_msg *m);
3180 char *lustre_msg_string(struct lustre_msg *m, int n, int max_len);
3181 __u32 lustre_msghdr_get_flags(struct lustre_msg *msg);
3182 void lustre_msghdr_set_flags(struct lustre_msg *msg, __u32 flags);
3183 __u32 lustre_msg_get_flags(struct lustre_msg *msg);
3184 void lustre_msg_add_flags(struct lustre_msg *msg, int flags);
3185 void lustre_msg_set_flags(struct lustre_msg *msg, int flags);
3186 void lustre_msg_clear_flags(struct lustre_msg *msg, int flags);
3187 __u32 lustre_msg_get_op_flags(struct lustre_msg *msg);
3188 void lustre_msg_add_op_flags(struct lustre_msg *msg, int flags);
3189 void lustre_msg_set_op_flags(struct lustre_msg *msg, int flags);
3190 struct lustre_handle *lustre_msg_get_handle(struct lustre_msg *msg);
3191 __u32 lustre_msg_get_type(struct lustre_msg *msg);
3192 __u32 lustre_msg_get_version(struct lustre_msg *msg);
3193 void lustre_msg_add_version(struct lustre_msg *msg, int version);
3194 __u32 lustre_msg_get_opc(struct lustre_msg *msg);
3195 __u64 lustre_msg_get_last_xid(struct lustre_msg *msg);
3196 __u64 lustre_msg_get_last_committed(struct lustre_msg *msg);
3197 __u64 *lustre_msg_get_versions(struct lustre_msg *msg);
3198 __u64 lustre_msg_get_transno(struct lustre_msg *msg);
3199 __u64 lustre_msg_get_slv(struct lustre_msg *msg);
3200 __u32 lustre_msg_get_limit(struct lustre_msg *msg);
3201 void lustre_msg_set_slv(struct lustre_msg *msg, __u64 slv);
3202 void lustre_msg_set_limit(struct lustre_msg *msg, __u64 limit);
3203 int lustre_msg_get_status(struct lustre_msg *msg);
3204 __u32 lustre_msg_get_conn_cnt(struct lustre_msg *msg);
3205 int lustre_msg_is_v1(struct lustre_msg *msg);
3206 __u32 lustre_msg_get_magic(struct lustre_msg *msg);
3207 __u32 lustre_msg_get_timeout(struct lustre_msg *msg);
3208 __u32 lustre_msg_get_service_time(struct lustre_msg *msg);
3209 char *lustre_msg_get_jobid(struct lustre_msg *msg);
3210 __u32 lustre_msg_get_cksum(struct lustre_msg *msg);
3211 #if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 7, 50, 0)
3212 __u32 lustre_msg_calc_cksum(struct lustre_msg *msg, int compat18);
3214 # warning "remove checksum compatibility support for b1_8"
3215 __u32 lustre_msg_calc_cksum(struct lustre_msg *msg);
3217 void lustre_msg_set_handle(struct lustre_msg *msg, struct lustre_handle *handle);
3218 void lustre_msg_set_type(struct lustre_msg *msg, __u32 type);
3219 void lustre_msg_set_opc(struct lustre_msg *msg, __u32 opc);
3220 void lustre_msg_set_last_xid(struct lustre_msg *msg, __u64 last_xid);
3221 void lustre_msg_set_last_committed(struct lustre_msg *msg, __u64 last_committed);
3222 void lustre_msg_set_versions(struct lustre_msg *msg, __u64 *versions);
3223 void lustre_msg_set_transno(struct lustre_msg *msg, __u64 transno);
3224 void lustre_msg_set_status(struct lustre_msg *msg, __u32 status);
3225 void lustre_msg_set_conn_cnt(struct lustre_msg *msg, __u32 conn_cnt);
3226 void ptlrpc_req_set_repsize(struct ptlrpc_request *req, int count, __u32 *sizes);
3227 void ptlrpc_request_set_replen(struct ptlrpc_request *req);
3228 void lustre_msg_set_timeout(struct lustre_msg *msg, __u32 timeout);
3229 void lustre_msg_set_service_time(struct lustre_msg *msg, __u32 service_time);
3230 void lustre_msg_set_jobid(struct lustre_msg *msg, char *jobid);
3231 void lustre_msg_set_cksum(struct lustre_msg *msg, __u32 cksum);
3233 static inline void
3234 lustre_shrink_reply(struct ptlrpc_request *req, int segment,
3235                     unsigned int newlen, int move_data)
3236 {
3237         LASSERT(req->rq_reply_state);
3238         LASSERT(req->rq_repmsg);
3239         req->rq_replen = lustre_shrink_msg(req->rq_repmsg, segment,
3240                                            newlen, move_data);
3241 }
3243 #ifdef LUSTRE_TRANSLATE_ERRNOS
3245 static inline int ptlrpc_status_hton(int h)
3246 {
3247         /*
3248          * Positive errnos must be network errnos, such as LUSTRE_EDEADLK,
3249          * ELDLM_LOCK_ABORTED, etc.
3250          */
3251         if (h < 0)
3252                 return -lustre_errno_hton(-h);
3253         else
3254                 return h;
3255 }
3257 static inline int ptlrpc_status_ntoh(int n)
3258 {
3259         /*
3260          * See the comment in ptlrpc_status_hton().
3261          */
3262         if (n < 0)
3263                 return -lustre_errno_ntoh(-n);
3264         else
3265                 return n;
3266 }
3270 #define ptlrpc_status_hton(h) (h)
3271 #define ptlrpc_status_ntoh(n) (n)
3276 /** Change request phase of \a req to \a new_phase */
3277 static inline void
3278 ptlrpc_rqphase_move(struct ptlrpc_request *req, enum rq_phase new_phase)
3279 {
3280         if (req->rq_phase == new_phase)
3281                 return;
3283         if (new_phase == RQ_PHASE_UNREGISTERING) {
3284                 req->rq_next_phase = req->rq_phase;
3285                 if (req->rq_import != NULL)
3286                         atomic_inc(&req->rq_import->imp_unregistering);
3287         }
3289         if (req->rq_phase == RQ_PHASE_UNREGISTERING) {
3290                 if (req->rq_import != NULL)
3291                         atomic_dec(&req->rq_import->imp_unregistering);
3292         }
3294 DEBUG_REQ(D_INFO, req, "move req \"%s\" -> \"%s\"",
3295 ptlrpc_rqphase2str(req), ptlrpc_phase2str(new_phase));
3297 req->rq_phase = new_phase;
3301 * Returns true if request \a req got early reply and hard deadline is not met
3303 static inline int
3304 ptlrpc_client_early(struct ptlrpc_request *req)
3305 {
3306         if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
3307             req->rq_reply_deadline > cfs_time_current_sec())
3308                 return 0;
3309         return req->rq_early;
3310 }
3313 * Returns true if we got real reply from server for this request
3315 static inline int
3316 ptlrpc_client_replied(struct ptlrpc_request *req)
3317 {
3318         if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
3319             req->rq_reply_deadline > cfs_time_current_sec())
3320                 return 0;
3321         return req->rq_replied;
3322 }
3324 /** Returns true if request \a req is in process of receiving server reply */
3325 static inline int
3326 ptlrpc_client_recv(struct ptlrpc_request *req)
3327 {
3328         if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
3329             req->rq_reply_deadline > cfs_time_current_sec())
3330                 return 1;
3331         return req->rq_receiving_reply;
3332 }
3334 static inline int
3335 ptlrpc_client_recv_or_unlink(struct ptlrpc_request *req)
3336 {
3337         int rc;
3339         spin_lock(&req->rq_lock);
3340         if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
3341             req->rq_reply_deadline > cfs_time_current_sec()) {
3342                 spin_unlock(&req->rq_lock);
3343                 return 1;
3344         }
3345         rc = req->rq_receiving_reply;
3346         rc = rc || req->rq_req_unlink || req->rq_reply_unlink;
3347         spin_unlock(&req->rq_lock);
3348         return rc;
3349 }
3351 static inline void
3352 ptlrpc_client_wake_req(struct ptlrpc_request *req)
3353 {
3354         if (req->rq_set == NULL)
3355                 wake_up(&req->rq_reply_waitq);
3356         else
3357                 wake_up(&req->rq_set->set_waitq);
3358 }
3361 ptlrpc_rs_addref(struct ptlrpc_reply_state *rs)
3363 LASSERT(atomic_read(&rs->rs_refcount) > 0);
3364 atomic_inc(&rs->rs_refcount);
3368 ptlrpc_rs_decref(struct ptlrpc_reply_state *rs)
3370 LASSERT(atomic_read(&rs->rs_refcount) > 0);
3371 if (atomic_dec_and_test(&rs->rs_refcount))
3372 lustre_free_reply_state(rs);
3375 /* Should only be called once per req */
3376 static inline void ptlrpc_req_drop_rs(struct ptlrpc_request *req)
3378 if (req->rq_reply_state == NULL)
3379 return; /* shouldn't occur */
3380 ptlrpc_rs_decref(req->rq_reply_state);
3381 req->rq_reply_state = NULL;
3382 req->rq_repmsg = NULL;
3385 static inline __u32 lustre_request_magic(struct ptlrpc_request *req)
3387 return lustre_msg_get_magic(req->rq_reqmsg);
3390 static inline int ptlrpc_req_get_repsize(struct ptlrpc_request *req)
3391 {
3392         switch (req->rq_reqmsg->lm_magic) {
3393         case LUSTRE_MSG_MAGIC_V2:
3394                 return req->rq_reqmsg->lm_repsize;
3395         default:
3396                 LASSERTF(0, "incorrect message magic: %08x\n",
3397                          req->rq_reqmsg->lm_magic);
3398                 return -EFAULT;
3399         }
3400 }
3402 static inline int ptlrpc_send_limit_expired(struct ptlrpc_request *req)
3403 {
3404         if (req->rq_delay_limit != 0 &&
3405             cfs_time_before(cfs_time_add(req->rq_queued_time,
3406                                          cfs_time_seconds(req->rq_delay_limit)),
3407                             cfs_time_current())) {
3408                 return 1;
3409         }
3410         return 0;
3411 }
3413 static inline int ptlrpc_no_resend(struct ptlrpc_request *req)
3415 if (!req->rq_no_resend && ptlrpc_send_limit_expired(req)) {
3416 spin_lock(&req->rq_lock);
3417 req->rq_no_resend = 1;
3418 spin_unlock(&req->rq_lock);
3420 return req->rq_no_resend;
3424 ptlrpc_server_get_timeout(struct ptlrpc_service_part *svcpt)
3426 int at = AT_OFF ? 0 : at_get(&svcpt->scp_at_estimate);
3428 return svcpt->scp_service->srv_watchdog_factor *
3429 max_t(int, at, obd_timeout);
3432 static inline struct ptlrpc_service *
3433 ptlrpc_req2svc(struct ptlrpc_request *req)
3435 LASSERT(req->rq_rqbd != NULL);
3436 return req->rq_rqbd->rqbd_svcpt->scp_service;
3439 /* ldlm/ldlm_lib.c */
3441 * Target client logic
3444 int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg);
3445 int client_obd_cleanup(struct obd_device *obddev);
3446 int client_connect_import(const struct lu_env *env,
3447 struct obd_export **exp, struct obd_device *obd,
3448 struct obd_uuid *cluuid, struct obd_connect_data *,
3450 int client_disconnect_export(struct obd_export *exp);
3451 int client_import_add_conn(struct obd_import *imp, struct obd_uuid *uuid,
3453 int client_import_del_conn(struct obd_import *imp, struct obd_uuid *uuid);
3454 int client_import_find_conn(struct obd_import *imp, lnet_nid_t peer,
3455 struct obd_uuid *uuid);
3456 int import_set_conn_priority(struct obd_import *imp, struct obd_uuid *uuid);
3457 void client_destroy_import(struct obd_import *imp);
3460 #ifdef HAVE_SERVER_SUPPORT
3461 int server_disconnect_export(struct obd_export *exp);
3464 /* ptlrpc/pinger.c */
3466 * Pinger API (client side only)
3469 enum timeout_event {
3472 struct timeout_item;
3473 typedef int (*timeout_cb_t)(struct timeout_item *, void *);
3474 int ptlrpc_pinger_add_import(struct obd_import *imp);
3475 int ptlrpc_pinger_del_import(struct obd_import *imp);
3476 int ptlrpc_add_timeout_client(int time, enum timeout_event event,
3477 timeout_cb_t cb, void *data,
3478 struct list_head *obd_list);
3479 int ptlrpc_del_timeout_client(struct list_head *obd_list,
3480 enum timeout_event event);
3481 struct ptlrpc_request * ptlrpc_prep_ping(struct obd_import *imp);
3482 int ptlrpc_obd_ping(struct obd_device *obd);
3484 void ping_evictor_start(void);
3485 void ping_evictor_stop(void);
3487 #define ping_evictor_start() do {} while (0)
3488 #define ping_evictor_stop() do {} while (0)
3490 void ptlrpc_pinger_ir_up(void);
3491 void ptlrpc_pinger_ir_down(void);
3493 int ptlrpc_pinger_suppress_pings(void);
3495 /* ptlrpc daemon bind policy */
3496 typedef enum {
3497         /* all ptlrpcd threads are free mode */
3498 PDB_POLICY_NONE = 1,
3499 /* all ptlrpcd threads are bound mode */
3500 PDB_POLICY_FULL = 2,
3501 /* <free1 bound1> <free2 bound2> ... <freeN boundN> */
3502 PDB_POLICY_PAIR = 3,
3503         /* <free1 bound1> <bound1 free2> ... <freeN boundN> <boundN free1>,
3504          * means each ptlrpcd[X] has two partners: thread[X-1] and thread[X+1].
3505          * If the kernel supports NUMA, ptlrpcd threads are bound and
3506          * grouped by NUMA node */
3507         PDB_POLICY_NEIGHBOR = 4,
3508 } pdb_policy_t;
3510 /* ptlrpc daemon load policy
3511  * It is the caller's duty to specify how to push an async RPC into some
3512  * ptlrpcd queue, but this is not enforced and is affected by
3513  * "ptlrpcd_bind_policy": if it is "PDB_POLICY_FULL", the RPC is processed
3514  * by the selected ptlrpcd; otherwise the RPC may be processed by the
3515  * selected ptlrpcd or its partner, whichever is scheduled first. */
3516 typedef enum {
3517         /* on the same CPU core as the caller */
3518 PDL_POLICY_SAME = 1,
3519 /* within the same CPU partition, but not the same core as the caller */
3520 PDL_POLICY_LOCAL = 2,
3521 /* round-robin on all CPU cores, but not the same core as the caller */
3522 PDL_POLICY_ROUND = 3,
3523 /* the specified CPU core is preferred, but not enforced */
3524         PDL_POLICY_PREFERRED = 4,
3525 } pdl_policy_t;
3527 /* ptlrpc/ptlrpcd.c */
3528 void ptlrpcd_stop(struct ptlrpcd_ctl *pc, int force);
3529 void ptlrpcd_free(struct ptlrpcd_ctl *pc);
3530 void ptlrpcd_wake(struct ptlrpc_request *req);
3531 void ptlrpcd_add_req(struct ptlrpc_request *req, pdl_policy_t policy, int idx);
3532 void ptlrpcd_add_rqset(struct ptlrpc_request_set *set);
3533 int ptlrpcd_addref(void);
3534 void ptlrpcd_decref(void);
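/*
 * A usage sketch (editorial, hedged): handing an asynchronous request to
 * ptlrpcd; an interpret callback installed as shown earlier runs when the
 * reply arrives. An idx of -1 is assumed to mean "no preferred ptlrpcd
 * index".
 *
 * \code
 * req->rq_interpret_reply = my_interpret;     // hypothetical, see above
 * ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
 * \endcode
 */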
3536 /* ptlrpc/lproc_ptlrpc.c */
3538 * procfs output related functions
3541 const char* ll_opcode2str(__u32 opcode);
3543 void ptlrpc_lprocfs_register_obd(struct obd_device *obd);
3544 void ptlrpc_lprocfs_unregister_obd(struct obd_device *obd);
3545 void ptlrpc_lprocfs_brw(struct ptlrpc_request *req, int bytes);
3547 static inline void ptlrpc_lprocfs_register_obd(struct obd_device *obd) {}
3548 static inline void ptlrpc_lprocfs_unregister_obd(struct obd_device *obd) {}
3549 static inline void ptlrpc_lprocfs_brw(struct ptlrpc_request *req, int bytes) {}
3553 /* ptlrpc/llog_server.c */
3554 int llog_origin_handle_open(struct ptlrpc_request *req);
3555 int llog_origin_handle_destroy(struct ptlrpc_request *req);
3556 int llog_origin_handle_prev_block(struct ptlrpc_request *req);
3557 int llog_origin_handle_next_block(struct ptlrpc_request *req);
3558 int llog_origin_handle_read_header(struct ptlrpc_request *req);
3559 int llog_origin_handle_close(struct ptlrpc_request *req);
3561 /* ptlrpc/llog_client.c */
3562 extern struct llog_operations llog_client_ops;