diff --git a/lustre/include/lustre_net.h b/lustre/include/lustre_net.h
index a85c8c3..cd6225a 100644
--- a/lustre/include/lustre_net.h
+++ b/lustre/include/lustre_net.h
@@ -1,6 +1,4 @@
-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
  * GPL HEADER START
  *
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -28,6 +26,8 @@
 /*
  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
+ *
+ * Copyright (c) 2010, 2012, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -83,12 +83,39 @@
 #define PTLRPC_MD_OPTIONS 0
 
 /**
- * Define maxima for bulk I/O
- * CAVEAT EMPTOR, with multinet (i.e. routers forwarding between networks)
- * these limits are system wide and not interface-local. */
-#define PTLRPC_MAX_BRW_BITS     LNET_MTU_BITS
-#define PTLRPC_MAX_BRW_SIZE     (1 << LNET_MTU_BITS)
-#define PTLRPC_MAX_BRW_PAGES    (PTLRPC_MAX_BRW_SIZE >> CFS_PAGE_SHIFT)
+ * Max # of bulk operations in one request.
+ * In order for the client and server to properly negotiate the maximum
+ * possible transfer size, PTLRPC_BULK_OPS_COUNT must be a power-of-two
+ * value. The client is free to limit the actual RPC size for any bulk
+ * transfer via cl_max_pages_per_rpc to some non-power-of-two value. */
+#define PTLRPC_BULK_OPS_BITS    2
+#define PTLRPC_BULK_OPS_COUNT   (1U << PTLRPC_BULK_OPS_BITS)
+/**
+ * PTLRPC_BULK_OPS_MASK is for the convenience of the client only, and
+ * should not be used on the server at all. Otherwise, it imposes a
+ * protocol limitation on the maximum RPC size that can be used by any
+ * RPC sent to that server in the future. Instead, the server should
+ * use the negotiated per-client ocd_brw_size to determine the bulk
+ * RPC count. */
+#define PTLRPC_BULK_OPS_MASK    (~((__u64)PTLRPC_BULK_OPS_COUNT - 1))
+
+/**
+ * Define maxima for bulk I/O.
+ *
+ * A single PTLRPC BRW request is sent via up to PTLRPC_BULK_OPS_COUNT
+ * of LNET_MTU sized RDMA transfers. Clients and servers negotiate the
+ * currently supported maximum between peers at connect via ocd_brw_size.
+ */
+#define PTLRPC_MAX_BRW_BITS     (LNET_MTU_BITS + PTLRPC_BULK_OPS_BITS)
+#define PTLRPC_MAX_BRW_SIZE     (1 << PTLRPC_MAX_BRW_BITS)
+#define PTLRPC_MAX_BRW_PAGES    (PTLRPC_MAX_BRW_SIZE >> CFS_PAGE_SHIFT)
+
+#define ONE_MB_BRW_SIZE         (1 << LNET_MTU_BITS)
+#define MD_MAX_BRW_SIZE         (1 << LNET_MTU_BITS)
+#define MD_MAX_BRW_PAGES        (MD_MAX_BRW_SIZE >> CFS_PAGE_SHIFT)
+#define DT_MAX_BRW_SIZE         PTLRPC_MAX_BRW_SIZE
+#define DT_MAX_BRW_PAGES        (DT_MAX_BRW_SIZE >> CFS_PAGE_SHIFT)
+#define OFD_MAX_BRW_SIZE        (1 << LNET_MTU_BITS)
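[Editor's note] To make the negotiation arithmetic above concrete, here is a minimal standalone sketch of how a negotiated ocd_brw_size maps onto LNET_MTU sized transfers. The helper name is hypothetical and LNET_MTU_BITS = 20 (1 MiB) is an assumption, not taken from this patch:

#include <assert.h>

#define LNET_MTU_BITS           20                      /* assumed: 1 MiB LNet MTU */
#define LNET_MTU                (1 << LNET_MTU_BITS)
#define PTLRPC_BULK_OPS_BITS    2
#define PTLRPC_BULK_OPS_COUNT   (1U << PTLRPC_BULK_OPS_BITS)

/* Hypothetical helper: number of LNET_MTU sized RDMA transfers needed for
 * one bulk of "brw_size" bytes; never more than PTLRPC_BULK_OPS_COUNT per
 * request, per the comment above. */
static unsigned int brw_size_to_bulk_ops(unsigned int brw_size)
{
        unsigned int ops = (brw_size + LNET_MTU - 1) / LNET_MTU;

        assert(ops >= 1 && ops <= PTLRPC_BULK_OPS_COUNT);
        return ops;
}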
 
 /* When PAGE_SIZE is a constant, we can check our arithmetic here with cpp!
  */
 #ifdef __KERNEL__
@@ -98,21 +125,20 @@
 # if (PTLRPC_MAX_BRW_SIZE != (PTLRPC_MAX_BRW_PAGES * CFS_PAGE_SIZE))
 #  error "PTLRPC_MAX_BRW_SIZE isn't PTLRPC_MAX_BRW_PAGES * CFS_PAGE_SIZE"
 # endif
-# if (PTLRPC_MAX_BRW_SIZE > LNET_MTU)
+# if (PTLRPC_MAX_BRW_SIZE > LNET_MTU * PTLRPC_BULK_OPS_COUNT)
 #  error "PTLRPC_MAX_BRW_SIZE too big"
 # endif
-# if (PTLRPC_MAX_BRW_PAGES > LNET_MAX_IOV)
+# if (PTLRPC_MAX_BRW_PAGES > LNET_MAX_IOV * PTLRPC_BULK_OPS_COUNT)
 #  error "PTLRPC_MAX_BRW_PAGES too big"
 # endif
 #endif /* __KERNEL__ */
 
-/* Size over which to OBD_VMALLOC() rather than OBD_ALLOC() service request
- * buffers */
-#define SVC_BUF_VMALLOC_THRESHOLD (2 * CFS_PAGE_SIZE)
+#define PTLRPC_NTHRS_INIT       2
 
 /**
- * The following constants determine how memory is used to buffer incoming
- * service requests.
+ * Buffer Constants
+ *
+ * Constants determine how memory is used to buffer incoming service requests.
  *
  * ?_NBUFS              # buffers to allocate when growing the pool
  * ?_BUFSIZE            # bytes in a single request buffer
@@ -124,53 +150,255 @@
  * Messages larger than ?_MAXREQSIZE are dropped. Request buffers are
  * considered full when less than ?_MAXREQSIZE is left in them.
  */
-#define LDLM_THREADS_AUTO_MIN (2)
-#define LDLM_THREADS_AUTO_MAX min_t(unsigned, cfs_num_online_cpus() * \
-                                  cfs_num_online_cpus() * 32, 128)
-#define LDLM_BL_THREADS  LDLM_THREADS_AUTO_MIN
-#define LDLM_NBUFS      (64 * cfs_num_online_cpus())
-#define LDLM_BUFSIZE    (8 * 1024)
-#define LDLM_MAXREQSIZE (5 * 1024)
-#define LDLM_MAXREPSIZE (1024)
-
-#define MDT_MIN_THREADS 2UL
-#define MDT_MAX_THREADS 512UL
-#define MDT_NUM_THREADS max(min_t(unsigned long, MDT_MAX_THREADS, \
-                                  cfs_num_physpages >> (25 - CFS_PAGE_SHIFT)), \
-                            2UL)
-
-/** Absolute limits */
-#define MDS_THREADS_MIN 2
-#define MDS_THREADS_MAX 512
-#define MDS_THREADS_MIN_READPAGE 2
-#define MDS_NBUFS       (64 * cfs_num_online_cpus())
-#define MDS_BUFSIZE     (8 * 1024)
+/**
+ * Thread Constants
+ *
+ * Constants determine how threads are created for a ptlrpc service.
+ *
+ * ?_NTHRS_INIT   # threads to create for each service partition on
+ *                  initializing. If it's a non-affinity service and
+ *                  there is only one partition, it's the overall #
+ *                  threads for the service while initializing.
+ * ?_NTHRS_BASE   # minimum threads that should be created for each
+ *                  ptlrpc partition to keep the service healthy.
+ *                  It's the low-water mark of the per-partition
+ *                  thread upper limit.
+ * ?_THR_FACTOR   # threads that can be added to the thread upper limit
+ *                  for each CPU core. This factor is only a reference:
+ *                  we might decrease it if the number of cores per CPT
+ *                  is above a limit.
+ * ?_NTHRS_MAX    # overall threads that can be created for a service.
+ *                  It's a soft limit: if the service is running on a
+ *                  machine with hundreds of cores and tens of CPU
+ *                  partitions, we need to guarantee each partition has
+ *                  ?_NTHRS_BASE threads, which means the total thread
+ *                  count will be ?_NTHRS_BASE * number_of_cpts and can
+ *                  exceed ?_NTHRS_MAX.
+ *
+ * Examples
+ *
+ * #define MDS_NTHRS_INIT       2
+ * #define MDS_NTHRS_BASE       64
+ * #define MDS_NTHRS_FACTOR     8
+ * #define MDS_NTHRS_MAX        1024
+ *
+ * Example 1):
+ * ---------------------------------------------------------------------
+ * Server(A) has 16 cores, user configured it to 4 partitions so each
+ * partition has 4 cores, then actual number of service threads on each
+ * partition is:
+ *     MDS_NTHRS_BASE(64) + cores(4) * MDS_NTHRS_FACTOR(8) = 96
+ *
+ * Total number of threads for the service is:
+ *     96 * partitions(4) = 384
+ *
+ * Example 2):
+ * ---------------------------------------------------------------------
+ * Server(B) has 32 cores, user configured it to 4 partitions so each
+ * partition has 8 cores, then actual number of service threads on each
+ * partition is:
+ *     MDS_NTHRS_BASE(64) + cores(8) * MDS_NTHRS_FACTOR(8) = 128
+ *
+ * Total number of threads for the service is:
+ *     128 * partitions(4) = 512
+ *
+ * Example 3):
+ * ---------------------------------------------------------------------
+ * Server(B) has 96 cores, user configured it to 8 partitions so each
+ * partition has 12 cores, then actual number of service threads on each
+ * partition is:
+ *     MDS_NTHRS_BASE(64) + cores(12) * MDS_NTHRS_FACTOR(8) = 160
+ *
+ * Total number of threads for the service is:
+ *     160 * partitions(8) = 1280
+ *
+ * However, this is above the soft limit MDS_NTHRS_MAX, so we choose the
+ * following as the per-partition thread upper limit instead:
+ *     MDS_NTHRS_MAX(1024) / partitions(8) = 128
+ *
+ * Example 4):
+ * ---------------------------------------------------------------------
+ * Server(C) has a thousand cores and user configured it to 32 partitions:
+ *     MDS_NTHRS_BASE(64) * 32 = 2048
+ *
+ * which is already above the soft limit MDS_NTHRS_MAX(1024), but we still
+ * need to guarantee that each partition has at least MDS_NTHRS_BASE(64)
+ * threads to keep the service healthy, so the total number of threads
+ * will just be 2048.
+ *
+ * NB: we don't suggest choosing a server with that many cores because the
+ *     backend filesystem itself, buffer cache, or underlying network stack
+ *     might have some SMP scalability issues at that large scale.
+ *
+ *     If user already has a fat machine with hundreds or thousands of
+ *     cores, there are two choices for configuration:
+ *     a) create a CPU table from a subset of all CPUs and run Lustre on
+ *        top of this subset
+ *     b) bind service threads on a few partitions, see module parameters
+ *        of MDS and OSS for details
+ *
+ * NB: these calculations (and the examples above) are simplified to help
+ *     understanding; the real implementation is a little more complex,
+ *     please see ptlrpc_server_nthreads_check() for details.
+ *
+ */
+
+ /*
+  * LDLM threads constants:
+  *
+  * Given 8 as factor and 24 as base threads number
+  *
+  * example 1)
+  * On 4-core machine we will have 24 + 8 * 4 = 56 threads.
+  *
+  * example 2)
+  * On 8-core machine with 2 partitions we will have 24 + 4 * 8 = 56
+  * threads for each partition and total threads number will be 112.
+  *
+  * example 3)
+  * On 64-core machine with 8 partitions we will need LDLM_NTHRS_BASE(24)
+  * threads for each partition to keep service healthy, so total threads
+  * number should be 24 * 8 = 192.
+  *
+  * So with these constants, threads number will be at a similar level
+  * to old versions, unless the target machine has over a hundred cores
+  */
+#define LDLM_THR_FACTOR         8
+#define LDLM_NTHRS_INIT         PTLRPC_NTHRS_INIT
+#define LDLM_NTHRS_BASE         24
+#define LDLM_NTHRS_MAX          (cfs_num_online_cpus() == 1 ? 64 : 128)
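[Editor's note] The LDLM and MDS examples above all follow one pattern; a simplified model of it is sketched below. This is an illustration only — the hypothetical helper ignores the refinements in ptlrpc_server_nthreads_check():

/* Per-partition thread limit: base + cores-per-partition * factor,
 * clamped by the service-wide soft limit but never below the base. */
static int nthrs_per_partition(int base, int factor, int max,
                               int ncores, int ncpts)
{
        int nthrs = base + (ncores / ncpts) * factor;

        if (nthrs * ncpts > max)
                nthrs = max / ncpts;    /* soft limit, cf. example 3 */
        if (nthrs < base)
                nthrs = base;           /* low-water mark, cf. example 4 */
        return nthrs;
}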
+
+#define LDLM_BL_THREADS  LDLM_NTHRS_AUTO_INIT
+#define LDLM_CLIENT_NBUFS 1
+#define LDLM_SERVER_NBUFS 64
+#define LDLM_BUFSIZE    (8 * 1024)
+#define LDLM_MAXREQSIZE (5 * 1024)
+#define LDLM_MAXREPSIZE (1024)
+
+ /*
+  * MDS threads constants:
+  *
+  * Please see the examples in "Thread Constants"; the MDS thread count will
+  * be comparable to old versions, unless the server has many cores.
+  */
+#ifndef MDS_MAX_THREADS
+#define MDS_MAX_THREADS         1024
+#define MDS_MAX_OTHR_THREADS    256
+
+#else /* MDS_MAX_THREADS */
+#if MDS_MAX_THREADS < PTLRPC_NTHRS_INIT
+#undef MDS_MAX_THREADS
+#define MDS_MAX_THREADS PTLRPC_NTHRS_INIT
+#endif
+#define MDS_MAX_OTHR_THREADS    max(PTLRPC_NTHRS_INIT, MDS_MAX_THREADS / 2)
+#endif
+
+/* default service */
+#define MDS_THR_FACTOR          8
+#define MDS_NTHRS_INIT          PTLRPC_NTHRS_INIT
+#define MDS_NTHRS_MAX           MDS_MAX_THREADS
+#define MDS_NTHRS_BASE          min(64, MDS_NTHRS_MAX)
+
+/* read-page service */
+#define MDS_RDPG_THR_FACTOR     4
+#define MDS_RDPG_NTHRS_INIT     PTLRPC_NTHRS_INIT
+#define MDS_RDPG_NTHRS_MAX      MDS_MAX_OTHR_THREADS
+#define MDS_RDPG_NTHRS_BASE     min(48, MDS_RDPG_NTHRS_MAX)
+
+/* these should be removed when we remove setattr service in the future */
+#define MDS_SETA_THR_FACTOR     4
+#define MDS_SETA_NTHRS_INIT     PTLRPC_NTHRS_INIT
+#define MDS_SETA_NTHRS_MAX      MDS_MAX_OTHR_THREADS
+#define MDS_SETA_NTHRS_BASE     min(48, MDS_SETA_NTHRS_MAX)
+
+/* non-affinity threads */
+#define MDS_OTHR_NTHRS_INIT     PTLRPC_NTHRS_INIT
+#define MDS_OTHR_NTHRS_MAX      MDS_MAX_OTHR_THREADS
+
+#define MDS_NBUFS               64
 /**
  * Assume file name length = FNAME_MAX = 256 (true for ext3).
- *        path name length = PATH_MAX = 4096
- *        LOV MD size max  = EA_MAX = 4000
+ *        path name length = PATH_MAX = 4096
+ *        LOV MD size max  = EA_MAX = 24 * 2000
+ *              (NB: 24 is size of lov_ost_data)
+ *        LOV LOGCOOKIE size max = 32 * 2000
+ *              (NB: 32 is size of llog_cookie)
  * symlink:  FNAME_MAX + PATH_MAX  <- largest
  * link:     FNAME_MAX + PATH_MAX  (mds_rec_link < mds_rec_create)
  * rename:   FNAME_MAX + FNAME_MAX
  * open:     FNAME_MAX + EA_MAX
  *
  * MDS_MAXREQSIZE ~= 4736 bytes =
- * lustre_msg + ldlm_request + mds_body + mds_rec_create + FNAME_MAX + PATH_MAX
+ * lustre_msg + ldlm_request + mdt_body + mds_rec_create + FNAME_MAX + PATH_MAX
  * MDS_MAXREPSIZE ~= 8300 bytes = lustre_msg + llog_header
- * or, for mds_close() and mds_reint_unlink() on a many-OST filesystem:
- *      = 9210 bytes = lustre_msg + mds_body + 160 * (easize + cookiesize)
  *
  * Realistic size is about 512 bytes (20 character name + 128 char symlink),
  * except in the open case where there are a large number of OSTs in a LOV.
  */
-#define MDS_MAXREQSIZE  (5 * 1024)
-#define MDS_MAXREPSIZE  max(9 * 1024, 362 + LOV_MAX_STRIPE_COUNT * 56)
+#define MDS_MAXREQSIZE          (5 * 1024)      /* >= 4736 */
+#define MDS_MAXREPSIZE          (9 * 1024)      /* >= 8300 */
+
+/**
+ * MDS incoming request with LOV EA
+ * 24 = sizeof(struct lov_ost_data), i.e. replay of opencreate
+ */
+#define MDS_LOV_MAXREQSIZE      max(MDS_MAXREQSIZE, \
+                                    362 + LOV_MAX_STRIPE_COUNT * 24)
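[Editor's note] To put numbers on the macro above (assuming LOV_MAX_STRIPE_COUNT is 2000, which is what the "24 * 2000" note earlier suggests):

/* Worked example, assuming LOV_MAX_STRIPE_COUNT == 2000:
 *
 *      MDS_LOV_MAXREQSIZE = 362 + 2000 * 24 = 48362 bytes (~47K)
 *
 * With the 128K MDS_LOV_BUFSIZE chosen below (see the LU-2432 note),
 * a rqbd is retired once fewer than ~48K bytes remain, so worst-case
 * utilization is (131072 - 48362) / 131072 ~= 63%, i.e. the "about 62%"
 * figure quoted in the comment below. */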
+/**
+ * MDS outgoing reply with LOV EA
+ *
+ * NB: the max reply size a Lustre 2.4+ client can get from an old MDS is:
+ *     LOV_MAX_STRIPE_COUNT * (llog_cookie + lov_ost_data) + extra bytes
+ *
+ * but a 2.4 or later MDS will never send a reply with llog_cookie to any
+ * version of client. This macro is defined for the server-side reply
+ * buffer size.
+ */
+#define MDS_LOV_MAXREPSIZE      MDS_LOV_MAXREQSIZE
 
-/** FLD_MAXREQSIZE == lustre_msg + __u32 padding + ptlrpc_body + opc + md_fld */
+/**
+ * The update request includes all of the updates from the create, which
+ * might include linkea (4K maximum), together with other updates; we set
+ * it to 9K:
+ * lustre_msg + ptlrpc_body + UPDATE_BUF_SIZE (8K)
+ */
+#define MDS_OUT_MAXREQSIZE      (9 * 1024)
+#define MDS_OUT_MAXREPSIZE      MDS_MAXREPSIZE
+
+/** MDS_BUFSIZE = max_reqsize (w/o LOV EA) + max sptlrpc payload size */
+#define MDS_BUFSIZE             max_t(int, MDS_MAXREQSIZE + 1024, 8 * 1024)
+
+/**
+ * MDS_LOV_BUFSIZE should be at least max_reqsize (with LOV EA) +
+ * max sptlrpc payload size. However, we need to allocate a much larger
+ * buffer for it because LNet requires each MD(rqbd) to have at least
+ * MDS_LOV_MAXREQSIZE bytes left to avoid dropping a maximum-sized
+ * incoming request. So if MDS_LOV_BUFSIZE were only a little larger than
+ * MDS_LOV_MAXREQSIZE, it could only fit one request even with 48K bytes
+ * left in a rqbd, and memory utilization would be very low.
+ *
+ * Meanwhile, the size of a rqbd can't be too large, because a rqbd can't
+ * be reused until all requests that fit in it have been processed and
+ * released, which means one long-blocked request can prevent the rqbd
+ * from being reused. Now we set the request buffer size to 128K, so even
+ * if each rqbd is unlinked from LNet with 48K unused, buffer utilization
+ * will be about 62%. Please check LU-2432 for details.
+ */
+/** MDS_LOV_BUFSIZE = max_reqsize (w/ LOV EA) + max sptlrpc payload size */
+#define MDS_LOV_BUFSIZE         max_t(int, MDS_LOV_MAXREQSIZE + 1024, \
+                                      128 * 1024)
+
+/**
+ * MDS_OUT_BUFSIZE = max_out_reqsize + max sptlrpc payload (~1K), which is
+ * about 10K; for the same reason as MDS_LOV_BUFSIZE, we also give some
+ * extra bytes to each request buffer to improve the buffer utilization
+ * rate.
+ */
+#define MDS_OUT_BUFSIZE         max_t(int, MDS_OUT_MAXREQSIZE + 1024, \
+                                      24 * 1024)
+
+/** FLD_MAXREQSIZE == lustre_msg + __u32 padding + ptlrpc_body + opc */
 #define FLD_MAXREQSIZE  (160)
 
-/** FLD_MAXREPSIZE == lustre_msg + ptlrpc_body + md_fld */
+/** FLD_MAXREPSIZE == lustre_msg + ptlrpc_body */
 #define FLD_MAXREPSIZE  (152)
+#define FLD_BUFSIZE     (1 << 12)
 
 /**
  * SEQ_MAXREQSIZE == lustre_msg + __u32 padding + ptlrpc_body + opc + lu_range +
@@ -179,30 +407,87 @@
 /** SEQ_MAXREPSIZE == lustre_msg + ptlrpc_body + lu_range */
 #define SEQ_MAXREPSIZE  (152)
+#define SEQ_BUFSIZE     (1 << 12)
 
 /** MGS threads must be >= 3, see bug 22458 comment #28 */
-#define MGS_THREADS_AUTO_MIN 3
-#define MGS_THREADS_AUTO_MAX 32
-#define MGS_NBUFS       (64 * cfs_num_online_cpus())
+#define MGS_NTHRS_INIT  (PTLRPC_NTHRS_INIT + 1)
+#define MGS_NTHRS_MAX   32
+
+#define MGS_NBUFS       64
 #define MGS_BUFSIZE     (8 * 1024)
 #define MGS_MAXREQSIZE  (7 * 1024)
 #define MGS_MAXREPSIZE  (9 * 1024)
 
-/** Absolute OSS limits */
-#define OSS_THREADS_MIN 3       /* difficult replies, HPQ, others */
-#define OSS_THREADS_MAX 512
-#define OST_NBUFS       (64 * cfs_num_online_cpus())
-#define OST_BUFSIZE     (8 * 1024)
+ /*
+  * OSS threads constants:
+  *
+  * Given 8 as factor and 64 as base threads number
+  *
+  * example 1):
+  * On 8-core server configured to 2 partitions, we will have
+  * 64 + 8 * 4 = 96 threads for each partition, 192 total threads.
+  *
+  * example 2):
+  * On 32-core machine configured to 4 partitions, we will have
+  * 64 + 8 * 8 = 128 threads for each partition, so total threads number
+  * will be 128 * 4 = 512.
+  *
+  * example 3):
+  * On 64-core machine configured to 4 partitions, we will have
+  * 64 + 16 * 8 = 192 threads for each partition, so total threads number
+  * will be 192 * 4 = 768 which is above limit OSS_NTHRS_MAX(512), so we
+  * cut off the value to OSS_NTHRS_MAX(512) / 4 which is 128 threads
+  * for each partition.
+  *
+  * So we can see that with these constants, threads number will be at a
+  * similar level to old versions, unless the server has many cores.
+  */
+ /* depress threads factor for VM with small memory size */
+#define OSS_THR_FACTOR          min_t(int, 8, \
+                                      CFS_NUM_CACHEPAGES >> (28 - CFS_PAGE_SHIFT))
+#define OSS_NTHRS_INIT          (PTLRPC_NTHRS_INIT + 1)
+#define OSS_NTHRS_BASE          64
+#define OSS_NTHRS_MAX           512
+
+/* threads for handling "create" request */
+#define OSS_CR_THR_FACTOR       1
+#define OSS_CR_NTHRS_INIT       PTLRPC_NTHRS_INIT
+#define OSS_CR_NTHRS_BASE       8
+#define OSS_CR_NTHRS_MAX        64
 
 /**
- * OST_MAXREQSIZE ~= 4768 bytes =
- * lustre_msg + obdo + 16 * obd_ioobj + 256 * niobuf_remote
+ * OST_IO_MAXREQSIZE ~=
+ * lustre_msg + ptlrpc_body + obdo + obd_ioobj +
+ * DT_MAX_BRW_PAGES * niobuf_remote
  *
  * - single object with 16 pages is 512 bytes
- * - OST_MAXREQSIZE must be at least 1 page of cookies plus some spillover
+ * - OST_IO_MAXREQSIZE must be at least 1 page of cookies plus some spillover
+ * - Must be a multiple of 1024
+ * - actual size is about 18K
 */
-#define OST_MAXREQSIZE  (5 * 1024)
-#define OST_MAXREPSIZE  (9 * 1024)
+#define _OST_MAXREQSIZE_SUM     (sizeof(struct lustre_msg) + \
+                                 sizeof(struct ptlrpc_body) + \
+                                 sizeof(struct obdo) + \
+                                 sizeof(struct obd_ioobj) + \
+                                 sizeof(struct niobuf_remote) * DT_MAX_BRW_PAGES)
+/**
+ * FIEMAP request can be 4K+ for now
+ */
+#define OST_MAXREQSIZE          (5 * 1024)
+#define OST_IO_MAXREQSIZE       max_t(int, OST_MAXREQSIZE, \
+                                      (((_OST_MAXREQSIZE_SUM - 1) | (1024 - 1)) + 1))
+
+#define OST_MAXREPSIZE          (9 * 1024)
+#define OST_IO_MAXREPSIZE       OST_MAXREPSIZE
+
+#define OST_NBUFS               64
+/** OST_BUFSIZE = max_reqsize + max sptlrpc payload size */
+#define OST_BUFSIZE             max_t(int, OST_MAXREQSIZE + 1024, 16 * 1024)
+/**
+ * OST_IO_MAXREQSIZE is 18K, so giving it an extra 46K can increase the
+ * buffer utilization rate of the request buffer; please check the comment
+ * on MDS_LOV_BUFSIZE for details.
+ */
+#define OST_IO_BUFSIZE          max_t(int, OST_IO_MAXREQSIZE + 1024, 64 * 1024)
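[Editor's note] The OST_IO_MAXREQSIZE expression above rounds _OST_MAXREQSIZE_SUM up to the next multiple of 1024 with a bit trick; a small standalone illustration (example value only):

/* Round x up to a multiple of 1024: set the low ten bits, then add 1. */
static unsigned int round_up_1k(unsigned int x)
{
        return ((x - 1) | (1024 - 1)) + 1;
}

/* e.g. round_up_1k(17 * 1024 + 300) == 18 * 1024, which matches the
 * "actual size is about 18K" note above. */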
 
 /* Macro to hide a typecast. */
 #define ptlrpc_req_async_args(req) ((void *)&req->rq_async_args)
 
@@ -243,17 +528,18 @@ struct ptlrpc_client {
 union ptlrpc_async_args {
         /**
          * Scratchpad for passing args to completion interpreter. Users
-         * cast to the struct of their choosing, and LASSERT that this is
+         * cast to the struct of their choosing, and CLASSERT that this is
          * big enough. For _tons_ of context, OBD_ALLOC a struct and store
          * a pointer to it here. The pointer_arg ensures this struct is at
          * least big enough for that. */
         void *pointer_arg[11];
-        __u64 space[6];
+        __u64 space[7];
 };
 
 struct ptlrpc_request_set;
 typedef int (*set_interpreter_func)(struct ptlrpc_request_set *, void *, int);
+typedef int (*set_producer_func)(struct ptlrpc_request_set *, void *);
 
 /**
  * Definition of request set structure.
@@ -267,31 +553,44 @@ typedef int (*set_interpreter_func)(struct ptlrpc_request_set *, void *, int);
  * returned.
  */
 struct ptlrpc_request_set {
-        /** number of uncompleted requests */
-        cfs_atomic_t          set_remaining;
-        /** wait queue to wait on for request events */
-        cfs_waitq_t           set_waitq;
-        cfs_waitq_t          *set_wakeup_ptr;
-        /** List of requests in the set */
-        cfs_list_t            set_requests;
-        /**
-         * List of completion callbacks to be called when the set is completed
-         * This is only used if \a set_interpret is NULL.
-         * Links struct ptlrpc_set_cbdata.
-         */
-        cfs_list_t            set_cblist;
-        /** Completion callback, if only one. */
-        set_interpreter_func  set_interpret;
-        /** opaque argument passed to completion \a set_interpret callback. */
-        void                 *set_arg;
-        /**
-         * Lock for \a set_new_requests manipulations
-         * locked so that any old caller can communicate requests to
-         * the set holder who can then fold them into the lock-free set
-         */
-        cfs_spinlock_t        set_new_req_lock;
-        /** List of new yet unsent requests. Only used with ptlrpcd now. */
-        cfs_list_t            set_new_requests;
+        cfs_atomic_t          set_refcount;
+        /** number of in-queue requests */
+        cfs_atomic_t          set_new_count;
+        /** number of uncompleted requests */
+        cfs_atomic_t          set_remaining;
+        /** wait queue to wait on for request events */
+        cfs_waitq_t           set_waitq;
+        cfs_waitq_t          *set_wakeup_ptr;
+        /** List of requests in the set */
+        cfs_list_t            set_requests;
+        /**
+         * List of completion callbacks to be called when the set is completed
+         * This is only used if \a set_interpret is NULL.
+         * Links struct ptlrpc_set_cbdata.
+         */
+        cfs_list_t            set_cblist;
+        /** Completion callback, if only one. */
+        set_interpreter_func  set_interpret;
+        /** opaque argument passed to completion \a set_interpret callback. */
+        void                 *set_arg;
+        /**
+         * Lock for \a set_new_requests manipulations
+         * locked so that any old caller can communicate requests to
+         * the set holder who can then fold them into the lock-free set
+         */
+        spinlock_t            set_new_req_lock;
+        /** List of new yet unsent requests. Only used with ptlrpcd now. */
+        cfs_list_t            set_new_requests;
+
+        /** rq_status of requests that have been freed already */
+        int                   set_rc;
+        /** Additional fields used by the flow control extension */
+        /** Maximum number of RPCs in flight */
+        int                   set_max_inflight;
+        /** Callback function used to generate RPCs */
+        set_producer_func     set_producer;
+        /** opaque argument passed to the producer callback */
+        void                 *set_producer_arg;
 };
 
 /**
@@ -307,6 +606,8 @@ struct ptlrpc_set_cbdata {
 };
 
 struct ptlrpc_bulk_desc;
+struct ptlrpc_service_part;
+struct ptlrpc_service;
 
 /**
  * ptlrpc callback & work item stuff
@@ -340,7 +641,7 @@ struct ptlrpc_reply_state {
         cfs_list_t            rs_debug_list;
 #endif
         /** A spinlock to protect the reply state flags */
-        cfs_spinlock_t        rs_lock;
+        spinlock_t            rs_lock;
         /** Reply state flags */
         unsigned long          rs_difficult:1;     /* ACK/commit stuff */
         unsigned long          rs_no_ack:1;    /* no ACK, even for
@@ -362,7 +663,7 @@ struct ptlrpc_reply_state {
         /** xid */
         __u64                 rs_xid;
         struct obd_export    *rs_export;
-        struct ptlrpc_service *rs_service;
+        struct ptlrpc_service_part *rs_svcpt;
         /** Lnet metadata handle for the reply */
         lnet_handle_md_t      rs_md_h;
         cfs_atomic_t          rs_refcount;
@@ -415,8 +716,8 @@ typedef int (*ptlrpc_interpterer_t)(const struct lu_env *env,
  * any allocations (to avoid e.g. OOM).
  */
 struct ptlrpc_request_pool {
-        /** Locks the list */
-        cfs_spinlock_t prp_lock;
+        /** Locks the list */
+        spinlock_t prp_lock;
         /** list of ptlrpc_request structs */
         cfs_list_t prp_req_list;
         /** Maximum message size that would fit into a request from this pool */
@@ -431,6 +732,658 @@ struct lu_env;
 
 struct ldlm_lock;
 
 /**
+ * \defgroup nrs Network Request Scheduler
+ * @{
+ */
+struct ptlrpc_nrs_policy;
+struct ptlrpc_nrs_resource;
+struct ptlrpc_nrs_request;
+
+/**
+ * NRS control operations.
+ *
+ * These are common for all policies.
+ */
+enum ptlrpc_nrs_ctl {
+        /**
+         * Activate the policy.
+         */
+        PTLRPC_NRS_CTL_START,
+        /**
+         * Reserved for multiple primary policies, which may be a possibility
+         * in the future.
+         */
+        PTLRPC_NRS_CTL_STOP,
+        /**
+         * Recycle resources for inactive policies.
+         */
+        PTLRPC_NRS_CTL_SHRINK,
+        /**
+         * Not a valid opcode.
+         */
+        PTLRPC_NRS_CTL_INVALID,
+        /**
+         * Policies can start using opcodes from this value and onwards for
+         * their own purposes; the assigned value itself is arbitrary.
+         */
+        PTLRPC_NRS_CTL_1ST_POL_SPEC = 0x20,
+};
+
+/**
+ * NRS policy operations.
+ *
+ * These determine the behaviour of a policy, and are called in response to
+ * NRS core events.
+ */
+struct ptlrpc_nrs_pol_ops {
+        /**
+         * Called during policy registration; this operation is optional.
+         *
+         * \param[in] policy The policy being initialized
+         */
+        int     (*op_policy_init) (struct ptlrpc_nrs_policy *policy);
+        /**
+         * Called during policy unregistration; this operation is optional.
+         *
+         * \param[in] policy The policy being unregistered/finalized
+         */
+        void    (*op_policy_fini) (struct ptlrpc_nrs_policy *policy);
+        /**
+         * Called when activating a policy via lprocfs; policies allocate and
+         * initialize their resources here; this operation is optional.
+         *
+         * \param[in] policy The policy being started
+         *
+         * \see nrs_policy_start_locked()
+         */
+        int     (*op_policy_start) (struct ptlrpc_nrs_policy *policy);
+        /**
+         * Called when deactivating a policy via lprocfs; policies deallocate
+         * their resources here; this operation is optional
+         *
+         * \param[in] policy The policy being stopped
+         *
+         * \see nrs_policy_stop_final()
+         */
+        void    (*op_policy_stop) (struct ptlrpc_nrs_policy *policy);
+        /**
+         * Used for policy-specific operations; i.e. not generic ones like
+         * \e PTLRPC_NRS_CTL_START and \e PTLRPC_NRS_CTL_GET_INFO; analogous
+         * to an ioctl; this operation is optional.
+         *
+         * \param[in]     policy The policy carrying out operation \a opc
+         * \param[in]     opc    The command operation being carried out
+         * \param[in,out] arg    A generic buffer for communication between the
+         *                       user and the control operation
+         *
+         * \retval -ve error
+         * \retval   0 success
+         *
+         * \see ptlrpc_nrs_policy_control()
+         */
+        int     (*op_policy_ctl) (struct ptlrpc_nrs_policy *policy,
+                                  enum ptlrpc_nrs_ctl opc, void *arg);
+
+        /**
+         * Called when obtaining references to the resources of the resource
+         * hierarchy for a request that has arrived for handling at the PTLRPC
+         * service. Policies should return -ve for requests they do not wish
+         * to handle. This operation is mandatory.
+         *
+         * \param[in]  policy     The policy we're getting resources for.
+         * \param[in]  nrq        The request we are getting resources for.
+         * \param[in]  parent     The parent resource of the resource being
+         *                        requested; set to NULL if none.
+         * \param[out] resp       The resource is to be returned here; the
+         *                        fallback policy in an NRS head should
+         *                        \e always return a non-NULL pointer value.
+         * \param[in]  moving_req When set, signifies that this is an attempt
+         *                        to obtain resources for a request being moved
+         *                        to the high-priority NRS head by
+         *                        ldlm_lock_reorder_req().
+         *                        This implies two things:
+         *                        1. We are under obd_export::exp_rpc_lock and
+         *                        so should not sleep.
+         *                        2. We should not perform non-idempotent
+         *                        operations, and can skip idempotent operations
+         *                        that were already carried out when resources
+         *                        were first taken for the request when it was
+         *                        initialized in ptlrpc_nrs_req_initialize().
+         *
+         * \retval 0, +ve The level of the returned resource in the resource
+         *                hierarchy; currently only 0 (for a non-leaf resource)
+         *                and 1 (for a leaf resource) are supported by the
+         *                framework.
+         * \retval -ve    error
+         *
+         * \see ptlrpc_nrs_req_initialize()
+         * \see ptlrpc_nrs_hpreq_add_nolock()
+         * \see ptlrpc_nrs_req_hp_move()
+         */
+        int     (*op_res_get) (struct ptlrpc_nrs_policy *policy,
+                               struct ptlrpc_nrs_request *nrq,
+                               struct ptlrpc_nrs_resource *parent,
+                               struct ptlrpc_nrs_resource **resp,
+                               bool moving_req);
+        /**
+         * Called when releasing references taken for resources in the resource
+         * hierarchy for the request; this operation is optional.
+         *
+         * \param[in] policy The policy the resource belongs to
+         * \param[in] res    The resource to be freed
+         *
+         * \see ptlrpc_nrs_req_finalize()
+         * \see ptlrpc_nrs_hpreq_add_nolock()
+         * \see ptlrpc_nrs_req_hp_move()
+         */
+        void    (*op_res_put) (struct ptlrpc_nrs_policy *policy,
+                               struct ptlrpc_nrs_resource *res);
+
+        /**
+         * Obtain a request for handling from the policy via polling; this
+         * operation is mandatory.
+         *
+         * \param[in] policy The policy to poll
+         *
+         * \retval NULL          No request available for handling
+         * \retval valid-pointer The request polled for handling
+         *
+         * \see ptlrpc_nrs_req_poll_nolock()
+         */
+        struct ptlrpc_nrs_request *
+                (*op_req_poll) (struct ptlrpc_nrs_policy *policy);
+        /**
+         * Called when attempting to add a request to a policy for later
+         * handling; this operation is mandatory.
+         *
+         * \param[in] policy The policy on which to enqueue \a nrq
+         * \param[in] nrq    The request to enqueue
+         *
+         * \retval 0    success
+         * \retval != 0 error
+         *
+         * \see ptlrpc_nrs_req_add_nolock()
+         */
+        int     (*op_req_enqueue) (struct ptlrpc_nrs_policy *policy,
+                                   struct ptlrpc_nrs_request *nrq);
+        /**
+         * Removes a request from the policy's set of pending requests. Normally
+         * called after a request has been polled successfully from the policy
+         * for handling; this operation is mandatory.
+         *
+         * \param[in] policy The policy the request \a nrq belongs to
+         * \param[in] nrq    The request to dequeue
+         *
+         * \see ptlrpc_nrs_req_del_nolock()
+         */
+        void    (*op_req_dequeue) (struct ptlrpc_nrs_policy *policy,
+                                   struct ptlrpc_nrs_request *nrq);
+        /**
+         * Called before carrying out the request; should not block. Could be
+         * used for job/resource control; this operation is optional.
+         *
+         * \param[in] policy The policy which is starting to handle request
+         *                   \a nrq
+         * \param[in] nrq    The request
+         *
+         * \pre spin_is_locked(&svcpt->scp_req_lock)
+         *
+         * \see ptlrpc_nrs_req_start_nolock()
+         */
+        void    (*op_req_start) (struct ptlrpc_nrs_policy *policy,
+                                 struct ptlrpc_nrs_request *nrq);
+        /**
+         * Called after the request has been carried out. Could be used for
+         * job/resource control; this operation is optional.
+         *
+         * \param[in] policy The policy which is stopping to handle request
+         *                   \a nrq
+         * \param[in] nrq    The request
+         *
+         * \pre spin_is_locked(&svcpt->scp_req_lock)
+         *
+         * \see ptlrpc_nrs_req_stop_nolock()
+         */
+        void    (*op_req_stop) (struct ptlrpc_nrs_policy *policy,
+                                struct ptlrpc_nrs_request *nrq);
+        /**
+         * Registers the policy's lprocfs interface with a PTLRPC service.
+         *
+         * \param[in] svc The service
+         *
+         * \retval 0    success
+         * \retval != 0 error
+         */
+        int     (*op_lprocfs_init) (struct ptlrpc_service *svc);
+        /**
+         * Unregisters the policy's lprocfs interface with a PTLRPC service.
+         *
+         * \param[in] svc The service
+         */
+        void    (*op_lprocfs_fini) (struct ptlrpc_service *svc);
+};
+
+/**
+ * Policy flags
+ */
+enum nrs_policy_flags {
+        /**
+         * Fallback policy, use this flag only on a single supported policy per
+         * service. Do not use this flag for policies registering using
+         * ptlrpc_nrs_policy_register() (i.e. ones that are not in
+         * \e nrs_pols_builtin).
+         */
+        PTLRPC_NRS_FL_FALLBACK          = (1 << 0),
+        /**
+         * Start policy immediately after registering.
+         */
+        PTLRPC_NRS_FL_REG_START         = (1 << 1),
+        /**
+         * This is a policy registering externally with NRS core, via
+         * ptlrpc_nrs_policy_register(), (i.e. one that is not in
+         * \e nrs_pols_builtin. Used to avoid ptlrpc_nrs_policy_register()
+         * racing with a policy start operation issued by the user via lprocfs.
+         */
+        PTLRPC_NRS_FL_REG_EXTERN        = (1 << 2),
+};
+
+/**
+ * NRS queue type.
+ *
+ * Denotes whether an NRS instance is for handling normal or high-priority
+ * RPCs, or whether an operation pertains to one or both of the NRS instances
+ * in a service.
+ */
+enum ptlrpc_nrs_queue_type {
+        PTLRPC_NRS_QUEUE_REG,
+        PTLRPC_NRS_QUEUE_HP,
+        PTLRPC_NRS_QUEUE_BOTH,
+};
+
+/**
+ * NRS head
+ *
+ * A PTLRPC service has at least one NRS head instance for handling normal
+ * priority RPCs, and may optionally have a second NRS head instance for
+ * handling high-priority RPCs. Each NRS head maintains a list of available
+ * policies, of which one and only one policy is acting as the fallback policy,
+ * and optionally a different policy may be acting as the primary policy. For
+ * all RPCs handled by this NRS head instance, NRS core will first attempt to
+ * enqueue the RPC using the primary policy (if any). The fallback policy is
+ * used in the following cases:
+ * - when there was no primary policy in the
+ *   ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED state at the time the request
+ *   was initialized.
+ * - when the primary policy that was at the
+ *   ptlrpc_nrs_pol_state::PTLRPC_NRS_POL_STATE_STARTED state at the time the
+ *   RPC was initialized signalled that it did not wish, or for some other
+ *   reason was not able, to handle the request, by returning a non-valid NRS
+ *   resource reference.
+ * - when the primary policy that was at the
+ *   ptlrpc_nrs_pol_state::PTLRPC_NRS_POL_STATE_STARTED state at the time the
+ *   RPC was initialized, fails later during the request enqueueing stage.
+ *
+ * \see nrs_resource_get_safe()
+ * \see nrs_request_enqueue()
+ */
+struct ptlrpc_nrs {
+        spinlock_t              nrs_lock;
+        /** XXX Possibly replace svcpt->scp_req_lock with another lock here. */
+        /**
+         * Linkage into nrs_core_heads_list
+         */
+        cfs_list_t              nrs_heads;
+        /**
+         * List of registered policies
+         */
+        cfs_list_t              nrs_policy_list;
+        /**
+         * List of policies with queued requests. Policies that have any
+         * outstanding requests are queued here, and this list is queried
+         * in a round-robin manner from NRS core when obtaining a request
+         * for handling. This ensures that requests from policies that at some
+         * point transition away from the
+         * ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED state are drained.
+         */
+        cfs_list_t              nrs_policy_queued;
+        /**
+         * Service partition for this NRS head
+         */
+        struct ptlrpc_service_part *nrs_svcpt;
+        /**
+         * Primary policy, which is the preferred policy for handling RPCs
+         */
+        struct ptlrpc_nrs_policy *nrs_policy_primary;
+        /**
+         * Fallback policy, which is the backup policy for handling RPCs
+         */
+        struct ptlrpc_nrs_policy *nrs_policy_fallback;
+        /**
+         * This NRS head handles either HP or regular requests
+         */
+        enum ptlrpc_nrs_queue_type nrs_queue_type;
+        /**
+         * # queued requests from all policies in this NRS head
+         */
+        unsigned long           nrs_req_queued;
+        /**
+         * # scheduled requests from all policies in this NRS head
+         */
+        unsigned long           nrs_req_started;
+        /**
+         * # policies on this NRS
+         * TODO: Can we avoid having this?
+         */
+        unsigned                nrs_num_pols;
+        /**
+         * This NRS head is in progress of starting a policy
+         */
+        unsigned                nrs_policy_starting:1;
+        /**
+         * In progress of shutting down the whole NRS head; used during
+         * unregistration
+         */
+        unsigned                nrs_stopping:1;
+};
+
+#define NRS_POL_NAME_MAX        16
+
+/**
+ * NRS policy registering descriptor
+ *
+ * Is used to hold a description of a policy that can be passed to NRS core in
+ * order to register the policy with NRS heads in different PTLRPC services.
+ */
+struct ptlrpc_nrs_pol_desc {
+        /**
+         * Human-readable policy name
+         */
+        char                            pd_name[NRS_POL_NAME_MAX];
+        /**
+         * NRS operations for this policy
+         */
+        struct ptlrpc_nrs_pol_ops      *pd_ops;
+        /**
+         * Service compatibility function; this determines whether a policy is
+         * adequate for handling RPCs of a particular PTLRPC service.
+         *
+         * XXX: This should give the same result during policy
+         * registration and unregistration, and for all partitions of a
+         * service; so the result should not depend on temporal service
+         * state or other properties that may influence the result.
+         */
+        bool    (*pd_compat) (struct ptlrpc_service *svc,
+                              const struct ptlrpc_nrs_pol_desc *desc);
+        /**
+         * Optionally set for policies that support a single ptlrpc service,
+         * i.e. ones that have \a pd_compat set to nrs_policy_compat_one()
+         */
+        char                           *pd_compat_svc_name;
+        /**
+         * Bitmask of nrs_policy_flags
+         */
+        unsigned                        pd_flags;
+        /**
+         * Link into nrs_core::nrs_policies
+         */
+        cfs_list_t                      pd_list;
+};
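[Editor's note] For illustration, an external policy would typically fill in such a descriptor before calling ptlrpc_nrs_policy_register(). Everything below other than the struct fields and flag is hypothetical, and nrs_policy_compat_all() is assumed to be the catch-all compatibility predicate:

static struct ptlrpc_nrs_pol_desc my_pol_desc = {
        .pd_name        = "mypolicy",               /* hypothetical name */
        .pd_ops         = &my_pol_ops,              /* hypothetical ops table */
        .pd_compat      = nrs_policy_compat_all,    /* assumed helper */
        .pd_flags       = PTLRPC_NRS_FL_REG_EXTERN,
};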
+
+/**
+ * NRS policy state
+ *
+ * Policies transition from one state to the other during their lifetime
+ */
+enum ptlrpc_nrs_pol_state {
+        /**
+         * Not a valid policy state.
+         */
+        NRS_POL_STATE_INVALID,
+        /**
+         * For now, this state is used exclusively for policies that register
+         * externally to NRS core, i.e. ones that do so via
+         * ptlrpc_nrs_policy_register() and are not part of nrs_pols_builtin;
+         * it is used to prevent a race condition between the policy registering
+         * with more than one service partition while service is operational,
+         * and the user starting the policy via lprocfs.
+         *
+         * \see nrs_pol_make_avail()
+         */
+        NRS_POL_STATE_UNAVAIL,
+        /**
+         * Policies are at this state either at the start of their life, or
+         * transition here when the user selects a different policy to act
+         * as the primary one.
+         */
+        NRS_POL_STATE_STOPPED,
+        /**
+         * Policy is in progress of stopping
+         */
+        NRS_POL_STATE_STOPPING,
+        /**
+         * Policy is in progress of starting
+         */
+        NRS_POL_STATE_STARTING,
+        /**
+         * A policy is in this state in two cases:
+         * - it is the fallback policy, which is always in this state.
+         * - it has been activated by the user; i.e. it is the primary policy.
+         */
+        NRS_POL_STATE_STARTED,
+};
+
+/**
+ * NRS policy information
+ *
+ * Used for obtaining information for the status of a policy via lprocfs
+ */
+struct ptlrpc_nrs_pol_info {
+        /**
+         * Policy name
+         */
+        char                            pi_name[NRS_POL_NAME_MAX];
+        /**
+         * Current policy state
+         */
+        enum ptlrpc_nrs_pol_state       pi_state;
+        /**
+         * # RPCs enqueued for later dispatching by the policy
+         */
+        long                            pi_req_queued;
+        /**
+         * # RPCs started for dispatch by the policy
+         */
+        long                            pi_req_started;
+        /**
+         * Is this a fallback policy?
+         */
+        unsigned                        pi_fallback:1;
+};
+
+/**
+ * NRS policy
+ *
+ * There is one instance of this for each policy in each NRS head of each
+ * PTLRPC service partition.
+ */
+struct ptlrpc_nrs_policy {
+        /**
+         * Linkage into the NRS head's list of policies,
+         * ptlrpc_nrs:nrs_policy_list
+         */
+        cfs_list_t                      pol_list;
+        /**
+         * Linkage into the NRS head's list of policies with enqueued
+         * requests ptlrpc_nrs:nrs_policy_queued
+         */
+        cfs_list_t                      pol_list_queued;
+        /**
+         * Current state of this policy
+         */
+        enum ptlrpc_nrs_pol_state       pol_state;
+        /**
+         * Bitmask of nrs_policy_flags
+         */
+        unsigned                        pol_flags;
+        /**
+         * # RPCs enqueued for later dispatching by the policy
+         */
+        long                            pol_req_queued;
+        /**
+         * # RPCs started for dispatch by the policy
+         */
+        long                            pol_req_started;
+        /**
+         * Usage reference count taken on the policy instance
+         */
+        long                            pol_ref;
+        /**
+         * The NRS head this policy has been created at
+         */
+        struct ptlrpc_nrs              *pol_nrs;
+        /**
+         * NRS operations for this policy; points to ptlrpc_nrs_pol_desc::pd_ops
+         */
+        struct ptlrpc_nrs_pol_ops      *pol_ops;
+        /**
+         * Private policy data; varies by policy type
+         */
+        void                           *pol_private;
+        /**
+         * Human-readable policy name; points to ptlrpc_nrs_pol_desc::pd_name
+         */
+        char                           *pol_name;
+};
+
+/**
+ * NRS resource
+ *
+ * Resources are embedded into two types of NRS entities:
+ * - Inside NRS policies, in the policy's private data in
+ *   ptlrpc_nrs_policy::pol_private
+ * - In objects that act as prime-level scheduling entities in different NRS
+ *   policies; e.g. on a policy that performs round robin or similar order
+ *   scheduling across client NIDs, there would be one NRS resource per unique
+ *   client NID. On a policy which performs round robin scheduling across
+ *   backend filesystem objects, there would be one resource associated with
+ *   each of the backend filesystem objects partaking in the scheduling
+ *   performed by the policy.
+ *
+ * NRS resources share a parent-child relationship, in which resources embedded
+ * in policy instances are the parent entities, with all scheduling entities
+ * a policy schedules across being the children, thus forming a simple resource
+ * hierarchy. This hierarchy may be extended with one or more levels in the
+ * future if the ability to have more than one primary policy is added.
+ *
+ * Upon request initialization, references to the then active NRS policies are
+ * taken and used to later handle the dispatching of the request with one of
+ * these policies.
+ *
+ * \see nrs_resource_get_safe()
+ * \see ptlrpc_nrs_req_add()
+ */
+struct ptlrpc_nrs_resource {
+        /**
+         * This NRS resource's parent; is NULL for resources embedded in NRS
+         * policy instances; i.e. those are top-level ones.
+         */
+        struct ptlrpc_nrs_resource     *res_parent;
+        /**
+         * The policy associated with this resource.
+         */
+        struct ptlrpc_nrs_policy       *res_policy;
+};
+
+enum {
+        NRS_RES_FALLBACK,
+        NRS_RES_PRIMARY,
+        NRS_RES_MAX
+};
+
+/* \name fifo
+ *
+ * FIFO policy
+ *
+ * This policy is a logical wrapper around previous, non-NRS functionality.
+ * It dispatches RPCs in the same order as they arrive from the network. This
+ * policy is currently used as the fallback policy, and the only enabled policy
+ * on all NRS heads of all PTLRPC service partitions.
+ * @{
+ */
+
+/**
+ * Private data structure for the FIFO policy
+ */
+struct nrs_fifo_head {
+        /**
+         * Resource object for policy instance.
+         */
+        struct ptlrpc_nrs_resource      fh_res;
+        /**
+         * List of queued requests.
+         */
+        cfs_list_t                      fh_list;
+        /**
+         * For debugging purposes.
+         */
+        __u64                           fh_sequence;
+};
+
+struct nrs_fifo_req {
+        /** request header, must be the first member of structure */
+        cfs_list_t      fr_list;
+        __u64           fr_sequence;
+};
+
+/** @} fifo */
+
+/**
+ * NRS request
+ *
+ * Instances of this object exist embedded within ptlrpc_request; the main
+ * purpose of this object is to hold references to the request's resources
+ * for the lifetime of the request, and to hold properties that policies
+ * use for determining the request's scheduling priority.
+ */
+struct ptlrpc_nrs_request {
+        /**
+         * The request's resource hierarchy.
+         */
+        struct ptlrpc_nrs_resource     *nr_res_ptrs[NRS_RES_MAX];
+        /**
+         * Index into ptlrpc_nrs_request::nr_res_ptrs of the resource of the
+         * policy that was used to enqueue the request.
+         *
+         * \see nrs_request_enqueue()
+         */
+        unsigned                        nr_res_idx;
+        unsigned                        nr_initialized:1;
+        unsigned                        nr_enqueued:1;
+        unsigned                        nr_dequeued:1;
+        unsigned                        nr_started:1;
+        unsigned                        nr_finalized:1;
+        cfs_binheap_node_t              nr_node;
+
+        /**
+         * Policy-specific fields, used for determining a request's scheduling
+         * priority, and other supporting functionality.
+         */
+        union {
+                /**
+                 * Fields for the FIFO policy
+                 */
+                struct nrs_fifo_req     fifo;
+        } nr_u;
+        /**
+         * Externally-registering policies may want to use this to allocate
+         * their own request properties.
+         */
+        void                           *ext;
+};
+
+/** @} nrs */
+
+/**
  * Basic request prioritization operations structure.
  * The whole idea is centered around locks and RPCs that might affect locks.
  * When a lock is contended we try to give priority to RPCs that might lead
@@ -449,6 +1402,10 @@ struct ptlrpc_hpreq_ops {
          * Check if the request is a high priority one.
          */
         int  (*hpreq_check)(struct ptlrpc_request *);
+        /**
+         * Called after the request has been handled.
+         */
+        void (*hpreq_fini)(struct ptlrpc_request *);
 };
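[Editor's note] As a concrete reading of the ops table, here is roughly how the FIFO policy described above would implement enqueue and poll against the nrs_fifo_head and nrs_fifo_req fields. This is an illustrative sketch under the stated field names, not the real implementation (which lives in the ptlrpc layer, nrs_fifo.c in later trees):

static int my_fifo_req_add(struct ptlrpc_nrs_policy *policy,
                           struct ptlrpc_nrs_request *nrq)
{
        struct nrs_fifo_head *head = policy->pol_private;

        /* Tag the request with a sequence number and append it. */
        nrq->nr_u.fifo.fr_sequence = head->fh_sequence++;
        cfs_list_add_tail(&nrq->nr_u.fifo.fr_list, &head->fh_list);
        return 0;
}

static struct ptlrpc_nrs_request *
my_fifo_req_poll(struct ptlrpc_nrs_policy *policy)
{
        struct nrs_fifo_head *head = policy->pol_private;

        /* Oldest queued request first, or NULL if the queue is empty. */
        return cfs_list_empty(&head->fh_list) ? NULL :
               cfs_list_entry(head->fh_list.next,
                              struct ptlrpc_nrs_request, nr_u.fifo.fr_list);
}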
 
 /**
@@ -458,8 +1415,10 @@ struct ptlrpc_hpreq_ops {
  * in Lustre.
  */
 struct ptlrpc_request {
-        /* Request type: one of PTL_RPC_MSG_* */
-        int rq_type;
+        /* Request type: one of PTL_RPC_MSG_* */
+        int rq_type;
+        /** Result of request processing */
+        int rq_status;
         /**
          * Linkage item through which this request is included into
          * sending/delayed lists on client and into rqbd list on server
@@ -479,18 +1438,26 @@ struct ptlrpc_request {
         cfs_list_t rq_exp_list;
         /** server-side hp handlers */
         struct ptlrpc_hpreq_ops *rq_ops;
+
+        /** initial thread servicing this request */
+        struct ptlrpc_thread *rq_svc_thread;
+
         /** history sequence # */
         __u64 rq_history_seq;
+        /** \addtogroup nrs
+         * @{
+         */
+        /** stub for NRS request */
+        struct ptlrpc_nrs_request rq_nrq;
+        /** @} nrs */
         /** the index of service's srv_at_array into which request is linked */
         time_t rq_at_index;
-        /** Result of request processing */
-        int rq_status;
         /** Lock to protect request flags and some other important bits, like
          * rq_list */
-        cfs_spinlock_t rq_lock;
-        /** client-side flags are serialized by rq_lock */
-        unsigned long rq_intr:1, rq_replied:1, rq_err:1,
+        spinlock_t rq_lock;
+        /** client-side flags are serialized by rq_lock */
+        unsigned int rq_intr:1, rq_replied:1, rq_err:1,
                 rq_timedout:1, rq_resend:1, rq_restart:1,
                 /**
                  * when ->rq_replay is set, request is kept by the client even
@@ -505,7 +1472,6 @@ struct ptlrpc_request {
                 rq_no_resend:1, rq_waiting:1, rq_receiving_reply:1,
                 rq_no_delay:1, rq_net_err:1, rq_wait_ctx:1,
                 rq_early:1, rq_must_unlink:1,
-                rq_fake:1,          /* this fake req */
                 rq_memalloc:1,      /* req originated from "kswapd" */
                 /* server-side flags */
                 rq_packed_final:1,  /* packed final reply */
@@ -514,20 +1480,25 @@
                 rq_reply_truncate:1,
                 rq_committed:1,
                 /* whether the "rq_set" is a valid one */
-                rq_invalid_rqset:1;
+                rq_invalid_rqset:1,
+                rq_generation_set:1,
+                /* do not resend request on -EINPROGRESS */
+                rq_no_retry_einprogress:1,
+                /* allow the req to be sent if the import is in recovery
+                 * status */
+                rq_allow_replay:1;
+
+        unsigned int rq_nr_resend;
 
         enum rq_phase rq_phase; /* one of RQ_PHASE_* */
         enum rq_phase rq_next_phase; /* one of RQ_PHASE_* to be used next */
         cfs_atomic_t rq_refcount;/* client-side refcount for SENT race,
                                     server-side refcount for multiple replies */
-        /** initial thread servicing this request */
-        struct ptlrpc_thread *rq_svc_thread;
-
-        /** Portal to which this request would be sent */
-        int rq_request_portal;  /* XXX FIXME bug 249 */
-        /** Portal where to wait for reply and where reply would be sent */
-        int rq_reply_portal;    /* XXX FIXME bug 249 */
+        /** Portal to which this request would be sent */
+        short rq_request_portal;  /* XXX FIXME bug 249 */
+        /** Portal where to wait for reply and where reply would be sent */
+        short rq_reply_portal;    /* XXX FIXME bug 249 */
 
         /**
          * client-side:
@@ -537,11 +1508,10 @@
         int rq_nob_received;
         /** Request length */
         int rq_reqlen;
-        /** Request message - what client sent */
-        struct lustre_msg *rq_reqmsg;
-
         /** Reply length */
         int rq_replen;
+        /** Request message - what client sent */
+        struct lustre_msg *rq_reqmsg;
         /** Reply message - server response */
         struct lustre_msg *rq_repmsg;
         /** Transaction number */
@@ -565,7 +1535,8 @@
         struct sptlrpc_flavor    rq_flvr;        /**< for client & server */
         enum lustre_sec_part     rq_sp_from;
 
-        unsigned long            /* client/server security flags */
+        /* client/server security flags */
+        unsigned int
                                  rq_ctx_init:1,      /* context initiation */
                                  rq_ctx_fini:1,      /* context destroy */
                                  rq_bulk_read:1,     /* request bulk read */
@@ -575,6 +1546,7 @@ struct ptlrpc_request {
                                  rq_auth_remote:1,   /* authed as remote user */
                                  rq_auth_usr_root:1, /* authed as root */
                                  rq_auth_usr_mdt:1,  /* authed as mdt */
+                                 rq_auth_usr_ost:1,  /* authed as ost */
                                  /* security tfm flags */
                                  rq_pack_udesc:1,
                                  rq_pack_bulk:1,
@@ -588,21 +1560,21 @@
         /* (server side), pointed directly into req buffer */
         struct ptlrpc_user_desc *rq_user_desc;
 
-        /** early replies go to offset 0, regular replies go after that */
-        unsigned int             rq_reply_off;
-
         /* various buffer pointers */
         struct lustre_msg       *rq_reqbuf;      /* req wrapper */
+        char                    *rq_repbuf;      /* rep buffer */
+        struct lustre_msg       *rq_repdata;     /* rep wrapper msg */
+        struct lustre_msg       *rq_clrbuf;      /* only in priv mode */
         int                      rq_reqbuf_len;  /* req wrapper buf len */
         int                      rq_reqdata_len; /* req wrapper msg len */
-        char                    *rq_repbuf;      /* rep buffer */
         int                      rq_repbuf_len;  /* rep buffer len */
-        struct lustre_msg       *rq_repdata;     /* rep wrapper msg */
         int                      rq_repdata_len; /* rep wrapper msg len */
-        struct lustre_msg       *rq_clrbuf;      /* only in priv mode */
         int                      rq_clrbuf_len;  /* only in priv mode */
         int                      rq_clrdata_len; /* only in priv mode */
+        /** early replies go to offset 0, regular replies go after that */
+        unsigned int             rq_reply_off;
+
         /** @} */
 
         /** Fields that help to see if request and reply were swabbed or not */
@@ -631,9 +1603,6 @@
         struct ptlrpc_reply_state *rq_reply_state;
         /** incoming request buffer */
         struct ptlrpc_request_buffer_desc *rq_rqbd;
-#ifdef CRAY_XT3
-        __u32 rq_uid; /* peer uid, used in MDS only */
-#endif
 
         /** client-only incoming reply */
         lnet_handle_md_t rq_reply_md_h;
@@ -685,10 +1654,10 @@
         int rq_timeout;
 
         /** Multi-rpc bits */
-        /** Link item for request set lists */
-        cfs_list_t rq_set_chain;
         /** Per-request waitq introduced by bug 21938 for recovery waiting */
         cfs_waitq_t rq_set_waitq;
+        /** Link item for request set lists */
+        cfs_list_t rq_set_chain;
         /** Link back to the request set */
         struct ptlrpc_request_set *rq_set;
         /** Async completion handler, called when reply is received */
@@ -722,6 +1691,36 @@ static inline int ptlrpc_req_interpret(const struct lu_env *env,
         return rc;
 }
 
+/** \addtogroup nrs
+ * @{
+ */
+int ptlrpc_nrs_policy_register(struct ptlrpc_nrs_pol_desc *desc);
+int ptlrpc_nrs_policy_unregister(struct ptlrpc_nrs_pol_desc *desc);
+void ptlrpc_nrs_req_hp_move(struct ptlrpc_request *req);
+void nrs_policy_get_info_locked(struct ptlrpc_nrs_policy *policy,
+                                struct ptlrpc_nrs_pol_info *info);
+
+/*
+ * Can the request be moved from the regular NRS head to the high-priority NRS
+ * head (of the same PTLRPC service partition), if any?
+ *
+ * For a reliable result, this should be checked under svcpt->scp_req_lock.
+ */
+static inline bool
+ptlrpc_nrs_req_can_move(struct ptlrpc_request *req)
+{
+        struct ptlrpc_nrs_request *nrq = &req->rq_nrq;
+
+        /**
+         * LU-898: Check ptlrpc_nrs_request::nr_enqueued to make sure the
+         * request has been enqueued first, and ptlrpc_nrs_request::nr_started
+         * to make sure it has not been scheduled yet (analogous to the previous
+         * (non-NRS) checking of !list_empty(&ptlrpc_request::rq_list)).
+         */
+        return nrq->nr_enqueued && !nrq->nr_started && !req->rq_hp;
+}
+/** @} nrs */
+
 /**
  * Returns 1 if request buffer at offset \a index was already swabbed
  */
@@ -831,25 +1830,22 @@ ptlrpc_rqphase2str(struct ptlrpc_request *req)
 
 #define REQ_FLAGS_FMT "%s:%s%s%s%s%s%s%s%s%s%s%s%s"
 
-void _debug_req(struct ptlrpc_request *req, __u32 mask,
+void _debug_req(struct ptlrpc_request *req,
                 struct libcfs_debug_msg_data *data, const char *fmt, ...)
-        __attribute__ ((format (printf, 4, 5)));
+        __attribute__ ((format (printf, 3, 4)));
 
 /**
  * Helper that decides if we need to print request according to current debug
  * level settings
  */
-#define debug_req(cdls, level, req, file, func, line, fmt, a...)              \
+#define debug_req(msgdata, mask, cdls, req, fmt, a...)                        \
 do {                                                                          \
-        CFS_CHECK_STACK();                                                    \
+        CFS_CHECK_STACK(msgdata, mask, cdls);                                 \
                                                                               \
-        if (((level) & D_CANTMASK) != 0 ||                                    \
-            ((libcfs_debug & (level)) != 0 &&                                 \
-             (libcfs_subsystem_debug & DEBUG_SUBSYSTEM) != 0)) {              \
-                static struct libcfs_debug_msg_data _req_dbg_data =           \
-                DEBUG_MSG_DATA_INIT(cdls, DEBUG_SUBSYSTEM, file, func, line); \
-                _debug_req((req), (level), &_req_dbg_data, fmt, ##a);         \
-        }                                                                     \
+        if (((mask) & D_CANTMASK) != 0 ||                                     \
+            ((libcfs_debug & (mask)) != 0 &&                                  \
+             (libcfs_subsystem_debug & DEBUG_SUBSYSTEM) != 0))                \
+                _debug_req((req), msgdata, fmt, ##a);                         \
 } while(0)
 
 /**
@@ -860,11 +1856,12 @@ do { \
 do {                                                                          \
         if ((level) & (D_ERROR | D_WARNING)) {                                \
                 static cfs_debug_limit_state_t cdls;                          \
-                debug_req(&cdls, level, req, __FILE__, __func__, __LINE__,    \
-                          "@@@ "fmt" ", ## args);                             \
-        } else                                                                \
-                debug_req(NULL, level, req, __FILE__, __func__, __LINE__,     \
-                          "@@@ "fmt" ", ## args);                             \
+                LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, level, &cdls);            \
+                debug_req(&msgdata, level, &cdls, req, "@@@ "fmt" ", ## args);\
+        } else {                                                              \
+                LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, level, NULL);             \
+                debug_req(&msgdata, level, NULL, req, "@@@ "fmt" ", ## args); \
+        }                                                                     \
 } while (0)
 /** @} */
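[Editor's note] Call sites are unaffected by the reworked plumbing above; typical usage of the macro stays the same, e.g.:

        DEBUG_REQ(D_ERROR, req, "unexpected status: rc = %d", rc);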
@@ -890,7 +1887,7 @@ struct ptlrpc_bulk_page {
 #define BULK_PUT_SOURCE   3
 
 /**
- * Definition of buk descriptor.
+ * Definition of bulk descriptor.
  * Bulks are special "Two phase" RPCs where initial request message
  * is sent first and it is followed by a transfer (or receiving) of a large
  * amount of data to be settled into pages referenced from the bulk descriptors.
  * Another user is readpage for MDT.
  */
 struct ptlrpc_bulk_desc {
-        /** completed successfully */
-        unsigned long bd_success:1;
-        /** accessible to the network (network io potentially in progress) */
-        unsigned long bd_network_rw:1;
-        /** {put,get}{source,sink} */
-        unsigned long bd_type:2;
-        /** client side */
-        unsigned long bd_registered:1;
-        /** For serialization with callback */
-        cfs_spinlock_t bd_lock;
-        /** Import generation when request for this bulk was sent */
-        int bd_import_generation;
-        /** Server side - export this bulk created for */
-        struct obd_export *bd_export;
-        /** Client side - import this bulk was sent on */
-        struct obd_import *bd_import;
-        /** LNet portal for this bulk */
-        __u32 bd_portal;
-        /** Back pointer to the request */
-        struct ptlrpc_request *bd_req;
-        cfs_waitq_t            bd_waitq;        /* server side only WQ */
-        int                    bd_iov_count;    /* # entries in bd_iov */
-        int                    bd_max_iov;      /* allocated size of bd_iov */
-        int                    bd_nob;          /* # bytes covered */
-        int                    bd_nob_transferred; /* # bytes GOT/PUT */
-
-        __u64                  bd_last_xid;
-
-        struct ptlrpc_cb_id    bd_cbid;         /* network callback info */
-        lnet_handle_md_t       bd_md_h;         /* associated MD */
-        lnet_nid_t             bd_sender;       /* stash event::sender */
+        /** completed with failure */
+        unsigned long bd_failure:1;
+        /** {put,get}{source,sink} */
+        unsigned long bd_type:2;
+        /** client side */
+        unsigned long bd_registered:1;
+        /** For serialization with callback */
+        spinlock_t bd_lock;
+        /** Import generation when request for this bulk was sent */
+        int bd_import_generation;
+        /** LNet portal for this bulk */
+        __u32 bd_portal;
+        /** Server side - export this bulk created for */
+        struct obd_export *bd_export;
+        /** Client side - import this bulk was sent on */
+        struct obd_import *bd_import;
+        /** Back pointer to the request */
+        struct ptlrpc_request *bd_req;
+        cfs_waitq_t            bd_waitq;        /* server side only WQ */
+        int                    bd_iov_count;    /* # entries in bd_iov */
+        int                    bd_max_iov;      /* allocated size of bd_iov */
+        int                    bd_nob;          /* # bytes covered */
+        int                    bd_nob_transferred; /* # bytes GOT/PUT */
+
+        __u64                  bd_last_xid;
+
+        struct ptlrpc_cb_id    bd_cbid;         /* network callback info */
+        lnet_nid_t             bd_sender;       /* stash event::sender */
+        int                    bd_md_count;     /* # valid entries in bd_mds */
+        int                    bd_md_max_brw;   /* max entries in bd_mds */
+        /** array of associated MDs */
+        lnet_handle_md_t       bd_mds[PTLRPC_BULK_OPS_COUNT];
 
 #if defined(__KERNEL__)
-        /*
-         * encrypt iov, size is either 0 or bd_iov_count.
-         */
-        lnet_kiov_t           *bd_enc_iov;
+        /*
+         * encrypt iov, size is either 0 or bd_iov_count.
+         */
+        lnet_kiov_t           *bd_enc_iov;
 
-        lnet_kiov_t            bd_iov[0];
+        lnet_kiov_t            bd_iov[0];
 #else
-        lnet_md_iovec_t        bd_iov[0];
+        lnet_md_iovec_t        bd_iov[0];
 #endif
 };
 
+enum {
+        SVC_STOPPED     = 1 << 0,
+        SVC_STOPPING    = 1 << 1,
+        SVC_STARTING    = 1 << 2,
+        SVC_RUNNING     = 1 << 3,
+        SVC_EVENT       = 1 << 4,
+        SVC_SIGNAL      = 1 << 5,
+};
+
+#define PTLRPC_THR_NAME_LEN     32
 /**
  * Definition of server service thread structure
  */
@@ -972,11 +1980,72 @@ struct ptlrpc_thread {
         /**
          * the svc this thread belonged to b=18582
          */
-        struct ptlrpc_service *t_svc;
-        cfs_waitq_t t_ctl_waitq;
-        struct lu_env *t_env;
+        struct ptlrpc_service_part *t_svcpt;
+        cfs_waitq_t t_ctl_waitq;
+        struct lu_env *t_env;
+        char t_name[PTLRPC_THR_NAME_LEN];
 };
 
+static inline int thread_is_init(struct ptlrpc_thread *thread)
+{
+        return thread->t_flags == 0;
+}
+
+static inline int thread_is_stopped(struct ptlrpc_thread *thread)
+{
+        return !!(thread->t_flags & SVC_STOPPED);
+}
+
+static inline int thread_is_stopping(struct ptlrpc_thread *thread)
+{
+        return !!(thread->t_flags & SVC_STOPPING);
+}
+
+static inline int thread_is_starting(struct ptlrpc_thread *thread)
+{
+        return !!(thread->t_flags & SVC_STARTING);
+}
+
+static inline int thread_is_running(struct ptlrpc_thread *thread)
+{
+        return !!(thread->t_flags & SVC_RUNNING);
+}
+
+static inline int thread_is_event(struct ptlrpc_thread *thread)
+{
+        return !!(thread->t_flags & SVC_EVENT);
+}
+
+static inline int thread_is_signal(struct ptlrpc_thread *thread)
+{
+        return !!(thread->t_flags & SVC_SIGNAL);
+}
+
+static inline void thread_clear_flags(struct ptlrpc_thread *thread, __u32 flags)
+{
+        thread->t_flags &= ~flags;
+}
+
+static inline void thread_set_flags(struct ptlrpc_thread *thread, __u32 flags)
+{
+        thread->t_flags = flags;
+}
+
+static inline void thread_add_flags(struct ptlrpc_thread *thread, __u32 flags)
+{
+        thread->t_flags |= flags;
+}
+
+static inline int thread_test_and_clear_flags(struct ptlrpc_thread *thread,
+                                              __u32 flags)
+{
+        if (thread->t_flags & flags) {
+                thread->t_flags &= ~flags;
+                return 1;
+        }
+        return 0;
+}
+
 /**
  * Request buffer descriptor structure.
  * This is a structure that contains one posted request buffer for service.
@@ -990,7 +2059,7 @@ struct ptlrpc_request_buffer_desc {
         /** History of requests for this buffer */
         cfs_list_t             rqbd_reqs;
         /** Back pointer to service for which this buffer is registered */
-        struct ptlrpc_service *rqbd_service;
+        struct ptlrpc_service_part *rqbd_svcpt;
         /** LNet descriptor */
         lnet_handle_md_t       rqbd_md_h;
         int                    rqbd_refcount;
@@ -1004,9 +2073,38 @@ struct ptlrpc_request_buffer_desc {
         struct ptlrpc_request  rqbd_req;
 };
 
-typedef int  (*svc_handler_t)(struct ptlrpc_request *req);
-typedef void (*svcreq_printfn_t)(void *, struct ptlrpc_request *);
-typedef int  (*svc_hpreq_handler_t)(struct ptlrpc_request *);
+typedef int  (*svc_handler_t)(struct ptlrpc_request *req);
+
+struct ptlrpc_service_ops {
+        /**
+         * if non-NULL called during thread creation (ptlrpc_start_thread())
+         * to initialize service specific per-thread state.
+         */
+        int             (*so_thr_init)(struct ptlrpc_thread *thr);
+        /**
+         * if non-NULL called during thread shutdown (ptlrpc_main()) to
+         * destruct state created by ->so_thr_init().
+	 */
+	void		(*so_thr_done)(struct ptlrpc_thread *thr);
+	/**
+	 * Handler function for incoming requests for this service
+	 */
+	int		(*so_req_handler)(struct ptlrpc_request *req);
+	/**
+	 * function to determine the priority of a request; it is called
+	 * on every new request
+	 */
+	int		(*so_hpreq_handler)(struct ptlrpc_request *);
+	/**
+	 * service-specific print fn
+	 */
+	void		(*so_req_printer)(void *, struct ptlrpc_request *);
+};
+
+#ifndef __cfs_cacheline_aligned
+/* NB: put it here for reducing patch dependence */
+# define __cfs_cacheline_aligned
+#endif
 
 /**
  * How many high priority requests to serve before serving one normal
@@ -1021,139 +2119,251 @@ typedef int (*svc_hpreq_handler_t)(struct ptlrpc_request *);
  * or general metadata service for MDS.
  */
 struct ptlrpc_service {
-        cfs_list_t      srv_list;               /* chain thru all services */
-        int             srv_max_req_size;       /* biggest request to receive */
-        int             srv_max_reply_size;     /* biggest reply to send */
-        int             srv_buf_size;           /* size of individual buffers */
-        int             srv_nbuf_per_group;     /* # buffers to allocate in 1 group */
-        int             srv_nbufs;              /* total # req buffer descs allocated */
-        int             srv_threads_min;        /* threads to start at SOW */
-        int             srv_threads_max;        /* thread upper limit */
-        int             srv_threads_started;    /* index of last started thread */
-        int             srv_threads_running;    /* # running threads */
-        cfs_atomic_t    srv_n_difficult_replies; /* # 'difficult' replies */
-        int             srv_n_active_reqs;      /* # reqs being served */
-        int             srv_n_hpreq;            /* # HPreqs being served */
-        cfs_duration_t  srv_rqbd_timeout;       /* timeout before re-posting reqs, in tick */
-        int             srv_watchdog_factor;    /* soft watchdog timeout multiplier */
-        unsigned        srv_cpu_affinity:1;     /* bind threads to CPUs */
-        unsigned        srv_at_check:1;         /* check early replies */
-        unsigned        srv_is_stopping:1;      /* under unregister_service */
-        cfs_time_t      srv_at_checktime;       /* debug */
-
-        /** Local portal on which to receive requests */
-        __u32           srv_req_portal;
-        /** Portal on the client to send replies to */
-        __u32           srv_rep_portal;
-
-        /** AT stuff */
-        /** @{ */
-        struct adaptive_timeout srv_at_estimate;/* estimated rpc service time */
-        cfs_spinlock_t  srv_at_lock;
-        struct ptlrpc_at_array srv_at_array;    /* reqs waiting for replies */
-        cfs_timer_t     srv_at_timer;           /* early reply timer */
-        /** @} */
-
-        int             srv_n_queued_reqs;      /* # reqs in either of the queues below */
-        int             srv_hpreq_count;        /* # hp requests handled */
-        int             srv_hpreq_ratio;        /* # hp per lp reqs to handle */
-        cfs_list_t      srv_req_in_queue;       /* incoming reqs */
-        cfs_list_t      srv_request_queue;      /* reqs waiting for service */
-        cfs_list_t      srv_request_hpq;        /* high priority queue */
-
-        cfs_list_t      srv_request_history;    /* request history */
-        __u64           srv_request_seq;        /* next request sequence # */
-        __u64           srv_request_max_cull_seq; /* highest seq culled from history */
-        svcreq_printfn_t srv_request_history_print_fn; /* service-specific print fn */
-
-        cfs_list_t      srv_idle_rqbds;         /* request buffers to be reposted */
-        cfs_list_t      srv_active_rqbds;       /* req buffers receiving */
-        cfs_list_t      srv_history_rqbds;      /* request buffer history */
-        int             srv_nrqbd_receiving;    /* # posted request buffers */
-        int             srv_n_history_rqbds;    /* # request buffers in history */
-        int             srv_max_history_rqbds;  /* max # request buffers in history */
-
-        cfs_atomic_t    srv_outstanding_replies;
-        cfs_list_t      srv_active_replies;     /* all the active replies */
-#ifndef __KERNEL__
-        cfs_list_t      srv_reply_queue;        /* replies waiting for service */
-#endif
-        cfs_waitq_t     srv_waitq; /* all threads sleep on this.
This
-                                    * wait-queue is signalled when new
-                                    * incoming request arrives and when
-                                    * difficult reply has to be handled. */
-
-        cfs_list_t      srv_threads;            /* service thread list */
-        /** Handler function for incoming requests for this service */
-        svc_handler_t   srv_handler;
-        svc_hpreq_handler_t srv_hpreq_handler;  /* hp request handler */
-
-        char *srv_name; /* only statically allocated strings here; we don't clean them */
-        char *srv_thread_name; /* only statically allocated strings here; we don't clean them */
-
-        cfs_spinlock_t  srv_lock;
-
+	/** serialize /proc operations */
+	spinlock_t			srv_lock;
+	/** most often accessed fields */
+	/** chain thru all services */
+	cfs_list_t			srv_list;
+	/** service operations table */
+	struct ptlrpc_service_ops	srv_ops;
+	/** only statically allocated strings here; we don't clean them */
+	char				*srv_name;
+	/** only statically allocated strings here; we don't clean them */
+	char				*srv_thread_name;
+	/** service thread list */
+	cfs_list_t			srv_threads;
+	/** # of threads to create for each partition at initialization */
+	int				srv_nthrs_cpt_init;
+	/** upper limit on the number of threads for each partition */
+	int				srv_nthrs_cpt_limit;
         /** Root of /proc dir tree for this service */
-        cfs_proc_dir_entry_t *srv_procroot;
+	cfs_proc_dir_entry_t		*srv_procroot;
         /** Pointer to statistic data for this service */
-        struct lprocfs_stats *srv_stats;
-
-        /** List of free reply_states */
-        cfs_list_t      srv_free_rs_list;
-        /** waitq to run, when adding stuff to srv_free_rs_list */
-        cfs_waitq_t     srv_free_rs_waitq;
-
+	struct lprocfs_stats		*srv_stats;
+	/** # hp per lp reqs to handle */
+	int				srv_hpreq_ratio;
+	/** biggest request to receive */
+	int				srv_max_req_size;
+	/** biggest reply to send */
+	int				srv_max_reply_size;
+	/** size of individual buffers */
+	int				srv_buf_size;
+	/** # buffers to allocate in 1 group */
+	int				srv_nbuf_per_group;
+	/** Local portal on which to receive requests */
+	__u32				srv_req_portal;
+	/** Portal on the client to send replies to */
+	__u32				srv_rep_portal;
         /**
          * Tags for lu_context associated with this thread, see struct
          * lu_context.
          */
-        __u32           srv_ctx_tags;
-        /**
-         * if non-NULL called during thread creation (ptlrpc_start_thread())
-         * to initialize service specific per-thread state.
-         */
-        int (*srv_init)(struct ptlrpc_thread *thread);
-        /**
-         * if non-NULL called during thread shutdown (ptlrpc_main()) to
-         * destruct state created by ->srv_init().
-         */
-        void (*srv_done)(struct ptlrpc_thread *thread);
+	__u32				srv_ctx_tags;
+	/** soft watchdog timeout multiplier */
+	int				srv_watchdog_factor;
+	/** under unregister_service */
+	unsigned			srv_is_stopping:1;
+
+	/** max # request buffers in history per partition */
+	int				srv_hist_nrqbds_cpt_max;
+	/** number of CPTs this service bound on */
+	int				srv_ncpts;
+	/** CPTs array this service bound on */
+	__u32				*srv_cpts;
+	/** 2^srv_cpt_bits >= cfs_cpt_number(srv_cptable) */
+	int				srv_cpt_bits;
+	/** CPT table this service is running over */
+	struct cfs_cpt_table		*srv_cptable;
+	/**
+	 * partition data for ptlrpc service
+	 */
+	struct ptlrpc_service_part	*srv_parts[0];
+};
 
-        //struct ptlrpc_srv_ni srv_interfaces[0];
+/**
+ * Definition of PortalRPC service partition data.
+ * Although a service has only one instance right now, it will soon
+ * have multiple instances (one per CPT).
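+ *
+ * As an illustrative sketch only (not part of this patch), per-CPT
+ * instances are reached through srv_parts[], e.g. with the
+ * ptlrpc_service_for_each_part() helper defined further below;
+ * "svc" and do_something() are hypothetical stand-ins:
+ *
+ *	struct ptlrpc_service_part *svcpt;
+ *	int i;
+ *
+ *	ptlrpc_service_for_each_part(svcpt, i, svc)
+ *		do_something(svcpt);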
+ *
+ * It has four locks:
+ * \a scp_lock
+ *    serialize operations on rqbd and requests waiting for preprocess
+ * \a scp_req_lock
+ *    serialize operations on active requests sent to this portal
+ * \a scp_at_lock
+ *    serialize adaptive timeout stuff
+ * \a scp_rep_lock
+ *    serialize operations on the RS list (reply states)
+ *
+ * For now we have no use case that takes two or more of these locks
+ * at the same time, so there is no lock ordering issue.
+ */
+struct ptlrpc_service_part {
+	/** back reference to owner */
+	struct ptlrpc_service		*scp_service __cfs_cacheline_aligned;
+	/* CPT id, reserved */
+	int				scp_cpt;
+	/** monotonically increasing thread id */
+	int				scp_thr_nextid;
+	/** # of starting threads */
+	int				scp_nthrs_starting;
+	/** # of stopping threads, reserved for shrinking threads */
+	int				scp_nthrs_stopping;
+	/** # running threads */
+	int				scp_nthrs_running;
+	/** service threads list */
+	cfs_list_t			scp_threads;
+
+	/**
+	 * serialize the following fields, used for protecting the
+	 * rqbd list and incoming requests waiting for preprocess;
+	 * thread starting and stopping are also protected by this lock.
+	 */
+	spinlock_t			scp_lock __cfs_cacheline_aligned;
+	/** total # req buffer descs allocated */
+	int				scp_nrqbds_total;
+	/** # posted request buffers for receiving */
+	int				scp_nrqbds_posted;
+	/** in progress of allocating rqbd */
+	int				scp_rqbd_allocating;
+	/** # incoming reqs */
+	int				scp_nreqs_incoming;
+	/** request buffers to be reposted */
+	cfs_list_t			scp_rqbd_idle;
+	/** req buffers receiving */
+	cfs_list_t			scp_rqbd_posted;
+	/** incoming reqs */
+	cfs_list_t			scp_req_incoming;
+	/** timeout before re-posting reqs, in tick */
+	cfs_duration_t			scp_rqbd_timeout;
+	/**
+	 * all threads sleep on this. This wait-queue is signalled when a new
+	 * incoming request arrives and when a difficult reply has to be handled.
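+	 * A service thread would sleep on it roughly as follows (a sketch
+	 * only; have_work() is a hypothetical wake-up condition):
+	 *
+	 *	struct l_wait_info lwi = LWI_TIMEOUT(timeout, NULL, NULL);
+	 *
+	 *	l_wait_event(svcpt->scp_waitq, have_work(svcpt), &lwi);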
+	 */
+	cfs_waitq_t			scp_waitq;
+
+	/** request history */
+	cfs_list_t			scp_hist_reqs;
+	/** request buffer history */
+	cfs_list_t			scp_hist_rqbds;
+	/** # request buffers in history */
+	int				scp_hist_nrqbds;
+	/** sequence number for request */
+	__u64				scp_hist_seq;
+	/** highest seq culled from history */
+	__u64				scp_hist_seq_culled;
+
+	/**
+	 * serialize the following fields, used for processing requests
+	 * sent to this portal
+	 */
+	spinlock_t			scp_req_lock __cfs_cacheline_aligned;
+	/** # reqs being served, i.e. in either of the NRS heads below */
+	int				scp_nreqs_active;
+	/** # HPreqs being served */
+	int				scp_nhreqs_active;
+	/** # hp requests handled */
+	int				scp_hreq_count;
+
+	/** NRS head for regular requests */
+	struct ptlrpc_nrs		scp_nrs_reg;
+	/** NRS head for HP requests; this is only valid for services that can
+	 *  handle HP requests */
+	struct ptlrpc_nrs		*scp_nrs_hp;
+
+	/** AT stuff */
+	/** @{ */
+	/**
+	 * serialize the following fields, used for changes on
+	 * adaptive timeout
+	 */
+	spinlock_t			scp_at_lock __cfs_cacheline_aligned;
+	/** estimated rpc service time */
+	struct adaptive_timeout		scp_at_estimate;
+	/** reqs waiting for replies */
+	struct ptlrpc_at_array		scp_at_array;
+	/** early reply timer */
+	cfs_timer_t			scp_at_timer;
+	/** debug */
+	cfs_time_t			scp_at_checktime;
+	/** check early replies */
+	unsigned			scp_at_check;
+	/** @} */
+
+	/**
+	 * serialize the following fields, used for processing
+	 * replies for this portal
+	 */
+	spinlock_t			scp_rep_lock __cfs_cacheline_aligned;
+	/** all the active replies */
+	cfs_list_t			scp_rep_active;
+#ifndef __KERNEL__
+	/** replies waiting for service */
+	cfs_list_t			scp_rep_queue;
+#endif
+	/** List of free reply_states */
+	cfs_list_t			scp_rep_idle;
+	/** waitq to wake up when adding entries to scp_rep_idle */
+	cfs_waitq_t			scp_rep_waitq;
+	/** # 'difficult' replies */
+	cfs_atomic_t			scp_nreps_difficult;
 };
 
+#define ptlrpc_service_for_each_part(part, i, svc)			\
+	for (i = 0;							\
+	     i < (svc)->srv_ncpts &&					\
+	     (svc)->srv_parts != NULL &&				\
+	     ((part) = (svc)->srv_parts[i]) != NULL; i++)
+
 /**
  * Declaration of ptlrpcd control structure
  */
 struct ptlrpcd_ctl {
+	/**
+	 * Ptlrpc thread control flags (LIOD_START, LIOD_STOP, LIOD_FORCE)
+	 */
+	unsigned long			pc_flags;
+	/**
+	 * Thread lock protecting structure fields.
+	 */
+	spinlock_t			pc_lock;
+	/**
+	 * Start completion.
+	 */
+	struct completion		pc_starting;
+	/**
+	 * Stop completion.
+	 */
+	struct completion		pc_finishing;
         /**
-         * Ptlrpc thread control flags (LIOD_START, LIOD_STOP, LIOD_FORCE)
+	 * Thread requests set.
          */
-        unsigned long pc_flags;
+	struct ptlrpc_request_set	*pc_set;
         /**
-         * Thread lock protecting structure fields.
+	 * Thread name used in cfs_daemonize()
          */
-        cfs_spinlock_t pc_lock;
+	char				pc_name[16];
         /**
-         * Start completion.
+	 * Environment for request interpreters to run in.
          */
-        cfs_completion_t pc_starting;
+	struct lu_env			pc_env;
         /**
-         * Stop completion.
+	 * Index of ptlrpcd thread in the array.
          */
-        cfs_completion_t pc_finishing;
+	int				pc_index;
         /**
-         * Thread requests set.
+	 * Number of the ptlrpcd's partners.
          */
-        struct ptlrpc_request_set *pc_set;
+	int				pc_npartners;
         /**
-         * Thread name used in cfs_daemonize()
+	 * Pointer to the array of partners' ptlrpcd_ctl structure.
          */
-        char pc_name[16];
+	struct ptlrpcd_ctl		**pc_partners;
         /**
-         * Environment for request interpreters to run in.
+	 * Record the partner index to be processed next.
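+	 * For illustration only (not code from this patch), a round-robin
+	 * pick over the partner array above could advance this cursor as:
+	 *
+	 *	partner = pc->pc_partners[pc->pc_cursor++];
+	 *	if (pc->pc_cursor >= pc->pc_npartners)
+	 *		pc->pc_cursor = 0;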
*/ - struct lu_env pc_env; + int pc_cursor; #ifndef __KERNEL__ /** * Async rpcs flag to make sure that ptlrpcd_check() is called only @@ -1194,9 +2404,56 @@ enum ptlrpcd_ctl_flags { /** * This is a recovery ptlrpc thread. */ - LIOD_RECOVERY = 1 << 3 + LIOD_RECOVERY = 1 << 3, + /** + * The ptlrpcd is bound to some CPU core. + */ + LIOD_BIND = 1 << 4, }; +/** + * \addtogroup nrs + * @{ + * + * Service compatibility function; policy is compatible with all services. + * + * \param[in] svc The service the policy is attempting to register with. + * \param[in] desc The policy descriptor + * + * \retval true The policy is compatible with the NRS head + * + * \see ptlrpc_nrs_pol_desc::pd_compat() + */ +static inline bool +nrs_policy_compat_all(struct ptlrpc_service *svc, + const struct ptlrpc_nrs_pol_desc *desc) +{ + return true; +} + +/** + * Service compatibility function; policy is compatible with only a specific + * service which is identified by its human-readable name at + * ptlrpc_service::srv_name. + * + * \param[in] svc The service the policy is attempting to register with. + * \param[in] desc The policy descriptor + * + * \retval false The policy is not compatible with the NRS head + * \retval true The policy is compatible with the NRS head + * + * \see ptlrpc_nrs_pol_desc::pd_compat() + */ +static inline bool +nrs_policy_compat_one(struct ptlrpc_service *svc, + const struct ptlrpc_nrs_pol_desc *desc) +{ + LASSERT(desc->pd_compat_svc_name != NULL); + return strcmp(svc->srv_name, desc->pd_compat_svc_name) == 0; +} + +/** @} nrs */ + /* ptlrpc/events.c */ extern lnet_handle_eq_t ptlrpc_eq_h; extern int ptlrpc_uuid_to_peer(struct obd_uuid *uuid, @@ -1206,12 +2463,14 @@ extern int ptlrpc_uuid_to_peer(struct obd_uuid *uuid, * underlying buffer * @{ */ -extern void request_out_callback (lnet_event_t *ev); +extern void request_out_callback(lnet_event_t *ev); extern void reply_in_callback(lnet_event_t *ev); -extern void client_bulk_callback (lnet_event_t *ev); +extern void client_bulk_callback(lnet_event_t *ev); extern void request_in_callback(lnet_event_t *ev); extern void reply_out_callback(lnet_event_t *ev); -extern void server_bulk_callback (lnet_event_t *ev); +#ifdef HAVE_SERVER_SUPPORT +extern void server_bulk_callback(lnet_event_t *ev); +#endif /** @} */ /* ptlrpc/connection.c */ @@ -1229,29 +2488,36 @@ extern lnet_pid_t ptl_get_pid(void); * Actual interfacing with LNet to put/get/register/unregister stuff * @{ */ +#ifdef HAVE_SERVER_SUPPORT +struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_exp(struct ptlrpc_request *req, + unsigned npages, unsigned max_brw, + unsigned type, unsigned portal); int ptlrpc_start_bulk_transfer(struct ptlrpc_bulk_desc *desc); void ptlrpc_abort_bulk(struct ptlrpc_bulk_desc *desc); -int ptlrpc_register_bulk(struct ptlrpc_request *req); -int ptlrpc_unregister_bulk(struct ptlrpc_request *req, int async); static inline int ptlrpc_server_bulk_active(struct ptlrpc_bulk_desc *desc) { - int rc; + int rc; - LASSERT(desc != NULL); + LASSERT(desc != NULL); - cfs_spin_lock(&desc->bd_lock); - rc = desc->bd_network_rw; - cfs_spin_unlock(&desc->bd_lock); - return rc; + spin_lock(&desc->bd_lock); + rc = desc->bd_md_count; + spin_unlock(&desc->bd_lock); + return rc; } +#endif + +int ptlrpc_register_bulk(struct ptlrpc_request *req); +int ptlrpc_unregister_bulk(struct ptlrpc_request *req, int async); static inline int ptlrpc_client_bulk_active(struct ptlrpc_request *req) { - struct ptlrpc_bulk_desc *desc = req->rq_bulk; + struct ptlrpc_bulk_desc *desc; int rc; LASSERT(req != NULL); + 
desc = req->rq_bulk; if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK) && req->rq_bulk_deadline > cfs_time_current_sec()) @@ -1260,10 +2526,10 @@ static inline int ptlrpc_client_bulk_active(struct ptlrpc_request *req) if (!desc) return 0; - cfs_spin_lock(&desc->bd_lock); - rc = desc->bd_network_rw; - cfs_spin_unlock(&desc->bd_lock); - return rc; + spin_lock(&desc->bd_lock); + rc = desc->bd_md_count; + spin_unlock(&desc->bd_lock); + return rc; } #define PTLRPC_REPLY_MAYBE_DIFFICULT 0x01 @@ -1275,7 +2541,7 @@ int ptlrpc_error(struct ptlrpc_request *req); void ptlrpc_resend_req(struct ptlrpc_request *request); int ptlrpc_at_get_net_latency(struct ptlrpc_request *req); int ptl_send_rpc(struct ptlrpc_request *request, int noreply); -int ptlrpc_register_rqbd (struct ptlrpc_request_buffer_desc *rqbd); +int ptlrpc_register_rqbd(struct ptlrpc_request_buffer_desc *rqbd); /** @} */ /* ptlrpc/client.c */ @@ -1298,6 +2564,8 @@ void ptlrpc_cleanup_imp(struct obd_import *imp); void ptlrpc_abort_set(struct ptlrpc_request_set *set); struct ptlrpc_request_set *ptlrpc_prep_set(void); +struct ptlrpc_request_set *ptlrpc_prep_fcset(int max, set_producer_func func, + void *arg); int ptlrpc_set_add_cb(struct ptlrpc_request_set *set, set_interpreter_func fn, void *data); int ptlrpc_set_next_timeout(struct ptlrpc_request_set *); @@ -1308,8 +2576,8 @@ void ptlrpc_interrupted_set(void *data); void ptlrpc_mark_interrupted(struct ptlrpc_request *req); void ptlrpc_set_destroy(struct ptlrpc_request_set *); void ptlrpc_set_add_req(struct ptlrpc_request_set *, struct ptlrpc_request *); -int ptlrpc_set_add_new_req(struct ptlrpcd_ctl *pc, - struct ptlrpc_request *req); +void ptlrpc_set_add_new_req(struct ptlrpcd_ctl *pc, + struct ptlrpc_request *req); void ptlrpc_free_rq_pool(struct ptlrpc_request_pool *pool); void ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq); @@ -1333,11 +2601,6 @@ struct ptlrpc_request *ptlrpc_request_alloc_pack(struct obd_import *imp, int ptlrpc_request_bufs_pack(struct ptlrpc_request *request, __u32 version, int opcode, char **bufs, struct ptlrpc_cli_ctx *ctx); -struct ptlrpc_request *ptlrpc_prep_fakereq(struct obd_import *imp, - unsigned int timeout, - ptlrpc_interpterer_t interpreter); -void ptlrpc_fakereq_finished(struct ptlrpc_request *req); - struct ptlrpc_request *ptlrpc_prep_req(struct obd_import *imp, __u32 version, int opcode, int count, __u32 *lengths, char **bufs); @@ -1348,32 +2611,106 @@ struct ptlrpc_request *ptlrpc_prep_req_pool(struct obd_import *imp, void ptlrpc_req_finished(struct ptlrpc_request *request); void ptlrpc_req_finished_with_imp_lock(struct ptlrpc_request *request); struct ptlrpc_request *ptlrpc_request_addref(struct ptlrpc_request *req); -struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp (struct ptlrpc_request *req, - int npages, int type, int portal); -struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_exp(struct ptlrpc_request *req, - int npages, int type, int portal); -void ptlrpc_free_bulk(struct ptlrpc_bulk_desc *bulk); -void ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc, - cfs_page_t *page, int pageoffset, int len); +struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req, + unsigned npages, unsigned max_brw, + unsigned type, unsigned portal); +void __ptlrpc_free_bulk(struct ptlrpc_bulk_desc *bulk, int pin); +static inline void ptlrpc_free_bulk_pin(struct ptlrpc_bulk_desc *bulk) +{ + __ptlrpc_free_bulk(bulk, 1); +} +static inline void ptlrpc_free_bulk_nopin(struct ptlrpc_bulk_desc *bulk) +{ + __ptlrpc_free_bulk(bulk, 0); +} +void 
__ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
+			     cfs_page_t *page, int pageoffset, int len, int);
+static inline void ptlrpc_prep_bulk_page_pin(struct ptlrpc_bulk_desc *desc,
+					     cfs_page_t *page, int pageoffset,
+					     int len)
+{
+	__ptlrpc_prep_bulk_page(desc, page, pageoffset, len, 1);
+}
+
+static inline void ptlrpc_prep_bulk_page_nopin(struct ptlrpc_bulk_desc *desc,
+					       cfs_page_t *page, int pageoffset,
+					       int len)
+{
+	__ptlrpc_prep_bulk_page(desc, page, pageoffset, len, 0);
+}
+
 void ptlrpc_retain_replayable_request(struct ptlrpc_request *req,
                                       struct obd_import *imp);
 __u64 ptlrpc_next_xid(void);
 __u64 ptlrpc_sample_next_xid(void);
 __u64 ptlrpc_req_xid(struct ptlrpc_request *request);
 
+/* Set of routines to run a function in ptlrpcd context */
+void *ptlrpcd_alloc_work(struct obd_import *imp,
+			 int (*cb)(const struct lu_env *, void *), void *data);
+void ptlrpcd_destroy_work(void *handler);
+int ptlrpcd_queue_work(void *handler);
+
 /** @} */
+struct ptlrpc_service_buf_conf {
+	/* # of buffers to allocate when growing the pool */
+	unsigned int			bc_nbufs;
+	/* buffer size to post */
+	unsigned int			bc_buf_size;
+	/* portal to listen for requests on */
+	unsigned int			bc_req_portal;
+	/* portal to send replies to */
+	unsigned int			bc_rep_portal;
+	/* maximum request size to be accepted for this service */
+	unsigned int			bc_req_max_size;
+	/* maximum reply size this service can ever send */
+	unsigned int			bc_rep_max_size;
+};
+
+struct ptlrpc_service_thr_conf {
+	/* threadname should be 8 characters or less - 6 will be added on */
+	char				*tc_thr_name;
+	/* thread-count increase factor per CPU */
+	unsigned int			tc_thr_factor;
+	/* # of service threads to start on each partition at initialization */
+	unsigned int			tc_nthrs_init;
+	/*
+	 * low-water mark of the per-partition thread upper limit while
+	 * running; service availability may be impacted if the thread
+	 * count drops below this value. It can be ZERO if the service
+	 * doesn't require CPU affinity or there is only one partition.
+	 */
+	unsigned int			tc_nthrs_base;
+	/* "soft" limit on the total number of threads */
+	unsigned int			tc_nthrs_max;
+	/* user-specified thread count; it is validated against the
+	 * other members of this structure.
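+	 * For example (illustrative numbers only): with tc_nthrs_base = 8
+	 * and tc_thr_factor = 4, a 16-core partition would get an upper
+	 * limit of roughly 8 + 16 * 4 = 72 threads, clamped by tc_nthrs_max,
+	 * and a tc_nthrs_user value is checked against these same bounds.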
+	 */
+	unsigned int			tc_nthrs_user;
+	/* set NUMA node affinity for service threads */
+	unsigned int			tc_cpu_affinity;
+	/* Tags for lu_context associated with service thread */
+	__u32				tc_ctx_tags;
+};
+
+struct ptlrpc_service_cpt_conf {
+	struct cfs_cpt_table		*cc_cptable;
+	/* string pattern to describe CPTs for a service */
+	char				*cc_pattern;
+};
 
 struct ptlrpc_service_conf {
-        int psc_nbufs;
-        int psc_bufsize;
-        int psc_max_req_size;
-        int psc_max_reply_size;
-        int psc_req_portal;
-        int psc_rep_portal;
-        int psc_watchdog_factor;
-        int psc_min_threads;
-        int psc_max_threads;
-        __u32 psc_ctx_tags;
+	/* service name */
+	char				*psc_name;
+	/* soft watchdog timeout multiplier to print stuck service traces */
+	unsigned int			psc_watchdog_factor;
+	/* buffer information */
+	struct ptlrpc_service_buf_conf	psc_buf;
+	/* thread information */
+	struct ptlrpc_service_thr_conf	psc_thr;
+	/* CPU partition information */
+	struct ptlrpc_service_cpt_conf	psc_cpt;
+	/* function table */
+	struct ptlrpc_service_ops	psc_ops;
 };
 
 /* ptlrpc/service.c */
@@ -1383,38 +2720,22 @@ struct ptlrpc_service_conf {
  *
  * @{
  */
-void ptlrpc_save_lock (struct ptlrpc_request *req,
-                       struct lustre_handle *lock, int mode, int no_ack);
+void ptlrpc_save_lock(struct ptlrpc_request *req,
+		      struct lustre_handle *lock, int mode, int no_ack);
 void ptlrpc_commit_replies(struct obd_export *exp);
-void ptlrpc_dispatch_difficult_reply (struct ptlrpc_reply_state *rs);
-void ptlrpc_schedule_difficult_reply (struct ptlrpc_reply_state *rs);
-struct ptlrpc_service *ptlrpc_init_svc_conf(struct ptlrpc_service_conf *c,
-                                            svc_handler_t h, char *name,
-                                            struct proc_dir_entry *proc_entry,
-                                            svcreq_printfn_t prntfn,
-                                            char *threadname);
-
-struct ptlrpc_service *ptlrpc_init_svc(int nbufs, int bufsize, int max_req_size,
-                                       int max_reply_size,
-                                       int req_portal, int rep_portal,
-                                       int watchdog_factor,
-                                       svc_handler_t, char *name,
-                                       cfs_proc_dir_entry_t *proc_entry,
-                                       svcreq_printfn_t,
-                                       int min_threads, int max_threads,
-                                       char *threadname, __u32 ctx_tags,
-                                       svc_hpreq_handler_t);
+void ptlrpc_dispatch_difficult_reply(struct ptlrpc_reply_state *rs);
+void ptlrpc_schedule_difficult_reply(struct ptlrpc_reply_state *rs);
+int ptlrpc_hpreq_handler(struct ptlrpc_request *req);
+struct ptlrpc_service *ptlrpc_register_service(
+				struct ptlrpc_service_conf *conf,
+				struct proc_dir_entry *proc_entry);
 void ptlrpc_stop_all_threads(struct ptlrpc_service *svc);
-int ptlrpc_start_threads(struct obd_device *dev, struct ptlrpc_service *svc);
-int ptlrpc_start_thread(struct obd_device *dev, struct ptlrpc_service *svc);
+int ptlrpc_start_threads(struct ptlrpc_service *svc);
 int ptlrpc_unregister_service(struct ptlrpc_service *service);
-int liblustre_check_services (void *arg);
+int liblustre_check_services(void *arg);
 void ptlrpc_daemonize(char *name);
 int ptlrpc_service_health_check(struct ptlrpc_service *);
-void ptlrpc_hpreq_reorder(struct ptlrpc_request *req);
-void ptlrpc_server_active_request_inc(struct ptlrpc_request *req);
-void ptlrpc_server_active_request_dec(struct ptlrpc_request *req);
 void ptlrpc_server_drop_request(struct ptlrpc_request *req);
 
 #ifdef __KERNEL__
@@ -1425,12 +2746,6 @@ void ptlrpc_hr_fini(void);
 # define ptlrpc_hr_fini() do {} while(0)
 #endif
 
-struct ptlrpc_svc_data {
-        char *name;
-        struct ptlrpc_service *svc;
-        struct ptlrpc_thread *thread;
-        struct obd_device *dev;
-};
 /** @} */
 
 /* ptlrpc/import.c */
@@ -1438,10 +2753,12 @@ struct ptlrpc_svc_data {
  * Import API
  * @{
  */
-int ptlrpc_connect_import(struct obd_import *imp, char * new_uuid);
+int ptlrpc_connect_import(struct obd_import *imp); int ptlrpc_init_import(struct obd_import *imp); int ptlrpc_disconnect_import(struct obd_import *imp, int noclose); int ptlrpc_import_recovery_state_machine(struct obd_import *imp); +void deuuidify(char *uuid, const char *prefix, char **uuid_start, + int *uuid_len); /* ptlrpc/pack_generic.c */ int ptlrpc_reconnect_import(struct obd_import *imp); @@ -1485,7 +2802,7 @@ void *lustre_msg_buf(struct lustre_msg *m, int n, int minlen); int lustre_msg_buflen(struct lustre_msg *m, int n); void lustre_msg_set_buflen(struct lustre_msg *m, int n, int len); int lustre_msg_bufcount(struct lustre_msg *m); -char *lustre_msg_string (struct lustre_msg *m, int n, int max_len); +char *lustre_msg_string(struct lustre_msg *m, int n, int max_len); __u32 lustre_msghdr_get_flags(struct lustre_msg *msg); void lustre_msghdr_set_flags(struct lustre_msg *msg, __u32 flags); __u32 lustre_msg_get_flags(struct lustre_msg *msg); @@ -1514,8 +2831,9 @@ int lustre_msg_is_v1(struct lustre_msg *msg); __u32 lustre_msg_get_magic(struct lustre_msg *msg); __u32 lustre_msg_get_timeout(struct lustre_msg *msg); __u32 lustre_msg_get_service_time(struct lustre_msg *msg); +char *lustre_msg_get_jobid(struct lustre_msg *msg); __u32 lustre_msg_get_cksum(struct lustre_msg *msg); -#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 9, 0, 0) +#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 7, 50, 0) __u32 lustre_msg_calc_cksum(struct lustre_msg *msg, int compat18); #else # warning "remove checksum compatibility support for b1_8" @@ -1534,6 +2852,7 @@ void ptlrpc_req_set_repsize(struct ptlrpc_request *req, int count, __u32 *sizes) void ptlrpc_request_set_replen(struct ptlrpc_request *req); void lustre_msg_set_timeout(struct lustre_msg *msg, __u32 timeout); void lustre_msg_set_service_time(struct lustre_msg *msg, __u32 service_time); +void lustre_msg_set_jobid(struct lustre_msg *msg, char *jobid); void lustre_msg_set_cksum(struct lustre_msg *msg, __u32 cksum); static inline void @@ -1608,17 +2927,17 @@ ptlrpc_client_recv(struct ptlrpc_request *req) static inline int ptlrpc_client_recv_or_unlink(struct ptlrpc_request *req) { - int rc; - - cfs_spin_lock(&req->rq_lock); - if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) && - req->rq_reply_deadline > cfs_time_current_sec()) { - cfs_spin_unlock(&req->rq_lock); - return 1; - } - rc = req->rq_receiving_reply || req->rq_must_unlink; - cfs_spin_unlock(&req->rq_lock); - return rc; + int rc; + + spin_lock(&req->rq_lock); + if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) && + req->rq_reply_deadline > cfs_time_current_sec()) { + spin_unlock(&req->rq_lock); + return 1; + } + rc = req->rq_receiving_reply || req->rq_must_unlink; + spin_unlock(&req->rq_lock); + return rc; } static inline void @@ -1685,12 +3004,28 @@ static inline int ptlrpc_send_limit_expired(struct ptlrpc_request *req) static inline int ptlrpc_no_resend(struct ptlrpc_request *req) { - if (!req->rq_no_resend && ptlrpc_send_limit_expired(req)) { - cfs_spin_lock(&req->rq_lock); - req->rq_no_resend = 1; - cfs_spin_unlock(&req->rq_lock); - } - return req->rq_no_resend; + if (!req->rq_no_resend && ptlrpc_send_limit_expired(req)) { + spin_lock(&req->rq_lock); + req->rq_no_resend = 1; + spin_unlock(&req->rq_lock); + } + return req->rq_no_resend; +} + +static inline int +ptlrpc_server_get_timeout(struct ptlrpc_service_part *svcpt) +{ + int at = AT_OFF ? 
0 : at_get(&svcpt->scp_at_estimate);
+
+	return svcpt->scp_service->srv_watchdog_factor *
+	       max_t(int, at, obd_timeout);
+}
+
+static inline struct ptlrpc_service *
+ptlrpc_req2svc(struct ptlrpc_request *req)
+{
+	LASSERT(req->rq_rqbd != NULL);
+	return req->rq_rqbd->rqbd_svcpt->scp_service;
 }
 
 /* ldlm/ldlm_lib.c */
@@ -1708,17 +3043,22 @@ int client_disconnect_export(struct obd_export *exp);
 int client_import_add_conn(struct obd_import *imp, struct obd_uuid *uuid,
                            int priority);
 int client_import_del_conn(struct obd_import *imp, struct obd_uuid *uuid);
+int client_import_find_conn(struct obd_import *imp, lnet_nid_t peer,
+			    struct obd_uuid *uuid);
 int import_set_conn_priority(struct obd_import *imp, struct obd_uuid *uuid);
 void client_destroy_import(struct obd_import *imp);
 /** @} */
 
+#ifdef HAVE_SERVER_SUPPORT
 int server_disconnect_export(struct obd_export *exp);
+#endif
 
 /* ptlrpc/pinger.c */
 /**
  * Pinger API (client side only)
  * @{
  */
+extern int suppress_pings;
 enum timeout_event {
         TIMEOUT_GRANT = 1
 };
@@ -1744,26 +3084,43 @@ void ping_evictor_stop(void);
 int ptlrpc_check_and_wait_suspend(struct ptlrpc_request *req);
 /** @} */
 
-/* ptlrpc/ptlrpcd.c */
+/* ptlrpc daemon bind policy */
+typedef enum {
+	/* all ptlrpcd threads are in free mode */
+	PDB_POLICY_NONE		= 1,
+	/* all ptlrpcd threads are in bound mode */
+	PDB_POLICY_FULL		= 2,
+	/* ... */
+	PDB_POLICY_PAIR		= 3,
+	/* ... ,
+	 * means each ptlrpcd[X] has two partners: thread[X-1] and thread[X+1].
+	 * If the kernel supports NUMA, ptlrpcd threads are bound and
+	 * grouped by NUMA node */
+	PDB_POLICY_NEIGHBOR	= 4,
+} pdb_policy_t;
+
+/* ptlrpc daemon load policy
+ * It is the caller's duty to specify how to push an async RPC into a
+ * ptlrpcd queue; this is not enforced and is affected by
+ * "ptlrpcd_bind_policy". If the policy is "PDB_POLICY_FULL", the RPC will
+ * be processed by the selected ptlrpcd; otherwise the RPC may be processed
+ * by either the selected ptlrpcd or its partner, depending on which is
+ * scheduled first, to accelerate RPC processing. */
+typedef enum {
+	/* on the same CPU core as the caller */
+	PDL_POLICY_SAME		= 1,
+	/* within the same CPU partition, but not the same core as the caller */
+	PDL_POLICY_LOCAL	= 2,
+	/* round-robin on all CPU cores, but not the same core as the caller */
+	PDL_POLICY_ROUND	= 3,
+	/* the specified CPU core is preferred, but not enforced */
+	PDL_POLICY_PREFERRED	= 4,
+} pdl_policy_t;
 
-/**
- * Ptlrpcd scope is a set of two threads: ptlrpcd-foo and ptlrpcd-foo-rcv,
- * these threads are used to asynchronously send requests queued with
- * ptlrpcd_add_req(req, PCSOPE_FOO), and to handle completion call-backs for
- * such requests. Multiple scopes are needed to avoid dead-locks.
- */
-enum ptlrpcd_scope {
-        /** Scope of bulk read-write rpcs. */
-        PSCOPE_BRW,
-        /** Everything else.
*/ - PSCOPE_OTHER, - PSCOPE_NR -}; - -int ptlrpcd_start(const char *name, struct ptlrpcd_ctl *pc); +/* ptlrpc/ptlrpcd.c */ void ptlrpcd_stop(struct ptlrpcd_ctl *pc, int force); +void ptlrpcd_free(struct ptlrpcd_ctl *pc); void ptlrpcd_wake(struct ptlrpc_request *req); -int ptlrpcd_add_req(struct ptlrpc_request *req, enum ptlrpcd_scope scope); +void ptlrpcd_add_req(struct ptlrpc_request *req, pdl_policy_t policy, int idx); void ptlrpcd_add_rqset(struct ptlrpc_request_set *set); int ptlrpcd_addref(void); void ptlrpcd_decref(void); @@ -1786,14 +3143,13 @@ static inline void ptlrpc_lprocfs_brw(struct ptlrpc_request *req, int bytes) {} /** @} */ /* ptlrpc/llog_server.c */ -int llog_origin_handle_create(struct ptlrpc_request *req); +int llog_origin_handle_open(struct ptlrpc_request *req); int llog_origin_handle_destroy(struct ptlrpc_request *req); int llog_origin_handle_prev_block(struct ptlrpc_request *req); int llog_origin_handle_next_block(struct ptlrpc_request *req); int llog_origin_handle_read_header(struct ptlrpc_request *req); int llog_origin_handle_close(struct ptlrpc_request *req); int llog_origin_handle_cancel(struct ptlrpc_request *req); -int llog_catinfo(struct ptlrpc_request *req); /* ptlrpc/llog_client.c */ extern struct llog_operations llog_client_ops;
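
For reference, a caller of the new registration API above might fill in
struct ptlrpc_service_conf roughly as follows. This is a minimal sketch,
not code from this patch: the service name, portal constants, buffer
sizes, thread counts and example_req_handler() are illustrative
stand-ins, and it assumes ptlrpc_register_service() returns an ERR_PTR()
value on failure.

	static int example_req_handler(struct ptlrpc_request *req);

	static struct ptlrpc_service *example_svc;

	static int example_register(struct proc_dir_entry *proc_entry)
	{
		struct ptlrpc_service_conf conf = {
			.psc_name		= "example",
			.psc_watchdog_factor	= 2,
			.psc_buf		= {
				.bc_nbufs		= 64,
				.bc_buf_size		= 8 * 1024,
				.bc_req_max_size	= 5 * 1024,
				.bc_rep_max_size	= 1024,
				.bc_req_portal		= LDLM_CB_REQUEST_PORTAL,
				.bc_rep_portal		= LDLM_CB_REPLY_PORTAL,
			},
			.psc_thr		= {
				.tc_thr_name		= "example",
				.tc_nthrs_init		= PTLRPC_NTHRS_INIT,
				.tc_nthrs_max		= 32,
			},
			.psc_ops		= {
				.so_req_handler		= example_req_handler,
			},
		};

		/* register the service and start its request buffers/threads */
		example_svc = ptlrpc_register_service(&conf, proc_entry);
		if (IS_ERR(example_svc))
			return PTR_ERR(example_svc);
		return 0;
	}

The service would later be torn down with
ptlrpc_unregister_service(example_svc).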