diff --git a/lustre/include/lustre_net.h b/lustre/include/lustre_net.h
index 77f1a98..f96ac9a 100644
--- a/lustre/include/lustre_net.h
+++ b/lustre/include/lustre_net.h
@@ -27,7 +27,7 @@
  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  *
- * Copyright (c) 2011, 2012, Whamcloud, Inc.
+ * Copyright (c) 2010, 2014, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -55,19 +55,9 @@
  * @{
  */

-#if defined(__linux__)
-#include <linux/lustre_net.h>
-#elif defined(__APPLE__)
-#include <darwin/lustre_net.h>
-#elif defined(__WINNT__)
-#include <winnt/lustre_net.h>
-#else
-#error Unsupported operating system.
-#endif
-
 #include <libcfs/libcfs.h>
-// #include <obd.h>
-#include <lnet/lnet.h>
+#include <lnet/nidstr.h>
+#include <lnet/api.h>
 #include <lustre/lustre_idl.h>
 #include <lustre_ha.h>
 #include <lustre_sec.h>
@@ -75,7 +65,6 @@
 #include <lprocfs_status.h>
 #include <lu_object.h>
 #include <lustre_req_layout.h>
-
 #include <obd_support.h>
 #include <lustre_ver.h>
@@ -83,28 +72,53 @@
 #define PTLRPC_MD_OPTIONS 0

 /**
- * Define maxima for bulk I/O
- * CAVEAT EMPTOR, with multinet (i.e. routers forwarding between networks)
- * these limits are system wide and not interface-local. */
-#define PTLRPC_MAX_BRW_BITS	LNET_MTU_BITS
-#define PTLRPC_MAX_BRW_SIZE	(1 << LNET_MTU_BITS)
-#define PTLRPC_MAX_BRW_PAGES	(PTLRPC_MAX_BRW_SIZE >> CFS_PAGE_SHIFT)
+ * Max # of bulk operations in one request.
+ * In order for the client and server to properly negotiate the maximum
+ * possible transfer size, PTLRPC_BULK_OPS_COUNT must be a power-of-two
+ * value. The client is free to limit the actual RPC size for any bulk
+ * transfer via cl_max_pages_per_rpc to some non-power-of-two value. */
+#define PTLRPC_BULK_OPS_BITS	2
+#define PTLRPC_BULK_OPS_COUNT	(1U << PTLRPC_BULK_OPS_BITS)
+/**
+ * PTLRPC_BULK_OPS_MASK is for the convenience of the client only, and
+ * should not be used on the server at all. Otherwise, it imposes a
+ * protocol limitation on the maximum RPC size that can be used by any
+ * RPC sent to that server in the future. Instead, the server should
+ * use the negotiated per-client ocd_brw_size to determine the bulk
+ * RPC count. */
+#define PTLRPC_BULK_OPS_MASK	(~((__u64)PTLRPC_BULK_OPS_COUNT - 1))
+
+/**
+ * Define maxima for bulk I/O.
+ *
+ * A single PTLRPC BRW request is sent via up to PTLRPC_BULK_OPS_COUNT
+ * of LNET_MTU sized RDMA transfers. Clients and servers negotiate the
+ * currently supported maximum between peers at connect via ocd_brw_size.
+ */
+#define PTLRPC_MAX_BRW_BITS	(LNET_MTU_BITS + PTLRPC_BULK_OPS_BITS)
+#define PTLRPC_MAX_BRW_SIZE	(1 << PTLRPC_MAX_BRW_BITS)
+#define PTLRPC_MAX_BRW_PAGES	(PTLRPC_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT)
+
+#define ONE_MB_BRW_SIZE		(1 << LNET_MTU_BITS)
+#define MD_MAX_BRW_SIZE		(1 << LNET_MTU_BITS)
+#define MD_MAX_BRW_PAGES	(MD_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT)
+#define DT_MAX_BRW_SIZE		PTLRPC_MAX_BRW_SIZE
+#define DT_MAX_BRW_PAGES	(DT_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT)
+#define OFD_MAX_BRW_SIZE	(1 << LNET_MTU_BITS)

 /* When PAGE_SIZE is a constant, we can check our arithmetic here with cpp!
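 *
 * For example, with the usual LNET_MTU_BITS = 20 (a 1 MB LNET MTU),
 * PTLRPC_BULK_OPS_BITS = 2 and 4 KB pages (PAGE_CACHE_SHIFT = 12),
 * the maxima above work out as follows (a sketch of the arithmetic only):
 *
 *   PTLRPC_MAX_BRW_BITS  = 20 + 2       -> 22
 *   PTLRPC_MAX_BRW_SIZE  = 1 << 22      -> 4 MB
 *   PTLRPC_MAX_BRW_PAGES = 4 MB >> 12   -> 1024 pages
 *
 * i.e. a single BRW RPC can carry up to four 1 MB LNET transfers.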
*/ -#ifdef __KERNEL__ -# if ((PTLRPC_MAX_BRW_PAGES & (PTLRPC_MAX_BRW_PAGES - 1)) != 0) -# error "PTLRPC_MAX_BRW_PAGES isn't a power of two" -# endif -# if (PTLRPC_MAX_BRW_SIZE != (PTLRPC_MAX_BRW_PAGES * CFS_PAGE_SIZE)) -# error "PTLRPC_MAX_BRW_SIZE isn't PTLRPC_MAX_BRW_PAGES * CFS_PAGE_SIZE" -# endif -# if (PTLRPC_MAX_BRW_SIZE > LNET_MTU) -# error "PTLRPC_MAX_BRW_SIZE too big" -# endif -# if (PTLRPC_MAX_BRW_PAGES > LNET_MAX_IOV) -# error "PTLRPC_MAX_BRW_PAGES too big" -# endif -#endif /* __KERNEL__ */ +#if ((PTLRPC_MAX_BRW_PAGES & (PTLRPC_MAX_BRW_PAGES - 1)) != 0) +# error "PTLRPC_MAX_BRW_PAGES isn't a power of two" +#endif +#if (PTLRPC_MAX_BRW_SIZE != (PTLRPC_MAX_BRW_PAGES * PAGE_CACHE_SIZE)) +# error "PTLRPC_MAX_BRW_SIZE isn't PTLRPC_MAX_BRW_PAGES * PAGE_CACHE_SIZE" +#endif +#if (PTLRPC_MAX_BRW_SIZE > LNET_MTU * PTLRPC_BULK_OPS_COUNT) +# error "PTLRPC_MAX_BRW_SIZE too big" +#endif +#if (PTLRPC_MAX_BRW_PAGES > LNET_MAX_IOV * PTLRPC_BULK_OPS_COUNT) +# error "PTLRPC_MAX_BRW_PAGES too big" +#endif #define PTLRPC_NTHRS_INIT 2 @@ -150,17 +164,17 @@ * * Examples * - * #define MDT_NTHRS_INIT 2 - * #define MDT_NTHRS_BASE 64 - * #define MDT_NTHRS_FACTOR 8 - * #define MDT_NTHRS_MAX 1024 + * #define MDS_NTHRS_INIT 2 + * #define MDS_NTHRS_BASE 64 + * #define MDS_NTHRS_FACTOR 8 + * #define MDS_NTHRS_MAX 1024 * * Example 1): * --------------------------------------------------------------------- * Server(A) has 16 cores, user configured it to 4 partitions so each * partition has 4 cores, then actual number of service threads on each * partition is: - * MDT_NTHRS_BASE(64) + cores(4) * MDT_NTHRS_FACTOR(8) = 96 + * MDS_NTHRS_BASE(64) + cores(4) * MDS_NTHRS_FACTOR(8) = 96 * * Total number of threads for the service is: * 96 * partitions(4) = 384 @@ -170,7 +184,7 @@ * Server(B) has 32 cores, user configured it to 4 partitions so each * partition has 8 cores, then actual number of service threads on each * partition is: - * MDT_NTHRS_BASE(64) + cores(8) * MDT_NTHRS_FACTOR(8) = 128 + * MDS_NTHRS_BASE(64) + cores(8) * MDS_NTHRS_FACTOR(8) = 128 * * Total number of threads for the service is: * 128 * partitions(4) = 512 @@ -180,22 +194,22 @@ * Server(B) has 96 cores, user configured it to 8 partitions so each * partition has 12 cores, then actual number of service threads on each * partition is: - * MDT_NTHRS_BASE(64) + cores(12) * MDT_NTHRS_FACTOR(8) = 160 + * MDS_NTHRS_BASE(64) + cores(12) * MDS_NTHRS_FACTOR(8) = 160 * * Total number of threads for the service is: * 160 * partitions(8) = 1280 * - * However, it's above the soft limit MDT_NTHRS_MAX, so we choose this number + * However, it's above the soft limit MDS_NTHRS_MAX, so we choose this number * as upper limit of threads number for each partition: - * MDT_NTHRS_MAX(1024) / partitions(8) = 128 + * MDS_NTHRS_MAX(1024) / partitions(8) = 128 * * Example 4): * --------------------------------------------------------------------- * Server(C) have a thousand of cores and user configured it to 32 partitions - * MDT_NTHRS_BASE(64) * 32 = 2048 + * MDS_NTHRS_BASE(64) * 32 = 2048 * - * which is already above soft limit MDT_NTHRS_MAX(1024), but we still need - * to guarantee that each partition has at least MDT_NTHRS_BASE(64) threads + * which is already above soft limit MDS_NTHRS_MAX(1024), but we still need + * to guarantee that each partition has at least MDS_NTHRS_BASE(64) threads * to keep service healthy, so total number of threads will just be 2048. 
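 *
 * Putting the four examples together, the effective per-partition thread
 * count follows this rule of thumb (a sketch; NTHRS_* stands for the
 * service's MDS_NTHRS_*-style constants above):
 *
 *   nthrs = NTHRS_BASE + ncores_per_partition * NTHRS_FACTOR
 *   nthrs = min(nthrs, NTHRS_MAX / nr_partitions)
 *   nthrs = max(nthrs, NTHRS_BASE)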
* * NB: we don't suggest to choose server with that many cores because backend @@ -232,19 +246,20 @@ * threads for each partition to keep service healthy, so total threads * number should be 24 * 8 = 192. * - * So with these constants, threads number wil be at the similar level + * So with these constants, threads number will be at the similar level * of old versions, unless target machine has over a hundred cores */ #define LDLM_THR_FACTOR 8 #define LDLM_NTHRS_INIT PTLRPC_NTHRS_INIT #define LDLM_NTHRS_BASE 24 -#define LDLM_NTHRS_MAX (cfs_num_online_cpus() == 1 ? 64 : 128) +#define LDLM_NTHRS_MAX (num_online_cpus() == 1 ? 64 : 128) -#define LDLM_BL_THREADS LDLM_NTHRS_AUTO_INIT -#define LDLM_NBUFS (64 * cfs_num_online_cpus()) -#define LDLM_BUFSIZE (8 * 1024) -#define LDLM_MAXREQSIZE (5 * 1024) -#define LDLM_MAXREPSIZE (1024) +#define LDLM_BL_THREADS LDLM_NTHRS_AUTO_INIT +#define LDLM_CLIENT_NBUFS 1 +#define LDLM_SERVER_NBUFS 64 +#define LDLM_BUFSIZE (8 * 1024) +#define LDLM_MAXREQSIZE (5 * 1024) +#define LDLM_MAXREPSIZE (1024) /* * MDS threads constants: @@ -252,45 +267,49 @@ * Please see examples in "Thread Constants", MDS threads number will be at * the comparable level of old versions, unless the server has many cores. */ -#ifndef MDT_MAX_THREADS -#define MDT_MAX_THREADS 1024 -#define MDT_MAX_OTHR_THREADS 256 - -#else /* MDT_MAX_THREADS */ -#if MDT_MAX_THREADS < PTLRPC_NTHRS_INIT -#undef MDT_MAX_THREADS -#define MDT_MAX_THREADS PTLRPC_NTHRS_INIT +#ifndef MDS_MAX_THREADS +#define MDS_MAX_THREADS 1024 +#define MDS_MAX_OTHR_THREADS 256 + +#else /* MDS_MAX_THREADS */ +#if MDS_MAX_THREADS < PTLRPC_NTHRS_INIT +#undef MDS_MAX_THREADS +#define MDS_MAX_THREADS PTLRPC_NTHRS_INIT #endif -#define MDT_MAX_OTHR_THREADS max(PTLRPC_NTHRS_INIT, MDT_MAX_THREADS / 2) +#define MDS_MAX_OTHR_THREADS max(PTLRPC_NTHRS_INIT, MDS_MAX_THREADS / 2) #endif /* default service */ -#define MDT_THR_FACTOR 8 -#define MDT_NTHRS_INIT PTLRPC_NTHRS_INIT -#define MDT_NTHRS_MAX MDT_MAX_THREADS -#define MDT_NTHRS_BASE min(64, MDT_NTHRS_MAX) +#define MDS_THR_FACTOR 8 +#define MDS_NTHRS_INIT PTLRPC_NTHRS_INIT +#define MDS_NTHRS_MAX MDS_MAX_THREADS +#define MDS_NTHRS_BASE min(64, MDS_NTHRS_MAX) /* read-page service */ -#define MDT_RDPG_THR_FACTOR 4 -#define MDT_RDPG_NTHRS_INIT PTLRPC_NTHRS_INIT -#define MDT_RDPG_NTHRS_MAX MDT_MAX_OTHR_THREADS -#define MDT_RDPG_NTHRS_BASE min(48, MDT_RDPG_NTHRS_MAX) +#define MDS_RDPG_THR_FACTOR 4 +#define MDS_RDPG_NTHRS_INIT PTLRPC_NTHRS_INIT +#define MDS_RDPG_NTHRS_MAX MDS_MAX_OTHR_THREADS +#define MDS_RDPG_NTHRS_BASE min(48, MDS_RDPG_NTHRS_MAX) /* these should be removed when we remove setattr service in the future */ -#define MDT_SETA_THR_FACTOR 4 -#define MDT_SETA_NTHRS_INIT PTLRPC_NTHRS_INIT -#define MDT_SETA_NTHRS_MAX MDT_MAX_OTHR_THREADS -#define MDT_SETA_NTHRS_BASE min(48, MDT_SETA_NTHRS_MAX) +#define MDS_SETA_THR_FACTOR 4 +#define MDS_SETA_NTHRS_INIT PTLRPC_NTHRS_INIT +#define MDS_SETA_NTHRS_MAX MDS_MAX_OTHR_THREADS +#define MDS_SETA_NTHRS_BASE min(48, MDS_SETA_NTHRS_MAX) /* non-affinity threads */ -#define MDT_OTHR_NTHRS_INIT PTLRPC_NTHRS_INIT -#define MDT_OTHR_NTHRS_MAX MDT_MAX_OTHR_THREADS +#define MDS_OTHR_NTHRS_INIT PTLRPC_NTHRS_INIT +#define MDS_OTHR_NTHRS_MAX MDS_MAX_OTHR_THREADS + +#define MDS_NBUFS 64 -#define MDS_NBUFS (64 * cfs_num_online_cpus()) /** * Assume file name length = FNAME_MAX = 256 (true for ext3). 
- * path name length = PATH_MAX = 4096 - * LOV MD size max = EA_MAX = 48000 (2000 stripes) + * path name length = PATH_MAX = 4096 + * LOV MD size max = EA_MAX = 24 * 2000 + * (NB: 24 is size of lov_ost_data) + * LOV LOGCOOKIE size max = 32 * 2000 + * (NB: 32 is size of llog_cookie) * symlink: FNAME_MAX + PATH_MAX <- largest * link: FNAME_MAX + PATH_MAX (mds_rec_link < mds_rec_create) * rename: FNAME_MAX + FNAME_MAX @@ -299,23 +318,95 @@ * MDS_MAXREQSIZE ~= 4736 bytes = * lustre_msg + ldlm_request + mdt_body + mds_rec_create + FNAME_MAX + PATH_MAX * MDS_MAXREPSIZE ~= 8300 bytes = lustre_msg + llog_header - * or, for mds_close() and mds_reint_unlink() on a many-OST filesystem: - * = 9210 bytes = lustre_msg + mdt_body + 160 * (easize + cookiesize) * * Realistic size is about 512 bytes (20 character name + 128 char symlink), * except in the open case where there are a large number of OSTs in a LOV. */ -#define MDS_MAXREPSIZE max(10 * 1024, 362 + LOV_MAX_STRIPE_COUNT * 56) -#define MDS_MAXREQSIZE MDS_MAXREPSIZE +#define MDS_MAXREQSIZE (5 * 1024) /* >= 4736 */ +#define MDS_MAXREPSIZE (9 * 1024) /* >= 8300 */ -/** MDS_BUFSIZE = max_reqsize + max sptlrpc payload size */ -#define MDS_BUFSIZE (MDS_MAXREQSIZE + 1024) +/** + * MDS incoming request with LOV EA + * 24 = sizeof(struct lov_ost_data), i.e: replay of opencreate + */ +#define MDS_LOV_MAXREQSIZE max(MDS_MAXREQSIZE, \ + 362 + LOV_MAX_STRIPE_COUNT * 24) +/** + * MDS outgoing reply with LOV EA + * + * NB: max reply size Lustre 2.4+ client can get from old MDS is: + * LOV_MAX_STRIPE_COUNT * (llog_cookie + lov_ost_data) + extra bytes + * + * but 2.4 or later MDS will never send reply with llog_cookie to any + * version client. This macro is defined for server side reply buffer size. + */ +#define MDS_LOV_MAXREPSIZE MDS_LOV_MAXREQSIZE + +/** + * This is the size of a maximum REINT_SETXATTR request: + * + * lustre_msg 56 (32 + 4 x 5 + 4) + * ptlrpc_body 184 + * mdt_rec_setxattr 136 + * lustre_capa 120 + * name 256 (XATTR_NAME_MAX) + * value 65536 (XATTR_SIZE_MAX) + */ +#define MDS_EA_MAXREQSIZE 66288 + +/** + * These are the maximum request and reply sizes (rounded up to 1 KB + * boundaries) for the "regular" MDS_REQUEST_PORTAL and MDS_REPLY_PORTAL. + */ +#define MDS_REG_MAXREQSIZE (((max(MDS_EA_MAXREQSIZE, \ + MDS_LOV_MAXREQSIZE) + 1023) >> 10) << 10) +#define MDS_REG_MAXREPSIZE MDS_REG_MAXREQSIZE + +/** + * The update request includes all of updates from the create, which might + * include linkea (4K maxim), together with other updates, we set it to 9K: + * lustre_msg + ptlrpc_body + UPDATE_BUF_SIZE (8K) + */ +#define OUT_MAXREQSIZE (9 * 1024) +#define OUT_MAXREPSIZE MDS_MAXREPSIZE + +/** MDS_BUFSIZE = max_reqsize (w/o LOV EA) + max sptlrpc payload size */ +#define MDS_BUFSIZE max(MDS_MAXREQSIZE + SPTLRPC_MAX_PAYLOAD, \ + 8 * 1024) + +/** + * MDS_REG_BUFSIZE should at least be MDS_REG_MAXREQSIZE + SPTLRPC_MAX_PAYLOAD. + * However, we need to allocate a much larger buffer for it because LNet + * requires each MD(rqbd) has at least MDS_REQ_MAXREQSIZE bytes left to avoid + * dropping of maximum-sized incoming request. So if MDS_REG_BUFSIZE is only a + * little larger than MDS_REG_MAXREQSIZE, then it can only fit in one request + * even there are about MDS_REG_MAX_REQSIZE bytes left in a rqbd, and memory + * utilization is very low. 
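+ *
+ * As a worked example (a sketch using the sizes above): with
+ * MDS_REG_MAXREQSIZE at about 65 KB, a buffer sized only a little
+ * above that could accept just one small request before LNet unlinks
+ * it; a single 1 KB request would strand the remaining ~65 KB, for a
+ * utilization of under 2%, whereas the 160 KB buffer chosen below
+ * never strands more than 65 KB.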
+ * + * In the meanwhile, size of rqbd can't be too large, because rqbd can't be + * reused until all requests fit in it have been processed and released, + * which means one long blocked request can prevent the rqbd be reused. + * Now we set request buffer size to 160 KB, so even each rqbd is unlinked + * from LNet with unused 65 KB, buffer utilization will be about 59%. + * Please check LU-2432 for details. + */ +#define MDS_REG_BUFSIZE max(MDS_REG_MAXREQSIZE + SPTLRPC_MAX_PAYLOAD, \ + 160 * 1024) + +/** + * OUT_BUFSIZE = max_out_reqsize + max sptlrpc payload (~1K) which is + * about 10K, for the same reason as MDS_REG_BUFSIZE, we also give some + * extra bytes to each request buffer to improve buffer utilization rate. + */ +#define OUT_BUFSIZE max(OUT_MAXREQSIZE + SPTLRPC_MAX_PAYLOAD, \ + 24 * 1024) /** FLD_MAXREQSIZE == lustre_msg + __u32 padding + ptlrpc_body + opc */ #define FLD_MAXREQSIZE (160) /** FLD_MAXREPSIZE == lustre_msg + ptlrpc_body */ #define FLD_MAXREPSIZE (152) +#define FLD_BUFSIZE (1 << 12) /** * SEQ_MAXREQSIZE == lustre_msg + __u32 padding + ptlrpc_body + opc + lu_range + @@ -324,12 +415,13 @@ /** SEQ_MAXREPSIZE == lustre_msg + ptlrpc_body + lu_range */ #define SEQ_MAXREPSIZE (152) +#define SEQ_BUFSIZE (1 << 12) /** MGS threads must be >= 3, see bug 22458 comment #28 */ #define MGS_NTHRS_INIT (PTLRPC_NTHRS_INIT + 1) #define MGS_NTHRS_MAX 32 -#define MGS_NBUFS (64 * cfs_num_online_cpus()) +#define MGS_NBUFS 64 #define MGS_BUFSIZE (8 * 1024) #define MGS_MAXREQSIZE (7 * 1024) #define MGS_MAXREPSIZE (9 * 1024) @@ -360,7 +452,7 @@ */ /* depress threads factor for VM with small memory size */ #define OSS_THR_FACTOR min_t(int, 8, \ - CFS_NUM_CACHEPAGES >> (28 - CFS_PAGE_SHIFT)) + NUM_CACHEPAGES >> (28 - PAGE_CACHE_SHIFT)) #define OSS_NTHRS_INIT (PTLRPC_NTHRS_INIT + 1) #define OSS_NTHRS_BASE 64 #define OSS_NTHRS_MAX 512 @@ -371,36 +463,62 @@ #define OSS_CR_NTHRS_BASE 8 #define OSS_CR_NTHRS_MAX 64 -#define OST_NBUFS (64 * cfs_num_online_cpus()) -#define OST_BUFSIZE (8 * 1024) - /** - * OST_MAXREQSIZE ~= 4768 bytes = - * lustre_msg + obdo + 16 * obd_ioobj + 256 * niobuf_remote + * OST_IO_MAXREQSIZE ~= + * lustre_msg + ptlrpc_body + obdo + obd_ioobj + + * DT_MAX_BRW_PAGES * niobuf_remote * * - single object with 16 pages is 512 bytes - * - OST_MAXREQSIZE must be at least 1 page of cookies plus some spillover + * - OST_IO_MAXREQSIZE must be at least 1 page of cookies plus some spillover + * - Must be a multiple of 1024 + * - actual size is about 18K + */ +#define _OST_MAXREQSIZE_SUM (sizeof(struct lustre_msg) + \ + sizeof(struct ptlrpc_body) + \ + sizeof(struct obdo) + \ + sizeof(struct obd_ioobj) + \ + sizeof(struct niobuf_remote) * DT_MAX_BRW_PAGES) +/** + * FIEMAP request can be 4K+ for now */ -#define OST_MAXREQSIZE (5 * 1024) -#define OST_MAXREPSIZE (9 * 1024) +#define OST_MAXREQSIZE (16 * 1024) +#define OST_IO_MAXREQSIZE max_t(int, OST_MAXREQSIZE, \ + (((_OST_MAXREQSIZE_SUM - 1) | (1024 - 1)) + 1)) + +#define OST_MAXREPSIZE (9 * 1024) +#define OST_IO_MAXREPSIZE OST_MAXREPSIZE + +#define OST_NBUFS 64 +/** OST_BUFSIZE = max_reqsize + max sptlrpc payload size */ +#define OST_BUFSIZE max_t(int, OST_MAXREQSIZE + 1024, 16 * 1024) +/** + * OST_IO_MAXREQSIZE is 18K, giving extra 46K can increase buffer utilization + * rate of request buffer, please check comment of MDS_LOV_BUFSIZE for details. + */ +#define OST_IO_BUFSIZE max_t(int, OST_IO_MAXREQSIZE + 1024, 64 * 1024) /* Macro to hide a typecast. 
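 * Typical usage is along these lines (a sketch; "struct my_async_args"
 * is a hypothetical per-request argument structure, which must fit
 * within sizeof(req->rq_async_args)):
 *
 *	struct my_async_args *aa;
 *
 *	CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
 *	aa = ptlrpc_req_async_args(req);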
*/ #define ptlrpc_req_async_args(req) ((void *)&req->rq_async_args) +struct ptlrpc_replay_async_args { + int praa_old_state; + int praa_old_status; +}; + /** * Structure to single define portal connection. */ struct ptlrpc_connection { - /** linkage for connections hash table */ - cfs_hlist_node_t c_hash; - /** Our own lnet nid for this connection */ - lnet_nid_t c_self; - /** Remote side nid for this connection */ - lnet_process_id_t c_peer; - /** UUID of the other side */ - struct obd_uuid c_remote_uuid; - /** reference counter for this connection */ - cfs_atomic_t c_refcount; + /** linkage for connections hash table */ + struct hlist_node c_hash; + /** Our own lnet nid for this connection */ + lnet_nid_t c_self; + /** Remote side nid for this connection */ + lnet_process_id_t c_peer; + /** UUID of the other side */ + struct obd_uuid c_remote_uuid; + /** reference counter for this connection */ + atomic_t c_refcount; }; /** Client definition for PortalRPC */ @@ -448,60 +566,61 @@ typedef int (*set_producer_func)(struct ptlrpc_request_set *, void *); * returned. */ struct ptlrpc_request_set { - cfs_atomic_t set_refcount; + atomic_t set_refcount; /** number of in queue requests */ - cfs_atomic_t set_new_count; + atomic_t set_new_count; /** number of uncompleted requests */ - cfs_atomic_t set_remaining; + atomic_t set_remaining; /** wait queue to wait on for request events */ - cfs_waitq_t set_waitq; - cfs_waitq_t *set_wakeup_ptr; + wait_queue_head_t set_waitq; + wait_queue_head_t *set_wakeup_ptr; /** List of requests in the set */ - cfs_list_t set_requests; + struct list_head set_requests; /** * List of completion callbacks to be called when the set is completed * This is only used if \a set_interpret is NULL. * Links struct ptlrpc_set_cbdata. */ - cfs_list_t set_cblist; + struct list_head set_cblist; /** Completion callback, if only one. */ - set_interpreter_func set_interpret; + set_interpreter_func set_interpret; /** opaq argument passed to completion \a set_interpret callback. */ - void *set_arg; + void *set_arg; /** * Lock for \a set_new_requests manipulations * locked so that any old caller can communicate requests to * the set holder who can then fold them into the lock-free set */ - cfs_spinlock_t set_new_req_lock; + spinlock_t set_new_req_lock; /** List of new yet unsent requests. Only used with ptlrpcd now. 
*/ - cfs_list_t set_new_requests; + struct list_head set_new_requests; /** rq_status of requests that have been freed already */ - int set_rc; + int set_rc; /** Additional fields used by the flow control extension */ /** Maximum number of RPCs in flight */ - int set_max_inflight; + int set_max_inflight; /** Callback function used to generate RPCs */ - set_producer_func set_producer; + set_producer_func set_producer; /** opaq argument passed to the producer callback */ - void *set_producer_arg; + void *set_producer_arg; }; /** * Description of a single ptrlrpc_set callback */ struct ptlrpc_set_cbdata { - /** List linkage item */ - cfs_list_t psc_item; - /** Pointer to interpreting function */ - set_interpreter_func psc_interpret; - /** Opaq argument to pass to the callback */ - void *psc_data; + /** List linkage item */ + struct list_head psc_item; + /** Pointer to interpreting function */ + set_interpreter_func psc_interpret; + /** Opaq argument to pass to the callback */ + void *psc_data; }; struct ptlrpc_bulk_desc; struct ptlrpc_service_part; +struct ptlrpc_service; /** * ptlrpc callback & work item stuff @@ -523,20 +642,20 @@ struct ptlrpc_cb_id { * added to the state for replay/failover consistency guarantees. */ struct ptlrpc_reply_state { - /** Callback description */ - struct ptlrpc_cb_id rs_cb_id; - /** Linkage for list of all reply states in a system */ - cfs_list_t rs_list; - /** Linkage for list of all reply states on same export */ - cfs_list_t rs_exp_list; - /** Linkage for list of all reply states for same obd */ - cfs_list_t rs_obd_list; + /** Callback description */ + struct ptlrpc_cb_id rs_cb_id; + /** Linkage for list of all reply states in a system */ + struct list_head rs_list; + /** Linkage for list of all reply states on same export */ + struct list_head rs_exp_list; + /** Linkage for list of all reply states for same obd */ + struct list_head rs_obd_list; #if RS_DEBUG - cfs_list_t rs_debug_list; + struct list_head rs_debug_list; #endif - /** A spinlock to protect the reply state flags */ - cfs_spinlock_t rs_lock; - /** Reply state flags */ + /** A spinlock to protect the reply state flags */ + spinlock_t rs_lock; + /** Reply state flags */ unsigned long rs_difficult:1; /* ACK/commit stuff */ unsigned long rs_no_ack:1; /* no ACK, even for difficult requests */ @@ -556,16 +675,16 @@ struct ptlrpc_reply_state { __u64 rs_transno; /** xid */ __u64 rs_xid; - struct obd_export *rs_export; + struct obd_export *rs_export; struct ptlrpc_service_part *rs_svcpt; - /** Lnet metadata handle for the reply */ - lnet_handle_md_t rs_md_h; - cfs_atomic_t rs_refcount; - - /** Context for the sevice thread */ - struct ptlrpc_svc_ctx *rs_svc_ctx; - /** Reply buffer (actually sent to the client), encoded if needed */ - struct lustre_msg *rs_repbuf; /* wrapper */ + /** Lnet metadata handle for the reply */ + lnet_handle_md_t rs_md_h; + atomic_t rs_refcount; + + /** Context for the sevice thread */ + struct ptlrpc_svc_ctx *rs_svc_ctx; + /** Reply buffer (actually sent to the client), encoded if needed */ + struct lustre_msg *rs_repbuf; /* wrapper */ /** Size of the reply buffer */ int rs_repbuf_len; /* wrapper buf length */ /** Size of the reply message */ @@ -577,53 +696,1061 @@ struct ptlrpc_reply_state { */ struct lustre_msg *rs_msg; /* reply message */ - /** Number of locks awaiting client ACK */ - int rs_nlocks; - /** Handles of locks awaiting client reply ACK */ - struct lustre_handle rs_locks[RS_MAX_LOCKS]; - /** Lock modes of locks in \a rs_locks */ - ldlm_mode_t 
rs_modes[RS_MAX_LOCKS]; + /** Number of locks awaiting client ACK */ + int rs_nlocks; + /** Handles of locks awaiting client reply ACK */ + struct lustre_handle rs_locks[RS_MAX_LOCKS]; + /** Lock modes of locks in \a rs_locks */ + ldlm_mode_t rs_modes[RS_MAX_LOCKS]; +}; + +struct ptlrpc_thread; + +/** RPC stages */ +enum rq_phase { + RQ_PHASE_NEW = 0xebc0de00, + RQ_PHASE_RPC = 0xebc0de01, + RQ_PHASE_BULK = 0xebc0de02, + RQ_PHASE_INTERPRET = 0xebc0de03, + RQ_PHASE_COMPLETE = 0xebc0de04, + RQ_PHASE_UNREGISTERING = 0xebc0de05, + RQ_PHASE_UNDEFINED = 0xebc0de06 +}; + +/** Type of request interpreter call-back */ +typedef int (*ptlrpc_interpterer_t)(const struct lu_env *env, + struct ptlrpc_request *req, + void *arg, int rc); +/** Type of request resend call-back */ +typedef void (*ptlrpc_resend_cb_t)(struct ptlrpc_request *req, + void *arg); + +/** + * Definition of request pool structure. + * The pool is used to store empty preallocated requests for the case + * when we would actually need to send something without performing + * any allocations (to avoid e.g. OOM). + */ +struct ptlrpc_request_pool { + /** Locks the list */ + spinlock_t prp_lock; + /** list of ptlrpc_request structs */ + struct list_head prp_req_list; + /** Maximum message size that would fit into a rquest from this pool */ + int prp_rq_size; + /** Function to allocate more requests for this pool */ + void (*prp_populate)(struct ptlrpc_request_pool *, int); +}; + +struct lu_context; +struct lu_env; + +struct ldlm_lock; + +/** + * \defgroup nrs Network Request Scheduler + * @{ + */ +struct ptlrpc_nrs_policy; +struct ptlrpc_nrs_resource; +struct ptlrpc_nrs_request; + +/** + * NRS control operations. + * + * These are common for all policies. + */ +enum ptlrpc_nrs_ctl { + /** + * Not a valid opcode. + */ + PTLRPC_NRS_CTL_INVALID, + /** + * Activate the policy. + */ + PTLRPC_NRS_CTL_START, + /** + * Reserved for multiple primary policies, which may be a possibility + * in the future. + */ + PTLRPC_NRS_CTL_STOP, + /** + * Policies can start using opcodes from this value and onwards for + * their own purposes; the assigned value itself is arbitrary. + */ + PTLRPC_NRS_CTL_1ST_POL_SPEC = 0x20, +}; + +/** + * ORR policy operations + */ +enum nrs_ctl_orr { + NRS_CTL_ORR_RD_QUANTUM = PTLRPC_NRS_CTL_1ST_POL_SPEC, + NRS_CTL_ORR_WR_QUANTUM, + NRS_CTL_ORR_RD_OFF_TYPE, + NRS_CTL_ORR_WR_OFF_TYPE, + NRS_CTL_ORR_RD_SUPP_REQ, + NRS_CTL_ORR_WR_SUPP_REQ, +}; + +/** + * NRS policy operations. + * + * These determine the behaviour of a policy, and are called in response to + * NRS core events. + */ +struct ptlrpc_nrs_pol_ops { + /** + * Called during policy registration; this operation is optional. + * + * \param[in,out] policy The policy being initialized + */ + int (*op_policy_init) (struct ptlrpc_nrs_policy *policy); + /** + * Called during policy unregistration; this operation is optional. + * + * \param[in,out] policy The policy being unregistered/finalized + */ + void (*op_policy_fini) (struct ptlrpc_nrs_policy *policy); + /** + * Called when activating a policy via lprocfs; policies allocate and + * initialize their resources here; this operation is optional. 
+ * + * \param[in,out] policy The policy being started + * \param[in,out] arg A generic char buffer + * + * \see nrs_policy_start_locked() + */ + int (*op_policy_start) (struct ptlrpc_nrs_policy *policy, + char *arg); + /** + * Called when deactivating a policy via lprocfs; policies deallocate + * their resources here; this operation is optional + * + * \param[in,out] policy The policy being stopped + * + * \see nrs_policy_stop0() + */ + void (*op_policy_stop) (struct ptlrpc_nrs_policy *policy); + /** + * Used for policy-specific operations; i.e. not generic ones like + * \e PTLRPC_NRS_CTL_START and \e PTLRPC_NRS_CTL_GET_INFO; analogous + * to an ioctl; this operation is optional. + * + * \param[in,out] policy The policy carrying out operation \a opc + * \param[in] opc The command operation being carried out + * \param[in,out] arg An generic buffer for communication between the + * user and the control operation + * + * \retval -ve error + * \retval 0 success + * + * \see ptlrpc_nrs_policy_control() + */ + int (*op_policy_ctl) (struct ptlrpc_nrs_policy *policy, + enum ptlrpc_nrs_ctl opc, void *arg); + + /** + * Called when obtaining references to the resources of the resource + * hierarchy for a request that has arrived for handling at the PTLRPC + * service. Policies should return -ve for requests they do not wish + * to handle. This operation is mandatory. + * + * \param[in,out] policy The policy we're getting resources for. + * \param[in,out] nrq The request we are getting resources for. + * \param[in] parent The parent resource of the resource being + * requested; set to NULL if none. + * \param[out] resp The resource is to be returned here; the + * fallback policy in an NRS head should + * \e always return a non-NULL pointer value. + * \param[in] moving_req When set, signifies that this is an attempt + * to obtain resources for a request being moved + * to the high-priority NRS head by + * ldlm_lock_reorder_req(). + * This implies two things: + * 1. We are under obd_export::exp_rpc_lock and + * so should not sleep. + * 2. We should not perform non-idempotent or can + * skip performing idempotent operations that + * were carried out when resources were first + * taken for the request when it was initialized + * in ptlrpc_nrs_req_initialize(). + * + * \retval 0, +ve The level of the returned resource in the resource + * hierarchy; currently only 0 (for a non-leaf resource) + * and 1 (for a leaf resource) are supported by the + * framework. + * \retval -ve error + * + * \see ptlrpc_nrs_req_initialize() + * \see ptlrpc_nrs_hpreq_add_nolock() + * \see ptlrpc_nrs_req_hp_move() + */ + int (*op_res_get) (struct ptlrpc_nrs_policy *policy, + struct ptlrpc_nrs_request *nrq, + const struct ptlrpc_nrs_resource *parent, + struct ptlrpc_nrs_resource **resp, + bool moving_req); + /** + * Called when releasing references taken for resources in the resource + * hierarchy for the request; this operation is optional. + * + * \param[in,out] policy The policy the resource belongs to + * \param[in] res The resource to be freed + * + * \see ptlrpc_nrs_req_finalize() + * \see ptlrpc_nrs_hpreq_add_nolock() + * \see ptlrpc_nrs_req_hp_move() + */ + void (*op_res_put) (struct ptlrpc_nrs_policy *policy, + const struct ptlrpc_nrs_resource *res); + + /** + * Obtains a request for handling from the policy, and optionally + * removes the request from the policy; this operation is mandatory. 
+ * + * \param[in,out] policy The policy to poll + * \param[in] peek When set, signifies that we just want to + * examine the request, and not handle it, so the + * request is not removed from the policy. + * \param[in] force When set, it will force a policy to return a + * request if it has one queued. + * + * \retval NULL No request available for handling + * \retval valid-pointer The request polled for handling + * + * \see ptlrpc_nrs_req_get_nolock() + */ + struct ptlrpc_nrs_request * + (*op_req_get) (struct ptlrpc_nrs_policy *policy, bool peek, + bool force); + /** + * Called when attempting to add a request to a policy for later + * handling; this operation is mandatory. + * + * \param[in,out] policy The policy on which to enqueue \a nrq + * \param[in,out] nrq The request to enqueue + * + * \retval 0 success + * \retval != 0 error + * + * \see ptlrpc_nrs_req_add_nolock() + */ + int (*op_req_enqueue) (struct ptlrpc_nrs_policy *policy, + struct ptlrpc_nrs_request *nrq); + /** + * Removes a request from the policy's set of pending requests. Normally + * called after a request has been polled successfully from the policy + * for handling; this operation is mandatory. + * + * \param[in,out] policy The policy the request \a nrq belongs to + * \param[in,out] nrq The request to dequeue + * + * \see ptlrpc_nrs_req_del_nolock() + */ + void (*op_req_dequeue) (struct ptlrpc_nrs_policy *policy, + struct ptlrpc_nrs_request *nrq); + /** + * Called after the request being carried out. Could be used for + * job/resource control; this operation is optional. + * + * \param[in,out] policy The policy which is stopping to handle request + * \a nrq + * \param[in,out] nrq The request + * + * \pre assert_spin_locked(&svcpt->scp_req_lock) + * + * \see ptlrpc_nrs_req_stop_nolock() + */ + void (*op_req_stop) (struct ptlrpc_nrs_policy *policy, + struct ptlrpc_nrs_request *nrq); + /** + * Registers the policy's lprocfs interface with a PTLRPC service. + * + * \param[in] svc The service + * + * \retval 0 success + * \retval != 0 error + */ + int (*op_lprocfs_init) (struct ptlrpc_service *svc); + /** + * Unegisters the policy's lprocfs interface with a PTLRPC service. + * + * In cases of failed policy registration in + * \e ptlrpc_nrs_policy_register(), this function may be called for a + * service which has not registered the policy successfully, so + * implementations of this method should make sure their operations are + * safe in such cases. + * + * \param[in] svc The service + */ + void (*op_lprocfs_fini) (struct ptlrpc_service *svc); +}; + +/** + * Policy flags + */ +enum nrs_policy_flags { + /** + * Fallback policy, use this flag only on a single supported policy per + * service. The flag cannot be used on policies that use + * \e PTLRPC_NRS_FL_REG_EXTERN + */ + PTLRPC_NRS_FL_FALLBACK = (1 << 0), + /** + * Start policy immediately after registering. + */ + PTLRPC_NRS_FL_REG_START = (1 << 1), + /** + * This is a policy registering from a module different to the one NRS + * core ships in (currently ptlrpc). + */ + PTLRPC_NRS_FL_REG_EXTERN = (1 << 2), +}; + +/** + * NRS queue type. + * + * Denotes whether an NRS instance is for handling normal or high-priority + * RPCs, or whether an operation pertains to one or both of the NRS instances + * in a service. 
+ */ +enum ptlrpc_nrs_queue_type { + PTLRPC_NRS_QUEUE_REG = (1 << 0), + PTLRPC_NRS_QUEUE_HP = (1 << 1), + PTLRPC_NRS_QUEUE_BOTH = (PTLRPC_NRS_QUEUE_REG | PTLRPC_NRS_QUEUE_HP) +}; + +/** + * NRS head + * + * A PTLRPC service has at least one NRS head instance for handling normal + * priority RPCs, and may optionally have a second NRS head instance for + * handling high-priority RPCs. Each NRS head maintains a list of available + * policies, of which one and only one policy is acting as the fallback policy, + * and optionally a different policy may be acting as the primary policy. For + * all RPCs handled by this NRS head instance, NRS core will first attempt to + * enqueue the RPC using the primary policy (if any). The fallback policy is + * used in the following cases: + * - when there was no primary policy in the + * ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED state at the time the request + * was initialized. + * - when the primary policy that was at the + * ptlrpc_nrs_pol_state::PTLRPC_NRS_POL_STATE_STARTED state at the time the + * RPC was initialized, denoted it did not wish, or for some other reason was + * not able to handle the request, by returning a non-valid NRS resource + * reference. + * - when the primary policy that was at the + * ptlrpc_nrs_pol_state::PTLRPC_NRS_POL_STATE_STARTED state at the time the + * RPC was initialized, fails later during the request enqueueing stage. + * + * \see nrs_resource_get_safe() + * \see nrs_request_enqueue() + */ +struct ptlrpc_nrs { + spinlock_t nrs_lock; + /** XXX Possibly replace svcpt->scp_req_lock with another lock here. */ + /** + * List of registered policies + */ + struct list_head nrs_policy_list; + /** + * List of policies with queued requests. Policies that have any + * outstanding requests are queued here, and this list is queried + * in a round-robin manner from NRS core when obtaining a request + * for handling. This ensures that requests from policies that at some + * point transition away from the + * ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED state are drained. + */ + struct list_head nrs_policy_queued; + /** + * Service partition for this NRS head + */ + struct ptlrpc_service_part *nrs_svcpt; + /** + * Primary policy, which is the preferred policy for handling RPCs + */ + struct ptlrpc_nrs_policy *nrs_policy_primary; + /** + * Fallback policy, which is the backup policy for handling RPCs + */ + struct ptlrpc_nrs_policy *nrs_policy_fallback; + /** + * This NRS head handles either HP or regular requests + */ + enum ptlrpc_nrs_queue_type nrs_queue_type; + /** + * # queued requests from all policies in this NRS head + */ + unsigned long nrs_req_queued; + /** + * # scheduled requests from all policies in this NRS head + */ + unsigned long nrs_req_started; + /** + * # policies on this NRS + */ + unsigned nrs_num_pols; + /** + * This NRS head is in progress of starting a policy + */ + unsigned nrs_policy_starting:1; + /** + * In progress of shutting down the whole NRS head; used during + * unregistration + */ + unsigned nrs_stopping:1; + /** + * NRS policy is throttling reqeust + */ + unsigned nrs_throttling:1; +}; + +#define NRS_POL_NAME_MAX 16 + +struct ptlrpc_nrs_pol_desc; + +/** + * Service compatibility predicate; this determines whether a policy is adequate + * for handling RPCs of a particular PTLRPC service. 
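+ *
+ * For example, a policy meant only for the OST I/O service could use a
+ * predicate along these lines (a sketch; compare the real helper
+ * nrs_policy_compat_one(), which nc_compat_svc_name below refers to):
+ *
+ *	static bool nrs_pol_compat_ost_io(const struct ptlrpc_service *svc,
+ *					  const struct ptlrpc_nrs_pol_desc *desc)
+ *	{
+ *		return strcmp(svc->srv_name, "ost_io") == 0;
+ *	}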
+ * + * XXX:This should give the same result during policy registration and + * unregistration, and for all partitions of a service; so the result should not + * depend on temporal service or other properties, that may influence the + * result. + */ +typedef bool (*nrs_pol_desc_compat_t) (const struct ptlrpc_service *svc, + const struct ptlrpc_nrs_pol_desc *desc); + +struct ptlrpc_nrs_pol_conf { + /** + * Human-readable policy name + */ + char nc_name[NRS_POL_NAME_MAX]; + /** + * NRS operations for this policy + */ + const struct ptlrpc_nrs_pol_ops *nc_ops; + /** + * Service compatibility predicate + */ + nrs_pol_desc_compat_t nc_compat; + /** + * Set for policies that support a single ptlrpc service, i.e. ones that + * have \a pd_compat set to nrs_policy_compat_one(). The variable value + * depicts the name of the single service that such policies are + * compatible with. + */ + const char *nc_compat_svc_name; + /** + * Owner module for this policy descriptor; policies registering from a + * different module to the one the NRS framework is held within + * (currently ptlrpc), should set this field to THIS_MODULE. + */ + struct module *nc_owner; + /** + * Policy registration flags; a bitmast of \e nrs_policy_flags + */ + unsigned nc_flags; +}; + +/** + * NRS policy registering descriptor + * + * Is used to hold a description of a policy that can be passed to NRS core in + * order to register the policy with NRS heads in different PTLRPC services. + */ +struct ptlrpc_nrs_pol_desc { + /** + * Human-readable policy name + */ + char pd_name[NRS_POL_NAME_MAX]; + /** + * Link into nrs_core::nrs_policies + */ + struct list_head pd_list; + /** + * NRS operations for this policy + */ + const struct ptlrpc_nrs_pol_ops *pd_ops; + /** + * Service compatibility predicate + */ + nrs_pol_desc_compat_t pd_compat; + /** + * Set for policies that are compatible with only one PTLRPC service. + * + * \see ptlrpc_nrs_pol_conf::nc_compat_svc_name + */ + const char *pd_compat_svc_name; + /** + * Owner module for this policy descriptor. + * + * We need to hold a reference to the module whenever we might make use + * of any of the module's contents, i.e. + * - If one or more instances of the policy are at a state where they + * might be handling a request, i.e. + * ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED or + * ptlrpc_nrs_pol_state::NRS_POL_STATE_STOPPING as we will have to + * call into the policy's ptlrpc_nrs_pol_ops() handlers. A reference + * is taken on the module when + * \e ptlrpc_nrs_pol_desc::pd_refs becomes 1, and released when it + * becomes 0, so that we hold only one reference to the module maximum + * at any time. + * + * We do not need to hold a reference to the module, even though we + * might use code and data from the module, in the following cases: + * - During external policy registration, because this should happen in + * the module's init() function, in which case the module is safe from + * removal because a reference is being held on the module by the + * kernel, and iirc kmod (and I guess module-init-tools also) will + * serialize any racing processes properly anyway. + * - During external policy unregistration, because this should happen + * in a module's exit() function, and any attempts to start a policy + * instance would need to take a reference on the module, and this is + * not possible once we have reached the point where the exit() + * handler is called. 
+ * - During service registration and unregistration, as service setup + * and cleanup, and policy registration, unregistration and policy + * instance starting, are serialized by \e nrs_core::nrs_mutex, so + * as long as users adhere to the convention of registering policies + * in init() and unregistering them in module exit() functions, there + * should not be a race between these operations. + * - During any policy-specific lprocfs operations, because a reference + * is held by the kernel on a proc entry that has been entered by a + * syscall, so as long as proc entries are removed during unregistration time, + * then unregistration and lprocfs operations will be properly + * serialized. + */ + struct module *pd_owner; + /** + * Bitmask of \e nrs_policy_flags + */ + unsigned pd_flags; + /** + * # of references on this descriptor + */ + atomic_t pd_refs; +}; + +/** + * NRS policy state + * + * Policies transition from one state to the other during their lifetime + */ +enum ptlrpc_nrs_pol_state { + /** + * Not a valid policy state. + */ + NRS_POL_STATE_INVALID, + /** + * Policies are at this state either at the start of their life, or + * transition here when the user selects a different policy to act + * as the primary one. + */ + NRS_POL_STATE_STOPPED, + /** + * Policy is progress of stopping + */ + NRS_POL_STATE_STOPPING, + /** + * Policy is in progress of starting + */ + NRS_POL_STATE_STARTING, + /** + * A policy is in this state in two cases: + * - it is the fallback policy, which is always in this state. + * - it has been activated by the user; i.e. it is the primary policy, + */ + NRS_POL_STATE_STARTED, +}; + +/** + * NRS policy information + * + * Used for obtaining information for the status of a policy via lprocfs + */ +struct ptlrpc_nrs_pol_info { + /** + * Policy name + */ + char pi_name[NRS_POL_NAME_MAX]; + /** + * Current policy state + */ + enum ptlrpc_nrs_pol_state pi_state; + /** + * # RPCs enqueued for later dispatching by the policy + */ + long pi_req_queued; + /** + * # RPCs started for dispatch by the policy + */ + long pi_req_started; + /** + * Is this a fallback policy? + */ + unsigned pi_fallback:1; +}; + +/** + * NRS policy + * + * There is one instance of this for each policy in each NRS head of each + * PTLRPC service partition. + */ +struct ptlrpc_nrs_policy { + /** + * Linkage into the NRS head's list of policies, + * ptlrpc_nrs:nrs_policy_list + */ + struct list_head pol_list; + /** + * Linkage into the NRS head's list of policies with enqueued + * requests ptlrpc_nrs:nrs_policy_queued + */ + struct list_head pol_list_queued; + /** + * Current state of this policy + */ + enum ptlrpc_nrs_pol_state pol_state; + /** + * Bitmask of nrs_policy_flags + */ + unsigned pol_flags; + /** + * # RPCs enqueued for later dispatching by the policy + */ + long pol_req_queued; + /** + * # RPCs started for dispatch by the policy + */ + long pol_req_started; + /** + * Usage Reference count taken on the policy instance + */ + long pol_ref; + /** + * The NRS head this policy has been created at + */ + struct ptlrpc_nrs *pol_nrs; + /** + * Private policy data; varies by policy type + */ + void *pol_private; + /** + * Policy descriptor for this policy instance. + */ + struct ptlrpc_nrs_pol_desc *pol_desc; +}; + +/** + * NRS resource + * + * Resources are embedded into two types of NRS entities: + * - Inside NRS policies, in the policy's private data in + * ptlrpc_nrs_policy::pol_private + * - In objects that act as prime-level scheduling entities in different NRS + * policies; e.g. 
on a policy that performs round robin or similar order + * scheduling across client NIDs, there would be one NRS resource per unique + * client NID. On a policy which performs round robin scheduling across + * backend filesystem objects, there would be one resource associated with + * each of the backend filesystem objects partaking in the scheduling + * performed by the policy. + * + * NRS resources share a parent-child relationship, in which resources embedded + * in policy instances are the parent entities, with all scheduling entities + * a policy schedules across being the children, thus forming a simple resource + * hierarchy. This hierarchy may be extended with one or more levels in the + * future if the ability to have more than one primary policy is added. + * + * Upon request initialization, references to the then active NRS policies are + * taken and used to later handle the dispatching of the request with one of + * these policies. + * + * \see nrs_resource_get_safe() + * \see ptlrpc_nrs_req_add() + */ +struct ptlrpc_nrs_resource { + /** + * This NRS resource's parent; is NULL for resources embedded in NRS + * policy instances; i.e. those are top-level ones. + */ + struct ptlrpc_nrs_resource *res_parent; + /** + * The policy associated with this resource. + */ + struct ptlrpc_nrs_policy *res_policy; +}; + +enum { + NRS_RES_FALLBACK, + NRS_RES_PRIMARY, + NRS_RES_MAX +}; + +/* \name fifo + * + * FIFO policy + * + * This policy is a logical wrapper around previous, non-NRS functionality. + * It dispatches RPCs in the same order as they arrive from the network. This + * policy is currently used as the fallback policy, and the only enabled policy + * on all NRS heads of all PTLRPC service partitions. + * @{ + */ + +/** + * Private data structure for the FIFO policy + */ +struct nrs_fifo_head { + /** + * Resource object for policy instance. + */ + struct ptlrpc_nrs_resource fh_res; + /** + * List of queued requests. + */ + struct list_head fh_list; + /** + * For debugging purposes. + */ + __u64 fh_sequence; +}; + +struct nrs_fifo_req { + struct list_head fr_list; + __u64 fr_sequence; +}; + +/** @} fifo */ + +/** + * \name CRR-N + * + * CRR-N, Client Round Robin over NIDs + * @{ + */ + +/** + * private data structure for CRR-N NRS + */ +struct nrs_crrn_net { + struct ptlrpc_nrs_resource cn_res; + cfs_binheap_t *cn_binheap; + cfs_hash_t *cn_cli_hash; + /** + * Used when a new scheduling round commences, in order to synchronize + * all clients with the new round number. + */ + __u64 cn_round; + /** + * Determines the relevant ordering amongst request batches within a + * scheduling round. + */ + __u64 cn_sequence; + /** + * Round Robin quantum; the maximum number of RPCs that each request + * batch for each client can have in a scheduling round. + */ + __u16 cn_quantum; +}; + +/** + * Object representing a client in CRR-N, as identified by its NID + */ +struct nrs_crrn_client { + struct ptlrpc_nrs_resource cc_res; + struct hlist_node cc_hnode; + lnet_nid_t cc_nid; + /** + * The round number against which this client is currently scheduling + * requests. + */ + __u64 cc_round; + /** + * The sequence number used for requests scheduled by this client during + * the current round number. + */ + __u64 cc_sequence; + atomic_t cc_ref; + /** + * Round Robin quantum; the maximum number of RPCs the client is allowed + * to schedule in a single batch of each round. 
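+	 *
+	 * For example (a sketch of the intended ordering): with cc_quantum
+	 * of 8, a client's first 8 queued RPCs form one batch scheduled in
+	 * round N, and its 9th RPC starts a new batch that is scheduled in
+	 * round N + 1, behind the round-N batches of other clients.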
+ */ + __u16 cc_quantum; + /** + * # of pending requests for this client, on all existing rounds + */ + __u16 cc_active; +}; + +/** + * CRR-N NRS request definition + */ +struct nrs_crrn_req { + /** + * Round number for this request; shared with all other requests in the + * same batch. + */ + __u64 cr_round; + /** + * Sequence number for this request; shared with all other requests in + * the same batch. + */ + __u64 cr_sequence; +}; + +/** + * CRR-N policy operations. + */ +enum nrs_ctl_crr { + /** + * Read the RR quantum size of a CRR-N policy. + */ + NRS_CTL_CRRN_RD_QUANTUM = PTLRPC_NRS_CTL_1ST_POL_SPEC, + /** + * Write the RR quantum size of a CRR-N policy. + */ + NRS_CTL_CRRN_WR_QUANTUM, +}; + +/** @} CRR-N */ + +/** + * \name ORR/TRR + * + * ORR/TRR (Object-based Round Robin/Target-based Round Robin) NRS policies + * @{ + */ + +/** + * Lower and upper byte offsets of a brw RPC + */ +struct nrs_orr_req_range { + __u64 or_start; + __u64 or_end; +}; + +/** + * RPC types supported by the ORR/TRR policies + */ +enum nrs_orr_supp { + NOS_OST_READ = (1 << 0), + NOS_OST_WRITE = (1 << 1), + NOS_OST_RW = (NOS_OST_READ | NOS_OST_WRITE), + /** + * Default value for policies. + */ + NOS_DFLT = NOS_OST_READ }; -struct ptlrpc_thread; +/** + * As unique keys for grouping RPCs together, we use the object's OST FID for + * the ORR policy, and the OST index for the TRR policy. + * + * XXX: We waste some space for TRR policy instances by using a union, but it + * allows to consolidate some of the code between ORR and TRR, and these + * policies will probably eventually merge into one anyway. + */ +struct nrs_orr_key { + union { + /** object FID for ORR */ + struct lu_fid ok_fid; + /** OST index for TRR */ + __u32 ok_idx; + }; +}; -/** RPC stages */ -enum rq_phase { - RQ_PHASE_NEW = 0xebc0de00, - RQ_PHASE_RPC = 0xebc0de01, - RQ_PHASE_BULK = 0xebc0de02, - RQ_PHASE_INTERPRET = 0xebc0de03, - RQ_PHASE_COMPLETE = 0xebc0de04, - RQ_PHASE_UNREGISTERING = 0xebc0de05, - RQ_PHASE_UNDEFINED = 0xebc0de06 +/** + * The largest base string for unique hash/slab object names is + * "nrs_orr_reg_", so 13 characters. We add 3 to this to be used for the CPT + * id number, so this _should_ be more than enough for the maximum number of + * CPTs on any system. If it does happen that this statement is incorrect, + * nrs_orr_genobjname() will inevitably yield a non-unique name and cause + * kmem_cache_create() to complain (on Linux), so the erroneous situation + * will hopefully not go unnoticed. + */ +#define NRS_ORR_OBJ_NAME_MAX (sizeof("nrs_orr_reg_") + 3) + +/** + * private data structure for ORR and TRR NRS + */ +struct nrs_orr_data { + struct ptlrpc_nrs_resource od_res; + cfs_binheap_t *od_binheap; + cfs_hash_t *od_obj_hash; + struct kmem_cache *od_cache; + /** + * Used when a new scheduling round commences, in order to synchronize + * all object or OST batches with the new round number. + */ + __u64 od_round; + /** + * Determines the relevant ordering amongst request batches within a + * scheduling round. + */ + __u64 od_sequence; + /** + * RPC types that are currently supported. + */ + enum nrs_orr_supp od_supp; + /** + * Round Robin quantum; the maxium number of RPCs that each request + * batch for each object or OST can have in a scheduling round. + */ + __u16 od_quantum; + /** + * Whether to use physical disk offsets or logical file offsets. 
+	 */
+	bool				od_physical;
+	/**
+	 * XXX: We need to provide a persistently allocated string to hold
+	 * unique object names for this policy, since in currently supported
+	 * versions of Linux by Lustre, kmem_cache_create() just sets a pointer
+	 * to the name string provided. kstrdup() is used in the version of
+	 * kmem_cache_create() in current Linux mainline, so we may be able to
+	 * remove this in the future.
+	 */
+	char				od_objname[NRS_ORR_OBJ_NAME_MAX];
};

-/** Type of request interpreter call-back */
-typedef int (*ptlrpc_interpterer_t)(const struct lu_env *env,
-                                    struct ptlrpc_request *req,
-                                    void *arg, int rc);
+/**
+ * Represents a backend-fs object or OST in the ORR and TRR policies
+ * respectively
+ */
+struct nrs_orr_object {
+	struct ptlrpc_nrs_resource	oo_res;
+	struct hlist_node		oo_hnode;
+	/**
+	 * The round number against which requests are being scheduled for this
+	 * object or OST
+	 */
+	__u64				oo_round;
+	/**
+	 * The sequence number used for requests scheduled for this object or
+	 * OST during the current round number.
+	 */
+	__u64				oo_sequence;
+	/**
+	 * The key of the object or OST for which this structure instance is
+	 * scheduling RPCs
+	 */
+	struct nrs_orr_key		oo_key;
+	long				oo_ref;
+	/**
+	 * Round Robin quantum; the maximum number of RPCs that are allowed to
+	 * be scheduled for the object or OST in a single batch of each round.
+	 */
+	__u16				oo_quantum;
+	/**
+	 * # of pending requests for this object or OST, on all existing rounds
+	 */
+	__u16				oo_active;
+};

 /**
- * Definition of request pool structure.
- * The pool is used to store empty preallocated requests for the case
- * when we would actually need to send something without performing
- * any allocations (to avoid e.g. OOM).
+ * ORR/TRR NRS request definition
  */
-struct ptlrpc_request_pool {
-	/** Locks the list */
-	cfs_spinlock_t prp_lock;
-	/** list of ptlrpc_request structs */
-	cfs_list_t prp_req_list;
-	/** Maximum message size that would fit into a rquest from this pool */
-	int prp_rq_size;
-	/** Function to allocate more requests for this pool */
-	void (*prp_populate)(struct ptlrpc_request_pool *, int);
+struct nrs_orr_req {
+	/**
+	 * The offset range this request covers
+	 */
+	struct nrs_orr_req_range	or_range;
+	/**
+	 * Round number for this request; shared with all other requests in the
+	 * same batch.
+	 */
+	__u64				or_round;
+	/**
+	 * Sequence number for this request; shared with all other requests in
+	 * the same batch.
+	 */
+	__u64				or_sequence;
+	/**
+	 * For debugging purposes.
+	 */
+	struct nrs_orr_key		or_key;
+	/**
+	 * An ORR policy instance has filled in request information while
+	 * enqueueing the request on the service partition's regular NRS head.
+	 */
+	unsigned int			or_orr_set:1;
+	/**
+	 * A TRR policy instance has filled in request information while
+	 * enqueueing the request on the service partition's regular NRS head.
+	 */
+	unsigned int			or_trr_set:1;
+	/**
+	 * Request offset ranges have been filled in with logical offset
+	 * values.
+	 */
+	unsigned int			or_logical_set:1;
+	/**
+	 * Request offset ranges have been filled in with physical offset
+	 * values.
+	 */
+	unsigned int			or_physical_set:1;
 };

-struct lu_context;
-struct lu_env;
+/** @} ORR/TRR */

-struct ldlm_lock;
+#include <lustre_nrs_tbf.h>
+
+/**
+ * NRS request
+ *
+ * Instances of this object exist embedded within ptlrpc_request; the main
+ * purpose of this object is to hold references to the request's resources
+ * for the lifetime of the request, and to hold properties that policies
+ * use for determining the request's scheduling priority.
+ * */ +struct ptlrpc_nrs_request { + /** + * The request's resource hierarchy. + */ + struct ptlrpc_nrs_resource *nr_res_ptrs[NRS_RES_MAX]; + /** + * Index into ptlrpc_nrs_request::nr_res_ptrs of the resource of the + * policy that was used to enqueue the request. + * + * \see nrs_request_enqueue() + */ + unsigned nr_res_idx; + unsigned nr_initialized:1; + unsigned nr_enqueued:1; + unsigned nr_started:1; + unsigned nr_finalized:1; + cfs_binheap_node_t nr_node; + + /** + * Policy-specific fields, used for determining a request's scheduling + * priority, and other supporting functionality. + */ + union { + /** + * Fields for the FIFO policy + */ + struct nrs_fifo_req fifo; + /** + * CRR-N request defintion + */ + struct nrs_crrn_req crr; + /** ORR and TRR share the same request definition */ + struct nrs_orr_req orr; + /** + * TBF request definition + */ + struct nrs_tbf_req tbf; + } nr_u; + /** + * Externally-registering policies may want to use this to allocate + * their own request properties. + */ + void *ext; +}; + +/** @} nrs */ /** * Basic request prioritization operations structure. @@ -650,6 +1777,167 @@ struct ptlrpc_hpreq_ops { void (*hpreq_fini)(struct ptlrpc_request *); }; +struct ptlrpc_cli_req { + /** For bulk requests on client only: bulk descriptor */ + struct ptlrpc_bulk_desc *cr_bulk; + /** optional time limit for send attempts */ + cfs_duration_t cr_delay_limit; + /** time request was first queued */ + cfs_time_t cr_queued_time; + /** request sent timeval */ + struct timeval cr_sent_tv; + /** time for request really sent out */ + time_t cr_sent_out; + /** when req reply unlink must finish. */ + time_t cr_reply_deadline; + /** when req bulk unlink must finish. */ + time_t cr_bulk_deadline; + /** Portal to which this request would be sent */ + short cr_req_ptl; + /** Portal where to wait for reply and where reply would be sent */ + short cr_rep_ptl; + /** request resending number */ + unsigned int cr_resend_nr; + /** What was import generation when this request was sent */ + int cr_imp_gen; + enum lustre_imp_state cr_send_state; + /** Per-request waitq introduced by bug 21938 for recovery waiting */ + wait_queue_head_t cr_set_waitq; + /** Link item for request set lists */ + struct list_head cr_set_chain; + /** link to waited ctx */ + struct list_head cr_ctx_chain; + + /** client's half ctx */ + struct ptlrpc_cli_ctx *cr_cli_ctx; + /** Link back to the request set */ + struct ptlrpc_request_set *cr_set; + /** outgoing request MD handle */ + lnet_handle_md_t cr_req_md_h; + /** request-out callback parameter */ + struct ptlrpc_cb_id cr_req_cbid; + /** incoming reply MD handle */ + lnet_handle_md_t cr_reply_md_h; + wait_queue_head_t cr_reply_waitq; + /** reply callback parameter */ + struct ptlrpc_cb_id cr_reply_cbid; + /** Async completion handler, called when reply is received */ + ptlrpc_interpterer_t cr_reply_interp; + /** Resend handler, called when request is resend to update RPC data */ + ptlrpc_resend_cb_t cr_resend_cb; + /** Async completion context */ + union ptlrpc_async_args cr_async_args; + /** Opaq data for replay and commit callbacks. */ + void *cr_cb_data; + /** + * Commit callback, called when request is committed and about to be + * freed. 
+ */ + void (*cr_commit_cb)(struct ptlrpc_request *); + /** Replay callback, called after request is replayed at recovery */ + void (*cr_replay_cb)(struct ptlrpc_request *); +}; + +/** client request member alias */ +/* NB: these alias should NOT be used by any new code, instead they should + * be removed step by step to avoid potential abuse */ +#define rq_bulk rq_cli.cr_bulk +#define rq_delay_limit rq_cli.cr_delay_limit +#define rq_queued_time rq_cli.cr_queued_time +#define rq_sent_tv rq_cli.cr_sent_tv +#define rq_real_sent rq_cli.cr_sent_out +#define rq_reply_deadline rq_cli.cr_reply_deadline +#define rq_bulk_deadline rq_cli.cr_bulk_deadline +#define rq_nr_resend rq_cli.cr_resend_nr +#define rq_request_portal rq_cli.cr_req_ptl +#define rq_reply_portal rq_cli.cr_rep_ptl +#define rq_import_generation rq_cli.cr_imp_gen +#define rq_send_state rq_cli.cr_send_state +#define rq_set_chain rq_cli.cr_set_chain +#define rq_ctx_chain rq_cli.cr_ctx_chain +#define rq_set rq_cli.cr_set +#define rq_set_waitq rq_cli.cr_set_waitq +#define rq_cli_ctx rq_cli.cr_cli_ctx +#define rq_req_md_h rq_cli.cr_req_md_h +#define rq_req_cbid rq_cli.cr_req_cbid +#define rq_reply_md_h rq_cli.cr_reply_md_h +#define rq_reply_waitq rq_cli.cr_reply_waitq +#define rq_reply_cbid rq_cli.cr_reply_cbid +#define rq_interpret_reply rq_cli.cr_reply_interp +#define rq_resend_cb rq_cli.cr_resend_cb +#define rq_async_args rq_cli.cr_async_args +#define rq_cb_data rq_cli.cr_cb_data +#define rq_commit_cb rq_cli.cr_commit_cb +#define rq_replay_cb rq_cli.cr_replay_cb + +struct ptlrpc_srv_req { + /** initial thread servicing this request */ + struct ptlrpc_thread *sr_svc_thread; + /** + * Server side list of incoming unserved requests sorted by arrival + * time. Traversed from time to time to notice about to expire + * requests and sent back "early replies" to clients to let them + * know server is alive and well, just very busy to service their + * requests in time + */ + struct list_head sr_timed_list; + /** server-side per-export list */ + struct list_head sr_exp_list; + /** server-side history, used for debuging purposes. 
+struct ptlrpc_srv_req {
+	/** initial thread servicing this request */
+	struct ptlrpc_thread		*sr_svc_thread;
+	/**
+	 * Server side list of incoming unserved requests sorted by arrival
+	 * time. Traversed from time to time to notice about-to-expire
+	 * requests and send back "early replies" to clients to let them
+	 * know the server is alive and well, just too busy to service their
+	 * requests in time
+	 */
+	struct list_head		 sr_timed_list;
+	/** server-side per-export list */
+	struct list_head		 sr_exp_list;
+	/** server-side history, used for debugging purposes. */
+	struct list_head		 sr_hist_list;
+	/** history sequence # */
+	__u64				 sr_hist_seq;
+	/** the index of service's srv_at_array into which request is linked */
+	time_t				 sr_at_index;
+	/** authed uid */
+	uid_t				 sr_auth_uid;
+	/** authed uid mapped to */
+	uid_t				 sr_auth_mapped_uid;
+	/** RPC is generated from what part of Lustre */
+	enum lustre_sec_part		 sr_sp_from;
+	/** request session context */
+	struct lu_context		 sr_ses;
+	/** \addtogroup nrs
+	 * @{
+	 */
+	/** stub for NRS request */
+	struct ptlrpc_nrs_request	 sr_nrq;
+	/** @} nrs */
+	/** request arrival time */
+	struct timeval			 sr_arrival_time;
+	/** server's half ctx */
+	struct ptlrpc_svc_ctx		*sr_svc_ctx;
+	/** (server side), pointed directly into req buffer */
+	struct ptlrpc_user_desc		*sr_user_desc;
+	/** separated reply state */
+	struct ptlrpc_reply_state	*sr_reply_state;
+	/** server-side hp handlers */
+	struct ptlrpc_hpreq_ops		*sr_ops;
+	/** incoming request buffer */
+	struct ptlrpc_request_buffer_desc *sr_rqbd;
+};
+
+/** server request member alias */
+/* NB: these aliases should NOT be used by any new code, instead they should
+ * be removed step by step to avoid potential abuse */
+#define rq_svc_thread		rq_srv.sr_svc_thread
+#define rq_timed_list		rq_srv.sr_timed_list
+#define rq_exp_list		rq_srv.sr_exp_list
+#define rq_history_list		rq_srv.sr_hist_list
+#define rq_history_seq		rq_srv.sr_hist_seq
+#define rq_at_index		rq_srv.sr_at_index
+#define rq_auth_uid		rq_srv.sr_auth_uid
+#define rq_auth_mapped_uid	rq_srv.sr_auth_mapped_uid
+#define rq_sp_from		rq_srv.sr_sp_from
+#define rq_session		rq_srv.sr_ses
+#define rq_nrq			rq_srv.sr_nrq
+#define rq_arrival_time		rq_srv.sr_arrival_time
+#define rq_reply_state		rq_srv.sr_reply_state
+#define rq_svc_ctx		rq_srv.sr_svc_ctx
+#define rq_user_desc		rq_srv.sr_user_desc
+#define rq_ops			rq_srv.sr_ops
+#define rq_rqbd			rq_srv.sr_rqbd
+
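Likewise on the server side; as a sketch of typical use (editor's illustration modelled on the service-time accounting in ptlrpc_server_handle_request(), not code from this patch):

	struct timeval	work_start;
	long		timediff;

	do_gettimeofday(&work_start);
	/* microseconds spent queued, read via the rq_arrival_time alias */
	timediff = cfs_timeval_sub(&work_start, &req->rq_arrival_time, NULL);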
 /**
  * Represents remote procedure call.
  *
@@ -657,39 +1945,21 @@ struct ptlrpc_hpreq_ops {
  * in Lustre.
  */
 struct ptlrpc_request {
-	/* Request type: one of PTL_RPC_MSG_* */
-	int rq_type;
-	/**
-	 * Linkage item through which this request is included into
-	 * sending/delayed lists on client and into rqbd list on server
-	 */
-	cfs_list_t rq_list;
-	/**
-	 * Server side list of incoming unserved requests sorted by arrival
-	 * time. Traversed from time to time to notice about to expire
-	 * requests and sent back "early replies" to clients to let them
-	 * know server is alive and well, just very busy to service their
-	 * requests in time
-	 */
-	cfs_list_t rq_timed_list;
-	/** server-side history, used for debuging purposes. */
-	cfs_list_t rq_history_list;
-	/** server-side per-export list */
-	cfs_list_t rq_exp_list;
-	/** server-side hp handlers */
-	struct ptlrpc_hpreq_ops *rq_ops;
-	/** history sequence # */
-	__u64 rq_history_seq;
-	/** the index of service's srv_at_array into which request is linked */
-	time_t rq_at_index;
-	/** Result of request processing */
-	int rq_status;
-	/** Lock to protect request flags and some other important bits, like
-	 * rq_list
-	 */
-	cfs_spinlock_t rq_lock;
-	/** client-side flags are serialized by rq_lock */
-	unsigned long rq_intr:1, rq_replied:1, rq_err:1,
+	/* Request type: one of PTL_RPC_MSG_* */
+	int			 rq_type;
+	/** Result of request processing */
+	int			 rq_status;
+	/**
+	 * Linkage item through which this request is included into
+	 * sending/delayed lists on client and into rqbd list on server
+	 */
+	struct list_head	 rq_list;
+	/** Lock to protect request flags and some other important bits, like
+	 * rq_list
+	 */
+	spinlock_t		 rq_lock;
+	/** client-side flags are serialized by rq_lock @{ */
+	unsigned int		 rq_intr:1, rq_replied:1, rq_err:1,
 		rq_timedout:1, rq_resend:1, rq_restart:1,
 		/**
 		 * when ->rq_replay is set, request is kept by the client even
@@ -703,32 +1973,40 @@ struct ptlrpc_request {
 		rq_replay:1,
 		rq_no_resend:1, rq_waiting:1, rq_receiving_reply:1,
 		rq_no_delay:1, rq_net_err:1, rq_wait_ctx:1,
-		rq_early:1, rq_must_unlink:1,
-		rq_fake:1,	/* this fake req */
-		rq_memalloc:1,	/* req originated from "kswapd" */
-		/* server-side flags */
-		rq_packed_final:1, /* packed final reply */
-		rq_hp:1,	/* high priority RPC */
-		rq_at_linked:1,	/* link into service's srv_at_array */
-		rq_reply_truncate:1,
-		rq_committed:1,
-		/* whether the "rq_set" is a valid one */
-		rq_invalid_rqset:1,
-		rq_generation_set:1;
-
-	enum rq_phase rq_phase; /* one of RQ_PHASE_* */
-	enum rq_phase rq_next_phase; /* one of RQ_PHASE_* to be used next */
-	cfs_atomic_t rq_refcount;/* client-side refcount for SENT race,
-				    server-side refcounf for multiple replies */
-
-	/** initial thread servicing this request */
-	struct ptlrpc_thread *rq_svc_thread;
-
-	/** Portal to which this request would be sent */
-	int rq_request_portal;	/* XXX FIXME bug 249 */
-	/** Portal where to wait for reply and where reply would be sent */
-	int rq_reply_portal;	/* XXX FIXME bug 249 */
+		rq_early:1,
+		rq_req_unlinked:1,	/* unlinked request buffer from lnet */
+		rq_reply_unlinked:1,	/* unlinked reply buffer from lnet */
+		rq_memalloc:1,		/* req originated from "kswapd" */
+		rq_committed:1,
+		rq_reply_truncated:1,
+		/** whether the "rq_set" is a valid one */
+		rq_invalid_rqset:1,
+		rq_generation_set:1,
+		/** do not resend request on -EINPROGRESS */
+		rq_no_retry_einprogress:1,
+		/* allow the req to be sent if the import is in recovery
+		 * status */
+		rq_allow_replay:1,
+		/* bulk request, sent to server, but uncommitted */
+		rq_unstable:1;
+	/** @} */
+
+	/** server-side flags @{ */
+	unsigned int
+		rq_hp:1,		/**< high priority RPC */
+		rq_at_linked:1,		/**< link into service's srv_at_array */
+		rq_packed_final:1;	/**< packed final reply */
+	/** @} */
+
+	/** one of RQ_PHASE_* */
+	enum rq_phase		 rq_phase;
+	/** one of RQ_PHASE_* to be used next */
+	enum rq_phase		 rq_next_phase;
+	/**
+	 * client-side refcount for SENT race, server-side refcount
+	 * for multiple replies
+	 */
+	atomic_t		 rq_refcount;
 	/**
 	 * client-side:
 	 * !rq_truncate : # reply bytes actually received,
@@ -737,35 +2015,38 @@ struct ptlrpc_request {
 	int rq_nob_received;
 	/** Request length */
 	int rq_reqlen;
-	/** Request message - what client sent */
-	struct lustre_msg *rq_reqmsg;
-	/** Reply length */
 	int rq_replen;
+	/** Pool if request is from preallocated list */
+	struct ptlrpc_request_pool *rq_pool;
+	/** Request message - what client sent */
+	struct lustre_msg	*rq_reqmsg;
 	/** Reply message - server response */
 	struct lustre_msg *rq_repmsg;
 	/** Transaction number */
 	__u64 rq_transno;
 	/** xid */
 	__u64 rq_xid;
-	/**
-	 * List item to for replay list. Not yet commited requests get linked
-	 * there.
-	 * Also see \a rq_replay comment above.
-	 */
-	cfs_list_t rq_replay_list;
-
-	/**
-	 * security and encryption data
-	 * @{ */
-	struct ptlrpc_cli_ctx   *rq_cli_ctx;     /**< client's half ctx */
-	struct ptlrpc_svc_ctx   *rq_svc_ctx;     /**< server's half ctx */
-	cfs_list_t               rq_ctx_chain;   /**< link to waited ctx */
-
-	struct sptlrpc_flavor    rq_flvr;        /**< for client & server */
-	enum lustre_sec_part     rq_sp_from;
+	/**
+	 * List item for the replay list. Not yet committed requests get
+	 * linked there.
+	 * Also see \a rq_replay comment above.
+	 * It's also link chain on obd_export::exp_req_replay_queue
+	 */
+	struct list_head	 rq_replay_list;
+	/** non-shared members for client & server request */
+	union {
+		struct ptlrpc_cli_req	 rq_cli;
+		struct ptlrpc_srv_req	 rq_srv;
+	};
+	/**
+	 * security and encryption data
+	 * @{ */
+	/** description of flavors for client & server */
+	struct sptlrpc_flavor	 rq_flvr;

-	unsigned long            /* client/server security flags */
+	/* client/server security flags */
+	unsigned int
 	                         rq_ctx_init:1,      /* context initiation */
 	                         rq_ctx_fini:1,      /* context destroy */
 	                         rq_bulk_read:1,     /* request bulk read */
@@ -781,127 +2062,55 @@ struct ptlrpc_request {
 	                         rq_pack_bulk:1,     /* doesn't expect reply FIXME */
 	                         rq_no_reply:1,
-	                         rq_pill_init:1;     /* pill initialized */
+	                         rq_pill_init:1,     /* pill initialized */
+	                         rq_srv_req:1;       /* server request */

-	uid_t                    rq_auth_uid;        /* authed uid */
-	uid_t                    rq_auth_mapped_uid; /* authed uid mapped to */
-	/* (server side), pointed directly into req buffer */
-	struct ptlrpc_user_desc *rq_user_desc;
-
-	/** early replies go to offset 0, regular replies go after that */
-	unsigned int             rq_reply_off;
-
-	/* various buffer pointers */
-	struct lustre_msg       *rq_reqbuf;      /* req wrapper */
+	/** various buffer pointers */
+	struct lustre_msg	*rq_reqbuf;	/**< req wrapper */
+	char			*rq_repbuf;	/**< rep buffer */
+	struct lustre_msg	*rq_repdata;	/**< rep wrapper msg */
+	/** only in priv mode */
+	struct lustre_msg	*rq_clrbuf;
 	int                      rq_reqbuf_len;  /* req wrapper buf len */
 	int                      rq_reqdata_len; /* req wrapper msg len */
-	char                    *rq_repbuf;      /* rep buffer */
 	int                      rq_repbuf_len;  /* rep buffer len */
-	struct lustre_msg       *rq_repdata;     /* rep wrapper msg */
 	int                      rq_repdata_len; /* rep wrapper msg len */
-	struct lustre_msg       *rq_clrbuf;      /* only in priv mode */
 	int                      rq_clrbuf_len;  /* only in priv mode */
 	int                      rq_clrdata_len; /* only in priv mode */
-	/** @} */
-
-	/** Fields that help to see if request and reply were swabbed or not */
-	__u32 rq_req_swab_mask;
-	__u32 rq_rep_swab_mask;
-
-	/** What was import generation when this request was sent */
-	int rq_import_generation;
-	enum lustre_imp_state rq_send_state;
-
-	/** how many early replies (for stats) */
-	int rq_early_count;
-
-	/** client+server request */
-	lnet_handle_md_t rq_req_md_h;
-	struct ptlrpc_cb_id rq_req_cbid;
-	/** optional time limit for send attempts */
-	cfs_duration_t rq_delay_limit;
-	/** time request was first queued */
-	cfs_time_t rq_queued_time;
-
-	/* server-side...
*/ - /** request arrival time */ - struct timeval rq_arrival_time; - /** separated reply state */ - struct ptlrpc_reply_state *rq_reply_state; - /** incoming request buffer */ - struct ptlrpc_request_buffer_desc *rq_rqbd; - - /** client-only incoming reply */ - lnet_handle_md_t rq_reply_md_h; - cfs_waitq_t rq_reply_waitq; - struct ptlrpc_cb_id rq_reply_cbid; - - /** our LNet NID */ - lnet_nid_t rq_self; - /** Peer description (the other side) */ - lnet_process_id_t rq_peer; - /** Server-side, export on which request was received */ - struct obd_export *rq_export; - /** Client side, import where request is being sent */ - struct obd_import *rq_import; - - /** Replay callback, called after request is replayed at recovery */ - void (*rq_replay_cb)(struct ptlrpc_request *); - /** - * Commit callback, called when request is committed and about to be - * freed. - */ - void (*rq_commit_cb)(struct ptlrpc_request *); - /** Opaq data for replay and commit callbacks. */ - void *rq_cb_data; + /** early replies go to offset 0, regular replies go after that */ + unsigned int rq_reply_off; - /** For bulk requests on client only: bulk descriptor */ - struct ptlrpc_bulk_desc *rq_bulk; + /** @} */ - /** client outgoing req */ - /** - * when request/reply sent (secs), or time when request should be sent - */ - time_t rq_sent; - /** time for request really sent out */ - time_t rq_real_sent; - - /** when request must finish. volatile - * so that servers' early reply updates to the deadline aren't - * kept in per-cpu cache */ - volatile time_t rq_deadline; - /** when req reply unlink must finish. */ - time_t rq_reply_deadline; - /** when req bulk unlink must finish. */ - time_t rq_bulk_deadline; - /** - * service time estimate (secs) - * If the requestsis not served by this time, it is marked as timed out. - */ - int rq_timeout; - - /** Multi-rpc bits */ - /** Link item for request set lists */ - cfs_list_t rq_set_chain; - /** Per-request waitq introduced by bug 21938 for recovery waiting */ - cfs_waitq_t rq_set_waitq; - /** Link back to the request set */ - struct ptlrpc_request_set *rq_set; - /** Async completion handler, called when reply is received */ - ptlrpc_interpterer_t rq_interpret_reply; - /** Async completion context */ - union ptlrpc_async_args rq_async_args; - - /** Pool if request is from preallocated list */ - struct ptlrpc_request_pool *rq_pool; - - struct lu_context rq_session; - struct lu_context rq_recov_session; - - /** request format description */ - struct req_capsule rq_pill; + /** Fields that help to see if request and reply were swabbed or not */ + __u32 rq_req_swab_mask; + __u32 rq_rep_swab_mask; + + /** how many early replies (for stats) */ + int rq_early_count; + /** Server-side, export on which request was received */ + struct obd_export *rq_export; + /** import where request is being sent */ + struct obd_import *rq_import; + /** our LNet NID */ + lnet_nid_t rq_self; + /** Peer description (the other side) */ + lnet_process_id_t rq_peer; + /** + * service time estimate (secs) + * If the request is not served by this time, it is marked as timed out. + */ + int rq_timeout; + /** + * when request/reply sent (secs), or time when request should be sent + */ + time_t rq_sent; + /** when request must finish. 
*/ + time_t rq_deadline; + /** request format description */ + struct req_capsule rq_pill; }; /** @@ -920,10 +2129,39 @@ static inline int ptlrpc_req_interpret(const struct lu_env *env, return rc; } +/** \addtogroup nrs + * @{ + */ +int ptlrpc_nrs_policy_register(struct ptlrpc_nrs_pol_conf *conf); +int ptlrpc_nrs_policy_unregister(struct ptlrpc_nrs_pol_conf *conf); +void ptlrpc_nrs_req_hp_move(struct ptlrpc_request *req); +void nrs_policy_get_info_locked(struct ptlrpc_nrs_policy *policy, + struct ptlrpc_nrs_pol_info *info); + +/* + * Can the request be moved from the regular NRS head to the high-priority NRS + * head (of the same PTLRPC service partition), if any? + * + * For a reliable result, this should be checked under svcpt->scp_req lock. + */ +static inline bool ptlrpc_nrs_req_can_move(struct ptlrpc_request *req) +{ + struct ptlrpc_nrs_request *nrq = &req->rq_nrq; + + /** + * LU-898: Check ptlrpc_nrs_request::nr_enqueued to make sure the + * request has been enqueued first, and ptlrpc_nrs_request::nr_started + * to make sure it has not been scheduled yet (analogous to previous + * (non-NRS) checking of !list_empty(&ptlrpc_request::rq_list). + */ + return nrq->nr_enqueued && !nrq->nr_started && !req->rq_hp; +} +/** @} nrs */ + /** * Returns 1 if request buffer at offset \a index was already swabbed */ -static inline int lustre_req_swabbed(struct ptlrpc_request *req, int index) +static inline int lustre_req_swabbed(struct ptlrpc_request *req, size_t index) { LASSERT(index < sizeof(req->rq_req_swab_mask) * 8); return req->rq_req_swab_mask & (1 << index); @@ -932,7 +2170,7 @@ static inline int lustre_req_swabbed(struct ptlrpc_request *req, int index) /** * Returns 1 if request reply buffer at offset \a index was already swabbed */ -static inline int lustre_rep_swabbed(struct ptlrpc_request *req, int index) +static inline int lustre_rep_swabbed(struct ptlrpc_request *req, size_t index) { LASSERT(index < sizeof(req->rq_rep_swab_mask) * 8); return req->rq_rep_swab_mask & (1 << index); @@ -957,7 +2195,8 @@ static inline int ptlrpc_rep_need_swab(struct ptlrpc_request *req) /** * Mark request buffer at offset \a index that it was already swabbed */ -static inline void lustre_set_req_swabbed(struct ptlrpc_request *req, int index) +static inline void lustre_set_req_swabbed(struct ptlrpc_request *req, + size_t index) { LASSERT(index < sizeof(req->rq_req_swab_mask) * 8); LASSERT((req->rq_req_swab_mask & (1 << index)) == 0); @@ -967,7 +2206,8 @@ static inline void lustre_set_req_swabbed(struct ptlrpc_request *req, int index) /** * Mark request reply buffer at offset \a index that it was already swabbed */ -static inline void lustre_set_rep_swabbed(struct ptlrpc_request *req, int index) +static inline void lustre_set_rep_swabbed(struct ptlrpc_request *req, + size_t index) { LASSERT(index < sizeof(req->rq_rep_swab_mask) * 8); LASSERT((req->rq_rep_swab_mask & (1 << index)) == 0); @@ -1011,7 +2251,7 @@ ptlrpc_rqphase2str(struct ptlrpc_request *req) /** * Debugging functions and helpers to print request structure into debug log * @{ - */ + */ /* Spare the preprocessor, spoil the bugs. */ #define FLAG(field, str) (field ? 
str : "") @@ -1068,16 +2308,16 @@ do { \ * Structure that defines a single page of a bulk transfer */ struct ptlrpc_bulk_page { - /** Linkage to list of pages in a bulk */ - cfs_list_t bp_link; - /** - * Number of bytes in a page to transfer starting from \a bp_pageoffset - */ - int bp_buflen; - /** offset within a page */ - int bp_pageoffset; - /** The page itself */ - struct page *bp_page; + /** Linkage to list of pages in a bulk */ + struct list_head bp_link; + /** + * Number of bytes in a page to transfer starting from \a bp_pageoffset + */ + int bp_buflen; + /** offset within a page */ + int bp_pageoffset; + /** The page itself */ + struct page *bp_page; }; #define BULK_GET_SOURCE 0 @@ -1086,7 +2326,7 @@ struct ptlrpc_bulk_page { #define BULK_PUT_SOURCE 3 /** - * Definition of buk descriptor. + * Definition of bulk descriptor. * Bulks are special "Two phase" RPCs where initial request message * is sent first and it is followed bt a transfer (o receiving) of a large * amount of data to be settled into pages referenced from the bulk descriptors. @@ -1096,48 +2336,45 @@ struct ptlrpc_bulk_page { * Another user is readpage for MDT. */ struct ptlrpc_bulk_desc { - /** completed successfully */ - unsigned long bd_success:1; - /** accessible to the network (network io potentially in progress) */ - unsigned long bd_network_rw:1; - /** {put,get}{source,sink} */ - unsigned long bd_type:2; - /** client side */ - unsigned long bd_registered:1; - /** For serialization with callback */ - cfs_spinlock_t bd_lock; - /** Import generation when request for this bulk was sent */ - int bd_import_generation; - /** Server side - export this bulk created for */ - struct obd_export *bd_export; - /** Client side - import this bulk was sent on */ - struct obd_import *bd_import; - /** LNet portal for this bulk */ - __u32 bd_portal; - /** Back pointer to the request */ - struct ptlrpc_request *bd_req; - cfs_waitq_t bd_waitq; /* server side only WQ */ - int bd_iov_count; /* # entries in bd_iov */ - int bd_max_iov; /* allocated size of bd_iov */ - int bd_nob; /* # bytes covered */ - int bd_nob_transferred; /* # bytes GOT/PUT */ - - __u64 bd_last_xid; - - struct ptlrpc_cb_id bd_cbid; /* network callback info */ - lnet_handle_md_t bd_md_h; /* associated MD */ - lnet_nid_t bd_sender; /* stash event::sender */ - -#if defined(__KERNEL__) - /* - * encrypt iov, size is either 0 or bd_iov_count. 
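For orientation, an editor's sketch (not part of this change) of how a client read would use these constants with the updated ptlrpc_prep_bulk_imp() declared later in this header; the page count, page and portal are illustrative, and max_brw = 1 keeps the transfer within a single MD:

	struct ptlrpc_bulk_desc *desc;

	/* a read is a PUT into the client's pages, hence BULK_PUT_SINK */
	desc = ptlrpc_prep_bulk_imp(req, npages, 1, BULK_PUT_SINK,
				    OST_BULK_PORTAL);
	if (desc == NULL)
		return -ENOMEM;
	ptlrpc_prep_bulk_page_pin(desc, page, 0, PAGE_CACHE_SIZE);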
- */ - lnet_kiov_t *bd_enc_iov; + /** completed with failure */ + unsigned long bd_failure:1; + /** {put,get}{source,sink} */ + unsigned long bd_type:2; + /** client side */ + unsigned long bd_registered:1; + /** For serialization with callback */ + spinlock_t bd_lock; + /** Import generation when request for this bulk was sent */ + int bd_import_generation; + /** LNet portal for this bulk */ + __u32 bd_portal; + /** Server side - export this bulk created for */ + struct obd_export *bd_export; + /** Client side - import this bulk was sent on */ + struct obd_import *bd_import; + /** Back pointer to the request */ + struct ptlrpc_request *bd_req; + wait_queue_head_t bd_waitq; /* server side only WQ */ + int bd_iov_count; /* # entries in bd_iov */ + int bd_max_iov; /* allocated size of bd_iov */ + int bd_nob; /* # bytes covered */ + int bd_nob_transferred; /* # bytes GOT/PUT */ + + __u64 bd_last_xid; + + struct ptlrpc_cb_id bd_cbid; /* network callback info */ + lnet_nid_t bd_sender; /* stash event::sender */ + int bd_md_count; /* # valid entries in bd_mds */ + int bd_md_max_brw; /* max entries in bd_mds */ + /** array of associated MDs */ + lnet_handle_md_t bd_mds[PTLRPC_BULK_OPS_COUNT]; - lnet_kiov_t bd_iov[0]; -#else - lnet_md_iovec_t bd_iov[0]; -#endif + /* + * encrypt iov, size is either 0 or bd_iov_count. + */ + lnet_kiov_t *bd_enc_iov; + + lnet_kiov_t bd_iov[0]; }; enum { @@ -1157,7 +2394,7 @@ struct ptlrpc_thread { /** * List of active threads in svc->srv_threads */ - cfs_list_t t_link; + struct list_head t_link; /** * thread-private data (preallocated memory) */ @@ -1170,7 +2407,7 @@ struct ptlrpc_thread { /** * service thread pid */ - pid_t t_pid; + pid_t t_pid; /** * put watchdog in the structure per thread b=14840 */ @@ -1179,7 +2416,7 @@ struct ptlrpc_thread { * the svc this thread belonged to b=18582 */ struct ptlrpc_service_part *t_svcpt; - cfs_waitq_t t_ctl_waitq; + wait_queue_head_t t_ctl_waitq; struct lu_env *t_env; char t_name[PTLRPC_THR_NAME_LEN]; }; @@ -1252,23 +2489,23 @@ static inline int thread_test_and_clear_flags(struct ptlrpc_thread *thread, * More than one request can fit into the buffer. 
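The bd_mds[]/bd_md_count/bd_md_max_brw fields above replace the old single MD handle: one bulk is now carried by up to PTLRPC_BULK_OPS_COUNT LNET_MTU-sized MDs. A minimal sketch of the arithmetic (the helper name is the editor's, not from this patch):

	/* MDs needed for a transfer of nob bytes, at most bd_md_max_brw */
	static inline int ptlrpc_bulk_mds_needed(int nob)
	{
		return (nob + LNET_MTU - 1) >> LNET_MTU_BITS;
	}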
*/ struct ptlrpc_request_buffer_desc { - /** Link item for rqbds on a service */ - cfs_list_t rqbd_list; - /** History of requests for this buffer */ - cfs_list_t rqbd_reqs; - /** Back pointer to service for which this buffer is registered */ - struct ptlrpc_service_part *rqbd_svcpt; - /** LNet descriptor */ - lnet_handle_md_t rqbd_md_h; - int rqbd_refcount; - /** The buffer itself */ - char *rqbd_buffer; - struct ptlrpc_cb_id rqbd_cbid; - /** - * This "embedded" request structure is only used for the - * last request to fit into the buffer - */ - struct ptlrpc_request rqbd_req; + /** Link item for rqbds on a service */ + struct list_head rqbd_list; + /** History of requests for this buffer */ + struct list_head rqbd_reqs; + /** Back pointer to service for which this buffer is registered */ + struct ptlrpc_service_part *rqbd_svcpt; + /** LNet descriptor */ + lnet_handle_md_t rqbd_md_h; + int rqbd_refcount; + /** The buffer itself */ + char *rqbd_buffer; + struct ptlrpc_cb_id rqbd_cbid; + /** + * This "embedded" request structure is only used for the + * last request to fit into the buffer + */ + struct ptlrpc_request rqbd_req; }; typedef int (*svc_handler_t)(struct ptlrpc_request *req); @@ -1318,10 +2555,10 @@ struct ptlrpc_service_ops { */ struct ptlrpc_service { /** serialize /proc operations */ - cfs_spinlock_t srv_lock; - /** most often accessed fields */ - /** chain thru all services */ - cfs_list_t srv_list; + spinlock_t srv_lock; + /** most often accessed fields */ + /** chain thru all services */ + struct list_head srv_list; /** service operations table */ struct ptlrpc_service_ops srv_ops; /** only statically allocated strings here; we don't clean them */ @@ -1329,13 +2566,13 @@ struct ptlrpc_service { /** only statically allocated strings here; we don't clean them */ char *srv_thread_name; /** service thread list */ - cfs_list_t srv_threads; + struct list_head srv_threads; /** threads # should be created for each partition on initializing */ int srv_nthrs_cpt_init; /** limit of threads number for each partition */ int srv_nthrs_cpt_limit; /** Root of /proc dir tree for this service */ - cfs_proc_dir_entry_t *srv_procroot; + struct proc_dir_entry *srv_procroot; /** Pointer to statistic data for this service */ struct lprocfs_stats *srv_stats; /** # hp per lp reqs to handle */ @@ -1410,38 +2647,40 @@ struct ptlrpc_service_part { /** # running threads */ int scp_nthrs_running; /** service threads list */ - cfs_list_t scp_threads; + struct list_head scp_threads; /** * serialize the following fields, used for protecting * rqbd list and incoming requests waiting for preprocess, * threads starting & stopping are also protected by this lock. */ - cfs_spinlock_t scp_lock __cfs_cacheline_aligned; + spinlock_t scp_lock __cfs_cacheline_aligned; /** total # req buffer descs allocated */ int scp_nrqbds_total; /** # posted request buffers for receiving */ int scp_nrqbds_posted; + /** in progress of allocating rqbd */ + int scp_rqbd_allocating; /** # incoming reqs */ int scp_nreqs_incoming; /** request buffers to be reposted */ - cfs_list_t scp_rqbd_idle; + struct list_head scp_rqbd_idle; /** req buffers receiving */ - cfs_list_t scp_rqbd_posted; + struct list_head scp_rqbd_posted; /** incoming reqs */ - cfs_list_t scp_req_incoming; + struct list_head scp_req_incoming; /** timeout before re-posting reqs, in tick */ cfs_duration_t scp_rqbd_timeout; /** * all threads sleep on this. This wait-queue is signalled when new * incoming request arrives and when difficult reply has to be handled. 
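An editor's sketch of how a service thread might block on scp_waitq (the condition is simplified from ptlrpc_main(); LWI_TIMEOUT and l_wait_event_exclusive_head are the usual lustre_lib.h wait primitives):

	struct l_wait_info lwi = LWI_TIMEOUT(svcpt->scp_rqbd_timeout,
					     NULL, NULL);

	/* sleep until there is work: a new request or a difficult reply */
	l_wait_event_exclusive_head(svcpt->scp_waitq,
				    !list_empty(&svcpt->scp_req_incoming),
				    &lwi);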
*/ - cfs_waitq_t scp_waitq; + wait_queue_head_t scp_waitq; /** request history */ - cfs_list_t scp_hist_reqs; + struct list_head scp_hist_reqs; /** request buffer history */ - cfs_list_t scp_hist_rqbds; + struct list_head scp_hist_rqbds; /** # request buffers in history */ int scp_hist_nrqbds; /** sequence number for request */ @@ -1453,12 +2692,8 @@ struct ptlrpc_service_part { * serialize the following fields, used for processing requests * sent to this portal */ - cfs_spinlock_t scp_req_lock __cfs_cacheline_aligned; - /** # reqs in either of the queues below */ - /** reqs waiting for service */ - cfs_list_t scp_req_pending; - /** high priority queue */ - cfs_list_t scp_hreq_pending; + spinlock_t scp_req_lock __cfs_cacheline_aligned; + /** # reqs in either of the NRS heads below */ /** # reqs being served */ int scp_nreqs_active; /** # HPreqs being served */ @@ -1466,19 +2701,25 @@ struct ptlrpc_service_part { /** # hp requests handled */ int scp_hreq_count; + /** NRS head for regular requests */ + struct ptlrpc_nrs scp_nrs_reg; + /** NRS head for HP requests; this is only valid for services that can + * handle HP requests */ + struct ptlrpc_nrs *scp_nrs_hp; + /** AT stuff */ /** @{ */ /** * serialize the following fields, used for changes on * adaptive timeout */ - cfs_spinlock_t scp_at_lock __cfs_cacheline_aligned; + spinlock_t scp_at_lock __cfs_cacheline_aligned; /** estimated rpc service time */ struct adaptive_timeout scp_at_estimate; /** reqs waiting for replies */ struct ptlrpc_at_array scp_at_array; /** early reply timer */ - cfs_timer_t scp_at_timer; + struct timer_list scp_at_timer; /** debug */ cfs_time_t scp_at_checktime; /** check early replies */ @@ -1489,19 +2730,15 @@ struct ptlrpc_service_part { * serialize the following fields, used for processing * replies for this portal */ - cfs_spinlock_t scp_rep_lock __cfs_cacheline_aligned; + spinlock_t scp_rep_lock __cfs_cacheline_aligned; /** all the active replies */ - cfs_list_t scp_rep_active; -#ifndef __KERNEL__ - /** replies waiting for service */ - cfs_list_t scp_rep_queue; -#endif + struct list_head scp_rep_active; /** List of free reply_states */ - cfs_list_t scp_rep_idle; + struct list_head scp_rep_idle; /** waitq to run, when adding stuff to srv_free_rs_list */ - cfs_waitq_t scp_rep_waitq; + wait_queue_head_t scp_rep_waitq; /** # 'difficult' replies */ - cfs_atomic_t scp_nreps_difficult; + atomic_t scp_nreps_difficult; }; #define ptlrpc_service_for_each_part(part, i, svc) \ @@ -1514,28 +2751,28 @@ struct ptlrpc_service_part { * Declaration of ptlrpcd control structure */ struct ptlrpcd_ctl { - /** - * Ptlrpc thread control flags (LIOD_START, LIOD_STOP, LIOD_FORCE) - */ - unsigned long pc_flags; - /** - * Thread lock protecting structure fields. - */ - cfs_spinlock_t pc_lock; - /** - * Start completion. - */ - cfs_completion_t pc_starting; - /** - * Stop completion. - */ - cfs_completion_t pc_finishing; + /** + * Ptlrpc thread control flags (LIOD_START, LIOD_STOP, LIOD_FORCE) + */ + unsigned long pc_flags; + /** + * Thread lock protecting structure fields. + */ + spinlock_t pc_lock; + /** + * Start completion. + */ + struct completion pc_starting; + /** + * Stop completion. + */ + struct completion pc_finishing; /** * Thread requests set. */ struct ptlrpc_request_set *pc_set; /** - * Thread name used in cfs_daemonize() + * Thread name used in kthread_run() */ char pc_name[16]; /** @@ -1558,25 +2795,6 @@ struct ptlrpcd_ctl { * Record the partner index to be processed next. 
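Requests on a partition are queued on one of the two NRS heads above; resolving the right head mirrors the logic of nrs_svcpt2nrs() in ptlrpc (editor's sketch; the helper name here is illustrative):

	static inline struct ptlrpc_nrs *
	svcpt_to_nrs(struct ptlrpc_service_part *svcpt, bool hp)
	{
		/* only some services have a high-priority head */
		LASSERT(ergo(hp, svcpt->scp_nrs_hp != NULL));
		return hp ? svcpt->scp_nrs_hp : &svcpt->scp_nrs_reg;
	}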
*/ int pc_cursor; -#ifndef __KERNEL__ - /** - * Async rpcs flag to make sure that ptlrpcd_check() is called only - * once. - */ - int pc_recurred; - /** - * Currently not used. - */ - void *pc_callback; - /** - * User-space async rpcs callback. - */ - void *pc_wait_callback; - /** - * User-space check idle rpcs callback. - */ - void *pc_idle_callback; -#endif }; /* Bits for pc_flags */ @@ -1605,6 +2823,47 @@ enum ptlrpcd_ctl_flags { LIOD_BIND = 1 << 4, }; +/** + * \addtogroup nrs + * @{ + * + * Service compatibility function; the policy is compatible with all services. + * + * \param[in] svc The service the policy is attempting to register with. + * \param[in] desc The policy descriptor + * + * \retval true The policy is compatible with the service + * + * \see ptlrpc_nrs_pol_desc::pd_compat() + */ +static inline bool nrs_policy_compat_all(const struct ptlrpc_service *svc, + const struct ptlrpc_nrs_pol_desc *desc) +{ + return true; +} + +/** + * Service compatibility function; the policy is compatible with only a specific + * service which is identified by its human-readable name at + * ptlrpc_service::srv_name. + * + * \param[in] svc The service the policy is attempting to register with. + * \param[in] desc The policy descriptor + * + * \retval false The policy is not compatible with the service + * \retval true The policy is compatible with the service + * + * \see ptlrpc_nrs_pol_desc::pd_compat() + */ +static inline bool nrs_policy_compat_one(const struct ptlrpc_service *svc, + const struct ptlrpc_nrs_pol_desc *desc) +{ + LASSERT(desc->pd_compat_svc_name != NULL); + return strcmp(svc->srv_name, desc->pd_compat_svc_name) == 0; +} + +/** @} nrs */ + /* ptlrpc/events.c */ extern lnet_handle_eq_t ptlrpc_eq_h; extern int ptlrpc_uuid_to_peer(struct obd_uuid *uuid, @@ -1641,20 +2900,21 @@ extern lnet_pid_t ptl_get_pid(void); */ #ifdef HAVE_SERVER_SUPPORT struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_exp(struct ptlrpc_request *req, - int npages, int type, int portal); + unsigned npages, unsigned max_brw, + unsigned type, unsigned portal); int ptlrpc_start_bulk_transfer(struct ptlrpc_bulk_desc *desc); void ptlrpc_abort_bulk(struct ptlrpc_bulk_desc *desc); static inline int ptlrpc_server_bulk_active(struct ptlrpc_bulk_desc *desc) { - int rc; + int rc; - LASSERT(desc != NULL); + LASSERT(desc != NULL); - cfs_spin_lock(&desc->bd_lock); - rc = desc->bd_network_rw; - cfs_spin_unlock(&desc->bd_lock); - return rc; + spin_lock(&desc->bd_lock); + rc = desc->bd_md_count; + spin_unlock(&desc->bd_lock); + return rc; } #endif @@ -1663,10 +2923,11 @@ int ptlrpc_unregister_bulk(struct ptlrpc_request *req, int async); static inline int ptlrpc_client_bulk_active(struct ptlrpc_request *req) { - struct ptlrpc_bulk_desc *desc = req->rq_bulk; + struct ptlrpc_bulk_desc *desc; int rc; LASSERT(req != NULL); + desc = req->rq_bulk; if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK) && req->rq_bulk_deadline > cfs_time_current_sec()) @@ -1675,10 +2936,10 @@ static inline int ptlrpc_client_bulk_active(struct ptlrpc_request *req) if (!desc) return 0; - cfs_spin_lock(&desc->bd_lock); - rc = desc->bd_network_rw; - cfs_spin_unlock(&desc->bd_lock); - return rc; + spin_lock(&desc->bd_lock); + rc = desc->bd_md_count; + spin_unlock(&desc->bd_lock); + return rc; } #define PTLRPC_REPLY_MAYBE_DIFFICULT 0x01 @@ -1687,7 +2948,6 @@ int ptlrpc_send_reply(struct ptlrpc_request *req, int flags); int ptlrpc_reply(struct ptlrpc_request *req); int ptlrpc_send_error(struct ptlrpc_request *req, int difficult); int ptlrpc_error(struct ptlrpc_request 
*req); -void ptlrpc_resend_req(struct ptlrpc_request *request); int ptlrpc_at_get_net_latency(struct ptlrpc_request *req); int ptl_send_rpc(struct ptlrpc_request *request, int noreply); int ptlrpc_register_rqbd(struct ptlrpc_request_buffer_desc *rqbd); @@ -1699,6 +2959,8 @@ int ptlrpc_register_rqbd(struct ptlrpc_request_buffer_desc *rqbd); * request queues, request management, etc. * @{ */ +void ptlrpc_request_committed(struct ptlrpc_request *req, int force); + void ptlrpc_init_client(int req_portal, int rep_portal, char *name, struct ptlrpc_client *); void ptlrpc_cleanup_client(struct obd_import *imp); @@ -1706,7 +2968,6 @@ struct ptlrpc_connection *ptlrpc_uuid_to_connection(struct obd_uuid *uuid); int ptlrpc_queue_wait(struct ptlrpc_request *req); int ptlrpc_replay_req(struct ptlrpc_request *req); -int ptlrpc_unregister_reply(struct ptlrpc_request *req, int async); void ptlrpc_restart_req(struct ptlrpc_request *req); void ptlrpc_abort_inflight(struct obd_import *imp); void ptlrpc_cleanup_imp(struct obd_import *imp); @@ -1717,16 +2978,11 @@ struct ptlrpc_request_set *ptlrpc_prep_fcset(int max, set_producer_func func, void *arg); int ptlrpc_set_add_cb(struct ptlrpc_request_set *set, set_interpreter_func fn, void *data); -int ptlrpc_set_next_timeout(struct ptlrpc_request_set *); int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set); int ptlrpc_set_wait(struct ptlrpc_request_set *); -int ptlrpc_expired_set(void *data); -void ptlrpc_interrupted_set(void *data); void ptlrpc_mark_interrupted(struct ptlrpc_request *req); void ptlrpc_set_destroy(struct ptlrpc_request_set *); void ptlrpc_set_add_req(struct ptlrpc_request_set *, struct ptlrpc_request *); -void ptlrpc_set_add_new_req(struct ptlrpcd_ctl *pc, - struct ptlrpc_request *req); void ptlrpc_free_rq_pool(struct ptlrpc_request_pool *pool); void ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq); @@ -1750,11 +3006,6 @@ struct ptlrpc_request *ptlrpc_request_alloc_pack(struct obd_import *imp, int ptlrpc_request_bufs_pack(struct ptlrpc_request *request, __u32 version, int opcode, char **bufs, struct ptlrpc_cli_ctx *ctx); -struct ptlrpc_request *ptlrpc_prep_fakereq(struct obd_import *imp, - unsigned int timeout, - ptlrpc_interpterer_t interpreter); -void ptlrpc_fakereq_finished(struct ptlrpc_request *req); - struct ptlrpc_request *ptlrpc_prep_req(struct obd_import *imp, __u32 version, int opcode, int count, __u32 *lengths, char **bufs); @@ -1766,10 +3017,33 @@ void ptlrpc_req_finished(struct ptlrpc_request *request); void ptlrpc_req_finished_with_imp_lock(struct ptlrpc_request *request); struct ptlrpc_request *ptlrpc_request_addref(struct ptlrpc_request *req); struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req, - int npages, int type, int portal); -void ptlrpc_free_bulk(struct ptlrpc_bulk_desc *bulk); -void ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc, - cfs_page_t *page, int pageoffset, int len); + unsigned npages, unsigned max_brw, + unsigned type, unsigned portal); +void __ptlrpc_free_bulk(struct ptlrpc_bulk_desc *bulk, int pin); +static inline void ptlrpc_free_bulk_pin(struct ptlrpc_bulk_desc *bulk) +{ + __ptlrpc_free_bulk(bulk, 1); +} +static inline void ptlrpc_free_bulk_nopin(struct ptlrpc_bulk_desc *bulk) +{ + __ptlrpc_free_bulk(bulk, 0); +} +void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc, + struct page *page, int pageoffset, int len, int); +static inline void ptlrpc_prep_bulk_page_pin(struct ptlrpc_bulk_desc *desc, + struct page *page, int pageoffset, + int 
len) +{ + __ptlrpc_prep_bulk_page(desc, page, pageoffset, len, 1); +} + +static inline void ptlrpc_prep_bulk_page_nopin(struct ptlrpc_bulk_desc *desc, + struct page *page, int pageoffset, + int len) +{ + __ptlrpc_prep_bulk_page(desc, page, pageoffset, len, 0); +} + void ptlrpc_retain_replayable_request(struct ptlrpc_request *req, struct obd_import *imp); __u64 ptlrpc_next_xid(void); @@ -1784,7 +3058,7 @@ int ptlrpcd_queue_work(void *handler); /** @} */ struct ptlrpc_service_buf_conf { - /* nbufs is how many buffers to post */ + /* nbufs is buffers # to allocate when growing the pool */ unsigned int bc_nbufs; /* buffer size to post */ unsigned int bc_buf_size; @@ -1856,6 +3130,7 @@ void ptlrpc_save_lock(struct ptlrpc_request *req, void ptlrpc_commit_replies(struct obd_export *exp); void ptlrpc_dispatch_difficult_reply(struct ptlrpc_reply_state *rs); void ptlrpc_schedule_difficult_reply(struct ptlrpc_reply_state *rs); +int ptlrpc_hpreq_handler(struct ptlrpc_request *req); struct ptlrpc_service *ptlrpc_register_service( struct ptlrpc_service_conf *conf, struct proc_dir_entry *proc_entry); @@ -1866,16 +3141,13 @@ int ptlrpc_unregister_service(struct ptlrpc_service *service); int liblustre_check_services(void *arg); void ptlrpc_daemonize(char *name); int ptlrpc_service_health_check(struct ptlrpc_service *); -void ptlrpc_hpreq_reorder(struct ptlrpc_request *req); void ptlrpc_server_drop_request(struct ptlrpc_request *req); +void ptlrpc_request_change_export(struct ptlrpc_request *req, + struct obd_export *export); +void ptlrpc_update_export_timer(struct obd_export *exp, long extra_delay); -#ifdef __KERNEL__ int ptlrpc_hr_init(void); void ptlrpc_hr_fini(void); -#else -# define ptlrpc_hr_init() (0) -# define ptlrpc_hr_fini() do {} while(0) -#endif /** @} */ @@ -1896,14 +3168,14 @@ int ptlrpc_reconnect_import(struct obd_import *imp); /** @} */ /** - * ptlrpc msg buffer and swab interface + * ptlrpc msg buffer and swab interface * * @{ */ int ptlrpc_buf_need_swab(struct ptlrpc_request *req, const int inout, - int index); + __u32 index); void ptlrpc_buf_set_swabbed(struct ptlrpc_request *req, const int inout, - int index); + __u32 index); int ptlrpc_unpack_rep_msg(struct ptlrpc_request *req, int len); int ptlrpc_unpack_req_msg(struct ptlrpc_request *req, int len); @@ -1923,32 +3195,32 @@ int lustre_shrink_msg(struct lustre_msg *msg, int segment, unsigned int newlen, int move_data); void lustre_free_reply_state(struct ptlrpc_reply_state *rs); int __lustre_unpack_msg(struct lustre_msg *m, int len); -int lustre_msg_hdr_size(__u32 magic, int count); -int lustre_msg_size(__u32 magic, int count, __u32 *lengths); -int lustre_msg_size_v2(int count, __u32 *lengths); -int lustre_packed_msg_size(struct lustre_msg *msg); -int lustre_msg_early_size(void); -void *lustre_msg_buf_v2(struct lustre_msg_v2 *m, int n, int min_size); -void *lustre_msg_buf(struct lustre_msg *m, int n, int minlen); -int lustre_msg_buflen(struct lustre_msg *m, int n); -void lustre_msg_set_buflen(struct lustre_msg *m, int n, int len); -int lustre_msg_bufcount(struct lustre_msg *m); -char *lustre_msg_string(struct lustre_msg *m, int n, int max_len); +__u32 lustre_msg_hdr_size(__u32 magic, __u32 count); +__u32 lustre_msg_size(__u32 magic, int count, __u32 *lengths); +__u32 lustre_msg_size_v2(int count, __u32 *lengths); +__u32 lustre_packed_msg_size(struct lustre_msg *msg); +__u32 lustre_msg_early_size(void); +void *lustre_msg_buf_v2(struct lustre_msg_v2 *m, __u32 n, __u32 min_size); +void *lustre_msg_buf(struct lustre_msg *m, __u32 n, __u32 
minlen); +__u32 lustre_msg_buflen(struct lustre_msg *m, __u32 n); +void lustre_msg_set_buflen(struct lustre_msg *m, __u32 n, __u32 len); +__u32 lustre_msg_bufcount(struct lustre_msg *m); +char *lustre_msg_string(struct lustre_msg *m, __u32 n, __u32 max_len); __u32 lustre_msghdr_get_flags(struct lustre_msg *msg); void lustre_msghdr_set_flags(struct lustre_msg *msg, __u32 flags); __u32 lustre_msg_get_flags(struct lustre_msg *msg); -void lustre_msg_add_flags(struct lustre_msg *msg, int flags); -void lustre_msg_set_flags(struct lustre_msg *msg, int flags); -void lustre_msg_clear_flags(struct lustre_msg *msg, int flags); +void lustre_msg_add_flags(struct lustre_msg *msg, __u32 flags); +void lustre_msg_set_flags(struct lustre_msg *msg, __u32 flags); +void lustre_msg_clear_flags(struct lustre_msg *msg, __u32 flags); __u32 lustre_msg_get_op_flags(struct lustre_msg *msg); -void lustre_msg_add_op_flags(struct lustre_msg *msg, int flags); -void lustre_msg_set_op_flags(struct lustre_msg *msg, int flags); +void lustre_msg_add_op_flags(struct lustre_msg *msg, __u32 flags); struct lustre_handle *lustre_msg_get_handle(struct lustre_msg *msg); __u32 lustre_msg_get_type(struct lustre_msg *msg); __u32 lustre_msg_get_version(struct lustre_msg *msg); -void lustre_msg_add_version(struct lustre_msg *msg, int version); +void lustre_msg_add_version(struct lustre_msg *msg, __u32 version); __u32 lustre_msg_get_opc(struct lustre_msg *msg); __u64 lustre_msg_get_last_xid(struct lustre_msg *msg); +__u16 lustre_msg_get_tag(struct lustre_msg *msg); __u64 lustre_msg_get_last_committed(struct lustre_msg *msg); __u64 *lustre_msg_get_versions(struct lustre_msg *msg); __u64 lustre_msg_get_transno(struct lustre_msg *msg); @@ -1958,22 +3230,21 @@ void lustre_msg_set_slv(struct lustre_msg *msg, __u64 slv); void lustre_msg_set_limit(struct lustre_msg *msg, __u64 limit); int lustre_msg_get_status(struct lustre_msg *msg); __u32 lustre_msg_get_conn_cnt(struct lustre_msg *msg); -int lustre_msg_is_v1(struct lustre_msg *msg); __u32 lustre_msg_get_magic(struct lustre_msg *msg); __u32 lustre_msg_get_timeout(struct lustre_msg *msg); __u32 lustre_msg_get_service_time(struct lustre_msg *msg); char *lustre_msg_get_jobid(struct lustre_msg *msg); __u32 lustre_msg_get_cksum(struct lustre_msg *msg); -#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 9, 0, 0) +#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 7, 53, 0) __u32 lustre_msg_calc_cksum(struct lustre_msg *msg, int compat18); #else -# warning "remove checksum compatibility support for b1_8" __u32 lustre_msg_calc_cksum(struct lustre_msg *msg); #endif void lustre_msg_set_handle(struct lustre_msg *msg,struct lustre_handle *handle); void lustre_msg_set_type(struct lustre_msg *msg, __u32 type); void lustre_msg_set_opc(struct lustre_msg *msg, __u32 opc); void lustre_msg_set_last_xid(struct lustre_msg *msg, __u64 last_xid); +void lustre_msg_set_tag(struct lustre_msg *msg, __u16 tag); void lustre_msg_set_last_committed(struct lustre_msg *msg,__u64 last_committed); void lustre_msg_set_versions(struct lustre_msg *msg, __u64 *versions); void lustre_msg_set_transno(struct lustre_msg *msg, __u64 transno); @@ -1995,34 +3266,66 @@ lustre_shrink_reply(struct ptlrpc_request *req, int segment, req->rq_replen = lustre_shrink_msg(req->rq_repmsg, segment, newlen, move_data); } + +#ifdef LUSTRE_TRANSLATE_ERRNOS + +static inline int ptlrpc_status_hton(int h) +{ + /* + * Positive errnos must be network errnos, such as LUSTRE_EDEADLK, + * ELDLM_LOCK_ABORTED, etc. 
+ */ + if (h < 0) + return -lustre_errno_hton(-h); + else + return h; +} + +static inline int ptlrpc_status_ntoh(int n) +{ + /* + * See the comment in ptlrpc_status_hton(). + */ + if (n < 0) + return -lustre_errno_ntoh(-n); + else + return n; +} + +#else + +#define ptlrpc_status_hton(h) (h) +#define ptlrpc_status_ntoh(n) (n) + +#endif /** @} */ /** Change request phase of \a req to \a new_phase */ static inline void ptlrpc_rqphase_move(struct ptlrpc_request *req, enum rq_phase new_phase) { - if (req->rq_phase == new_phase) - return; + if (req->rq_phase == new_phase) + return; - if (new_phase == RQ_PHASE_UNREGISTERING) { - req->rq_next_phase = req->rq_phase; - if (req->rq_import) - cfs_atomic_inc(&req->rq_import->imp_unregistering); - } + if (new_phase == RQ_PHASE_UNREGISTERING) { + req->rq_next_phase = req->rq_phase; + if (req->rq_import) + atomic_inc(&req->rq_import->imp_unregistering); + } - if (req->rq_phase == RQ_PHASE_UNREGISTERING) { - if (req->rq_import) - cfs_atomic_dec(&req->rq_import->imp_unregistering); - } + if (req->rq_phase == RQ_PHASE_UNREGISTERING) { + if (req->rq_import) + atomic_dec(&req->rq_import->imp_unregistering); + } - DEBUG_REQ(D_INFO, req, "move req \"%s\" -> \"%s\"", - ptlrpc_rqphase2str(req), ptlrpc_phase2str(new_phase)); + DEBUG_REQ(D_INFO, req, "move req \"%s\" -> \"%s\"", + ptlrpc_rqphase2str(req), ptlrpc_phase2str(new_phase)); - req->rq_phase = new_phase; + req->rq_phase = new_phase; } /** - * Returns true if request \a req got early reply and hard deadline is not met + * Returns true if request \a req got early reply and hard deadline is not met */ static inline int ptlrpc_client_early(struct ptlrpc_request *req) @@ -2058,41 +3361,42 @@ ptlrpc_client_recv(struct ptlrpc_request *req) static inline int ptlrpc_client_recv_or_unlink(struct ptlrpc_request *req) { - int rc; - - cfs_spin_lock(&req->rq_lock); - if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) && - req->rq_reply_deadline > cfs_time_current_sec()) { - cfs_spin_unlock(&req->rq_lock); - return 1; - } - rc = req->rq_receiving_reply || req->rq_must_unlink; - cfs_spin_unlock(&req->rq_lock); - return rc; + int rc; + + spin_lock(&req->rq_lock); + if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) && + req->rq_reply_deadline > cfs_time_current_sec()) { + spin_unlock(&req->rq_lock); + return 1; + } + rc = !req->rq_req_unlinked || !req->rq_reply_unlinked || + req->rq_receiving_reply; + spin_unlock(&req->rq_lock); + return rc; } static inline void ptlrpc_client_wake_req(struct ptlrpc_request *req) { - if (req->rq_set == NULL) - cfs_waitq_signal(&req->rq_reply_waitq); - else - cfs_waitq_signal(&req->rq_set->set_waitq); + if (req->rq_set == NULL) + wake_up(&req->rq_reply_waitq); + else + wake_up(&req->rq_set->set_waitq); } static inline void ptlrpc_rs_addref(struct ptlrpc_reply_state *rs) { - LASSERT(cfs_atomic_read(&rs->rs_refcount) > 0); - cfs_atomic_inc(&rs->rs_refcount); + LASSERT(atomic_read(&rs->rs_refcount) > 0); + atomic_inc(&rs->rs_refcount); } static inline void ptlrpc_rs_decref(struct ptlrpc_reply_state *rs) { - LASSERT(cfs_atomic_read(&rs->rs_refcount) > 0); - if (cfs_atomic_dec_and_test(&rs->rs_refcount)) - lustre_free_reply_state(rs); + LASSERT(atomic_read(&rs->rs_refcount) > 0); + if (atomic_dec_and_test(&rs->rs_refcount)) + lustre_free_reply_state(rs); } /* Should only be called once per req */ @@ -2135,12 +3439,12 @@ static inline int ptlrpc_send_limit_expired(struct ptlrpc_request *req) static inline int ptlrpc_no_resend(struct ptlrpc_request *req) { - if (!req->rq_no_resend && 
ptlrpc_send_limit_expired(req)) { - cfs_spin_lock(&req->rq_lock); - req->rq_no_resend = 1; - cfs_spin_unlock(&req->rq_lock); - } - return req->rq_no_resend; + if (!req->rq_no_resend && ptlrpc_send_limit_expired(req)) { + spin_lock(&req->rq_lock); + req->rq_no_resend = 1; + spin_unlock(&req->rq_lock); + } + return req->rq_no_resend; } static inline int @@ -2197,22 +3501,18 @@ typedef int (*timeout_cb_t)(struct timeout_item *, void *); int ptlrpc_pinger_add_import(struct obd_import *imp); int ptlrpc_pinger_del_import(struct obd_import *imp); int ptlrpc_add_timeout_client(int time, enum timeout_event event, - timeout_cb_t cb, void *data, - cfs_list_t *obd_list); -int ptlrpc_del_timeout_client(cfs_list_t *obd_list, + timeout_cb_t cb, void *data, + struct list_head *obd_list); +int ptlrpc_del_timeout_client(struct list_head *obd_list, enum timeout_event event); struct ptlrpc_request * ptlrpc_prep_ping(struct obd_import *imp); int ptlrpc_obd_ping(struct obd_device *obd); -cfs_time_t ptlrpc_suspend_wakeup_time(void); -#ifdef __KERNEL__ void ping_evictor_start(void); void ping_evictor_stop(void); -#else -#define ping_evictor_start() do {} while (0) -#define ping_evictor_stop() do {} while (0) -#endif -int ptlrpc_check_and_wait_suspend(struct ptlrpc_request *req); +void ptlrpc_pinger_ir_up(void); +void ptlrpc_pinger_ir_down(void); /** @} */ +int ptlrpc_pinger_suppress_pings(void); /* ptlrpc daemon bind policy */ typedef enum { @@ -2248,6 +3548,7 @@ typedef enum { /* ptlrpc/ptlrpcd.c */ void ptlrpcd_stop(struct ptlrpcd_ctl *pc, int force); +void ptlrpcd_free(struct ptlrpcd_ctl *pc); void ptlrpcd_wake(struct ptlrpc_request *req); void ptlrpcd_add_req(struct ptlrpc_request *req, pdl_policy_t policy, int idx); void ptlrpcd_add_rqset(struct ptlrpc_request_set *set); @@ -2260,7 +3561,7 @@ void ptlrpcd_decref(void); * @{ */ const char* ll_opcode2str(__u32 opcode); -#ifdef LPROCFS +#ifdef CONFIG_PROC_FS void ptlrpc_lprocfs_register_obd(struct obd_device *obd); void ptlrpc_lprocfs_unregister_obd(struct obd_device *obd); void ptlrpc_lprocfs_brw(struct ptlrpc_request *req, int bytes); @@ -2272,14 +3573,12 @@ static inline void ptlrpc_lprocfs_brw(struct ptlrpc_request *req, int bytes) {} /** @} */ /* ptlrpc/llog_server.c */ -int llog_origin_handle_create(struct ptlrpc_request *req); +int llog_origin_handle_open(struct ptlrpc_request *req); int llog_origin_handle_destroy(struct ptlrpc_request *req); int llog_origin_handle_prev_block(struct ptlrpc_request *req); int llog_origin_handle_next_block(struct ptlrpc_request *req); int llog_origin_handle_read_header(struct ptlrpc_request *req); int llog_origin_handle_close(struct ptlrpc_request *req); -int llog_origin_handle_cancel(struct ptlrpc_request *req); -int llog_catinfo(struct ptlrpc_request *req); /* ptlrpc/llog_client.c */ extern struct llog_operations llog_client_ops;
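Taken together, the client-side interfaces above compose into the usual synchronous RPC pattern; as a closing illustration (editor's sketch modelled on ptlrpc_obd_ping(), with an assumed function name):

	static int my_obd_ping(struct obd_import *imp)
	{
		struct ptlrpc_request *req;
		int rc;

		req = ptlrpc_request_alloc_pack(imp, &RQF_OBD_PING,
						LUSTRE_OBD_VERSION, OBD_PING);
		if (req == NULL)
			return -ENOMEM;

		ptlrpc_request_set_replen(req);	/* size the reply buffer */
		rc = ptlrpc_queue_wait(req);	/* send and wait for reply */
		ptlrpc_req_finished(req);	/* drop the last reference */
		return rc;
	}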