X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Finclude%2Flustre_net.h;h=38ccbde759ce61b3f4ced9d0f001f5a070a305f4;hp=e55f42ab20ce3326675faa276eace2a687f16b27;hb=02aa25019db411ef6a3e8ea39900db5737672e87;hpb=30be03b4dd593894687773d2a460d441d85f88a2 diff --git a/lustre/include/lustre_net.h b/lustre/include/lustre_net.h index e55f42a..38ccbde 100644 --- a/lustre/include/lustre_net.h +++ b/lustre/include/lustre_net.h @@ -15,11 +15,7 @@ * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. + * http://www.gnu.org/licenses/gpl-2.0.html * * GPL HEADER END */ @@ -27,7 +23,7 @@ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. * Use is subject to license terms. * - * Copyright (c) 2010, 2013, Intel Corporation. + * Copyright (c) 2010, 2017, Intel Corporation. */ /* * This file is part of Lustre, http://www.lustre.org/ @@ -54,11 +50,13 @@ * * @{ */ - -#include +#include +#include #include -#include -#include +#include +#include +#include +#include #include #include #include @@ -66,18 +64,22 @@ #include #include #include -#include +#include /* MD flags we _always_ use */ #define PTLRPC_MD_OPTIONS 0 /** - * Max # of bulk operations in one request. + * log2 max # of bulk operations in one request: 2=4MB/RPC, 5=32MB/RPC, ... * In order for the client and server to properly negotiate the maximum * possible transfer size, PTLRPC_BULK_OPS_COUNT must be a power-of-two * value. The client is free to limit the actual RPC size for any bulk - * transfer via cl_max_pages_per_rpc to some non-power-of-two value. */ -#define PTLRPC_BULK_OPS_BITS 2 + * transfer via cl_max_pages_per_rpc to some non-power-of-two value. + * NOTE: This is limited to 16 (=64GB RPCs) by IOOBJ_MAX_BRW_BITS. */ +#define PTLRPC_BULK_OPS_BITS 6 +#if PTLRPC_BULK_OPS_BITS > 16 +#error "More than 65536 BRW RPCs not allowed by IOOBJ_MAX_BRW_BITS." +#endif #define PTLRPC_BULK_OPS_COUNT (1U << PTLRPC_BULK_OPS_BITS) /** * PTLRPC_BULK_OPS_MASK is for the convenience of the client only, and @@ -96,22 +98,23 @@ * currently supported maximum between peers at connect via ocd_brw_size. */ #define PTLRPC_MAX_BRW_BITS (LNET_MTU_BITS + PTLRPC_BULK_OPS_BITS) -#define PTLRPC_MAX_BRW_SIZE (1 << PTLRPC_MAX_BRW_BITS) -#define PTLRPC_MAX_BRW_PAGES (PTLRPC_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT) +#define PTLRPC_MAX_BRW_SIZE (1U << PTLRPC_MAX_BRW_BITS) +#define PTLRPC_MAX_BRW_PAGES (PTLRPC_MAX_BRW_SIZE >> PAGE_SHIFT) -#define ONE_MB_BRW_SIZE (1 << LNET_MTU_BITS) -#define MD_MAX_BRW_SIZE (1 << LNET_MTU_BITS) -#define MD_MAX_BRW_PAGES (MD_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT) +#define ONE_MB_BRW_SIZE (1U << LNET_MTU_BITS) +#define MD_MAX_BRW_SIZE (1U << LNET_MTU_BITS) +#define MD_MAX_BRW_PAGES (MD_MAX_BRW_SIZE >> PAGE_SHIFT) #define DT_MAX_BRW_SIZE PTLRPC_MAX_BRW_SIZE -#define DT_MAX_BRW_PAGES (DT_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT) -#define OFD_MAX_BRW_SIZE (1 << LNET_MTU_BITS) +#define DT_DEF_BRW_SIZE (4 * ONE_MB_BRW_SIZE) +#define DT_MAX_BRW_PAGES (DT_MAX_BRW_SIZE >> PAGE_SHIFT) +#define OFD_MAX_BRW_SIZE (1U << LNET_MTU_BITS) /* When PAGE_SIZE is a constant, we can check our arithmetic here with cpp! 
*/ #if ((PTLRPC_MAX_BRW_PAGES & (PTLRPC_MAX_BRW_PAGES - 1)) != 0) # error "PTLRPC_MAX_BRW_PAGES isn't a power of two" #endif -#if (PTLRPC_MAX_BRW_SIZE != (PTLRPC_MAX_BRW_PAGES * PAGE_CACHE_SIZE)) -# error "PTLRPC_MAX_BRW_SIZE isn't PTLRPC_MAX_BRW_PAGES * PAGE_CACHE_SIZE" +#if (PTLRPC_MAX_BRW_SIZE != (PTLRPC_MAX_BRW_PAGES * PAGE_SIZE)) +# error "PTLRPC_MAX_BRW_SIZE isn't PTLRPC_MAX_BRW_PAGES * PAGE_SIZE" #endif #if (PTLRPC_MAX_BRW_SIZE > LNET_MTU * PTLRPC_BULK_OPS_COUNT) # error "PTLRPC_MAX_BRW_SIZE too big" @@ -364,10 +367,10 @@ /** * The update request includes all of updates from the create, which might - * include linkea (4K maxim), together with other updates, we set it to 9K: - * lustre_msg + ptlrpc_body + UPDATE_BUF_SIZE (8K) + * include linkea (4K maxim), together with other updates, we set it to 1000K: + * lustre_msg + ptlrpc_body + OUT_UPDATE_BUFFER_SIZE_MAX */ -#define OUT_MAXREQSIZE (9 * 1024) +#define OUT_MAXREQSIZE (1000 * 1024) #define OUT_MAXREPSIZE MDS_MAXREPSIZE /** MDS_BUFSIZE = max_reqsize (w/o LOV EA) + max sptlrpc payload size */ @@ -452,10 +455,9 @@ */ /* depress threads factor for VM with small memory size */ #define OSS_THR_FACTOR min_t(int, 8, \ - NUM_CACHEPAGES >> (28 - PAGE_CACHE_SHIFT)) + NUM_CACHEPAGES >> (28 - PAGE_SHIFT)) #define OSS_NTHRS_INIT (PTLRPC_NTHRS_INIT + 1) #define OSS_NTHRS_BASE 64 -#define OSS_NTHRS_MAX 512 /* threads for handling "create" request */ #define OSS_CR_THR_FACTOR 1 @@ -471,35 +473,53 @@ * - single object with 16 pages is 512 bytes * - OST_IO_MAXREQSIZE must be at least 1 page of cookies plus some spillover * - Must be a multiple of 1024 - * - actual size is about 18K */ -#define _OST_MAXREQSIZE_SUM (sizeof(struct lustre_msg) + \ - sizeof(struct ptlrpc_body) + \ - sizeof(struct obdo) + \ - sizeof(struct obd_ioobj) + \ - sizeof(struct niobuf_remote) * DT_MAX_BRW_PAGES) +#define _OST_MAXREQSIZE_BASE ((unsigned long)(sizeof(struct lustre_msg) + \ + sizeof(struct ptlrpc_body) + \ + sizeof(struct obdo) + \ + sizeof(struct obd_ioobj) + \ + sizeof(struct niobuf_remote))) +#define _OST_MAXREQSIZE_SUM ((unsigned long)(_OST_MAXREQSIZE_BASE + \ + sizeof(struct niobuf_remote) * \ + (DT_MAX_BRW_PAGES - 1))) /** * FIEMAP request can be 4K+ for now */ -#define OST_MAXREQSIZE (16 * 1024) -#define OST_IO_MAXREQSIZE max_t(int, OST_MAXREQSIZE, \ - (((_OST_MAXREQSIZE_SUM - 1) | (1024 - 1)) + 1)) +#define OST_MAXREQSIZE (16UL * 1024UL) +#define OST_IO_MAXREQSIZE max(OST_MAXREQSIZE, \ + ((_OST_MAXREQSIZE_SUM - 1) | \ + (1024UL - 1)) + 1) +/* Safe estimate of free space in standard RPC, provides upper limit for # of + * bytes of i/o to pack in RPC (skipping bulk transfer). */ +#define OST_SHORT_IO_SPACE (OST_IO_MAXREQSIZE - _OST_MAXREQSIZE_BASE) + +/* Actual size used for short i/o buffer. Calculation means this: + * At least one page (for large PAGE_SIZE), or 16 KiB, but not more + * than the available space aligned to a page boundary. */ +#define OBD_MAX_SHORT_IO_BYTES min(max(PAGE_SIZE, 16UL * 1024UL), \ + OST_SHORT_IO_SPACE & PAGE_MASK) #define OST_MAXREPSIZE (9 * 1024) #define OST_IO_MAXREPSIZE OST_MAXREPSIZE #define OST_NBUFS 64 /** OST_BUFSIZE = max_reqsize + max sptlrpc payload size */ -#define OST_BUFSIZE max_t(int, OST_MAXREQSIZE + 1024, 16 * 1024) +#define OST_BUFSIZE max_t(int, OST_MAXREQSIZE + 1024, 32 * 1024) /** * OST_IO_MAXREQSIZE is 18K, giving extra 46K can increase buffer utilization * rate of request buffer, please check comment of MDS_LOV_BUFSIZE for details. 
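The size macros above lean on two idioms: "((x - 1) | (1024 - 1)) + 1" rounds the niobuf-heavy request size up to the next multiple of 1024 for OST_IO_MAXREQSIZE, and OBD_MAX_SHORT_IO_BYTES clamps the short-I/O payload to at least one page (or 16 KiB) but never more than the page-aligned space left after the fixed request header. The standalone sketch below is not part of the patch; it only reproduces that arithmetic with assumed constants (4 KiB pages, a 488-byte request base, a 32-byte niobuf_remote, and a 64 MiB maximum bulk RPC matching the new PTLRPC_BULK_OPS_BITS = 6):

#include <stdio.h>

#define ROUND_UP_1K(x)	((((x) - 1UL) | (1024UL - 1UL)) + 1UL)

int main(void)
{
	unsigned long page_size = 4096UL;	/* assumed PAGE_SIZE */
	unsigned long page_mask = ~(page_size - 1UL);
	unsigned long req_base = 488UL;		/* assumed _OST_MAXREQSIZE_BASE */
	unsigned long niobuf = 32UL;		/* assumed sizeof(struct niobuf_remote) */
	unsigned long max_brw_pages = (64UL << 20) / page_size;
	unsigned long req_sum = req_base + niobuf * (max_brw_pages - 1UL);
	unsigned long io_maxreqsize = ROUND_UP_1K(req_sum);
	unsigned long short_io_space = io_maxreqsize - req_base;
	unsigned long short_io_bytes;

	/* at least one page or 16 KiB, whichever is larger ... */
	short_io_bytes = page_size > 16UL * 1024UL ? page_size : 16UL * 1024UL;
	/* ... but no more than the page-aligned space actually available */
	if (short_io_bytes > (short_io_space & page_mask))
		short_io_bytes = short_io_space & page_mask;

	printf("OST_IO_MAXREQSIZE ~ %lu, OBD_MAX_SHORT_IO_BYTES ~ %lu\n",
	       io_maxreqsize, short_io_bytes);
	return 0;
}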
*/ #define OST_IO_BUFSIZE max_t(int, OST_IO_MAXREQSIZE + 1024, 64 * 1024) + /* Macro to hide a typecast. */ #define ptlrpc_req_async_args(req) ((void *)&req->rq_async_args) +struct ptlrpc_replay_async_args { + int praa_old_state; + int praa_old_status; +}; + /** * Structure to single define portal connection. */ @@ -509,7 +529,7 @@ struct ptlrpc_connection { /** Our own lnet nid for this connection */ lnet_nid_t c_self; /** Remote side nid for this connection */ - lnet_process_id_t c_peer; + struct lnet_process_id c_peer; /** UUID of the other side */ struct obd_uuid c_remote_uuid; /** reference counter for this connection */ @@ -546,7 +566,6 @@ union ptlrpc_async_args { }; struct ptlrpc_request_set; -typedef int (*set_interpreter_func)(struct ptlrpc_request_set *, void *, int); typedef int (*set_producer_func)(struct ptlrpc_request_set *, void *); /** @@ -568,20 +587,9 @@ struct ptlrpc_request_set { atomic_t set_remaining; /** wait queue to wait on for request events */ wait_queue_head_t set_waitq; - wait_queue_head_t *set_wakeup_ptr; /** List of requests in the set */ struct list_head set_requests; /** - * List of completion callbacks to be called when the set is completed - * This is only used if \a set_interpret is NULL. - * Links struct ptlrpc_set_cbdata. - */ - struct list_head set_cblist; - /** Completion callback, if only one. */ - set_interpreter_func set_interpret; - /** opaq argument passed to completion \a set_interpret callback. */ - void *set_arg; - /** * Lock for \a set_new_requests manipulations * locked so that any old caller can communicate requests to * the set holder who can then fold them into the lock-free set @@ -599,18 +607,7 @@ struct ptlrpc_request_set { set_producer_func set_producer; /** opaq argument passed to the producer callback */ void *set_producer_arg; -}; - -/** - * Description of a single ptrlrpc_set callback - */ -struct ptlrpc_set_cbdata { - /** List linkage item */ - struct list_head psc_item; - /** Pointer to interpreting function */ - set_interpreter_func psc_interpret; - /** Opaq argument to pass to the callback */ - void *psc_data; + unsigned int set_allow_intr:1; }; struct ptlrpc_bulk_desc; @@ -621,8 +618,8 @@ struct ptlrpc_service; * ptlrpc callback & work item stuff */ struct ptlrpc_cb_id { - void (*cbid_fn)(lnet_event_t *ev); /* specific callback fn */ - void *cbid_arg; /* additional arg */ + void (*cbid_fn)(struct lnet_event *ev); /* specific callback fn */ + void *cbid_arg; /* additional arg */ }; /** Maximum number of locks to fit into reply state */ @@ -662,6 +659,12 @@ struct ptlrpc_reply_state { unsigned long rs_committed:1;/* the transaction was committed and the rs was dispatched by ptlrpc_commit_replies */ + unsigned long rs_convert_lock:1; /* need to convert saved + * locks to COS mode */ + atomic_t rs_refcount; /* number of users */ + /** Number of locks awaiting client ACK */ + int rs_nlocks; + /** Size of the state */ int rs_size; /** opcode */ @@ -673,43 +676,41 @@ struct ptlrpc_reply_state { struct obd_export *rs_export; struct ptlrpc_service_part *rs_svcpt; /** Lnet metadata handle for the reply */ - lnet_handle_md_t rs_md_h; - atomic_t rs_refcount; + struct lnet_handle_md rs_md_h; /** Context for the sevice thread */ - struct ptlrpc_svc_ctx *rs_svc_ctx; + struct ptlrpc_svc_ctx *rs_svc_ctx; /** Reply buffer (actually sent to the client), encoded if needed */ - struct lustre_msg *rs_repbuf; /* wrapper */ - /** Size of the reply buffer */ - int rs_repbuf_len; /* wrapper buf length */ - /** Size of the reply message */ - int 
rs_repdata_len; /* wrapper msg length */ - /** - * Actual reply message. Its content is encrupted (if needed) to - * produce reply buffer for actual sending. In simple case - * of no network encryption we jus set \a rs_repbuf to \a rs_msg - */ - struct lustre_msg *rs_msg; /* reply message */ - - /** Number of locks awaiting client ACK */ - int rs_nlocks; - /** Handles of locks awaiting client reply ACK */ - struct lustre_handle rs_locks[RS_MAX_LOCKS]; - /** Lock modes of locks in \a rs_locks */ - ldlm_mode_t rs_modes[RS_MAX_LOCKS]; + struct lustre_msg *rs_repbuf; /* wrapper */ + /** Size of the reply buffer */ + int rs_repbuf_len; /* wrapper buf length */ + /** Size of the reply message */ + int rs_repdata_len; /* wrapper msg length */ + /** + * Actual reply message. Its content is encrupted (if needed) to + * produce reply buffer for actual sending. In simple case + * of no network encryption we jus set \a rs_repbuf to \a rs_msg + */ + struct lustre_msg *rs_msg; /* reply message */ + + /** Handles of locks awaiting client reply ACK */ + struct lustre_handle rs_locks[RS_MAX_LOCKS]; + /** Lock modes of locks in \a rs_locks */ + enum ldlm_mode rs_modes[RS_MAX_LOCKS]; }; struct ptlrpc_thread; /** RPC stages */ enum rq_phase { - RQ_PHASE_NEW = 0xebc0de00, - RQ_PHASE_RPC = 0xebc0de01, - RQ_PHASE_BULK = 0xebc0de02, - RQ_PHASE_INTERPRET = 0xebc0de03, - RQ_PHASE_COMPLETE = 0xebc0de04, - RQ_PHASE_UNREGISTERING = 0xebc0de05, - RQ_PHASE_UNDEFINED = 0xebc0de06 + RQ_PHASE_NEW = 0xebc0de00, + RQ_PHASE_RPC = 0xebc0de01, + RQ_PHASE_BULK = 0xebc0de02, + RQ_PHASE_INTERPRET = 0xebc0de03, + RQ_PHASE_COMPLETE = 0xebc0de04, + RQ_PHASE_UNREG_RPC = 0xebc0de05, + RQ_PHASE_UNREG_BULK = 0xebc0de06, + RQ_PHASE_UNDEFINED = 0xebc0de07 }; /** Type of request interpreter call-back */ @@ -718,1034 +719,31 @@ typedef int (*ptlrpc_interpterer_t)(const struct lu_env *env, void *arg, int rc); /** Type of request resend call-back */ typedef void (*ptlrpc_resend_cb_t)(struct ptlrpc_request *req, - void *arg); - -/** - * Definition of request pool structure. - * The pool is used to store empty preallocated requests for the case - * when we would actually need to send something without performing - * any allocations (to avoid e.g. OOM). - */ -struct ptlrpc_request_pool { - /** Locks the list */ - spinlock_t prp_lock; - /** list of ptlrpc_request structs */ - struct list_head prp_req_list; - /** Maximum message size that would fit into a rquest from this pool */ - int prp_rq_size; - /** Function to allocate more requests for this pool */ - void (*prp_populate)(struct ptlrpc_request_pool *, int); -}; - -struct lu_context; -struct lu_env; - -struct ldlm_lock; - -/** - * \defgroup nrs Network Request Scheduler - * @{ - */ -struct ptlrpc_nrs_policy; -struct ptlrpc_nrs_resource; -struct ptlrpc_nrs_request; - -/** - * NRS control operations. - * - * These are common for all policies. - */ -enum ptlrpc_nrs_ctl { - /** - * Not a valid opcode. - */ - PTLRPC_NRS_CTL_INVALID, - /** - * Activate the policy. - */ - PTLRPC_NRS_CTL_START, - /** - * Reserved for multiple primary policies, which may be a possibility - * in the future. - */ - PTLRPC_NRS_CTL_STOP, - /** - * Policies can start using opcodes from this value and onwards for - * their own purposes; the assigned value itself is arbitrary. 
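In practice a policy declares its private opcodes as a new enum whose first value is PTLRPC_NRS_CTL_1ST_POL_SPEC, exactly as the nrs_ctl_orr enum in the removed block below does. A minimal sketch of such a declaration for a hypothetical policy (not part of the patch; the header that defines PTLRPC_NRS_CTL_1ST_POL_SPEC is assumed to be included):

enum nrs_ctl_example {
	/* read/write the policy's scheduling quantum via op_policy_ctl() */
	NRS_CTL_EXAMPLE_RD_QUANTUM = PTLRPC_NRS_CTL_1ST_POL_SPEC,
	NRS_CTL_EXAMPLE_WR_QUANTUM,
};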
- */ - PTLRPC_NRS_CTL_1ST_POL_SPEC = 0x20, -}; - -/** - * ORR policy operations - */ -enum nrs_ctl_orr { - NRS_CTL_ORR_RD_QUANTUM = PTLRPC_NRS_CTL_1ST_POL_SPEC, - NRS_CTL_ORR_WR_QUANTUM, - NRS_CTL_ORR_RD_OFF_TYPE, - NRS_CTL_ORR_WR_OFF_TYPE, - NRS_CTL_ORR_RD_SUPP_REQ, - NRS_CTL_ORR_WR_SUPP_REQ, -}; - -/** - * NRS policy operations. - * - * These determine the behaviour of a policy, and are called in response to - * NRS core events. - */ -struct ptlrpc_nrs_pol_ops { - /** - * Called during policy registration; this operation is optional. - * - * \param[in,out] policy The policy being initialized - */ - int (*op_policy_init) (struct ptlrpc_nrs_policy *policy); - /** - * Called during policy unregistration; this operation is optional. - * - * \param[in,out] policy The policy being unregistered/finalized - */ - void (*op_policy_fini) (struct ptlrpc_nrs_policy *policy); - /** - * Called when activating a policy via lprocfs; policies allocate and - * initialize their resources here; this operation is optional. - * - * \param[in,out] policy The policy being started - * \param[in,out] arg A generic char buffer - * - * \see nrs_policy_start_locked() - */ - int (*op_policy_start) (struct ptlrpc_nrs_policy *policy, - char *arg); - /** - * Called when deactivating a policy via lprocfs; policies deallocate - * their resources here; this operation is optional - * - * \param[in,out] policy The policy being stopped - * - * \see nrs_policy_stop0() - */ - void (*op_policy_stop) (struct ptlrpc_nrs_policy *policy); - /** - * Used for policy-specific operations; i.e. not generic ones like - * \e PTLRPC_NRS_CTL_START and \e PTLRPC_NRS_CTL_GET_INFO; analogous - * to an ioctl; this operation is optional. - * - * \param[in,out] policy The policy carrying out operation \a opc - * \param[in] opc The command operation being carried out - * \param[in,out] arg An generic buffer for communication between the - * user and the control operation - * - * \retval -ve error - * \retval 0 success - * - * \see ptlrpc_nrs_policy_control() - */ - int (*op_policy_ctl) (struct ptlrpc_nrs_policy *policy, - enum ptlrpc_nrs_ctl opc, void *arg); - - /** - * Called when obtaining references to the resources of the resource - * hierarchy for a request that has arrived for handling at the PTLRPC - * service. Policies should return -ve for requests they do not wish - * to handle. This operation is mandatory. - * - * \param[in,out] policy The policy we're getting resources for. - * \param[in,out] nrq The request we are getting resources for. - * \param[in] parent The parent resource of the resource being - * requested; set to NULL if none. - * \param[out] resp The resource is to be returned here; the - * fallback policy in an NRS head should - * \e always return a non-NULL pointer value. - * \param[in] moving_req When set, signifies that this is an attempt - * to obtain resources for a request being moved - * to the high-priority NRS head by - * ldlm_lock_reorder_req(). - * This implies two things: - * 1. We are under obd_export::exp_rpc_lock and - * so should not sleep. - * 2. We should not perform non-idempotent or can - * skip performing idempotent operations that - * were carried out when resources were first - * taken for the request when it was initialized - * in ptlrpc_nrs_req_initialize(). - * - * \retval 0, +ve The level of the returned resource in the resource - * hierarchy; currently only 0 (for a non-leaf resource) - * and 1 (for a leaf resource) are supported by the - * framework. 
- * \retval -ve error - * - * \see ptlrpc_nrs_req_initialize() - * \see ptlrpc_nrs_hpreq_add_nolock() - * \see ptlrpc_nrs_req_hp_move() - */ - int (*op_res_get) (struct ptlrpc_nrs_policy *policy, - struct ptlrpc_nrs_request *nrq, - const struct ptlrpc_nrs_resource *parent, - struct ptlrpc_nrs_resource **resp, - bool moving_req); - /** - * Called when releasing references taken for resources in the resource - * hierarchy for the request; this operation is optional. - * - * \param[in,out] policy The policy the resource belongs to - * \param[in] res The resource to be freed - * - * \see ptlrpc_nrs_req_finalize() - * \see ptlrpc_nrs_hpreq_add_nolock() - * \see ptlrpc_nrs_req_hp_move() - */ - void (*op_res_put) (struct ptlrpc_nrs_policy *policy, - const struct ptlrpc_nrs_resource *res); - - /** - * Obtains a request for handling from the policy, and optionally - * removes the request from the policy; this operation is mandatory. - * - * \param[in,out] policy The policy to poll - * \param[in] peek When set, signifies that we just want to - * examine the request, and not handle it, so the - * request is not removed from the policy. - * \param[in] force When set, it will force a policy to return a - * request if it has one queued. - * - * \retval NULL No request available for handling - * \retval valid-pointer The request polled for handling - * - * \see ptlrpc_nrs_req_get_nolock() - */ - struct ptlrpc_nrs_request * - (*op_req_get) (struct ptlrpc_nrs_policy *policy, bool peek, - bool force); - /** - * Called when attempting to add a request to a policy for later - * handling; this operation is mandatory. - * - * \param[in,out] policy The policy on which to enqueue \a nrq - * \param[in,out] nrq The request to enqueue - * - * \retval 0 success - * \retval != 0 error - * - * \see ptlrpc_nrs_req_add_nolock() - */ - int (*op_req_enqueue) (struct ptlrpc_nrs_policy *policy, - struct ptlrpc_nrs_request *nrq); - /** - * Removes a request from the policy's set of pending requests. Normally - * called after a request has been polled successfully from the policy - * for handling; this operation is mandatory. - * - * \param[in,out] policy The policy the request \a nrq belongs to - * \param[in,out] nrq The request to dequeue - * - * \see ptlrpc_nrs_req_del_nolock() - */ - void (*op_req_dequeue) (struct ptlrpc_nrs_policy *policy, - struct ptlrpc_nrs_request *nrq); - /** - * Called after the request being carried out. Could be used for - * job/resource control; this operation is optional. - * - * \param[in,out] policy The policy which is stopping to handle request - * \a nrq - * \param[in,out] nrq The request - * - * \pre assert_spin_locked(&svcpt->scp_req_lock) - * - * \see ptlrpc_nrs_req_stop_nolock() - */ - void (*op_req_stop) (struct ptlrpc_nrs_policy *policy, - struct ptlrpc_nrs_request *nrq); - /** - * Registers the policy's lprocfs interface with a PTLRPC service. - * - * \param[in] svc The service - * - * \retval 0 success - * \retval != 0 error - */ - int (*op_lprocfs_init) (struct ptlrpc_service *svc); - /** - * Unegisters the policy's lprocfs interface with a PTLRPC service. - * - * In cases of failed policy registration in - * \e ptlrpc_nrs_policy_register(), this function may be called for a - * service which has not registered the policy successfully, so - * implementations of this method should make sure their operations are - * safe in such cases. 
- * - * \param[in] svc The service - */ - void (*op_lprocfs_fini) (struct ptlrpc_service *svc); -}; - -/** - * Policy flags - */ -enum nrs_policy_flags { - /** - * Fallback policy, use this flag only on a single supported policy per - * service. The flag cannot be used on policies that use - * \e PTLRPC_NRS_FL_REG_EXTERN - */ - PTLRPC_NRS_FL_FALLBACK = (1 << 0), - /** - * Start policy immediately after registering. - */ - PTLRPC_NRS_FL_REG_START = (1 << 1), - /** - * This is a policy registering from a module different to the one NRS - * core ships in (currently ptlrpc). - */ - PTLRPC_NRS_FL_REG_EXTERN = (1 << 2), -}; - -/** - * NRS queue type. - * - * Denotes whether an NRS instance is for handling normal or high-priority - * RPCs, or whether an operation pertains to one or both of the NRS instances - * in a service. - */ -enum ptlrpc_nrs_queue_type { - PTLRPC_NRS_QUEUE_REG = (1 << 0), - PTLRPC_NRS_QUEUE_HP = (1 << 1), - PTLRPC_NRS_QUEUE_BOTH = (PTLRPC_NRS_QUEUE_REG | PTLRPC_NRS_QUEUE_HP) -}; - -/** - * NRS head - * - * A PTLRPC service has at least one NRS head instance for handling normal - * priority RPCs, and may optionally have a second NRS head instance for - * handling high-priority RPCs. Each NRS head maintains a list of available - * policies, of which one and only one policy is acting as the fallback policy, - * and optionally a different policy may be acting as the primary policy. For - * all RPCs handled by this NRS head instance, NRS core will first attempt to - * enqueue the RPC using the primary policy (if any). The fallback policy is - * used in the following cases: - * - when there was no primary policy in the - * ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED state at the time the request - * was initialized. - * - when the primary policy that was at the - * ptlrpc_nrs_pol_state::PTLRPC_NRS_POL_STATE_STARTED state at the time the - * RPC was initialized, denoted it did not wish, or for some other reason was - * not able to handle the request, by returning a non-valid NRS resource - * reference. - * - when the primary policy that was at the - * ptlrpc_nrs_pol_state::PTLRPC_NRS_POL_STATE_STARTED state at the time the - * RPC was initialized, fails later during the request enqueueing stage. - * - * \see nrs_resource_get_safe() - * \see nrs_request_enqueue() - */ -struct ptlrpc_nrs { - spinlock_t nrs_lock; - /** XXX Possibly replace svcpt->scp_req_lock with another lock here. */ - /** - * List of registered policies - */ - struct list_head nrs_policy_list; - /** - * List of policies with queued requests. Policies that have any - * outstanding requests are queued here, and this list is queried - * in a round-robin manner from NRS core when obtaining a request - * for handling. This ensures that requests from policies that at some - * point transition away from the - * ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED state are drained. 
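The primary/fallback split described above boils down to "try the started primary policy first, and fall back to the always-available fallback policy if the primary is absent, not started, or refuses the request". A minimal standalone sketch of that decision (not part of the patch; the types and names are illustrative stand-ins, not the real NRS structures):

#include <stddef.h>

struct example_policy {
	int (*enqueue)(struct example_policy *pol, void *req);
	int started;			/* stands in for NRS_POL_STATE_STARTED */
};

struct example_nrs_head {
	struct example_policy *primary;		/* may be NULL */
	struct example_policy *fallback;	/* always present and started */
};

static int example_enqueue(struct example_nrs_head *nrs, void *req)
{
	struct example_policy *pol = nrs->primary;

	/* try the primary policy first, if one is configured and started */
	if (pol != NULL && pol->started && pol->enqueue(pol, req) == 0)
		return 0;

	/* otherwise the fallback policy must accept the request */
	return nrs->fallback->enqueue(nrs->fallback, req);
}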
- */ - struct list_head nrs_policy_queued; - /** - * Service partition for this NRS head - */ - struct ptlrpc_service_part *nrs_svcpt; - /** - * Primary policy, which is the preferred policy for handling RPCs - */ - struct ptlrpc_nrs_policy *nrs_policy_primary; - /** - * Fallback policy, which is the backup policy for handling RPCs - */ - struct ptlrpc_nrs_policy *nrs_policy_fallback; - /** - * This NRS head handles either HP or regular requests - */ - enum ptlrpc_nrs_queue_type nrs_queue_type; - /** - * # queued requests from all policies in this NRS head - */ - unsigned long nrs_req_queued; - /** - * # scheduled requests from all policies in this NRS head - */ - unsigned long nrs_req_started; - /** - * # policies on this NRS - */ - unsigned nrs_num_pols; - /** - * This NRS head is in progress of starting a policy - */ - unsigned nrs_policy_starting:1; - /** - * In progress of shutting down the whole NRS head; used during - * unregistration - */ - unsigned nrs_stopping:1; - /** - * NRS policy is throttling reqeust - */ - unsigned nrs_throttling:1; -}; - -#define NRS_POL_NAME_MAX 16 - -struct ptlrpc_nrs_pol_desc; - -/** - * Service compatibility predicate; this determines whether a policy is adequate - * for handling RPCs of a particular PTLRPC service. - * - * XXX:This should give the same result during policy registration and - * unregistration, and for all partitions of a service; so the result should not - * depend on temporal service or other properties, that may influence the - * result. - */ -typedef bool (*nrs_pol_desc_compat_t) (const struct ptlrpc_service *svc, - const struct ptlrpc_nrs_pol_desc *desc); - -struct ptlrpc_nrs_pol_conf { - /** - * Human-readable policy name - */ - char nc_name[NRS_POL_NAME_MAX]; - /** - * NRS operations for this policy - */ - const struct ptlrpc_nrs_pol_ops *nc_ops; - /** - * Service compatibility predicate - */ - nrs_pol_desc_compat_t nc_compat; - /** - * Set for policies that support a single ptlrpc service, i.e. ones that - * have \a pd_compat set to nrs_policy_compat_one(). The variable value - * depicts the name of the single service that such policies are - * compatible with. - */ - const char *nc_compat_svc_name; - /** - * Owner module for this policy descriptor; policies registering from a - * different module to the one the NRS framework is held within - * (currently ptlrpc), should set this field to THIS_MODULE. - */ - struct module *nc_owner; - /** - * Policy registration flags; a bitmast of \e nrs_policy_flags - */ - unsigned nc_flags; -}; - -/** - * NRS policy registering descriptor - * - * Is used to hold a description of a policy that can be passed to NRS core in - * order to register the policy with NRS heads in different PTLRPC services. - */ -struct ptlrpc_nrs_pol_desc { - /** - * Human-readable policy name - */ - char pd_name[NRS_POL_NAME_MAX]; - /** - * Link into nrs_core::nrs_policies - */ - struct list_head pd_list; - /** - * NRS operations for this policy - */ - const struct ptlrpc_nrs_pol_ops *pd_ops; - /** - * Service compatibility predicate - */ - nrs_pol_desc_compat_t pd_compat; - /** - * Set for policies that are compatible with only one PTLRPC service. - * - * \see ptlrpc_nrs_pol_conf::nc_compat_svc_name - */ - const char *pd_compat_svc_name; - /** - * Owner module for this policy descriptor. - * - * We need to hold a reference to the module whenever we might make use - * of any of the module's contents, i.e. - * - If one or more instances of the policy are at a state where they - * might be handling a request, i.e. 
- * ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED or - * ptlrpc_nrs_pol_state::NRS_POL_STATE_STOPPING as we will have to - * call into the policy's ptlrpc_nrs_pol_ops() handlers. A reference - * is taken on the module when - * \e ptlrpc_nrs_pol_desc::pd_refs becomes 1, and released when it - * becomes 0, so that we hold only one reference to the module maximum - * at any time. - * - * We do not need to hold a reference to the module, even though we - * might use code and data from the module, in the following cases: - * - During external policy registration, because this should happen in - * the module's init() function, in which case the module is safe from - * removal because a reference is being held on the module by the - * kernel, and iirc kmod (and I guess module-init-tools also) will - * serialize any racing processes properly anyway. - * - During external policy unregistration, because this should happen - * in a module's exit() function, and any attempts to start a policy - * instance would need to take a reference on the module, and this is - * not possible once we have reached the point where the exit() - * handler is called. - * - During service registration and unregistration, as service setup - * and cleanup, and policy registration, unregistration and policy - * instance starting, are serialized by \e nrs_core::nrs_mutex, so - * as long as users adhere to the convention of registering policies - * in init() and unregistering them in module exit() functions, there - * should not be a race between these operations. - * - During any policy-specific lprocfs operations, because a reference - * is held by the kernel on a proc entry that has been entered by a - * syscall, so as long as proc entries are removed during unregistration time, - * then unregistration and lprocfs operations will be properly - * serialized. - */ - struct module *pd_owner; - /** - * Bitmask of \e nrs_policy_flags - */ - unsigned pd_flags; - /** - * # of references on this descriptor - */ - atomic_t pd_refs; -}; - -/** - * NRS policy state - * - * Policies transition from one state to the other during their lifetime - */ -enum ptlrpc_nrs_pol_state { - /** - * Not a valid policy state. - */ - NRS_POL_STATE_INVALID, - /** - * Policies are at this state either at the start of their life, or - * transition here when the user selects a different policy to act - * as the primary one. - */ - NRS_POL_STATE_STOPPED, - /** - * Policy is progress of stopping - */ - NRS_POL_STATE_STOPPING, - /** - * Policy is in progress of starting - */ - NRS_POL_STATE_STARTING, - /** - * A policy is in this state in two cases: - * - it is the fallback policy, which is always in this state. - * - it has been activated by the user; i.e. it is the primary policy, - */ - NRS_POL_STATE_STARTED, -}; - -/** - * NRS policy information - * - * Used for obtaining information for the status of a policy via lprocfs - */ -struct ptlrpc_nrs_pol_info { - /** - * Policy name - */ - char pi_name[NRS_POL_NAME_MAX]; - /** - * Current policy state - */ - enum ptlrpc_nrs_pol_state pi_state; - /** - * # RPCs enqueued for later dispatching by the policy - */ - long pi_req_queued; - /** - * # RPCs started for dispatch by the policy - */ - long pi_req_started; - /** - * Is this a fallback policy? - */ - unsigned pi_fallback:1; -}; - -/** - * NRS policy - * - * There is one instance of this for each policy in each NRS head of each - * PTLRPC service partition. 
- */ -struct ptlrpc_nrs_policy { - /** - * Linkage into the NRS head's list of policies, - * ptlrpc_nrs:nrs_policy_list - */ - struct list_head pol_list; - /** - * Linkage into the NRS head's list of policies with enqueued - * requests ptlrpc_nrs:nrs_policy_queued - */ - struct list_head pol_list_queued; - /** - * Current state of this policy - */ - enum ptlrpc_nrs_pol_state pol_state; - /** - * Bitmask of nrs_policy_flags - */ - unsigned pol_flags; - /** - * # RPCs enqueued for later dispatching by the policy - */ - long pol_req_queued; - /** - * # RPCs started for dispatch by the policy - */ - long pol_req_started; - /** - * Usage Reference count taken on the policy instance - */ - long pol_ref; - /** - * The NRS head this policy has been created at - */ - struct ptlrpc_nrs *pol_nrs; - /** - * Private policy data; varies by policy type - */ - void *pol_private; - /** - * Policy descriptor for this policy instance. - */ - struct ptlrpc_nrs_pol_desc *pol_desc; -}; - -/** - * NRS resource - * - * Resources are embedded into two types of NRS entities: - * - Inside NRS policies, in the policy's private data in - * ptlrpc_nrs_policy::pol_private - * - In objects that act as prime-level scheduling entities in different NRS - * policies; e.g. on a policy that performs round robin or similar order - * scheduling across client NIDs, there would be one NRS resource per unique - * client NID. On a policy which performs round robin scheduling across - * backend filesystem objects, there would be one resource associated with - * each of the backend filesystem objects partaking in the scheduling - * performed by the policy. - * - * NRS resources share a parent-child relationship, in which resources embedded - * in policy instances are the parent entities, with all scheduling entities - * a policy schedules across being the children, thus forming a simple resource - * hierarchy. This hierarchy may be extended with one or more levels in the - * future if the ability to have more than one primary policy is added. - * - * Upon request initialization, references to the then active NRS policies are - * taken and used to later handle the dispatching of the request with one of - * these policies. - * - * \see nrs_resource_get_safe() - * \see ptlrpc_nrs_req_add() - */ -struct ptlrpc_nrs_resource { - /** - * This NRS resource's parent; is NULL for resources embedded in NRS - * policy instances; i.e. those are top-level ones. - */ - struct ptlrpc_nrs_resource *res_parent; - /** - * The policy associated with this resource. - */ - struct ptlrpc_nrs_policy *res_policy; -}; - -enum { - NRS_RES_FALLBACK, - NRS_RES_PRIMARY, - NRS_RES_MAX -}; - -/* \name fifo - * - * FIFO policy - * - * This policy is a logical wrapper around previous, non-NRS functionality. - * It dispatches RPCs in the same order as they arrive from the network. This - * policy is currently used as the fallback policy, and the only enabled policy - * on all NRS heads of all PTLRPC service partitions. - * @{ - */ - -/** - * Private data structure for the FIFO policy - */ -struct nrs_fifo_head { - /** - * Resource object for policy instance. - */ - struct ptlrpc_nrs_resource fh_res; - /** - * List of queued requests. - */ - struct list_head fh_list; - /** - * For debugging purposes. 
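The FIFO policy described above amounts to a single queue with a per-head sequence counter: requests are stamped at enqueue time and dispatched in stamp order. A standalone sketch of that behaviour (not part of the patch; plain C containers are used instead of the kernel list and the real nrs_fifo_head/nrs_fifo_req types):

#include <stdio.h>

struct fifo_req {
	struct fifo_req *next;
	unsigned long long sequence;	/* assigned at enqueue time */
};

struct fifo_head {
	struct fifo_req *first, *last;
	unsigned long long sequence;	/* monotonically increasing */
};

static void fifo_enqueue(struct fifo_head *head, struct fifo_req *req)
{
	req->sequence = ++head->sequence;
	req->next = NULL;
	if (head->last != NULL)
		head->last->next = req;
	else
		head->first = req;
	head->last = req;
}

static struct fifo_req *fifo_dequeue(struct fifo_head *head)
{
	struct fifo_req *req = head->first;

	if (req != NULL) {
		head->first = req->next;
		if (head->first == NULL)
			head->last = NULL;
	}
	return req;
}

int main(void)
{
	struct fifo_head head = { NULL, NULL, 0 };
	struct fifo_req reqs[3];
	struct fifo_req *req;
	int i;

	for (i = 0; i < 3; i++)
		fifo_enqueue(&head, &reqs[i]);
	while ((req = fifo_dequeue(&head)) != NULL)
		printf("dispatching request #%llu\n", req->sequence);
	return 0;
}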
- */ - __u64 fh_sequence; -}; - -struct nrs_fifo_req { - struct list_head fr_list; - __u64 fr_sequence; -}; - -/** @} fifo */ - -/** - * \name CRR-N - * - * CRR-N, Client Round Robin over NIDs - * @{ - */ - -/** - * private data structure for CRR-N NRS - */ -struct nrs_crrn_net { - struct ptlrpc_nrs_resource cn_res; - cfs_binheap_t *cn_binheap; - cfs_hash_t *cn_cli_hash; - /** - * Used when a new scheduling round commences, in order to synchronize - * all clients with the new round number. - */ - __u64 cn_round; - /** - * Determines the relevant ordering amongst request batches within a - * scheduling round. - */ - __u64 cn_sequence; - /** - * Round Robin quantum; the maximum number of RPCs that each request - * batch for each client can have in a scheduling round. - */ - __u16 cn_quantum; -}; - -/** - * Object representing a client in CRR-N, as identified by its NID - */ -struct nrs_crrn_client { - struct ptlrpc_nrs_resource cc_res; - struct hlist_node cc_hnode; - lnet_nid_t cc_nid; - /** - * The round number against which this client is currently scheduling - * requests. - */ - __u64 cc_round; - /** - * The sequence number used for requests scheduled by this client during - * the current round number. - */ - __u64 cc_sequence; - atomic_t cc_ref; - /** - * Round Robin quantum; the maximum number of RPCs the client is allowed - * to schedule in a single batch of each round. - */ - __u16 cc_quantum; - /** - * # of pending requests for this client, on all existing rounds - */ - __u16 cc_active; -}; - -/** - * CRR-N NRS request definition - */ -struct nrs_crrn_req { - /** - * Round number for this request; shared with all other requests in the - * same batch. - */ - __u64 cr_round; - /** - * Sequence number for this request; shared with all other requests in - * the same batch. - */ - __u64 cr_sequence; -}; - -/** - * CRR-N policy operations. - */ -enum nrs_ctl_crr { - /** - * Read the RR quantum size of a CRR-N policy. - */ - NRS_CTL_CRRN_RD_QUANTUM = PTLRPC_NRS_CTL_1ST_POL_SPEC, - /** - * Write the RR quantum size of a CRR-N policy. - */ - NRS_CTL_CRRN_WR_QUANTUM, -}; - -/** @} CRR-N */ - -/** - * \name ORR/TRR - * - * ORR/TRR (Object-based Round Robin/Target-based Round Robin) NRS policies - * @{ - */ - -/** - * Lower and upper byte offsets of a brw RPC - */ -struct nrs_orr_req_range { - __u64 or_start; - __u64 or_end; -}; - -/** - * RPC types supported by the ORR/TRR policies - */ -enum nrs_orr_supp { - NOS_OST_READ = (1 << 0), - NOS_OST_WRITE = (1 << 1), - NOS_OST_RW = (NOS_OST_READ | NOS_OST_WRITE), - /** - * Default value for policies. - */ - NOS_DFLT = NOS_OST_READ -}; - -/** - * As unique keys for grouping RPCs together, we use the object's OST FID for - * the ORR policy, and the OST index for the TRR policy. - * - * XXX: We waste some space for TRR policy instances by using a union, but it - * allows to consolidate some of the code between ORR and TRR, and these - * policies will probably eventually merge into one anyway. - */ -struct nrs_orr_key { - union { - /** object FID for ORR */ - struct lu_fid ok_fid; - /** OST index for TRR */ - __u32 ok_idx; - }; -}; - -/** - * The largest base string for unique hash/slab object names is - * "nrs_orr_reg_", so 13 characters. We add 3 to this to be used for the CPT - * id number, so this _should_ be more than enough for the maximum number of - * CPTs on any system. 
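The name-length budget discussed above ("nrs_orr_reg_" plus up to three digits of CPT id, with the terminating NUL already counted by sizeof()) can be checked with a few lines of ordinary C. This sketch is not part of the patch and only mirrors what nrs_orr_genobjname() is described as doing; the macro and variable names are illustrative:

#include <stdio.h>

#define EXAMPLE_ORR_OBJ_NAME_MAX	(sizeof("nrs_orr_reg_") + 3)

int main(void)
{
	char name[EXAMPLE_ORR_OBJ_NAME_MAX];	/* 13 + 3 = 16 bytes */
	int cpt_id = 42;			/* assumed CPT index, up to 3 digits */

	snprintf(name, sizeof(name), "%s%d", "nrs_orr_reg_", cpt_id);
	printf("%s (fits in %zu bytes)\n", name, sizeof(name));
	return 0;
}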
If it does happen that this statement is incorrect, - * nrs_orr_genobjname() will inevitably yield a non-unique name and cause - * kmem_cache_create() to complain (on Linux), so the erroneous situation - * will hopefully not go unnoticed. - */ -#define NRS_ORR_OBJ_NAME_MAX (sizeof("nrs_orr_reg_") + 3) - -/** - * private data structure for ORR and TRR NRS - */ -struct nrs_orr_data { - struct ptlrpc_nrs_resource od_res; - cfs_binheap_t *od_binheap; - cfs_hash_t *od_obj_hash; - struct kmem_cache *od_cache; - /** - * Used when a new scheduling round commences, in order to synchronize - * all object or OST batches with the new round number. - */ - __u64 od_round; - /** - * Determines the relevant ordering amongst request batches within a - * scheduling round. - */ - __u64 od_sequence; - /** - * RPC types that are currently supported. - */ - enum nrs_orr_supp od_supp; - /** - * Round Robin quantum; the maxium number of RPCs that each request - * batch for each object or OST can have in a scheduling round. - */ - __u16 od_quantum; - /** - * Whether to use physical disk offsets or logical file offsets. - */ - bool od_physical; - /** - * XXX: We need to provide a persistently allocated string to hold - * unique object names for this policy, since in currently supported - * versions of Linux by Lustre, kmem_cache_create() just sets a pointer - * to the name string provided. kstrdup() is used in the version of - * kmeme_cache_create() in current Linux mainline, so we may be able to - * remove this in the future. - */ - char od_objname[NRS_ORR_OBJ_NAME_MAX]; -}; - -/** - * Represents a backend-fs object or OST in the ORR and TRR policies - * respectively - */ -struct nrs_orr_object { - struct ptlrpc_nrs_resource oo_res; - struct hlist_node oo_hnode; - /** - * The round number against which requests are being scheduled for this - * object or OST - */ - __u64 oo_round; - /** - * The sequence number used for requests scheduled for this object or - * OST during the current round number. - */ - __u64 oo_sequence; - /** - * The key of the object or OST for which this structure instance is - * scheduling RPCs - */ - struct nrs_orr_key oo_key; - long oo_ref; - /** - * Round Robin quantum; the maximum number of RPCs that are allowed to - * be scheduled for the object or OST in a single batch of each round. - */ - __u16 oo_quantum; - /** - * # of pending requests for this object or OST, on all existing rounds - */ - __u16 oo_active; -}; - -/** - * ORR/TRR NRS request definition - */ -struct nrs_orr_req { - /** - * The offset range this request covers - */ - struct nrs_orr_req_range or_range; - /** - * Round number for this request; shared with all other requests in the - * same batch. - */ - __u64 or_round; - /** - * Sequence number for this request; shared with all other requests in - * the same batch. - */ - __u64 or_sequence; - /** - * For debugging purposes. - */ - struct nrs_orr_key or_key; - /** - * An ORR policy instance has filled in request information while - * enqueueing the request on the service partition's regular NRS head. - */ - unsigned int or_orr_set:1; - /** - * A TRR policy instance has filled in request information while - * enqueueing the request on the service partition's regular NRS head. - */ - unsigned int or_trr_set:1; - /** - * Request offset ranges have been filled in with logical offset - * values. - */ - unsigned int or_logical_set:1; - /** - * Request offset ranges have been filled in with physical offset - * values. 
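The or_range field above records the lowest and highest byte touched by a brw RPC, in either logical file offsets or physical disk offsets depending on od_physical. A standalone sketch of how such a range is derived from the request's remote niobufs (not part of the patch; example_niobuf is a simplified stand-in for struct niobuf_remote):

#include <stdio.h>

struct example_niobuf {
	unsigned long long rnb_offset;
	unsigned int rnb_len;
};

struct example_range {
	unsigned long long or_start;
	unsigned long long or_end;
};

static void example_range_fill(const struct example_niobuf *nb, int count,
			       struct example_range *range)
{
	int i;

	range->or_start = nb[0].rnb_offset;
	range->or_end = nb[0].rnb_offset + nb[0].rnb_len;
	for (i = 1; i < count; i++) {
		if (nb[i].rnb_offset < range->or_start)
			range->or_start = nb[i].rnb_offset;
		if (nb[i].rnb_offset + nb[i].rnb_len > range->or_end)
			range->or_end = nb[i].rnb_offset + nb[i].rnb_len;
	}
}

int main(void)
{
	struct example_niobuf nb[2] = {
		{ 0, 4096 }, { 1048576, 8192 },
	};
	struct example_range range;

	example_range_fill(nb, 2, &range);
	printf("request covers [%llu, %llu)\n", range.or_start, range.or_end);
	return 0;
}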
- */ - unsigned int or_physical_set:1; -}; - -/** @} ORR/TRR */ - -#include + void *arg); /** - * NRS request - * - * Instances of this object exist embedded within ptlrpc_request; the main - * purpose of this object is to hold references to the request's resources - * for the lifetime of the request, and to hold properties that policies use - * use for determining the request's scheduling priority. - * */ -struct ptlrpc_nrs_request { - /** - * The request's resource hierarchy. - */ - struct ptlrpc_nrs_resource *nr_res_ptrs[NRS_RES_MAX]; - /** - * Index into ptlrpc_nrs_request::nr_res_ptrs of the resource of the - * policy that was used to enqueue the request. - * - * \see nrs_request_enqueue() - */ - unsigned nr_res_idx; - unsigned nr_initialized:1; - unsigned nr_enqueued:1; - unsigned nr_started:1; - unsigned nr_finalized:1; - cfs_binheap_node_t nr_node; - - /** - * Policy-specific fields, used for determining a request's scheduling - * priority, and other supporting functionality. - */ - union { - /** - * Fields for the FIFO policy - */ - struct nrs_fifo_req fifo; - /** - * CRR-N request defintion - */ - struct nrs_crrn_req crr; - /** ORR and TRR share the same request definition */ - struct nrs_orr_req orr; - /** - * TBF request definition - */ - struct nrs_tbf_req tbf; - } nr_u; - /** - * Externally-registering policies may want to use this to allocate - * their own request properties. - */ - void *ext; + * Definition of request pool structure. + * The pool is used to store empty preallocated requests for the case + * when we would actually need to send something without performing + * any allocations (to avoid e.g. OOM). + */ +struct ptlrpc_request_pool { + /** Locks the list */ + spinlock_t prp_lock; + /** list of ptlrpc_request structs */ + struct list_head prp_req_list; + /** Maximum message size that would fit into a rquest from this pool */ + int prp_rq_size; + /** Function to allocate more requests for this pool */ + int (*prp_populate)(struct ptlrpc_request_pool *, int); }; -/** @} nrs */ +struct lu_context; +struct lu_env; + +struct ldlm_lock; + +#include /** * Basic request prioritization operations structure. @@ -1772,6 +770,173 @@ struct ptlrpc_hpreq_ops { void (*hpreq_fini)(struct ptlrpc_request *); }; +struct ptlrpc_cli_req { + /** For bulk requests on client only: bulk descriptor */ + struct ptlrpc_bulk_desc *cr_bulk; + /** optional time limit for send attempts */ + time64_t cr_delay_limit; + /** time request was first queued */ + time64_t cr_queued_time; + /** request sent in nanoseconds */ + ktime_t cr_sent_ns; + /** time for request really sent out */ + time64_t cr_sent_out; + /** when req reply unlink must finish. */ + time64_t cr_reply_deadline; + /** when req bulk unlink must finish. */ + time64_t cr_bulk_deadline; + /** when req unlink must finish. 
*/ + time64_t cr_req_deadline; + /** Portal to which this request would be sent */ + short cr_req_ptl; + /** Portal where to wait for reply and where reply would be sent */ + short cr_rep_ptl; + /** request resending number */ + unsigned int cr_resend_nr; + /** What was import generation when this request was sent */ + int cr_imp_gen; + enum lustre_imp_state cr_send_state; + /** Per-request waitq introduced by bug 21938 for recovery waiting */ + wait_queue_head_t cr_set_waitq; + /** Link item for request set lists */ + struct list_head cr_set_chain; + /** link to waited ctx */ + struct list_head cr_ctx_chain; + + /** client's half ctx */ + struct ptlrpc_cli_ctx *cr_cli_ctx; + /** Link back to the request set */ + struct ptlrpc_request_set *cr_set; + /** outgoing request MD handle */ + struct lnet_handle_md cr_req_md_h; + /** request-out callback parameter */ + struct ptlrpc_cb_id cr_req_cbid; + /** incoming reply MD handle */ + struct lnet_handle_md cr_reply_md_h; + wait_queue_head_t cr_reply_waitq; + /** reply callback parameter */ + struct ptlrpc_cb_id cr_reply_cbid; + /** Async completion handler, called when reply is received */ + ptlrpc_interpterer_t cr_reply_interp; + /** Resend handler, called when request is resend to update RPC data */ + ptlrpc_resend_cb_t cr_resend_cb; + /** Async completion context */ + union ptlrpc_async_args cr_async_args; + /** Opaq data for replay and commit callbacks. */ + void *cr_cb_data; + /** Link to the imp->imp_unreplied_list */ + struct list_head cr_unreplied_list; + /** + * Commit callback, called when request is committed and about to be + * freed. + */ + void (*cr_commit_cb)(struct ptlrpc_request *); + /** Replay callback, called after request is replayed at recovery */ + void (*cr_replay_cb)(struct ptlrpc_request *); +}; + +/** client request member alias */ +/* NB: these alias should NOT be used by any new code, instead they should + * be removed step by step to avoid potential abuse */ +#define rq_bulk rq_cli.cr_bulk +#define rq_delay_limit rq_cli.cr_delay_limit +#define rq_queued_time rq_cli.cr_queued_time +#define rq_sent_ns rq_cli.cr_sent_ns +#define rq_real_sent rq_cli.cr_sent_out +#define rq_reply_deadline rq_cli.cr_reply_deadline +#define rq_bulk_deadline rq_cli.cr_bulk_deadline +#define rq_req_deadline rq_cli.cr_req_deadline +#define rq_nr_resend rq_cli.cr_resend_nr +#define rq_request_portal rq_cli.cr_req_ptl +#define rq_reply_portal rq_cli.cr_rep_ptl +#define rq_import_generation rq_cli.cr_imp_gen +#define rq_send_state rq_cli.cr_send_state +#define rq_set_chain rq_cli.cr_set_chain +#define rq_ctx_chain rq_cli.cr_ctx_chain +#define rq_set rq_cli.cr_set +#define rq_set_waitq rq_cli.cr_set_waitq +#define rq_cli_ctx rq_cli.cr_cli_ctx +#define rq_req_md_h rq_cli.cr_req_md_h +#define rq_req_cbid rq_cli.cr_req_cbid +#define rq_reply_md_h rq_cli.cr_reply_md_h +#define rq_reply_waitq rq_cli.cr_reply_waitq +#define rq_reply_cbid rq_cli.cr_reply_cbid +#define rq_interpret_reply rq_cli.cr_reply_interp +#define rq_resend_cb rq_cli.cr_resend_cb +#define rq_async_args rq_cli.cr_async_args +#define rq_cb_data rq_cli.cr_cb_data +#define rq_unreplied_list rq_cli.cr_unreplied_list +#define rq_commit_cb rq_cli.cr_commit_cb +#define rq_replay_cb rq_cli.cr_replay_cb + +struct ptlrpc_srv_req { + /** initial thread servicing this request */ + struct ptlrpc_thread *sr_svc_thread; + /** + * Server side list of incoming unserved requests sorted by arrival + * time. 
Traversed from time to time to notice about to expire + * requests and sent back "early replies" to clients to let them + * know server is alive and well, just very busy to service their + * requests in time + */ + struct list_head sr_timed_list; + /** server-side per-export list */ + struct list_head sr_exp_list; + /** server-side history, used for debuging purposes. */ + struct list_head sr_hist_list; + /** history sequence # */ + __u64 sr_hist_seq; + /** the index of service's srv_at_array into which request is linked */ + __u32 sr_at_index; + /** authed uid */ + uid_t sr_auth_uid; + /** authed uid mapped to */ + uid_t sr_auth_mapped_uid; + /** RPC is generated from what part of Lustre */ + enum lustre_sec_part sr_sp_from; + /** request session context */ + struct lu_context sr_ses; + /** \addtogroup nrs + * @{ + */ + /** stub for NRS request */ + struct ptlrpc_nrs_request sr_nrq; + /** @} nrs */ + /** request arrival time */ + struct timespec64 sr_arrival_time; + /** server's half ctx */ + struct ptlrpc_svc_ctx *sr_svc_ctx; + /** (server side), pointed directly into req buffer */ + struct ptlrpc_user_desc *sr_user_desc; + /** separated reply state, may be vmalloc'd */ + struct ptlrpc_reply_state *sr_reply_state; + /** server-side hp handlers */ + struct ptlrpc_hpreq_ops *sr_ops; + /** incoming request buffer */ + struct ptlrpc_request_buffer_desc *sr_rqbd; +}; + +/** server request member alias */ +/* NB: these alias should NOT be used by any new code, instead they should + * be removed step by step to avoid potential abuse */ +#define rq_svc_thread rq_srv.sr_svc_thread +#define rq_timed_list rq_srv.sr_timed_list +#define rq_exp_list rq_srv.sr_exp_list +#define rq_history_list rq_srv.sr_hist_list +#define rq_history_seq rq_srv.sr_hist_seq +#define rq_at_index rq_srv.sr_at_index +#define rq_auth_uid rq_srv.sr_auth_uid +#define rq_auth_mapped_uid rq_srv.sr_auth_mapped_uid +#define rq_sp_from rq_srv.sr_sp_from +#define rq_session rq_srv.sr_ses +#define rq_nrq rq_srv.sr_nrq +#define rq_arrival_time rq_srv.sr_arrival_time +#define rq_reply_state rq_srv.sr_reply_state +#define rq_svc_ctx rq_srv.sr_svc_ctx +#define rq_user_desc rq_srv.sr_user_desc +#define rq_ops rq_srv.sr_ops +#define rq_rqbd rq_srv.sr_rqbd + /** * Represents remote procedure call. * @@ -1780,47 +945,20 @@ struct ptlrpc_hpreq_ops { */ struct ptlrpc_request { /* Request type: one of PTL_RPC_MSG_* */ - int rq_type; + int rq_type; /** Result of request processing */ - int rq_status; + int rq_status; /** * Linkage item through which this request is included into * sending/delayed lists on client and into rqbd list on server */ - struct list_head rq_list; - /** - * Server side list of incoming unserved requests sorted by arrival - * time. Traversed from time to time to notice about to expire - * requests and sent back "early replies" to clients to let them - * know server is alive and well, just very busy to service their - * requests in time - */ - struct list_head rq_timed_list; - /** server-side history, used for debuging purposes. 
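The restructuring in this hunk moves client-only and server-only members of struct ptlrpc_request into struct ptlrpc_cli_req and struct ptlrpc_srv_req, overlays them in a union further down in the struct (a request is never both at once), and keeps the old field names working through the #define aliases above. A minimal standalone illustration of the same pattern (not part of the patch; the fields and names are made up, and C11 anonymous unions are assumed):

#include <stdio.h>

struct example_cli_part {
	int cr_resend_nr;
};

struct example_srv_part {
	int sr_at_index;
};

struct example_req {
	int rq_type;
	union {				/* client and server never need both */
		struct example_cli_part rq_cli;
		struct example_srv_part rq_srv;
	};
};

/* old field names keep working via aliases, as the patch does */
#define rq_nr_resend	rq_cli.cr_resend_nr
#define rq_at_index	rq_srv.sr_at_index

int main(void)
{
	struct example_req req = { .rq_type = 1 };

	req.rq_nr_resend = 3;	/* expands to req.rq_cli.cr_resend_nr */
	printf("resends = %d\n", req.rq_nr_resend);
	return 0;
}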
*/ - struct list_head rq_history_list; - /** server-side per-export list */ - struct list_head rq_exp_list; - /** server-side hp handlers */ - struct ptlrpc_hpreq_ops *rq_ops; - - /** initial thread servicing this request */ - struct ptlrpc_thread *rq_svc_thread; - - /** history sequence # */ - __u64 rq_history_seq; - /** \addtogroup nrs - * @{ + struct list_head rq_list; + /** Lock to protect request flags and some other important bits, like + * rq_list */ - /** stub for NRS request */ - struct ptlrpc_nrs_request rq_nrq; - /** @} nrs */ - /** the index of service's srv_at_array into which request is linked */ - time_t rq_at_index; - /** Lock to protect request flags and some other important bits, like - * rq_list - */ - spinlock_t rq_lock; - /** client-side flags are serialized by rq_lock */ + spinlock_t rq_lock; + spinlock_t rq_early_free_lock; + /** client-side flags are serialized by rq_lock @{ */ unsigned int rq_intr:1, rq_replied:1, rq_err:1, rq_timedout:1, rq_resend:1, rq_restart:1, /** @@ -1836,37 +974,41 @@ struct ptlrpc_request { rq_no_resend:1, rq_waiting:1, rq_receiving_reply:1, rq_no_delay:1, rq_net_err:1, rq_wait_ctx:1, rq_early:1, - rq_req_unlink:1, rq_reply_unlink:1, - rq_memalloc:1, /* req originated from "kswapd" */ - /* server-side flags */ - rq_packed_final:1, /* packed final reply */ - rq_hp:1, /* high priority RPC */ - rq_at_linked:1, /* link into service's srv_at_array */ - rq_reply_truncate:1, - rq_committed:1, - /* whether the "rq_set" is a valid one */ - rq_invalid_rqset:1, + rq_req_unlinked:1, /* unlinked request buffer from lnet */ + rq_reply_unlinked:1, /* unlinked reply buffer from lnet */ + rq_memalloc:1, /* req originated from "kswapd" */ + rq_committed:1, + rq_reply_truncated:1, + /** whether the "rq_set" is a valid one */ + rq_invalid_rqset:1, rq_generation_set:1, - /* do not resend request on -EINPROGRESS */ + /** do not resend request on -EINPROGRESS */ rq_no_retry_einprogress:1, /* allow the req to be sent if the import is in recovery * status */ rq_allow_replay:1, /* bulk request, sent to server, but uncommitted */ - rq_unstable:1; - - unsigned int rq_nr_resend; - - enum rq_phase rq_phase; /* one of RQ_PHASE_* */ - enum rq_phase rq_next_phase; /* one of RQ_PHASE_* to be used next */ - atomic_t rq_refcount;/* client-side refcount for SENT race, - server-side refcounf for multiple replies */ + rq_unstable:1, + rq_early_free_repbuf:1, /* free reply buffer in advance */ + rq_allow_intr:1; + /** @} */ - /** Portal to which this request would be sent */ - short rq_request_portal; /* XXX FIXME bug 249 */ - /** Portal where to wait for reply and where reply would be sent */ - short rq_reply_portal; /* XXX FIXME bug 249 */ + /** server-side flags @{ */ + unsigned int + rq_hp:1, /**< high priority RPC */ + rq_at_linked:1, /**< link into service's srv_at_array */ + rq_packed_final:1; /**< packed final reply */ + /** @} */ + /** one of RQ_PHASE_* */ + enum rq_phase rq_phase; + /** one of RQ_PHASE_* to be used next */ + enum rq_phase rq_next_phase; + /** + * client-side refcount for SENT race, server-side refcounf + * for multiple replies + */ + atomic_t rq_refcount; /** * client-side: * !rq_truncate : # reply bytes actually received, @@ -1877,6 +1019,8 @@ struct ptlrpc_request { int rq_reqlen; /** Reply length */ int rq_replen; + /** Pool if request is from preallocated list */ + struct ptlrpc_request_pool *rq_pool; /** Request message - what client sent */ struct lustre_msg *rq_reqmsg; /** Reply message - server response */ @@ -1884,23 +1028,33 @@ struct 
ptlrpc_request { /** Transaction number */ __u64 rq_transno; /** xid */ - __u64 rq_xid; - /** - * List item to for replay list. Not yet commited requests get linked - * there. - * Also see \a rq_replay comment above. - */ - struct list_head rq_replay_list; - + __u64 rq_xid; + /** bulk match bits */ + __u64 rq_mbits; + /** + * List item to for replay list. Not yet committed requests get linked + * there. + * Also see \a rq_replay comment above. + * It's also link chain on obd_export::exp_req_replay_queue + */ + struct list_head rq_replay_list; + /** non-shared members for client & server request*/ + union { + struct ptlrpc_cli_req rq_cli; + struct ptlrpc_srv_req rq_srv; + }; /** * security and encryption data * @{ */ - struct ptlrpc_cli_ctx *rq_cli_ctx; /**< client's half ctx */ - struct ptlrpc_svc_ctx *rq_svc_ctx; /**< server's half ctx */ - struct list_head rq_ctx_chain; /**< link to waited ctx */ + /** description of flavors for client & server */ + struct sptlrpc_flavor rq_flvr; - struct sptlrpc_flavor rq_flvr; /**< for client & server */ - enum lustre_sec_part rq_sp_from; + /** + * SELinux policy info at the time of the request + * sepol string format is: + * ::: + */ + char rq_sepol[LUSTRE_NODEMAP_SEPOL_LENGTH + 1]; /* client/server security flags */ unsigned int @@ -1910,7 +1064,6 @@ struct ptlrpc_request { rq_bulk_write:1, /* request bulk write */ /* server authentication flags */ rq_auth_gss:1, /* authenticated by gss */ - rq_auth_remote:1, /* authed as remote user */ rq_auth_usr_root:1, /* authed as root */ rq_auth_usr_mdt:1, /* authed as mdt */ rq_auth_usr_ost:1, /* authed as ost */ @@ -1919,19 +1072,16 @@ struct ptlrpc_request { rq_pack_bulk:1, /* doesn't expect reply FIXME */ rq_no_reply:1, - rq_pill_init:1; /* pill initialized */ + rq_pill_init:1, /* pill initialized */ + rq_srv_req:1; /* server request */ - uid_t rq_auth_uid; /* authed uid */ - uid_t rq_auth_mapped_uid; /* authed uid mapped to */ - /* (server side), pointed directly into req buffer */ - struct ptlrpc_user_desc *rq_user_desc; - - /* various buffer pointers */ - struct lustre_msg *rq_reqbuf; /* req wrapper */ - char *rq_repbuf; /* rep buffer */ - struct lustre_msg *rq_repdata; /* rep wrapper msg */ - struct lustre_msg *rq_clrbuf; /* only in priv mode */ + /** various buffer pointers */ + struct lustre_msg *rq_reqbuf; /**< req wrapper, vmalloc*/ + char *rq_repbuf; /**< rep buffer, vmalloc */ + struct lustre_msg *rq_repdata; /**< rep wrapper msg */ + /** only in priv mode */ + struct lustre_msg *rq_clrbuf; int rq_reqbuf_len; /* req wrapper buf len */ int rq_reqdata_len; /* req wrapper msg len */ int rq_repbuf_len; /* rep buffer len */ @@ -1940,107 +1090,39 @@ struct ptlrpc_request { int rq_clrdata_len; /* only in priv mode */ /** early replies go to offset 0, regular replies go after that */ - unsigned int rq_reply_off; - - /** @} */ - - /** Fields that help to see if request and reply were swabbed or not */ - __u32 rq_req_swab_mask; - __u32 rq_rep_swab_mask; - - /** What was import generation when this request was sent */ - int rq_import_generation; - enum lustre_imp_state rq_send_state; - - /** how many early replies (for stats) */ - int rq_early_count; - - /** client+server request */ - lnet_handle_md_t rq_req_md_h; - struct ptlrpc_cb_id rq_req_cbid; - /** optional time limit for send attempts */ - cfs_duration_t rq_delay_limit; - /** time request was first queued */ - cfs_time_t rq_queued_time; - - /* server-side... 
*/ - /** request arrival time */ - struct timeval rq_arrival_time; - /** separated reply state */ - struct ptlrpc_reply_state *rq_reply_state; - /** incoming request buffer */ - struct ptlrpc_request_buffer_desc *rq_rqbd; - - /** client-only incoming reply */ - lnet_handle_md_t rq_reply_md_h; - wait_queue_head_t rq_reply_waitq; - struct ptlrpc_cb_id rq_reply_cbid; - - /** our LNet NID */ - lnet_nid_t rq_self; - /** Peer description (the other side) */ - lnet_process_id_t rq_peer; - /** Server-side, export on which request was received */ - struct obd_export *rq_export; - /** Client side, import where request is being sent */ - struct obd_import *rq_import; - - /** Replay callback, called after request is replayed at recovery */ - void (*rq_replay_cb)(struct ptlrpc_request *); - /** - * Commit callback, called when request is committed and about to be - * freed. - */ - void (*rq_commit_cb)(struct ptlrpc_request *); - /** Opaq data for replay and commit callbacks. */ - void *rq_cb_data; - - /** For bulk requests on client only: bulk descriptor */ - struct ptlrpc_bulk_desc *rq_bulk; - - /** client outgoing req */ - /** - * when request/reply sent (secs), or time when request should be sent - */ - time_t rq_sent; - /** time for request really sent out */ - time_t rq_real_sent; - - /** when request must finish. volatile - * so that servers' early reply updates to the deadline aren't - * kept in per-cpu cache */ - volatile time_t rq_deadline; - /** when req reply unlink must finish. */ - time_t rq_reply_deadline; - /** when req bulk unlink must finish. */ - time_t rq_bulk_deadline; - /** - * service time estimate (secs) - * If the requestsis not served by this time, it is marked as timed out. - */ - int rq_timeout; - - /** Multi-rpc bits */ - /** Per-request waitq introduced by bug 21938 for recovery waiting */ - wait_queue_head_t rq_set_waitq; - /** Link item for request set lists */ - struct list_head rq_set_chain; - /** Link back to the request set */ - struct ptlrpc_request_set *rq_set; - /** Async completion handler, called when reply is received */ - ptlrpc_interpterer_t rq_interpret_reply; - /** Resend handler, called when request is resend to update RPC data */ - ptlrpc_resend_cb_t rq_resend_cb; - /** Async completion context */ - union ptlrpc_async_args rq_async_args; - - /** Pool if request is from preallocated list */ - struct ptlrpc_request_pool *rq_pool; - - struct lu_context rq_session; + unsigned int rq_reply_off; + /** @} */ - /** request format description */ - struct req_capsule rq_pill; + /** Fields that help to see if request and reply were swabbed or not */ + __u32 rq_req_swab_mask; + __u32 rq_rep_swab_mask; + + /** how many early replies (for stats) */ + int rq_early_count; + /** Server-side, export on which request was received */ + struct obd_export *rq_export; + /** import where request is being sent */ + struct obd_import *rq_import; + /** our LNet NID */ + lnet_nid_t rq_self; + /** Peer description (the other side) */ + struct lnet_process_id rq_peer; + /** Descriptor for the NID from which the peer sent the request. */ + struct lnet_process_id rq_source; + /** + * service time estimate (secs) + * If the request is not served by this time, it is marked as timed out. + * Do not change to time64_t since this is transmitted over the wire. + */ + time_t rq_timeout; + /** + * when request/reply sent (secs), or time when request should be sent + */ + time64_t rq_sent; + /** when request must finish. 
*/ + time64_t rq_deadline; + /** request format description */ + struct req_capsule rq_pill; }; /** @@ -2050,13 +1132,14 @@ struct ptlrpc_request { static inline int ptlrpc_req_interpret(const struct lu_env *env, struct ptlrpc_request *req, int rc) { - if (req->rq_interpret_reply != NULL) { - req->rq_status = req->rq_interpret_reply(env, req, - &req->rq_async_args, - rc); - return req->rq_status; - } - return rc; + if (req->rq_interpret_reply != NULL) { + req->rq_status = req->rq_interpret_reply(env, req, + &req->rq_async_args, + rc); + return req->rq_status; + } + + return rc; } /** \addtogroup nrs @@ -2089,37 +1172,37 @@ static inline bool ptlrpc_nrs_req_can_move(struct ptlrpc_request *req) /** @} nrs */ /** - * Returns 1 if request buffer at offset \a index was already swabbed + * Returns true if request buffer at offset \a index was already swabbed */ -static inline int lustre_req_swabbed(struct ptlrpc_request *req, size_t index) +static inline bool lustre_req_swabbed(struct ptlrpc_request *req, size_t index) { - LASSERT(index < sizeof(req->rq_req_swab_mask) * 8); - return req->rq_req_swab_mask & (1 << index); + LASSERT(index < sizeof(req->rq_req_swab_mask) * 8); + return req->rq_req_swab_mask & (1 << index); } /** - * Returns 1 if request reply buffer at offset \a index was already swabbed + * Returns true if request reply buffer at offset \a index was already swabbed */ -static inline int lustre_rep_swabbed(struct ptlrpc_request *req, size_t index) +static inline bool lustre_rep_swabbed(struct ptlrpc_request *req, size_t index) { - LASSERT(index < sizeof(req->rq_rep_swab_mask) * 8); - return req->rq_rep_swab_mask & (1 << index); + LASSERT(index < sizeof(req->rq_rep_swab_mask) * 8); + return req->rq_rep_swab_mask & (1 << index); } /** - * Returns 1 if request needs to be swabbed into local cpu byteorder + * Returns true if request needs to be swabbed into local cpu byteorder */ -static inline int ptlrpc_req_need_swab(struct ptlrpc_request *req) +static inline bool ptlrpc_req_need_swab(struct ptlrpc_request *req) { - return lustre_req_swabbed(req, MSG_PTLRPC_HEADER_OFF); + return lustre_req_swabbed(req, MSG_PTLRPC_HEADER_OFF); } /** - * Returns 1 if request reply needs to be swabbed into local cpu byteorder + * Returns true if request reply needs to be swabbed into local cpu byteorder */ -static inline int ptlrpc_rep_need_swab(struct ptlrpc_request *req) +static inline bool ptlrpc_rep_need_swab(struct ptlrpc_request *req) { - return lustre_rep_swabbed(req, MSG_PTLRPC_HEADER_OFF); + return lustre_rep_swabbed(req, MSG_PTLRPC_HEADER_OFF); } /** @@ -2150,22 +1233,24 @@ static inline void lustre_set_rep_swabbed(struct ptlrpc_request *req, static inline const char * ptlrpc_phase2str(enum rq_phase phase) { - switch (phase) { - case RQ_PHASE_NEW: - return "New"; - case RQ_PHASE_RPC: - return "Rpc"; - case RQ_PHASE_BULK: - return "Bulk"; - case RQ_PHASE_INTERPRET: - return "Interpret"; - case RQ_PHASE_COMPLETE: - return "Complete"; - case RQ_PHASE_UNREGISTERING: - return "Unregistering"; - default: - return "?Phase?"; - } + switch (phase) { + case RQ_PHASE_NEW: + return "New"; + case RQ_PHASE_RPC: + return "Rpc"; + case RQ_PHASE_BULK: + return "Bulk"; + case RQ_PHASE_INTERPRET: + return "Interpret"; + case RQ_PHASE_COMPLETE: + return "Complete"; + case RQ_PHASE_UNREG_RPC: + return "UnregRPC"; + case RQ_PHASE_UNREG_BULK: + return "UnregBULK"; + default: + return "?Phase?"; + } } /** @@ -2181,23 +1266,26 @@ ptlrpc_rqphase2str(struct ptlrpc_request *req) /** * Debugging functions and helpers to 
print request structure into debug log * @{ - */ + */ /* Spare the preprocessor, spoil the bugs. */ #define FLAG(field, str) (field ? str : "") /** Convert bit flags into a string */ -#define DEBUG_REQ_FLAGS(req) \ - ptlrpc_rqphase2str(req), \ - FLAG(req->rq_intr, "I"), FLAG(req->rq_replied, "R"), \ - FLAG(req->rq_err, "E"), \ - FLAG(req->rq_timedout, "X") /* eXpired */, FLAG(req->rq_resend, "S"), \ - FLAG(req->rq_restart, "T"), FLAG(req->rq_replay, "P"), \ - FLAG(req->rq_no_resend, "N"), \ - FLAG(req->rq_waiting, "W"), \ - FLAG(req->rq_wait_ctx, "C"), FLAG(req->rq_hp, "H"), \ - FLAG(req->rq_committed, "M") - -#define REQ_FLAGS_FMT "%s:%s%s%s%s%s%s%s%s%s%s%s%s" +#define DEBUG_REQ_FLAGS(req) \ + ptlrpc_rqphase2str(req), \ + FLAG(req->rq_intr, "I"), FLAG(req->rq_replied, "R"), \ + FLAG(req->rq_err, "E"), FLAG(req->rq_net_err, "e"), \ + FLAG(req->rq_timedout, "X") /* eXpired */, FLAG(req->rq_resend, "S"), \ + FLAG(req->rq_restart, "T"), FLAG(req->rq_replay, "P"), \ + FLAG(req->rq_no_resend, "N"), \ + FLAG(req->rq_waiting, "W"), \ + FLAG(req->rq_wait_ctx, "C"), FLAG(req->rq_hp, "H"), \ + FLAG(req->rq_committed, "M"), \ + FLAG(req->rq_req_unlinked, "Q"), \ + FLAG(req->rq_reply_unlinked, "U"), \ + FLAG(req->rq_receiving_reply, "r") + +#define REQ_FLAGS_FMT "%s:%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s" void _debug_req(struct ptlrpc_request *req, struct libcfs_debug_msg_data *data, const char *fmt, ...) @@ -2224,7 +1312,7 @@ do { \ #define DEBUG_REQ(level, req, fmt, args...) \ do { \ if ((level) & (D_ERROR | D_WARNING)) { \ - static cfs_debug_limit_state_t cdls; \ + static struct cfs_debug_limit_state cdls; \ LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, level, &cdls); \ debug_req(&msgdata, level, &cdls, req, "@@@ "fmt" ", ## args);\ } else { \ @@ -2250,12 +1338,98 @@ struct ptlrpc_bulk_page { struct page *bp_page; }; -#define BULK_GET_SOURCE 0 -#define BULK_PUT_SINK 1 -#define BULK_GET_SINK 2 -#define BULK_PUT_SOURCE 3 +enum ptlrpc_bulk_op_type { + PTLRPC_BULK_OP_ACTIVE = 0x00000001, + PTLRPC_BULK_OP_PASSIVE = 0x00000002, + PTLRPC_BULK_OP_PUT = 0x00000004, + PTLRPC_BULK_OP_GET = 0x00000008, + PTLRPC_BULK_BUF_KVEC = 0x00000010, + PTLRPC_BULK_BUF_KIOV = 0x00000020, + PTLRPC_BULK_GET_SOURCE = PTLRPC_BULK_OP_PASSIVE | PTLRPC_BULK_OP_GET, + PTLRPC_BULK_PUT_SINK = PTLRPC_BULK_OP_PASSIVE | PTLRPC_BULK_OP_PUT, + PTLRPC_BULK_GET_SINK = PTLRPC_BULK_OP_ACTIVE | PTLRPC_BULK_OP_GET, + PTLRPC_BULK_PUT_SOURCE = PTLRPC_BULK_OP_ACTIVE | PTLRPC_BULK_OP_PUT, +}; -/** +static inline bool ptlrpc_is_bulk_op_get(enum ptlrpc_bulk_op_type type) +{ + return (type & PTLRPC_BULK_OP_GET) == PTLRPC_BULK_OP_GET; +} + +static inline bool ptlrpc_is_bulk_get_source(enum ptlrpc_bulk_op_type type) +{ + return (type & PTLRPC_BULK_GET_SOURCE) == PTLRPC_BULK_GET_SOURCE; +} + +static inline bool ptlrpc_is_bulk_put_sink(enum ptlrpc_bulk_op_type type) +{ + return (type & PTLRPC_BULK_PUT_SINK) == PTLRPC_BULK_PUT_SINK; +} + +static inline bool ptlrpc_is_bulk_get_sink(enum ptlrpc_bulk_op_type type) +{ + return (type & PTLRPC_BULK_GET_SINK) == PTLRPC_BULK_GET_SINK; +} + +static inline bool ptlrpc_is_bulk_put_source(enum ptlrpc_bulk_op_type type) +{ + return (type & PTLRPC_BULK_PUT_SOURCE) == PTLRPC_BULK_PUT_SOURCE; +} + +static inline bool ptlrpc_is_bulk_desc_kvec(enum ptlrpc_bulk_op_type type) +{ + return ((type & PTLRPC_BULK_BUF_KVEC) | (type & PTLRPC_BULK_BUF_KIOV)) + == PTLRPC_BULK_BUF_KVEC; +} + +static inline bool ptlrpc_is_bulk_desc_kiov(enum ptlrpc_bulk_op_type type) +{ + return ((type & PTLRPC_BULK_BUF_KVEC) | (type & PTLRPC_BULK_BUF_KIOV)) + == 
PTLRPC_BULK_BUF_KIOV; +} + +static inline bool ptlrpc_is_bulk_op_active(enum ptlrpc_bulk_op_type type) +{ + return ((type & PTLRPC_BULK_OP_ACTIVE) | + (type & PTLRPC_BULK_OP_PASSIVE)) + == PTLRPC_BULK_OP_ACTIVE; +} + +static inline bool ptlrpc_is_bulk_op_passive(enum ptlrpc_bulk_op_type type) +{ + return ((type & PTLRPC_BULK_OP_ACTIVE) | + (type & PTLRPC_BULK_OP_PASSIVE)) + == PTLRPC_BULK_OP_PASSIVE; +} + +struct ptlrpc_bulk_frag_ops { + /** + * Add a page \a page to the bulk descriptor \a desc + * Data to transfer in the page starts at offset \a pageoffset and + * amount of data to transfer from the page is \a len + */ + void (*add_kiov_frag)(struct ptlrpc_bulk_desc *desc, + struct page *page, int pageoffset, int len); + + /* + * Add a \a fragment to the bulk descriptor \a desc. + * Data to transfer in the fragment is pointed to by \a frag + * The size of the fragment is \a len + */ + int (*add_iov_frag)(struct ptlrpc_bulk_desc *desc, void *frag, int len); + + /** + * Uninitialize and free bulk descriptor \a desc. + * Works on bulk descriptors both from server and client side. + */ + void (*release_frags)(struct ptlrpc_bulk_desc *desc); +}; + +extern const struct ptlrpc_bulk_frag_ops ptlrpc_bulk_kiov_pin_ops; +extern const struct ptlrpc_bulk_frag_ops ptlrpc_bulk_kiov_nopin_ops; +extern const struct ptlrpc_bulk_frag_ops ptlrpc_bulk_kvec_ops; + +/* * Definition of bulk descriptor. * Bulks are special "Two phase" RPCs where initial request message * is sent first and it is followed bt a transfer (o receiving) of a large @@ -2268,14 +1442,14 @@ struct ptlrpc_bulk_page { struct ptlrpc_bulk_desc { /** completed with failure */ unsigned long bd_failure:1; - /** {put,get}{source,sink} */ - unsigned long bd_type:2; /** client side */ unsigned long bd_registered:1; /** For serialization with callback */ spinlock_t bd_lock; /** Import generation when request for this bulk was sent */ int bd_import_generation; + /** {put,get}{source,sink}{kvec,kiov} */ + enum ptlrpc_bulk_op_type bd_type; /** LNet portal for this bulk */ __u32 bd_portal; /** Server side - export this bulk created for */ @@ -2284,36 +1458,55 @@ struct ptlrpc_bulk_desc { struct obd_import *bd_import; /** Back pointer to the request */ struct ptlrpc_request *bd_req; + struct ptlrpc_bulk_frag_ops *bd_frag_ops; wait_queue_head_t bd_waitq; /* server side only WQ */ int bd_iov_count; /* # entries in bd_iov */ int bd_max_iov; /* allocated size of bd_iov */ int bd_nob; /* # bytes covered */ int bd_nob_transferred; /* # bytes GOT/PUT */ - __u64 bd_last_xid; + __u64 bd_last_mbits; struct ptlrpc_cb_id bd_cbid; /* network callback info */ lnet_nid_t bd_sender; /* stash event::sender */ int bd_md_count; /* # valid entries in bd_mds */ int bd_md_max_brw; /* max entries in bd_mds */ /** array of associated MDs */ - lnet_handle_md_t bd_mds[PTLRPC_BULK_OPS_COUNT]; + struct lnet_handle_md bd_mds[PTLRPC_BULK_OPS_COUNT]; - /* - * encrypt iov, size is either 0 or bd_iov_count. - */ - lnet_kiov_t *bd_enc_iov; + union { + struct { + /* + * encrypt iov, size is either 0 or bd_iov_count. 
+ */ + lnet_kiov_t *bd_enc_vec; + lnet_kiov_t *bd_vec; + } bd_kiov; + + struct { + struct kvec *bd_enc_kvec; + struct kvec *bd_kvec; + } bd_kvec; + } bd_u; - lnet_kiov_t bd_iov[0]; }; +#define GET_KIOV(desc) ((desc)->bd_u.bd_kiov.bd_vec) +#define BD_GET_KIOV(desc, i) ((desc)->bd_u.bd_kiov.bd_vec[i]) +#define GET_ENC_KIOV(desc) ((desc)->bd_u.bd_kiov.bd_enc_vec) +#define BD_GET_ENC_KIOV(desc, i) ((desc)->bd_u.bd_kiov.bd_enc_vec[i]) +#define GET_KVEC(desc) ((desc)->bd_u.bd_kvec.bd_kvec) +#define BD_GET_KVEC(desc, i) ((desc)->bd_u.bd_kvec.bd_kvec[i]) +#define GET_ENC_KVEC(desc) ((desc)->bd_u.bd_kvec.bd_enc_kvec) +#define BD_GET_ENC_KVEC(desc, i) ((desc)->bd_u.bd_kvec.bd_enc_kvec[i]) + enum { - SVC_STOPPED = 1 << 0, - SVC_STOPPING = 1 << 1, - SVC_STARTING = 1 << 2, - SVC_RUNNING = 1 << 3, - SVC_EVENT = 1 << 4, - SVC_SIGNAL = 1 << 5, + SVC_INIT = 0, + SVC_STOPPED = 1 << 0, + SVC_STOPPING = 1 << 1, + SVC_STARTING = 1 << 2, + SVC_RUNNING = 1 << 3, + SVC_EVENT = 1 << 4, }; #define PTLRPC_THR_NAME_LEN 32 @@ -2321,30 +1514,32 @@ enum { * Definition of server service thread structure */ struct ptlrpc_thread { - /** - * List of active threads in svc->srv_threads - */ + /** + * List of active threads in svc->srv_threads + */ struct list_head t_link; - /** - * thread-private data (preallocated memory) - */ - void *t_data; - __u32 t_flags; - /** - * service thread index, from ptlrpc_start_threads - */ - unsigned int t_id; - /** - * service thread pid - */ - pid_t t_pid; - /** - * put watchdog in the structure per thread b=14840 - */ - struct lc_watchdog *t_watchdog; - /** - * the svc this thread belonged to b=18582 - */ + /** + * thread-private data (preallocated vmalloc'd memory) + */ + void *t_data; + __u32 t_flags; + /** + * service thread index, from ptlrpc_start_threads + */ + unsigned int t_id; + /** + * service thread + */ + struct task_struct *t_task; + pid_t t_pid; + ktime_t t_touched; + /** + * put watchdog in the structure per thread b=14840 + */ + struct delayed_work t_watchdog; + /** + * the svc this thread belonged to b=18582 + */ struct ptlrpc_service_part *t_svcpt; wait_queue_head_t t_ctl_waitq; struct lu_env *t_env; @@ -2381,11 +1576,6 @@ static inline int thread_is_event(struct ptlrpc_thread *thread) return !!(thread->t_flags & SVC_EVENT); } -static inline int thread_is_signal(struct ptlrpc_thread *thread) -{ - return !!(thread->t_flags & SVC_SIGNAL); -} - static inline void thread_clear_flags(struct ptlrpc_thread *thread, __u32 flags) { thread->t_flags &= ~flags; @@ -2426,7 +1616,7 @@ struct ptlrpc_request_buffer_desc { /** Back pointer to service for which this buffer is registered */ struct ptlrpc_service_part *rqbd_svcpt; /** LNet descriptor */ - lnet_handle_md_t rqbd_md_h; + struct lnet_handle_md rqbd_md_h; int rqbd_refcount; /** The buffer itself */ char *rqbd_buffer; @@ -2501,8 +1691,8 @@ struct ptlrpc_service { int srv_nthrs_cpt_init; /** limit of threads number for each partition */ int srv_nthrs_cpt_limit; - /** Root of /proc dir tree for this service */ - struct proc_dir_entry *srv_procroot; + /** Root of debugfs dir tree for this service */ + struct dentry *srv_debugfs_entry; /** Pointer to statistic data for this service */ struct lprocfs_stats *srv_stats; /** # hp per lp reqs to handle */ @@ -2528,17 +1718,25 @@ struct ptlrpc_service { int srv_watchdog_factor; /** under unregister_service */ unsigned srv_is_stopping:1; + /** Whether or not to restrict service threads to CPUs in this CPT */ + unsigned srv_cpt_bind:1; + /** max # request buffers */ + int srv_nrqbds_max; /** 
max # request buffers in history per partition */ int srv_hist_nrqbds_cpt_max; - /** number of CPTs this service bound on */ + /** number of CPTs this service associated with */ int srv_ncpts; - /** CPTs array this service bound on */ + /** CPTs array this service associated with */ __u32 *srv_cpts; /** 2^srv_cptab_bits >= cfs_cpt_numbert(srv_cptable) */ int srv_cpt_bits; /** CPT table this service is running over */ struct cfs_cpt_table *srv_cptable; + + /* sysfs object */ + struct kobject srv_kobj; + struct completion srv_kobj_unregister; /** * partition data for ptlrpc service */ @@ -2585,6 +1783,8 @@ struct ptlrpc_service_part { * threads starting & stopping are also protected by this lock. */ spinlock_t scp_lock __cfs_cacheline_aligned; + /** userland serialization */ + struct mutex scp_mutex; /** total # req buffer descs allocated */ int scp_nrqbds_total; /** # posted request buffers for receiving */ @@ -2599,8 +1799,8 @@ struct ptlrpc_service_part { struct list_head scp_rqbd_posted; /** incoming reqs */ struct list_head scp_req_incoming; - /** timeout before re-posting reqs, in tick */ - cfs_duration_t scp_rqbd_timeout; + /** timeout before re-posting reqs, in jiffies */ + long scp_rqbd_timeout; /** * all threads sleep on this. This wait-queue is signalled when new * incoming request arrives and when difficult reply has to be handled. @@ -2651,7 +1851,7 @@ struct ptlrpc_service_part { /** early reply timer */ struct timer_list scp_at_timer; /** debug */ - cfs_time_t scp_at_checktime; + ktime_t scp_at_checktime; /** check early replies */ unsigned scp_at_check; /** @} */ @@ -2697,34 +1897,38 @@ struct ptlrpcd_ctl { * Stop completion. */ struct completion pc_finishing; - /** - * Thread requests set. - */ - struct ptlrpc_request_set *pc_set; - /** + /** + * Thread requests set. + */ + struct ptlrpc_request_set *pc_set; + /** * Thread name used in kthread_run() - */ - char pc_name[16]; - /** - * Environment for request interpreters to run in. - */ - struct lu_env pc_env; + */ + char pc_name[16]; + /** + * CPT the thread is bound on. + */ + int pc_cpt; /** * Index of ptlrpcd thread in the array. */ - int pc_index; - /** - * Number of the ptlrpcd's partners. - */ - int pc_npartners; - /** - * Pointer to the array of partners' ptlrpcd_ctl structure. - */ - struct ptlrpcd_ctl **pc_partners; - /** - * Record the partner index to be processed next. - */ - int pc_cursor; + int pc_index; + /** + * Pointer to the array of partners' ptlrpcd_ctl structure. + */ + struct ptlrpcd_ctl **pc_partners; + /** + * Number of the ptlrpcd's partners. + */ + int pc_npartners; + /** + * Record the partner index to be processed next. + */ + int pc_cursor; + /** + * Error code if the thread failed to fully start. + */ + int pc_error; }; /* Bits for pc_flags */ @@ -2747,10 +1951,6 @@ enum ptlrpcd_ctl_flags { * This is a recovery ptlrpc thread. */ LIOD_RECOVERY = 1 << 3, - /** - * The ptlrpcd is bound to some CPU core. 
- */ - LIOD_BIND = 1 << 4, }; /** @@ -2795,26 +1995,26 @@ static inline bool nrs_policy_compat_one(const struct ptlrpc_service *svc, /** @} nrs */ /* ptlrpc/events.c */ -extern lnet_handle_eq_t ptlrpc_eq_h; +extern struct lnet_handle_eq ptlrpc_eq_h; extern int ptlrpc_uuid_to_peer(struct obd_uuid *uuid, - lnet_process_id_t *peer, lnet_nid_t *self); + struct lnet_process_id *peer, lnet_nid_t *self); /** * These callbacks are invoked by LNet when something happened to * underlying buffer * @{ */ -extern void request_out_callback(lnet_event_t *ev); -extern void reply_in_callback(lnet_event_t *ev); -extern void client_bulk_callback(lnet_event_t *ev); -extern void request_in_callback(lnet_event_t *ev); -extern void reply_out_callback(lnet_event_t *ev); +extern void request_out_callback(struct lnet_event *ev); +extern void reply_in_callback(struct lnet_event *ev); +extern void client_bulk_callback(struct lnet_event *ev); +extern void request_in_callback(struct lnet_event *ev); +extern void reply_out_callback(struct lnet_event *ev); #ifdef HAVE_SERVER_SUPPORT -extern void server_bulk_callback(lnet_event_t *ev); +extern void server_bulk_callback(struct lnet_event *ev); #endif /** @} */ /* ptlrpc/connection.c */ -struct ptlrpc_connection *ptlrpc_connection_get(lnet_process_id_t peer, +struct ptlrpc_connection *ptlrpc_connection_get(struct lnet_process_id peer, lnet_nid_t self, struct obd_uuid *uuid); int ptlrpc_connection_put(struct ptlrpc_connection *c); @@ -2823,6 +2023,30 @@ int ptlrpc_connection_init(void); void ptlrpc_connection_fini(void); extern lnet_pid_t ptl_get_pid(void); +/* + * Check if the peer connection is on the local node. We need to use GFP_NOFS + * for requests from a local client to avoid recursing into the filesystem + * as we might end up waiting on a page sent in the request we're serving. + * + * Use __GFP_HIGHMEM so that the pages can use all of the available memory + * on 32-bit machines. Use more aggressive GFP_HIGHUSER flags from non-local + * clients to be able to generate more memory pressure on the OSS and allow + * inactive pages to be reclaimed, since it doesn't have any other processes + * or allocations that generate memory reclaim pressure. + * + * See b=17576 (bdf50dc9) and b=19529 (3dcf18d3) for details. 
+ */ +static inline bool ptlrpc_connection_is_local(struct ptlrpc_connection *conn) +{ + if (!conn) + return false; + + if (conn->c_peer.nid == conn->c_self) + return true; + + RETURN(LNetIsPeerLocal(conn->c_peer.nid)); +} + /* ptlrpc/niobuf.c */ /** * Actual interfacing with LNet to put/get/register/unregister stuff @@ -2830,8 +2054,11 @@ extern lnet_pid_t ptl_get_pid(void); */ #ifdef HAVE_SERVER_SUPPORT struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_exp(struct ptlrpc_request *req, - unsigned npages, unsigned max_brw, - unsigned type, unsigned portal); + unsigned nfrags, unsigned max_brw, + unsigned int type, + unsigned portal, + const struct ptlrpc_bulk_frag_ops + *ops); int ptlrpc_start_bulk_transfer(struct ptlrpc_bulk_desc *desc); void ptlrpc_abort_bulk(struct ptlrpc_bulk_desc *desc); @@ -2854,17 +2081,17 @@ int ptlrpc_unregister_bulk(struct ptlrpc_request *req, int async); static inline int ptlrpc_client_bulk_active(struct ptlrpc_request *req) { struct ptlrpc_bulk_desc *desc; - int rc; + int rc; - LASSERT(req != NULL); + LASSERT(req != NULL); desc = req->rq_bulk; - if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK) && - req->rq_bulk_deadline > cfs_time_current_sec()) - return 1; + if (!desc) + return 0; + + if (req->rq_bulk_deadline > ktime_get_real_seconds()) + return 1; - if (!desc) - return 0; spin_lock(&desc->bd_lock); rc = desc->bd_md_count; @@ -2878,7 +2105,6 @@ int ptlrpc_send_reply(struct ptlrpc_request *req, int flags); int ptlrpc_reply(struct ptlrpc_request *req); int ptlrpc_send_error(struct ptlrpc_request *req, int difficult); int ptlrpc_error(struct ptlrpc_request *req); -void ptlrpc_resend_req(struct ptlrpc_request *request); int ptlrpc_at_get_net_latency(struct ptlrpc_request *req); int ptl_send_rpc(struct ptlrpc_request *request, int noreply); int ptlrpc_register_rqbd(struct ptlrpc_request_buffer_desc *rqbd); @@ -2895,11 +2121,11 @@ void ptlrpc_request_committed(struct ptlrpc_request *req, int force); void ptlrpc_init_client(int req_portal, int rep_portal, char *name, struct ptlrpc_client *); void ptlrpc_cleanup_client(struct obd_import *imp); -struct ptlrpc_connection *ptlrpc_uuid_to_connection(struct obd_uuid *uuid); +struct ptlrpc_connection *ptlrpc_uuid_to_connection(struct obd_uuid *uuid, + lnet_nid_t nid4refnet); int ptlrpc_queue_wait(struct ptlrpc_request *req); int ptlrpc_replay_req(struct ptlrpc_request *req); -int ptlrpc_unregister_reply(struct ptlrpc_request *req, int async); void ptlrpc_restart_req(struct ptlrpc_request *req); void ptlrpc_abort_inflight(struct obd_import *imp); void ptlrpc_cleanup_imp(struct obd_import *imp); @@ -2908,25 +2134,18 @@ void ptlrpc_abort_set(struct ptlrpc_request_set *set); struct ptlrpc_request_set *ptlrpc_prep_set(void); struct ptlrpc_request_set *ptlrpc_prep_fcset(int max, set_producer_func func, void *arg); -int ptlrpc_set_add_cb(struct ptlrpc_request_set *set, - set_interpreter_func fn, void *data); -int ptlrpc_set_next_timeout(struct ptlrpc_request_set *); int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set); -int ptlrpc_set_wait(struct ptlrpc_request_set *); -int ptlrpc_expired_set(void *data); -void ptlrpc_interrupted_set(void *data); +int ptlrpc_set_wait(const struct lu_env *env, struct ptlrpc_request_set *); void ptlrpc_mark_interrupted(struct ptlrpc_request *req); void ptlrpc_set_destroy(struct ptlrpc_request_set *); void ptlrpc_set_add_req(struct ptlrpc_request_set *, struct ptlrpc_request *); -void ptlrpc_set_add_new_req(struct ptlrpcd_ctl *pc, - struct ptlrpc_request *req); void 
ptlrpc_free_rq_pool(struct ptlrpc_request_pool *pool); -void ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq); +int ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq); struct ptlrpc_request_pool * ptlrpc_init_rq_pool(int, int, - void (*populate_pool)(struct ptlrpc_request_pool *, int)); + int (*populate_pool)(struct ptlrpc_request_pool *, int)); void ptlrpc_at_set_req_timeout(struct ptlrpc_request *req); struct ptlrpc_request *ptlrpc_request_alloc(struct obd_import *imp, @@ -2943,30 +2162,21 @@ struct ptlrpc_request *ptlrpc_request_alloc_pack(struct obd_import *imp, int ptlrpc_request_bufs_pack(struct ptlrpc_request *request, __u32 version, int opcode, char **bufs, struct ptlrpc_cli_ctx *ctx); -struct ptlrpc_request *ptlrpc_prep_req(struct obd_import *imp, __u32 version, - int opcode, int count, __u32 *lengths, - char **bufs); -struct ptlrpc_request *ptlrpc_prep_req_pool(struct obd_import *imp, - __u32 version, int opcode, - int count, __u32 *lengths, char **bufs, - struct ptlrpc_request_pool *pool); void ptlrpc_req_finished(struct ptlrpc_request *request); void ptlrpc_req_finished_with_imp_lock(struct ptlrpc_request *request); struct ptlrpc_request *ptlrpc_request_addref(struct ptlrpc_request *req); struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req, - unsigned npages, unsigned max_brw, - unsigned type, unsigned portal); -void __ptlrpc_free_bulk(struct ptlrpc_bulk_desc *bulk, int pin); -static inline void ptlrpc_free_bulk_pin(struct ptlrpc_bulk_desc *bulk) -{ - __ptlrpc_free_bulk(bulk, 1); -} -static inline void ptlrpc_free_bulk_nopin(struct ptlrpc_bulk_desc *bulk) -{ - __ptlrpc_free_bulk(bulk, 0); -} + unsigned nfrags, unsigned max_brw, + unsigned int type, + unsigned portal, + const struct ptlrpc_bulk_frag_ops + *ops); + +int ptlrpc_prep_bulk_frag(struct ptlrpc_bulk_desc *desc, + void *frag, int len); void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc, - struct page *page, int pageoffset, int len, int); + struct page *page, int pageoffset, int len, + int pin); static inline void ptlrpc_prep_bulk_page_pin(struct ptlrpc_bulk_desc *desc, struct page *page, int pageoffset, int len) @@ -2981,6 +2191,20 @@ static inline void ptlrpc_prep_bulk_page_nopin(struct ptlrpc_bulk_desc *desc, __ptlrpc_prep_bulk_page(desc, page, pageoffset, len, 0); } +void ptlrpc_free_bulk(struct ptlrpc_bulk_desc *bulk); + +static inline void ptlrpc_release_bulk_page_pin(struct ptlrpc_bulk_desc *desc) +{ + int i; + + for (i = 0; i < desc->bd_iov_count ; i++) + put_page(BD_GET_KIOV(desc, i).kiov_page); +} + +static inline void ptlrpc_release_bulk_noop(struct ptlrpc_bulk_desc *desc) +{ +} + void ptlrpc_retain_replayable_request(struct ptlrpc_request *req, struct obd_import *imp); __u64 ptlrpc_next_xid(void); @@ -3028,8 +2252,8 @@ struct ptlrpc_service_thr_conf { /* user specified threads number, it will be validated due to * other members of this structure. 
*/ unsigned int tc_nthrs_user; - /* set NUMA node affinity for service threads */ - unsigned int tc_cpu_affinity; + /* bind service threads to only CPUs in their associated CPT */ + unsigned int tc_cpu_bind; /* Tags for lu_context associated with service thread */ __u32 tc_ctx_tags; }; @@ -3038,6 +2262,8 @@ struct ptlrpc_service_cpt_conf { struct cfs_cpt_table *cc_cptable; /* string pattern to describe CPTs for a service */ char *cc_pattern; + /* whether or not to have per-CPT service partitions */ + bool cc_affinity; }; struct ptlrpc_service_conf { @@ -3062,26 +2288,26 @@ struct ptlrpc_service_conf { * * @{ */ -void ptlrpc_save_lock(struct ptlrpc_request *req, - struct lustre_handle *lock, int mode, int no_ack); +void ptlrpc_save_lock(struct ptlrpc_request *req, struct lustre_handle *lock, + int mode, bool no_ack, bool convert_lock); void ptlrpc_commit_replies(struct obd_export *exp); void ptlrpc_dispatch_difficult_reply(struct ptlrpc_reply_state *rs); void ptlrpc_schedule_difficult_reply(struct ptlrpc_reply_state *rs); int ptlrpc_hpreq_handler(struct ptlrpc_request *req); struct ptlrpc_service *ptlrpc_register_service( struct ptlrpc_service_conf *conf, - struct proc_dir_entry *proc_entry); + struct kset *parent, + struct dentry *debugfs_entry); void ptlrpc_stop_all_threads(struct ptlrpc_service *svc); int ptlrpc_start_threads(struct ptlrpc_service *svc); int ptlrpc_unregister_service(struct ptlrpc_service *service); -int liblustre_check_services(void *arg); -void ptlrpc_daemonize(char *name); int ptlrpc_service_health_check(struct ptlrpc_service *); void ptlrpc_server_drop_request(struct ptlrpc_request *req); void ptlrpc_request_change_export(struct ptlrpc_request *req, struct obd_export *export); -void ptlrpc_update_export_timer(struct obd_export *exp, long extra_delay); +void ptlrpc_update_export_timer(struct obd_export *exp, + time64_t extra_delay); int ptlrpc_hr_init(void); void ptlrpc_hr_fini(void); @@ -3096,23 +2322,24 @@ void ptlrpc_hr_fini(void); int ptlrpc_connect_import(struct obd_import *imp); int ptlrpc_init_import(struct obd_import *imp); int ptlrpc_disconnect_import(struct obd_import *imp, int noclose); +int ptlrpc_disconnect_and_idle_import(struct obd_import *imp); int ptlrpc_import_recovery_state_machine(struct obd_import *imp); void deuuidify(char *uuid, const char *prefix, char **uuid_start, - int *uuid_len); - + int *uuid_len); +void ptlrpc_import_enter_resend(struct obd_import *imp); /* ptlrpc/pack_generic.c */ int ptlrpc_reconnect_import(struct obd_import *imp); /** @} */ /** - * ptlrpc msg buffer and swab interface + * ptlrpc msg buffer and swab interface * * @{ */ -int ptlrpc_buf_need_swab(struct ptlrpc_request *req, const int inout, - int index); +bool ptlrpc_buf_need_swab(struct ptlrpc_request *req, const int inout, + __u32 index); void ptlrpc_buf_set_swabbed(struct ptlrpc_request *req, const int inout, - int index); + __u32 index); int ptlrpc_unpack_rep_msg(struct ptlrpc_request *req, int len); int ptlrpc_unpack_req_msg(struct ptlrpc_request *req, int len); @@ -3132,32 +2359,32 @@ int lustre_shrink_msg(struct lustre_msg *msg, int segment, unsigned int newlen, int move_data); void lustre_free_reply_state(struct ptlrpc_reply_state *rs); int __lustre_unpack_msg(struct lustre_msg *m, int len); -int lustre_msg_hdr_size(__u32 magic, int count); -int lustre_msg_size(__u32 magic, int count, __u32 *lengths); -int lustre_msg_size_v2(int count, __u32 *lengths); -int lustre_packed_msg_size(struct lustre_msg *msg); -int lustre_msg_early_size(void); -void 
*lustre_msg_buf_v2(struct lustre_msg_v2 *m, int n, int min_size); -void *lustre_msg_buf(struct lustre_msg *m, int n, int minlen); -int lustre_msg_buflen(struct lustre_msg *m, int n); -void lustre_msg_set_buflen(struct lustre_msg *m, int n, int len); -int lustre_msg_bufcount(struct lustre_msg *m); -char *lustre_msg_string(struct lustre_msg *m, int n, int max_len); +__u32 lustre_msg_hdr_size(__u32 magic, __u32 count); +__u32 lustre_msg_size(__u32 magic, int count, __u32 *lengths); +__u32 lustre_msg_size_v2(int count, __u32 *lengths); +__u32 lustre_packed_msg_size(struct lustre_msg *msg); +__u32 lustre_msg_early_size(void); +void *lustre_msg_buf_v2(struct lustre_msg_v2 *m, __u32 n, __u32 min_size); +void *lustre_msg_buf(struct lustre_msg *m, __u32 n, __u32 minlen); +__u32 lustre_msg_buflen(struct lustre_msg *m, __u32 n); +void lustre_msg_set_buflen(struct lustre_msg *m, __u32 n, __u32 len); +__u32 lustre_msg_bufcount(struct lustre_msg *m); +char *lustre_msg_string(struct lustre_msg *m, __u32 n, __u32 max_len); __u32 lustre_msghdr_get_flags(struct lustre_msg *msg); void lustre_msghdr_set_flags(struct lustre_msg *msg, __u32 flags); __u32 lustre_msg_get_flags(struct lustre_msg *msg); -void lustre_msg_add_flags(struct lustre_msg *msg, int flags); -void lustre_msg_set_flags(struct lustre_msg *msg, int flags); -void lustre_msg_clear_flags(struct lustre_msg *msg, int flags); +void lustre_msg_add_flags(struct lustre_msg *msg, __u32 flags); +void lustre_msg_set_flags(struct lustre_msg *msg, __u32 flags); +void lustre_msg_clear_flags(struct lustre_msg *msg, __u32 flags); __u32 lustre_msg_get_op_flags(struct lustre_msg *msg); -void lustre_msg_add_op_flags(struct lustre_msg *msg, int flags); -void lustre_msg_set_op_flags(struct lustre_msg *msg, int flags); +void lustre_msg_add_op_flags(struct lustre_msg *msg, __u32 flags); struct lustre_handle *lustre_msg_get_handle(struct lustre_msg *msg); __u32 lustre_msg_get_type(struct lustre_msg *msg); -__u32 lustre_msg_get_version(struct lustre_msg *msg); -void lustre_msg_add_version(struct lustre_msg *msg, int version); +enum lustre_msg_version lustre_msg_get_version(struct lustre_msg *msg); +void lustre_msg_add_version(struct lustre_msg *msg, __u32 version); __u32 lustre_msg_get_opc(struct lustre_msg *msg); __u64 lustre_msg_get_last_xid(struct lustre_msg *msg); +__u16 lustre_msg_get_tag(struct lustre_msg *msg); __u64 lustre_msg_get_last_committed(struct lustre_msg *msg); __u64 *lustre_msg_get_versions(struct lustre_msg *msg); __u64 lustre_msg_get_transno(struct lustre_msg *msg); @@ -3167,21 +2394,18 @@ void lustre_msg_set_slv(struct lustre_msg *msg, __u64 slv); void lustre_msg_set_limit(struct lustre_msg *msg, __u64 limit); int lustre_msg_get_status(struct lustre_msg *msg); __u32 lustre_msg_get_conn_cnt(struct lustre_msg *msg); -int lustre_msg_is_v1(struct lustre_msg *msg); __u32 lustre_msg_get_magic(struct lustre_msg *msg); __u32 lustre_msg_get_timeout(struct lustre_msg *msg); __u32 lustre_msg_get_service_time(struct lustre_msg *msg); char *lustre_msg_get_jobid(struct lustre_msg *msg); __u32 lustre_msg_get_cksum(struct lustre_msg *msg); -#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 7, 53, 0) -__u32 lustre_msg_calc_cksum(struct lustre_msg *msg, int compat18); -#else +__u64 lustre_msg_get_mbits(struct lustre_msg *msg); __u32 lustre_msg_calc_cksum(struct lustre_msg *msg); -#endif void lustre_msg_set_handle(struct lustre_msg *msg,struct lustre_handle *handle); void lustre_msg_set_type(struct lustre_msg *msg, __u32 type); void lustre_msg_set_opc(struct lustre_msg 
*msg, __u32 opc); void lustre_msg_set_last_xid(struct lustre_msg *msg, __u64 last_xid); +void lustre_msg_set_tag(struct lustre_msg *msg, __u16 tag); void lustre_msg_set_last_committed(struct lustre_msg *msg,__u64 last_committed); void lustre_msg_set_versions(struct lustre_msg *msg, __u64 *versions); void lustre_msg_set_transno(struct lustre_msg *msg, __u64 transno); @@ -3193,6 +2417,7 @@ void lustre_msg_set_timeout(struct lustre_msg *msg, __u32 timeout); void lustre_msg_set_service_time(struct lustre_msg *msg, __u32 service_time); void lustre_msg_set_jobid(struct lustre_msg *msg, char *jobid); void lustre_msg_set_cksum(struct lustre_msg *msg, __u32 cksum); +void lustre_msg_set_mbits(struct lustre_msg *msg, __u64 mbits); static inline void lustre_shrink_reply(struct ptlrpc_request *req, int segment, @@ -3244,13 +2469,20 @@ ptlrpc_rqphase_move(struct ptlrpc_request *req, enum rq_phase new_phase) if (req->rq_phase == new_phase) return; - if (new_phase == RQ_PHASE_UNREGISTERING) { + if (new_phase == RQ_PHASE_UNREG_RPC || + new_phase == RQ_PHASE_UNREG_BULK) { + /* No embedded unregistering phases */ + if (req->rq_phase == RQ_PHASE_UNREG_RPC || + req->rq_phase == RQ_PHASE_UNREG_BULK) + return; + req->rq_next_phase = req->rq_phase; if (req->rq_import) atomic_inc(&req->rq_import->imp_unregistering); } - if (req->rq_phase == RQ_PHASE_UNREGISTERING) { + if (req->rq_phase == RQ_PHASE_UNREG_RPC || + req->rq_phase == RQ_PHASE_UNREG_BULK) { if (req->rq_import) atomic_dec(&req->rq_import->imp_unregistering); } @@ -3262,14 +2494,11 @@ ptlrpc_rqphase_move(struct ptlrpc_request *req, enum rq_phase new_phase) } /** - * Returns true if request \a req got early reply and hard deadline is not met + * Returns true if request \a req got early reply and hard deadline is not met */ static inline int ptlrpc_client_early(struct ptlrpc_request *req) { - if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) && - req->rq_reply_deadline > cfs_time_current_sec()) - return 0; return req->rq_early; } @@ -3279,20 +2508,18 @@ ptlrpc_client_early(struct ptlrpc_request *req) static inline int ptlrpc_client_replied(struct ptlrpc_request *req) { - if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) && - req->rq_reply_deadline > cfs_time_current_sec()) - return 0; - return req->rq_replied; + if (req->rq_reply_deadline > ktime_get_real_seconds()) + return 0; + return req->rq_replied; } /** Returns true if request \a req is in process of receiving server reply */ static inline int ptlrpc_client_recv(struct ptlrpc_request *req) { - if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) && - req->rq_reply_deadline > cfs_time_current_sec()) - return 1; - return req->rq_receiving_reply; + if (req->rq_reply_deadline > ktime_get_real_seconds()) + return 1; + return req->rq_receiving_reply; } static inline int @@ -3301,13 +2528,17 @@ ptlrpc_client_recv_or_unlink(struct ptlrpc_request *req) int rc; spin_lock(&req->rq_lock); - if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) && - req->rq_reply_deadline > cfs_time_current_sec()) { + if (req->rq_reply_deadline > ktime_get_real_seconds()) { + spin_unlock(&req->rq_lock); + return 1; + } + if (req->rq_req_deadline > ktime_get_real_seconds()) { spin_unlock(&req->rq_lock); return 1; } - rc = req->rq_receiving_reply ; - rc = rc || req->rq_req_unlink || req->rq_reply_unlink; + + rc = !req->rq_req_unlinked || !req->rq_reply_unlinked || + req->rq_receiving_reply; spin_unlock(&req->rq_lock); return rc; } @@ -3315,6 +2546,7 @@ ptlrpc_client_recv_or_unlink(struct ptlrpc_request *req) static inline void 
ptlrpc_client_wake_req(struct ptlrpc_request *req) { + smp_mb(); if (req->rq_set == NULL) wake_up(&req->rq_reply_waitq); else @@ -3366,11 +2598,8 @@ static inline int ptlrpc_req_get_repsize(struct ptlrpc_request *req) static inline int ptlrpc_send_limit_expired(struct ptlrpc_request *req) { if (req->rq_delay_limit != 0 && - cfs_time_before(cfs_time_add(req->rq_queued_time, - cfs_time_seconds(req->rq_delay_limit)), - cfs_time_current())) { + req->rq_queued_time + req->rq_delay_limit < ktime_get_seconds()) return 1; - } return 0; } @@ -3437,7 +2666,7 @@ struct timeout_item; typedef int (*timeout_cb_t)(struct timeout_item *, void *); int ptlrpc_pinger_add_import(struct obd_import *imp); int ptlrpc_pinger_del_import(struct obd_import *imp); -int ptlrpc_add_timeout_client(int time, enum timeout_event event, +int ptlrpc_add_timeout_client(time64_t time, enum timeout_event event, timeout_cb_t cb, void *data, struct list_head *obd_list); int ptlrpc_del_timeout_client(struct list_head *obd_list, @@ -3451,43 +2680,11 @@ void ptlrpc_pinger_ir_down(void); /** @} */ int ptlrpc_pinger_suppress_pings(void); -/* ptlrpc daemon bind policy */ -typedef enum { - /* all ptlrpcd threads are free mode */ - PDB_POLICY_NONE = 1, - /* all ptlrpcd threads are bound mode */ - PDB_POLICY_FULL = 2, - /* ... */ - PDB_POLICY_PAIR = 3, - /* ... , - * means each ptlrpcd[X] has two partners: thread[X-1] and thread[X+1]. - * If kernel supports NUMA, pthrpcd threads are binded and - * grouped by NUMA node */ - PDB_POLICY_NEIGHBOR = 4, -} pdb_policy_t; - -/* ptlrpc daemon load policy - * It is caller's duty to specify how to push the async RPC into some ptlrpcd - * queue, but it is not enforced, affected by "ptlrpcd_bind_policy". If it is - * "PDB_POLICY_FULL", then the RPC will be processed by the selected ptlrpcd, - * Otherwise, the RPC may be processed by the selected ptlrpcd or its partner, - * depends on which is scheduled firstly, to accelerate the RPC processing. 
*/ -typedef enum { - /* on the same CPU core as the caller */ - PDL_POLICY_SAME = 1, - /* within the same CPU partition, but not the same core as the caller */ - PDL_POLICY_LOCAL = 2, - /* round-robin on all CPU cores, but not the same core as the caller */ - PDL_POLICY_ROUND = 3, - /* the specified CPU core is preferred, but not enforced */ - PDL_POLICY_PREFERRED = 4, -} pdl_policy_t; - /* ptlrpc/ptlrpcd.c */ void ptlrpcd_stop(struct ptlrpcd_ctl *pc, int force); void ptlrpcd_free(struct ptlrpcd_ctl *pc); void ptlrpcd_wake(struct ptlrpc_request *req); -void ptlrpcd_add_req(struct ptlrpc_request *req, pdl_policy_t policy, int idx); +void ptlrpcd_add_req(struct ptlrpc_request *req); void ptlrpcd_add_rqset(struct ptlrpc_request_set *set); int ptlrpcd_addref(void); void ptlrpcd_decref(void); @@ -3498,7 +2695,8 @@ void ptlrpcd_decref(void); * @{ */ const char* ll_opcode2str(__u32 opcode); -#ifdef LPROCFS +const int ll_str2opcode(const char *ops); +#ifdef CONFIG_PROC_FS void ptlrpc_lprocfs_register_obd(struct obd_device *obd); void ptlrpc_lprocfs_unregister_obd(struct obd_device *obd); void ptlrpc_lprocfs_brw(struct ptlrpc_request *req, int bytes); @@ -3511,15 +2709,12 @@ static inline void ptlrpc_lprocfs_brw(struct ptlrpc_request *req, int bytes) {} /* ptlrpc/llog_server.c */ int llog_origin_handle_open(struct ptlrpc_request *req); -int llog_origin_handle_destroy(struct ptlrpc_request *req); int llog_origin_handle_prev_block(struct ptlrpc_request *req); int llog_origin_handle_next_block(struct ptlrpc_request *req); int llog_origin_handle_read_header(struct ptlrpc_request *req); -int llog_origin_handle_close(struct ptlrpc_request *req); /* ptlrpc/llog_client.c */ extern struct llog_operations llog_client_ops; - /** @} net */ #endif
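The replacement of the old BULK_{GET,PUT}_{SOURCE,SINK} constants with the combinable `enum ptlrpc_bulk_op_type` flags above is easiest to see in isolation. Below is a minimal userspace sketch, not part of the patch: it copies the flag values from the hunk above rather than including the kernel header, and the shortened helper names (`is_bulk_op_get` and friends) plus the `main()` scaffolding are illustrative only. It shows how a combined descriptor type such as GET_SOURCE | BUF_KIOV decomposes into direction (PUT/GET), role (active/passive), and buffer kind (kvec/kiov), the same way the new ptlrpc_is_bulk_* predicates do.

```c
/* Standalone illustration only -- mirrors values from the hunk above;
 * the kernel header itself is not included. */
#include <stdbool.h>
#include <stdio.h>

enum ptlrpc_bulk_op_type {
	PTLRPC_BULK_OP_ACTIVE	= 0x00000001,
	PTLRPC_BULK_OP_PASSIVE	= 0x00000002,
	PTLRPC_BULK_OP_PUT	= 0x00000004,
	PTLRPC_BULK_OP_GET	= 0x00000008,
	PTLRPC_BULK_BUF_KVEC	= 0x00000010,
	PTLRPC_BULK_BUF_KIOV	= 0x00000020,
	PTLRPC_BULK_GET_SOURCE	= PTLRPC_BULK_OP_PASSIVE | PTLRPC_BULK_OP_GET,
	PTLRPC_BULK_PUT_SINK	= PTLRPC_BULK_OP_PASSIVE | PTLRPC_BULK_OP_PUT,
	PTLRPC_BULK_GET_SINK	= PTLRPC_BULK_OP_ACTIVE | PTLRPC_BULK_OP_GET,
	PTLRPC_BULK_PUT_SOURCE	= PTLRPC_BULK_OP_ACTIVE | PTLRPC_BULK_OP_PUT,
};

/* Same shape as the predicates added by the patch. */
static bool is_bulk_op_get(enum ptlrpc_bulk_op_type type)
{
	return (type & PTLRPC_BULK_OP_GET) == PTLRPC_BULK_OP_GET;
}

static bool is_bulk_op_passive(enum ptlrpc_bulk_op_type type)
{
	return ((type & PTLRPC_BULK_OP_ACTIVE) |
		(type & PTLRPC_BULK_OP_PASSIVE)) == PTLRPC_BULK_OP_PASSIVE;
}

static bool is_bulk_desc_kiov(enum ptlrpc_bulk_op_type type)
{
	return ((type & PTLRPC_BULK_BUF_KVEC) |
		(type & PTLRPC_BULK_BUF_KIOV)) == PTLRPC_BULK_BUF_KIOV;
}

int main(void)
{
	/* A descriptor acting as the passive source of a GET,
	 * with its pages described by a kiov. */
	enum ptlrpc_bulk_op_type type =
		PTLRPC_BULK_GET_SOURCE | PTLRPC_BULK_BUF_KIOV;

	/* prints: get=1 passive=1 kiov=1 */
	printf("get=%d passive=%d kiov=%d\n",
	       is_bulk_op_get(type), is_bulk_op_passive(type),
	       is_bulk_desc_kiov(type));
	return 0;
}
```

The point of the change is visible in struct ptlrpc_bulk_desc itself: the old two-bit `bd_type:2` holding one of four opaque constants becomes a full `enum ptlrpc_bulk_op_type bd_type`, so direction, role, and buffer layout can each be tested independently by the helpers rather than decoded from a packed value.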