#define PTLRPC_MD_OPTIONS 0
/**
- * Define maxima for bulk I/O
- * CAVEAT EMPTOR, with multinet (i.e. routers forwarding between networks)
- * these limits are system wide and not interface-local. */
-#define PTLRPC_MAX_BRW_BITS LNET_MTU_BITS
-#define PTLRPC_MAX_BRW_SIZE (1 << LNET_MTU_BITS)
-#define PTLRPC_MAX_BRW_PAGES (PTLRPC_MAX_BRW_SIZE >> CFS_PAGE_SHIFT)
+ * Max # of bulk operations in one request.
+ * In order for the client and server to properly negotiate the maximum
+ * possible transfer size, PTLRPC_BULK_OPS_COUNT must be a power-of-two
+ * value. The client is free to limit the actual RPC size for any bulk
+ * transfer via cl_max_pages_per_rpc to some non-power-of-two value. */
+#define PTLRPC_BULK_OPS_BITS 2
+#define PTLRPC_BULK_OPS_COUNT (1U << PTLRPC_BULK_OPS_BITS)
+/**
+ * PTLRPC_BULK_OPS_MASK is for the convenience of the client only, and
+ * should not be used on the server at all. Otherwise, it imposes a
+ * protocol limitation on the maximum RPC size that can be used by any
+ * RPC sent to that server in the future. Instead, the server should
+ * use the negotiated per-client ocd_brw_size to determine the bulk
+ * RPC count. */
+#define PTLRPC_BULK_OPS_MASK (~((__u64)PTLRPC_BULK_OPS_COUNT - 1))
+
+/**
+ * Define maxima for bulk I/O.
+ *
+ * A single PTLRPC BRW request is sent via up to PTLRPC_BULK_OPS_COUNT
+ * of LNET_MTU sized RDMA transfers. Clients and servers negotiate the
+ * currently supported maximum between peers at connect via ocd_brw_size.
+ */
+#define PTLRPC_MAX_BRW_BITS (LNET_MTU_BITS + PTLRPC_BULK_OPS_BITS)
+#define PTLRPC_MAX_BRW_SIZE (1 << PTLRPC_MAX_BRW_BITS)
+#define PTLRPC_MAX_BRW_PAGES (PTLRPC_MAX_BRW_SIZE >> CFS_PAGE_SHIFT)
#define ONE_MB_BRW_SIZE (1 << LNET_MTU_BITS)
#define MD_MAX_BRW_SIZE (1 << LNET_MTU_BITS)
#define MD_MAX_BRW_PAGES (MD_MAX_BRW_SIZE >> CFS_PAGE_SHIFT)
-#define DT_MAX_BRW_SIZE (1 << LNET_MTU_BITS)
+#define DT_MAX_BRW_SIZE PTLRPC_MAX_BRW_SIZE
+#define DT_MAX_BRW_PAGES (DT_MAX_BRW_SIZE >> CFS_PAGE_SHIFT)
#define OFD_MAX_BRW_SIZE (1 << LNET_MTU_BITS)
/* When PAGE_SIZE is a constant, we can check our arithmetic here with cpp! */
# if (PTLRPC_MAX_BRW_SIZE != (PTLRPC_MAX_BRW_PAGES * CFS_PAGE_SIZE))
# error "PTLRPC_MAX_BRW_SIZE isn't PTLRPC_MAX_BRW_PAGES * CFS_PAGE_SIZE"
# endif
-# if (PTLRPC_MAX_BRW_SIZE > LNET_MTU)
+# if (PTLRPC_MAX_BRW_SIZE > LNET_MTU * PTLRPC_BULK_OPS_COUNT)
# error "PTLRPC_MAX_BRW_SIZE too big"
# endif
-# if (PTLRPC_MAX_BRW_PAGES > LNET_MAX_IOV)
+# if (PTLRPC_MAX_BRW_PAGES > LNET_MAX_IOV * PTLRPC_BULK_OPS_COUNT)
# error "PTLRPC_MAX_BRW_PAGES too big"
# endif
#endif /* __KERNEL__ */
* threads for each partition to keep service healthy, so total threads
* number should be 24 * 8 = 192.
*
- * So with these constants, threads number wil be at the similar level
+ * So with these constants, threads number will be at the similar level
* of old versions, unless target machine has over a hundred cores
*/
#define LDLM_THR_FACTOR 8
#define LDLM_NTHRS_BASE 24
#define LDLM_NTHRS_MAX (cfs_num_online_cpus() == 1 ? 64 : 128)
-#define LDLM_BL_THREADS LDLM_NTHRS_AUTO_INIT
-#define LDLM_NBUFS (64 * cfs_num_online_cpus())
-#define LDLM_BUFSIZE (8 * 1024)
-#define LDLM_MAXREQSIZE (5 * 1024)
-#define LDLM_MAXREPSIZE (1024)
+#define LDLM_BL_THREADS LDLM_NTHRS_AUTO_INIT
+#define LDLM_CLIENT_NBUFS 1
+#define LDLM_SERVER_NBUFS 64
+#define LDLM_BUFSIZE (8 * 1024)
+#define LDLM_MAXREQSIZE (5 * 1024)
+#define LDLM_MAXREPSIZE (1024)
/*
* MDS threads constants:
#define MDS_OTHR_NTHRS_INIT PTLRPC_NTHRS_INIT
#define MDS_OTHR_NTHRS_MAX MDS_MAX_OTHR_THREADS
-#define MDS_NBUFS (64 * cfs_num_online_cpus())
+#define MDS_NBUFS 64
+
/**
* Assume file name length = FNAME_MAX = 256 (true for ext3).
- * path name length = PATH_MAX = 4096
- * LOV MD size max = EA_MAX = 48000 (2000 stripes)
+ * path name length = PATH_MAX = 4096
+ * LOV MD size max = EA_MAX = 24 * 2000
+ * (NB: 24 is size of lov_ost_data)
+ * LOV LOGCOOKIE size max = 32 * 2000
+ * (NB: 32 is size of llog_cookie)
* symlink: FNAME_MAX + PATH_MAX <- largest
* link: FNAME_MAX + PATH_MAX (mds_rec_link < mds_rec_create)
* rename: FNAME_MAX + FNAME_MAX
* MDS_MAXREQSIZE ~= 4736 bytes =
* lustre_msg + ldlm_request + mdt_body + mds_rec_create + FNAME_MAX + PATH_MAX
* MDS_MAXREPSIZE ~= 8300 bytes = lustre_msg + llog_header
- * or, for mds_close() and mds_reint_unlink() on a many-OST filesystem:
- * = 9210 bytes = lustre_msg + mdt_body + 160 * (easize + cookiesize)
*
* Realistic size is about 512 bytes (20 character name + 128 char symlink),
* except in the open case where there are a large number of OSTs in a LOV.
*/
-#define MDS_MAXREPSIZE max(10 * 1024, 362 + LOV_MAX_STRIPE_COUNT * 56)
-#define MDS_MAXREQSIZE MDS_MAXREPSIZE
+#define MDS_MAXREQSIZE (5 * 1024) /* >= 4736 */
+#define MDS_MAXREPSIZE (9 * 1024) /* >= 8300 */
+
+/**
+ * MDS incoming request with LOV EA
+ * 24 = sizeof(struct lov_ost_data), i.e: replay of opencreate
+ */
+#define MDS_LOV_MAXREQSIZE max(MDS_MAXREQSIZE, \
+ 362 + LOV_MAX_STRIPE_COUNT * 24)
+/**
+ * MDS outgoing reply with LOV EA
+ *
+ * NB: max reply size Lustre 2.4+ client can get from old MDS is:
+ * LOV_MAX_STRIPE_COUNT * (llog_cookie + lov_ost_data) + extra bytes
+ *
+ * but 2.4 or later MDS will never send reply with llog_cookie to any
+ * version client. This macro is defined for server side reply buffer size.
+ */
+#define MDS_LOV_MAXREPSIZE MDS_LOV_MAXREQSIZE
+
+/**
+ * This is the size of a maximum REINT_SETXATTR request:
+ *
+ * lustre_msg 56 (32 + 4 x 5 + 4)
+ * ptlrpc_body 184
+ * mdt_rec_setxattr 136
+ * lustre_capa 120
+ * name 256 (XATTR_NAME_MAX)
+ * value 65536 (XATTR_SIZE_MAX)
+ */
+#define MDS_EA_MAXREQSIZE 66288
+
+/**
+ * These are the maximum request and reply sizes (rounded up to 1 KB
+ * boundaries) for the "regular" MDS_REQUEST_PORTAL and MDS_REPLY_PORTAL.
+ */
+#define MDS_REG_MAXREQSIZE (((max(MDS_EA_MAXREQSIZE, \
+ MDS_LOV_MAXREQSIZE) + 1023) >> 10) << 10)
+#define MDS_REG_MAXREPSIZE MDS_REG_MAXREQSIZE
+
+/**
+ * The update request includes all of updates from the create, which might
+ * include linkea (4K maximum), together with other updates, we set it to 9K:
+ * lustre_msg + ptlrpc_body + UPDATE_BUF_SIZE (8K)
+ */
+#define MDS_OUT_MAXREQSIZE (9 * 1024)
+#define MDS_OUT_MAXREPSIZE MDS_MAXREPSIZE
+
+/** MDS_BUFSIZE = max_reqsize (w/o LOV EA) + max sptlrpc payload size */
+#define MDS_BUFSIZE max(MDS_MAXREQSIZE + SPTLRPC_MAX_PAYLOAD, \
+ 8 * 1024)
+
+/**
+ * MDS_REG_BUFSIZE should at least be MDS_REG_MAXREQSIZE + SPTLRPC_MAX_PAYLOAD.
+ * However, we need to allocate a much larger buffer for it because LNet
+ * requires each MD(rqbd) to have at least MDS_REG_MAXREQSIZE bytes left to avoid
+ * dropping of maximum-sized incoming request. So if MDS_REG_BUFSIZE is only a
+ * little larger than MDS_REG_MAXREQSIZE, then it can only fit in one request
+ * even if there are about MDS_REG_MAXREQSIZE bytes left in a rqbd, and memory
+ * utilization is very low.
+ *
+ * In the meanwhile, size of rqbd can't be too large, because rqbd can't be
+ * reused until all requests fit in it have been processed and released,
+ * which means one long blocked request can prevent the rqbd be reused.
+ * Now we set request buffer size to 160 KB, so even if each rqbd is unlinked
+ * from LNet with unused 65 KB, buffer utilization will be about 59%.
+ * Please check LU-2432 for details.
+ */
+#define MDS_REG_BUFSIZE max(MDS_REG_MAXREQSIZE + SPTLRPC_MAX_PAYLOAD, \
+ 160 * 1024)
-/** MDS_BUFSIZE = max_reqsize + max sptlrpc payload size */
-#define MDS_BUFSIZE (MDS_MAXREQSIZE + 1024)
+/**
+ * MDS_OUT_BUFSIZE = max_out_reqsize + max sptlrpc payload (~1K) which is
+ * about 10K, for the same reason as MDS_REG_BUFSIZE, we also give some
+ * extra bytes to each request buffer to improve buffer utilization rate.
+ */
+#define MDS_OUT_BUFSIZE max(MDS_OUT_MAXREQSIZE + SPTLRPC_MAX_PAYLOAD, \
+ 24 * 1024)
/** FLD_MAXREQSIZE == lustre_msg + __u32 padding + ptlrpc_body + opc */
#define FLD_MAXREQSIZE (160)
/** FLD_MAXREPSIZE == lustre_msg + ptlrpc_body */
#define FLD_MAXREPSIZE (152)
+#define FLD_BUFSIZE (1 << 12)
/**
* SEQ_MAXREQSIZE == lustre_msg + __u32 padding + ptlrpc_body + opc + lu_range +
/** SEQ_MAXREPSIZE == lustre_msg + ptlrpc_body + lu_range */
#define SEQ_MAXREPSIZE (152)
+#define SEQ_BUFSIZE (1 << 12)
/** MGS threads must be >= 3, see bug 22458 comment #28 */
#define MGS_NTHRS_INIT (PTLRPC_NTHRS_INIT + 1)
#define MGS_NTHRS_MAX 32
-#define MGS_NBUFS (64 * cfs_num_online_cpus())
+#define MGS_NBUFS 64
#define MGS_BUFSIZE (8 * 1024)
#define MGS_MAXREQSIZE (7 * 1024)
#define MGS_MAXREPSIZE (9 * 1024)
#define OSS_CR_NTHRS_BASE 8
#define OSS_CR_NTHRS_MAX 64
-#define OST_NBUFS (64 * cfs_num_online_cpus())
-#define OST_BUFSIZE (8 * 1024)
-
/**
- * OST_MAXREQSIZE ~= 4768 bytes =
- * lustre_msg + obdo + 16 * obd_ioobj + 256 * niobuf_remote
+ * OST_IO_MAXREQSIZE ~=
+ * lustre_msg + ptlrpc_body + obdo + obd_ioobj +
+ * DT_MAX_BRW_PAGES * niobuf_remote
*
* - single object with 16 pages is 512 bytes
- * - OST_MAXREQSIZE must be at least 1 page of cookies plus some spillover
+ * - OST_IO_MAXREQSIZE must be at least 1 page of cookies plus some spillover
+ * - Must be a multiple of 1024
+ * - actual size is about 18K
+ */
+#define _OST_MAXREQSIZE_SUM (sizeof(struct lustre_msg) + \
+ sizeof(struct ptlrpc_body) + \
+ sizeof(struct obdo) + \
+ sizeof(struct obd_ioobj) + \
+ sizeof(struct niobuf_remote) * DT_MAX_BRW_PAGES)
+/**
+ * FIEMAP request can be 4K+ for now
+ */
+#define OST_MAXREQSIZE (5 * 1024)
+#define OST_IO_MAXREQSIZE max_t(int, OST_MAXREQSIZE, \
+ (((_OST_MAXREQSIZE_SUM - 1) | (1024 - 1)) + 1))
+
+#define OST_MAXREPSIZE (9 * 1024)
+#define OST_IO_MAXREPSIZE OST_MAXREPSIZE
+
+#define OST_NBUFS 64
+/** OST_BUFSIZE = max_reqsize + max sptlrpc payload size */
+#define OST_BUFSIZE max_t(int, OST_MAXREQSIZE + 1024, 16 * 1024)
+/**
+ * OST_IO_MAXREQSIZE is 18K, giving extra 46K can increase buffer utilization
+ * rate of request buffer, please check comment of MDS_REG_BUFSIZE for details.
*/
-#define OST_MAXREQSIZE (5 * 1024)
-#define OST_MAXREPSIZE (9 * 1024)
+#define OST_IO_BUFSIZE max_t(int, OST_IO_MAXREQSIZE + 1024, 64 * 1024)
/* Macro to hide a typecast. */
#define ptlrpc_req_async_args(req) ((void *)&req->rq_async_args)
*/
enum ptlrpc_nrs_ctl {
/**
+ * Not a valid opcode.
+ */
+ PTLRPC_NRS_CTL_INVALID,
+ /**
* Activate the policy.
*/
PTLRPC_NRS_CTL_START,
*/
PTLRPC_NRS_CTL_STOP,
/**
- * Recycle resources for inactive policies.
- */
- PTLRPC_NRS_CTL_SHRINK,
- /**
- * Not a valid opcode.
- */
- PTLRPC_NRS_CTL_INVALID,
- /**
* Policies can start using opcodes from this value and onwards for
* their own purposes; the assigned value itself is arbitrary.
*/
/**
* Called during policy registration; this operation is optional.
*
- * \param[in] policy The policy being initialized
+ * \param[in,out] policy The policy being initialized
*/
int (*op_policy_init) (struct ptlrpc_nrs_policy *policy);
/**
* Called during policy unregistration; this operation is optional.
*
- * \param[in] policy The policy being unregistered/finalized
+ * \param[in,out] policy The policy being unregistered/finalized
*/
void (*op_policy_fini) (struct ptlrpc_nrs_policy *policy);
/**
* Called when activating a policy via lprocfs; policies allocate and
* initialize their resources here; this operation is optional.
*
- * \param[in] policy The policy being started
+ * \param[in,out] policy The policy being started
*
* \see nrs_policy_start_locked()
*/
* Called when deactivating a policy via lprocfs; policies deallocate
* their resources here; this operation is optional
*
- * \param[in] policy The policy being stopped
+ * \param[in,out] policy The policy being stopped
*
- * \see nrs_policy_stop_final()
+ * \see nrs_policy_stop0()
*/
void (*op_policy_stop) (struct ptlrpc_nrs_policy *policy);
/**
* \e PTLRPC_NRS_CTL_START and \e PTLRPC_NRS_CTL_GET_INFO; analogous
* to an ioctl; this operation is optional.
*
- * \param[in] policy The policy carrying out operation \a opc
+ * \param[in,out] policy The policy carrying out operation \a opc
* \param[in] opc The command operation being carried out
* \param[in,out] arg An generic buffer for communication between the
* user and the control operation
* service. Policies should return -ve for requests they do not wish
* to handle. This operation is mandatory.
*
- * \param[in] policy The policy we're getting resources for.
- * \param[in] nrq The request we are getting resources for.
- * \param[in] parent The parent resource of the resource being
+ * \param[in,out] policy The policy we're getting resources for.
+ * \param[in,out] nrq The request we are getting resources for.
+ * \param[in] parent The parent resource of the resource being
* requested; set to NULL if none.
- * \param[out] resp The resource is to be returned here; the
+ * \param[out] resp The resource is to be returned here; the
* fallback policy in an NRS head should
* \e always return a non-NULL pointer value.
* \param[in] moving_req When set, signifies that this is an attempt
*/
int (*op_res_get) (struct ptlrpc_nrs_policy *policy,
struct ptlrpc_nrs_request *nrq,
- struct ptlrpc_nrs_resource *parent,
+ const struct ptlrpc_nrs_resource *parent,
struct ptlrpc_nrs_resource **resp,
bool moving_req);
/**
* Called when releasing references taken for resources in the resource
* hierarchy for the request; this operation is optional.
*
- * \param[in] policy The policy the resource belongs to
- * \param[in] res The resource to be freed
+ * \param[in,out] policy The policy the resource belongs to
+ * \param[in] res The resource to be freed
*
* \see ptlrpc_nrs_req_finalize()
* \see ptlrpc_nrs_hpreq_add_nolock()
* \see ptlrpc_nrs_req_hp_move()
*/
void (*op_res_put) (struct ptlrpc_nrs_policy *policy,
- struct ptlrpc_nrs_resource *res);
+ const struct ptlrpc_nrs_resource *res);
/**
- * Obtain a request for handling from the policy via polling; this
- * operation is mandatory.
+ * Obtains a request for handling from the policy, and optionally
+ * removes the request from the policy; this operation is mandatory.
*
- * \param[in] policy The policy to poll
+ * \param[in,out] policy The policy to poll
+ * \param[in] peek When set, signifies that we just want to
+ * examine the request, and not handle it, so the
+ * request is not removed from the policy.
+ * \param[in] force When set, it will force a policy to return a
+ * request if it has one queued.
*
- * \retval NULL No erquest available for handling
+ * \retval NULL No request available for handling
* \retval valid-pointer The request polled for handling
*
- * \see ptlrpc_nrs_req_poll_nolock()
+ * \see ptlrpc_nrs_req_get_nolock()
*/
struct ptlrpc_nrs_request *
- (*op_req_poll) (struct ptlrpc_nrs_policy *policy);
+ (*op_req_get) (struct ptlrpc_nrs_policy *policy, bool peek,
+ bool force);
/**
* Called when attempting to add a request to a policy for later
* handling; this operation is mandatory.
*
- * \param[in] policy The policy on which to enqueue \a nrq
- * \param[in] nrq The request to enqueue
+ * \param[in,out] policy The policy on which to enqueue \a nrq
+ * \param[in,out] nrq The request to enqueue
*
* \retval 0 success
* \retval != 0 error
* called after a request has been polled successfully from the policy
* for handling; this operation is mandatory.
*
- * \param[in] policy The policy the request \a nrq belongs to
- * \param[in] nrq The request to dequeue
+ * \param[in,out] policy The policy the request \a nrq belongs to
+ * \param[in,out] nrq The request to dequeue
*
* \see ptlrpc_nrs_req_del_nolock()
*/
void (*op_req_dequeue) (struct ptlrpc_nrs_policy *policy,
struct ptlrpc_nrs_request *nrq);
/**
- * Called before carrying out the request; should not block. Could be
- * used for job/resource control; this operation is optional.
- *
- * \param[in] policy The policy which is starting to handle request
- * \a nrq
- * \param[in] nrq The request
- *
- * \pre spin_is_locked(&svcpt->scp_req_lock)
- *
- * \see ptlrpc_nrs_req_start_nolock()
- */
- void (*op_req_start) (struct ptlrpc_nrs_policy *policy,
- struct ptlrpc_nrs_request *nrq);
- /**
* Called after the request being carried out. Could be used for
* job/resource control; this operation is optional.
*
- * \param[in] policy The policy which is stopping to handle request
- * \a nrq
- * \param[in] nrq The request
+ * \param[in,out] policy The policy which is stopping to handle request
+ * \a nrq
+ * \param[in,out] nrq The request
*
* \pre spin_is_locked(&svcpt->scp_req_lock)
*
/**
* Unegisters the policy's lprocfs interface with a PTLRPC service.
*
+ * In cases of failed policy registration in
+ * \e ptlrpc_nrs_policy_register(), this function may be called for a
+ * service which has not registered the policy successfully, so
+ * implementations of this method should make sure their operations are
+ * safe in such cases.
+ *
* \param[in] svc The service
*/
void (*op_lprocfs_fini) (struct ptlrpc_service *svc);
enum nrs_policy_flags {
/**
* Fallback policy, use this flag only on a single supported policy per
- * service. Do not use this flag for policies registering using
- * ptlrpc_nrs_policy_register() (i.e. ones that are not in
- * \e nrs_pols_builtin).
+ * service. The flag cannot be used on policies that use
+ * \e PTLRPC_NRS_FL_REG_EXTERN
*/
PTLRPC_NRS_FL_FALLBACK = (1 << 0),
/**
*/
PTLRPC_NRS_FL_REG_START = (1 << 1),
/**
- * This is a polciy registering externally with NRS core, via
- * ptlrpc_nrs_policy_register(), (i.e. one that is not in
- * \e nrs_pols_builtin. Used to avoid ptlrpc_nrs_policy_register()
- * racing with a policy start operation issued by the user via lprocfs.
+ * This is a policy registering from a module different to the one NRS
+ * core ships in (currently ptlrpc).
*/
PTLRPC_NRS_FL_REG_EXTERN = (1 << 2),
};
* in a service.
*/
enum ptlrpc_nrs_queue_type {
- PTLRPC_NRS_QUEUE_REG,
- PTLRPC_NRS_QUEUE_HP,
- PTLRPC_NRS_QUEUE_BOTH,
+ PTLRPC_NRS_QUEUE_REG = (1 << 0),
+ PTLRPC_NRS_QUEUE_HP = (1 << 1),
+ PTLRPC_NRS_QUEUE_BOTH = (PTLRPC_NRS_QUEUE_REG | PTLRPC_NRS_QUEUE_HP)
};
/**
spinlock_t nrs_lock;
/** XXX Possibly replace svcpt->scp_req_lock with another lock here. */
/**
- * Linkage into nrs_core_heads_list
- */
- cfs_list_t nrs_heads;
- /**
* List of registered policies
*/
cfs_list_t nrs_policy_list;
unsigned long nrs_req_started;
/**
* # policies on this NRS
- * TODO: Can we avoid having this?
*/
unsigned nrs_num_pols;
/**
#define NRS_POL_NAME_MAX 16
+struct ptlrpc_nrs_pol_desc;
+
+/**
+ * Service compatibility predicate; this determines whether a policy is adequate
+ * for handling RPCs of a particular PTLRPC service.
+ *
+ * XXX:This should give the same result during policy registration and
+ * unregistration, and for all partitions of a service; so the result should not
+ * depend on temporal service or other properties, that may influence the
+ * result.
+ */
+typedef bool (*nrs_pol_desc_compat_t) (const struct ptlrpc_service *svc,
+ const struct ptlrpc_nrs_pol_desc *desc);
+
+struct ptlrpc_nrs_pol_conf {
+ /**
+ * Human-readable policy name
+ */
+ char nc_name[NRS_POL_NAME_MAX];
+ /**
+ * NRS operations for this policy
+ */
+ const struct ptlrpc_nrs_pol_ops *nc_ops;
+ /**
+ * Service compatibility predicate
+ */
+ nrs_pol_desc_compat_t nc_compat;
+ /**
+ * Set for policies that support a single ptlrpc service, i.e. ones that
+ * have \a pd_compat set to nrs_policy_compat_one(). The variable value
+ * depicts the name of the single service that such policies are
+ * compatible with.
+ */
+ const char *nc_compat_svc_name;
+ /**
+ * Owner module for this policy descriptor; policies registering from a
+ * different module to the one the NRS framework is held within
+ * (currently ptlrpc), should set this field to THIS_MODULE.
+ */
+ cfs_module_t *nc_owner;
+ /**
+ * Policy registration flags; a bitmask of \e nrs_policy_flags
+ */
+ unsigned nc_flags;
+};
+
/**
* NRS policy registering descriptor
*
/**
* Human-readable policy name
*/
- char pd_name[NRS_POL_NAME_MAX];
+ char pd_name[NRS_POL_NAME_MAX];
/**
- * NRS operations for this policy
+ * Link into nrs_core::nrs_policies
*/
- struct ptlrpc_nrs_pol_ops *pd_ops;
+ cfs_list_t pd_list;
/**
- * Service Compatibility function; this determines whether a policy is
- * adequate for handling RPCs of a particular PTLRPC service.
- *
- * XXX:This should give the same result during policy
- * registration and unregistration, and for all partitions of a
- * service; so the result should not depend on temporal service
- * or other properties, that may influence the result.
+ * NRS operations for this policy
*/
- bool (*pd_compat) (struct ptlrpc_service *svc,
- const struct ptlrpc_nrs_pol_desc *desc);
+ const struct ptlrpc_nrs_pol_ops *pd_ops;
/**
- * Optionally set for policies that support a single ptlrpc service,
- * i.e. ones that have \a pd_compat set to nrs_policy_compat_one()
+ * Service compatibility predicate
*/
- char *pd_compat_svc_name;
+ nrs_pol_desc_compat_t pd_compat;
/**
- * Bitmask of nrs_policy_flags
+ * Set for policies that are compatible with only one PTLRPC service.
+ *
+ * \see ptlrpc_nrs_pol_conf::nc_compat_svc_name
*/
- unsigned pd_flags;
+ const char *pd_compat_svc_name;
/**
- * Link into nrs_core::nrs_policies
- */
- cfs_list_t pd_list;
+ * Owner module for this policy descriptor.
+ *
+ * We need to hold a reference to the module whenever we might make use
+ * of any of the module's contents, i.e.
+ * - If one or more instances of the policy are at a state where they
+ * might be handling a request, i.e.
+ * ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED or
+ * ptlrpc_nrs_pol_state::NRS_POL_STATE_STOPPING as we will have to
+ * call into the policy's ptlrpc_nrs_pol_ops() handlers. A reference
+ * is taken on the module when
+ * \e ptlrpc_nrs_pol_desc::pd_refs becomes 1, and released when it
+ * becomes 0, so that we hold only one reference to the module maximum
+ * at any time.
+ *
+ * We do not need to hold a reference to the module, even though we
+ * might use code and data from the module, in the following cases:
+ * - During external policy registration, because this should happen in
+ * the module's init() function, in which case the module is safe from
+ * removal because a reference is being held on the module by the
+ * kernel, and kmod (and module-init-tools) will
+ * serialize any racing processes properly anyway.
+ * - During external policy unregistration, because this should happen
+ * in a module's exit() function, and any attempts to start a policy
+ * instance would need to take a reference on the module, and this is
+ * not possible once we have reached the point where the exit()
+ * handler is called.
+ * - During service registration and unregistration, as service setup
+ * and cleanup, and policy registration, unregistration and policy
+ * instance starting, are serialized by \e nrs_core::nrs_mutex, so
+ * as long as users adhere to the convention of registering policies
+ * in init() and unregistering them in module exit() functions, there
+ * should not be a race between these operations.
+ * - During any policy-specific lprocfs operations, because a reference
+ * is held by the kernel on a proc entry that has been entered by a
+ * syscall, so as long as proc entries are removed at unregistration time,
+ * then unregistration and lprocfs operations will be properly
+ * serialized.
+ */
+ cfs_module_t *pd_owner;
+ /**
+ * Bitmask of \e nrs_policy_flags
+ */
+ unsigned pd_flags;
+ /**
+ * # of references on this descriptor
+ */
+ cfs_atomic_t pd_refs;
};
/**
*/
NRS_POL_STATE_INVALID,
/**
- * For now, this state is used exclusively for policies that register
- * externally to NRS core, i.e. ones that do so via
- * ptlrpc_nrs_policy_register() and are not part of nrs_pols_builtin;
- * it is used to prevent a race condition between the policy registering
- * with more than one service partition while service is operational,
- * and the user starting the policy via lprocfs.
- *
- * \see nrs_pol_make_avail()
- */
- NRS_POL_STATE_UNAVAIL,
- /**
* Policies are at this state either at the start of their life, or
* transition here when the user selects a different policy to act
* as the primary one.
*/
struct ptlrpc_nrs *pol_nrs;
/**
- * NRS operations for this policy; points to ptlrpc_nrs_pol_desc::pd_ops
- */
- struct ptlrpc_nrs_pol_ops *pol_ops;
- /**
* Private policy data; varies by policy type
*/
void *pol_private;
/**
- * Human-readable policy name; point to ptlrpc_nrs_pol_desc::pd_name
+ * Policy descriptor for this policy instance.
*/
- char *pol_name;
+ struct ptlrpc_nrs_pol_desc *pol_desc;
};
/**
};
struct nrs_fifo_req {
- /** request header, must be the first member of structure */
cfs_list_t fr_list;
__u64 fr_sequence;
};
unsigned nr_res_idx;
unsigned nr_initialized:1;
unsigned nr_enqueued:1;
- unsigned nr_dequeued:1;
unsigned nr_started:1;
unsigned nr_finalized:1;
cfs_binheap_node_t nr_node;
/** \addtogroup nrs
* @{
*/
-int ptlrpc_nrs_policy_register(struct ptlrpc_nrs_pol_desc *desc);
-int ptlrpc_nrs_policy_unregister(struct ptlrpc_nrs_pol_desc *desc);
+int ptlrpc_nrs_policy_register(struct ptlrpc_nrs_pol_conf *conf);
+int ptlrpc_nrs_policy_unregister(struct ptlrpc_nrs_pol_conf *conf);
void ptlrpc_nrs_req_hp_move(struct ptlrpc_request *req);
void nrs_policy_get_info_locked(struct ptlrpc_nrs_policy *policy,
struct ptlrpc_nrs_pol_info *info);
*
* For a reliable result, this should be checked under svcpt->scp_req lock.
*/
-static inline bool
-ptlrpc_nrs_req_can_move(struct ptlrpc_request *req)
+static inline bool ptlrpc_nrs_req_can_move(struct ptlrpc_request *req)
{
struct ptlrpc_nrs_request *nrq = &req->rq_nrq;
#define BULK_PUT_SOURCE 3
/**
- * Definition of buk descriptor.
+ * Definition of bulk descriptor.
* Bulks are special "Two phase" RPCs where initial request message
* is sent first and it is followed bt a transfer (o receiving) of a large
* amount of data to be settled into pages referenced from the bulk descriptors.
* Another user is readpage for MDT.
*/
struct ptlrpc_bulk_desc {
- /** completed successfully */
- unsigned long bd_success:1;
- /** accessible to the network (network io potentially in progress) */
- unsigned long bd_network_rw:1;
- /** {put,get}{source,sink} */
- unsigned long bd_type:2;
- /** client side */
- unsigned long bd_registered:1;
- /** For serialization with callback */
+ /** completed with failure */
+ unsigned long bd_failure:1;
+ /** {put,get}{source,sink} */
+ unsigned long bd_type:2;
+ /** client side */
+ unsigned long bd_registered:1;
+ /** For serialization with callback */
spinlock_t bd_lock;
- /** Import generation when request for this bulk was sent */
- int bd_import_generation;
- /** Server side - export this bulk created for */
- struct obd_export *bd_export;
- /** Client side - import this bulk was sent on */
- struct obd_import *bd_import;
- /** LNet portal for this bulk */
- __u32 bd_portal;
- /** Back pointer to the request */
- struct ptlrpc_request *bd_req;
- cfs_waitq_t bd_waitq; /* server side only WQ */
- int bd_iov_count; /* # entries in bd_iov */
- int bd_max_iov; /* allocated size of bd_iov */
- int bd_nob; /* # bytes covered */
- int bd_nob_transferred; /* # bytes GOT/PUT */
-
- __u64 bd_last_xid;
-
- struct ptlrpc_cb_id bd_cbid; /* network callback info */
- lnet_handle_md_t bd_md_h; /* associated MD */
- lnet_nid_t bd_sender; /* stash event::sender */
+ /** Import generation when request for this bulk was sent */
+ int bd_import_generation;
+ /** LNet portal for this bulk */
+ __u32 bd_portal;
+ /** Server side - export this bulk created for */
+ struct obd_export *bd_export;
+ /** Client side - import this bulk was sent on */
+ struct obd_import *bd_import;
+ /** Back pointer to the request */
+ struct ptlrpc_request *bd_req;
+ cfs_waitq_t bd_waitq; /* server side only WQ */
+ int bd_iov_count; /* # entries in bd_iov */
+ int bd_max_iov; /* allocated size of bd_iov */
+ int bd_nob; /* # bytes covered */
+ int bd_nob_transferred; /* # bytes GOT/PUT */
+
+ __u64 bd_last_xid;
+
+ struct ptlrpc_cb_id bd_cbid; /* network callback info */
+ lnet_nid_t bd_sender; /* stash event::sender */
+ int bd_md_count; /* # valid entries in bd_mds */
+ int bd_md_max_brw; /* max entries in bd_mds */
+ /** array of associated MDs */
+ lnet_handle_md_t bd_mds[PTLRPC_BULK_OPS_COUNT];
#if defined(__KERNEL__)
- /*
- * encrypt iov, size is either 0 or bd_iov_count.
- */
- lnet_kiov_t *bd_enc_iov;
+ /*
+ * encrypt iov, size is either 0 or bd_iov_count.
+ */
+ lnet_kiov_t *bd_enc_iov;
- lnet_kiov_t bd_iov[0];
+ lnet_kiov_t bd_iov[0];
#else
- lnet_md_iovec_t bd_iov[0];
+ lnet_md_iovec_t bd_iov[0];
#endif
};
* \addtogroup nrs
* @{
*
- * Service compatibility function; policy is compatible with all services.
+ * Service compatibility function; the policy is compatible with all services.
*
* \param[in] svc The service the policy is attempting to register with.
* \param[in] desc The policy descriptor
*
- * \retval true The policy is compatible with the NRS head
+ * \retval true The policy is compatible with the service
*
* \see ptlrpc_nrs_pol_desc::pd_compat()
*/
-static inline bool
-nrs_policy_compat_all(struct ptlrpc_service *svc,
- const struct ptlrpc_nrs_pol_desc *desc)
+static inline bool nrs_policy_compat_all(const struct ptlrpc_service *svc,
+ const struct ptlrpc_nrs_pol_desc *desc)
{
return true;
}
/**
- * Service compatibility function; policy is compatible with only a specific
+ * Service compatibility function; the policy is compatible with only a specific
* service which is identified by its human-readable name at
* ptlrpc_service::srv_name.
*
* \param[in] svc The service the policy is attempting to register with.
* \param[in] desc The policy descriptor
*
- * \retval false The policy is not compatible with the NRS head
- * \retval true The policy is compatible with the NRS head
+ * \retval false The policy is not compatible with the service
+ * \retval true The policy is compatible with the service
*
* \see ptlrpc_nrs_pol_desc::pd_compat()
*/
-static inline bool
-nrs_policy_compat_one(struct ptlrpc_service *svc,
- const struct ptlrpc_nrs_pol_desc *desc)
+static inline bool nrs_policy_compat_one(const struct ptlrpc_service *svc,
+ const struct ptlrpc_nrs_pol_desc *desc)
{
LASSERT(desc->pd_compat_svc_name != NULL);
return strcmp(svc->srv_name, desc->pd_compat_svc_name) == 0;
*/
#ifdef HAVE_SERVER_SUPPORT
struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_exp(struct ptlrpc_request *req,
- int npages, int type, int portal);
+ unsigned npages, unsigned max_brw,
+ unsigned type, unsigned portal);
int ptlrpc_start_bulk_transfer(struct ptlrpc_bulk_desc *desc);
void ptlrpc_abort_bulk(struct ptlrpc_bulk_desc *desc);
LASSERT(desc != NULL);
spin_lock(&desc->bd_lock);
- rc = desc->bd_network_rw;
+ rc = desc->bd_md_count;
spin_unlock(&desc->bd_lock);
return rc;
}
return 0;
spin_lock(&desc->bd_lock);
- rc = desc->bd_network_rw;
+ rc = desc->bd_md_count;
spin_unlock(&desc->bd_lock);
return rc;
}
void ptlrpc_req_finished_with_imp_lock(struct ptlrpc_request *request);
struct ptlrpc_request *ptlrpc_request_addref(struct ptlrpc_request *req);
struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req,
- int npages, int type, int portal);
+ unsigned npages, unsigned max_brw,
+ unsigned type, unsigned portal);
void __ptlrpc_free_bulk(struct ptlrpc_bulk_desc *bulk, int pin);
static inline void ptlrpc_free_bulk_pin(struct ptlrpc_bulk_desc *bulk)
{
/** @} */
struct ptlrpc_service_buf_conf {
- /* nbufs is how many buffers to post */
+ /* nbufs is buffers # to allocate when growing the pool */
unsigned int bc_nbufs;
/* buffer size to post */
unsigned int bc_buf_size;
/* ptlrpc/ptlrpcd.c */
void ptlrpcd_stop(struct ptlrpcd_ctl *pc, int force);
+void ptlrpcd_free(struct ptlrpcd_ctl *pc);
void ptlrpcd_wake(struct ptlrpc_request *req);
void ptlrpcd_add_req(struct ptlrpc_request *req, pdl_policy_t policy, int idx);
void ptlrpcd_add_rqset(struct ptlrpc_request_set *set);