* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2010, 2012, Intel Corporation.
+ * Copyright (c) 2010, 2014, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
* @{
*/
-#if defined(__linux__)
-#include <linux/lustre_net.h>
-#elif defined(__APPLE__)
-#include <darwin/lustre_net.h>
-#elif defined(__WINNT__)
-#include <winnt/lustre_net.h>
-#else
-#error Unsupported operating system.
-#endif
-
#include <libcfs/libcfs.h>
-// #include <obd.h>
-#include <lnet/lnet.h>
+#include <lnet/nidstr.h>
+#include <lnet/api.h>
#include <lustre/lustre_idl.h>
#include <lustre_ha.h>
#include <lustre_sec.h>
#include <lprocfs_status.h>
#include <lu_object.h>
#include <lustre_req_layout.h>
-
#include <obd_support.h>
#include <lustre_ver.h>
*/
#define PTLRPC_MAX_BRW_BITS (LNET_MTU_BITS + PTLRPC_BULK_OPS_BITS)
#define PTLRPC_MAX_BRW_SIZE (1 << PTLRPC_MAX_BRW_BITS)
-#define PTLRPC_MAX_BRW_PAGES (PTLRPC_MAX_BRW_SIZE >> CFS_PAGE_SHIFT)
+#define PTLRPC_MAX_BRW_PAGES (PTLRPC_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT)
#define ONE_MB_BRW_SIZE (1 << LNET_MTU_BITS)
#define MD_MAX_BRW_SIZE (1 << LNET_MTU_BITS)
-#define MD_MAX_BRW_PAGES (MD_MAX_BRW_SIZE >> CFS_PAGE_SHIFT)
+#define MD_MAX_BRW_PAGES (MD_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT)
#define DT_MAX_BRW_SIZE PTLRPC_MAX_BRW_SIZE
-#define DT_MAX_BRW_PAGES (DT_MAX_BRW_SIZE >> CFS_PAGE_SHIFT)
+#define DT_MAX_BRW_PAGES (DT_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT)
#define OFD_MAX_BRW_SIZE (1 << LNET_MTU_BITS)
/* When PAGE_SIZE is a constant, we can check our arithmetic here with cpp! */
-#ifdef __KERNEL__
-# if ((PTLRPC_MAX_BRW_PAGES & (PTLRPC_MAX_BRW_PAGES - 1)) != 0)
-# error "PTLRPC_MAX_BRW_PAGES isn't a power of two"
-# endif
-# if (PTLRPC_MAX_BRW_SIZE != (PTLRPC_MAX_BRW_PAGES * CFS_PAGE_SIZE))
-# error "PTLRPC_MAX_BRW_SIZE isn't PTLRPC_MAX_BRW_PAGES * CFS_PAGE_SIZE"
-# endif
-# if (PTLRPC_MAX_BRW_SIZE > LNET_MTU * PTLRPC_BULK_OPS_COUNT)
-# error "PTLRPC_MAX_BRW_SIZE too big"
-# endif
-# if (PTLRPC_MAX_BRW_PAGES > LNET_MAX_IOV * PTLRPC_BULK_OPS_COUNT)
-# error "PTLRPC_MAX_BRW_PAGES too big"
-# endif
-#endif /* __KERNEL__ */
+#if ((PTLRPC_MAX_BRW_PAGES & (PTLRPC_MAX_BRW_PAGES - 1)) != 0)
+# error "PTLRPC_MAX_BRW_PAGES isn't a power of two"
+#endif
+#if (PTLRPC_MAX_BRW_SIZE != (PTLRPC_MAX_BRW_PAGES * PAGE_CACHE_SIZE))
+# error "PTLRPC_MAX_BRW_SIZE isn't PTLRPC_MAX_BRW_PAGES * PAGE_CACHE_SIZE"
+#endif
+#if (PTLRPC_MAX_BRW_SIZE > LNET_MTU * PTLRPC_BULK_OPS_COUNT)
+# error "PTLRPC_MAX_BRW_SIZE too big"
+#endif
+#if (PTLRPC_MAX_BRW_PAGES > LNET_MAX_IOV * PTLRPC_BULK_OPS_COUNT)
+# error "PTLRPC_MAX_BRW_PAGES too big"
+#endif
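/*
 * Editorial illustration, not part of this patch: assuming the common values
 * LNET_MTU_BITS = 20 (1 MiB LNet MTU), PAGE_CACHE_SHIFT = 12 (4 KiB pages)
 * and PTLRPC_BULK_OPS_BITS = 2, the macros above work out to
 *
 *	PTLRPC_MAX_BRW_BITS  = 20 + 2      = 22
 *	PTLRPC_MAX_BRW_SIZE  = 1 << 22     = 4 MiB
 *	PTLRPC_MAX_BRW_PAGES = 4 MiB >> 12 = 1024
 *
 * which satisfies all four checks: 1024 is a power of two, 4 MiB equals
 * 1024 * PAGE_CACHE_SIZE, 4 MiB <= LNET_MTU * PTLRPC_BULK_OPS_COUNT, and
 * 1024 <= LNET_MAX_IOV * PTLRPC_BULK_OPS_COUNT (256 * 4 for 4 KiB pages).
 */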
#define PTLRPC_NTHRS_INIT 2
#define LDLM_THR_FACTOR 8
#define LDLM_NTHRS_INIT PTLRPC_NTHRS_INIT
#define LDLM_NTHRS_BASE 24
-#define LDLM_NTHRS_MAX (cfs_num_online_cpus() == 1 ? 64 : 128)
+#define LDLM_NTHRS_MAX (num_online_cpus() == 1 ? 64 : 128)
#define LDLM_BL_THREADS LDLM_NTHRS_AUTO_INIT
#define LDLM_CLIENT_NBUFS 1
* include linkea (4K maximum), together with other updates, we set it to 9K:
* lustre_msg + ptlrpc_body + UPDATE_BUF_SIZE (8K)
*/
-#define MDS_OUT_MAXREQSIZE (9 * 1024)
-#define MDS_OUT_MAXREPSIZE MDS_MAXREPSIZE
+#define OUT_MAXREQSIZE (9 * 1024)
+#define OUT_MAXREPSIZE MDS_MAXREPSIZE
/** MDS_BUFSIZE = max_reqsize (w/o LOV EA) + max sptlrpc payload size */
#define MDS_BUFSIZE max(MDS_MAXREQSIZE + SPTLRPC_MAX_PAYLOAD, \
160 * 1024)
/**
- * MDS_OUT_BUFSIZE = max_out_reqsize + max sptlrpc payload (~1K) which is
+ * OUT_BUFSIZE = max_out_reqsize + max sptlrpc payload (~1K) which is
* about 10K, for the same reason as MDS_REG_BUFSIZE, we also give some
* extra bytes to each request buffer to improve buffer utilization rate.
*/
-#define MDS_OUT_BUFSIZE max(MDS_OUT_MAXREQSIZE + SPTLRPC_MAX_PAYLOAD, \
+#define OUT_BUFSIZE max(OUT_MAXREQSIZE + SPTLRPC_MAX_PAYLOAD, \
24 * 1024)
/** FLD_MAXREQSIZE == lustre_msg + __u32 padding + ptlrpc_body + opc */
*/
/* depress threads factor for VM with small memory size */
#define OSS_THR_FACTOR min_t(int, 8, \
- CFS_NUM_CACHEPAGES >> (28 - CFS_PAGE_SHIFT))
+ NUM_CACHEPAGES >> (28 - PAGE_CACHE_SHIFT))
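/*
 * Editorial illustration, not part of this patch: with 4 KiB pages
 * (PAGE_CACHE_SHIFT = 12) the shift above is 28 - 12 = 16, so the factor is
 * roughly NUM_CACHEPAGES >> 16, i.e. one unit per 256 MiB of cacheable
 * memory, capped at 8 by the min_t().
 */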
#define OSS_NTHRS_INIT (PTLRPC_NTHRS_INIT + 1)
#define OSS_NTHRS_BASE 64
#define OSS_NTHRS_MAX 512
/**
* FIEMAP request can be 4K+ for now
*/
-#define OST_MAXREQSIZE (5 * 1024)
+#define OST_MAXREQSIZE (16 * 1024)
#define OST_IO_MAXREQSIZE max_t(int, OST_MAXREQSIZE, \
(((_OST_MAXREQSIZE_SUM - 1) | (1024 - 1)) + 1))
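/*
 * Editorial illustration, not part of this patch: the expression above rounds
 * _OST_MAXREQSIZE_SUM up to the next 1 KiB boundary; e.g. assuming a sum of
 * 5000 bytes:
 *
 *	((5000 - 1) | (1024 - 1)) + 1 = (4999 | 1023) + 1 = 5119 + 1 = 5120
 *
 * i.e. 5 KiB, the smallest 1 KiB-aligned size that holds the request.
 */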
/* Macro to hide a typecast. */
#define ptlrpc_req_async_args(req) ((void *)&req->rq_async_args)
+struct ptlrpc_replay_async_args {
+ int praa_old_state;
+ int praa_old_status;
+};
+
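/*
 * Editorial sketch, not part of this patch: per-request state such as the
 * replay arguments above is stashed in the rq_async_args area through the
 * typecast-hiding macro; some_interpret_cb below is a hypothetical callback.
 *
 *	struct ptlrpc_replay_async_args *aa;
 *
 *	CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
 *	aa = ptlrpc_req_async_args(req);
 *	aa->praa_old_state = req->rq_send_state;
 *	req->rq_interpret_reply = some_interpret_cb;
 */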
/**
* Structure to single define portal connection.
*/
struct ptlrpc_connection {
- /** linkage for connections hash table */
- cfs_hlist_node_t c_hash;
- /** Our own lnet nid for this connection */
- lnet_nid_t c_self;
- /** Remote side nid for this connection */
- lnet_process_id_t c_peer;
- /** UUID of the other side */
- struct obd_uuid c_remote_uuid;
- /** reference counter for this connection */
- cfs_atomic_t c_refcount;
+ /** linkage for connections hash table */
+ struct hlist_node c_hash;
+ /** Our own lnet nid for this connection */
+ lnet_nid_t c_self;
+ /** Remote side nid for this connection */
+ lnet_process_id_t c_peer;
+ /** UUID of the other side */
+ struct obd_uuid c_remote_uuid;
+ /** reference counter for this connection */
+ atomic_t c_refcount;
};
/** Client definition for PortalRPC */
* returned.
*/
struct ptlrpc_request_set {
- cfs_atomic_t set_refcount;
+ atomic_t set_refcount;
/** number of in queue requests */
- cfs_atomic_t set_new_count;
+ atomic_t set_new_count;
/** number of uncompleted requests */
- cfs_atomic_t set_remaining;
+ atomic_t set_remaining;
/** wait queue to wait on for request events */
- cfs_waitq_t set_waitq;
- cfs_waitq_t *set_wakeup_ptr;
+ wait_queue_head_t set_waitq;
+ wait_queue_head_t *set_wakeup_ptr;
/** List of requests in the set */
- cfs_list_t set_requests;
+ struct list_head set_requests;
/**
* List of completion callbacks to be called when the set is completed
* This is only used if \a set_interpret is NULL.
* Links struct ptlrpc_set_cbdata.
*/
- cfs_list_t set_cblist;
+ struct list_head set_cblist;
/** Completion callback, if only one. */
- set_interpreter_func set_interpret;
+ set_interpreter_func set_interpret;
/** opaq argument passed to completion \a set_interpret callback. */
- void *set_arg;
+ void *set_arg;
/**
* Lock for \a set_new_requests manipulations
* locked so that any old caller can communicate requests to
*/
spinlock_t set_new_req_lock;
/** List of new yet unsent requests. Only used with ptlrpcd now. */
- cfs_list_t set_new_requests;
+ struct list_head set_new_requests;
/** rq_status of requests that have been freed already */
- int set_rc;
+ int set_rc;
/** Additional fields used by the flow control extension */
/** Maximum number of RPCs in flight */
- int set_max_inflight;
+ int set_max_inflight;
/** Callback function used to generate RPCs */
- set_producer_func set_producer;
+ set_producer_func set_producer;
/** opaq argument passed to the producer callback */
- void *set_producer_arg;
+ void *set_producer_arg;
};
/**
* Description of a single ptrlrpc_set callback
*/
struct ptlrpc_set_cbdata {
- /** List linkage item */
- cfs_list_t psc_item;
- /** Pointer to interpreting function */
- set_interpreter_func psc_interpret;
- /** Opaq argument to pass to the callback */
- void *psc_data;
+ /** List linkage item */
+ struct list_head psc_item;
+ /** Pointer to interpreting function */
+ set_interpreter_func psc_interpret;
+ /** Opaq argument to pass to the callback */
+ void *psc_data;
};
struct ptlrpc_bulk_desc;
* added to the state for replay/failover consistency guarantees.
*/
struct ptlrpc_reply_state {
- /** Callback description */
- struct ptlrpc_cb_id rs_cb_id;
- /** Linkage for list of all reply states in a system */
- cfs_list_t rs_list;
- /** Linkage for list of all reply states on same export */
- cfs_list_t rs_exp_list;
- /** Linkage for list of all reply states for same obd */
- cfs_list_t rs_obd_list;
+ /** Callback description */
+ struct ptlrpc_cb_id rs_cb_id;
+ /** Linkage for list of all reply states in a system */
+ struct list_head rs_list;
+ /** Linkage for list of all reply states on same export */
+ struct list_head rs_exp_list;
+ /** Linkage for list of all reply states for same obd */
+ struct list_head rs_obd_list;
#if RS_DEBUG
- cfs_list_t rs_debug_list;
+ struct list_head rs_debug_list;
#endif
- /** A spinlock to protect the reply state flags */
+ /** A spinlock to protect the reply state flags */
spinlock_t rs_lock;
- /** Reply state flags */
+ /** Reply state flags */
unsigned long rs_difficult:1; /* ACK/commit stuff */
unsigned long rs_no_ack:1; /* no ACK, even for
difficult requests */
__u64 rs_transno;
/** xid */
__u64 rs_xid;
- struct obd_export *rs_export;
+ struct obd_export *rs_export;
struct ptlrpc_service_part *rs_svcpt;
- /** Lnet metadata handle for the reply */
- lnet_handle_md_t rs_md_h;
- cfs_atomic_t rs_refcount;
-
- /** Context for the sevice thread */
- struct ptlrpc_svc_ctx *rs_svc_ctx;
- /** Reply buffer (actually sent to the client), encoded if needed */
- struct lustre_msg *rs_repbuf; /* wrapper */
+ /** Lnet metadata handle for the reply */
+ lnet_handle_md_t rs_md_h;
+ atomic_t rs_refcount;
+
+	/** Context for the service thread */
+ struct ptlrpc_svc_ctx *rs_svc_ctx;
+ /** Reply buffer (actually sent to the client), encoded if needed */
+ struct lustre_msg *rs_repbuf; /* wrapper */
/** Size of the reply buffer */
int rs_repbuf_len; /* wrapper buf length */
/** Size of the reply message */
typedef int (*ptlrpc_interpterer_t)(const struct lu_env *env,
struct ptlrpc_request *req,
void *arg, int rc);
+/** Type of request resend call-back */
+typedef void (*ptlrpc_resend_cb_t)(struct ptlrpc_request *req,
+ void *arg);
/**
* Definition of request pool structure.
*/
struct ptlrpc_request_pool {
/** Locks the list */
- spinlock_t prp_lock;
- /** list of ptlrpc_request structs */
- cfs_list_t prp_req_list;
- /** Maximum message size that would fit into a rquest from this pool */
- int prp_rq_size;
- /** Function to allocate more requests for this pool */
- void (*prp_populate)(struct ptlrpc_request_pool *, int);
+ spinlock_t prp_lock;
+ /** list of ptlrpc_request structs */
+ struct list_head prp_req_list;
+	/** Maximum message size that would fit into a request from this pool */
+ int prp_rq_size;
+ /** Function to allocate more requests for this pool */
+ void (*prp_populate)(struct ptlrpc_request_pool *, int);
};
struct lu_context;
*/
enum ptlrpc_nrs_ctl {
/**
+ * Not a valid opcode.
+ */
+ PTLRPC_NRS_CTL_INVALID,
+ /**
* Activate the policy.
*/
PTLRPC_NRS_CTL_START,
*/
PTLRPC_NRS_CTL_STOP,
/**
- * Recycle resources for inactive policies.
- */
- PTLRPC_NRS_CTL_SHRINK,
- /**
- * Not a valid opcode.
- */
- PTLRPC_NRS_CTL_INVALID,
- /**
* Policies can start using opcodes from this value and onwards for
* their own purposes; the assigned value itself is arbitrary.
*/
};
/**
+ * ORR policy operations
+ */
+enum nrs_ctl_orr {
+ NRS_CTL_ORR_RD_QUANTUM = PTLRPC_NRS_CTL_1ST_POL_SPEC,
+ NRS_CTL_ORR_WR_QUANTUM,
+ NRS_CTL_ORR_RD_OFF_TYPE,
+ NRS_CTL_ORR_WR_OFF_TYPE,
+ NRS_CTL_ORR_RD_SUPP_REQ,
+ NRS_CTL_ORR_WR_SUPP_REQ,
+};
+
+/**
* NRS policy operations.
*
* These determine the behaviour of a policy, and are called in response to
/**
* Called during policy registration; this operation is optional.
*
- * \param[in] policy The policy being initialized
+ * \param[in,out] policy The policy being initialized
*/
int (*op_policy_init) (struct ptlrpc_nrs_policy *policy);
/**
* Called during policy unregistration; this operation is optional.
*
- * \param[in] policy The policy being unregistered/finalized
+ * \param[in,out] policy The policy being unregistered/finalized
*/
void (*op_policy_fini) (struct ptlrpc_nrs_policy *policy);
/**
* Called when activating a policy via lprocfs; policies allocate and
* initialize their resources here; this operation is optional.
*
- * \param[in] policy The policy being started
+ * \param[in,out] policy The policy being started
+ * \param[in,out] arg A generic char buffer
*
* \see nrs_policy_start_locked()
*/
- int (*op_policy_start) (struct ptlrpc_nrs_policy *policy);
+ int (*op_policy_start) (struct ptlrpc_nrs_policy *policy,
+ char *arg);
/**
* Called when deactivating a policy via lprocfs; policies deallocate
* their resources here; this operation is optional
*
- * \param[in] policy The policy being stopped
+ * \param[in,out] policy The policy being stopped
*
- * \see nrs_policy_stop_final()
+ * \see nrs_policy_stop0()
*/
void (*op_policy_stop) (struct ptlrpc_nrs_policy *policy);
/**
* \e PTLRPC_NRS_CTL_START and \e PTLRPC_NRS_CTL_GET_INFO; analogous
* to an ioctl; this operation is optional.
*
- * \param[in] policy The policy carrying out operation \a opc
+ * \param[in,out] policy The policy carrying out operation \a opc
* \param[in] opc The command operation being carried out
* \param[in,out] arg A generic buffer for communication between the
* user and the control operation
* service. Policies should return -ve for requests they do not wish
* to handle. This operation is mandatory.
*
- * \param[in] policy The policy we're getting resources for.
- * \param[in] nrq The request we are getting resources for.
- * \param[in] parent The parent resource of the resource being
+ * \param[in,out] policy The policy we're getting resources for.
+ * \param[in,out] nrq The request we are getting resources for.
+ * \param[in] parent The parent resource of the resource being
* requested; set to NULL if none.
- * \param[out] resp The resource is to be returned here; the
+ * \param[out] resp The resource is to be returned here; the
* fallback policy in an NRS head should
* \e always return a non-NULL pointer value.
* \param[in] moving_req When set, signifies that this is an attempt
*/
int (*op_res_get) (struct ptlrpc_nrs_policy *policy,
struct ptlrpc_nrs_request *nrq,
- struct ptlrpc_nrs_resource *parent,
+ const struct ptlrpc_nrs_resource *parent,
struct ptlrpc_nrs_resource **resp,
bool moving_req);
/**
* Called when releasing references taken for resources in the resource
* hierarchy for the request; this operation is optional.
*
- * \param[in] policy The policy the resource belongs to
- * \param[in] res The resource to be freed
+ * \param[in,out] policy The policy the resource belongs to
+ * \param[in] res The resource to be freed
*
* \see ptlrpc_nrs_req_finalize()
* \see ptlrpc_nrs_hpreq_add_nolock()
* \see ptlrpc_nrs_req_hp_move()
*/
void (*op_res_put) (struct ptlrpc_nrs_policy *policy,
- struct ptlrpc_nrs_resource *res);
+ const struct ptlrpc_nrs_resource *res);
/**
- * Obtain a request for handling from the policy via polling; this
- * operation is mandatory.
+ * Obtains a request for handling from the policy, and optionally
+ * removes the request from the policy; this operation is mandatory.
*
- * \param[in] policy The policy to poll
+ * \param[in,out] policy The policy to poll
+ * \param[in] peek When set, signifies that we just want to
+ * examine the request, and not handle it, so the
+ * request is not removed from the policy.
+ * \param[in] force When set, it will force a policy to return a
+ * request if it has one queued.
*
- * \retval NULL No erquest available for handling
+ * \retval NULL No request available for handling
* \retval valid-pointer The request polled for handling
*
- * \see ptlrpc_nrs_req_poll_nolock()
+ * \see ptlrpc_nrs_req_get_nolock()
*/
struct ptlrpc_nrs_request *
- (*op_req_poll) (struct ptlrpc_nrs_policy *policy);
+ (*op_req_get) (struct ptlrpc_nrs_policy *policy, bool peek,
+ bool force);
/**
* Called when attempting to add a request to a policy for later
* handling; this operation is mandatory.
*
- * \param[in] policy The policy on which to enqueue \a nrq
- * \param[in] nrq The request to enqueue
+ * \param[in,out] policy The policy on which to enqueue \a nrq
+ * \param[in,out] nrq The request to enqueue
*
* \retval 0 success
* \retval != 0 error
* called after a request has been polled successfully from the policy
* for handling; this operation is mandatory.
*
- * \param[in] policy The policy the request \a nrq belongs to
- * \param[in] nrq The request to dequeue
+ * \param[in,out] policy The policy the request \a nrq belongs to
+ * \param[in,out] nrq The request to dequeue
*
* \see ptlrpc_nrs_req_del_nolock()
*/
void (*op_req_dequeue) (struct ptlrpc_nrs_policy *policy,
struct ptlrpc_nrs_request *nrq);
/**
- * Called before carrying out the request; should not block. Could be
- * used for job/resource control; this operation is optional.
- *
- * \param[in] policy The policy which is starting to handle request
- * \a nrq
- * \param[in] nrq The request
- *
- * \pre spin_is_locked(&svcpt->scp_req_lock)
- *
- * \see ptlrpc_nrs_req_start_nolock()
- */
- void (*op_req_start) (struct ptlrpc_nrs_policy *policy,
- struct ptlrpc_nrs_request *nrq);
- /**
* Called after the request being carried out. Could be used for
* job/resource control; this operation is optional.
*
- * \param[in] policy The policy which is stopping to handle request
- * \a nrq
- * \param[in] nrq The request
+ * \param[in,out] policy The policy which is stopping to handle request
+ * \a nrq
+ * \param[in,out] nrq The request
*
- * \pre spin_is_locked(&svcpt->scp_req_lock)
+ * \pre assert_spin_locked(&svcpt->scp_req_lock)
*
* \see ptlrpc_nrs_req_stop_nolock()
*/
/**
* Unregisters the policy's lprocfs interface with a PTLRPC service.
*
+ * In cases of failed policy registration in
+ * \e ptlrpc_nrs_policy_register(), this function may be called for a
+ * service which has not registered the policy successfully, so
+ * implementations of this method should make sure their operations are
+ * safe in such cases.
+ *
* \param[in] svc The service
*/
void (*op_lprocfs_fini) (struct ptlrpc_service *svc);
enum nrs_policy_flags {
/**
* Fallback policy, use this flag only on a single supported policy per
- * service. Do not use this flag for policies registering using
- * ptlrpc_nrs_policy_register() (i.e. ones that are not in
- * \e nrs_pols_builtin).
+ * service. The flag cannot be used on policies that use
+ * \e PTLRPC_NRS_FL_REG_EXTERN
*/
PTLRPC_NRS_FL_FALLBACK = (1 << 0),
/**
*/
PTLRPC_NRS_FL_REG_START = (1 << 1),
/**
- * This is a polciy registering externally with NRS core, via
- * ptlrpc_nrs_policy_register(), (i.e. one that is not in
- * \e nrs_pols_builtin. Used to avoid ptlrpc_nrs_policy_register()
- * racing with a policy start operation issued by the user via lprocfs.
+ * This is a policy registering from a module different to the one NRS
+ * core ships in (currently ptlrpc).
*/
PTLRPC_NRS_FL_REG_EXTERN = (1 << 2),
};
* in a service.
*/
enum ptlrpc_nrs_queue_type {
- PTLRPC_NRS_QUEUE_REG,
- PTLRPC_NRS_QUEUE_HP,
- PTLRPC_NRS_QUEUE_BOTH,
+ PTLRPC_NRS_QUEUE_REG = (1 << 0),
+ PTLRPC_NRS_QUEUE_HP = (1 << 1),
+ PTLRPC_NRS_QUEUE_BOTH = (PTLRPC_NRS_QUEUE_REG | PTLRPC_NRS_QUEUE_HP)
};
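/*
 * Editorial illustration, not part of this patch: with the bitmask values
 * above each queue can be tested independently (handle_regular_head() and
 * handle_hp_head() are hypothetical helpers):
 *
 *	if (queue & PTLRPC_NRS_QUEUE_REG)
 *		handle_regular_head();
 *	if (queue & PTLRPC_NRS_QUEUE_HP)
 *		handle_hp_head();
 *
 * so PTLRPC_NRS_QUEUE_BOTH selects both NRS heads with a single value.
 */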
/**
spinlock_t nrs_lock;
/** XXX Possibly replace svcpt->scp_req_lock with another lock here. */
/**
- * Linkage into nrs_core_heads_list
- */
- cfs_list_t nrs_heads;
- /**
* List of registered policies
*/
- cfs_list_t nrs_policy_list;
+ struct list_head nrs_policy_list;
/**
* List of policies with queued requests. Policies that have any
* outstanding requests are queued here, and this list is queried
* point transition away from the
* ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED state are drained.
*/
- cfs_list_t nrs_policy_queued;
+ struct list_head nrs_policy_queued;
/**
* Service partition for this NRS head
*/
unsigned long nrs_req_started;
/**
* # policies on this NRS
- * TODO: Can we avoid having this?
*/
unsigned nrs_num_pols;
/**
* unregistration
*/
unsigned nrs_stopping:1;
+ /**
+	 * NRS policy is throttling requests
+ */
+ unsigned nrs_throttling:1;
};
#define NRS_POL_NAME_MAX 16
+struct ptlrpc_nrs_pol_desc;
+
+/**
+ * Service compatibility predicate; this determines whether a policy is
+ * adequate for handling RPCs of a particular PTLRPC service.
+ *
+ * XXX: This should give the same result during policy registration and
+ * unregistration, and for all partitions of a service; so the result should
+ * not depend on temporal service or other properties that may influence the
+ * result.
+ */
+typedef bool (*nrs_pol_desc_compat_t) (const struct ptlrpc_service *svc,
+ const struct ptlrpc_nrs_pol_desc *desc);
+
+struct ptlrpc_nrs_pol_conf {
+ /**
+ * Human-readable policy name
+ */
+ char nc_name[NRS_POL_NAME_MAX];
+ /**
+ * NRS operations for this policy
+ */
+ const struct ptlrpc_nrs_pol_ops *nc_ops;
+ /**
+ * Service compatibility predicate
+ */
+ nrs_pol_desc_compat_t nc_compat;
+ /**
+ * Set for policies that support a single ptlrpc service, i.e. ones that
+ * have \a pd_compat set to nrs_policy_compat_one(). The variable value
+ * depicts the name of the single service that such policies are
+ * compatible with.
+ */
+ const char *nc_compat_svc_name;
+ /**
+ * Owner module for this policy descriptor; policies registering from a
+ * different module to the one the NRS framework is held within
+ * (currently ptlrpc), should set this field to THIS_MODULE.
+ */
+ struct module *nc_owner;
+ /**
+	 * Policy registration flags; a bitmask of \e nrs_policy_flags
+ */
+ unsigned nc_flags;
+};
+
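/*
 * Editorial sketch, not part of this patch: an externally-built policy module
 * would typically fill in a ptlrpc_nrs_pol_conf and register it from its
 * module init function. The "demo" names below are hypothetical, and
 * op_req_enqueue is assumed as the field name of the mandatory enqueue hook;
 * the conf fields, nrs_policy_compat_all() and ptlrpc_nrs_policy_register()
 * appear in this file.
 *
 *	static const struct ptlrpc_nrs_pol_ops nrs_demo_ops = {
 *		.op_res_get	= demo_res_get,
 *		.op_res_put	= demo_res_put,
 *		.op_req_get	= demo_req_get,
 *		.op_req_enqueue	= demo_req_enqueue,
 *		.op_req_dequeue	= demo_req_dequeue,
 *	};
 *
 *	static struct ptlrpc_nrs_pol_conf nrs_conf_demo = {
 *		.nc_name	= "demo",
 *		.nc_ops		= &nrs_demo_ops,
 *		.nc_compat	= nrs_policy_compat_all,
 *		.nc_owner	= THIS_MODULE,
 *		.nc_flags	= PTLRPC_NRS_FL_REG_EXTERN,
 *	};
 *
 *	rc = ptlrpc_nrs_policy_register(&nrs_conf_demo);
 */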
/**
* NRS policy registering descriptor
*
/**
* Human-readable policy name
*/
- char pd_name[NRS_POL_NAME_MAX];
+ char pd_name[NRS_POL_NAME_MAX];
/**
- * NRS operations for this policy
+ * Link into nrs_core::nrs_policies
*/
- struct ptlrpc_nrs_pol_ops *pd_ops;
+ struct list_head pd_list;
/**
- * Service Compatibility function; this determines whether a policy is
- * adequate for handling RPCs of a particular PTLRPC service.
- *
- * XXX:This should give the same result during policy
- * registration and unregistration, and for all partitions of a
- * service; so the result should not depend on temporal service
- * or other properties, that may influence the result.
+ * NRS operations for this policy
*/
- bool (*pd_compat) (struct ptlrpc_service *svc,
- const struct ptlrpc_nrs_pol_desc *desc);
+ const struct ptlrpc_nrs_pol_ops *pd_ops;
/**
- * Optionally set for policies that support a single ptlrpc service,
- * i.e. ones that have \a pd_compat set to nrs_policy_compat_one()
+ * Service compatibility predicate
*/
- char *pd_compat_svc_name;
+ nrs_pol_desc_compat_t pd_compat;
/**
- * Bitmask of nrs_policy_flags
+ * Set for policies that are compatible with only one PTLRPC service.
+ *
+ * \see ptlrpc_nrs_pol_conf::nc_compat_svc_name
*/
- unsigned pd_flags;
+ const char *pd_compat_svc_name;
/**
- * Link into nrs_core::nrs_policies
- */
- cfs_list_t pd_list;
+ * Owner module for this policy descriptor.
+ *
+ * We need to hold a reference to the module whenever we might make use
+ * of any of the module's contents, i.e.
+ * - If one or more instances of the policy are at a state where they
+ * might be handling a request, i.e.
+ * ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED or
+ * ptlrpc_nrs_pol_state::NRS_POL_STATE_STOPPING as we will have to
+ * call into the policy's ptlrpc_nrs_pol_ops() handlers. A reference
+ * is taken on the module when
+ * \e ptlrpc_nrs_pol_desc::pd_refs becomes 1, and released when it
+ * becomes 0, so that we hold only one reference to the module maximum
+ * at any time.
+ *
+ * We do not need to hold a reference to the module, even though we
+ * might use code and data from the module, in the following cases:
+ * - During external policy registration, because this should happen in
+ * the module's init() function, in which case the module is safe from
+ * removal because a reference is being held on the module by the
+ * kernel, and iirc kmod (and I guess module-init-tools also) will
+ * serialize any racing processes properly anyway.
+ * - During external policy unregistration, because this should happen
+ * in a module's exit() function, and any attempts to start a policy
+ * instance would need to take a reference on the module, and this is
+ * not possible once we have reached the point where the exit()
+ * handler is called.
+ * - During service registration and unregistration, as service setup
+ * and cleanup, and policy registration, unregistration and policy
+ * instance starting, are serialized by \e nrs_core::nrs_mutex, so
+ * as long as users adhere to the convention of registering policies
+ * in init() and unregistering them in module exit() functions, there
+ * should not be a race between these operations.
+ * - During any policy-specific lprocfs operations, because a reference
+ * is held by the kernel on a proc entry that has been entered by a
+	 *     syscall, so as long as proc entries are removed at
+	 *     unregistration time, unregistration and lprocfs operations
+	 *     will be properly serialized.
+ */
+ struct module *pd_owner;
+ /**
+ * Bitmask of \e nrs_policy_flags
+ */
+ unsigned pd_flags;
+ /**
+ * # of references on this descriptor
+ */
+ atomic_t pd_refs;
};
/**
*/
NRS_POL_STATE_INVALID,
/**
- * For now, this state is used exclusively for policies that register
- * externally to NRS core, i.e. ones that do so via
- * ptlrpc_nrs_policy_register() and are not part of nrs_pols_builtin;
- * it is used to prevent a race condition between the policy registering
- * with more than one service partition while service is operational,
- * and the user starting the policy via lprocfs.
- *
- * \see nrs_pol_make_avail()
- */
- NRS_POL_STATE_UNAVAIL,
- /**
* Policies are at this state either at the start of their life, or
* transition here when the user selects a different policy to act
* as the primary one.
* Linkage into the NRS head's list of policies,
* ptlrpc_nrs:nrs_policy_list
*/
- cfs_list_t pol_list;
+ struct list_head pol_list;
/**
* Linkage into the NRS head's list of policies with enqueued
* requests ptlrpc_nrs:nrs_policy_queued
*/
- cfs_list_t pol_list_queued;
+ struct list_head pol_list_queued;
/**
* Current state of this policy
*/
*/
struct ptlrpc_nrs *pol_nrs;
/**
- * NRS operations for this policy; points to ptlrpc_nrs_pol_desc::pd_ops
- */
- struct ptlrpc_nrs_pol_ops *pol_ops;
- /**
* Private policy data; varies by policy type
*/
void *pol_private;
/**
- * Human-readable policy name; point to ptlrpc_nrs_pol_desc::pd_name
+ * Policy descriptor for this policy instance.
*/
- char *pol_name;
+ struct ptlrpc_nrs_pol_desc *pol_desc;
};
/**
/**
* List of queued requests.
*/
- cfs_list_t fh_list;
+ struct list_head fh_list;
/**
* For debugging purposes.
*/
};
struct nrs_fifo_req {
- /** request header, must be the first member of structure */
- cfs_list_t fr_list;
+ struct list_head fr_list;
__u64 fr_sequence;
};
/** @} fifo */
/**
+ * \name CRR-N
+ *
+ * CRR-N, Client Round Robin over NIDs
+ * @{
+ */
+
+/**
+ * private data structure for CRR-N NRS
+ */
+struct nrs_crrn_net {
+ struct ptlrpc_nrs_resource cn_res;
+ cfs_binheap_t *cn_binheap;
+ cfs_hash_t *cn_cli_hash;
+ /**
+ * Used when a new scheduling round commences, in order to synchronize
+ * all clients with the new round number.
+ */
+ __u64 cn_round;
+ /**
+ * Determines the relevant ordering amongst request batches within a
+ * scheduling round.
+ */
+ __u64 cn_sequence;
+ /**
+ * Round Robin quantum; the maximum number of RPCs that each request
+ * batch for each client can have in a scheduling round.
+ */
+ __u16 cn_quantum;
+};
+
+/**
+ * Object representing a client in CRR-N, as identified by its NID
+ */
+struct nrs_crrn_client {
+ struct ptlrpc_nrs_resource cc_res;
+ struct hlist_node cc_hnode;
+ lnet_nid_t cc_nid;
+ /**
+ * The round number against which this client is currently scheduling
+ * requests.
+ */
+ __u64 cc_round;
+ /**
+ * The sequence number used for requests scheduled by this client during
+ * the current round number.
+ */
+ __u64 cc_sequence;
+ atomic_t cc_ref;
+ /**
+ * Round Robin quantum; the maximum number of RPCs the client is allowed
+ * to schedule in a single batch of each round.
+ */
+ __u16 cc_quantum;
+ /**
+ * # of pending requests for this client, on all existing rounds
+ */
+ __u16 cc_active;
+};
+
+/**
+ * CRR-N NRS request definition
+ */
+struct nrs_crrn_req {
+ /**
+ * Round number for this request; shared with all other requests in the
+ * same batch.
+ */
+ __u64 cr_round;
+ /**
+ * Sequence number for this request; shared with all other requests in
+ * the same batch.
+ */
+ __u64 cr_sequence;
+};
+
+/**
+ * CRR-N policy operations.
+ */
+enum nrs_ctl_crr {
+ /**
+ * Read the RR quantum size of a CRR-N policy.
+ */
+ NRS_CTL_CRRN_RD_QUANTUM = PTLRPC_NRS_CTL_1ST_POL_SPEC,
+ /**
+ * Write the RR quantum size of a CRR-N policy.
+ */
+ NRS_CTL_CRRN_WR_QUANTUM,
+};
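/*
 * Editorial sketch, not part of this patch: the (round, sequence) pair above
 * gives a total order over request batches; a binary-heap comparison for the
 * policy might look like the following (argument types simplified):
 *
 *	static int crr_req_before(struct nrs_crrn_req *a, struct nrs_crrn_req *b)
 *	{
 *		if (a->cr_round != b->cr_round)
 *			return a->cr_round < b->cr_round;	// earlier round first
 *		return a->cr_sequence < b->cr_sequence;		// then FIFO by sequence
 *	}
 */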
+
+/** @} CRR-N */
+
+/**
+ * \name ORR/TRR
+ *
+ * ORR/TRR (Object-based Round Robin/Target-based Round Robin) NRS policies
+ * @{
+ */
+
+/**
+ * Lower and upper byte offsets of a brw RPC
+ */
+struct nrs_orr_req_range {
+ __u64 or_start;
+ __u64 or_end;
+};
+
+/**
+ * RPC types supported by the ORR/TRR policies
+ */
+enum nrs_orr_supp {
+ NOS_OST_READ = (1 << 0),
+ NOS_OST_WRITE = (1 << 1),
+ NOS_OST_RW = (NOS_OST_READ | NOS_OST_WRITE),
+ /**
+ * Default value for policies.
+ */
+ NOS_DFLT = NOS_OST_READ
+};
+
+/**
+ * As unique keys for grouping RPCs together, we use the object's OST FID for
+ * the ORR policy, and the OST index for the TRR policy.
+ *
+ * XXX: We waste some space for TRR policy instances by using a union, but it
+ * allows us to consolidate some of the code between ORR and TRR, and these
+ * policies will probably eventually merge into one anyway.
+ */
+struct nrs_orr_key {
+ union {
+ /** object FID for ORR */
+ struct lu_fid ok_fid;
+ /** OST index for TRR */
+ __u32 ok_idx;
+ };
+};
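/*
 * Editorial sketch, not part of this patch: an ORR instance keys a brw RPC on
 * the object FID while a TRR instance keys it on the OST index; fid, ost_idx
 * and policy_is_orr below are hypothetical locals.
 *
 *	struct nrs_orr_key key;
 *
 *	memset(&key, 0, sizeof(key));
 *	if (policy_is_orr)
 *		key.ok_fid = *fid;	// group RPCs per backend-fs object
 *	else
 *		key.ok_idx = ost_idx;	// group RPCs per OST
 */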
+
+/**
+ * The largest base string for unique hash/slab object names is
+ * "nrs_orr_reg_", so 13 characters. We add 3 to this to be used for the CPT
+ * id number, so this _should_ be more than enough for the maximum number of
+ * CPTs on any system. If it does happen that this statement is incorrect,
+ * nrs_orr_genobjname() will inevitably yield a non-unique name and cause
+ * kmem_cache_create() to complain (on Linux), so the erroneous situation
+ * will hopefully not go unnoticed.
+ */
+#define NRS_ORR_OBJ_NAME_MAX (sizeof("nrs_orr_reg_") + 3)
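/*
 * Editorial sketch, not part of this patch: a unique per-CPT slab/hash name
 * would be generated roughly as follows ("reg" suffix and format assumed):
 *
 *	char name[NRS_ORR_OBJ_NAME_MAX];
 *
 *	snprintf(name, sizeof(name), "%s%d", "nrs_orr_reg_", cpt_id);
 *
 * which leaves room for CPT ids of up to three digits, hence the "+ 3".
 */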
+
+/**
+ * private data structure for ORR and TRR NRS
+ */
+struct nrs_orr_data {
+ struct ptlrpc_nrs_resource od_res;
+ cfs_binheap_t *od_binheap;
+ cfs_hash_t *od_obj_hash;
+ struct kmem_cache *od_cache;
+ /**
+ * Used when a new scheduling round commences, in order to synchronize
+ * all object or OST batches with the new round number.
+ */
+ __u64 od_round;
+ /**
+ * Determines the relevant ordering amongst request batches within a
+ * scheduling round.
+ */
+ __u64 od_sequence;
+ /**
+ * RPC types that are currently supported.
+ */
+ enum nrs_orr_supp od_supp;
+ /**
+	 * Round Robin quantum; the maximum number of RPCs that each request
+ * batch for each object or OST can have in a scheduling round.
+ */
+ __u16 od_quantum;
+ /**
+ * Whether to use physical disk offsets or logical file offsets.
+ */
+ bool od_physical;
+ /**
+ * XXX: We need to provide a persistently allocated string to hold
+	 * unique object names for this policy, since in the Linux versions
+	 * currently supported by Lustre, kmem_cache_create() just sets a
+	 * pointer to the name string provided. kstrdup() is used in the
+	 * version of kmem_cache_create() in current Linux mainline, so we
+	 * may be able to remove this in the future.
+ */
+ char od_objname[NRS_ORR_OBJ_NAME_MAX];
+};
+
+/**
+ * Represents a backend-fs object or OST in the ORR and TRR policies
+ * respectively
+ */
+struct nrs_orr_object {
+ struct ptlrpc_nrs_resource oo_res;
+ struct hlist_node oo_hnode;
+ /**
+ * The round number against which requests are being scheduled for this
+ * object or OST
+ */
+ __u64 oo_round;
+ /**
+ * The sequence number used for requests scheduled for this object or
+ * OST during the current round number.
+ */
+ __u64 oo_sequence;
+ /**
+ * The key of the object or OST for which this structure instance is
+ * scheduling RPCs
+ */
+ struct nrs_orr_key oo_key;
+ long oo_ref;
+ /**
+ * Round Robin quantum; the maximum number of RPCs that are allowed to
+ * be scheduled for the object or OST in a single batch of each round.
+ */
+ __u16 oo_quantum;
+ /**
+ * # of pending requests for this object or OST, on all existing rounds
+ */
+ __u16 oo_active;
+};
+
+/**
+ * ORR/TRR NRS request definition
+ */
+struct nrs_orr_req {
+ /**
+ * The offset range this request covers
+ */
+ struct nrs_orr_req_range or_range;
+ /**
+ * Round number for this request; shared with all other requests in the
+ * same batch.
+ */
+ __u64 or_round;
+ /**
+ * Sequence number for this request; shared with all other requests in
+ * the same batch.
+ */
+ __u64 or_sequence;
+ /**
+ * For debugging purposes.
+ */
+ struct nrs_orr_key or_key;
+ /**
+ * An ORR policy instance has filled in request information while
+ * enqueueing the request on the service partition's regular NRS head.
+ */
+ unsigned int or_orr_set:1;
+ /**
+ * A TRR policy instance has filled in request information while
+ * enqueueing the request on the service partition's regular NRS head.
+ */
+ unsigned int or_trr_set:1;
+ /**
+ * Request offset ranges have been filled in with logical offset
+ * values.
+ */
+ unsigned int or_logical_set:1;
+ /**
+ * Request offset ranges have been filled in with physical offset
+ * values.
+ */
+ unsigned int or_physical_set:1;
+};
+
+/** @} ORR/TRR */
+
+#include <lustre_nrs_tbf.h>
+
+/**
* NRS request
*
* Instances of this object exist embedded within ptlrpc_request; the main
unsigned nr_res_idx;
unsigned nr_initialized:1;
unsigned nr_enqueued:1;
- unsigned nr_dequeued:1;
unsigned nr_started:1;
unsigned nr_finalized:1;
cfs_binheap_node_t nr_node;
* Fields for the FIFO policy
*/
struct nrs_fifo_req fifo;
+ /**
+	 * CRR-N request definition
+ */
+ struct nrs_crrn_req crr;
+ /** ORR and TRR share the same request definition */
+ struct nrs_orr_req orr;
+ /**
+ * TBF request definition
+ */
+ struct nrs_tbf_req tbf;
} nr_u;
/**
* Externally-registering policies may want to use this to allocate
void (*hpreq_fini)(struct ptlrpc_request *);
};
+struct ptlrpc_cli_req {
+ /** For bulk requests on client only: bulk descriptor */
+ struct ptlrpc_bulk_desc *cr_bulk;
+ /** optional time limit for send attempts */
+ cfs_duration_t cr_delay_limit;
+ /** time request was first queued */
+ cfs_time_t cr_queued_time;
+ /** request sent timeval */
+ struct timeval cr_sent_tv;
+ /** time for request really sent out */
+ time_t cr_sent_out;
+ /** when req reply unlink must finish. */
+ time_t cr_reply_deadline;
+ /** when req bulk unlink must finish. */
+ time_t cr_bulk_deadline;
+ /** Portal to which this request would be sent */
+ short cr_req_ptl;
+ /** Portal where to wait for reply and where reply would be sent */
+ short cr_rep_ptl;
+ /** request resending number */
+ unsigned int cr_resend_nr;
+ /** What was import generation when this request was sent */
+ int cr_imp_gen;
+ enum lustre_imp_state cr_send_state;
+ /** Per-request waitq introduced by bug 21938 for recovery waiting */
+ wait_queue_head_t cr_set_waitq;
+ /** Link item for request set lists */
+ struct list_head cr_set_chain;
+ /** link to waited ctx */
+ struct list_head cr_ctx_chain;
+
+ /** client's half ctx */
+ struct ptlrpc_cli_ctx *cr_cli_ctx;
+ /** Link back to the request set */
+ struct ptlrpc_request_set *cr_set;
+ /** outgoing request MD handle */
+ lnet_handle_md_t cr_req_md_h;
+ /** request-out callback parameter */
+ struct ptlrpc_cb_id cr_req_cbid;
+ /** incoming reply MD handle */
+ lnet_handle_md_t cr_reply_md_h;
+ wait_queue_head_t cr_reply_waitq;
+ /** reply callback parameter */
+ struct ptlrpc_cb_id cr_reply_cbid;
+ /** Async completion handler, called when reply is received */
+ ptlrpc_interpterer_t cr_reply_interp;
+	/** Resend handler, called when the request is resent to update RPC data */
+ ptlrpc_resend_cb_t cr_resend_cb;
+ /** Async completion context */
+ union ptlrpc_async_args cr_async_args;
+ /** Opaq data for replay and commit callbacks. */
+ void *cr_cb_data;
+ /**
+ * Commit callback, called when request is committed and about to be
+ * freed.
+ */
+ void (*cr_commit_cb)(struct ptlrpc_request *);
+ /** Replay callback, called after request is replayed at recovery */
+ void (*cr_replay_cb)(struct ptlrpc_request *);
+};
+
+/** client request member alias */
+/* NB: these aliases should NOT be used by any new code; instead they should
+ * be removed step by step to avoid potential abuse */
+#define rq_bulk rq_cli.cr_bulk
+#define rq_delay_limit rq_cli.cr_delay_limit
+#define rq_queued_time rq_cli.cr_queued_time
+#define rq_sent_tv rq_cli.cr_sent_tv
+#define rq_real_sent rq_cli.cr_sent_out
+#define rq_reply_deadline rq_cli.cr_reply_deadline
+#define rq_bulk_deadline rq_cli.cr_bulk_deadline
+#define rq_nr_resend rq_cli.cr_resend_nr
+#define rq_request_portal rq_cli.cr_req_ptl
+#define rq_reply_portal rq_cli.cr_rep_ptl
+#define rq_import_generation rq_cli.cr_imp_gen
+#define rq_send_state rq_cli.cr_send_state
+#define rq_set_chain rq_cli.cr_set_chain
+#define rq_ctx_chain rq_cli.cr_ctx_chain
+#define rq_set rq_cli.cr_set
+#define rq_set_waitq rq_cli.cr_set_waitq
+#define rq_cli_ctx rq_cli.cr_cli_ctx
+#define rq_req_md_h rq_cli.cr_req_md_h
+#define rq_req_cbid rq_cli.cr_req_cbid
+#define rq_reply_md_h rq_cli.cr_reply_md_h
+#define rq_reply_waitq rq_cli.cr_reply_waitq
+#define rq_reply_cbid rq_cli.cr_reply_cbid
+#define rq_interpret_reply rq_cli.cr_reply_interp
+#define rq_resend_cb rq_cli.cr_resend_cb
+#define rq_async_args rq_cli.cr_async_args
+#define rq_cb_data rq_cli.cr_cb_data
+#define rq_commit_cb rq_cli.cr_commit_cb
+#define rq_replay_cb rq_cli.cr_replay_cb
+
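/*
 * Editorial note, not part of this patch: thanks to the aliases above,
 * existing client-side code keeps compiling unchanged; for example
 *
 *	req->rq_interpret_reply = foo_interpret;	// hypothetical callback
 *
 * now expands to
 *
 *	req->rq_cli.cr_reply_interp = foo_interpret;
 *
 * i.e. it resolves into the client-only half of the new rq_cli/rq_srv union.
 */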
+struct ptlrpc_srv_req {
+ /** initial thread servicing this request */
+ struct ptlrpc_thread *sr_svc_thread;
+ /**
+	 * Server-side list of incoming unserved requests sorted by arrival
+	 * time. Traversed from time to time to notice about-to-expire
+	 * requests and send back "early replies" to clients to let them
+	 * know the server is alive and well, just too busy to service their
+	 * requests in time.
+ */
+ struct list_head sr_timed_list;
+ /** server-side per-export list */
+ struct list_head sr_exp_list;
+	/** server-side history, used for debugging purposes. */
+ struct list_head sr_hist_list;
+ /** history sequence # */
+ __u64 sr_hist_seq;
+ /** the index of service's srv_at_array into which request is linked */
+ time_t sr_at_index;
+ /** authed uid */
+ uid_t sr_auth_uid;
+ /** authed uid mapped to */
+ uid_t sr_auth_mapped_uid;
+ /** RPC is generated from what part of Lustre */
+ enum lustre_sec_part sr_sp_from;
+ /** request session context */
+ struct lu_context sr_ses;
+ /** \addtogroup nrs
+ * @{
+ */
+ /** stub for NRS request */
+ struct ptlrpc_nrs_request sr_nrq;
+ /** @} nrs */
+ /** request arrival time */
+ struct timeval sr_arrival_time;
+ /** server's half ctx */
+ struct ptlrpc_svc_ctx *sr_svc_ctx;
+ /** (server side), pointed directly into req buffer */
+ struct ptlrpc_user_desc *sr_user_desc;
+ /** separated reply state */
+ struct ptlrpc_reply_state *sr_reply_state;
+ /** server-side hp handlers */
+ struct ptlrpc_hpreq_ops *sr_ops;
+ /** incoming request buffer */
+ struct ptlrpc_request_buffer_desc *sr_rqbd;
+};
+
+/** server request member alias */
+/* NB: these aliases should NOT be used by any new code; instead they should
+ * be removed step by step to avoid potential abuse */
+#define rq_svc_thread rq_srv.sr_svc_thread
+#define rq_timed_list rq_srv.sr_timed_list
+#define rq_exp_list rq_srv.sr_exp_list
+#define rq_history_list rq_srv.sr_hist_list
+#define rq_history_seq rq_srv.sr_hist_seq
+#define rq_at_index rq_srv.sr_at_index
+#define rq_auth_uid rq_srv.sr_auth_uid
+#define rq_auth_mapped_uid rq_srv.sr_auth_mapped_uid
+#define rq_sp_from rq_srv.sr_sp_from
+#define rq_session rq_srv.sr_ses
+#define rq_nrq rq_srv.sr_nrq
+#define rq_arrival_time rq_srv.sr_arrival_time
+#define rq_reply_state rq_srv.sr_reply_state
+#define rq_svc_ctx rq_srv.sr_svc_ctx
+#define rq_user_desc rq_srv.sr_user_desc
+#define rq_ops rq_srv.sr_ops
+#define rq_rqbd rq_srv.sr_rqbd
+
/**
* Represents remote procedure call.
*
*/
struct ptlrpc_request {
/* Request type: one of PTL_RPC_MSG_* */
- int rq_type;
+ int rq_type;
/** Result of request processing */
- int rq_status;
- /**
- * Linkage item through which this request is included into
- * sending/delayed lists on client and into rqbd list on server
- */
- cfs_list_t rq_list;
- /**
- * Server side list of incoming unserved requests sorted by arrival
- * time. Traversed from time to time to notice about to expire
- * requests and sent back "early replies" to clients to let them
- * know server is alive and well, just very busy to service their
- * requests in time
- */
- cfs_list_t rq_timed_list;
- /** server-side history, used for debuging purposes. */
- cfs_list_t rq_history_list;
- /** server-side per-export list */
- cfs_list_t rq_exp_list;
- /** server-side hp handlers */
- struct ptlrpc_hpreq_ops *rq_ops;
-
- /** initial thread servicing this request */
- struct ptlrpc_thread *rq_svc_thread;
-
- /** history sequence # */
- __u64 rq_history_seq;
- /** \addtogroup nrs
- * @{
+ int rq_status;
+ /**
+ * Linkage item through which this request is included into
+ * sending/delayed lists on client and into rqbd list on server
*/
- /** stub for NRS request */
- struct ptlrpc_nrs_request rq_nrq;
- /** @} nrs */
- /** the index of service's srv_at_array into which request is linked */
- time_t rq_at_index;
- /** Lock to protect request flags and some other important bits, like
- * rq_list
- */
- spinlock_t rq_lock;
- /** client-side flags are serialized by rq_lock */
+ struct list_head rq_list;
+ /** Lock to protect request flags and some other important bits, like
+ * rq_list
+ */
+ spinlock_t rq_lock;
+ /** client-side flags are serialized by rq_lock @{ */
unsigned int rq_intr:1, rq_replied:1, rq_err:1,
rq_timedout:1, rq_resend:1, rq_restart:1,
/**
rq_replay:1,
rq_no_resend:1, rq_waiting:1, rq_receiving_reply:1,
rq_no_delay:1, rq_net_err:1, rq_wait_ctx:1,
- rq_early:1, rq_must_unlink:1,
- rq_memalloc:1, /* req originated from "kswapd" */
- /* server-side flags */
- rq_packed_final:1, /* packed final reply */
- rq_hp:1, /* high priority RPC */
- rq_at_linked:1, /* link into service's srv_at_array */
- rq_reply_truncate:1,
- rq_committed:1,
- /* whether the "rq_set" is a valid one */
- rq_invalid_rqset:1,
+ rq_early:1,
+ rq_req_unlinked:1, /* unlinked request buffer from lnet */
+ rq_reply_unlinked:1, /* unlinked reply buffer from lnet */
+ rq_memalloc:1, /* req originated from "kswapd" */
+ rq_committed:1,
+ rq_reply_truncated:1,
+ /** whether the "rq_set" is a valid one */
+ rq_invalid_rqset:1,
rq_generation_set:1,
- /* do not resend request on -EINPROGRESS */
+ /** do not resend request on -EINPROGRESS */
rq_no_retry_einprogress:1,
/* allow the req to be sent if the import is in recovery
* status */
- rq_allow_replay:1;
-
- unsigned int rq_nr_resend;
-
- enum rq_phase rq_phase; /* one of RQ_PHASE_* */
- enum rq_phase rq_next_phase; /* one of RQ_PHASE_* to be used next */
- cfs_atomic_t rq_refcount;/* client-side refcount for SENT race,
- server-side refcounf for multiple replies */
+ rq_allow_replay:1,
+ /* bulk request, sent to server, but uncommitted */
+ rq_unstable:1;
+ /** @} */
- /** Portal to which this request would be sent */
- short rq_request_portal; /* XXX FIXME bug 249 */
- /** Portal where to wait for reply and where reply would be sent */
- short rq_reply_portal; /* XXX FIXME bug 249 */
+ /** server-side flags @{ */
+ unsigned int
+ rq_hp:1, /**< high priority RPC */
+ rq_at_linked:1, /**< link into service's srv_at_array */
+ rq_packed_final:1; /**< packed final reply */
+ /** @} */
+ /** one of RQ_PHASE_* */
+ enum rq_phase rq_phase;
+ /** one of RQ_PHASE_* to be used next */
+ enum rq_phase rq_next_phase;
+ /**
+	 * client-side refcount for SENT race, server-side refcount
+	 * for multiple replies
+ */
+ atomic_t rq_refcount;
/**
* client-side:
* !rq_truncate : # reply bytes actually received,
int rq_reqlen;
/** Reply length */
int rq_replen;
+ /** Pool if request is from preallocated list */
+ struct ptlrpc_request_pool *rq_pool;
/** Request message - what client sent */
struct lustre_msg *rq_reqmsg;
/** Reply message - server response */
__u64 rq_transno;
/** xid */
__u64 rq_xid;
- /**
- * List item to for replay list. Not yet commited requests get linked
- * there.
- * Also see \a rq_replay comment above.
- */
- cfs_list_t rq_replay_list;
-
- /**
- * security and encryption data
- * @{ */
- struct ptlrpc_cli_ctx *rq_cli_ctx; /**< client's half ctx */
- struct ptlrpc_svc_ctx *rq_svc_ctx; /**< server's half ctx */
- cfs_list_t rq_ctx_chain; /**< link to waited ctx */
-
- struct sptlrpc_flavor rq_flvr; /**< for client & server */
- enum lustre_sec_part rq_sp_from;
+ /**
+	 * List item for the replay list. Not yet committed requests get
+	 * linked there.
+	 * Also see \a rq_replay comment above.
+	 * It is also the linkage into obd_export::exp_req_replay_queue
+ */
+ struct list_head rq_replay_list;
+	/** non-shared members for client & server request */
+ union {
+ struct ptlrpc_cli_req rq_cli;
+ struct ptlrpc_srv_req rq_srv;
+ };
+ /**
+ * security and encryption data
+ * @{ */
+ /** description of flavors for client & server */
+ struct sptlrpc_flavor rq_flvr;
/* client/server security flags */
unsigned int
rq_pack_bulk:1,
/* doesn't expect reply FIXME */
rq_no_reply:1,
- rq_pill_init:1; /* pill initialized */
+ rq_pill_init:1, /* pill initialized */
+ rq_srv_req:1; /* server request */
- uid_t rq_auth_uid; /* authed uid */
- uid_t rq_auth_mapped_uid; /* authed uid mapped to */
- /* (server side), pointed directly into req buffer */
- struct ptlrpc_user_desc *rq_user_desc;
-
- /* various buffer pointers */
- struct lustre_msg *rq_reqbuf; /* req wrapper */
- char *rq_repbuf; /* rep buffer */
- struct lustre_msg *rq_repdata; /* rep wrapper msg */
- struct lustre_msg *rq_clrbuf; /* only in priv mode */
+ /** various buffer pointers */
+ struct lustre_msg *rq_reqbuf; /**< req wrapper */
+ char *rq_repbuf; /**< rep buffer */
+ struct lustre_msg *rq_repdata; /**< rep wrapper msg */
+ /** only in priv mode */
+ struct lustre_msg *rq_clrbuf;
int rq_reqbuf_len; /* req wrapper buf len */
int rq_reqdata_len; /* req wrapper msg len */
int rq_repbuf_len; /* rep buffer len */
int rq_clrdata_len; /* only in priv mode */
/** early replies go to offset 0, regular replies go after that */
- unsigned int rq_reply_off;
-
- /** @} */
-
- /** Fields that help to see if request and reply were swabbed or not */
- __u32 rq_req_swab_mask;
- __u32 rq_rep_swab_mask;
-
- /** What was import generation when this request was sent */
- int rq_import_generation;
- enum lustre_imp_state rq_send_state;
-
- /** how many early replies (for stats) */
- int rq_early_count;
-
- /** client+server request */
- lnet_handle_md_t rq_req_md_h;
- struct ptlrpc_cb_id rq_req_cbid;
- /** optional time limit for send attempts */
- cfs_duration_t rq_delay_limit;
- /** time request was first queued */
- cfs_time_t rq_queued_time;
-
- /* server-side... */
- /** request arrival time */
- struct timeval rq_arrival_time;
- /** separated reply state */
- struct ptlrpc_reply_state *rq_reply_state;
- /** incoming request buffer */
- struct ptlrpc_request_buffer_desc *rq_rqbd;
-
- /** client-only incoming reply */
- lnet_handle_md_t rq_reply_md_h;
- cfs_waitq_t rq_reply_waitq;
- struct ptlrpc_cb_id rq_reply_cbid;
-
- /** our LNet NID */
- lnet_nid_t rq_self;
- /** Peer description (the other side) */
- lnet_process_id_t rq_peer;
- /** Server-side, export on which request was received */
- struct obd_export *rq_export;
- /** Client side, import where request is being sent */
- struct obd_import *rq_import;
-
- /** Replay callback, called after request is replayed at recovery */
- void (*rq_replay_cb)(struct ptlrpc_request *);
- /**
- * Commit callback, called when request is committed and about to be
- * freed.
- */
- void (*rq_commit_cb)(struct ptlrpc_request *);
- /** Opaq data for replay and commit callbacks. */
- void *rq_cb_data;
+ unsigned int rq_reply_off;
- /** For bulk requests on client only: bulk descriptor */
- struct ptlrpc_bulk_desc *rq_bulk;
-
- /** client outgoing req */
- /**
- * when request/reply sent (secs), or time when request should be sent
- */
- time_t rq_sent;
- /** time for request really sent out */
- time_t rq_real_sent;
-
- /** when request must finish. volatile
- * so that servers' early reply updates to the deadline aren't
- * kept in per-cpu cache */
- volatile time_t rq_deadline;
- /** when req reply unlink must finish. */
- time_t rq_reply_deadline;
- /** when req bulk unlink must finish. */
- time_t rq_bulk_deadline;
- /**
- * service time estimate (secs)
- * If the requestsis not served by this time, it is marked as timed out.
- */
- int rq_timeout;
+ /** @} */
- /** Multi-rpc bits */
- /** Per-request waitq introduced by bug 21938 for recovery waiting */
- cfs_waitq_t rq_set_waitq;
- /** Link item for request set lists */
- cfs_list_t rq_set_chain;
- /** Link back to the request set */
- struct ptlrpc_request_set *rq_set;
- /** Async completion handler, called when reply is received */
- ptlrpc_interpterer_t rq_interpret_reply;
- /** Async completion context */
- union ptlrpc_async_args rq_async_args;
-
- /** Pool if request is from preallocated list */
- struct ptlrpc_request_pool *rq_pool;
-
- struct lu_context rq_session;
- struct lu_context rq_recov_session;
-
- /** request format description */
- struct req_capsule rq_pill;
+ /** Fields that help to see if request and reply were swabbed or not */
+ __u32 rq_req_swab_mask;
+ __u32 rq_rep_swab_mask;
+
+ /** how many early replies (for stats) */
+ int rq_early_count;
+ /** Server-side, export on which request was received */
+ struct obd_export *rq_export;
+ /** import where request is being sent */
+ struct obd_import *rq_import;
+ /** our LNet NID */
+ lnet_nid_t rq_self;
+ /** Peer description (the other side) */
+ lnet_process_id_t rq_peer;
+ /**
+ * service time estimate (secs)
+ * If the request is not served by this time, it is marked as timed out.
+ */
+ int rq_timeout;
+ /**
+ * when request/reply sent (secs), or time when request should be sent
+ */
+ time_t rq_sent;
+ /** when request must finish. */
+ time_t rq_deadline;
+ /** request format description */
+ struct req_capsule rq_pill;
};
/**
/** \addtogroup nrs
* @{
*/
-int ptlrpc_nrs_policy_register(struct ptlrpc_nrs_pol_desc *desc);
-int ptlrpc_nrs_policy_unregister(struct ptlrpc_nrs_pol_desc *desc);
+int ptlrpc_nrs_policy_register(struct ptlrpc_nrs_pol_conf *conf);
+int ptlrpc_nrs_policy_unregister(struct ptlrpc_nrs_pol_conf *conf);
void ptlrpc_nrs_req_hp_move(struct ptlrpc_request *req);
void nrs_policy_get_info_locked(struct ptlrpc_nrs_policy *policy,
struct ptlrpc_nrs_pol_info *info);
*
* For a reliable result, this should be checked under svcpt->scp_req lock.
*/
-static inline bool
-ptlrpc_nrs_req_can_move(struct ptlrpc_request *req)
+static inline bool ptlrpc_nrs_req_can_move(struct ptlrpc_request *req)
{
struct ptlrpc_nrs_request *nrq = &req->rq_nrq;
/**
* Returns 1 if request buffer at offset \a index was already swabbed
*/
-static inline int lustre_req_swabbed(struct ptlrpc_request *req, int index)
+static inline int lustre_req_swabbed(struct ptlrpc_request *req, size_t index)
{
LASSERT(index < sizeof(req->rq_req_swab_mask) * 8);
return req->rq_req_swab_mask & (1 << index);
/**
* Returns 1 if request reply buffer at offset \a index was already swabbed
*/
-static inline int lustre_rep_swabbed(struct ptlrpc_request *req, int index)
+static inline int lustre_rep_swabbed(struct ptlrpc_request *req, size_t index)
{
LASSERT(index < sizeof(req->rq_rep_swab_mask) * 8);
return req->rq_rep_swab_mask & (1 << index);
/**
* Mark request buffer at offset \a index that it was already swabbed
*/
-static inline void lustre_set_req_swabbed(struct ptlrpc_request *req, int index)
+static inline void lustre_set_req_swabbed(struct ptlrpc_request *req,
+ size_t index)
{
LASSERT(index < sizeof(req->rq_req_swab_mask) * 8);
LASSERT((req->rq_req_swab_mask & (1 << index)) == 0);
/**
* Mark request reply buffer at offset \a index that it was already swabbed
*/
-static inline void lustre_set_rep_swabbed(struct ptlrpc_request *req, int index)
+static inline void lustre_set_rep_swabbed(struct ptlrpc_request *req,
+ size_t index)
{
LASSERT(index < sizeof(req->rq_rep_swab_mask) * 8);
LASSERT((req->rq_rep_swab_mask & (1 << index)) == 0);
/**
* Debugging functions and helpers to print request structure into debug log
* @{
- */
+ */
/* Spare the preprocessor, spoil the bugs. */
#define FLAG(field, str) (field ? str : "")
* Structure that defines a single page of a bulk transfer
*/
struct ptlrpc_bulk_page {
- /** Linkage to list of pages in a bulk */
- cfs_list_t bp_link;
- /**
- * Number of bytes in a page to transfer starting from \a bp_pageoffset
- */
- int bp_buflen;
- /** offset within a page */
- int bp_pageoffset;
- /** The page itself */
- struct page *bp_page;
+ /** Linkage to list of pages in a bulk */
+ struct list_head bp_link;
+ /**
+ * Number of bytes in a page to transfer starting from \a bp_pageoffset
+ */
+ int bp_buflen;
+ /** offset within a page */
+ int bp_pageoffset;
+ /** The page itself */
+ struct page *bp_page;
};
#define BULK_GET_SOURCE 0
struct obd_import *bd_import;
/** Back pointer to the request */
struct ptlrpc_request *bd_req;
- cfs_waitq_t bd_waitq; /* server side only WQ */
+ wait_queue_head_t bd_waitq; /* server side only WQ */
int bd_iov_count; /* # entries in bd_iov */
int bd_max_iov; /* allocated size of bd_iov */
int bd_nob; /* # bytes covered */
/** array of associated MDs */
lnet_handle_md_t bd_mds[PTLRPC_BULK_OPS_COUNT];
-#if defined(__KERNEL__)
/*
* encrypt iov, size is either 0 or bd_iov_count.
*/
lnet_kiov_t *bd_enc_iov;
lnet_kiov_t bd_iov[0];
-#else
- lnet_md_iovec_t bd_iov[0];
-#endif
};
enum {
/**
* List of active threads in svc->srv_threads
*/
- cfs_list_t t_link;
+ struct list_head t_link;
/**
* thread-private data (preallocated memory)
*/
/**
* service thread pid
*/
- pid_t t_pid;
+ pid_t t_pid;
/**
* put watchdog in the structure per thread b=14840
*/
* the svc this thread belonged to b=18582
*/
struct ptlrpc_service_part *t_svcpt;
- cfs_waitq_t t_ctl_waitq;
+ wait_queue_head_t t_ctl_waitq;
struct lu_env *t_env;
char t_name[PTLRPC_THR_NAME_LEN];
};
* More than one request can fit into the buffer.
*/
struct ptlrpc_request_buffer_desc {
- /** Link item for rqbds on a service */
- cfs_list_t rqbd_list;
- /** History of requests for this buffer */
- cfs_list_t rqbd_reqs;
- /** Back pointer to service for which this buffer is registered */
- struct ptlrpc_service_part *rqbd_svcpt;
- /** LNet descriptor */
- lnet_handle_md_t rqbd_md_h;
- int rqbd_refcount;
- /** The buffer itself */
- char *rqbd_buffer;
- struct ptlrpc_cb_id rqbd_cbid;
- /**
- * This "embedded" request structure is only used for the
- * last request to fit into the buffer
- */
- struct ptlrpc_request rqbd_req;
+ /** Link item for rqbds on a service */
+ struct list_head rqbd_list;
+ /** History of requests for this buffer */
+ struct list_head rqbd_reqs;
+ /** Back pointer to service for which this buffer is registered */
+ struct ptlrpc_service_part *rqbd_svcpt;
+ /** LNet descriptor */
+ lnet_handle_md_t rqbd_md_h;
+ int rqbd_refcount;
+ /** The buffer itself */
+ char *rqbd_buffer;
+ struct ptlrpc_cb_id rqbd_cbid;
+ /**
+ * This "embedded" request structure is only used for the
+ * last request to fit into the buffer
+ */
+ struct ptlrpc_request rqbd_req;
};
typedef int (*svc_handler_t)(struct ptlrpc_request *req);
struct ptlrpc_service {
/** serialize /proc operations */
spinlock_t srv_lock;
- /** most often accessed fields */
- /** chain thru all services */
- cfs_list_t srv_list;
+ /** most often accessed fields */
+ /** chain thru all services */
+ struct list_head srv_list;
/** service operations table */
struct ptlrpc_service_ops srv_ops;
/** only statically allocated strings here; we don't clean them */
/** only statically allocated strings here; we don't clean them */
char *srv_thread_name;
/** service thread list */
- cfs_list_t srv_threads;
+ struct list_head srv_threads;
/** threads # should be created for each partition on initializing */
int srv_nthrs_cpt_init;
/** limit of threads number for each partition */
int srv_nthrs_cpt_limit;
/** Root of /proc dir tree for this service */
- cfs_proc_dir_entry_t *srv_procroot;
+ struct proc_dir_entry *srv_procroot;
/** Pointer to statistic data for this service */
struct lprocfs_stats *srv_stats;
/** # hp per lp reqs to handle */
/** # running threads */
int scp_nthrs_running;
/** service threads list */
- cfs_list_t scp_threads;
+ struct list_head scp_threads;
/**
* serialize the following fields, used for protecting
/** # incoming reqs */
int scp_nreqs_incoming;
/** request buffers to be reposted */
- cfs_list_t scp_rqbd_idle;
+ struct list_head scp_rqbd_idle;
/** req buffers receiving */
- cfs_list_t scp_rqbd_posted;
+ struct list_head scp_rqbd_posted;
/** incoming reqs */
- cfs_list_t scp_req_incoming;
+ struct list_head scp_req_incoming;
/** timeout before re-posting reqs, in tick */
cfs_duration_t scp_rqbd_timeout;
/**
* all threads sleep on this. This wait-queue is signalled when new
* incoming request arrives and when difficult reply has to be handled.
*/
- cfs_waitq_t scp_waitq;
+ wait_queue_head_t scp_waitq;
/** request history */
- cfs_list_t scp_hist_reqs;
+ struct list_head scp_hist_reqs;
/** request buffer history */
- cfs_list_t scp_hist_rqbds;
+ struct list_head scp_hist_rqbds;
/** # request buffers in history */
int scp_hist_nrqbds;
/** sequence number for request */
/** reqs waiting for replies */
struct ptlrpc_at_array scp_at_array;
/** early reply timer */
- cfs_timer_t scp_at_timer;
+ struct timer_list scp_at_timer;
/** debug */
cfs_time_t scp_at_checktime;
/** check early replies */
*/
spinlock_t scp_rep_lock __cfs_cacheline_aligned;
/** all the active replies */
- cfs_list_t scp_rep_active;
-#ifndef __KERNEL__
- /** replies waiting for service */
- cfs_list_t scp_rep_queue;
-#endif
+ struct list_head scp_rep_active;
/** List of free reply_states */
- cfs_list_t scp_rep_idle;
+ struct list_head scp_rep_idle;
/** waitq to run, when adding stuff to srv_free_rs_list */
- cfs_waitq_t scp_rep_waitq;
+ wait_queue_head_t scp_rep_waitq;
/** # 'difficult' replies */
- cfs_atomic_t scp_nreps_difficult;
+ atomic_t scp_nreps_difficult;
};
#define ptlrpc_service_for_each_part(part, i, svc) \
*/
struct ptlrpc_request_set *pc_set;
/**
- * Thread name used in cfs_daemonize()
+ * Thread name used in kthread_run()
*/
char pc_name[16];
/**
* Record the partner index to be processed next.
*/
int pc_cursor;
-#ifndef __KERNEL__
- /**
- * Async rpcs flag to make sure that ptlrpcd_check() is called only
- * once.
- */
- int pc_recurred;
- /**
- * Currently not used.
- */
- void *pc_callback;
- /**
- * User-space async rpcs callback.
- */
- void *pc_wait_callback;
- /**
- * User-space check idle rpcs callback.
- */
- void *pc_idle_callback;
-#endif
};
/* Bits for pc_flags */
* \addtogroup nrs
* @{
*
- * Service compatibility function; policy is compatible with all services.
+ * Service compatibility function; the policy is compatible with all services.
*
* \param[in] svc The service the policy is attempting to register with.
* \param[in] desc The policy descriptor
*
- * \retval true The policy is compatible with the NRS head
+ * \retval true The policy is compatible with the service
*
* \see ptlrpc_nrs_pol_desc::pd_compat()
*/
-static inline bool
-nrs_policy_compat_all(struct ptlrpc_service *svc,
- const struct ptlrpc_nrs_pol_desc *desc)
+static inline bool nrs_policy_compat_all(const struct ptlrpc_service *svc,
+ const struct ptlrpc_nrs_pol_desc *desc)
{
return true;
}
/**
- * Service compatibility function; policy is compatible with only a specific
+ * Service compatibility function; the policy is compatible with only a specific
* service which is identified by its human-readable name at
* ptlrpc_service::srv_name.
*
* \param[in] svc The service the policy is attempting to register with.
* \param[in] desc The policy descriptor
*
- * \retval false The policy is not compatible with the NRS head
- * \retval true The policy is compatible with the NRS head
+ * \retval false The policy is not compatible with the service
+ * \retval true The policy is compatible with the service
*
* \see ptlrpc_nrs_pol_desc::pd_compat()
*/
-static inline bool
-nrs_policy_compat_one(struct ptlrpc_service *svc,
- const struct ptlrpc_nrs_pol_desc *desc)
+static inline bool nrs_policy_compat_one(const struct ptlrpc_service *svc,
+ const struct ptlrpc_nrs_pol_desc *desc)
{
LASSERT(desc->pd_compat_svc_name != NULL);
return strcmp(svc->srv_name, desc->pd_compat_svc_name) == 0;
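/*
 * Illustration only, a minimal sketch that is not part of this header:
 * a policy descriptor selects one of the two helpers above through its
 * pd_compat method.  A hypothetical descriptor limited to a single service
 * could set (all other descriptor fields omitted; the service name is an
 * assumption for the example):
 *
 *        .pd_compat            = nrs_policy_compat_one,
 *        .pd_compat_svc_name   = "ost_io",
 *
 * whereas a policy meant for every service would use
 * .pd_compat = nrs_policy_compat_all and may leave pd_compat_svc_name NULL,
 * which nrs_policy_compat_one would not tolerate (see the LASSERT above).
 */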
int ptlrpc_reply(struct ptlrpc_request *req);
int ptlrpc_send_error(struct ptlrpc_request *req, int difficult);
int ptlrpc_error(struct ptlrpc_request *req);
-void ptlrpc_resend_req(struct ptlrpc_request *request);
int ptlrpc_at_get_net_latency(struct ptlrpc_request *req);
int ptl_send_rpc(struct ptlrpc_request *request, int noreply);
int ptlrpc_register_rqbd(struct ptlrpc_request_buffer_desc *rqbd);
* request queues, request management, etc.
* @{
*/
+void ptlrpc_request_committed(struct ptlrpc_request *req, int force);
+
void ptlrpc_init_client(int req_portal, int rep_portal, char *name,
struct ptlrpc_client *);
void ptlrpc_cleanup_client(struct obd_import *imp);
int ptlrpc_queue_wait(struct ptlrpc_request *req);
int ptlrpc_replay_req(struct ptlrpc_request *req);
-int ptlrpc_unregister_reply(struct ptlrpc_request *req, int async);
void ptlrpc_restart_req(struct ptlrpc_request *req);
void ptlrpc_abort_inflight(struct obd_import *imp);
void ptlrpc_cleanup_imp(struct obd_import *imp);
void *arg);
int ptlrpc_set_add_cb(struct ptlrpc_request_set *set,
set_interpreter_func fn, void *data);
-int ptlrpc_set_next_timeout(struct ptlrpc_request_set *);
int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set);
int ptlrpc_set_wait(struct ptlrpc_request_set *);
-int ptlrpc_expired_set(void *data);
-void ptlrpc_interrupted_set(void *data);
void ptlrpc_mark_interrupted(struct ptlrpc_request *req);
void ptlrpc_set_destroy(struct ptlrpc_request_set *);
void ptlrpc_set_add_req(struct ptlrpc_request_set *, struct ptlrpc_request *);
-void ptlrpc_set_add_new_req(struct ptlrpcd_ctl *pc,
- struct ptlrpc_request *req);
void ptlrpc_free_rq_pool(struct ptlrpc_request_pool *pool);
void ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq);
__ptlrpc_free_bulk(bulk, 0);
}
void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
- cfs_page_t *page, int pageoffset, int len, int);
+ struct page *page, int pageoffset, int len, int);
static inline void ptlrpc_prep_bulk_page_pin(struct ptlrpc_bulk_desc *desc,
- cfs_page_t *page, int pageoffset,
+ struct page *page, int pageoffset,
int len)
{
__ptlrpc_prep_bulk_page(desc, page, pageoffset, len, 1);
}
static inline void ptlrpc_prep_bulk_page_nopin(struct ptlrpc_bulk_desc *desc,
- cfs_page_t *page, int pageoffset,
+ struct page *page, int pageoffset,
int len)
{
__ptlrpc_prep_bulk_page(desc, page, pageoffset, len, 0);
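/*
 * Illustration only, a minimal sketch that is not part of this header:
 * the last argument of __ptlrpc_prep_bulk_page() selects whether the page
 * gets an extra reference (pinned) while it is attached to the descriptor.
 * Filling an already prepared descriptor could look like the following,
 * where "desc", "pages" and "npages" are assumptions for the example:
 *
 *        int i;
 *
 *        for (i = 0; i < npages; i++)
 *                ptlrpc_prep_bulk_page_pin(desc, pages[i], 0,
 *                                          PAGE_CACHE_SIZE);
 *
 * The _nopin variant is intended for callers that already hold their own
 * page references for the lifetime of the bulk transfer.
 */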
void ptlrpc_daemonize(char *name);
int ptlrpc_service_health_check(struct ptlrpc_service *);
void ptlrpc_server_drop_request(struct ptlrpc_request *req);
+void ptlrpc_request_change_export(struct ptlrpc_request *req,
+ struct obd_export *export);
+void ptlrpc_update_export_timer(struct obd_export *exp, long extra_delay);
-#ifdef __KERNEL__
int ptlrpc_hr_init(void);
void ptlrpc_hr_fini(void);
-#else
-# define ptlrpc_hr_init() (0)
-# define ptlrpc_hr_fini() do {} while(0)
-#endif
/** @} */
/** @} */
/**
- * ptlrpc msg buffer and swab interface
+ * ptlrpc msg buffer and swab interface
*
* @{
*/
int ptlrpc_buf_need_swab(struct ptlrpc_request *req, const int inout,
- int index);
+ __u32 index);
void ptlrpc_buf_set_swabbed(struct ptlrpc_request *req, const int inout,
- int index);
+ __u32 index);
int ptlrpc_unpack_rep_msg(struct ptlrpc_request *req, int len);
int ptlrpc_unpack_req_msg(struct ptlrpc_request *req, int len);
unsigned int newlen, int move_data);
void lustre_free_reply_state(struct ptlrpc_reply_state *rs);
int __lustre_unpack_msg(struct lustre_msg *m, int len);
-int lustre_msg_hdr_size(__u32 magic, int count);
-int lustre_msg_size(__u32 magic, int count, __u32 *lengths);
-int lustre_msg_size_v2(int count, __u32 *lengths);
-int lustre_packed_msg_size(struct lustre_msg *msg);
-int lustre_msg_early_size(void);
-void *lustre_msg_buf_v2(struct lustre_msg_v2 *m, int n, int min_size);
-void *lustre_msg_buf(struct lustre_msg *m, int n, int minlen);
-int lustre_msg_buflen(struct lustre_msg *m, int n);
-void lustre_msg_set_buflen(struct lustre_msg *m, int n, int len);
-int lustre_msg_bufcount(struct lustre_msg *m);
-char *lustre_msg_string(struct lustre_msg *m, int n, int max_len);
+__u32 lustre_msg_hdr_size(__u32 magic, __u32 count);
+__u32 lustre_msg_size(__u32 magic, int count, __u32 *lengths);
+__u32 lustre_msg_size_v2(int count, __u32 *lengths);
+__u32 lustre_packed_msg_size(struct lustre_msg *msg);
+__u32 lustre_msg_early_size(void);
+void *lustre_msg_buf_v2(struct lustre_msg_v2 *m, __u32 n, __u32 min_size);
+void *lustre_msg_buf(struct lustre_msg *m, __u32 n, __u32 minlen);
+__u32 lustre_msg_buflen(struct lustre_msg *m, __u32 n);
+void lustre_msg_set_buflen(struct lustre_msg *m, __u32 n, __u32 len);
+__u32 lustre_msg_bufcount(struct lustre_msg *m);
+char *lustre_msg_string(struct lustre_msg *m, __u32 n, __u32 max_len);
__u32 lustre_msghdr_get_flags(struct lustre_msg *msg);
void lustre_msghdr_set_flags(struct lustre_msg *msg, __u32 flags);
__u32 lustre_msg_get_flags(struct lustre_msg *msg);
-void lustre_msg_add_flags(struct lustre_msg *msg, int flags);
-void lustre_msg_set_flags(struct lustre_msg *msg, int flags);
-void lustre_msg_clear_flags(struct lustre_msg *msg, int flags);
+void lustre_msg_add_flags(struct lustre_msg *msg, __u32 flags);
+void lustre_msg_set_flags(struct lustre_msg *msg, __u32 flags);
+void lustre_msg_clear_flags(struct lustre_msg *msg, __u32 flags);
__u32 lustre_msg_get_op_flags(struct lustre_msg *msg);
-void lustre_msg_add_op_flags(struct lustre_msg *msg, int flags);
-void lustre_msg_set_op_flags(struct lustre_msg *msg, int flags);
+void lustre_msg_add_op_flags(struct lustre_msg *msg, __u32 flags);
+void lustre_msg_set_op_flags(struct lustre_msg *msg, __u32 flags);
struct lustre_handle *lustre_msg_get_handle(struct lustre_msg *msg);
__u32 lustre_msg_get_type(struct lustre_msg *msg);
__u32 lustre_msg_get_version(struct lustre_msg *msg);
-void lustre_msg_add_version(struct lustre_msg *msg, int version);
+void lustre_msg_add_version(struct lustre_msg *msg, __u32 version);
__u32 lustre_msg_get_opc(struct lustre_msg *msg);
__u64 lustre_msg_get_last_xid(struct lustre_msg *msg);
+__u16 lustre_msg_get_tag(struct lustre_msg *msg);
__u64 lustre_msg_get_last_committed(struct lustre_msg *msg);
__u64 *lustre_msg_get_versions(struct lustre_msg *msg);
__u64 lustre_msg_get_transno(struct lustre_msg *msg);
__u32 lustre_msg_get_service_time(struct lustre_msg *msg);
char *lustre_msg_get_jobid(struct lustre_msg *msg);
__u32 lustre_msg_get_cksum(struct lustre_msg *msg);
-#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 7, 50, 0)
+#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 7, 53, 0)
__u32 lustre_msg_calc_cksum(struct lustre_msg *msg, int compat18);
#else
-# warning "remove checksum compatibility support for b1_8"
__u32 lustre_msg_calc_cksum(struct lustre_msg *msg);
#endif
void lustre_msg_set_handle(struct lustre_msg *msg,struct lustre_handle *handle);
void lustre_msg_set_type(struct lustre_msg *msg, __u32 type);
void lustre_msg_set_opc(struct lustre_msg *msg, __u32 opc);
void lustre_msg_set_last_xid(struct lustre_msg *msg, __u64 last_xid);
+void lustre_msg_set_tag(struct lustre_msg *msg, __u16 tag);
void lustre_msg_set_last_committed(struct lustre_msg *msg,__u64 last_committed);
void lustre_msg_set_versions(struct lustre_msg *msg, __u64 *versions);
void lustre_msg_set_transno(struct lustre_msg *msg, __u64 transno);
req->rq_replen = lustre_shrink_msg(req->rq_repmsg, segment,
newlen, move_data);
}
+
+#ifdef LUSTRE_TRANSLATE_ERRNOS
+
+static inline int ptlrpc_status_hton(int h)
+{
+ /*
+ * Positive errnos must be network errnos, such as LUSTRE_EDEADLK,
+ * ELDLM_LOCK_ABORTED, etc.
+ */
+ if (h < 0)
+ return -lustre_errno_hton(-h);
+ else
+ return h;
+}
+
+static inline int ptlrpc_status_ntoh(int n)
+{
+ /*
+ * See the comment in ptlrpc_status_hton().
+ */
+ if (n < 0)
+ return -lustre_errno_ntoh(-n);
+ else
+ return n;
+}
+
+#else
+
+#define ptlrpc_status_hton(h) (h)
+#define ptlrpc_status_ntoh(n) (n)
+
+#endif
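+
+/*
+ * Illustration only, a minimal sketch that is not part of this header:
+ * host errnos are negative, so only negative values are translated to and
+ * from the wire form; positive values are treated as protocol status codes
+ * (LUSTRE_EDEADLK, ELDLM_LOCK_ABORTED, ...) and pass through unchanged.
+ * The variable names below are assumptions for the example:
+ *
+ *        int local_rc = -ENOENT;
+ *        int wire_rc  = ptlrpc_status_hton(local_rc);  // translated iff
+ *                                                      // LUSTRE_TRANSLATE_ERRNOS
+ *        int back_rc  = ptlrpc_status_ntoh(wire_rc);   // -ENOENT again
+ *
+ *        wire_rc = ptlrpc_status_hton(ELDLM_LOCK_ABORTED); // unchanged
+ */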
/** @} */
/** Change request phase of \a req to \a new_phase */
static inline void
ptlrpc_rqphase_move(struct ptlrpc_request *req, enum rq_phase new_phase)
{
- if (req->rq_phase == new_phase)
- return;
+ if (req->rq_phase == new_phase)
+ return;
- if (new_phase == RQ_PHASE_UNREGISTERING) {
- req->rq_next_phase = req->rq_phase;
- if (req->rq_import)
- cfs_atomic_inc(&req->rq_import->imp_unregistering);
- }
+ if (new_phase == RQ_PHASE_UNREGISTERING) {
+ req->rq_next_phase = req->rq_phase;
+ if (req->rq_import)
+ atomic_inc(&req->rq_import->imp_unregistering);
+ }
- if (req->rq_phase == RQ_PHASE_UNREGISTERING) {
- if (req->rq_import)
- cfs_atomic_dec(&req->rq_import->imp_unregistering);
- }
+ if (req->rq_phase == RQ_PHASE_UNREGISTERING) {
+ if (req->rq_import)
+ atomic_dec(&req->rq_import->imp_unregistering);
+ }
- DEBUG_REQ(D_INFO, req, "move req \"%s\" -> \"%s\"",
- ptlrpc_rqphase2str(req), ptlrpc_phase2str(new_phase));
+ DEBUG_REQ(D_INFO, req, "move req \"%s\" -> \"%s\"",
+ ptlrpc_rqphase2str(req), ptlrpc_phase2str(new_phase));
- req->rq_phase = new_phase;
+ req->rq_phase = new_phase;
}
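/*
 * Illustration only, a minimal sketch that is not part of this header:
 * moving a request into RQ_PHASE_UNREGISTERING remembers the previous phase
 * in rq_next_phase and accounts the request in imp_unregistering; leaving
 * that phase drops the accounting again.  A typical caller would do:
 *
 *        // park the request while its buffers are being unlinked
 *        ptlrpc_rqphase_move(req, RQ_PHASE_UNREGISTERING);
 *        ...
 *        // once unlinking has completed, resume where we left off
 *        ptlrpc_rqphase_move(req, req->rq_next_phase);
 *
 * where "req" is a struct ptlrpc_request assumed to exist in the caller.
 */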
/**
- * Returns true if request \a req got early reply and hard deadline is not met
+ * Returns true if request \a req got early reply and hard deadline is not met
*/
static inline int
ptlrpc_client_early(struct ptlrpc_request *req)
spin_unlock(&req->rq_lock);
return 1;
}
- rc = req->rq_receiving_reply || req->rq_must_unlink;
+ rc = !req->rq_req_unlinked || !req->rq_reply_unlinked ||
+ req->rq_receiving_reply;
spin_unlock(&req->rq_lock);
return rc;
}
static inline void
ptlrpc_client_wake_req(struct ptlrpc_request *req)
{
- if (req->rq_set == NULL)
- cfs_waitq_signal(&req->rq_reply_waitq);
- else
- cfs_waitq_signal(&req->rq_set->set_waitq);
+ if (req->rq_set == NULL)
+ wake_up(&req->rq_reply_waitq);
+ else
+ wake_up(&req->rq_set->set_waitq);
}
static inline void
ptlrpc_rs_addref(struct ptlrpc_reply_state *rs)
{
- LASSERT(cfs_atomic_read(&rs->rs_refcount) > 0);
- cfs_atomic_inc(&rs->rs_refcount);
+ LASSERT(atomic_read(&rs->rs_refcount) > 0);
+ atomic_inc(&rs->rs_refcount);
}
static inline void
ptlrpc_rs_decref(struct ptlrpc_reply_state *rs)
{
- LASSERT(cfs_atomic_read(&rs->rs_refcount) > 0);
- if (cfs_atomic_dec_and_test(&rs->rs_refcount))
- lustre_free_reply_state(rs);
+ LASSERT(atomic_read(&rs->rs_refcount) > 0);
+ if (atomic_dec_and_test(&rs->rs_refcount))
+ lustre_free_reply_state(rs);
}
/* Should only be called once per req */
* Pinger API (client side only)
* @{
*/
-extern int suppress_pings;
enum timeout_event {
TIMEOUT_GRANT = 1
};
int ptlrpc_pinger_add_import(struct obd_import *imp);
int ptlrpc_pinger_del_import(struct obd_import *imp);
int ptlrpc_add_timeout_client(int time, enum timeout_event event,
- timeout_cb_t cb, void *data,
- cfs_list_t *obd_list);
-int ptlrpc_del_timeout_client(cfs_list_t *obd_list,
+ timeout_cb_t cb, void *data,
+ struct list_head *obd_list);
+int ptlrpc_del_timeout_client(struct list_head *obd_list,
enum timeout_event event);
struct ptlrpc_request * ptlrpc_prep_ping(struct obd_import *imp);
int ptlrpc_obd_ping(struct obd_device *obd);
-cfs_time_t ptlrpc_suspend_wakeup_time(void);
-#ifdef __KERNEL__
void ping_evictor_start(void);
void ping_evictor_stop(void);
-#else
-#define ping_evictor_start() do {} while (0)
-#define ping_evictor_stop() do {} while (0)
-#endif
-int ptlrpc_check_and_wait_suspend(struct ptlrpc_request *req);
+void ptlrpc_pinger_ir_up(void);
+void ptlrpc_pinger_ir_down(void);
/** @} */
+int ptlrpc_pinger_suppress_pings(void);
/* ptlrpc daemon bind policy */
typedef enum {
* @{
*/
const char* ll_opcode2str(__u32 opcode);
-#ifdef LPROCFS
+#ifdef CONFIG_PROC_FS
void ptlrpc_lprocfs_register_obd(struct obd_device *obd);
void ptlrpc_lprocfs_unregister_obd(struct obd_device *obd);
void ptlrpc_lprocfs_brw(struct ptlrpc_request *req, int bytes);
int llog_origin_handle_next_block(struct ptlrpc_request *req);
int llog_origin_handle_read_header(struct ptlrpc_request *req);
int llog_origin_handle_close(struct ptlrpc_request *req);
-int llog_origin_handle_cancel(struct ptlrpc_request *req);
/* ptlrpc/llog_client.c */
extern struct llog_operations llog_client_ops;