#include <linux/uio.h>
#include <libcfs/libcfs.h>
#include <lnet/api.h>
+#include <lnet/lib-types.h>
#include <uapi/linux/lnet/nidstr.h>
#include <uapi/linux/lustre/lustre_idl.h>
#include <lustre_ha.h>
* value. The client is free to limit the actual RPC size for any bulk
* transfer via cl_max_pages_per_rpc to some non-power-of-two value.
* NOTE: This is limited to 16 (=64GB RPCs) by IOOBJ_MAX_BRW_BITS. */
-#define PTLRPC_BULK_OPS_BITS 4
+#define PTLRPC_BULK_OPS_BITS 6
#if PTLRPC_BULK_OPS_BITS > 16
#error "More than 65536 BRW RPCs not allowed by IOOBJ_MAX_BRW_BITS."
#endif
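/*
 * For rough orientation (assuming the 1 MiB per bulk operation implied by the
 * 64GB figure above, i.e. a 1 MiB LNET MTU; the real value depends on the
 * build), the limits work out as:
 *   1 << 4  =    16 bulk ops per RPC  ->  16 MiB max BRW RPC (old value)
 *   1 << 6  =    64 bulk ops per RPC  ->  64 MiB max BRW RPC (new value)
 *   1 << 16 = 65536 bulk ops per RPC  ->  64 GiB, the IOOBJ_MAX_BRW_BITS cap
 */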
* - OST_IO_MAXREQSIZE must be at least 1 page of cookies plus some spillover
* - Must be a multiple of 1024
*/
-#define _OST_MAXREQSIZE_BASE (sizeof(struct lustre_msg) + \
- sizeof(struct ptlrpc_body) + \
- sizeof(struct obdo) + \
- sizeof(struct obd_ioobj) + \
- sizeof(struct niobuf_remote))
-#define _OST_MAXREQSIZE_SUM (_OST_MAXREQSIZE_BASE + \
- sizeof(struct niobuf_remote) * \
- (DT_MAX_BRW_PAGES - 1))
+#define _OST_MAXREQSIZE_BASE ((unsigned long)(sizeof(struct lustre_msg) + \
+ sizeof(struct ptlrpc_body) + \
+ sizeof(struct obdo) + \
+ sizeof(struct obd_ioobj) + \
+ sizeof(struct niobuf_remote)))
+#define _OST_MAXREQSIZE_SUM ((unsigned long)(_OST_MAXREQSIZE_BASE + \
+ sizeof(struct niobuf_remote) * \
+ (DT_MAX_BRW_PAGES - 1)))
/**
* FIEMAP request can be 4K+ for now
*/
-#define OST_MAXREQSIZE (16 * 1024)
-#define OST_IO_MAXREQSIZE max_t(int, OST_MAXREQSIZE, \
- (((_OST_MAXREQSIZE_SUM - 1) | (1024 - 1)) + 1))
+#define OST_MAXREQSIZE (16UL * 1024UL)
+#define OST_IO_MAXREQSIZE max(OST_MAXREQSIZE, \
+ ((_OST_MAXREQSIZE_SUM - 1) | \
+ (1024UL - 1)) + 1)
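/*
 * The ((x - 1) | (1024 - 1)) + 1 expression rounds _OST_MAXREQSIZE_SUM up to
 * the next multiple of 1024.  With a made-up sum of 18000 bytes (the real
 * value depends on the struct sizes and DT_MAX_BRW_PAGES):
 *   ((18000 - 1) | 1023) + 1 = 18432 = 18 KiB
 * and OST_IO_MAXREQSIZE = max(16 KiB, 18 KiB) = 18 KiB.
 */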
/* Safe estimate of free space in a standard RPC; this is the upper limit on
 * the number of bytes of i/o that can be packed into an RPC without a bulk
 * transfer. */
#define OST_SHORT_IO_SPACE (OST_IO_MAXREQSIZE - _OST_MAXREQSIZE_BASE)
/* Actual size used for the short i/o buffer: the larger of one page (for
 * architectures with large PAGE_SIZE) and 16 KiB, but never more than the
 * available space rounded down to a page boundary. */
-#define OBD_MAX_SHORT_IO_BYTES (min(max(PAGE_SIZE, 16UL * 1024UL), \
- OST_SHORT_IO_SPACE & PAGE_MASK))
+#define OBD_MAX_SHORT_IO_BYTES min(max(PAGE_SIZE, 16UL * 1024UL), \
+ OST_SHORT_IO_SPACE & PAGE_MASK)
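/*
 * Illustration with made-up numbers: on a 4 KiB-page system where
 * OST_SHORT_IO_SPACE happened to be 17800 bytes,
 *   max(PAGE_SIZE, 16 KiB)          = 16384
 *   OST_SHORT_IO_SPACE & PAGE_MASK  = 16384
 * so OBD_MAX_SHORT_IO_BYTES = min(16384, 16384) = 16 KiB.  On a 64 KiB-page
 * system with enough space available, the result would instead be one 64 KiB
 * page.
 */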
#define OST_MAXREPSIZE (9 * 1024)
#define OST_IO_MAXREPSIZE OST_MAXREPSIZE
#define OST_NBUFS 64
/** OST_BUFSIZE = max_reqsize + max sptlrpc payload size */
-#define OST_BUFSIZE max_t(int, OST_MAXREQSIZE + 1024, 16 * 1024)
+#define OST_BUFSIZE max_t(int, OST_MAXREQSIZE + 1024, 32 * 1024)
/**
* OST_IO_MAXREQSIZE is 18K, so allowing an extra 46K increases the utilization
* of the request buffer; see the comment for MDS_LOV_BUFSIZE for details.
*/
#define OST_IO_BUFSIZE max_t(int, OST_IO_MAXREQSIZE + 1024, 64 * 1024)
-
-/* Macro to hide a typecast. */
-#define ptlrpc_req_async_args(req) ((void *)&req->rq_async_args)
+/* Macro to hide a typecast and a compile-time size check (BUILD_BUG_ON). */
+#define ptlrpc_req_async_args(_var, req) ({ \
+ BUILD_BUG_ON(sizeof(*_var) > sizeof(req->rq_async_args)); \
+ (typeof(_var))&req->rq_async_args; \
+ })
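/*
 * Minimal usage sketch for the two-argument form (the struct and variable
 * names here are hypothetical, not part of this header): the pointee size is
 * checked against rq_async_args at compile time and the cast pointer is
 * returned.
 *
 *   struct my_brw_args {
 *           int aa_retries;
 *   } *aa;
 *
 *   aa = ptlrpc_req_async_args(aa, req);
 *   aa->aa_retries = 0;
 */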
struct ptlrpc_replay_async_args {
int praa_old_state;
/** What portal do we expect replies on */
__u32 cli_reply_portal;
/** Name of the client */
- char *cli_name;
+ const char *cli_name;
};
/** state flags of requests */
/** description of flavors for client & server */
struct sptlrpc_flavor rq_flvr;
+ /**
+ * SELinux policy info at the time of the request
+ * sepol string format is:
+ * <mode>:<policy name>:<policy version>:<policy hash>
+ */
+ char rq_sepol[LUSTRE_NODEMAP_SEPOL_LENGTH + 1];
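/*
 * Purely illustrative example of the sepol string (all values made up):
 *   "1:targeted:31:d41d8cd9"
 * i.e. <mode>=1, policy name "targeted", policy version 31, and a
 * (truncated) policy hash.
 */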
+
/* client/server security flags */
unsigned int
rq_ctx_init:1, /* context initiation */
static inline int ptlrpc_req_interpret(const struct lu_env *env,
struct ptlrpc_request *req, int rc)
{
- if (req->rq_interpret_reply != NULL) {
- req->rq_status = req->rq_interpret_reply(env, req,
- &req->rq_async_args,
- rc);
- return req->rq_status;
- }
- return rc;
+ if (req->rq_interpret_reply != NULL) {
+ req->rq_status = req->rq_interpret_reply(env, req,
+ &req->rq_async_args,
+ rc);
+ return req->rq_status;
+ }
+
+ return rc;
}
/** \addtogroup nrs
/** @} nrs */
/**
- * Returns 1 if request buffer at offset \a index was already swabbed
+ * Returns true if request buffer at offset \a index was already swabbed
*/
-static inline int lustre_req_swabbed(struct ptlrpc_request *req, size_t index)
+static inline bool lustre_req_swabbed(struct ptlrpc_request *req, size_t index)
{
- LASSERT(index < sizeof(req->rq_req_swab_mask) * 8);
- return req->rq_req_swab_mask & (1 << index);
+ LASSERT(index < sizeof(req->rq_req_swab_mask) * 8);
+ return req->rq_req_swab_mask & (1 << index);
}
/**
- * Returns 1 if request reply buffer at offset \a index was already swabbed
+ * Returns true if request reply buffer at offset \a index was already swabbed
*/
-static inline int lustre_rep_swabbed(struct ptlrpc_request *req, size_t index)
+static inline bool lustre_rep_swabbed(struct ptlrpc_request *req, size_t index)
{
- LASSERT(index < sizeof(req->rq_rep_swab_mask) * 8);
- return req->rq_rep_swab_mask & (1 << index);
+ LASSERT(index < sizeof(req->rq_rep_swab_mask) * 8);
+ return req->rq_rep_swab_mask & (1 << index);
}
/**
- * Returns 1 if request needs to be swabbed into local cpu byteorder
+ * Returns true if request needs to be swabbed into local cpu byteorder
*/
-static inline int ptlrpc_req_need_swab(struct ptlrpc_request *req)
+static inline bool ptlrpc_req_need_swab(struct ptlrpc_request *req)
{
- return lustre_req_swabbed(req, MSG_PTLRPC_HEADER_OFF);
+ return lustre_req_swabbed(req, MSG_PTLRPC_HEADER_OFF);
}
/**
- * Returns 1 if request reply needs to be swabbed into local cpu byteorder
+ * Returns true if request reply needs to be swabbed into local cpu byteorder
*/
-static inline int ptlrpc_rep_need_swab(struct ptlrpc_request *req)
+static inline bool ptlrpc_rep_need_swab(struct ptlrpc_request *req)
{
- return lustre_rep_swabbed(req, MSG_PTLRPC_HEADER_OFF);
+ return lustre_rep_swabbed(req, MSG_PTLRPC_HEADER_OFF);
}
/**
FLAG(req->rq_err, "E"), FLAG(req->rq_net_err, "e"), \
FLAG(req->rq_timedout, "X") /* eXpired */, FLAG(req->rq_resend, "S"), \
FLAG(req->rq_restart, "T"), FLAG(req->rq_replay, "P"), \
- FLAG(req->rq_no_resend, "N"), \
+ FLAG(req->rq_no_resend, "N"), FLAG(req->rq_no_reply, "n"), \
FLAG(req->rq_waiting, "W"), \
FLAG(req->rq_wait_ctx, "C"), FLAG(req->rq_hp, "H"), \
- FLAG(req->rq_committed, "M")
+ FLAG(req->rq_committed, "M"), \
+ FLAG(req->rq_req_unlinked, "Q"), \
+ FLAG(req->rq_reply_unlinked, "U"), \
+ FLAG(req->rq_receiving_reply, "r")
-#define REQ_FLAGS_FMT "%s:%s%s%s%s%s%s%s%s%s%s%s%s%s"
+#define REQ_FLAGS_FMT "%s:%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s"
void _debug_req(struct ptlrpc_request *req,
struct libcfs_debug_msg_data *data, const char *fmt, ...)
struct obd_import *bd_import;
/** Back pointer to the request */
struct ptlrpc_request *bd_req;
- struct ptlrpc_bulk_frag_ops *bd_frag_ops;
+ const struct ptlrpc_bulk_frag_ops *bd_frag_ops;
wait_queue_head_t bd_waitq; /* server side only WQ */
int bd_iov_count; /* # entries in bd_iov */
int bd_max_iov; /* allocated size of bd_iov */
SVC_STARTING = 1 << 2,
SVC_RUNNING = 1 << 3,
SVC_EVENT = 1 << 4,
- SVC_SIGNAL = 1 << 5,
};
#define PTLRPC_THR_NAME_LEN 32
*/
unsigned int t_id;
/**
- * service thread pid
+ * service thread
*/
+ struct task_struct *t_task;
pid_t t_pid;
+ ktime_t t_touched;
/**
* put watchdog in the structure per thread b=14840
*/
- struct lc_watchdog *t_watchdog;
+ struct delayed_work t_watchdog;
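/*
 * With delayed_work, a plausible (illustrative, not necessarily what ptlrpc
 * does) watchdog pattern is to arm the work item and push it back whenever
 * the thread shows signs of life, so it only fires if the thread stalls:
 *
 *   INIT_DELAYED_WORK(&thread->t_watchdog, watchdog_fn);  (watchdog_fn is hypothetical)
 *   schedule_delayed_work(&thread->t_watchdog, timeout);
 *   ...
 *   mod_delayed_work(system_wq, &thread->t_watchdog, timeout);  ("touch")
 *   cancel_delayed_work_sync(&thread->t_watchdog);              (on exit)
 */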
/**
* the svc this thread belongs to b=18582
*/
return !!(thread->t_flags & SVC_EVENT);
}
-static inline int thread_is_signal(struct ptlrpc_thread *thread)
-{
- return !!(thread->t_flags & SVC_SIGNAL);
-}
-
static inline void thread_clear_flags(struct ptlrpc_thread *thread, __u32 flags)
{
thread->t_flags &= ~flags;
int srv_nthrs_cpt_init;
/** limit of threads number for each partition */
int srv_nthrs_cpt_limit;
- /** Root of /proc dir tree for this service */
- struct proc_dir_entry *srv_procroot;
+ /** Root of debugfs dir tree for this service */
+ struct dentry *srv_debugfs_entry;
/** Pointer to statistic data for this service */
struct lprocfs_stats *srv_stats;
/** # hp per lp reqs to handle */
int srv_watchdog_factor;
/** under unregister_service */
unsigned srv_is_stopping:1;
+ /** Whether or not to restrict service threads to CPUs in this CPT */
+ unsigned srv_cpt_bind:1;
/** max # request buffers */
int srv_nrqbds_max;
/** max # request buffers in history per partition */
int srv_hist_nrqbds_cpt_max;
- /** number of CPTs this service bound on */
+ /** number of CPTs this service associated with */
int srv_ncpts;
- /** CPTs array this service bound on */
+ /** CPTs array this service associated with */
__u32 *srv_cpts;
/** 2^srv_cpt_bits >= cfs_cpt_number(srv_cptable) */
int srv_cpt_bits;
* threads starting & stopping are also protected by this lock.
*/
spinlock_t scp_lock __cfs_cacheline_aligned;
+ /** userland serialization */
+ struct mutex scp_mutex;
/** total # req buffer descs allocated */
int scp_nrqbds_total;
/** # posted request buffers for receiving */
struct list_head scp_rqbd_posted;
/** incoming reqs */
struct list_head scp_req_incoming;
- /** timeout before re-posting reqs, in tick */
- cfs_duration_t scp_rqbd_timeout;
+ /** timeout before re-posting reqs, in jiffies */
+ long scp_rqbd_timeout;
/**
* all threads sleep on this. This wait-queue is signalled when new
* incoming request arrives and when difficult reply has to be handled.
/** early reply timer */
struct timer_list scp_at_timer;
/** debug */
- cfs_time_t scp_at_checktime;
+ ktime_t scp_at_checktime;
/** check early replies */
unsigned scp_at_check;
/** @} */
*/
void ptlrpc_request_committed(struct ptlrpc_request *req, int force);
-void ptlrpc_init_client(int req_portal, int rep_portal, char *name,
+void ptlrpc_init_client(int req_portal, int rep_portal, const char *name,
struct ptlrpc_client *);
void ptlrpc_cleanup_client(struct obd_import *imp);
struct ptlrpc_connection *ptlrpc_uuid_to_connection(struct obd_uuid *uuid,
struct ptlrpc_request_set *ptlrpc_prep_fcset(int max, set_producer_func func,
void *arg);
int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set);
-int ptlrpc_set_wait(struct ptlrpc_request_set *);
+int ptlrpc_set_wait(const struct lu_env *env, struct ptlrpc_request_set *);
void ptlrpc_mark_interrupted(struct ptlrpc_request *req);
void ptlrpc_set_destroy(struct ptlrpc_request_set *);
void ptlrpc_set_add_req(struct ptlrpc_request_set *, struct ptlrpc_request *);
void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
struct page *page, int pageoffset, int len,
int pin);
-static inline void ptlrpc_prep_bulk_page_pin(struct ptlrpc_bulk_desc *desc,
- struct page *page, int pageoffset,
- int len)
-{
- __ptlrpc_prep_bulk_page(desc, page, pageoffset, len, 1);
-}
-
-static inline void ptlrpc_prep_bulk_page_nopin(struct ptlrpc_bulk_desc *desc,
- struct page *page, int pageoffset,
- int len)
-{
- __ptlrpc_prep_bulk_page(desc, page, pageoffset, len, 0);
-}
void ptlrpc_free_bulk(struct ptlrpc_bulk_desc *bulk);
-static inline void ptlrpc_release_bulk_page_pin(struct ptlrpc_bulk_desc *desc)
-{
- int i;
-
- for (i = 0; i < desc->bd_iov_count ; i++)
- put_page(BD_GET_KIOV(desc, i).kiov_page);
-}
-
static inline void ptlrpc_release_bulk_noop(struct ptlrpc_bulk_desc *desc)
{
}
/* user-specified number of threads; it is validated against the
 * other members of this structure. */
unsigned int tc_nthrs_user;
- /* set NUMA node affinity for service threads */
- unsigned int tc_cpu_affinity;
+ /* bind service threads to only CPUs in their associated CPT */
+ unsigned int tc_cpu_bind;
/* Tags for lu_context associated with service thread */
__u32 tc_ctx_tags;
};
struct cfs_cpt_table *cc_cptable;
/* string pattern to describe CPTs for a service */
char *cc_pattern;
+ /* whether or not to have per-CPT service partitions */
+ bool cc_affinity;
};
struct ptlrpc_service_conf {
struct ptlrpc_service *ptlrpc_register_service(
struct ptlrpc_service_conf *conf,
struct kset *parent,
- struct proc_dir_entry *proc_entry);
+ struct dentry *debugfs_entry);
void ptlrpc_stop_all_threads(struct ptlrpc_service *svc);
int ptlrpc_start_threads(struct ptlrpc_service *svc);
int ptlrpc_connect_import(struct obd_import *imp);
int ptlrpc_init_import(struct obd_import *imp);
int ptlrpc_disconnect_import(struct obd_import *imp, int noclose);
+int ptlrpc_disconnect_and_idle_import(struct obd_import *imp);
int ptlrpc_import_recovery_state_machine(struct obd_import *imp);
void deuuidify(char *uuid, const char *prefix, char **uuid_start,
int *uuid_len);
*
* @{
*/
-int ptlrpc_buf_need_swab(struct ptlrpc_request *req, const int inout,
- __u32 index);
+#define PTLRPC_MAX_BUFCOUNT \
+ (sizeof(((struct ptlrpc_request *)0)->rq_req_swab_mask) * 8)
+#define MD_MAX_BUFLEN (MDS_REG_MAXREQSIZE > OUT_MAXREQSIZE ? \
+ MDS_REG_MAXREQSIZE : OUT_MAXREQSIZE)
+#define PTLRPC_MAX_BUFLEN (OST_IO_MAXREQSIZE > MD_MAX_BUFLEN ? \
+ OST_IO_MAXREQSIZE : MD_MAX_BUFLEN)
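/*
 * Assuming rq_req_swab_mask is a 32-bit mask (the actual type is defined with
 * the request struct), PTLRPC_MAX_BUFCOUNT evaluates to 4 * 8 = 32 message
 * buffers, and PTLRPC_MAX_BUFLEN is simply the largest of the MDS_REG, OUT
 * and OST_IO maximum request sizes.
 */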
+bool ptlrpc_buf_need_swab(struct ptlrpc_request *req, const int inout,
+ __u32 index);
void ptlrpc_buf_set_swabbed(struct ptlrpc_request *req, const int inout,
__u32 index);
int ptlrpc_unpack_rep_msg(struct ptlrpc_request *req, int len);
/* ptlrpc/llog_server.c */
int llog_origin_handle_open(struct ptlrpc_request *req);
-int llog_origin_handle_destroy(struct ptlrpc_request *req);
int llog_origin_handle_prev_block(struct ptlrpc_request *req);
int llog_origin_handle_next_block(struct ptlrpc_request *req);
int llog_origin_handle_read_header(struct ptlrpc_request *req);
-int llog_origin_handle_close(struct ptlrpc_request *req);
/* ptlrpc/llog_client.c */
extern struct llog_operations llog_client_ops;