*
* @{
*/
-
+#include <linux/kobject.h>
#include <linux/uio.h>
#include <libcfs/libcfs.h>
#include <lnet/nidstr.h>
* currently supported maximum between peers at connect via ocd_brw_size.
*/
#define PTLRPC_MAX_BRW_BITS (LNET_MTU_BITS + PTLRPC_BULK_OPS_BITS)
-#define PTLRPC_MAX_BRW_SIZE (1 << PTLRPC_MAX_BRW_BITS)
+#define PTLRPC_MAX_BRW_SIZE (1U << PTLRPC_MAX_BRW_BITS)
#define PTLRPC_MAX_BRW_PAGES (PTLRPC_MAX_BRW_SIZE >> PAGE_SHIFT)
-#define ONE_MB_BRW_SIZE (1 << LNET_MTU_BITS)
-#define MD_MAX_BRW_SIZE (1 << LNET_MTU_BITS)
+#define ONE_MB_BRW_SIZE (1U << LNET_MTU_BITS)
+#define MD_MAX_BRW_SIZE (1U << LNET_MTU_BITS)
#define MD_MAX_BRW_PAGES (MD_MAX_BRW_SIZE >> PAGE_SHIFT)
#define DT_MAX_BRW_SIZE PTLRPC_MAX_BRW_SIZE
+#define DT_DEF_BRW_SIZE (4 * ONE_MB_BRW_SIZE)
#define DT_MAX_BRW_PAGES (DT_MAX_BRW_SIZE >> PAGE_SHIFT)
-#define OFD_MAX_BRW_SIZE (1 << LNET_MTU_BITS)
+#define OFD_MAX_BRW_SIZE (1U << LNET_MTU_BITS)
/* When PAGE_SIZE is a constant, we can check our arithmetic here with cpp! */
#if ((PTLRPC_MAX_BRW_PAGES & (PTLRPC_MAX_BRW_PAGES - 1)) != 0)
/** Our own lnet nid for this connection */
lnet_nid_t c_self;
/** Remote side nid for this connection */
- lnet_process_id_t c_peer;
+ struct lnet_process_id c_peer;
/** UUID of the other side */
struct obd_uuid c_remote_uuid;
/** reference counter for this connection */
* ptlrpc callback & work item stuff
*/
struct ptlrpc_cb_id {
- void (*cbid_fn)(lnet_event_t *ev); /* specific callback fn */
- void *cbid_arg; /* additional arg */
+ /* Completion callback invoked from the LNet event handler for the
+  * buffer this id is attached to. */
+ void (*cbid_fn)(struct lnet_event *ev); /* specific callback fn */
+ void *cbid_arg; /* additional arg */
};
/** Maximum number of locks to fit into reply state */
unsigned long rs_committed:1;/* the transaction was committed
and the rs was dispatched
by ptlrpc_commit_replies */
+ unsigned long rs_convert_lock:1; /* need to convert saved
+ * locks to COS mode */
atomic_t rs_refcount; /* number of users */
/** Number of locks awaiting client ACK */
int rs_nlocks;
struct obd_export *rs_export;
struct ptlrpc_service_part *rs_svcpt;
/** Lnet metadata handle for the reply */
- lnet_handle_md_t rs_md_h;
+ struct lnet_handle_md rs_md_h;
	/** Context for the service thread */
struct ptlrpc_svc_ctx *rs_svc_ctx;
cfs_duration_t cr_delay_limit;
/** time request was first queued */
cfs_time_t cr_queued_time;
- /** request sent timeval */
- struct timeval cr_sent_tv;
+ /** request sent in nanoseconds */
+ ktime_t cr_sent_ns;
/** time for request really sent out */
- time_t cr_sent_out;
+ time64_t cr_sent_out;
/** when req reply unlink must finish. */
- time_t cr_reply_deadline;
+ time64_t cr_reply_deadline;
/** when req bulk unlink must finish. */
- time_t cr_bulk_deadline;
+ time64_t cr_bulk_deadline;
/** when req unlink must finish. */
- time_t cr_req_deadline;
+ time64_t cr_req_deadline;
/** Portal to which this request would be sent */
short cr_req_ptl;
/** Portal where to wait for reply and where reply would be sent */
/** Link back to the request set */
struct ptlrpc_request_set *cr_set;
/** outgoing request MD handle */
- lnet_handle_md_t cr_req_md_h;
+ struct lnet_handle_md cr_req_md_h;
/** request-out callback parameter */
struct ptlrpc_cb_id cr_req_cbid;
/** incoming reply MD handle */
- lnet_handle_md_t cr_reply_md_h;
+ struct lnet_handle_md cr_reply_md_h;
wait_queue_head_t cr_reply_waitq;
/** reply callback parameter */
struct ptlrpc_cb_id cr_reply_cbid;
#define rq_bulk rq_cli.cr_bulk
#define rq_delay_limit rq_cli.cr_delay_limit
#define rq_queued_time rq_cli.cr_queued_time
-#define rq_sent_tv rq_cli.cr_sent_tv
+#define rq_sent_ns rq_cli.cr_sent_ns
#define rq_real_sent rq_cli.cr_sent_out
#define rq_reply_deadline rq_cli.cr_reply_deadline
#define rq_bulk_deadline rq_cli.cr_bulk_deadline
struct ptlrpc_nrs_request sr_nrq;
/** @} nrs */
/** request arrival time */
- struct timeval sr_arrival_time;
+ struct timespec64 sr_arrival_time;
/** server's half ctx */
struct ptlrpc_svc_ctx *sr_svc_ctx;
/** (server side), pointed directly into req buffer */
* rq_list
*/
spinlock_t rq_lock;
+ spinlock_t rq_early_free_lock;
/** client-side flags are serialized by rq_lock @{ */
unsigned int rq_intr:1, rq_replied:1, rq_err:1,
rq_timedout:1, rq_resend:1, rq_restart:1,
rq_allow_replay:1,
/* bulk request, sent to server, but uncommitted */
rq_unstable:1,
+ rq_early_free_repbuf:1, /* free reply buffer in advance */
rq_allow_intr:1;
/** @} */
/** our LNet NID */
lnet_nid_t rq_self;
/** Peer description (the other side) */
- lnet_process_id_t rq_peer;
+ struct lnet_process_id rq_peer;
+ /** Descriptor for the NID from which the peer sent the request. */
+ struct lnet_process_id rq_source;
/**
* service time estimate (secs)
* If the request is not served by this time, it is marked as timed out.
/**
* when request/reply sent (secs), or time when request should be sent
*/
- time_t rq_sent;
+ time64_t rq_sent;
/** when request must finish. */
- time_t rq_deadline;
+ time64_t rq_deadline;
/** request format description */
struct req_capsule rq_pill;
};
int bd_md_count; /* # valid entries in bd_mds */
int bd_md_max_brw; /* max entries in bd_mds */
/** array of associated MDs */
- lnet_handle_md_t bd_mds[PTLRPC_BULK_OPS_COUNT];
+ struct lnet_handle_md bd_mds[PTLRPC_BULK_OPS_COUNT];
union {
struct {
#define BD_GET_ENC_KVEC(desc, i) ((desc)->bd_u.bd_kvec.bd_enc_kvec[i])
enum {
- SVC_STOPPED = 1 << 0,
- SVC_STOPPING = 1 << 1,
- SVC_STARTING = 1 << 2,
- SVC_RUNNING = 1 << 3,
- SVC_EVENT = 1 << 4,
- SVC_SIGNAL = 1 << 5,
+ /* Service thread state: SVC_INIT is the initial state (no bits set);
+  * the remaining values are single-bit flags combined into a bitmask. */
+ SVC_INIT = 0,
+ SVC_STOPPED = 1 << 0,
+ SVC_STOPPING = 1 << 1,
+ SVC_STARTING = 1 << 2,
+ SVC_RUNNING = 1 << 3,
+ SVC_EVENT = 1 << 4,
+ SVC_SIGNAL = 1 << 5,
};
#define PTLRPC_THR_NAME_LEN 32
/** Back pointer to service for which this buffer is registered */
struct ptlrpc_service_part *rqbd_svcpt;
/** LNet descriptor */
- lnet_handle_md_t rqbd_md_h;
+ struct lnet_handle_md rqbd_md_h;
int rqbd_refcount;
/** The buffer itself */
char *rqbd_buffer;
int srv_cpt_bits;
/** CPT table this service is running over */
struct cfs_cpt_table *srv_cptable;
+
+ /* sysfs object */
+ struct kobject srv_kobj;
+ struct completion srv_kobj_unregister;
/**
* partition data for ptlrpc service
*/
*/
char pc_name[16];
/**
- * Environment for request interpreters to run in.
- */
- struct lu_env pc_env;
- /**
* CPT the thread is bound on.
*/
int pc_cpt;
/** @} nrs */
/* ptlrpc/events.c */
-extern lnet_handle_eq_t ptlrpc_eq_h;
+extern struct lnet_handle_eq ptlrpc_eq_h;
extern int ptlrpc_uuid_to_peer(struct obd_uuid *uuid,
- lnet_process_id_t *peer, lnet_nid_t *self);
+ struct lnet_process_id *peer, lnet_nid_t *self);
/**
* These callbacks are invoked by LNet when something happened to
* underlying buffer
* @{
*/
-extern void request_out_callback(lnet_event_t *ev);
-extern void reply_in_callback(lnet_event_t *ev);
-extern void client_bulk_callback(lnet_event_t *ev);
-extern void request_in_callback(lnet_event_t *ev);
-extern void reply_out_callback(lnet_event_t *ev);
+extern void request_out_callback(struct lnet_event *ev);
+extern void reply_in_callback(struct lnet_event *ev);
+extern void client_bulk_callback(struct lnet_event *ev);
+extern void request_in_callback(struct lnet_event *ev);
+extern void reply_out_callback(struct lnet_event *ev);
#ifdef HAVE_SERVER_SUPPORT
-extern void server_bulk_callback(lnet_event_t *ev);
+extern void server_bulk_callback(struct lnet_event *ev);
#endif
/** @} */
/* ptlrpc/connection.c */
-struct ptlrpc_connection *ptlrpc_connection_get(lnet_process_id_t peer,
+struct ptlrpc_connection *ptlrpc_connection_get(struct lnet_process_id peer,
lnet_nid_t self,
struct obd_uuid *uuid);
int ptlrpc_connection_put(struct ptlrpc_connection *c);
void ptlrpc_connection_fini(void);
extern lnet_pid_t ptl_get_pid(void);
+/*
+ * Check if the peer connection is on the local node. We need to use GFP_NOFS
+ * for requests from a local client to avoid recursing into the filesystem
+ * as we might end up waiting on a page sent in the request we're serving.
+ *
+ * Use __GFP_HIGHMEM so that the pages can use all of the available memory
+ * on 32-bit machines. Use more aggressive GFP_HIGHUSER flags from non-local
+ * clients to be able to generate more memory pressure on the OSS and allow
+ * inactive pages to be reclaimed, since it doesn't have any other processes
+ * or allocations that generate memory reclaim pressure.
+ *
+ * See b=17576 (bdf50dc9) and b=19529 (3dcf18d3) for details.
+ */
+static inline bool ptlrpc_connection_is_local(struct ptlrpc_connection *conn)
+{
+ if (!conn)
+ return false;
+
+ /* Loopback: the peer NID is this node's own NID. */
+ if (conn->c_peer.nid == conn->c_self)
+ return true;
+
+ /* NOTE(review): RETURN() is the libcfs debug-exit macro while the two
+  * branches above use plain return — confirm the mix is intentional
+  * (RETURN normally pairs with an ENTRY in the same function). */
+ RETURN(LNetIsPeerLocal(conn->c_peer.nid));
+}
+
/* ptlrpc/niobuf.c */
/**
* Actual interfacing with LNet to put/get/register/unregister stuff
LASSERT(req != NULL);
desc = req->rq_bulk;
- if (req->rq_bulk_deadline > cfs_time_current_sec())
+ if (req->rq_bulk_deadline > ktime_get_real_seconds())
return 1;
if (!desc)
*
* @{
*/
-void ptlrpc_save_lock(struct ptlrpc_request *req,
- struct lustre_handle *lock, int mode, int no_ack);
+void ptlrpc_save_lock(struct ptlrpc_request *req, struct lustre_handle *lock,
+ int mode, bool no_ack, bool convert_lock);
void ptlrpc_commit_replies(struct obd_export *exp);
void ptlrpc_dispatch_difficult_reply(struct ptlrpc_reply_state *rs);
void ptlrpc_schedule_difficult_reply(struct ptlrpc_reply_state *rs);
int ptlrpc_hpreq_handler(struct ptlrpc_request *req);
struct ptlrpc_service *ptlrpc_register_service(
struct ptlrpc_service_conf *conf,
+ struct kset *parent,
struct proc_dir_entry *proc_entry);
void ptlrpc_stop_all_threads(struct ptlrpc_service *svc);
static inline int
ptlrpc_client_replied(struct ptlrpc_request *req)
{
- if (req->rq_reply_deadline > cfs_time_current_sec())
+ /* Reply-unlink deadline still in the future: report "not replied"
+  * even if rq_replied is already set. */
+ if (req->rq_reply_deadline > ktime_get_real_seconds())
return 0;
return req->rq_replied;
}
static inline int
ptlrpc_client_recv(struct ptlrpc_request *req)
{
- if (req->rq_reply_deadline > cfs_time_current_sec())
+ /* Within the reply-unlink deadline the receive is still considered
+  * in progress regardless of rq_receiving_reply. */
+ if (req->rq_reply_deadline > ktime_get_real_seconds())
return 1;
return req->rq_receiving_reply;
}
int rc;
spin_lock(&req->rq_lock);
- if (req->rq_reply_deadline > cfs_time_current_sec()) {
+ if (req->rq_reply_deadline > ktime_get_real_seconds()) {
spin_unlock(&req->rq_lock);
return 1;
}
- if (req->rq_req_deadline > cfs_time_current_sec()) {
+ if (req->rq_req_deadline > ktime_get_real_seconds()) {
spin_unlock(&req->rq_lock);
return 1;
}
static inline void
ptlrpc_client_wake_req(struct ptlrpc_request *req)
{
+ smp_mb();
if (req->rq_set == NULL)
wake_up(&req->rq_reply_waitq);
else
* @{
*/
const char* ll_opcode2str(__u32 opcode);
+/* Inverse of ll_opcode2str(): map an opcode name to its numeric value.
+ * A const qualifier on a scalar return type is meaningless and triggers
+ * -Wignored-qualifiers (and can conflict with the definition), so return
+ * plain int; keep the definition's return type in sync. */
+int ll_str2opcode(const char *ops);
#ifdef CONFIG_PROC_FS
void ptlrpc_lprocfs_register_obd(struct obd_device *obd);
void ptlrpc_lprocfs_unregister_obd(struct obd_device *obd);