*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2010, 2015, Intel Corporation.
+ * Copyright (c) 2010, 2016, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
*
* @{
*/
-
+#include <linux/kobject.h>
#include <linux/uio.h>
#include <libcfs/libcfs.h>
#include <lnet/nidstr.h>
* currently supported maximum between peers at connect via ocd_brw_size.
*/
#define PTLRPC_MAX_BRW_BITS (LNET_MTU_BITS + PTLRPC_BULK_OPS_BITS)
-#define PTLRPC_MAX_BRW_SIZE (1 << PTLRPC_MAX_BRW_BITS)
-#define PTLRPC_MAX_BRW_PAGES (PTLRPC_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT)
+#define PTLRPC_MAX_BRW_SIZE (1U << PTLRPC_MAX_BRW_BITS)
+#define PTLRPC_MAX_BRW_PAGES (PTLRPC_MAX_BRW_SIZE >> PAGE_SHIFT)
-#define ONE_MB_BRW_SIZE (1 << LNET_MTU_BITS)
-#define MD_MAX_BRW_SIZE (1 << LNET_MTU_BITS)
-#define MD_MAX_BRW_PAGES (MD_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT)
+#define ONE_MB_BRW_SIZE (1U << LNET_MTU_BITS)
+#define MD_MAX_BRW_SIZE (1U << LNET_MTU_BITS)
+#define MD_MAX_BRW_PAGES (MD_MAX_BRW_SIZE >> PAGE_SHIFT)
#define DT_MAX_BRW_SIZE PTLRPC_MAX_BRW_SIZE
-#define DT_MAX_BRW_PAGES (DT_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT)
-#define OFD_MAX_BRW_SIZE (1 << LNET_MTU_BITS)
+#define DT_DEF_BRW_SIZE (4 * ONE_MB_BRW_SIZE)
+#define DT_MAX_BRW_PAGES (DT_MAX_BRW_SIZE >> PAGE_SHIFT)
+#define OFD_MAX_BRW_SIZE (1U << LNET_MTU_BITS)
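/*
 * Worked example (editorial illustration; assumes LNET_MTU_BITS == 20, i.e. a
 * 1 MiB LNet MTU, PTLRPC_BULK_OPS_BITS == 4 and 4 KiB pages -- values not
 * shown in this excerpt):
 *
 *	PTLRPC_MAX_BRW_BITS  = 20 + 4              = 24
 *	PTLRPC_MAX_BRW_SIZE  = 1U << 24            = 16 MiB
 *	PTLRPC_MAX_BRW_PAGES = 16 MiB >> 12        = 4096
 *	ONE_MB_BRW_SIZE      = MD_MAX_BRW_SIZE     = 1 MiB
 *	DT_DEF_BRW_SIZE      = 4 * ONE_MB_BRW_SIZE = 4 MiB
 */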
/* When PAGE_SIZE is a constant, we can check our arithmetic here with cpp! */
#if ((PTLRPC_MAX_BRW_PAGES & (PTLRPC_MAX_BRW_PAGES - 1)) != 0)
# error "PTLRPC_MAX_BRW_PAGES isn't a power of two"
#endif
-#if (PTLRPC_MAX_BRW_SIZE != (PTLRPC_MAX_BRW_PAGES * PAGE_CACHE_SIZE))
-# error "PTLRPC_MAX_BRW_SIZE isn't PTLRPC_MAX_BRW_PAGES * PAGE_CACHE_SIZE"
+#if (PTLRPC_MAX_BRW_SIZE != (PTLRPC_MAX_BRW_PAGES * PAGE_SIZE))
+# error "PTLRPC_MAX_BRW_SIZE isn't PTLRPC_MAX_BRW_PAGES * PAGE_SIZE"
#endif
#if (PTLRPC_MAX_BRW_SIZE > LNET_MTU * PTLRPC_BULK_OPS_COUNT)
# error "PTLRPC_MAX_BRW_SIZE too big"
*/
/* depress threads factor for VM with small memory size */
#define OSS_THR_FACTOR min_t(int, 8, \
- NUM_CACHEPAGES >> (28 - PAGE_CACHE_SHIFT))
+ NUM_CACHEPAGES >> (28 - PAGE_SHIFT))
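/*
 * Worked example (editorial illustration; assumes 4 KiB pages, PAGE_SHIFT ==
 * 12): the shift is then 16 bits, so the factor grows by one for every 64Ki
 * cache pages (256 MiB) counted by NUM_CACHEPAGES and hits its cap of 8 once
 * roughly 2 GiB of cache pages are available.
 */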
#define OSS_NTHRS_INIT (PTLRPC_NTHRS_INIT + 1)
#define OSS_NTHRS_BASE 64
/** Our own lnet nid for this connection */
lnet_nid_t c_self;
/** Remote side nid for this connection */
- lnet_process_id_t c_peer;
+ struct lnet_process_id c_peer;
/** UUID of the other side */
struct obd_uuid c_remote_uuid;
/** reference counter for this connection */
* ptlrpc callback & work item stuff
*/
struct ptlrpc_cb_id {
- void (*cbid_fn)(lnet_event_t *ev); /* specific callback fn */
- void *cbid_arg; /* additional arg */
+ void (*cbid_fn)(struct lnet_event *ev); /* specific callback fn */
+ void *cbid_arg; /* additional arg */
};
/** Maximum number of locks to fit into reply state */
unsigned long rs_committed:1;/* the transaction was committed
and the rs was dispatched
by ptlrpc_commit_replies */
+ unsigned long rs_convert_lock:1; /* need to convert saved
+ * locks to COS mode */
atomic_t rs_refcount; /* number of users */
/** Number of locks awaiting client ACK */
int rs_nlocks;
struct obd_export *rs_export;
struct ptlrpc_service_part *rs_svcpt;
/** Lnet metadata handle for the reply */
- lnet_handle_md_t rs_md_h;
+ struct lnet_handle_md rs_md_h;
/** Context for the service thread */
struct ptlrpc_svc_ctx *rs_svc_ctx;
/** RPC stages */
enum rq_phase {
- RQ_PHASE_NEW = 0xebc0de00,
- RQ_PHASE_RPC = 0xebc0de01,
- RQ_PHASE_BULK = 0xebc0de02,
- RQ_PHASE_INTERPRET = 0xebc0de03,
- RQ_PHASE_COMPLETE = 0xebc0de04,
- RQ_PHASE_UNREGISTERING = 0xebc0de05,
- RQ_PHASE_UNDEFINED = 0xebc0de06
+ RQ_PHASE_NEW = 0xebc0de00,
+ RQ_PHASE_RPC = 0xebc0de01,
+ RQ_PHASE_BULK = 0xebc0de02,
+ RQ_PHASE_INTERPRET = 0xebc0de03,
+ RQ_PHASE_COMPLETE = 0xebc0de04,
+ RQ_PHASE_UNREG_RPC = 0xebc0de05,
+ RQ_PHASE_UNREG_BULK = 0xebc0de06,
+ RQ_PHASE_UNDEFINED = 0xebc0de07
};
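/*
 * Illustrative lifecycle (editorial note, inferred from the names above and
 * from ptlrpc_rqphase_move() below): a client request normally moves
 * NEW -> RPC -> [BULK] -> INTERPRET -> COMPLETE.  UNREG_RPC and UNREG_BULK
 * are transient phases entered while the request or bulk MDs are being
 * unlinked from LNet; the phase to resume afterwards is kept in rq_next_phase.
 */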
/** Type of request interpreter call-back */
cfs_duration_t cr_delay_limit;
/** time request was first queued */
cfs_time_t cr_queued_time;
- /** request sent timeval */
- struct timeval cr_sent_tv;
+ /** request sent in nanoseconds */
+ ktime_t cr_sent_ns;
/** time for request really sent out */
- time_t cr_sent_out;
+ time64_t cr_sent_out;
/** when req reply unlink must finish. */
- time_t cr_reply_deadline;
+ time64_t cr_reply_deadline;
/** when req bulk unlink must finish. */
- time_t cr_bulk_deadline;
+ time64_t cr_bulk_deadline;
+ /** when req unlink must finish. */
+ time64_t cr_req_deadline;
/** Portal to which this request would be sent */
short cr_req_ptl;
/** Portal where to wait for reply and where reply would be sent */
/** Link back to the request set */
struct ptlrpc_request_set *cr_set;
/** outgoing request MD handle */
- lnet_handle_md_t cr_req_md_h;
+ struct lnet_handle_md cr_req_md_h;
/** request-out callback parameter */
struct ptlrpc_cb_id cr_req_cbid;
/** incoming reply MD handle */
- lnet_handle_md_t cr_reply_md_h;
+ struct lnet_handle_md cr_reply_md_h;
wait_queue_head_t cr_reply_waitq;
/** reply callback parameter */
struct ptlrpc_cb_id cr_reply_cbid;
#define rq_bulk rq_cli.cr_bulk
#define rq_delay_limit rq_cli.cr_delay_limit
#define rq_queued_time rq_cli.cr_queued_time
-#define rq_sent_tv rq_cli.cr_sent_tv
+#define rq_sent_ns rq_cli.cr_sent_ns
#define rq_real_sent rq_cli.cr_sent_out
#define rq_reply_deadline rq_cli.cr_reply_deadline
#define rq_bulk_deadline rq_cli.cr_bulk_deadline
+#define rq_req_deadline rq_cli.cr_req_deadline
#define rq_nr_resend rq_cli.cr_resend_nr
#define rq_request_portal rq_cli.cr_req_ptl
#define rq_reply_portal rq_cli.cr_rep_ptl
struct ptlrpc_nrs_request sr_nrq;
/** @} nrs */
/** request arrival time */
- struct timeval sr_arrival_time;
+ struct timespec64 sr_arrival_time;
/** server's half ctx */
struct ptlrpc_svc_ctx *sr_svc_ctx;
/** (server side), pointed directly into req buffer */
* rq_list
*/
spinlock_t rq_lock;
+ spinlock_t rq_early_free_lock;
/** client-side flags are serialized by rq_lock @{ */
unsigned int rq_intr:1, rq_replied:1, rq_err:1,
rq_timedout:1, rq_resend:1, rq_restart:1,
rq_allow_replay:1,
/* bulk request, sent to server, but uncommitted */
rq_unstable:1,
+ rq_early_free_repbuf:1, /* free reply buffer in advance */
rq_allow_intr:1;
/** @} */
rq_bulk_write:1, /* request bulk write */
/* server authentication flags */
rq_auth_gss:1, /* authenticated by gss */
- rq_auth_remote:1, /* authed as remote user */
rq_auth_usr_root:1, /* authed as root */
rq_auth_usr_mdt:1, /* authed as mdt */
rq_auth_usr_ost:1, /* authed as ost */
/** our LNet NID */
lnet_nid_t rq_self;
/** Peer description (the other side) */
- lnet_process_id_t rq_peer;
+ struct lnet_process_id rq_peer;
+ /** Descriptor for the NID from which the peer sent the request. */
+ struct lnet_process_id rq_source;
/**
* service time estimate (secs)
* If the request is not served by this time, it is marked as timed out.
/**
* when request/reply sent (secs), or time when request should be sent
*/
- time_t rq_sent;
+ time64_t rq_sent;
/** when request must finish. */
- time_t rq_deadline;
+ time64_t rq_deadline;
/** request format description */
struct req_capsule rq_pill;
};
static inline const char *
ptlrpc_phase2str(enum rq_phase phase)
{
- switch (phase) {
- case RQ_PHASE_NEW:
- return "New";
- case RQ_PHASE_RPC:
- return "Rpc";
- case RQ_PHASE_BULK:
- return "Bulk";
- case RQ_PHASE_INTERPRET:
- return "Interpret";
- case RQ_PHASE_COMPLETE:
- return "Complete";
- case RQ_PHASE_UNREGISTERING:
- return "Unregistering";
- default:
- return "?Phase?";
- }
+ switch (phase) {
+ case RQ_PHASE_NEW:
+ return "New";
+ case RQ_PHASE_RPC:
+ return "Rpc";
+ case RQ_PHASE_BULK:
+ return "Bulk";
+ case RQ_PHASE_INTERPRET:
+ return "Interpret";
+ case RQ_PHASE_COMPLETE:
+ return "Complete";
+ case RQ_PHASE_UNREG_RPC:
+ return "UnregRPC";
+ case RQ_PHASE_UNREG_BULK:
+ return "UnregBULK";
+ default:
+ return "?Phase?";
+ }
}
/**
#define FLAG(field, str) (field ? str : "")
/** Convert bit flags into a string */
-#define DEBUG_REQ_FLAGS(req) \
- ptlrpc_rqphase2str(req), \
- FLAG(req->rq_intr, "I"), FLAG(req->rq_replied, "R"), \
- FLAG(req->rq_err, "E"), \
- FLAG(req->rq_timedout, "X") /* eXpired */, FLAG(req->rq_resend, "S"), \
- FLAG(req->rq_restart, "T"), FLAG(req->rq_replay, "P"), \
- FLAG(req->rq_no_resend, "N"), \
- FLAG(req->rq_waiting, "W"), \
- FLAG(req->rq_wait_ctx, "C"), FLAG(req->rq_hp, "H"), \
- FLAG(req->rq_committed, "M")
-
-#define REQ_FLAGS_FMT "%s:%s%s%s%s%s%s%s%s%s%s%s%s"
+#define DEBUG_REQ_FLAGS(req) \
+ ptlrpc_rqphase2str(req), \
+ FLAG(req->rq_intr, "I"), FLAG(req->rq_replied, "R"), \
+ FLAG(req->rq_err, "E"), FLAG(req->rq_net_err, "e"), \
+ FLAG(req->rq_timedout, "X") /* eXpired */, FLAG(req->rq_resend, "S"), \
+ FLAG(req->rq_restart, "T"), FLAG(req->rq_replay, "P"), \
+ FLAG(req->rq_no_resend, "N"), \
+ FLAG(req->rq_waiting, "W"), \
+ FLAG(req->rq_wait_ctx, "C"), FLAG(req->rq_hp, "H"), \
+ FLAG(req->rq_committed, "M")
+
+#define REQ_FLAGS_FMT "%s:%s%s%s%s%s%s%s%s%s%s%s%s%s"
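/*
 * The format string and the flag list are meant to be used together:
 * REQ_FLAGS_FMT now carries fourteen "%s" conversions, one for the phase
 * string and one per FLAG() entry above (the extra one is rq_net_err).
 * A hypothetical direct use would look like:
 *
 *	CDEBUG(D_RPCTRACE, "req@%p " REQ_FLAGS_FMT "\n",
 *	       req, DEBUG_REQ_FLAGS(req));
 *
 * In practice they are consumed through _debug_req(), declared just below.
 */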
void _debug_req(struct ptlrpc_request *req,
struct libcfs_debug_msg_data *data, const char *fmt, ...)
int bd_md_count; /* # valid entries in bd_mds */
int bd_md_max_brw; /* max entries in bd_mds */
/** array of associated MDs */
- lnet_handle_md_t bd_mds[PTLRPC_BULK_OPS_COUNT];
+ struct lnet_handle_md bd_mds[PTLRPC_BULK_OPS_COUNT];
union {
struct {
#define BD_GET_ENC_KVEC(desc, i) ((desc)->bd_u.bd_kvec.bd_enc_kvec[i])
enum {
- SVC_STOPPED = 1 << 0,
- SVC_STOPPING = 1 << 1,
- SVC_STARTING = 1 << 2,
- SVC_RUNNING = 1 << 3,
- SVC_EVENT = 1 << 4,
- SVC_SIGNAL = 1 << 5,
+ SVC_INIT = 0,
+ SVC_STOPPED = 1 << 0,
+ SVC_STOPPING = 1 << 1,
+ SVC_STARTING = 1 << 2,
+ SVC_RUNNING = 1 << 3,
+ SVC_EVENT = 1 << 4,
+ SVC_SIGNAL = 1 << 5,
};
#define PTLRPC_THR_NAME_LEN 32
/** Back pointer to service for which this buffer is registered */
struct ptlrpc_service_part *rqbd_svcpt;
/** LNet descriptor */
- lnet_handle_md_t rqbd_md_h;
+ struct lnet_handle_md rqbd_md_h;
int rqbd_refcount;
/** The buffer itself */
char *rqbd_buffer;
int srv_cpt_bits;
/** CPT table this service is running over */
struct cfs_cpt_table *srv_cptable;
+
+ /* sysfs object */
+ struct kobject srv_kobj;
+ struct completion srv_kobj_unregister;
/**
* partition data for ptlrpc service
*/
*/
char pc_name[16];
/**
- * Environment for request interpreters to run in.
- */
- struct lu_env pc_env;
- /**
* CPT the thread is bound on.
*/
int pc_cpt;
/** @} nrs */
/* ptlrpc/events.c */
-extern lnet_handle_eq_t ptlrpc_eq_h;
+extern struct lnet_handle_eq ptlrpc_eq_h;
extern int ptlrpc_uuid_to_peer(struct obd_uuid *uuid,
- lnet_process_id_t *peer, lnet_nid_t *self);
+ struct lnet_process_id *peer, lnet_nid_t *self);
/**
* These callbacks are invoked by LNet when something happened to
* underlying buffer
* @{
*/
-extern void request_out_callback(lnet_event_t *ev);
-extern void reply_in_callback(lnet_event_t *ev);
-extern void client_bulk_callback(lnet_event_t *ev);
-extern void request_in_callback(lnet_event_t *ev);
-extern void reply_out_callback(lnet_event_t *ev);
+extern void request_out_callback(struct lnet_event *ev);
+extern void reply_in_callback(struct lnet_event *ev);
+extern void client_bulk_callback(struct lnet_event *ev);
+extern void request_in_callback(struct lnet_event *ev);
+extern void reply_out_callback(struct lnet_event *ev);
#ifdef HAVE_SERVER_SUPPORT
-extern void server_bulk_callback(lnet_event_t *ev);
+extern void server_bulk_callback(struct lnet_event *ev);
#endif
/** @} */
/* ptlrpc/connection.c */
-struct ptlrpc_connection *ptlrpc_connection_get(lnet_process_id_t peer,
+struct ptlrpc_connection *ptlrpc_connection_get(struct lnet_process_id peer,
lnet_nid_t self,
struct obd_uuid *uuid);
int ptlrpc_connection_put(struct ptlrpc_connection *c);
void ptlrpc_connection_fini(void);
extern lnet_pid_t ptl_get_pid(void);
+/*
+ * Check if the peer connection is on the local node. We need to use GFP_NOFS
+ * for requests from a local client to avoid recursing into the filesystem
+ * as we might end up waiting on a page sent in the request we're serving.
+ *
+ * Use __GFP_HIGHMEM so that the pages can use all of the available memory
+ * on 32-bit machines. Use more aggressive GFP_HIGHUSER flags from non-local
+ * clients to be able to generate more memory pressure on the OSS and allow
+ * inactive pages to be reclaimed, since it doesn't have any other processes
+ * or allocations that generate memory reclaim pressure.
+ *
+ * See b=17576 (bdf50dc9) and b=19529 (3dcf18d3) for details.
+ */
+static inline bool ptlrpc_connection_is_local(struct ptlrpc_connection *conn)
+{
+ if (!conn)
+ return false;
+
+ if (conn->c_peer.nid == conn->c_self)
+ return true;
+
+	return LNetIsPeerLocal(conn->c_peer.nid);
+}
+
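/*
 * Illustrative sketch (ptlrpc_bulk_gfp_mask() is a hypothetical helper, not
 * part of this header): how an allocation could pick its GFP flags from the
 * locality test above, along the lines described in the comment.
 */
static inline gfp_t ptlrpc_bulk_gfp_mask(struct ptlrpc_connection *conn)
{
	/* local client: avoid recursing into the filesystem from reclaim,
	 * but still allow highmem pages on 32-bit machines */
	if (ptlrpc_connection_is_local(conn))
		return GFP_NOFS | __GFP_HIGHMEM;

	/* remote client: allow more aggressive reclaim on the server */
	return GFP_HIGHUSER;
}
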
/* ptlrpc/niobuf.c */
/**
* Actual interfacing with LNet to put/get/register/unregister stuff
static inline int ptlrpc_client_bulk_active(struct ptlrpc_request *req)
{
struct ptlrpc_bulk_desc *desc;
- int rc;
+ int rc;
- LASSERT(req != NULL);
+ LASSERT(req != NULL);
desc = req->rq_bulk;
- if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK) &&
- req->rq_bulk_deadline > cfs_time_current_sec())
- return 1;
+ if (req->rq_bulk_deadline > ktime_get_real_seconds())
+ return 1;
- if (!desc)
- return 0;
+ if (!desc)
+ return 0;
spin_lock(&desc->bd_lock);
rc = desc->bd_md_count;
void ptlrpc_init_client(int req_portal, int rep_portal, char *name,
struct ptlrpc_client *);
void ptlrpc_cleanup_client(struct obd_import *imp);
-struct ptlrpc_connection *ptlrpc_uuid_to_connection(struct obd_uuid *uuid);
+struct ptlrpc_connection *ptlrpc_uuid_to_connection(struct obd_uuid *uuid,
+ lnet_nid_t nid4refnet);
int ptlrpc_queue_wait(struct ptlrpc_request *req);
int ptlrpc_replay_req(struct ptlrpc_request *req);
int i;
for (i = 0; i < desc->bd_iov_count ; i++)
- page_cache_release(BD_GET_KIOV(desc, i).kiov_page);
+ put_page(BD_GET_KIOV(desc, i).kiov_page);
}
static inline void ptlrpc_release_bulk_noop(struct ptlrpc_bulk_desc *desc)
*
* @{
*/
-void ptlrpc_save_lock(struct ptlrpc_request *req,
- struct lustre_handle *lock, int mode, int no_ack);
+void ptlrpc_save_lock(struct ptlrpc_request *req, struct lustre_handle *lock,
+ int mode, bool no_ack, bool convert_lock);
void ptlrpc_commit_replies(struct obd_export *exp);
void ptlrpc_dispatch_difficult_reply(struct ptlrpc_reply_state *rs);
void ptlrpc_schedule_difficult_reply(struct ptlrpc_reply_state *rs);
int ptlrpc_hpreq_handler(struct ptlrpc_request *req);
struct ptlrpc_service *ptlrpc_register_service(
struct ptlrpc_service_conf *conf,
+ struct kset *parent,
struct proc_dir_entry *proc_entry);
void ptlrpc_stop_all_threads(struct ptlrpc_service *svc);
if (req->rq_phase == new_phase)
return;
- if (new_phase == RQ_PHASE_UNREGISTERING) {
+ if (new_phase == RQ_PHASE_UNREG_RPC ||
+ new_phase == RQ_PHASE_UNREG_BULK) {
+ /* No embedded unregistering phases */
+ if (req->rq_phase == RQ_PHASE_UNREG_RPC ||
+ req->rq_phase == RQ_PHASE_UNREG_BULK)
+ return;
+
req->rq_next_phase = req->rq_phase;
if (req->rq_import)
atomic_inc(&req->rq_import->imp_unregistering);
}
- if (req->rq_phase == RQ_PHASE_UNREGISTERING) {
+ if (req->rq_phase == RQ_PHASE_UNREG_RPC ||
+ req->rq_phase == RQ_PHASE_UNREG_BULK) {
if (req->rq_import)
atomic_dec(&req->rq_import->imp_unregistering);
}
static inline int
ptlrpc_client_early(struct ptlrpc_request *req)
{
- if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
- req->rq_reply_deadline > cfs_time_current_sec())
- return 0;
return req->rq_early;
}
static inline int
ptlrpc_client_replied(struct ptlrpc_request *req)
{
- if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
- req->rq_reply_deadline > cfs_time_current_sec())
- return 0;
- return req->rq_replied;
+ if (req->rq_reply_deadline > ktime_get_real_seconds())
+ return 0;
+ return req->rq_replied;
}
/** Returns true if request \a req is in process of receiving server reply */
static inline int
ptlrpc_client_recv(struct ptlrpc_request *req)
{
- if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
- req->rq_reply_deadline > cfs_time_current_sec())
- return 1;
- return req->rq_receiving_reply;
+ if (req->rq_reply_deadline > ktime_get_real_seconds())
+ return 1;
+ return req->rq_receiving_reply;
}
static inline int
int rc;
spin_lock(&req->rq_lock);
- if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
- req->rq_reply_deadline > cfs_time_current_sec()) {
+ if (req->rq_reply_deadline > ktime_get_real_seconds()) {
+ spin_unlock(&req->rq_lock);
+ return 1;
+ }
+ if (req->rq_req_deadline > ktime_get_real_seconds()) {
spin_unlock(&req->rq_lock);
return 1;
}
+
rc = !req->rq_req_unlinked || !req->rq_reply_unlinked ||
req->rq_receiving_reply;
spin_unlock(&req->rq_lock);
static inline void
ptlrpc_client_wake_req(struct ptlrpc_request *req)
{
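	/* Presumably makes the caller's updates to the request state visible
	 * before rq_set and the wait queues are inspected, so a waiter
	 * checking that state without rq_lock cannot miss the wakeup. */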
+ smp_mb();
if (req->rq_set == NULL)
wake_up(&req->rq_reply_waitq);
else
* @{
*/
const char* ll_opcode2str(__u32 opcode);
+int ll_str2opcode(const char *ops);
#ifdef CONFIG_PROC_FS
void ptlrpc_lprocfs_register_obd(struct obd_device *obd);
void ptlrpc_lprocfs_unregister_obd(struct obd_device *obd);