LU-9728 osd: use GFP_HIGHUSER for non-local IO
fs/lustre-release.git: lustre/include/lustre_net.h
index 6cc0c91..db61399 100644
@@ -50,7 +50,7 @@
  *
  * @{
  */
-
+#include <linux/kobject.h>
 #include <linux/uio.h>
 #include <libcfs/libcfs.h>
 #include <lnet/nidstr.h>
 #define MD_MAX_BRW_SIZE                (1U << LNET_MTU_BITS)
 #define MD_MAX_BRW_PAGES       (MD_MAX_BRW_SIZE >> PAGE_SHIFT)
 #define DT_MAX_BRW_SIZE                PTLRPC_MAX_BRW_SIZE
+#define DT_DEF_BRW_SIZE                (4 * ONE_MB_BRW_SIZE)
 #define DT_MAX_BRW_PAGES       (DT_MAX_BRW_SIZE >> PAGE_SHIFT)
 #define OFD_MAX_BRW_SIZE       (1U << LNET_MTU_BITS)
 
@@ -786,16 +787,16 @@ struct ptlrpc_cli_req {
        cfs_duration_t                   cr_delay_limit;
        /** time request was first queued */
        cfs_time_t                       cr_queued_time;
-       /** request sent timeval */
-       struct timeval                   cr_sent_tv;
+       /** request sent in nanoseconds */
+       ktime_t                          cr_sent_ns;
        /** time for request really sent out */
-       time_t                           cr_sent_out;
+       time64_t                         cr_sent_out;
        /** when req reply unlink must finish. */
-       time_t                           cr_reply_deadline;
+       time64_t                         cr_reply_deadline;
        /** when req bulk unlink must finish. */
-       time_t                           cr_bulk_deadline;
+       time64_t                         cr_bulk_deadline;
        /** when req unlink must finish. */
-       time_t                           cr_req_deadline;
+       time64_t                         cr_req_deadline;
        /** Portal to which this request would be sent */
        short                            cr_req_ptl;
        /** Portal where to wait for reply and where reply would be sent */
@@ -850,7 +851,7 @@ struct ptlrpc_cli_req {
 #define rq_bulk                        rq_cli.cr_bulk
 #define rq_delay_limit         rq_cli.cr_delay_limit
 #define rq_queued_time         rq_cli.cr_queued_time
-#define rq_sent_tv             rq_cli.cr_sent_tv
+#define rq_sent_ns             rq_cli.cr_sent_ns
 #define rq_real_sent           rq_cli.cr_sent_out
 #define rq_reply_deadline      rq_cli.cr_reply_deadline
 #define rq_bulk_deadline       rq_cli.cr_bulk_deadline
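The switch from struct timeval to ktime_t makes the send timestamp a single 64-bit nanosecond value. A minimal sketch of the usual pattern, assuming this header's definitions are in scope; the demo_* helpers below are illustrative, not part of the patch:

/* Illustrative only: stamp the send time and later compute the elapsed
 * round-trip time in microseconds from the ktime_t value. */
static inline void demo_stamp_sent(struct ptlrpc_request *req)
{
	req->rq_sent_ns = ktime_get_real();	/* wall clock, ns resolution */
}

static inline s64 demo_sent_elapsed_usec(struct ptlrpc_request *req)
{
	return ktime_us_delta(ktime_get_real(), req->rq_sent_ns);
}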
@@ -912,7 +913,7 @@ struct ptlrpc_srv_req {
        struct ptlrpc_nrs_request        sr_nrq;
        /** @} nrs */
        /** request arrival time */
-       struct timeval                   sr_arrival_time;
+       struct timespec64                sr_arrival_time;
        /** server's half ctx */
        struct ptlrpc_svc_ctx           *sr_svc_ctx;
        /** (server side), pointed directly into req buffer */
@@ -966,6 +967,7 @@ struct ptlrpc_request {
         * rq_list
         */
        spinlock_t                       rq_lock;
+       spinlock_t                       rq_early_free_lock;
        /** client-side flags are serialized by rq_lock @{ */
        unsigned int rq_intr:1, rq_replied:1, rq_err:1,
                 rq_timedout:1, rq_resend:1, rq_restart:1,
@@ -997,6 +999,7 @@ struct ptlrpc_request {
                rq_allow_replay:1,
                /* bulk request, sent to server, but uncommitted */
                rq_unstable:1,
+               rq_early_free_repbuf:1, /* free reply buffer in advance */
                rq_allow_intr:1;
        /** @} */
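The new rq_early_free_lock/rq_early_free_repbuf pair lets the reply buffer be released before the request itself is freed. A hypothetical reader-side sketch of how the flag could be consulted (the demo_* name is illustrative; the actual freeing path is not shown in this hunk):

/* Illustrative only: check under rq_early_free_lock whether the reply
 * buffer was already freed early before touching it. */
static inline bool demo_repbuf_available(struct ptlrpc_request *req)
{
	bool available;

	spin_lock(&req->rq_early_free_lock);
	available = !req->rq_early_free_repbuf;
	spin_unlock(&req->rq_early_free_lock);

	return available;
}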
 
@@ -1117,9 +1120,9 @@ struct ptlrpc_request {
        /**
         * when request/reply sent (secs), or time when request should be sent
         */
-       time_t                           rq_sent;
+       time64_t                         rq_sent;
        /** when request must finish. */
-       time_t                           rq_deadline;
+       time64_t                         rq_deadline;
        /** request format description */
        struct req_capsule               rq_pill;
 };
@@ -1728,6 +1731,10 @@ struct ptlrpc_service {
        int                             srv_cpt_bits;
        /** CPT table this service is running over */
        struct cfs_cpt_table            *srv_cptable;
+
+       /* sysfs object */
+       struct kobject                  srv_kobj;
+       struct completion               srv_kobj_unregister;
        /**
         * partition data for ptlrpc service
         */
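The embedded srv_kobj plus srv_kobj_unregister completion suggest the standard kernel teardown pattern for an embedded kobject. A sketch under the assumption that the ktype's release() signals the completion (demo_* names are illustrative, not from this patch):

/* Illustrative only: the usual embedded-kobject teardown pattern. */
static void demo_srv_kobj_release(struct kobject *kobj)
{
	struct ptlrpc_service *svc = container_of(kobj, struct ptlrpc_service,
						  srv_kobj);

	complete(&svc->srv_kobj_unregister);
}

static void demo_srv_sysfs_fini(struct ptlrpc_service *svc)
{
	/* drop the last reference, then wait until release() has run so the
	 * service structure is not freed while sysfs files are still open */
	kobject_put(&svc->srv_kobj);
	wait_for_completion(&svc->srv_kobj_unregister);
}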
@@ -2012,6 +2019,30 @@ int ptlrpc_connection_init(void);
 void ptlrpc_connection_fini(void);
 extern lnet_pid_t ptl_get_pid(void);
 
+/*
+ * Check if the peer connection is on the local node.  We need to use GFP_NOFS
+ * for requests from a local client to avoid recursing into the filesystem
+ * as we might end up waiting on a page sent in the request we're serving.
+ *
+ * Use __GFP_HIGHMEM so that the pages can use all of the available memory
+ * on 32-bit machines.  Use the more aggressive GFP_HIGHUSER flags for
+ * non-local clients to generate more memory pressure on the OSS and allow
+ * inactive pages to be reclaimed, since the OSS doesn't have any other
+ * processes or allocations that generate memory reclaim pressure.
+ *
+ * See b=17576 (bdf50dc9) and b=19529 (3dcf18d3) for details.
+ */
+static inline bool ptlrpc_connection_is_local(struct ptlrpc_connection *conn)
+{
+       if (!conn)
+               return false;
+
+       if (conn->c_peer.nid == conn->c_self)
+               return true;
+
+       RETURN(LNetIsPeerLocal(conn->c_peer.nid));
+}
+
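To illustrate the comment above: a caller in the bulk I/O path could select the allocation mask from the connection locality roughly as follows. This is a hypothetical helper, not the patch's OSD change; using the server-side export's exp_connection here is an assumption.

/* Illustrative only: pick GFP_NOFS for a local client to avoid recursing
 * into the filesystem, and GFP_HIGHUSER for remote clients so the OSS can
 * use highmem and apply normal reclaim pressure. */
static inline gfp_t demo_bulk_gfp_mask(struct ptlrpc_request *req)
{
	if (ptlrpc_connection_is_local(req->rq_export->exp_connection))
		return GFP_NOFS;

	return GFP_HIGHUSER;
}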
 /* ptlrpc/niobuf.c */
 /**
  * Actual interfacing with LNet to put/get/register/unregister stuff
@@ -2051,7 +2082,7 @@ static inline int ptlrpc_client_bulk_active(struct ptlrpc_request *req)
        LASSERT(req != NULL);
        desc = req->rq_bulk;
 
-       if (req->rq_bulk_deadline > cfs_time_current_sec())
+       if (req->rq_bulk_deadline > ktime_get_real_seconds())
                return 1;
 
        if (!desc)
@@ -2260,6 +2291,7 @@ void ptlrpc_schedule_difficult_reply(struct ptlrpc_reply_state *rs);
 int ptlrpc_hpreq_handler(struct ptlrpc_request *req);
 struct ptlrpc_service *ptlrpc_register_service(
                                struct ptlrpc_service_conf *conf,
+                               struct kset *parent,
                                struct proc_dir_entry *proc_entry);
 void ptlrpc_stop_all_threads(struct ptlrpc_service *svc);
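ptlrpc_register_service() now takes a parent kset under which srv_kobj is added. A hypothetical caller might look like the sketch below; the kset name and error handling are illustrative only.

/* Illustrative only: create a kset and register the service beneath it so
 * the new srv_kobj shows up under that kset in sysfs. */
static struct ptlrpc_service *
demo_register_service(struct ptlrpc_service_conf *conf,
		      struct proc_dir_entry *proc_entry)
{
	struct kset *parent;

	parent = kset_create_and_add("demo_ptlrpc", NULL, kernel_kobj);
	if (!parent)
		return ERR_PTR(-ENOMEM);

	return ptlrpc_register_service(conf, parent, proc_entry);
}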
 
@@ -2471,7 +2503,7 @@ ptlrpc_client_early(struct ptlrpc_request *req)
 static inline int
 ptlrpc_client_replied(struct ptlrpc_request *req)
 {
-       if (req->rq_reply_deadline > cfs_time_current_sec())
+       if (req->rq_reply_deadline > ktime_get_real_seconds())
                return 0;
        return req->rq_replied;
 }
@@ -2480,7 +2512,7 @@ ptlrpc_client_replied(struct ptlrpc_request *req)
 static inline int
 ptlrpc_client_recv(struct ptlrpc_request *req)
 {
-       if (req->rq_reply_deadline > cfs_time_current_sec())
+       if (req->rq_reply_deadline > ktime_get_real_seconds())
                return 1;
        return req->rq_receiving_reply;
 }
@@ -2491,11 +2523,11 @@ ptlrpc_client_recv_or_unlink(struct ptlrpc_request *req)
        int rc;
 
        spin_lock(&req->rq_lock);
-       if (req->rq_reply_deadline > cfs_time_current_sec()) {
+       if (req->rq_reply_deadline > ktime_get_real_seconds()) {
                spin_unlock(&req->rq_lock);
                return 1;
        }
-       if (req->rq_req_deadline > cfs_time_current_sec()) {
+       if (req->rq_req_deadline > ktime_get_real_seconds()) {
                spin_unlock(&req->rq_lock);
                return 1;
        }
@@ -2509,6 +2541,7 @@ ptlrpc_client_recv_or_unlink(struct ptlrpc_request *req)
 static inline void
 ptlrpc_client_wake_req(struct ptlrpc_request *req)
 {
+       smp_mb();
        if (req->rq_set == NULL)
                wake_up(&req->rq_reply_waitq);
        else