LU-1144 ptlrpc: implement a NUMA aware ptlrpcd binding policy
[fs/lustre-release.git] lustre/include/lustre_net.h
index a85c8c3..41acb98 100644
@@ -28,6 +28,8 @@
 /*
  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Whamcloud, Inc.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
 # endif
 #endif /* __KERNEL__ */
 
-/* Size over which to OBD_VMALLOC() rather than OBD_ALLOC() service request
- * buffers */
-#define SVC_BUF_VMALLOC_THRESHOLD (2 * CFS_PAGE_SIZE)
-
 /**
  * The following constants determine how memory is used to buffer incoming
  * service requests.
 #define LDLM_MAXREQSIZE (5 * 1024)
 #define LDLM_MAXREPSIZE (1024)
 
+/** Absolute limits */
 #define MDT_MIN_THREADS 2UL
+#ifndef MDT_MAX_THREADS
 #define MDT_MAX_THREADS 512UL
-#define MDT_NUM_THREADS max(min_t(unsigned long, MDT_MAX_THREADS, \
-                                  cfs_num_physpages >> (25 - CFS_PAGE_SHIFT)), \
-                                  2UL)
-
-/** Absolute limits */
-#define MDS_THREADS_MIN 2
-#define MDS_THREADS_MAX 512
-#define MDS_THREADS_MIN_READPAGE 2
+#endif
 #define MDS_NBUFS       (64 * cfs_num_online_cpus())
-#define MDS_BUFSIZE     (8 * 1024)
 /**
  * Assume file name length = FNAME_MAX = 256 (true for ext3).
  *        path name length = PATH_MAX = 4096
- *        LOV MD size max  = EA_MAX = 4000
+ *        LOV MD size max  = EA_MAX = 48000 (2000 stripes)
  * symlink:  FNAME_MAX + PATH_MAX  <- largest
  * link:     FNAME_MAX + PATH_MAX  (mds_rec_link < mds_rec_create)
  * rename:   FNAME_MAX + FNAME_MAX
  * open:     FNAME_MAX + EA_MAX
  *
  * MDS_MAXREQSIZE ~= 4736 bytes =
- * lustre_msg + ldlm_request + mds_body + mds_rec_create + FNAME_MAX + PATH_MAX
+ * lustre_msg + ldlm_request + mdt_body + mds_rec_create + FNAME_MAX + PATH_MAX
  * MDS_MAXREPSIZE ~= 8300 bytes = lustre_msg + llog_header
  * or, for mds_close() and mds_reint_unlink() on a many-OST filesystem:
- *      = 9210 bytes = lustre_msg + mds_body + 160 * (easize + cookiesize)
+ *      = 9210 bytes = lustre_msg + mdt_body + 160 * (easize + cookiesize)
  *
  * Realistic size is about 512 bytes (20 character name + 128 char symlink),
  * except in the open case where there are a large number of OSTs in a LOV.
  */
-#define MDS_MAXREQSIZE  (5 * 1024)
-#define MDS_MAXREPSIZE  max(9 * 1024, 362 + LOV_MAX_STRIPE_COUNT * 56)
+#define MDS_MAXREPSIZE  max(10 * 1024, 362 + LOV_MAX_STRIPE_COUNT * 56)
+#define MDS_MAXREQSIZE  MDS_MAXREPSIZE
 
-/** FLD_MAXREQSIZE == lustre_msg + __u32 padding + ptlrpc_body + opc + md_fld */
+/** MDS_BUFSIZE = max_reqsize + max sptlrpc payload size */
+#define MDS_BUFSIZE     (MDS_MAXREQSIZE + 1024)
+
+/** FLD_MAXREQSIZE == lustre_msg + __u32 padding + ptlrpc_body + opc */
 #define FLD_MAXREQSIZE  (160)
 
-/** FLD_MAXREPSIZE == lustre_msg + ptlrpc_body + md_fld */
+/** FLD_MAXREPSIZE == lustre_msg + ptlrpc_body */
 #define FLD_MAXREPSIZE  (152)
 
 /**
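
For scale: with the 2000-stripe EA noted above, the stripe term dominates the new reply-size formula. A standalone sketch of the arithmetic (LOV_MAX_STRIPE_COUNT = 2000 is assumed here from the "2000 stripes" comment, not taken from this header):

    #include <stdio.h>

    #define LOV_MAX_STRIPE_COUNT 2000  /* assumed; see "2000 stripes" above */

    int main(void)
    {
            int fixed   = 10 * 1024;                        /* 10240 */
            int striped = 362 + LOV_MAX_STRIPE_COUNT * 56;  /* 112362 */

            /* MDS_MAXREPSIZE = max(fixed, striped) = 112362 bytes (~110 KiB),
             * and MDS_MAXREQSIZE now follows it */
            printf("%d\n", striped > fixed ? striped : fixed);
            return 0;
    }
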
@@ -243,7 +238,7 @@ struct ptlrpc_client {
 union ptlrpc_async_args {
         /**
          * Scratchpad for passing args to completion interpreter. Users
-         * cast to the struct of their choosing, and LASSERT that this is
+         * cast to the struct of their choosing, and CLASSERT that this is
          * big enough.  For _tons_ of context, OBD_ALLOC a struct and store
          * a pointer to it here.  The pointer_arg ensures this struct is at
          * least big enough for that.
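
The CLASSERT referenced above is a compile-time check, unlike the old runtime LASSERT. A minimal sketch of the cast-and-check idiom the comment describes (my_async_args and my_prep are hypothetical names):

    /* hypothetical per-request interpreter arguments */
    struct my_async_args {
            struct obd_info *aa_oi;
    };

    static void my_prep(struct ptlrpc_request *req, struct obd_info *oi)
    {
            struct my_async_args *aa;

            /* compile-time proof the scratchpad union is big enough */
            CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
            aa = (struct my_async_args *)&req->rq_async_args;
            aa->aa_oi = oi;
    }
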
@@ -267,6 +262,9 @@ typedef int (*set_interpreter_func)(struct ptlrpc_request_set *, void *, int);
  * returned.
  */
 struct ptlrpc_request_set {
+        cfs_atomic_t          set_refcount;
+        /** number of requests in the new-request queue */
+        cfs_atomic_t          set_new_count;
         /** number of uncompleted requests */
         cfs_atomic_t          set_remaining;
         /** wait queue to wait on for request events */
@@ -449,6 +447,10 @@ struct ptlrpc_hpreq_ops {
          * Check if the request is a high priority one.
          */
         int  (*hpreq_check)(struct ptlrpc_request *);
+        /**
+         * Called after the request has been handled.
+         */
+        void (*hpreq_fini)(struct ptlrpc_request *);
 };
 
 /**
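
A server can now pair the two callbacks, releasing in ->hpreq_fini() whatever ->hpreq_check() acquired. A hedged sketch with hypothetical names (only members shown in this header are initialized):

    static int my_hpreq_check(struct ptlrpc_request *req)
    {
            /* nonzero: treat this request as high priority */
            return lustre_msg_get_opc(req->rq_reqmsg) == LDLM_CANCEL;
    }

    static void my_hpreq_fini(struct ptlrpc_request *req)
    {
            /* called after the request has been handled;
             * undo any state taken in my_hpreq_check() */
    }

    static struct ptlrpc_hpreq_ops my_hpreq_ops = {
            .hpreq_check = my_hpreq_check,
            .hpreq_fini  = my_hpreq_fini,
    };
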
@@ -575,6 +577,7 @@ struct ptlrpc_request {
                                  rq_auth_remote:1,   /* authed as remote user */
                                  rq_auth_usr_root:1, /* authed as root */
                                  rq_auth_usr_mdt:1,  /* authed as mdt */
+                                 rq_auth_usr_ost:1,  /* authed as ost */
                                  /* security tfm flags */
                                  rq_pack_udesc:1,
                                  rq_pack_bulk:1,
@@ -831,25 +834,22 @@ ptlrpc_rqphase2str(struct ptlrpc_request *req)
 
 #define REQ_FLAGS_FMT "%s:%s%s%s%s%s%s%s%s%s%s%s%s"
 
-void _debug_req(struct ptlrpc_request *req, __u32 mask,
+void _debug_req(struct ptlrpc_request *req,
                 struct libcfs_debug_msg_data *data, const char *fmt, ...)
-        __attribute__ ((format (printf, 4, 5)));
+        __attribute__ ((format (printf, 3, 4)));
 
 /**
  * Helper that decides if we need to print the request according to current
  * debug level settings
  */
-#define debug_req(cdls, level, req, file, func, line, fmt, a...)              \
+#define debug_req(msgdata, mask, cdls, req, fmt, a...)                        \
 do {                                                                          \
-        CFS_CHECK_STACK();                                                    \
+        CFS_CHECK_STACK(msgdata, mask, cdls);                                 \
                                                                               \
-        if (((level) & D_CANTMASK) != 0 ||                                    \
-            ((libcfs_debug & (level)) != 0 &&                                 \
-             (libcfs_subsystem_debug & DEBUG_SUBSYSTEM) != 0)) {              \
-                static struct libcfs_debug_msg_data _req_dbg_data =           \
-                DEBUG_MSG_DATA_INIT(cdls, DEBUG_SUBSYSTEM, file, func, line); \
-                _debug_req((req), (level), &_req_dbg_data, fmt, ##a);         \
-        }                                                                     \
+        if (((mask) & D_CANTMASK) != 0 ||                                     \
+            ((libcfs_debug & (mask)) != 0 &&                                  \
+             (libcfs_subsystem_debug & DEBUG_SUBSYSTEM) != 0))                \
+                _debug_req((req), msgdata, fmt, ##a);                         \
 } while(0)
 
 /**
@@ -860,11 +860,12 @@ do {                                                                          \
 do {                                                                          \
         if ((level) & (D_ERROR | D_WARNING)) {                                \
                 static cfs_debug_limit_state_t cdls;                          \
-                debug_req(&cdls, level, req, __FILE__, __func__, __LINE__,    \
-                          "@@@ "fmt" ", ## args);                             \
-        } else                                                                \
-                debug_req(NULL, level, req, __FILE__, __func__, __LINE__,     \
-                          "@@@ "fmt" ", ## args);                             \
+                LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, level, &cdls);            \
+                debug_req(&msgdata, level, &cdls, req, "@@@ "fmt" ", ## args);\
+        } else {                                                              \
+                LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, level, NULL);             \
+                debug_req(&msgdata, level, NULL, req, "@@@ "fmt" ", ## args); \
+        }                                                                     \
 } while (0)
 /** @} */
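
Call sites are unaffected by the msgdata refactoring; a typical use still looks like this sketch:

    static void example_fail_path(struct ptlrpc_request *req, int rc)
    {
            /* D_ERROR/D_WARNING paths get the rate-limiting cdls above */
            DEBUG_REQ(D_ERROR, req, "handler failed: rc = %d", rc);
    }
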
 
@@ -944,6 +945,15 @@ struct ptlrpc_bulk_desc {
 #endif
 };
 
+enum {
+        SVC_STOPPED     = 1 << 0,
+        SVC_STOPPING    = 1 << 1,
+        SVC_STARTING    = 1 << 2,
+        SVC_RUNNING     = 1 << 3,
+        SVC_EVENT       = 1 << 4,
+        SVC_SIGNAL      = 1 << 5,
+};
+
 /**
  * Definition of server service thread structure
  */
@@ -977,6 +987,61 @@ struct ptlrpc_thread {
         struct lu_env *t_env;
 };
 
+static inline int thread_is_stopped(struct ptlrpc_thread *thread)
+{
+        return !!(thread->t_flags & SVC_STOPPED);
+}
+
+static inline int thread_is_stopping(struct ptlrpc_thread *thread)
+{
+        return !!(thread->t_flags & SVC_STOPPING);
+}
+
+static inline int thread_is_starting(struct ptlrpc_thread *thread)
+{
+        return !!(thread->t_flags & SVC_STARTING);
+}
+
+static inline int thread_is_running(struct ptlrpc_thread *thread)
+{
+        return !!(thread->t_flags & SVC_RUNNING);
+}
+
+static inline int thread_is_event(struct ptlrpc_thread *thread)
+{
+        return !!(thread->t_flags & SVC_EVENT);
+}
+
+static inline int thread_is_signal(struct ptlrpc_thread *thread)
+{
+        return !!(thread->t_flags & SVC_SIGNAL);
+}
+
+static inline void thread_clear_flags(struct ptlrpc_thread *thread, __u32 flags)
+{
+        thread->t_flags &= ~flags;
+}
+
+static inline void thread_set_flags(struct ptlrpc_thread *thread, __u32 flags)
+{
+        thread->t_flags = flags;
+}
+
+static inline void thread_add_flags(struct ptlrpc_thread *thread, __u32 flags)
+{
+        thread->t_flags |= flags;
+}
+
+static inline int thread_test_and_clear_flags(struct ptlrpc_thread *thread,
+                                              __u32 flags)
+{
+        if (thread->t_flags & flags) {
+                thread->t_flags &= ~flags;
+                return 1;
+        }
+        return 0;
+}
+
 /**
  * Request buffer descriptor structure.
  * This is a structure that contains one posted request buffer for service.
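
These accessors replace open-coded t_flags tests in the service threads; a thread's lifecycle reads roughly like this sketch (not the actual ptlrpc_main body):

    static int example_thread_body(struct ptlrpc_thread *thread)
    {
            thread_set_flags(thread, SVC_RUNNING);  /* assignment, not OR */

            while (!thread_is_stopping(thread)) {
                    /* ... sleep on the service waitq, handle one request ... */
                    if (thread_test_and_clear_flags(thread, SVC_EVENT)) {
                            /* consume the event that woke us */
                    }
            }

            thread_clear_flags(thread, SVC_RUNNING);
            thread_add_flags(thread, SVC_STOPPED);
            return 0;
    }
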
@@ -1004,9 +1069,16 @@ struct ptlrpc_request_buffer_desc {
         struct ptlrpc_request  rqbd_req;
 };
 
-typedef int (*svc_handler_t)(struct ptlrpc_request *req);
-typedef void (*svcreq_printfn_t)(void *, struct ptlrpc_request *);
-typedef int (*svc_hpreq_handler_t)(struct ptlrpc_request *);
+typedef int  (*svc_thr_init_t)(struct ptlrpc_thread *thread);
+typedef void (*svc_thr_done_t)(struct ptlrpc_thread *thread);
+typedef int  (*svc_handler_t)(struct ptlrpc_request *req);
+typedef int  (*svc_hpreq_handler_t)(struct ptlrpc_request *);
+typedef void (*svc_req_printfn_t)(void *, struct ptlrpc_request *);
+
+#ifndef __cfs_cacheline_aligned
+/* NB: put it here to reduce patch dependencies */
+# define __cfs_cacheline_aligned
+#endif
 
 /**
  * How many high priority requests to serve before serving one normal
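
The new typedefs let a service plug per-thread setup/teardown in one place; a hedged sketch (example_* names are hypothetical; t_data is assumed to be the usual per-thread stash pointer):

    static int example_thr_init(struct ptlrpc_thread *thread)
    {
            /* allocate service-specific per-thread state; wired through
             * the svc_thr_init_t/svc_thr_done_t hooks above */
            thread->t_data = NULL;  /* t_data: assumed per-thread pointer */
            return 0;
    }

    static void example_thr_done(struct ptlrpc_thread *thread)
    {
            /* free whatever example_thr_init() allocated */
    }
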
@@ -1019,106 +1091,181 @@ typedef int (*svc_hpreq_handler_t)(struct ptlrpc_request *);
  * The service is listening on a particular portal (like tcp port)
  * and perform actions for a specific server like IO service for OST
  * or general metadata service for MDS.
+ *
+ * The ptlrpc service has four locks:
+ * \a srv_lock
+ *    serializes operations on rqbds and requests waiting for preprocessing
+ * \a srv_rq_lock
+ *    serializes operations on active requests sent to this portal
+ * \a srv_at_lock
+ *    serializes adaptive timeout bookkeeping
+ * \a srv_rs_lock
+ *    serializes operations on the RS list (reply states)
+ *
+ * We currently have no use-case that takes two or more of these locks at
+ * the same time, so there is no lock ordering issue.
  */
 struct ptlrpc_service {
-        cfs_list_t       srv_list;              /* chain thru all services */
-        int              srv_max_req_size;      /* biggest request to receive */
-        int              srv_max_reply_size;    /* biggest reply to send */
-        int              srv_buf_size;          /* size of individual buffers */
-        int              srv_nbuf_per_group;    /* # buffers to allocate in 1 group */
-        int              srv_nbufs;             /* total # req buffer descs allocated */
-        int              srv_threads_min;       /* threads to start at SOW */
-        int              srv_threads_max;       /* thread upper limit */
-        int              srv_threads_started;   /* index of last started thread */
-        int              srv_threads_running;   /* # running threads */
-        cfs_atomic_t     srv_n_difficult_replies; /* # 'difficult' replies */
-        int              srv_n_active_reqs;     /* # reqs being served */
-        int              srv_n_hpreq;           /* # HPreqs being served */
-        cfs_duration_t   srv_rqbd_timeout;      /* timeout before re-posting reqs, in tick */
-        int              srv_watchdog_factor;   /* soft watchdog timeout multiplier */
-        unsigned         srv_cpu_affinity:1;    /* bind threads to CPUs */
-        unsigned         srv_at_check:1;        /* check early replies */
-        unsigned         srv_is_stopping:1;     /* under unregister_service */
-        cfs_time_t       srv_at_checktime;      /* debug */
-
-        /** Local portal on which to receive requests */
-        __u32            srv_req_portal;
-        /** Portal on the client to send replies to */
-        __u32            srv_rep_portal;
-
-        /** AT stuff */
+        /** most often accessed fields */
+        /** chain thru all services */
+        cfs_list_t                      srv_list;
+        /** only statically allocated strings here; we don't clean them */
+        char                           *srv_name;
+        /** only statically allocated strings here; we don't clean them */
+        char                           *srv_thread_name;
+        /** service thread list */
+        cfs_list_t                      srv_threads;
+        /** threads to start at beginning of service */
+        int                             srv_threads_min;
+        /** thread upper limit */
+        int                             srv_threads_max;
+        /** always increasing number used as the next thread id */
+        unsigned                        srv_threads_next_id;
+        /** # of starting threads */
+        int                             srv_threads_starting;
+        /** # running threads */
+        int                             srv_threads_running;
+
+        /** service operations, to be moved to ptlrpc_svc_ops_t in the future */
         /** @{ */
-        struct adaptive_timeout srv_at_estimate;/* estimated rpc service time */
-        cfs_spinlock_t   srv_at_lock;
-        struct ptlrpc_at_array  srv_at_array;   /* reqs waiting for replies */
-        cfs_timer_t      srv_at_timer;      /* early reply timer */
-        /** @} */
-
-        int              srv_n_queued_reqs; /* # reqs in either of the queues below */
-        int              srv_hpreq_count;   /* # hp requests handled */
-        int              srv_hpreq_ratio;   /* # hp per lp reqs to handle */
-        cfs_list_t       srv_req_in_queue;  /* incoming reqs */
-        cfs_list_t       srv_request_queue; /* reqs waiting for service */
-        cfs_list_t       srv_request_hpq;   /* high priority queue */
-
-        cfs_list_t       srv_request_history;  /* request history */
-        __u64            srv_request_seq;      /* next request sequence # */
-        __u64            srv_request_max_cull_seq; /* highest seq culled from history */
-        svcreq_printfn_t srv_request_history_print_fn; /* service-specific print fn */
-
-        cfs_list_t       srv_idle_rqbds;    /* request buffers to be reposted */
-        cfs_list_t       srv_active_rqbds;  /* req buffers receiving */
-        cfs_list_t       srv_history_rqbds; /* request buffer history */
-        int              srv_nrqbd_receiving; /* # posted request buffers */
-        int              srv_n_history_rqbds;  /* # request buffers in history */
-        int              srv_max_history_rqbds;/* max # request buffers in history */
-
-        cfs_atomic_t     srv_outstanding_replies;
-        cfs_list_t       srv_active_replies;   /* all the active replies */
-#ifndef __KERNEL__
-        cfs_list_t       srv_reply_queue;  /* replies waiting for service */
-#endif
-        cfs_waitq_t      srv_waitq; /* all threads sleep on this. This
-                                     * wait-queue is signalled when new
-                                     * incoming request arrives and when
-                                     * difficult reply has to be handled. */
-
-        cfs_list_t       srv_threads;       /* service thread list */
+        /**
+         * if non-NULL called during thread creation (ptlrpc_start_thread())
+         * to initialize service specific per-thread state.
+         */
+        svc_thr_init_t                  srv_init;
+        /**
+         * if non-NULL called during thread shutdown (ptlrpc_main()) to
+         * destruct state created by ->srv_init().
+         */
+        svc_thr_done_t                  srv_done;
         /** Handler function for incoming requests for this service */
-        svc_handler_t    srv_handler;
-        svc_hpreq_handler_t  srv_hpreq_handler; /* hp request handler */
-
-        char *srv_name; /* only statically allocated strings here; we don't clean them */
-        char *srv_thread_name; /* only statically allocated strings here; we don't clean them */
-
-        cfs_spinlock_t        srv_lock;
+        svc_handler_t                   srv_handler;
+        /** hp request handler */
+        svc_hpreq_handler_t             srv_hpreq_handler;
+        /** service-specific print fn */
+        svc_req_printfn_t               srv_req_printfn;
+        /** @} */
 
         /** Root of /proc dir tree for this service */
-        cfs_proc_dir_entry_t *srv_procroot;
+        cfs_proc_dir_entry_t           *srv_procroot;
         /** Pointer to statistic data for this service */
-        struct lprocfs_stats *srv_stats;
-
-        /** List of free reply_states */
-        cfs_list_t           srv_free_rs_list;
-        /** waitq to run, when adding stuff to srv_free_rs_list */
-        cfs_waitq_t          srv_free_rs_waitq;
-
+        struct lprocfs_stats           *srv_stats;
+        /** # hp per lp reqs to handle */
+        int                             srv_hpreq_ratio;
+        /** biggest request to receive */
+        int                             srv_max_req_size;
+        /** biggest reply to send */
+        int                             srv_max_reply_size;
+        /** size of individual buffers */
+        int                             srv_buf_size;
+        /** # buffers to allocate in 1 group */
+        int                             srv_nbuf_per_group;
+        /** Local portal on which to receive requests */
+        __u32                           srv_req_portal;
+        /** Portal on the client to send replies to */
+        __u32                           srv_rep_portal;
         /**
          * Tags for lu_context associated with this thread, see struct
          * lu_context.
          */
-        __u32                srv_ctx_tags;
+        __u32                           srv_ctx_tags;
+        /** soft watchdog timeout multiplier */
+        int                             srv_watchdog_factor;
+        /** bind threads to CPUs */
+        unsigned                        srv_cpu_affinity:1;
+        /** under unregister_service */
+        unsigned                        srv_is_stopping:1;
+
         /**
-         * if non-NULL called during thread creation (ptlrpc_start_thread())
-         * to initialize service specific per-thread state.
+         * serializes the following fields, protecting the rqbd list and
+         * incoming requests waiting for preprocessing
          */
-        int (*srv_init)(struct ptlrpc_thread *thread);
+        cfs_spinlock_t                  srv_lock  __cfs_cacheline_aligned;
+        /** incoming reqs */
+        cfs_list_t                      srv_req_in_queue;
+        /** total # req buffer descs allocated */
+        int                             srv_nbufs;
+        /** # posted request buffers */
+        int                             srv_nrqbd_receiving;
+        /** timeout before re-posting reqs, in ticks */
+        cfs_duration_t                  srv_rqbd_timeout;
+        /** request buffers to be reposted */
+        cfs_list_t                      srv_idle_rqbds;
+        /** req buffers receiving */
+        cfs_list_t                      srv_active_rqbds;
+        /** request buffer history */
+        cfs_list_t                      srv_history_rqbds;
+        /** # request buffers in history */
+        int                             srv_n_history_rqbds;
+        /** max # request buffers in history */
+        int                             srv_max_history_rqbds;
+        /** request history */
+        cfs_list_t                      srv_request_history;
+        /** next request sequence # */
+        __u64                           srv_request_seq;
+        /** highest seq culled from history */
+        __u64                           srv_request_max_cull_seq;
         /**
-         * if non-NULL called during thread shutdown (ptlrpc_main()) to
-         * destruct state created by ->srv_init().
+         * all threads sleep on this. This wait-queue is signalled when a new
+         * incoming request arrives and when a difficult reply has to be handled.
          */
-        void (*srv_done)(struct ptlrpc_thread *thread);
+        cfs_waitq_t                     srv_waitq;
 
+        /**
+         * serializes the following fields, used when processing requests
+         * sent to this portal
+         */
+        cfs_spinlock_t                  srv_rq_lock __cfs_cacheline_aligned;
+        /** reqs waiting for service */
+        cfs_list_t                      srv_request_queue;
+        /** high priority queue */
+        cfs_list_t                      srv_request_hpq;
+        /** # incoming reqs */
+        int                             srv_n_queued_reqs;
+        /** # reqs being served */
+        int                             srv_n_active_reqs;
+        /** # HPreqs being served */
+        int                             srv_n_active_hpreq;
+        /** # hp requests handled */
+        int                             srv_hpreq_count;
+
+        /** AT stuff */
+        /** @{ */
+        /**
+         * serializes the following fields, used for adaptive
+         * timeout changes
+         */
+        cfs_spinlock_t                  srv_at_lock __cfs_cacheline_aligned;
+        /** estimated rpc service time */
+        struct adaptive_timeout         srv_at_estimate;
+        /** reqs waiting for replies */
+        struct ptlrpc_at_array          srv_at_array;
+        /** early reply timer */
+        cfs_timer_t                     srv_at_timer;
+        /** check early replies */
+        unsigned                        srv_at_check;
+        /** debug */
+        cfs_time_t                      srv_at_checktime;
+        /** @} */
+
+        /**
+         * serializes the following fields, used when processing
+         * replies for this portal
+         */
+        cfs_spinlock_t                  srv_rs_lock __cfs_cacheline_aligned;
+        /** all the active replies */
+        cfs_list_t                      srv_active_replies;
+#ifndef __KERNEL__
+        /** replies waiting for service */
+        cfs_list_t                      srv_reply_queue;
+#endif
+        /** List of free reply_states */
+        cfs_list_t                      srv_free_rs_list;
+        /** waitq to run, when adding stuff to srv_free_rs_list */
+        cfs_waitq_t                     srv_free_rs_waitq;
+        /** # 'difficult' replies */
+        cfs_atomic_t                    srv_n_difficult_replies;
         //struct ptlrpc_srv_ni srv_interfaces[0];
 };
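
Each cacheline-aligned lock guards only the fields that follow it, so a hot path touches a single cache line. A minimal sketch of the srv_lock group (reposting an idle rqbd; rqbd_list is assumed from the full ptlrpc_request_buffer_desc definition):

    static void example_repost_rqbd(struct ptlrpc_service *svc,
                                    struct ptlrpc_request_buffer_desc *rqbd)
    {
            cfs_spin_lock(&svc->srv_lock);       /* rqbd lists live here */
            cfs_list_add(&rqbd->rqbd_list, &svc->srv_idle_rqbds);
            cfs_spin_unlock(&svc->srv_lock);
    }
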
 
@@ -1154,6 +1301,22 @@ struct ptlrpcd_ctl {
          * Environment for request interpreters to run in.
          */
         struct lu_env               pc_env;
+        /**
+         * Index of ptlrpcd thread in the array.
+         */
+        int                         pc_index;
+        /**
+         * Number of the ptlrpcd's partners.
+         */
+        int                         pc_npartners;
+        /**
+         * Pointer to the array of partners' ptlrpcd_ctl structure.
+         */
+        struct ptlrpcd_ctl        **pc_partners;
+        /**
+         * Record the partner index to be processed next.
+         */
+        int                         pc_cursor;
 #ifndef __KERNEL__
         /**
          * Async rpcs flag to make sure that ptlrpcd_check() is called only
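
A bound ptlrpcd can hand work to a partner by walking pc_partners round-robin with pc_cursor; roughly like this sketch (not the patch's exact selection logic):

    static struct ptlrpcd_ctl *example_pick_partner(struct ptlrpcd_ctl *pc)
    {
            if (pc->pc_npartners == 0)
                    return NULL;            /* free mode: no partners */
            if (pc->pc_cursor >= pc->pc_npartners)
                    pc->pc_cursor = 0;
            return pc->pc_partners[pc->pc_cursor++];
    }
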
@@ -1194,7 +1357,11 @@ enum ptlrpcd_ctl_flags {
         /**
          * This is a recovery ptlrpc thread.
          */
-        LIOD_RECOVERY    = 1 << 3
+        LIOD_RECOVERY    = 1 << 3,
+        /**
+         * The ptlrpcd is bound to some CPU core.
+         */
+        LIOD_BIND        = 1 << 4,
 };
 
 /* ptlrpc/events.c */
@@ -1308,8 +1475,8 @@ void ptlrpc_interrupted_set(void *data);
 void ptlrpc_mark_interrupted(struct ptlrpc_request *req);
 void ptlrpc_set_destroy(struct ptlrpc_request_set *);
 void ptlrpc_set_add_req(struct ptlrpc_request_set *, struct ptlrpc_request *);
-int ptlrpc_set_add_new_req(struct ptlrpcd_ctl *pc,
-                           struct ptlrpc_request *req);
+void ptlrpc_set_add_new_req(struct ptlrpcd_ctl *pc,
+                            struct ptlrpc_request *req);
 
 void ptlrpc_free_rq_pool(struct ptlrpc_request_pool *pool);
 void ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq);
@@ -1361,6 +1528,12 @@ __u64 ptlrpc_next_xid(void);
 __u64 ptlrpc_sample_next_xid(void);
 __u64 ptlrpc_req_xid(struct ptlrpc_request *request);
 
+/* Set of routines to run a function in ptlrpcd context */
+void *ptlrpcd_alloc_work(struct obd_import *imp,
+                         int (*cb)(const struct lu_env *, void *), void *data);
+void ptlrpcd_destroy_work(void *handler);
+int ptlrpcd_queue_work(void *handler);
+
 /** @} */
 
 struct ptlrpc_service_conf {
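
Typical use of the work interface (a hedged sketch; my_cb is hypothetical, and the ERR_PTR failure convention is an assumption, not shown in this header):

    static int my_cb(const struct lu_env *env, void *data)
    {
            /* runs later, in ptlrpcd context */
            return 0;
    }

    static int example_use_work(struct obd_import *imp, void *data)
    {
            void *work = ptlrpcd_alloc_work(imp, my_cb, data);
            int   rc;

            if (IS_ERR(work))               /* assumed failure convention */
                    return PTR_ERR(work);
            rc = ptlrpcd_queue_work(work);  /* schedule my_cb() */
            /* ... once no more queueing is needed ... */
            ptlrpcd_destroy_work(work);
            return rc;
    }
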
@@ -1391,7 +1564,7 @@ void ptlrpc_schedule_difficult_reply (struct ptlrpc_reply_state *rs);
 struct ptlrpc_service *ptlrpc_init_svc_conf(struct ptlrpc_service_conf *c,
                                             svc_handler_t h, char *name,
                                             struct proc_dir_entry *proc_entry,
-                                            svcreq_printfn_t prntfn,
+                                            svc_req_printfn_t prntfn,
                                             char *threadname);
 
 struct ptlrpc_service *ptlrpc_init_svc(int nbufs, int bufsize, int max_req_size,
@@ -1400,21 +1573,19 @@ struct ptlrpc_service *ptlrpc_init_svc(int nbufs, int bufsize, int max_req_size,
                                        int watchdog_factor,
                                        svc_handler_t, char *name,
                                        cfs_proc_dir_entry_t *proc_entry,
-                                       svcreq_printfn_t,
+                                       svc_req_printfn_t,
                                        int min_threads, int max_threads,
                                        char *threadname, __u32 ctx_tags,
                                        svc_hpreq_handler_t);
 void ptlrpc_stop_all_threads(struct ptlrpc_service *svc);
 
-int ptlrpc_start_threads(struct obd_device *dev, struct ptlrpc_service *svc);
-int ptlrpc_start_thread(struct obd_device *dev, struct ptlrpc_service *svc);
+int ptlrpc_start_threads(struct ptlrpc_service *svc);
+int ptlrpc_start_thread(struct ptlrpc_service *svc);
 int ptlrpc_unregister_service(struct ptlrpc_service *service);
 int liblustre_check_services (void *arg);
 void ptlrpc_daemonize(char *name);
 int ptlrpc_service_health_check(struct ptlrpc_service *);
 void ptlrpc_hpreq_reorder(struct ptlrpc_request *req);
-void ptlrpc_server_active_request_inc(struct ptlrpc_request *req);
-void ptlrpc_server_active_request_dec(struct ptlrpc_request *req);
 void ptlrpc_server_drop_request(struct ptlrpc_request *req);
 
 #ifdef __KERNEL__
@@ -1429,7 +1600,6 @@ struct ptlrpc_svc_data {
         char *name;
         struct ptlrpc_service *svc;
         struct ptlrpc_thread *thread;
-        struct obd_device *dev;
 };
 /** @} */
 
@@ -1438,7 +1608,7 @@ struct ptlrpc_svc_data {
  * Import API
  * @{
  */
-int ptlrpc_connect_import(struct obd_import *imp, char * new_uuid);
+int ptlrpc_connect_import(struct obd_import *imp);
 int ptlrpc_init_import(struct obd_import *imp);
 int ptlrpc_disconnect_import(struct obd_import *imp, int noclose);
 int ptlrpc_import_recovery_state_machine(struct obd_import *imp);
@@ -1708,6 +1878,8 @@ int client_disconnect_export(struct obd_export *exp);
 int client_import_add_conn(struct obd_import *imp, struct obd_uuid *uuid,
                            int priority);
 int client_import_del_conn(struct obd_import *imp, struct obd_uuid *uuid);
+int client_import_find_conn(struct obd_import *imp, lnet_nid_t peer,
+                            struct obd_uuid *uuid);
 int import_set_conn_priority(struct obd_import *imp, struct obd_uuid *uuid);
 void client_destroy_import(struct obd_import *imp);
 /** @} */
@@ -1744,26 +1916,42 @@ void ping_evictor_stop(void);
 int ptlrpc_check_and_wait_suspend(struct ptlrpc_request *req);
 /** @} */
 
-/* ptlrpc/ptlrpcd.c */
+/* ptlrpc daemon bind policy */
+typedef enum {
+        /* all ptlrpcd threads are in free mode */
+        PDB_POLICY_NONE          = 1,
+        /* all ptlrpcd threads are in bound mode */
+        PDB_POLICY_FULL          = 2,
+        /* <free1 bound1> <free2 bound2> ... <freeN boundN> */
+        PDB_POLICY_PAIR          = 3,
+        /* <free1 bound1> <bound1 free2> ... <freeN boundN> <boundN free1>,
+         * means each ptlrpcd[X] has two partners: thread[X-1] and thread[X+1].
+         * If the kernel supports NUMA, ptlrpcd threads are bound and
+         * grouped by NUMA node */
+        PDB_POLICY_NEIGHBOR      = 4,
+} pdb_policy_t;
+
+/* ptlrpc daemon load policy
+ * It is the caller's duty to specify how to push an async RPC into some
+ * ptlrpcd queue, but this is only a hint, not enforced, and it also depends
+ * on "ptlrpcd_bind_policy": under "PDB_POLICY_FULL" the RPC is processed by
+ * the selected ptlrpcd; otherwise it may be processed by the selected
+ * ptlrpcd or its partner, whichever is scheduled first, to accelerate RPC
+ * processing. */
+typedef enum {
+        /* on the same CPU core as the caller */
+        PDL_POLICY_SAME         = 1,
+        /* within the same CPU partition, but not the same core as the caller */
+        PDL_POLICY_LOCAL        = 2,
+        /* round-robin on all CPU cores, but not the same core as the caller */
+        PDL_POLICY_ROUND        = 3,
+        /* the specified CPU core is preferred, but not enforced */
+        PDL_POLICY_PREFERRED    = 4,
+} pdl_policy_t;
 
-/**
- * Ptlrpcd scope is a set of two threads: ptlrpcd-foo and ptlrpcd-foo-rcv,
- * these threads are used to asynchronously send requests queued with
- * ptlrpcd_add_req(req, PCSOPE_FOO), and to handle completion call-backs for
- * such requests. Multiple scopes are needed to avoid dead-locks.
- */
-enum ptlrpcd_scope {
-        /** Scope of bulk read-write rpcs. */
-        PSCOPE_BRW,
-        /** Everything else. */
-        PSCOPE_OTHER,
-        PSCOPE_NR
-};
-
-int ptlrpcd_start(const char *name, struct ptlrpcd_ctl *pc);
+/* ptlrpc/ptlrpcd.c */
 void ptlrpcd_stop(struct ptlrpcd_ctl *pc, int force);
 void ptlrpcd_wake(struct ptlrpc_request *req);
-int ptlrpcd_add_req(struct ptlrpc_request *req, enum ptlrpcd_scope scope);
+void ptlrpcd_add_req(struct ptlrpc_request *req, pdl_policy_t policy, int idx);
 void ptlrpcd_add_rqset(struct ptlrpc_request_set *set);
 int ptlrpcd_addref(void);
 void ptlrpcd_decref(void);
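
With the reworked ptlrpcd_add_req(), callers now choose a load policy explicitly; a sketch of the common "no preference" pattern (PDL_POLICY_ROUND with idx -1 is assumed to mean no preferred thread index):

    static void example_send_async(struct ptlrpc_request *req)
    {
            /* round-robin across ptlrpcd threads; -1 = no preferred index */
            ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
    }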