LU-793 ptlrpc: fix ptlrpc_request_change_export()
[fs/lustre-release.git] / lustre / ptlrpc / service.c
index 09b32fa..ca2aa01 100644 (file)
@@ -27,7 +27,7 @@
  * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  *
- * Copyright (c) 2010, 2012, Intel Corporation.
+ * Copyright (c) 2010, 2013, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -67,8 +67,10 @@ static int ptlrpc_server_post_idle_rqbds(struct ptlrpc_service_part *svcpt);
 static void ptlrpc_server_hpreq_fini(struct ptlrpc_request *req);
 static void ptlrpc_at_remove_timed(struct ptlrpc_request *req);
 
-static CFS_LIST_HEAD(ptlrpc_all_services);
-spinlock_t ptlrpc_all_services_lock;
+/** Holds a list of all PTLRPC services */
+CFS_LIST_HEAD(ptlrpc_all_services);
+/** Used to protect the \e ptlrpc_all_services list */
+struct mutex ptlrpc_all_services_mutex;
 
 struct ptlrpc_request_buffer_desc *
 ptlrpc_alloc_rqbd(struct ptlrpc_service_part *svcpt)
@@ -209,7 +211,7 @@ struct ptlrpc_hr_partition;
 struct ptlrpc_hr_thread {
        int                             hrt_id;         /* thread ID */
        spinlock_t                      hrt_lock;
-       cfs_waitq_t                     hrt_waitq;
+       wait_queue_head_t               hrt_waitq;
        cfs_list_t                      hrt_queue;      /* RS queue */
        struct ptlrpc_hr_partition      *hrt_partition;
 };
@@ -236,7 +238,7 @@ struct ptlrpc_hr_service {
        /* CPU partition table, it's just cfs_cpt_table for now */
        struct cfs_cpt_table            *hr_cpt_table;
        /** controller sleep waitq */
-       cfs_waitq_t                     hr_waitq;
+       wait_queue_head_t               hr_waitq;
         unsigned int                   hr_stopping;
        /** roundrobin rotor for non-affinity service */
        unsigned int                    hr_rotor;
@@ -311,7 +313,7 @@ static void rs_batch_dispatch(struct rs_batch *b)
                cfs_list_splice_init(&b->rsb_replies, &hrt->hrt_queue);
                spin_unlock(&hrt->hrt_lock);
 
-               cfs_waitq_signal(&hrt->hrt_waitq);
+               wake_up(&hrt->hrt_waitq);
                b->rsb_n_replies = 0;
        }
 }
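
The cfs_waitq_* calls removed above map onto native Linux wait queues throughout this patch. As a reminder of the underlying idiom, here is a minimal sketch (not part of the patch; the demo_* names are hypothetical):

#include <linux/wait.h>

/* one waitqueue plus a condition, mirroring hrt_waitq/hrt_queue */
static wait_queue_head_t demo_waitq;
static int demo_has_work;

static void demo_init(void)
{
        init_waitqueue_head(&demo_waitq);  /* replaces cfs_waitq_init() */
}

static void demo_producer(void)
{
        demo_has_work = 1;
        wake_up(&demo_waitq);              /* replaces cfs_waitq_signal() */
}

static void demo_consumer(void)
{
        /* sleep until the condition becomes true; replaces cfs_wait_event() */
        wait_event(demo_waitq, demo_has_work);
}
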
@@ -390,7 +392,7 @@ void ptlrpc_dispatch_difficult_reply(struct ptlrpc_reply_state *rs)
        cfs_list_add_tail(&rs->rs_list, &hrt->hrt_queue);
        spin_unlock(&hrt->hrt_lock);
 
-       cfs_waitq_signal(&hrt->hrt_waitq);
+       wake_up(&hrt->hrt_waitq);
        EXIT;
 #else
        cfs_list_add_tail(&rs->rs_list, &rs->rs_svcpt->scp_rep_queue);
@@ -402,20 +404,20 @@ ptlrpc_schedule_difficult_reply(struct ptlrpc_reply_state *rs)
 {
        ENTRY;
 
-       LASSERT_SPIN_LOCKED(&rs->rs_svcpt->scp_rep_lock);
-        LASSERT_SPIN_LOCKED(&rs->rs_lock);
-        LASSERT (rs->rs_difficult);
-        rs->rs_scheduled_ever = 1;  /* flag any notification attempt */
+       LASSERT(spin_is_locked(&rs->rs_svcpt->scp_rep_lock));
+       LASSERT(spin_is_locked(&rs->rs_lock));
+       LASSERT (rs->rs_difficult);
+       rs->rs_scheduled_ever = 1;  /* flag any notification attempt */
 
-        if (rs->rs_scheduled) {     /* being set up or already notified */
-                EXIT;
-                return;
-        }
+       if (rs->rs_scheduled) {     /* being set up or already notified */
+               EXIT;
+               return;
+       }
 
-        rs->rs_scheduled = 1;
-        cfs_list_del_init(&rs->rs_list);
-        ptlrpc_dispatch_difficult_reply(rs);
-        EXIT;
+       rs->rs_scheduled = 1;
+       cfs_list_del_init(&rs->rs_list);
+       ptlrpc_dispatch_difficult_reply(rs);
+       EXIT;
 }
 EXPORT_SYMBOL(ptlrpc_schedule_difficult_reply);
 
@@ -502,7 +504,7 @@ static void ptlrpc_at_timer(unsigned long castmeharder)
 
        svcpt->scp_at_check = 1;
        svcpt->scp_at_checktime = cfs_time_current();
-       cfs_waitq_signal(&svcpt->scp_waitq);
+       wake_up(&svcpt->scp_waitq);
 }
 
 static void
@@ -601,10 +603,10 @@ ptlrpc_server_nthreads_check(struct ptlrpc_service *svc,
        svc->srv_nthrs_cpt_init = init;
 
        if (nthrs * svc->srv_ncpts > tc->tc_nthrs_max) {
-               LCONSOLE_WARN("%s: This service may have more threads (%d) "
-                             "than the given soft limit (%d)\n",
-                             svc->srv_name, nthrs * svc->srv_ncpts,
-                             tc->tc_nthrs_max);
+               CDEBUG(D_OTHER, "%s: This service may have more threads (%d) "
+                      "than the given soft limit (%d)\n",
+                      svc->srv_name, nthrs * svc->srv_ncpts,
+                      tc->tc_nthrs_max);
        }
 #endif
 }
@@ -629,15 +631,13 @@ ptlrpc_service_part_init(struct ptlrpc_service *svc,
        CFS_INIT_LIST_HEAD(&svcpt->scp_rqbd_idle);
        CFS_INIT_LIST_HEAD(&svcpt->scp_rqbd_posted);
        CFS_INIT_LIST_HEAD(&svcpt->scp_req_incoming);
-       cfs_waitq_init(&svcpt->scp_waitq);
+       init_waitqueue_head(&svcpt->scp_waitq);
        /* history request & rqbd list */
        CFS_INIT_LIST_HEAD(&svcpt->scp_hist_reqs);
        CFS_INIT_LIST_HEAD(&svcpt->scp_hist_rqbds);
 
        /* active requests and hp requests */
        spin_lock_init(&svcpt->scp_req_lock);
-       CFS_INIT_LIST_HEAD(&svcpt->scp_req_pending);
-       CFS_INIT_LIST_HEAD(&svcpt->scp_hreq_pending);
 
        /* reply states */
        spin_lock_init(&svcpt->scp_rep_lock);
@@ -646,7 +646,7 @@ ptlrpc_service_part_init(struct ptlrpc_service *svc,
        CFS_INIT_LIST_HEAD(&svcpt->scp_rep_queue);
 #endif
        CFS_INIT_LIST_HEAD(&svcpt->scp_rep_idle);
-       cfs_waitq_init(&svcpt->scp_rep_waitq);
+       init_waitqueue_head(&svcpt->scp_rep_waitq);
        cfs_atomic_set(&svcpt->scp_nreps_difficult, 0);
 
        /* adaptive timeout */
@@ -710,7 +710,7 @@ ptlrpc_service_part_init(struct ptlrpc_service *svc,
  */
 struct ptlrpc_service *
 ptlrpc_register_service(struct ptlrpc_service_conf *conf,
-                       cfs_proc_dir_entry_t *proc_entry)
+                       struct proc_dir_entry *proc_entry)
 {
        struct ptlrpc_service_cpt_conf  *cconf = &conf->psc_cpt;
        struct ptlrpc_service           *service;
@@ -783,9 +783,8 @@ ptlrpc_register_service(struct ptlrpc_service_conf *conf,
        CFS_INIT_LIST_HEAD(&service->srv_list); /* for safety of cleanup */
 
        /* buffer configuration */
-       service->srv_nbuf_per_group     = test_req_buffer_pressure ?  1 :
-                                         max(conf->psc_buf.bc_nbufs /
-                                             service->srv_ncpts, 1U);
+       service->srv_nbuf_per_group     = test_req_buffer_pressure ?
+                                         1 : conf->psc_buf.bc_nbufs;
        service->srv_max_req_size       = conf->psc_buf.bc_req_max_size +
                                          SPTLRPC_MAX_PAYLOAD;
        service->srv_buf_size           = conf->psc_buf.bc_buf_size;
@@ -824,15 +823,19 @@ ptlrpc_register_service(struct ptlrpc_service_conf *conf,
        rc = LNetSetLazyPortal(service->srv_req_portal);
        LASSERT(rc == 0);
 
-       spin_lock(&ptlrpc_all_services_lock);
-        cfs_list_add (&service->srv_list, &ptlrpc_all_services);
-       spin_unlock(&ptlrpc_all_services_lock);
+       mutex_lock(&ptlrpc_all_services_mutex);
+       cfs_list_add (&service->srv_list, &ptlrpc_all_services);
+       mutex_unlock(&ptlrpc_all_services_mutex);
+
+       if (proc_entry != NULL)
+               ptlrpc_lprocfs_register_service(proc_entry, service);
 
-        if (proc_entry != NULL)
-                ptlrpc_lprocfs_register_service(proc_entry, service);
+       rc = ptlrpc_service_nrs_setup(service);
+       if (rc != 0)
+               GOTO(failed, rc);
 
-        CDEBUG(D_NET, "%s: Started, listening on portal %d\n",
-               service->srv_name, service->srv_req_portal);
+       CDEBUG(D_NET, "%s: Started, listening on portal %d\n",
+              service->srv_name, service->srv_req_portal);
 
 #ifdef __KERNEL__
        rc = ptlrpc_start_threads(service);
@@ -866,11 +869,11 @@ static void ptlrpc_server_free_request(struct ptlrpc_request *req)
         sptlrpc_svc_ctx_decref(req);
 
         if (req != &req->rq_rqbd->rqbd_req) {
-                /* NB request buffers use an embedded
-                 * req if the incoming req unlinked the
-                 * MD; this isn't one of them! */
-                OBD_FREE(req, sizeof(*req));
-        }
+               /* NB request buffers use an embedded
+                * req if the incoming req unlinked the
+                * MD; this isn't one of them! */
+               ptlrpc_request_cache_free(req);
+       }
 }
 
 /**
@@ -981,22 +984,75 @@ void ptlrpc_server_drop_request(struct ptlrpc_request *req)
        }
 }
 
+/** Change request export and move hp request from old export to new */
+void ptlrpc_request_change_export(struct ptlrpc_request *req,
+                                 struct obd_export *export)
+{
+       if (req->rq_export != NULL) {
+               LASSERT(!list_empty(&req->rq_exp_list));
+               /* remove rq_exp_list from last export */
+               spin_lock_bh(&req->rq_export->exp_rpc_lock);
+               list_del_init(&req->rq_exp_list);
+               spin_unlock_bh(&req->rq_export->exp_rpc_lock);
+               /* export has one reference already, so it's safe to
+                * add req to export queue here and get another
+                * reference for request later */
+               spin_lock_bh(&export->exp_rpc_lock);
+               if (req->rq_ops != NULL) /* hp request */
+                       list_add(&req->rq_exp_list, &export->exp_hp_rpcs);
+               else
+                       list_add(&req->rq_exp_list, &export->exp_reg_rpcs);
+               spin_unlock_bh(&export->exp_rpc_lock);
+
+               class_export_rpc_dec(req->rq_export);
+               class_export_put(req->rq_export);
+       }
+
+       /* request takes one export refcount */
+       req->rq_export = class_export_get(export);
+       class_export_rpc_inc(export);
+
+       return;
+}
+
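
A minimal sketch of how a caller might use the new helper, assuming a reconnect-style path that has already looked up a referenced replacement export (not part of the patch; demo_reconnect() is a hypothetical name, presumably the real caller is the target connect handling path):

static void demo_reconnect(struct ptlrpc_request *req,
                           struct obd_export *new_exp)
{
        /* new_exp holds a reference owned by the caller; the helper takes
         * its own reference and drops the one held on the old export */
        ptlrpc_request_change_export(req, new_exp);

        /* req->rq_export now points at new_exp, and req->rq_exp_list has
         * been moved to new_exp's exp_hp_rpcs or exp_reg_rpcs list */
}
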
 /**
  * to finish a request: stop sending more early replies, and release
- * the request. should be called after we finished handling the request.
+ * the request.
  */
 static void ptlrpc_server_finish_request(struct ptlrpc_service_part *svcpt,
                                         struct ptlrpc_request *req)
 {
        ptlrpc_server_hpreq_fini(req);
 
+       if (req->rq_session.lc_thread != NULL) {
+               lu_context_exit(&req->rq_session);
+               lu_context_fini(&req->rq_session);
+       }
+
+       ptlrpc_server_drop_request(req);
+}
+
+/**
+ * to finish an active request: stop sending more early replies, and release
+ * the request. Should be called after we have finished handling the request.
+ */
+static void ptlrpc_server_finish_active_request(
+                                       struct ptlrpc_service_part *svcpt,
+                                       struct ptlrpc_request *req)
+{
        spin_lock(&svcpt->scp_req_lock);
+       ptlrpc_nrs_req_stop_nolock(req);
        svcpt->scp_nreqs_active--;
        if (req->rq_hp)
                svcpt->scp_nhreqs_active--;
        spin_unlock(&svcpt->scp_req_lock);
 
-       ptlrpc_server_drop_request(req);
+       ptlrpc_nrs_req_finalize(req);
+
+       if (req->rq_export != NULL)
+               class_export_rpc_dec(req->rq_export);
+
+       ptlrpc_server_finish_request(svcpt, req);
 }
 
 /**
@@ -1293,14 +1349,12 @@ static int ptlrpc_at_send_early_reply(struct ptlrpc_request *req)
        }
        newdl = cfs_time_current_sec() + at_get(&svcpt->scp_at_estimate);
 
-        OBD_ALLOC(reqcopy, sizeof *reqcopy);
-        if (reqcopy == NULL)
-                RETURN(-ENOMEM);
-        OBD_ALLOC_LARGE(reqmsg, req->rq_reqlen);
-        if (!reqmsg) {
-                OBD_FREE(reqcopy, sizeof *reqcopy);
-                RETURN(-ENOMEM);
-        }
+       reqcopy = ptlrpc_request_cache_alloc(__GFP_IO);
+       if (reqcopy == NULL)
+               RETURN(-ENOMEM);
+       OBD_ALLOC_LARGE(reqmsg, req->rq_reqlen);
+       if (!reqmsg)
+               GOTO(out_free, rc = -ENOMEM);
 
         *reqcopy = *req;
         reqcopy->rq_reply_state = NULL;
@@ -1328,7 +1382,7 @@ static int ptlrpc_at_send_early_reply(struct ptlrpc_request *req)
                 GOTO(out, rc = -ENODEV);
 
         /* RPC ref */
-        class_export_rpc_get(reqcopy->rq_export);
+       class_export_rpc_inc(reqcopy->rq_export);
         if (reqcopy->rq_export->exp_obd &&
             reqcopy->rq_export->exp_obd->obd_fail)
                 GOTO(out_put, rc = -ENODEV);
@@ -1352,13 +1406,14 @@ static int ptlrpc_at_send_early_reply(struct ptlrpc_request *req)
         ptlrpc_req_drop_rs(reqcopy);
 
 out_put:
-        class_export_rpc_put(reqcopy->rq_export);
-        class_export_put(reqcopy->rq_export);
+       class_export_rpc_dec(reqcopy->rq_export);
+       class_export_put(reqcopy->rq_export);
 out:
-        sptlrpc_svc_ctx_decref(reqcopy);
-        OBD_FREE_LARGE(reqmsg, req->rq_reqlen);
-        OBD_FREE(reqcopy, sizeof *reqcopy);
-        RETURN(rc);
+       sptlrpc_svc_ctx_decref(reqcopy);
+       OBD_FREE_LARGE(reqmsg, req->rq_reqlen);
+out_free:
+       ptlrpc_request_cache_free(reqcopy);
+       RETURN(rc);
 }
 
 /* Send early replies to everybody expiring within at_early_margin
@@ -1467,46 +1522,118 @@ static int ptlrpc_at_check_timed(struct ptlrpc_service_part *svcpt)
        RETURN(1); /* return "did_something" for liblustre */
 }
 
+/* Check if we are already handling an earlier incarnation of this request.
+ * Called with &req->rq_export->exp_rpc_lock held */
+static int ptlrpc_server_check_resend_in_progress(struct ptlrpc_request *req)
+{
+       struct ptlrpc_request   *tmp = NULL;
+
+       if (!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT) ||
+           (cfs_atomic_read(&req->rq_export->exp_rpc_count) == 0))
+               return 0;
+
+       /* bulk requests are aborted upon reconnect, don't try to
+        * find a match */
+       if (req->rq_bulk_write || req->rq_bulk_read)
+               return 0;
+
+       /* This list should not be longer than max_requests in
+        * flight on the client, so it is not all that long.
+        * Also we only hit this codepath in case of a resent
+        * request, which makes this path even rarer */
+       cfs_list_for_each_entry(tmp, &req->rq_export->exp_reg_rpcs,
+                               rq_exp_list) {
+               /* Found duplicate one */
+               if (tmp->rq_xid == req->rq_xid)
+                       goto found;
+       }
+       cfs_list_for_each_entry(tmp, &req->rq_export->exp_hp_rpcs,
+                               rq_exp_list) {
+               /* Found duplicate one */
+               if (tmp->rq_xid == req->rq_xid)
+                       goto found;
+       }
+       return 0;
+
+found:
+       DEBUG_REQ(D_HA, req, "Found duplicate req in processing\n");
+       DEBUG_REQ(D_HA, tmp, "Request being processed\n");
+       return -EBUSY;
+}
+
 /**
  * Put the request to the export list if the request may become
  * a high priority one.
  */
-static int ptlrpc_server_hpreq_init(struct ptlrpc_service *svc,
+static int ptlrpc_server_hpreq_init(struct ptlrpc_service_part *svcpt,
                                    struct ptlrpc_request *req)
 {
-        int rc = 0;
-        ENTRY;
+       cfs_list_t      *list;
+       int              rc, hp = 0;
 
-       if (svc->srv_ops.so_hpreq_handler) {
-               rc = svc->srv_ops.so_hpreq_handler(req);
-                if (rc)
-                        RETURN(rc);
-        }
-        if (req->rq_export && req->rq_ops) {
-                /* Perform request specific check. We should do this check
-                 * before the request is added into exp_hp_rpcs list otherwise
-                 * it may hit swab race at LU-1044. */
-                if (req->rq_ops->hpreq_check)
-                        rc = req->rq_ops->hpreq_check(req);
+       ENTRY;
+
+       if (svcpt->scp_service->srv_ops.so_hpreq_handler) {
+               rc = svcpt->scp_service->srv_ops.so_hpreq_handler(req);
+               if (rc < 0)
+                       RETURN(rc);
+               LASSERT(rc == 0);
+       }
+       if (req->rq_export) {
+               if (req->rq_ops) {
+                       /* Perform request specific check. We should do this
+                        * check before the request is added into exp_hp_rpcs
+                        * list otherwise it may hit swab race at LU-1044. */
+                       if (req->rq_ops->hpreq_check) {
+                               rc = req->rq_ops->hpreq_check(req);
+                               /**
+                                * XXX: Out of all current
+                                * ptlrpc_hpreq_ops::hpreq_check(), only
+                                * ldlm_cancel_hpreq_check() can return an
+                                * error code; other functions assert in
+                                * similar places, which seems odd.
+                                * What also does not seem right is that
+                                * handlers for those RPCs do not assert
+                                * on the same checks, but rather handle the
+                                * error cases. e.g. see ost_rw_hpreq_check(),
+                                * and ost_brw_read(), ost_brw_write().
+                                */
+                               if (rc < 0)
+                                       RETURN(rc);
+                               LASSERT(rc == 0 || rc == 1);
+                               hp = rc;
+                       }
+                       list = &req->rq_export->exp_hp_rpcs;
+               } else {
+                       list = &req->rq_export->exp_reg_rpcs;
+               }
 
+               /* search for a duplicate xid and add to the list
+                * atomically */
                spin_lock_bh(&req->rq_export->exp_rpc_lock);
-               cfs_list_add(&req->rq_exp_list,
-                            &req->rq_export->exp_hp_rpcs);
+               rc = ptlrpc_server_check_resend_in_progress(req);
+               if (rc < 0) {
+                       spin_unlock_bh(&req->rq_export->exp_rpc_lock);
+                       RETURN(rc);
+               }
+               cfs_list_add(&req->rq_exp_list, list);
                spin_unlock_bh(&req->rq_export->exp_rpc_lock);
        }
 
-       RETURN(rc);
+       ptlrpc_nrs_req_initialize(svcpt, req, !!hp);
+
+       RETURN(hp);
 }
 
 /** Remove the request from the export list. */
 static void ptlrpc_server_hpreq_fini(struct ptlrpc_request *req)
 {
-        ENTRY;
-        if (req->rq_export && req->rq_ops) {
-                /* refresh lock timeout again so that client has more
-                 * room to send lock cancel RPC. */
-                if (req->rq_ops->hpreq_fini)
-                        req->rq_ops->hpreq_fini(req);
+       ENTRY;
+       if (req->rq_export) {
+               /* refresh lock timeout again so that client has more
+                * room to send lock cancel RPC. */
+               if (req->rq_ops && req->rq_ops->hpreq_fini)
+                       req->rq_ops->hpreq_fini(req);
 
                spin_lock_bh(&req->rq_export->exp_rpc_lock);
                cfs_list_del_init(&req->rq_exp_list);
@@ -1539,77 +1666,17 @@ int ptlrpc_hpreq_handler(struct ptlrpc_request *req)
 }
 EXPORT_SYMBOL(ptlrpc_hpreq_handler);
 
-/**
- * Make the request a high priority one.
- *
- * All the high priority requests are queued in a separate FIFO
- * ptlrpc_service_part::scp_hpreq_pending list which is parallel to
- * ptlrpc_service_part::scp_req_pending list but has a higher priority
- * for handling.
- *
- * \see ptlrpc_server_handle_request().
- */
-static void ptlrpc_hpreq_reorder_nolock(struct ptlrpc_service_part *svcpt,
-                                        struct ptlrpc_request *req)
-{
-       ENTRY;
-
-       spin_lock(&req->rq_lock);
-        if (req->rq_hp == 0) {
-                int opc = lustre_msg_get_opc(req->rq_reqmsg);
-
-                /* Add to the high priority queue. */
-               cfs_list_move_tail(&req->rq_list, &svcpt->scp_hreq_pending);
-                req->rq_hp = 1;
-                if (opc != OBD_PING)
-                        DEBUG_REQ(D_RPCTRACE, req, "high priority req");
-        }
-       spin_unlock(&req->rq_lock);
-       EXIT;
-}
-
-/**
- * \see ptlrpc_hpreq_reorder_nolock
- */
-void ptlrpc_hpreq_reorder(struct ptlrpc_request *req)
-{
-       struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt;
-       ENTRY;
-
-       spin_lock(&svcpt->scp_req_lock);
-       /* It may happen that the request is already taken for the processing
-        * but still in the export list, or the request is not in the request
-        * queue but in the export list already, do not add it into the
-        * HP list. */
-       if (!cfs_list_empty(&req->rq_list))
-               ptlrpc_hpreq_reorder_nolock(svcpt, req);
-       spin_unlock(&svcpt->scp_req_lock);
-       EXIT;
-}
-EXPORT_SYMBOL(ptlrpc_hpreq_reorder);
-
-/**
- * Add a request to the regular or HP queue; optionally perform HP request
- * initialization.
- */
 static int ptlrpc_server_request_add(struct ptlrpc_service_part *svcpt,
                                     struct ptlrpc_request *req)
 {
        int     rc;
        ENTRY;
 
-       rc = ptlrpc_server_hpreq_init(svcpt->scp_service, req);
+       rc = ptlrpc_server_hpreq_init(svcpt, req);
        if (rc < 0)
                RETURN(rc);
 
-       spin_lock(&svcpt->scp_req_lock);
-
-       if (rc)
-               ptlrpc_hpreq_reorder_nolock(svcpt, req);
-       else
-               cfs_list_add_tail(&req->rq_list, &svcpt->scp_req_pending);
-
-       spin_unlock(&svcpt->scp_req_lock);
+       ptlrpc_nrs_req_add(svcpt, req, !!rc);
 
        RETURN(0);
 }
@@ -1619,13 +1686,16 @@ static int ptlrpc_server_request_add(struct ptlrpc_service_part *svcpt,
  * User can call it w/o any lock but need to hold
  * ptlrpc_service_part::scp_req_lock to get reliable result
  */
-static int ptlrpc_server_allow_high(struct ptlrpc_service_part *svcpt,
-                                   int force)
+static bool ptlrpc_server_allow_high(struct ptlrpc_service_part *svcpt,
+                                    bool force)
 {
        int running = svcpt->scp_nthrs_running;
 
+       if (!nrs_svcpt_has_hp(svcpt))
+               return false;
+
        if (force)
-               return 1;
+               return true;
 
        if (unlikely(svcpt->scp_service->srv_req_portal == MDS_REQUEST_PORTAL &&
                     CFS_FAIL_PRECHECK(OBD_FAIL_PTLRPC_CANCEL_RESEND))) {
@@ -1636,20 +1706,20 @@ static int ptlrpc_server_allow_high(struct ptlrpc_service_part *svcpt,
        }
 
        if (svcpt->scp_nreqs_active >= running - 1)
-               return 0;
+               return false;
 
        if (svcpt->scp_nhreqs_active == 0)
-               return 1;
+               return true;
 
-       return cfs_list_empty(&svcpt->scp_req_pending) ||
+       return !ptlrpc_nrs_req_pending_nolock(svcpt, false) ||
               svcpt->scp_hreq_count < svcpt->scp_service->srv_hpreq_ratio;
 }
 
-static int ptlrpc_server_high_pending(struct ptlrpc_service_part *svcpt,
-                                     int force)
+static bool ptlrpc_server_high_pending(struct ptlrpc_service_part *svcpt,
+                                      bool force)
 {
        return ptlrpc_server_allow_high(svcpt, force) &&
-              !cfs_list_empty(&svcpt->scp_hreq_pending);
+              ptlrpc_nrs_req_pending_nolock(svcpt, true);
 }
 
 /**
@@ -1661,13 +1731,13 @@ static int ptlrpc_server_high_pending(struct ptlrpc_service_part *svcpt,
  * User can call it w/o any lock but need to hold
  * ptlrpc_service_part::scp_req_lock to get reliable result
  */
-static int ptlrpc_server_allow_normal(struct ptlrpc_service_part *svcpt,
-                                     int force)
+static bool ptlrpc_server_allow_normal(struct ptlrpc_service_part *svcpt,
+                                      bool force)
 {
        int running = svcpt->scp_nthrs_running;
 #ifndef __KERNEL__
        if (1) /* always allow to handle normal request for liblustre */
-               return 1;
+               return true;
 #endif
        if (unlikely(svcpt->scp_service->srv_req_portal == MDS_REQUEST_PORTAL &&
                     CFS_FAIL_PRECHECK(OBD_FAIL_PTLRPC_CANCEL_RESEND))) {
@@ -1679,20 +1749,19 @@ static int ptlrpc_server_allow_normal(struct ptlrpc_service_part *svcpt,
 
        if (force ||
            svcpt->scp_nreqs_active < running - 2)
-               return 1;
+               return true;
 
        if (svcpt->scp_nreqs_active >= running - 1)
-               return 0;
+               return false;
 
-       return svcpt->scp_nhreqs_active > 0 ||
-              svcpt->scp_service->srv_ops.so_hpreq_handler == NULL;
+       return svcpt->scp_nhreqs_active > 0 || !nrs_svcpt_has_hp(svcpt);
 }
 
-static int ptlrpc_server_normal_pending(struct ptlrpc_service_part *svcpt,
-                                       int force)
+static bool ptlrpc_server_normal_pending(struct ptlrpc_service_part *svcpt,
+                                        bool force)
 {
        return ptlrpc_server_allow_normal(svcpt, force) &&
-              !cfs_list_empty(&svcpt->scp_req_pending);
+              ptlrpc_nrs_req_pending_nolock(svcpt, false);
 }
 
 /**
@@ -1703,8 +1772,8 @@ static int ptlrpc_server_normal_pending(struct ptlrpc_service_part *svcpt,
  * \see ptlrpc_server_allow_normal
  * \see ptlrpc_server_allow high
  */
-static inline int
-ptlrpc_server_request_pending(struct ptlrpc_service_part *svcpt, int force)
+static inline bool
+ptlrpc_server_request_pending(struct ptlrpc_service_part *svcpt, bool force)
 {
        return ptlrpc_server_high_pending(svcpt, force) ||
               ptlrpc_server_normal_pending(svcpt, force);
@@ -1716,25 +1785,50 @@ ptlrpc_server_request_pending(struct ptlrpc_service_part *svcpt, int force)
  * Returns a pointer to fetched request.
  */
 static struct ptlrpc_request *
-ptlrpc_server_request_get(struct ptlrpc_service_part *svcpt, int force)
+ptlrpc_server_request_get(struct ptlrpc_service_part *svcpt, bool force)
 {
-       struct ptlrpc_request *req;
+       struct ptlrpc_request *req = NULL;
        ENTRY;
 
+       spin_lock(&svcpt->scp_req_lock);
+#ifndef __KERNEL__
+       /* !@%$# liblustre only has 1 thread */
+       if (cfs_atomic_read(&svcpt->scp_nreps_difficult) != 0) {
+               spin_unlock(&svcpt->scp_req_lock);
+               RETURN(NULL);
+       }
+#endif
+
        if (ptlrpc_server_high_pending(svcpt, force)) {
-               req = cfs_list_entry(svcpt->scp_hreq_pending.next,
-                                    struct ptlrpc_request, rq_list);
-               svcpt->scp_hreq_count++;
-               RETURN(req);
+               req = ptlrpc_nrs_req_get_nolock(svcpt, true, force);
+               if (req != NULL) {
+                       svcpt->scp_hreq_count++;
+                       goto got_request;
+               }
        }
 
        if (ptlrpc_server_normal_pending(svcpt, force)) {
-               req = cfs_list_entry(svcpt->scp_req_pending.next,
-                                    struct ptlrpc_request, rq_list);
-               svcpt->scp_hreq_count = 0;
-               RETURN(req);
+               req = ptlrpc_nrs_req_get_nolock(svcpt, false, force);
+               if (req != NULL) {
+                       svcpt->scp_hreq_count = 0;
+                       goto got_request;
+               }
        }
+
+       spin_unlock(&svcpt->scp_req_lock);
        RETURN(NULL);
+
+got_request:
+       svcpt->scp_nreqs_active++;
+       if (req->rq_hp)
+               svcpt->scp_nhreqs_active++;
+
+       spin_unlock(&svcpt->scp_req_lock);
+
+       if (likely(req->rq_export))
+               class_export_rpc_inc(req->rq_export);
+
+       RETURN(req);
 }
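
The accounting taken here (scp_nreqs_active, scp_nhreqs_active, and the class_export_rpc_inc() reference) is undone by ptlrpc_server_finish_active_request() above. A condensed sketch of the consumer-side pairing, mirroring ptlrpc_server_handle_request() further down (demo_service_one() is a hypothetical name):

static void demo_service_one(struct ptlrpc_service_part *svcpt,
                             struct ptlrpc_thread *thread)
{
        struct ptlrpc_request *req;

        req = ptlrpc_server_request_get(svcpt, false);
        if (req == NULL)
                return;

        /* ... dispatch via svc->srv_ops.so_req_handler(req) ... */

        /* drops the active counters, NRS resources and export rpc ref */
        ptlrpc_server_finish_active_request(svcpt, req);
}
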
 
 /**
@@ -1744,7 +1838,8 @@ ptlrpc_server_request_get(struct ptlrpc_service_part *svcpt, int force)
  * ptlrpc_server_handle_req later on.
  */
 static int
-ptlrpc_server_handle_req_in(struct ptlrpc_service_part *svcpt)
+ptlrpc_server_handle_req_in(struct ptlrpc_service_part *svcpt,
+                           struct ptlrpc_thread *thread)
 {
        struct ptlrpc_service   *svc = svcpt->scp_service;
        struct ptlrpc_request   *req;
@@ -1834,7 +1929,6 @@ ptlrpc_server_handle_req_in(struct ptlrpc_service_part *svcpt)
         req->rq_export = class_conn2export(
                 lustre_msg_get_handle(req->rq_reqmsg));
         if (req->rq_export) {
-               class_export_rpc_get(req->rq_export);
                 rc = ptlrpc_check_req(req);
                 if (rc == 0) {
                         rc = sptlrpc_target_export_check(req->rq_export, req);
@@ -1865,23 +1959,33 @@ ptlrpc_server_handle_req_in(struct ptlrpc_service_part *svcpt)
                 goto err_req;
         }
 
-        ptlrpc_at_add_timed(req);
+       req->rq_svc_thread = thread;
+       if (thread != NULL) {
+               /* initialize request session, it is needed for request
+                * processing by target */
+               rc = lu_context_init(&req->rq_session, LCT_SERVER_SESSION |
+                                                      LCT_NOREF);
+               if (rc) {
+                       CERROR("%s: failure to initialize session: rc = %d\n",
+                              thread->t_name, rc);
+                       goto err_req;
+               }
+               req->rq_session.lc_thread = thread;
+               lu_context_enter(&req->rq_session);
+               req->rq_svc_thread->t_env->le_ses = &req->rq_session;
+       }
+
+       ptlrpc_at_add_timed(req);
 
-        /* Move it over to the request processing queue */
+       /* Move it over to the request processing queue */
        rc = ptlrpc_server_request_add(svcpt, req);
-       if (rc) {
-               ptlrpc_server_hpreq_fini(req);
+       if (rc)
                GOTO(err_req, rc);
-       }
-       cfs_waitq_signal(&svcpt->scp_waitq);
+
+       wake_up(&svcpt->scp_waitq);
        RETURN(1);
 
 err_req:
-       if (req->rq_export)
-               class_export_rpc_put(req->rq_export);
-       spin_lock(&svcpt->scp_req_lock);
-       svcpt->scp_nreqs_active++;
-       spin_unlock(&svcpt->scp_req_lock);
        ptlrpc_server_finish_request(svcpt, req);
 
        RETURN(1);
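
With this change the request session is created as soon as the request leaves the incoming queue and is destroyed in ptlrpc_server_finish_request(). A condensed sketch of that lifetime, with error handling trimmed (the demo_* names are hypothetical):

static int demo_session_setup(struct ptlrpc_request *req,
                              struct ptlrpc_thread *thread)
{
        int rc;

        rc = lu_context_init(&req->rq_session,
                             LCT_SERVER_SESSION | LCT_NOREF);
        if (rc != 0)
                return rc;

        req->rq_session.lc_thread = thread;
        lu_context_enter(&req->rq_session);
        thread->t_env->le_ses = &req->rq_session;
        return 0;
}

static void demo_session_teardown(struct ptlrpc_request *req)
{
        if (req->rq_session.lc_thread != NULL) {
                lu_context_exit(&req->rq_session);
                lu_context_fini(&req->rq_session);
        }
}
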
@@ -1895,29 +1999,18 @@ static int
 ptlrpc_server_handle_request(struct ptlrpc_service_part *svcpt,
                             struct ptlrpc_thread *thread)
 {
-       struct ptlrpc_service *svc = svcpt->scp_service;
-        struct obd_export     *export = NULL;
-        struct ptlrpc_request *request;
-        struct timeval         work_start;
-        struct timeval         work_end;
-        long                   timediff;
-        int                    rc;
-        int                    fail_opc = 0;
-        ENTRY;
+       struct ptlrpc_service   *svc = svcpt->scp_service;
+       struct ptlrpc_request   *request;
+       struct timeval           work_start;
+       struct timeval           work_end;
+       long                     timediff;
+       int                      fail_opc = 0;
 
-       spin_lock(&svcpt->scp_req_lock);
-#ifndef __KERNEL__
-       /* !@%$# liblustre only has 1 thread */
-       if (cfs_atomic_read(&svcpt->scp_nreps_difficult) != 0) {
-               spin_unlock(&svcpt->scp_req_lock);
+       ENTRY;
+
+       request = ptlrpc_server_request_get(svcpt, false);
+       if (request == NULL)
                RETURN(0);
-       }
-#endif
-       request = ptlrpc_server_request_get(svcpt, 0);
-       if  (request == NULL) {
-               spin_unlock(&svcpt->scp_req_lock);
-                RETURN(0);
-        }
 
         if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_HPREQ_NOTIMEOUT))
                 fail_opc = OBD_FAIL_PTLRPC_HPREQ_NOTIMEOUT;
@@ -1925,35 +2018,18 @@ ptlrpc_server_handle_request(struct ptlrpc_service_part *svcpt,
                 fail_opc = OBD_FAIL_PTLRPC_HPREQ_TIMEOUT;
 
         if (unlikely(fail_opc)) {
-                if (request->rq_export && request->rq_ops) {
-                       spin_unlock(&svcpt->scp_req_lock);
-
+               if (request->rq_export && request->rq_ops)
                        OBD_FAIL_TIMEOUT(fail_opc, 4);
-
-                       spin_lock(&svcpt->scp_req_lock);
-                       request = ptlrpc_server_request_get(svcpt, 0);
-                       if  (request == NULL) {
-                               spin_unlock(&svcpt->scp_req_lock);
-                               RETURN(0);
-                       }
-               }
        }
 
-       cfs_list_del_init(&request->rq_list);
-       svcpt->scp_nreqs_active++;
-       if (request->rq_hp)
-               svcpt->scp_nhreqs_active++;
-
-       spin_unlock(&svcpt->scp_req_lock);
-
         ptlrpc_rqphase_move(request, RQ_PHASE_INTERPRET);
 
-        if(OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_DUMP_LOG))
-                libcfs_debug_dumplog();
+       if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_DUMP_LOG))
+               libcfs_debug_dumplog();
 
-        cfs_gettimeofday(&work_start);
-        timediff = cfs_timeval_sub(&work_start, &request->rq_arrival_time,NULL);
-        if (likely(svc->srv_stats != NULL)) {
+       do_gettimeofday(&work_start);
+       timediff = cfs_timeval_sub(&work_start, &request->rq_arrival_time,NULL);
+       if (likely(svc->srv_stats != NULL)) {
                 lprocfs_counter_add(svc->srv_stats, PTLRPC_REQWAIT_CNTR,
                                     timediff);
                 lprocfs_counter_add(svc->srv_stats, PTLRPC_REQQDEPTH_CNTR,
@@ -1964,23 +2040,7 @@ ptlrpc_server_handle_request(struct ptlrpc_service_part *svcpt,
                                    at_get(&svcpt->scp_at_estimate));
         }
 
-       export = request->rq_export;
-       rc = lu_context_init(&request->rq_session, LCT_SESSION | LCT_NOREF);
-        if (rc) {
-                CERROR("Failure to initialize session: %d\n", rc);
-                goto out_req;
-        }
-        request->rq_session.lc_thread = thread;
-        request->rq_session.lc_cookie = 0x5;
-        lu_context_enter(&request->rq_session);
-
-        CDEBUG(D_NET, "got req "LPU64"\n", request->rq_xid);
-
-        request->rq_svc_thread = thread;
-        if (thread)
-                request->rq_svc_thread->t_env->le_ses = &request->rq_session;
-
-        if (likely(request->rq_export)) {
+       if (likely(request->rq_export)) {
                if (unlikely(ptlrpc_check_req(request)))
                        goto put_conn;
                 ptlrpc_update_export_timer(request->rq_export, timediff >> 19);
@@ -1999,27 +2059,34 @@ ptlrpc_server_handle_request(struct ptlrpc_service_part *svcpt,
                 goto put_conn;
         }
 
-        CDEBUG(D_RPCTRACE, "Handling RPC pname:cluuid+ref:pid:xid:nid:opc "
-               "%s:%s+%d:%d:x"LPU64":%s:%d\n", cfs_curproc_comm(),
-               (request->rq_export ?
-                (char *)request->rq_export->exp_client_uuid.uuid : "0"),
-               (request->rq_export ?
-                cfs_atomic_read(&request->rq_export->exp_refcount) : -99),
-               lustre_msg_get_status(request->rq_reqmsg), request->rq_xid,
-               libcfs_id2str(request->rq_peer),
-               lustre_msg_get_opc(request->rq_reqmsg));
+       CDEBUG(D_RPCTRACE, "Handling RPC pname:cluuid+ref:pid:xid:nid:opc "
+              "%s:%s+%d:%d:x"LPU64":%s:%d\n", current_comm(),
+              (request->rq_export ?
+               (char *)request->rq_export->exp_client_uuid.uuid : "0"),
+              (request->rq_export ?
+               cfs_atomic_read(&request->rq_export->exp_refcount) : -99),
+              lustre_msg_get_status(request->rq_reqmsg), request->rq_xid,
+              libcfs_id2str(request->rq_peer),
+              lustre_msg_get_opc(request->rq_reqmsg));
 
         if (lustre_msg_get_opc(request->rq_reqmsg) != OBD_PING)
                 CFS_FAIL_TIMEOUT_MS(OBD_FAIL_PTLRPC_PAUSE_REQ, cfs_fail_val);
 
-       rc = svc->srv_ops.so_req_handler(request);
+       CDEBUG(D_NET, "got req "LPU64"\n", request->rq_xid);
 
-        ptlrpc_rqphase_move(request, RQ_PHASE_COMPLETE);
+       /* re-assign request and session thread to the current one */
+       request->rq_svc_thread = thread;
+       if (thread != NULL) {
+               LASSERT(request->rq_session.lc_thread != NULL);
+               request->rq_session.lc_thread = thread;
+               request->rq_session.lc_cookie = 0x55;
+               thread->t_env->le_ses = &request->rq_session;
+       }
+       svc->srv_ops.so_req_handler(request);
 
-put_conn:
-        lu_context_exit(&request->rq_session);
-        lu_context_fini(&request->rq_session);
+       ptlrpc_rqphase_move(request, RQ_PHASE_COMPLETE);
 
+put_conn:
        if (unlikely(cfs_time_current_sec() > request->rq_deadline)) {
                     DEBUG_REQ(D_WARNING, request, "Request took longer "
                               "than estimated ("CFS_DURATION_T":"CFS_DURATION_T"s);"
@@ -2030,14 +2097,14 @@ put_conn:
                                            request->rq_deadline));
        }
 
-        cfs_gettimeofday(&work_end);
-        timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
-        CDEBUG(D_RPCTRACE, "Handled RPC pname:cluuid+ref:pid:xid:nid:opc "
-               "%s:%s+%d:%d:x"LPU64":%s:%d Request procesed in "
-               "%ldus (%ldus total) trans "LPU64" rc %d/%d\n",
-                cfs_curproc_comm(),
-                (request->rq_export ?
-                 (char *)request->rq_export->exp_client_uuid.uuid : "0"),
+       do_gettimeofday(&work_end);
+       timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
+       CDEBUG(D_RPCTRACE, "Handled RPC pname:cluuid+ref:pid:xid:nid:opc "
+              "%s:%s+%d:%d:x"LPU64":%s:%d Request processed in "
+              "%ldus (%ldus total) trans "LPU64" rc %d/%d\n",
+               current_comm(),
+               (request->rq_export ?
+                (char *)request->rq_export->exp_client_uuid.uuid : "0"),
                 (request->rq_export ?
                  cfs_atomic_read(&request->rq_export->exp_refcount) : -99),
                 lustre_msg_get_status(request->rq_reqmsg),
@@ -2071,10 +2138,7 @@ put_conn:
                           request->rq_arrival_time.tv_sec));
         }
 
-out_req:
-       if (export != NULL)
-               class_export_rpc_put(export);
-       ptlrpc_server_finish_request(svcpt, request);
+       ptlrpc_server_finish_active_request(svcpt, request);
 
        RETURN(1);
 }
@@ -2175,7 +2239,7 @@ ptlrpc_handle_rs(struct ptlrpc_reply_state *rs)
                 ptlrpc_rs_decref (rs);
                if (cfs_atomic_dec_and_test(&svcpt->scp_nreps_difficult) &&
                    svc->srv_is_stopping)
-                       cfs_waitq_broadcast(&svcpt->scp_waitq);
+                       wake_up_all(&svcpt->scp_waitq);
                RETURN(1);
        }
 
@@ -2243,7 +2307,7 @@ liblustre_check_services (void *arg)
                svcpt->scp_nthrs_running++;
 
                do {
-                       rc = ptlrpc_server_handle_req_in(svcpt);
+                       rc = ptlrpc_server_handle_req_in(svcpt, NULL);
                        rc |= ptlrpc_server_handle_reply(svcpt);
                        rc |= ptlrpc_at_check_timed(svcpt);
                        rc |= ptlrpc_server_handle_request(svcpt, NULL);
@@ -2364,12 +2428,12 @@ ptlrpc_wait_event(struct ptlrpc_service_part *svcpt,
 
        lc_watchdog_disable(thread->t_watchdog);
 
-       cfs_cond_resched();
+       cond_resched();
 
        l_wait_event_exclusive_head(svcpt->scp_waitq,
                                ptlrpc_thread_stopping(thread) ||
                                ptlrpc_server_request_incoming(svcpt) ||
-                               ptlrpc_server_request_pending(svcpt, 0) ||
+                               ptlrpc_server_request_pending(svcpt, false) ||
                                ptlrpc_rqbd_pending(svcpt) ||
                                ptlrpc_at_check(svcpt), &lwi);
 
@@ -2394,14 +2458,14 @@ static int ptlrpc_main(void *arg)
        struct ptlrpc_service           *svc = svcpt->scp_service;
        struct ptlrpc_reply_state       *rs;
 #ifdef WITH_GROUP_INFO
-        cfs_group_info_t *ginfo = NULL;
+       struct group_info *ginfo = NULL;
 #endif
-        struct lu_env *env;
-        int counter = 0, rc = 0;
-        ENTRY;
+       struct lu_env *env;
+       int counter = 0, rc = 0;
+       ENTRY;
 
-        thread->t_pid = cfs_curproc_pid();
-        cfs_daemonize_ctxt(thread->t_name);
+       thread->t_pid = current_pid();
+       unshare_fs_struct();
 
        /* NB: we will call cfs_cpt_bind() for all threads, because we
         * might want to run lustre server only on a subset of system CPUs,
@@ -2413,14 +2477,14 @@ static int ptlrpc_main(void *arg)
        }
 
 #ifdef WITH_GROUP_INFO
-        ginfo = cfs_groups_alloc(0);
-        if (!ginfo) {
-                rc = -ENOMEM;
-                goto out;
-        }
+       ginfo = groups_alloc(0);
+       if (!ginfo) {
+               rc = -ENOMEM;
+               goto out;
+       }
 
-        cfs_set_current_groups(ginfo);
-        cfs_put_group_info(ginfo);
+       set_current_groups(ginfo);
+       put_group_info(ginfo);
 #endif
 
        if (svc->srv_ops.so_thr_init != NULL) {
@@ -2478,14 +2542,14 @@ static int ptlrpc_main(void *arg)
        spin_unlock(&svcpt->scp_lock);
 
        /* wake up our creator in case he's still waiting. */
-       cfs_waitq_signal(&thread->t_ctl_waitq);
+       wake_up(&thread->t_ctl_waitq);
 
        thread->t_watchdog = lc_watchdog_add(ptlrpc_server_get_timeout(svcpt),
                                             NULL, NULL);
 
        spin_lock(&svcpt->scp_rep_lock);
        cfs_list_add(&rs->rs_list, &svcpt->scp_rep_idle);
-       cfs_waitq_signal(&svcpt->scp_rep_waitq);
+       wake_up(&svcpt->scp_rep_waitq);
        spin_unlock(&svcpt->scp_rep_lock);
 
        CDEBUG(D_NET, "service thread %d (#%d) started\n", thread->t_id,
@@ -2505,7 +2569,11 @@ static int ptlrpc_main(void *arg)
 
                /* Process all incoming reqs before handling any */
                if (ptlrpc_server_request_incoming(svcpt)) {
-                       ptlrpc_server_handle_req_in(svcpt);
+                       lu_context_enter(&env->le_ctx);
+                       env->le_ses = NULL;
+                       ptlrpc_server_handle_req_in(svcpt, thread);
+                       lu_context_exit(&env->le_ctx);
+
                        /* but limit ourselves in case of flood */
                        if (counter++ < 100)
                                continue;
@@ -2515,7 +2583,7 @@ static int ptlrpc_main(void *arg)
                if (ptlrpc_at_check(svcpt))
                        ptlrpc_at_check_timed(svcpt);
 
-               if (ptlrpc_server_request_pending(svcpt, 0)) {
+               if (ptlrpc_server_request_pending(svcpt, false)) {
                        lu_context_enter(&env->le_ctx);
                        ptlrpc_server_handle_request(svcpt, thread);
                        lu_context_exit(&env->le_ctx);
@@ -2562,7 +2630,7 @@ out:
        thread->t_id = rc;
        thread_add_flags(thread, SVC_STOPPED);
 
-       cfs_waitq_signal(&thread->t_ctl_waitq);
+       wake_up(&thread->t_ctl_waitq);
        spin_unlock(&svcpt->scp_lock);
 
        return rc;
@@ -2596,7 +2664,7 @@ static int ptlrpc_hr_main(void *arg)
 
        snprintf(threadname, sizeof(threadname), "ptlrpc_hr%02d_%03d",
                 hrp->hrp_cpt, hrt->hrt_id);
-       cfs_daemonize_ctxt(threadname);
+       unshare_fs_struct();
 
        rc = cfs_cpt_bind(ptlrpc_hr.hr_cpt_table, hrp->hrp_cpt);
        if (rc != 0) {
@@ -2605,7 +2673,7 @@ static int ptlrpc_hr_main(void *arg)
        }
 
        cfs_atomic_inc(&hrp->hrp_nstarted);
-       cfs_waitq_signal(&ptlrpc_hr.hr_waitq);
+       wake_up(&ptlrpc_hr.hr_waitq);
 
        while (!ptlrpc_hr.hr_stopping) {
                l_wait_condition(hrt->hrt_waitq, hrt_dont_sleep(hrt, &replies));
@@ -2622,7 +2690,7 @@ static int ptlrpc_hr_main(void *arg)
         }
 
        cfs_atomic_inc(&hrp->hrp_nstopped);
-       cfs_waitq_signal(&ptlrpc_hr.hr_waitq);
+       wake_up(&ptlrpc_hr.hr_waitq);
 
        return 0;
 }
@@ -2639,13 +2707,13 @@ static void ptlrpc_stop_hr_threads(void)
                if (hrp->hrp_thrs == NULL)
                        continue; /* uninitialized */
                for (j = 0; j < hrp->hrp_nthrs; j++)
-                       cfs_waitq_broadcast(&hrp->hrp_thrs[j].hrt_waitq);
+                       wake_up_all(&hrp->hrp_thrs[j].hrt_waitq);
        }
 
        cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) {
                if (hrp->hrp_thrs == NULL)
                        continue; /* uninitialized */
-               cfs_wait_event(ptlrpc_hr.hr_waitq,
+               wait_event(ptlrpc_hr.hr_waitq,
                               cfs_atomic_read(&hrp->hrp_nstopped) ==
                               cfs_atomic_read(&hrp->hrp_nstarted));
        }
@@ -2662,15 +2730,18 @@ static int ptlrpc_start_hr_threads(void)
                int     rc = 0;
 
                for (j = 0; j < hrp->hrp_nthrs; j++) {
-                       rc = cfs_create_thread(ptlrpc_hr_main,
-                                              &hrp->hrp_thrs[j],
-                                              CLONE_VM | CLONE_FILES);
-                       if (rc < 0)
+                       struct ptlrpc_hr_thread *hrt = &hrp->hrp_thrs[j];
+                       rc = PTR_ERR(kthread_run(ptlrpc_hr_main,
+                                                &hrp->hrp_thrs[j],
+                                                "ptlrpc_hr%02d_%03d",
+                                                hrp->hrp_cpt,
+                                                hrt->hrt_id));
+                       if (IS_ERR_VALUE(rc))
                                break;
                }
-               cfs_wait_event(ptlrpc_hr.hr_waitq,
+               wait_event(ptlrpc_hr.hr_waitq,
                               cfs_atomic_read(&hrp->hrp_nstarted) == j);
-               if (rc >= 0)
+               if (!IS_ERR_VALUE(rc))
                        continue;
 
                CERROR("Reply handling thread %d:%d Failed on starting: "
@@ -2700,7 +2771,7 @@ static void ptlrpc_svcpt_stop_threads(struct ptlrpc_service_part *svcpt)
                thread_add_flags(thread, SVC_STOPPING);
        }
 
-       cfs_waitq_broadcast(&svcpt->scp_waitq);
+       wake_up_all(&svcpt->scp_waitq);
 
        while (!cfs_list_empty(&svcpt->scp_threads)) {
                thread = cfs_list_entry(svcpt->scp_threads.next,
@@ -2809,7 +2880,7 @@ int ptlrpc_start_thread(struct ptlrpc_service_part *svcpt, int wait)
        OBD_CPT_ALLOC_PTR(thread, svc->srv_cptable, svcpt->scp_cpt);
        if (thread == NULL)
                RETURN(-ENOMEM);
-       cfs_waitq_init(&thread->t_ctl_waitq);
+       init_waitqueue_head(&thread->t_ctl_waitq);
 
        spin_lock(&svcpt->scp_lock);
        if (!ptlrpc_threads_increasable(svcpt)) {
@@ -2827,7 +2898,7 @@ int ptlrpc_start_thread(struct ptlrpc_service_part *svcpt, int wait)
                if (wait) {
                        CDEBUG(D_INFO, "Waiting for creating thread %s #%d\n",
                               svc->srv_thread_name, svcpt->scp_thr_nextid);
-                       cfs_schedule();
+                       schedule();
                        goto again;
                }
 
@@ -2853,20 +2924,24 @@ int ptlrpc_start_thread(struct ptlrpc_service_part *svcpt, int wait)
        }
 
        CDEBUG(D_RPCTRACE, "starting thread '%s'\n", thread->t_name);
-       /*
-        * CLONE_VM and CLONE_FILES just avoid a needless copy, because we
-        * just drop the VM and FILES in cfs_daemonize_ctxt() right away.
-        */
-       rc = cfs_create_thread(ptlrpc_main, thread, CFS_DAEMON_FLAGS);
-       if (rc < 0) {
+       rc = PTR_ERR(kthread_run(ptlrpc_main, thread, thread->t_name));
+       if (IS_ERR_VALUE(rc)) {
                CERROR("cannot start thread '%s': rc %d\n",
                       thread->t_name, rc);
                spin_lock(&svcpt->scp_lock);
-               cfs_list_del(&thread->t_link);
                --svcpt->scp_nthrs_starting;
-               spin_unlock(&svcpt->scp_lock);
-
-                OBD_FREE(thread, sizeof(*thread));
+               if (thread_is_stopping(thread)) {
+                       /* this ptlrpc_thread is being handled
+                        * by ptlrpc_svcpt_stop_threads now
+                        */
+                       thread_add_flags(thread, SVC_STOPPED);
+                       wake_up(&thread->t_ctl_waitq);
+                       spin_unlock(&svcpt->scp_lock);
+               } else {
+                       cfs_list_del(&thread->t_link);
+                       spin_unlock(&svcpt->scp_lock);
+                       OBD_FREE_PTR(thread);
+               }
                 RETURN(rc);
         }
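
kthread_run() returns either a valid task_struct pointer or an ERR_PTR()-encoded errno, never NULL, which is why the code above folds the result through PTR_ERR() and tests it with IS_ERR_VALUE(). A minimal sketch of the equivalent conventional form (demo_spawn() is a hypothetical helper):

#include <linux/kthread.h>
#include <linux/err.h>

static int demo_spawn(int (*fn)(void *), void *arg, const char *name)
{
        struct task_struct *task;

        task = kthread_run(fn, arg, "%s", name);
        if (IS_ERR(task))
                return PTR_ERR(task);   /* thread creation failed */

        return 0;                       /* thread is running */
}
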
 
@@ -2898,7 +2973,7 @@ int ptlrpc_hr_init(void)
        if (ptlrpc_hr.hr_partitions == NULL)
                RETURN(-ENOMEM);
 
-       cfs_waitq_init(&ptlrpc_hr.hr_waitq);
+       init_waitqueue_head(&ptlrpc_hr.hr_waitq);
 
        cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) {
                hrp->hrp_cpt = i;
@@ -2920,7 +2995,7 @@ int ptlrpc_hr_init(void)
 
                        hrt->hrt_id = j;
                        hrt->hrt_partition = hrp;
-                       cfs_waitq_init(&hrt->hrt_waitq);
+                       init_waitqueue_head(&hrt->hrt_waitq);
                        spin_lock_init(&hrt->hrt_lock);
                        CFS_INIT_LIST_HEAD(&hrt->hrt_queue);
                }
@@ -3077,19 +3152,12 @@ ptlrpc_service_purge_all(struct ptlrpc_service *svc)
 
                        cfs_list_del(&req->rq_list);
                        svcpt->scp_nreqs_incoming--;
-                       svcpt->scp_nreqs_active++;
                        ptlrpc_server_finish_request(svcpt, req);
                }
 
-               while (ptlrpc_server_request_pending(svcpt, 1)) {
-                       req = ptlrpc_server_request_get(svcpt, 1);
-                       cfs_list_del(&req->rq_list);
-                       svcpt->scp_nreqs_active++;
-                       ptlrpc_server_hpreq_fini(req);
-
-                       if (req->rq_export != NULL)
-                               class_export_rpc_put(req->rq_export);
-                       ptlrpc_server_finish_request(svcpt, req);
+               while (ptlrpc_server_request_pending(svcpt, true)) {
+                       req = ptlrpc_server_request_get(svcpt, true);
+                       ptlrpc_server_finish_active_request(svcpt, req);
                }
 
                LASSERT(cfs_list_empty(&svcpt->scp_rqbd_posted));
@@ -3166,17 +3234,19 @@ int ptlrpc_unregister_service(struct ptlrpc_service *service)
 
        service->srv_is_stopping = 1;
 
-       spin_lock(&ptlrpc_all_services_lock);
+       mutex_lock(&ptlrpc_all_services_mutex);
        cfs_list_del_init(&service->srv_list);
-       spin_unlock(&ptlrpc_all_services_lock);
-
-       ptlrpc_lprocfs_unregister_service(service);
+       mutex_unlock(&ptlrpc_all_services_mutex);
 
        ptlrpc_service_del_atimer(service);
        ptlrpc_stop_all_threads(service);
 
        ptlrpc_service_unlink_rqbd(service);
        ptlrpc_service_purge_all(service);
+       ptlrpc_service_nrs_cleanup(service);
+
+       ptlrpc_lprocfs_unregister_service(service);
+
        ptlrpc_service_free(service);
 
        RETURN(0);
@@ -3191,27 +3261,24 @@ EXPORT_SYMBOL(ptlrpc_unregister_service);
  * to be shot, so it's intentionally non-aggressive. */
 int ptlrpc_svcpt_health_check(struct ptlrpc_service_part *svcpt)
 {
-       struct ptlrpc_request           *request;
+       struct ptlrpc_request           *request = NULL;
        struct timeval                  right_now;
        long                            timediff;
 
-       cfs_gettimeofday(&right_now);
+       do_gettimeofday(&right_now);
 
        spin_lock(&svcpt->scp_req_lock);
-       if (!ptlrpc_server_request_pending(svcpt, 1)) {
+       /* How long has the next entry been waiting? */
+       if (ptlrpc_server_high_pending(svcpt, true))
+               request = ptlrpc_nrs_req_peek_nolock(svcpt, true);
+       else if (ptlrpc_server_normal_pending(svcpt, true))
+               request = ptlrpc_nrs_req_peek_nolock(svcpt, false);
+
+       if (request == NULL) {
                spin_unlock(&svcpt->scp_req_lock);
                return 0;
        }
 
-       /* How long has the next entry been waiting? */
-       if (cfs_list_empty(&svcpt->scp_req_pending)) {
-               request = cfs_list_entry(svcpt->scp_hreq_pending.next,
-                                        struct ptlrpc_request, rq_list);
-       } else {
-               request = cfs_list_entry(svcpt->scp_req_pending.next,
-                                        struct ptlrpc_request, rq_list);
-       }
-
        timediff = cfs_timeval_sub(&right_now, &request->rq_arrival_time, NULL);
        spin_unlock(&svcpt->scp_req_lock);
 
@@ -3231,7 +3298,7 @@ ptlrpc_service_health_check(struct ptlrpc_service *svc)
        struct ptlrpc_service_part      *svcpt;
        int                             i;
 
-       if (svc == NULL || svc->srv_parts == NULL)
+       if (svc == NULL)
                return 0;
 
        ptlrpc_service_for_each_part(svcpt, i, svc) {