LU-4181 lnet_selftest: bogus lst errors
diff --git a/lnet/selftest/rpc.c b/lnet/selftest/rpc.c
index 057a081..2845bf6 100644
--- a/lnet/selftest/rpc.c
+++ b/lnet/selftest/rpc.c
-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
+/*
+ * GPL HEADER START
  *
- * Copyright (C) 2001, 2002 Cluster File Systems, Inc.
- *   Author: Isaac Huang <isaac@clusterfs.com>
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, 2014, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lnet/selftest/rpc.c
+ *
+ * Author: Isaac Huang <isaac@clusterfs.com>
+ *
+ * 2012-05-13: Liang Zhen <liang@whamcloud.com>
+ * - percpt data for service to improve smp performance
+ * - code cleanup
  */
 
 #define DEBUG_SUBSYSTEM S_LNET
 
-#include <libcfs/kp30.h>
-#include <libcfs/libcfs.h>
-#include <lnet/lib-lnet.h>
-
 #include "selftest.h"
 
-
 typedef enum {
         SRPC_STATE_NONE,
         SRPC_STATE_NI_INIT,
         SRPC_STATE_EQ_INIT,
-        SRPC_STATE_WI_INIT,
         SRPC_STATE_RUNNING,
         SRPC_STATE_STOPPING,
 } srpc_state_t;
 
-#define SRPC_PEER_HASH_SIZE       101  /* # peer lists */
-#define SRPC_PEER_CREDITS         16   /* >= most LND's default peer credit */
-
-struct smoketest_rpc {
-        spinlock_t        rpc_glock;     /* global lock */
-        srpc_service_t   *rpc_services[SRPC_SERVICE_MAX_ID + 1];
-        struct list_head *rpc_peers;     /* hash table of known peers */
-        lnet_handle_eq_t  rpc_lnet_eq;   /* _the_ LNet event queue */
-        srpc_state_t      rpc_state;
-        srpc_counters_t   rpc_counters;
-        __u64             rpc_matchbits; /* matchbits counter */
+static struct smoketest_rpc {
+       spinlock_t       rpc_glock;     /* global lock */
+       srpc_service_t  *rpc_services[SRPC_SERVICE_MAX_ID + 1];
+       lnet_handle_eq_t rpc_lnet_eq;   /* _the_ LNet event queue */
+       srpc_state_t     rpc_state;
+       srpc_counters_t  rpc_counters;
+       __u64            rpc_matchbits; /* matchbits counter */
 } srpc_data;
 
+static inline int
+srpc_serv_portal(int svc_id)
+{
+       return svc_id < SRPC_FRAMEWORK_SERVICE_MAX_ID ?
+              SRPC_FRAMEWORK_REQUEST_PORTAL : SRPC_REQUEST_PORTAL;
+}
+
 /* forward ref's */
 int srpc_handle_rpc (swi_workitem_t *wi);
 
 void srpc_get_counters (srpc_counters_t *cnt)
 {
-        spin_lock(&srpc_data.rpc_glock);
-        *cnt = srpc_data.rpc_counters;
-        spin_unlock(&srpc_data.rpc_glock);
+       spin_lock(&srpc_data.rpc_glock);
+       *cnt = srpc_data.rpc_counters;
+       spin_unlock(&srpc_data.rpc_glock);
 }
 
 void srpc_set_counters (const srpc_counters_t *cnt)
 {
-        spin_lock(&srpc_data.rpc_glock);
-        srpc_data.rpc_counters = *cnt;
-        spin_unlock(&srpc_data.rpc_glock);
+       spin_lock(&srpc_data.rpc_glock);
+       srpc_data.rpc_counters = *cnt;
+       spin_unlock(&srpc_data.rpc_glock);
 }
 
-void
-srpc_add_bulk_page (srpc_bulk_t *bk, cfs_page_t *pg, int i)
+static int
+srpc_add_bulk_page(srpc_bulk_t *bk, struct page *pg, int i, int nob)
 {
-        LASSERT (i >= 0 && i < bk->bk_niov);
+       nob = min(nob, (int)PAGE_CACHE_SIZE);
+
+       LASSERT(nob > 0);
+       LASSERT(i >= 0 && i < bk->bk_niov);
 
 #ifdef __KERNEL__
-        bk->bk_iovs[i].kiov_offset = 0;
-        bk->bk_iovs[i].kiov_page   = pg;
-        bk->bk_iovs[i].kiov_len    = CFS_PAGE_SIZE;
+       bk->bk_iovs[i].kiov_offset = 0;
+       bk->bk_iovs[i].kiov_page   = pg;
+       bk->bk_iovs[i].kiov_len    = nob;
 #else
-        LASSERT (bk->bk_pages != NULL);
+       LASSERT(bk->bk_pages != NULL);
 
-        bk->bk_pages[i] = pg;
-        bk->bk_iovs[i].iov_len  = CFS_PAGE_SIZE;
-        bk->bk_iovs[i].iov_base = cfs_page_address(pg);
+       bk->bk_pages[i] = pg;
+       bk->bk_iovs[i].iov_len  = nob;
+       bk->bk_iovs[i].iov_base = page_address(pg);
 #endif
-        return;
+       return nob;
 }
 
 void
 srpc_free_bulk (srpc_bulk_t *bk)
 {
         int         i;
-        cfs_page_t *pg;
+       struct page *pg;
 
         LASSERT (bk != NULL);
 #ifndef __KERNEL__
@@ -92,263 +128,275 @@ srpc_free_bulk (srpc_bulk_t *bk)
 #endif
                 if (pg == NULL) break;
 
-                cfs_free_page(pg);
+               __free_page(pg);
         }
 
 #ifndef __KERNEL__
-        LIBCFS_FREE(bk->bk_pages, sizeof(cfs_page_t *) * bk->bk_niov);
+       LIBCFS_FREE(bk->bk_pages, sizeof(struct page *) * bk->bk_niov);
 #endif
         LIBCFS_FREE(bk, offsetof(srpc_bulk_t, bk_iovs[bk->bk_niov]));
         return;
 }
 
 srpc_bulk_t *
-srpc_alloc_bulk (int npages, int sink)
+srpc_alloc_bulk(int cpt, unsigned bulk_npg, unsigned bulk_len, int sink)
 {
-        srpc_bulk_t  *bk;
-        cfs_page_t  **pages;
-        int           i;
-
-        LASSERT (npages > 0 && npages <= LNET_MAX_IOV);
-
-        LIBCFS_ALLOC(bk, offsetof(srpc_bulk_t, bk_iovs[npages]));
-        if (bk == NULL) {
-                CERROR ("Can't allocate descriptor for %d pages\n", npages);
-                return NULL;
-        }
-
-        memset(bk, 0, offsetof(srpc_bulk_t, bk_iovs[npages]));
-        bk->bk_sink = sink;
-        bk->bk_niov = npages;
-        bk->bk_len  = npages * CFS_PAGE_SIZE;
+       srpc_bulk_t  *bk;
+       int           i;
+
+       LASSERT(bulk_npg > 0 && bulk_npg <= LNET_MAX_IOV);
+
+       LIBCFS_CPT_ALLOC(bk, lnet_cpt_table(), cpt,
+                        offsetof(srpc_bulk_t, bk_iovs[bulk_npg]));
+       if (bk == NULL) {
+               CERROR("Can't allocate descriptor for %d pages\n", bulk_npg);
+               return NULL;
+       }
+
+       memset(bk, 0, offsetof(srpc_bulk_t, bk_iovs[bulk_npg]));
+       bk->bk_sink   = sink;
+       bk->bk_len    = bulk_len;
+       bk->bk_niov   = bulk_npg;
 #ifndef __KERNEL__
-        LIBCFS_ALLOC(pages, sizeof(cfs_page_t *) * npages);
-        if (pages == NULL) {
-                LIBCFS_FREE(bk, offsetof(srpc_bulk_t, bk_iovs[npages]));
-                CERROR ("Can't allocate page array for %d pages\n", npages);
-                return NULL;
-        }
-
-        memset(pages, 0, sizeof(cfs_page_t *) * npages);
-        bk->bk_pages = pages;
-#else
-        UNUSED (pages);
+       {
+               struct page  **pages;
+
+               LIBCFS_CPT_ALLOC(pages, lnet_cpt_table(), cpt,
+                                sizeof(struct page *) * bulk_npg);
+               if (pages == NULL) {
+                       LIBCFS_FREE(bk, offsetof(srpc_bulk_t,
+                                   bk_iovs[bulk_npg]));
+                       CERROR("Can't allocate page array for %d pages\n",
+                               bulk_npg);
+                       return NULL;
+               }
+
+               memset(pages, 0, sizeof(struct page *) * bulk_npg);
+               bk->bk_pages = pages;
+       }
 #endif
 
-        for (i = 0; i < npages; i++) {
-                cfs_page_t *pg = cfs_alloc_page(CFS_ALLOC_STD);
+       for (i = 0; i < bulk_npg; i++) {
+               struct page *pg;
+               int         nob;
 
-                if (pg == NULL) {
-                        CERROR ("Can't allocate page %d of %d\n", i, npages);
-                        srpc_free_bulk(bk);
-                        return NULL;
-                }
+               pg = cfs_page_cpt_alloc(lnet_cpt_table(), cpt, GFP_IOFS);
+               if (pg == NULL) {
+                       CERROR("Can't allocate page %d of %d\n", i, bulk_npg);
+                       srpc_free_bulk(bk);
+                       return NULL;
+               }
 
-                srpc_add_bulk_page(bk, pg, i);
-        }
+               nob = srpc_add_bulk_page(bk, pg, i, bulk_len);
+               bulk_len -= nob;
+       }
 
-        return bk;
+       return bk;
 }
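
For reference, srpc_alloc_bulk() now spreads bulk_len across the pages by
clamping each iov to one page at a time via srpc_add_bulk_page(); a minimal
user-space sketch of the same arithmetic (assuming a 4 KiB page size; names
are illustrative only):

    #include <stdio.h>

    #define SKETCH_PAGE_SIZE 4096

    /* Mirror of the srpc_add_bulk_page() clamp: each iov takes
     * min(remaining, PAGE_SIZE) bytes; LASSERT(nob > 0) in the real
     * code forbids asking for more pages than bulk_len can fill. */
    static void split_bulk(unsigned bulk_npg, unsigned bulk_len)
    {
            unsigned i;

            for (i = 0; i < bulk_npg; i++) {
                    unsigned nob = bulk_len < SKETCH_PAGE_SIZE ?
                                   bulk_len : SKETCH_PAGE_SIZE;

                    printf("iov[%u]: %u bytes\n", i, nob);
                    bulk_len -= nob;
            }
    }

    int main(void)
    {
            split_bulk(3, 9000);    /* 4096 + 4096 + 808 */
            return 0;
    }
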
 
-
-static inline struct list_head *
-srpc_nid2peerlist (lnet_nid_t nid)
+static inline __u64
+srpc_next_id (void)
 {
-        unsigned int hash = ((unsigned int)nid) % SRPC_PEER_HASH_SIZE;
+       __u64 id;
 
-        return &srpc_data.rpc_peers[hash];
+       spin_lock(&srpc_data.rpc_glock);
+       id = srpc_data.rpc_matchbits++;
+       spin_unlock(&srpc_data.rpc_glock);
+       return id;
 }
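
srpc_next_id() takes the global rpc_glock for a single increment; only
uniqueness of the matchbits matters.  A lock-free equivalent (a sketch only,
not what this patch does; the in-tree code keeps the spinlock because
rpc_glock also guards rpc_counters) could use a C11 atomic:

    #include <stdatomic.h>
    #include <stdint.h>

    static _Atomic uint64_t matchbits_sketch;

    /* hypothetical lock-free variant of srpc_next_id() */
    static inline uint64_t next_id_sketch(void)
    {
            return atomic_fetch_add(&matchbits_sketch, 1);
    }
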
 
-static inline srpc_peer_t *
-srpc_create_peer (lnet_nid_t nid)
+static void
+srpc_init_server_rpc(struct srpc_server_rpc *rpc,
+                    struct srpc_service_cd *scd,
+                    struct srpc_buffer *buffer)
 {
-        srpc_peer_t *peer;
-
-        LASSERT (nid != LNET_NID_ANY);
-
-        LIBCFS_ALLOC(peer, sizeof(srpc_peer_t));
-        if (peer == NULL) {
-                CERROR ("Failed to allocate peer structure for %s\n",
-                        libcfs_nid2str(nid));
-                return NULL;
-        }
-
-        memset(peer, 0, sizeof(srpc_peer_t));
-        peer->stp_nid     = nid;
-        peer->stp_credits = SRPC_PEER_CREDITS;
-
-        spin_lock_init(&peer->stp_lock);
-        CFS_INIT_LIST_HEAD(&peer->stp_rpcq);
-        CFS_INIT_LIST_HEAD(&peer->stp_ctl_rpcq);
-        return peer;
+       memset(rpc, 0, sizeof(*rpc));
+       swi_init_workitem(&rpc->srpc_wi, rpc, srpc_handle_rpc,
+                         srpc_serv_is_framework(scd->scd_svc) ?
+                         lst_sched_serial : lst_sched_test[scd->scd_cpt]);
+
+       rpc->srpc_ev.ev_fired = 1; /* no event expected now */
+
+       rpc->srpc_scd      = scd;
+       rpc->srpc_reqstbuf = buffer;
+       rpc->srpc_peer     = buffer->buf_peer;
+       rpc->srpc_self     = buffer->buf_self;
+       LNetInvalidateHandle(&rpc->srpc_replymdh);
 }
 
-srpc_peer_t *
-srpc_find_peer_locked (lnet_nid_t nid)
+static void
+srpc_service_fini(struct srpc_service *svc)
 {
-        struct list_head *peer_list = srpc_nid2peerlist(nid);
-        srpc_peer_t      *peer;
-
-        LASSERT (nid != LNET_NID_ANY);
-
-        list_for_each_entry (peer, peer_list, stp_list) {
-                if (peer->stp_nid == nid)
-                        return peer;
-        }
-
-        return NULL;
+       struct srpc_service_cd  *scd;
+       struct srpc_server_rpc  *rpc;
+       struct srpc_buffer      *buf;
+       struct list_head                *q;
+       int                     i;
+
+       if (svc->sv_cpt_data == NULL)
+               return;
+
+       cfs_percpt_for_each(scd, i, svc->sv_cpt_data) {
+               while (1) {
+                       if (!list_empty(&scd->scd_buf_posted))
+                               q = &scd->scd_buf_posted;
+                       else if (!list_empty(&scd->scd_buf_blocked))
+                               q = &scd->scd_buf_blocked;
+                       else
+                               break;
+
+                       while (!list_empty(q)) {
+                               buf = list_entry(q->next,
+                                                    struct srpc_buffer,
+                                                    buf_list);
+                               list_del(&buf->buf_list);
+                               LIBCFS_FREE(buf, sizeof(*buf));
+                       }
+               }
+
+               LASSERT(list_empty(&scd->scd_rpc_active));
+
+               while (!list_empty(&scd->scd_rpc_free)) {
+                       rpc = list_entry(scd->scd_rpc_free.next,
+                                            struct srpc_server_rpc,
+                                            srpc_list);
+                       list_del(&rpc->srpc_list);
+                       LIBCFS_FREE(rpc, sizeof(*rpc));
+               }
+       }
+
+       cfs_percpt_free(svc->sv_cpt_data);
+       svc->sv_cpt_data = NULL;
 }
 
-static srpc_peer_t *
-srpc_nid2peer (lnet_nid_t nid)
+static int
+srpc_service_nrpcs(struct srpc_service *svc)
 {
-       srpc_peer_t *peer;
-       srpc_peer_t *new_peer;
+       int nrpcs = svc->sv_wi_total / svc->sv_ncpts;
 
-        spin_lock(&srpc_data.rpc_glock);
-        peer = srpc_find_peer_locked(nid);
-        spin_unlock(&srpc_data.rpc_glock);
-
-        if (peer != NULL)
-                return peer;
-        
-        new_peer = srpc_create_peer(nid);
-
-        spin_lock(&srpc_data.rpc_glock);
-
-        peer = srpc_find_peer_locked(nid);
-        if (peer != NULL) {
-                spin_unlock(&srpc_data.rpc_glock);
-                if (new_peer != NULL)
-                        LIBCFS_FREE(new_peer, sizeof(srpc_peer_t));
-
-                return peer;
-        }
-
-        if (new_peer == NULL) {
-                spin_unlock(&srpc_data.rpc_glock);
-                return NULL;
-        }
-                
-        list_add_tail(&new_peer->stp_list, srpc_nid2peerlist(nid));
-        spin_unlock(&srpc_data.rpc_glock);
-        return new_peer;
+       return srpc_serv_is_framework(svc) ?
+              max(nrpcs, SFW_FRWK_WI_MIN) : max(nrpcs, SFW_TEST_WI_MIN);
 }
 
-static inline __u64
-srpc_next_id (void)
-{
-        __u64 id;
-
-        spin_lock(&srpc_data.rpc_glock);
-        id = srpc_data.rpc_matchbits++;
-        spin_unlock(&srpc_data.rpc_glock);
-        return id;
-}
+int srpc_add_buffer(struct swi_workitem *wi);
 
-void
-srpc_init_server_rpc (srpc_server_rpc_t *rpc,
-                      srpc_service_t *sv, srpc_buffer_t *buffer)
+static int
+srpc_service_init(struct srpc_service *svc)
 {
-        memset(rpc, 0, sizeof(*rpc));
-        swi_init_workitem(&rpc->srpc_wi, rpc, srpc_handle_rpc);
-
-        rpc->srpc_ev.ev_fired = 1; /* no event expected now */
-
-        rpc->srpc_service  = sv;
-        rpc->srpc_reqstbuf = buffer;
-        rpc->srpc_peer     = buffer->buf_peer;
-        rpc->srpc_self     = buffer->buf_self;
-        rpc->srpc_replymdh = LNET_INVALID_HANDLE;
+       struct srpc_service_cd  *scd;
+       struct srpc_server_rpc  *rpc;
+       int                     nrpcs;
+       int                     i;
+       int                     j;
+
+       svc->sv_shuttingdown = 0;
+
+       svc->sv_cpt_data = cfs_percpt_alloc(lnet_cpt_table(),
+                                           sizeof(struct srpc_service_cd));
+       if (svc->sv_cpt_data == NULL)
+               return -ENOMEM;
+
+       svc->sv_ncpts = srpc_serv_is_framework(svc) ?
+                       1 : cfs_cpt_number(lnet_cpt_table());
+       nrpcs = srpc_service_nrpcs(svc);
+
+       cfs_percpt_for_each(scd, i, svc->sv_cpt_data) {
+               scd->scd_cpt = i;
+               scd->scd_svc = svc;
+               spin_lock_init(&scd->scd_lock);
+               INIT_LIST_HEAD(&scd->scd_rpc_free);
+               INIT_LIST_HEAD(&scd->scd_rpc_active);
+               INIT_LIST_HEAD(&scd->scd_buf_posted);
+               INIT_LIST_HEAD(&scd->scd_buf_blocked);
+
+               scd->scd_ev.ev_data = scd;
+               scd->scd_ev.ev_type = SRPC_REQUEST_RCVD;
+
+               /* NB: don't use lst_sched_serial for adding buffer,
+                * see details in srpc_service_add_buffers() */
+               swi_init_workitem(&scd->scd_buf_wi, scd,
+                                 srpc_add_buffer, lst_sched_test[i]);
+
+               if (i != 0 && srpc_serv_is_framework(svc)) {
+                       /* NB: framework service only needs srpc_service_cd for
+                        * one partition, but we allocate for all to keep the
+                        * code simple; it wastes a little memory but nobody
+                        * should care about that */
+                       continue;
+               }
+
+               for (j = 0; j < nrpcs; j++) {
+                       LIBCFS_CPT_ALLOC(rpc, lnet_cpt_table(),
+                                        i, sizeof(*rpc));
+                       if (rpc == NULL) {
+                               srpc_service_fini(svc);
+                               return -ENOMEM;
+                       }
+                       list_add(&rpc->srpc_list, &scd->scd_rpc_free);
+               }
+       }
+
+       return 0;
 }
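
srpc_service_init() above allocates one srpc_service_cd per CPT so that
partitions never contend on each other's locks.  A user-space analogue of the
percpt pattern (names hypothetical; cfs_percpt_alloc() does the real
allocation NUMA-locally from lnet_cpt_table()):

    #include <stdlib.h>

    struct service_cd_sketch {
            int     scd_cpt;        /* which partition this copy serves */
            /* ... per-partition lists, lock and counters ... */
    };

    /* roughly what cfs_percpt_alloc()/cfs_percpt_for_each() give the
     * caller: an array with one privately owned copy per partition */
    static struct service_cd_sketch **percpt_alloc_sketch(int ncpts)
    {
            struct service_cd_sketch **arr = calloc(ncpts, sizeof(*arr));
            int i;

            if (arr == NULL)
                    return NULL;
            for (i = 0; i < ncpts; i++) {
                    arr[i] = calloc(1, sizeof(**arr));
                    if (arr[i] == NULL)
                            return NULL;    /* leaks on failure; sketch only */
                    arr[i]->scd_cpt = i;
            }
            return arr;
    }
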
 
 int
-srpc_add_service (srpc_service_t *sv)
+srpc_add_service(struct srpc_service *sv)
 {
-        int                id = sv->sv_id;
-        int                i;
-        srpc_server_rpc_t *rpc;
+       int id = sv->sv_id;
 
-        LASSERT (sv->sv_concur > 0);
-        LASSERT (0 <= id && id <= SRPC_SERVICE_MAX_ID);
+       LASSERT(0 <= id && id <= SRPC_SERVICE_MAX_ID);
 
-        spin_lock(&srpc_data.rpc_glock);
+       if (srpc_service_init(sv) != 0)
+               return -ENOMEM;
 
-        LASSERT (srpc_data.rpc_state == SRPC_STATE_RUNNING);
-
-        if (srpc_data.rpc_services[id] != NULL) {
-                spin_unlock(&srpc_data.rpc_glock);
-                return -EBUSY;
-        }
-
-        srpc_data.rpc_services[id] = sv;
-        spin_unlock(&srpc_data.rpc_glock);
+       spin_lock(&srpc_data.rpc_glock);
 
-        sv->sv_nprune       = 0;
-        sv->sv_nposted_msg  = 0;
-        sv->sv_shuttingdown = 0;
-        spin_lock_init(&sv->sv_lock);
-        CFS_INIT_LIST_HEAD(&sv->sv_free_rpcq);
-        CFS_INIT_LIST_HEAD(&sv->sv_active_rpcq);
-        CFS_INIT_LIST_HEAD(&sv->sv_posted_msgq);
-        CFS_INIT_LIST_HEAD(&sv->sv_blocked_msgq);
+       LASSERT(srpc_data.rpc_state == SRPC_STATE_RUNNING);
 
-        sv->sv_ev.ev_data = sv;
-        sv->sv_ev.ev_type = SRPC_REQUEST_RCVD;
+       if (srpc_data.rpc_services[id] != NULL) {
+               spin_unlock(&srpc_data.rpc_glock);
+               goto failed;
+       }
 
-        for (i = 0; i < sv->sv_concur; i++) {
-                LIBCFS_ALLOC(rpc, sizeof(*rpc));
-                if (rpc == NULL) goto enomem;
-
-                list_add(&rpc->srpc_list, &sv->sv_free_rpcq);
-        }
-
-        CDEBUG (D_NET, "Adding service: id %d, name %s, concurrency %d\n",
-                id, sv->sv_name, sv->sv_concur);
-        return 0;
+       srpc_data.rpc_services[id] = sv;
+       spin_unlock(&srpc_data.rpc_glock);
 
-enomem:
-        while (!list_empty(&sv->sv_free_rpcq)) {
-                rpc = list_entry(sv->sv_free_rpcq.next,
-                                 srpc_server_rpc_t, srpc_list);
-                list_del(&rpc->srpc_list);
-                LIBCFS_FREE(rpc, sizeof(*rpc));
-        }
+       CDEBUG(D_NET, "Adding service: id %d, name %s\n", id, sv->sv_name);
+       return 0;
 
-        spin_lock(&srpc_data.rpc_glock);
-        srpc_data.rpc_services[id] = NULL;
-        spin_unlock(&srpc_data.rpc_glock);
-        return -ENOMEM;
+ failed:
+       srpc_service_fini(sv);
+       return -EBUSY;
 }
 
 int
 srpc_remove_service (srpc_service_t *sv)
 {
-        int id = sv->sv_id;
+       int id = sv->sv_id;
 
-        spin_lock(&srpc_data.rpc_glock);
+       spin_lock(&srpc_data.rpc_glock);
 
-        if (srpc_data.rpc_services[id] != sv) {
-                spin_unlock(&srpc_data.rpc_glock);
-                return -ENOENT;
-        }
+       if (srpc_data.rpc_services[id] != sv) {
+               spin_unlock(&srpc_data.rpc_glock);
+               return -ENOENT;
+       }
 
-        srpc_data.rpc_services[id] = NULL;
-        spin_unlock(&srpc_data.rpc_glock);
-        return 0;
+       srpc_data.rpc_services[id] = NULL;
+       spin_unlock(&srpc_data.rpc_glock);
+       return 0;
 }
 
-int
-srpc_post_passive_rdma(int portal, __u64 matchbits, void *buf,
-                       int len, int options, lnet_process_id_t peer,
-                       lnet_handle_md_t *mdh, srpc_event_t *ev)
+static int
+srpc_post_passive_rdma(int portal, int local, __u64 matchbits, void *buf,
+                      int len, int options, lnet_process_id_t peer,
+                      lnet_handle_md_t *mdh, srpc_event_t *ev)
 {
-        int              rc;
-        lnet_md_t        md;
-        lnet_handle_me_t meh;
+       int              rc;
+       lnet_md_t        md;
+       lnet_handle_me_t meh;
 
-        rc = LNetMEAttach(portal, peer, matchbits, 0,
-                          LNET_UNLINK, LNET_INS_AFTER, &meh);
+       rc = LNetMEAttach(portal, peer, matchbits, 0, LNET_UNLINK,
+                         local ? LNET_INS_LOCAL : LNET_INS_AFTER, &meh);
         if (rc != 0) {
                 CERROR ("LNetMEAttach failed: %d\n", rc);
                 LASSERT (rc == -ENOMEM);
@@ -378,8 +426,8 @@ srpc_post_passive_rdma(int portal, __u64 matchbits, void *buf,
         return 0;
 }
 
-int
-srpc_post_active_rdma(int portal, __u64 matchbits, void *buf, int len, 
+static int
+srpc_post_active_rdma(int portal, __u64 matchbits, void *buf, int len,
                       int options, lnet_process_id_t peer, lnet_nid_t self,
                       lnet_handle_md_t *mdh, srpc_event_t *ev)
 {
@@ -403,11 +451,11 @@ srpc_post_active_rdma(int portal, __u64 matchbits, void *buf, int len,
         /* this is kind of an abuse of the LNET_MD_OP_{PUT,GET} options.
          * they're only meaningful for MDs attached to an ME (i.e. passive
          * buffers... */
-       if ((options & LNET_MD_OP_PUT) != 0) {
+        if ((options & LNET_MD_OP_PUT) != 0) {
                 rc = LNetPut(self, *mdh, LNET_NOACK_REQ, peer,
                              portal, matchbits, 0, 0);
         } else {
-               LASSERT ((options & LNET_MD_OP_GET) != 0);
+                LASSERT ((options & LNET_MD_OP_GET) != 0);
 
                 rc = LNetGet(self, *mdh, peer, portal, matchbits, 0);
         }
@@ -430,244 +478,353 @@ srpc_post_active_rdma(int portal, __u64 matchbits, void *buf, int len,
         return 0;
 }
 
-int
+static int
 srpc_post_active_rqtbuf(lnet_process_id_t peer, int service, void *buf,
-                        int len, lnet_handle_md_t *mdh, srpc_event_t *ev)
+                       int len, lnet_handle_md_t *mdh, srpc_event_t *ev)
 {
-        int rc;
-        int portal;
-
-        if (service > SRPC_FRAMEWORK_SERVICE_MAX_ID)
-                portal = SRPC_REQUEST_PORTAL;
-        else
-                portal = SRPC_FRAMEWORK_REQUEST_PORTAL;
-
-        rc = srpc_post_active_rdma(portal, service, buf, len, 
-                                   LNET_MD_OP_PUT, peer,
-                                   LNET_NID_ANY, mdh, ev);
-        return rc;
+       return srpc_post_active_rdma(srpc_serv_portal(service), service,
+                                    buf, len, LNET_MD_OP_PUT, peer,
+                                    LNET_NID_ANY, mdh, ev);
 }
 
-int
-srpc_post_passive_rqtbuf(int service, void *buf, int len,
-                         lnet_handle_md_t *mdh, srpc_event_t *ev)
+static int
+srpc_post_passive_rqtbuf(int service, int local, void *buf, int len,
+                        lnet_handle_md_t *mdh, srpc_event_t *ev)
 {
-        int               rc;
-        int               portal;
-        lnet_process_id_t any = {.nid = LNET_NID_ANY,
-                                 .pid = LNET_PID_ANY};
+       lnet_process_id_t any = {0};
 
-        if (service > SRPC_FRAMEWORK_SERVICE_MAX_ID)
-                portal = SRPC_REQUEST_PORTAL;
-        else
-                portal = SRPC_FRAMEWORK_REQUEST_PORTAL;
+       any.nid = LNET_NID_ANY;
+       any.pid = LNET_PID_ANY;
 
-        rc = srpc_post_passive_rdma(portal, service, buf, len,
-                                    LNET_MD_OP_PUT, any, mdh, ev);
-        return rc;
+       return srpc_post_passive_rdma(srpc_serv_portal(service),
+                                     local, service, buf, len,
+                                     LNET_MD_OP_PUT, any, mdh, ev);
 }
 
-int
-srpc_service_post_buffer (srpc_service_t *sv, srpc_buffer_t *buf)
+static int
+srpc_service_post_buffer(struct srpc_service_cd *scd, struct srpc_buffer *buf)
+__must_hold(&scd->scd_lock)
 {
-        srpc_msg_t *msg = &buf->buf_msg;
-        int         rc;
+       struct srpc_service     *sv = scd->scd_svc;
+       struct srpc_msg         *msg = &buf->buf_msg;
+       int                     rc;
 
-        LASSERT (!sv->sv_shuttingdown);
+       LNetInvalidateHandle(&buf->buf_mdh);
+       list_add(&buf->buf_list, &scd->scd_buf_posted);
+       scd->scd_buf_nposted++;
+       spin_unlock(&scd->scd_lock);
 
-        buf->buf_mdh = LNET_INVALID_HANDLE;
-        list_add(&buf->buf_list, &sv->sv_posted_msgq);
-        sv->sv_nposted_msg++;
-        spin_unlock(&sv->sv_lock);
+       rc = srpc_post_passive_rqtbuf(sv->sv_id,
+                                     !srpc_serv_is_framework(sv),
+                                     msg, sizeof(*msg), &buf->buf_mdh,
+                                     &scd->scd_ev);
 
-        rc = srpc_post_passive_rqtbuf(sv->sv_id, msg, sizeof(*msg),
-                                      &buf->buf_mdh, &sv->sv_ev);
+       /* At this point, an RPC (new or delayed) may have arrived in
+        * msg and its event handler has been called. So we must add
+        * buf to scd_buf_posted _before_ dropping scd_lock */
 
-        /* At this point, a RPC (new or delayed) may have arrived in
-         * msg and its event handler has been called. So we must add
-         * buf to sv_posted_msgq _before_ dropping sv_lock */
+       spin_lock(&scd->scd_lock);
 
-        spin_lock(&sv->sv_lock);
+       if (rc == 0) {
+               if (!sv->sv_shuttingdown)
+                       return 0;
 
-        if (rc == 0) {
-                if (sv->sv_shuttingdown) {
-                        spin_unlock(&sv->sv_lock);
+               spin_unlock(&scd->scd_lock);
+               /* srpc_shutdown_service might have tried to unlink me
+                * when my buf_mdh was still invalid */
+               LNetMDUnlink(buf->buf_mdh);
+               spin_lock(&scd->scd_lock);
+               return 0;
+       }
 
-                        /* srpc_shutdown_service might have tried to unlink me
-                         * when my buf_mdh was still invalid */
-                        LNetMDUnlink(buf->buf_mdh);
-
-                        spin_lock(&sv->sv_lock);
-                }
-                return 0;
-        }
+       scd->scd_buf_nposted--;
+       if (sv->sv_shuttingdown)
+               return rc; /* don't allow to change scd_buf_posted */
 
-        sv->sv_nposted_msg--;
-        if (sv->sv_shuttingdown) return rc;
+       list_del(&buf->buf_list);
+       spin_unlock(&scd->scd_lock);
 
-        list_del(&buf->buf_list);
+       LIBCFS_FREE(buf, sizeof(*buf));
 
-        spin_unlock(&sv->sv_lock);
-        LIBCFS_FREE(buf, sizeof(*buf));
-        spin_lock(&sv->sv_lock);
-        return rc; 
+       spin_lock(&scd->scd_lock);
+       return rc;
 }
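
The ordering rule documented in srpc_service_post_buffer() deserves a closer
look: the buffer must be on scd_buf_posted while scd_lock is still held,
because the LNet event handler may run the instant the request buffer is
attached.  A pthread sketch of the same publish-before-handoff shape (names
hypothetical):

    #include <pthread.h>

    struct buf_sketch { struct buf_sketch *next; };

    static pthread_mutex_t scd_lock_sketch = PTHREAD_MUTEX_INITIALIZER;
    static struct buf_sketch *posted_sketch;
    static int nposted_sketch;

    static void post_buffer_sketch(struct buf_sketch *b,
                                   void (*hand_to_network)(struct buf_sketch *))
    {
            pthread_mutex_lock(&scd_lock_sketch);
            b->next = posted_sketch;        /* publish under the lock ... */
            posted_sketch = b;
            nposted_sketch++;
            pthread_mutex_unlock(&scd_lock_sketch);

            /* ... then hand it over; in rpc.c this is
             * srpc_post_passive_rqtbuf(), whose completion handler
             * expects to find the buffer on the posted list */
            hand_to_network(b);
    }
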
 
 int
-srpc_service_add_buffers (srpc_service_t *sv, int nbuffer)
+srpc_add_buffer(struct swi_workitem *wi)
 {
-        int                rc;
-        int                posted;
-        srpc_buffer_t     *buf;
-
-        LASSERTF (nbuffer > 0,
-                  "nbuffer must be positive: %d\n", nbuffer);
-
-        for (posted = 0; posted < nbuffer; posted++) {
-                LIBCFS_ALLOC(buf, sizeof(*buf));
-                if (buf == NULL) break;
-
-                spin_lock(&sv->sv_lock);
-                rc = srpc_service_post_buffer(sv, buf);
-                spin_unlock(&sv->sv_lock);
-
-                if (rc != 0) break;
-        }
+       struct srpc_service_cd  *scd = wi->swi_workitem.wi_data;
+       struct srpc_buffer      *buf;
+       int                     rc = 0;
+
+       /* this is called by workitem scheduler threads; these threads
+        * have been set CPT affinity, so buffers will be posted on the
+        * CPT-local list of the portal */
+       spin_lock(&scd->scd_lock);
+
+       while (scd->scd_buf_adjust > 0 &&
+              !scd->scd_svc->sv_shuttingdown) {
+               scd->scd_buf_adjust--; /* consume it */
+               scd->scd_buf_posting++;
+
+               spin_unlock(&scd->scd_lock);
+
+               LIBCFS_ALLOC(buf, sizeof(*buf));
+               if (buf == NULL) {
+                       CERROR("Failed to add new buf to service: %s\n",
+                              scd->scd_svc->sv_name);
+                       spin_lock(&scd->scd_lock);
+                       rc = -ENOMEM;
+                       break;
+               }
+
+               spin_lock(&scd->scd_lock);
+               if (scd->scd_svc->sv_shuttingdown) {
+                       spin_unlock(&scd->scd_lock);
+                       LIBCFS_FREE(buf, sizeof(*buf));
+
+                       spin_lock(&scd->scd_lock);
+                       rc = -ESHUTDOWN;
+                       break;
+               }
+
+               rc = srpc_service_post_buffer(scd, buf);
+               if (rc != 0)
+                       break; /* buf has been freed inside */
+
+               LASSERT(scd->scd_buf_posting > 0);
+               scd->scd_buf_posting--;
+               scd->scd_buf_total++;
+               scd->scd_buf_low = MAX(2, scd->scd_buf_total / 4);
+       }
+
+       if (rc != 0) {
+               scd->scd_buf_err_stamp = cfs_time_current_sec();
+               scd->scd_buf_err = rc;
+
+               LASSERT(scd->scd_buf_posting > 0);
+               scd->scd_buf_posting--;
+       }
+
+       spin_unlock(&scd->scd_lock);
+       return 0;
+}
 
-        return posted;
+int
+srpc_service_add_buffers(struct srpc_service *sv, int nbuffer)
+{
+       struct srpc_service_cd  *scd;
+       int                     rc = 0;
+       int                     i;
+
+       LASSERTF(nbuffer > 0, "nbuffer must be positive: %d\n", nbuffer);
+
+       cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
+               spin_lock(&scd->scd_lock);
+
+               scd->scd_buf_err = 0;
+               scd->scd_buf_err_stamp = 0;
+               scd->scd_buf_posting = 0;
+               scd->scd_buf_adjust = nbuffer;
+               /* start to post buffers */
+               swi_schedule_workitem(&scd->scd_buf_wi);
+               spin_unlock(&scd->scd_lock);
+
+               /* framework service only posts buffers for one partition */
+               if (srpc_serv_is_framework(sv))
+                       break;
+       }
+
+       cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
+               spin_lock(&scd->scd_lock);
+               /*
+                * NB: srpc_service_add_buffers() can be called inside
+                * thread context of lst_sched_serial, and we don't normally
+                * allow to sleep inside thread context of WI scheduler
+                * because it will block current scheduler thread from doing
+                * anything else, even worse, it could deadlock if it's
+                * waiting on result from another WI of the same scheduler.
+                * However, it's safe at here because scd_buf_wi is scheduled
+                * by thread in a different WI scheduler (lst_sched_test),
+                * so we don't have any risk of deadlock, though this could
+                * block all WIs pending on lst_sched_serial for a moment
+                * which is not good but not fatal.
+                */
+               lst_wait_until(scd->scd_buf_err != 0 ||
+                              (scd->scd_buf_adjust == 0 &&
+                               scd->scd_buf_posting == 0),
+                              scd->scd_lock, "waiting for adding buffer\n");
+
+               if (scd->scd_buf_err != 0 && rc == 0)
+                       rc = scd->scd_buf_err;
+
+               spin_unlock(&scd->scd_lock);
+       }
+
+       return rc;
 }
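
lst_wait_until() above is, roughly, a condition-polling loop that drops the
spinlock around each sleep so the scd_buf_wi workitem (running on another
scheduler, lst_sched_test) can post buffers and eventually satisfy the
condition.  A hedged sketch of the shape, not the literal macro from
selftest.h:

    #include <pthread.h>
    #include <sched.h>

    #define sketch_wait_until(cond, lock)                   \
            do {                                            \
                    while (!(cond)) {                       \
                            pthread_mutex_unlock(&(lock));  \
                            sched_yield();                  \
                            pthread_mutex_lock(&(lock));    \
                    }                                       \
            } while (0)
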
 
 void
-srpc_service_remove_buffers (srpc_service_t *sv, int nbuffer)
+srpc_service_remove_buffers(struct srpc_service *sv, int nbuffer)
 {
-        LASSERTF (nbuffer > 0,
-                  "nbuffer must be positive: %d\n", nbuffer);
+       struct srpc_service_cd  *scd;
+       int                     num;
+       int                     i;
 
-        spin_lock(&sv->sv_lock);
+       LASSERT(!sv->sv_shuttingdown);
 
-        LASSERT (sv->sv_nprune >= 0);
-        LASSERT (!sv->sv_shuttingdown);
+       cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
+               spin_lock(&scd->scd_lock);
 
-        sv->sv_nprune += nbuffer;
+               num = scd->scd_buf_total + scd->scd_buf_posting;
+               scd->scd_buf_adjust -= min(nbuffer, num);
 
-        spin_unlock(&sv->sv_lock);
-        return;
+               spin_unlock(&scd->scd_lock);
+       }
 }
 
 /* returns 1 if sv has finished, otherwise 0 */
 int
-srpc_finish_service (srpc_service_t *sv)
+srpc_finish_service(struct srpc_service *sv)
 {
-        srpc_server_rpc_t *rpc;
-        srpc_buffer_t     *buf;
-
-        spin_lock(&sv->sv_lock);
-
-        LASSERT (sv->sv_shuttingdown); /* srpc_shutdown_service called */
-
-        if (sv->sv_nposted_msg != 0 || !list_empty(&sv->sv_active_rpcq)) {
-                CDEBUG (D_NET,
-                        "waiting for %d posted buffers to unlink and "
-                        "in-flight RPCs to die.\n",
-                        sv->sv_nposted_msg);
-
-                if (!list_empty(&sv->sv_active_rpcq)) {
-                        rpc = list_entry(sv->sv_active_rpcq.next,
-                                         srpc_server_rpc_t, srpc_list);
-                        CDEBUG (D_NETERROR,
-                                "Active RPC on shutdown: sv %s, peer %s, "
-                                "wi %s scheduled %d running %d, "
-                                "ev fired %d type %d status %d lnet %d\n",
-                                sv->sv_name, libcfs_id2str(rpc->srpc_peer),
-                                swi_state2str(rpc->srpc_wi.wi_state),
-                                rpc->srpc_wi.wi_scheduled,
-                                rpc->srpc_wi.wi_running,
-                                rpc->srpc_ev.ev_fired,
-                                rpc->srpc_ev.ev_type,
-                                rpc->srpc_ev.ev_status,
-                                rpc->srpc_ev.ev_lnet);
-                }
-
-                spin_unlock(&sv->sv_lock);
-                return 0;
-        }
-
-        spin_unlock(&sv->sv_lock); /* no lock needed from now on */
-
-        for (;;) {
-                struct list_head *q;
-
-                if (!list_empty(&sv->sv_posted_msgq))
-                        q = &sv->sv_posted_msgq;
-                else if (!list_empty(&sv->sv_blocked_msgq))
-                        q = &sv->sv_blocked_msgq;
-                else
-                        break;
-
-                buf = list_entry(q->next, srpc_buffer_t, buf_list);
-                list_del(&buf->buf_list);
-
-                LIBCFS_FREE(buf, sizeof(*buf));
-        }
-
-        while (!list_empty(&sv->sv_free_rpcq)) {
-                rpc = list_entry(sv->sv_free_rpcq.next,
-                                 srpc_server_rpc_t, srpc_list);
-                list_del(&rpc->srpc_list);
-                LIBCFS_FREE(rpc, sizeof(*rpc));
-        }
-
-        return 1;
+       struct srpc_service_cd  *scd;
+       struct srpc_server_rpc  *rpc;
+       int                     i;
+
+       LASSERT(sv->sv_shuttingdown); /* srpc_shutdown_service called */
+
+       cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
+               spin_lock(&scd->scd_lock);
+               if (!swi_deschedule_workitem(&scd->scd_buf_wi)) {
+                       spin_unlock(&scd->scd_lock);
+                       return 0;
+               }
+
+               if (scd->scd_buf_nposted > 0) {
+                       CDEBUG(D_NET, "waiting for %d posted buffers to "
+                              "unlink\n", scd->scd_buf_nposted);
+                       spin_unlock(&scd->scd_lock);
+                       return 0;
+               }
+
+               if (list_empty(&scd->scd_rpc_active)) {
+                       spin_unlock(&scd->scd_lock);
+                       continue;
+               }
+
+               rpc = list_entry(scd->scd_rpc_active.next,
+                                    struct srpc_server_rpc, srpc_list);
+               CNETERR("Active RPC %p on shutdown: sv %s, peer %s, "
+                       "wi %s scheduled %d running %d, "
+                       "ev fired %d type %d status %d lnet %d\n",
+                       rpc, sv->sv_name, libcfs_id2str(rpc->srpc_peer),
+                       swi_state2str(rpc->srpc_wi.swi_state),
+                       rpc->srpc_wi.swi_workitem.wi_scheduled,
+                       rpc->srpc_wi.swi_workitem.wi_running,
+                       rpc->srpc_ev.ev_fired, rpc->srpc_ev.ev_type,
+                       rpc->srpc_ev.ev_status, rpc->srpc_ev.ev_lnet);
+               spin_unlock(&scd->scd_lock);
+               return 0;
+       }
+
+       /* no lock needed from now on */
+       srpc_service_fini(sv);
+       return 1;
 }
 
 /* called with sv->sv_lock held */
-void
-srpc_service_recycle_buffer (srpc_service_t *sv, srpc_buffer_t *buf)
+static void
+srpc_service_recycle_buffer(struct srpc_service_cd *scd, srpc_buffer_t *buf)
+__must_hold(&scd->scd_lock)
 {
-        if (sv->sv_shuttingdown) goto free;
-
-        if (sv->sv_nprune == 0) {
-                if (srpc_service_post_buffer(sv, buf) != 0)
-                        CWARN ("Failed to post %s buffer\n", sv->sv_name);
-                return;
-        }
+       if (!scd->scd_svc->sv_shuttingdown && scd->scd_buf_adjust >= 0) {
+               if (srpc_service_post_buffer(scd, buf) != 0) {
+                       CWARN("Failed to post %s buffer\n",
+                             scd->scd_svc->sv_name);
+               }
+               return;
+       }
+
+       /* service is shutting down, or we want to recycle some buffers */
+       scd->scd_buf_total--;
+
+       if (scd->scd_buf_adjust < 0) {
+               scd->scd_buf_adjust++;
+               if (scd->scd_buf_adjust < 0 &&
+                   scd->scd_buf_total == 0 && scd->scd_buf_posting == 0) {
+                       CDEBUG(D_INFO,
+                              "Try to recyle %d buffers but nothing left\n",
+                              scd->scd_buf_adjust);
+                       scd->scd_buf_adjust = 0;
+               }
+       }
+
+       spin_unlock(&scd->scd_lock);
+       LIBCFS_FREE(buf, sizeof(*buf));
+       spin_lock(&scd->scd_lock);
+}
 
-        sv->sv_nprune--;
-free:
-        spin_unlock(&sv->sv_lock);
-        LIBCFS_FREE(buf, sizeof(*buf));
-        spin_lock(&sv->sv_lock);
+void
+srpc_abort_service(struct srpc_service *sv)
+{
+       struct srpc_service_cd  *scd;
+       struct srpc_server_rpc  *rpc;
+       int                     i;
+
+       CDEBUG(D_NET, "Aborting service: id %d, name %s\n",
+              sv->sv_id, sv->sv_name);
+
+       cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
+               spin_lock(&scd->scd_lock);
+
+               /* schedule in-flight RPCs to notice the abort. NB: this
+                * races with incoming RPCs; a complete fix would make
+                * test RPCs carry the session ID in their headers */
+               list_for_each_entry(rpc, &scd->scd_rpc_active, srpc_list) {
+                       rpc->srpc_aborted = 1;
+                       swi_schedule_workitem(&rpc->srpc_wi);
+               }
+
+               spin_unlock(&scd->scd_lock);
+       }
 }
 
 void
-srpc_shutdown_service (srpc_service_t *sv)
+srpc_shutdown_service(srpc_service_t *sv)
 {
-        srpc_server_rpc_t *rpc;
-        srpc_buffer_t     *buf;
+       struct srpc_service_cd  *scd;
+       struct srpc_server_rpc  *rpc;
+       srpc_buffer_t           *buf;
+       int                     i;
 
-        spin_lock(&sv->sv_lock);
+       CDEBUG(D_NET, "Shutting down service: id %d, name %s\n",
+              sv->sv_id, sv->sv_name);
 
-        CDEBUG (D_NET, "Shutting down service: id %d, name %s\n",
-                sv->sv_id, sv->sv_name);
+       cfs_percpt_for_each(scd, i, sv->sv_cpt_data)
+               spin_lock(&scd->scd_lock);
 
-        sv->sv_shuttingdown = 1; /* i.e. no new active RPC */
+       sv->sv_shuttingdown = 1; /* i.e. no new active RPC */
 
-        /* schedule in-flight RPCs to notice the shutdown */
-        list_for_each_entry (rpc, &sv->sv_active_rpcq, srpc_list) {
-                swi_schedule_workitem(&rpc->srpc_wi);
-        }
+       cfs_percpt_for_each(scd, i, sv->sv_cpt_data)
+               spin_unlock(&scd->scd_lock);
 
-        spin_unlock(&sv->sv_lock);
+       cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
+               spin_lock(&scd->scd_lock);
 
-        /* OK to traverse sv_posted_msgq without lock, since no one
-         * touches sv_posted_msgq now */
-        list_for_each_entry (buf, &sv->sv_posted_msgq, buf_list)
-                LNetMDUnlink(buf->buf_mdh);
+               /* schedule in-flight RPCs to notice the shutdown */
+               list_for_each_entry(rpc, &scd->scd_rpc_active, srpc_list)
+                       swi_schedule_workitem(&rpc->srpc_wi);
 
-        return;
+               spin_unlock(&scd->scd_lock);
+
+               /* OK to traverse scd_buf_posted without lock, since no one
+                * touches scd_buf_posted now */
+               list_for_each_entry(buf, &scd->scd_buf_posted, buf_list)
+                       LNetMDUnlink(buf->buf_mdh);
+       }
 }
 
-int
+static int
 srpc_send_request (srpc_client_rpc_t *rpc)
 {
         srpc_event_t *ev = &rpc->crpc_reqstev;
@@ -687,7 +844,7 @@ srpc_send_request (srpc_client_rpc_t *rpc)
         return rc;
 }
 
-int
+static int
 srpc_prepare_reply (srpc_client_rpc_t *rpc)
 {
         srpc_event_t *ev = &rpc->crpc_replyev;
@@ -700,7 +857,7 @@ srpc_prepare_reply (srpc_client_rpc_t *rpc)
 
         *id = srpc_next_id();
 
-        rc = srpc_post_passive_rdma(SRPC_RDMA_PORTAL, *id,
+       rc = srpc_post_passive_rdma(SRPC_RDMA_PORTAL, 0, *id,
                                     &rpc->crpc_replymsg, sizeof(srpc_msg_t),
                                     LNET_MD_OP_PUT, rpc->crpc_dest,
                                     &rpc->crpc_replymdh, ev);
@@ -711,7 +868,7 @@ srpc_prepare_reply (srpc_client_rpc_t *rpc)
         return rc;
 }
 
-int
+static int
 srpc_prepare_bulk (srpc_client_rpc_t *rpc)
 {
         srpc_bulk_t  *bk = &rpc->crpc_bulk;
@@ -737,7 +894,7 @@ srpc_prepare_bulk (srpc_client_rpc_t *rpc)
 
         *id = srpc_next_id();
 
-        rc = srpc_post_passive_rdma(SRPC_RDMA_PORTAL, *id,
+       rc = srpc_post_passive_rdma(SRPC_RDMA_PORTAL, 0, *id,
                                     &bk->bk_iovs[0], bk->bk_niov, opt,
                                     rpc->crpc_dest, &bk->bk_mdh, ev);
         if (rc != 0) {
@@ -747,7 +904,7 @@ srpc_prepare_bulk (srpc_client_rpc_t *rpc)
         return rc;
 }
 
-int
+static int
 srpc_do_bulk (srpc_server_rpc_t *rpc)
 {
         srpc_event_t  *ev = &rpc->srpc_ev;
@@ -778,97 +935,85 @@ srpc_do_bulk (srpc_server_rpc_t *rpc)
         return rc;
 }
 
-/* called with srpc_service_t::sv_lock held */
-inline void
-srpc_schedule_server_rpc (srpc_server_rpc_t *rpc)
-{
-        srpc_service_t *sv = rpc->srpc_service;
-
-        if (sv->sv_id > SRPC_FRAMEWORK_SERVICE_MAX_ID)
-                swi_schedule_workitem(&rpc->srpc_wi);
-        else    /* framework RPCs are handled one by one */
-                swi_schedule_serial_workitem(&rpc->srpc_wi);
-
-        return;
-}
-
 /* only called from srpc_handle_rpc */
-void
-srpc_server_rpc_done (srpc_server_rpc_t *rpc, int status)
+static void
+srpc_server_rpc_done(srpc_server_rpc_t *rpc, int status)
 {
-        srpc_service_t *sv = rpc->srpc_service;
-        srpc_buffer_t  *buffer;
+       struct srpc_service_cd  *scd = rpc->srpc_scd;
+       struct srpc_service     *sv  = scd->scd_svc;
+       srpc_buffer_t           *buffer;
 
-        LASSERT (status != 0 || rpc->srpc_wi.wi_state == SWI_STATE_DONE);
+        LASSERT (status != 0 || rpc->srpc_wi.swi_state == SWI_STATE_DONE);
 
         rpc->srpc_status = status;
 
-        CDEBUG (status == 0 ? D_NET : D_NETERROR,
-                "Server RPC done: service %s, peer %s, status %s:%d\n",
-                sv->sv_name, libcfs_id2str(rpc->srpc_peer),
-                swi_state2str(rpc->srpc_wi.wi_state), status);
+        CDEBUG_LIMIT (status == 0 ? D_NET : D_NETERROR,
+                "Server RPC %p done: service %s, peer %s, status %s:%d\n",
+                rpc, sv->sv_name, libcfs_id2str(rpc->srpc_peer),
+                swi_state2str(rpc->srpc_wi.swi_state), status);
 
         if (status != 0) {
-                spin_lock(&srpc_data.rpc_glock);
-                srpc_data.rpc_counters.rpcs_dropped++;
-                spin_unlock(&srpc_data.rpc_glock);
-        }
-
-        if (rpc->srpc_done != NULL)
-                (*rpc->srpc_done) (rpc);
-        LASSERT (rpc->srpc_bulk == NULL);
-
-        spin_lock(&sv->sv_lock);
-
-        if (rpc->srpc_reqstbuf != NULL) {
-                /* NB might drop sv_lock in srpc_service_recycle_buffer, but
-                 * sv won't go away for sv_active_rpcq must not be empty */
-                srpc_service_recycle_buffer(sv, rpc->srpc_reqstbuf);
-                rpc->srpc_reqstbuf = NULL;
-        }
-
-        list_del(&rpc->srpc_list); /* from sv->sv_active_rpcq */
-
-        /*
-         * No one can schedule me now since:
-         * - I'm not on sv_active_rpcq.
-         * - all LNet events have been fired.
-         * Cancel pending schedules and prevent future schedule attempts:
-         */
-        LASSERT (rpc->srpc_ev.ev_fired);
-        swi_kill_workitem(&rpc->srpc_wi);
-
-        if (!sv->sv_shuttingdown && !list_empty(&sv->sv_blocked_msgq)) {
-                buffer = list_entry(sv->sv_blocked_msgq.next,
-                                    srpc_buffer_t, buf_list);
-                list_del(&buffer->buf_list);
-
-                srpc_init_server_rpc(rpc, sv, buffer);
-                list_add_tail(&rpc->srpc_list, &sv->sv_active_rpcq);
-                srpc_schedule_server_rpc(rpc);
-        } else {
-                list_add(&rpc->srpc_list, &sv->sv_free_rpcq);
-        }
-
-        spin_unlock(&sv->sv_lock);
-        return;
+               spin_lock(&srpc_data.rpc_glock);
+               srpc_data.rpc_counters.rpcs_dropped++;
+               spin_unlock(&srpc_data.rpc_glock);
+       }
+
+       if (rpc->srpc_done != NULL)
+               (*rpc->srpc_done) (rpc);
+       LASSERT(rpc->srpc_bulk == NULL);
+
+       spin_lock(&scd->scd_lock);
+
+       if (rpc->srpc_reqstbuf != NULL) {
+               /* NB we might drop scd_lock in srpc_service_recycle_buffer,
+                * but sv won't go away since scd_rpc_active must not be
+                * empty */
+               srpc_service_recycle_buffer(scd, rpc->srpc_reqstbuf);
+               rpc->srpc_reqstbuf = NULL;
+       }
+
+       list_del(&rpc->srpc_list); /* from scd->scd_rpc_active */
+
+       /*
+        * No one can schedule me now since:
+        * - I'm not on scd_rpc_active.
+        * - all LNet events have been fired.
+        * Cancel pending schedules and prevent future schedule attempts:
+        */
+       LASSERT(rpc->srpc_ev.ev_fired);
+       swi_exit_workitem(&rpc->srpc_wi);
+
+       if (!sv->sv_shuttingdown && !list_empty(&scd->scd_buf_blocked)) {
+               buffer = list_entry(scd->scd_buf_blocked.next,
+                                       srpc_buffer_t, buf_list);
+               list_del(&buffer->buf_list);
+
+               srpc_init_server_rpc(rpc, scd, buffer);
+               list_add_tail(&rpc->srpc_list, &scd->scd_rpc_active);
+               swi_schedule_workitem(&rpc->srpc_wi);
+       } else {
+               list_add(&rpc->srpc_list, &scd->scd_rpc_free);
+       }
+
+       spin_unlock(&scd->scd_lock);
+       return;
 }
 
 /* handles an incoming RPC */
 int
-srpc_handle_rpc (swi_workitem_t *wi)
+srpc_handle_rpc(swi_workitem_t *wi)
 {
-        srpc_server_rpc_t *rpc = wi->wi_data;
-        srpc_service_t    *sv = rpc->srpc_service;
-        srpc_event_t      *ev = &rpc->srpc_ev;
-        int                rc = 0;
+       struct srpc_server_rpc  *rpc = wi->swi_workitem.wi_data;
+       struct srpc_service_cd  *scd = rpc->srpc_scd;
+       struct srpc_service     *sv = scd->scd_svc;
+       srpc_event_t            *ev = &rpc->srpc_ev;
+       int                     rc = 0;
 
-        LASSERT (wi == &rpc->srpc_wi);
+       LASSERT(wi == &rpc->srpc_wi);
 
-        spin_lock(&sv->sv_lock);
+       spin_lock(&scd->scd_lock);
 
-        if (sv->sv_shuttingdown) {
-                spin_unlock(&sv->sv_lock);
+       if (sv->sv_shuttingdown || rpc->srpc_aborted) {
+               spin_unlock(&scd->scd_lock);
 
                 if (rpc->srpc_bulk != NULL)
                         LNetMDUnlink(rpc->srpc_bulk->bk_mdh);
@@ -881,9 +1026,9 @@ srpc_handle_rpc (swi_workitem_t *wi)
                 return 0;
         }
 
-        spin_unlock(&sv->sv_lock);
+       spin_unlock(&scd->scd_lock);
 
-        switch (wi->wi_state) {
+        switch (wi->swi_state) {
         default:
                 LBUG ();
         case SWI_STATE_NEWBORN: {
@@ -893,24 +1038,30 @@ srpc_handle_rpc (swi_workitem_t *wi)
                 msg = &rpc->srpc_reqstbuf->buf_msg;
                 reply = &rpc->srpc_replymsg.msg_body.reply;
 
-                if (msg->msg_version != SRPC_MSG_VERSION &&
-                    msg->msg_version != __swab32(SRPC_MSG_VERSION)) {
-                        CWARN ("Version mismatch: %u, %u expected, from %s\n",
-                               msg->msg_version, SRPC_MSG_VERSION,
-                               libcfs_id2str(rpc->srpc_peer));
-                        reply->status = EPROTO;
-                } else {
-                        reply->status = 0;
-                        rc = (*sv->sv_handler) (rpc);
-                        LASSERT (reply->status == 0 || !rpc->srpc_bulk);
+                if (msg->msg_magic == 0) {
+                        /* moaned already in srpc_lnet_ev_handler */
+                       srpc_server_rpc_done(rpc, EBADMSG);
+                       return 1;
+               }
+
+               srpc_unpack_msg_hdr(msg);
+               if (msg->msg_version != SRPC_MSG_VERSION) {
+                       CWARN("Version mismatch: %u, %u expected, from %s\n",
+                             msg->msg_version, SRPC_MSG_VERSION,
+                             libcfs_id2str(rpc->srpc_peer));
+                       reply->status = EPROTO;
+                       /* drop through and send reply */
+               } else {
+                       reply->status = 0;
+                       rc = (*sv->sv_handler)(rpc);
+                       LASSERT(reply->status == 0 || !rpc->srpc_bulk);
+                       if (rc != 0) {
+                               srpc_server_rpc_done(rpc, rc);
+                               return 1;
+                       }
                 }
 
-                if (rc != 0) {
-                        srpc_server_rpc_done(rpc, rc);
-                        return 1;
-                }
-
-                wi->wi_state = SWI_STATE_BULK_STARTED;
+                wi->swi_state = SWI_STATE_BULK_STARTED;
 
                 if (rpc->srpc_bulk != NULL) {
                         rc = srpc_do_bulk(rpc);
@@ -936,7 +1087,7 @@ srpc_handle_rpc (swi_workitem_t *wi)
                         }
                 }
 
-                wi->wi_state = SWI_STATE_REPLY_SUBMITTED;
+                wi->swi_state = SWI_STATE_REPLY_SUBMITTED;
                 rc = srpc_send_reply(rpc);
                 if (rc == 0)
                         return 0; /* wait for reply */
@@ -944,9 +1095,15 @@ srpc_handle_rpc (swi_workitem_t *wi)
                 return 1;
 
         case SWI_STATE_REPLY_SUBMITTED:
-                LASSERT (ev->ev_fired);
+                if (!ev->ev_fired) {
+                        CERROR("RPC %p: bulk %p, service %d\n",
+                              rpc, rpc->srpc_bulk, sv->sv_id);
+                        CERROR("Event: status %d, type %d, lnet %d\n",
+                               ev->ev_status, ev->ev_type, ev->ev_lnet);
+                        LASSERT (ev->ev_fired);
+                }
 
-                wi->wi_state = SWI_STATE_DONE;
+                wi->swi_state = SWI_STATE_DONE;
                 srpc_server_rpc_done(rpc, ev->ev_status);
                 return 1;
         }
@@ -954,7 +1111,7 @@ srpc_handle_rpc (swi_workitem_t *wi)
         return 0;
 }
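
The server-side workitem is a small state machine; assuming only what the
switch in srpc_handle_rpc() shows, each pass either advances swi_state and
returns 0 (wait for the next LNet event) or returns 1 (RPC finished, the
workitem must not run again).  A compile-time sketch of that flow:

    enum swi_state_sketch {
            ST_NEWBORN,             /* unpack, run sv_handler(), start bulk */
            ST_BULK_STARTED,        /* bulk done, submit the reply */
            ST_REPLY_SUBMITTED,     /* reply event fired, finish up */
            ST_DONE,
    };

    static int handle_rpc_sketch(enum swi_state_sketch *st)
    {
            switch (*st) {
            case ST_NEWBORN:
                    *st = ST_BULK_STARTED;
                    return 0;       /* bulk posted, wait for event */
            case ST_BULK_STARTED:
                    *st = ST_REPLY_SUBMITTED;
                    return 0;       /* reply posted, wait for event */
            case ST_REPLY_SUBMITTED:
                    *st = ST_DONE;
                    return 1;       /* srpc_server_rpc_done() runs here */
            default:
                    return 1;
            }
    }
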
 
-void
+static void
 srpc_client_rpc_expired (void *data)
 {
         srpc_client_rpc_t *rpc = data;
@@ -963,106 +1120,73 @@ srpc_client_rpc_expired (void *data)
                rpc->crpc_service, libcfs_id2str(rpc->crpc_dest),
                rpc->crpc_timeout);
 
-        spin_lock(&rpc->crpc_lock);
+       spin_lock(&rpc->crpc_lock);
 
-        rpc->crpc_timeout = 0;
-        srpc_abort_rpc(rpc, -ETIMEDOUT);
+       rpc->crpc_timeout = 0;
+       srpc_abort_rpc(rpc, -ETIMEDOUT);
 
-        spin_unlock(&rpc->crpc_lock);
+       spin_unlock(&rpc->crpc_lock);
 
-        spin_lock(&srpc_data.rpc_glock);
-        srpc_data.rpc_counters.rpcs_expired++;
-        spin_unlock(&srpc_data.rpc_glock);
-        return;
+       spin_lock(&srpc_data.rpc_glock);
+       srpc_data.rpc_counters.rpcs_expired++;
+       spin_unlock(&srpc_data.rpc_glock);
 }
 
-inline void
-srpc_add_client_rpc_timer (srpc_client_rpc_t *rpc)
+static void
+srpc_add_client_rpc_timer(srpc_client_rpc_t *rpc)
 {
-        stt_timer_t *timer = &rpc->crpc_timer;
-
-        if (rpc->crpc_timeout == 0) return;
-
-        CFS_INIT_LIST_HEAD(&timer->stt_list);
-        timer->stt_data    = rpc;
-        timer->stt_func    = srpc_client_rpc_expired;
-        timer->stt_expires = cfs_time_add(rpc->crpc_timeout, 
-                                          cfs_time_current_sec());
-        stt_add_timer(timer);
-        return;
+       stt_timer_t *timer = &rpc->crpc_timer;
+
+       if (rpc->crpc_timeout == 0)
+               return;
+
+       INIT_LIST_HEAD(&timer->stt_list);
+       timer->stt_data    = rpc;
+       timer->stt_func    = srpc_client_rpc_expired;
+       timer->stt_expires = cfs_time_add(rpc->crpc_timeout,
+                                         cfs_time_current_sec());
+       stt_add_timer(timer);
+       return;
 }
 
-/* 
+/*
  * Called with rpc->crpc_lock held.
  *
  * Upon exit the RPC expiry timer is not queued and the handler is not
  * running on any CPU. */
-void
+static void
 srpc_del_client_rpc_timer (srpc_client_rpc_t *rpc)
-{     
-        /* timer not planted or already exploded */
-        if (rpc->crpc_timeout == 0) return;
+{
+       /* timer not planted or already exploded */
+       if (rpc->crpc_timeout == 0)
+               return;
 
-        /* timer sucessfully defused */
-        if (stt_del_timer(&rpc->crpc_timer)) return;
+       /* timer successfully defused */
+       if (stt_del_timer(&rpc->crpc_timer))
+               return;
 
 #ifdef __KERNEL__
-        /* timer detonated, wait for it to explode */
-        while (rpc->crpc_timeout != 0) {
-                spin_unlock(&rpc->crpc_lock);
+       /* timer detonated, wait for it to explode */
+       while (rpc->crpc_timeout != 0) {
+               spin_unlock(&rpc->crpc_lock);
 
-                cfs_schedule(); 
+               schedule();
 
-                spin_lock(&rpc->crpc_lock);
-        }
+               spin_lock(&rpc->crpc_lock);
+       }
 #else
-        LBUG(); /* impossible in single-threaded runtime */
+       LBUG(); /* impossible in single-threaded runtime */
 #endif
-        return;
 }
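
srpc_del_client_rpc_timer() is the classic defuse-or-wait shape: try to
cancel the timer; if the handler already fired, wait until it signals
completion by zeroing crpc_timeout.  A minimal sketch of the three cases
(stt_del_timer() stubbed out; the kernel path drops crpc_lock and calls
schedule() instead of spinning):

    static int del_timer_stub(void)
    {
            return 0;       /* assume we lost the race to the handler */
    }

    static void del_timer_sketch(volatile int *crpc_timeout)
    {
            if (*crpc_timeout == 0)
                    return;         /* timer not planted or already exploded */
            if (del_timer_stub())
                    return;         /* timer successfully defused */
            while (*crpc_timeout != 0)
                    ;               /* handler running; wait for it to finish */
    }
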
 
-void
-srpc_check_sends (srpc_peer_t *peer, int credits)
-{
-        struct list_head  *q;
-        srpc_client_rpc_t *rpc;
-
-        LASSERT (credits >= 0);
-        LASSERT (srpc_data.rpc_state == SRPC_STATE_RUNNING);
-
-        spin_lock(&peer->stp_lock);
-        peer->stp_credits += credits;
-
-        while (peer->stp_credits) {
-                if (!list_empty(&peer->stp_ctl_rpcq))
-                        q = &peer->stp_ctl_rpcq;
-                else if (!list_empty(&peer->stp_rpcq))
-                        q = &peer->stp_rpcq;
-                else
-                        break;
-
-                peer->stp_credits--;
-
-                rpc = list_entry(q->next, srpc_client_rpc_t, crpc_privl);
-                list_del_init(&rpc->crpc_privl);
-                srpc_client_rpc_decref(rpc);  /* --ref for peer->*rpcq */
-
-                swi_schedule_workitem(&rpc->crpc_wi);
-        }
-
-        spin_unlock(&peer->stp_lock);
-        return;
-}
-
-void
+static void
 srpc_client_rpc_done (srpc_client_rpc_t *rpc, int status)
 {
-        swi_workitem_t *wi = &rpc->crpc_wi;
-        srpc_peer_t    *peer = rpc->crpc_peer;
+       swi_workitem_t *wi = &rpc->crpc_wi;
 
-        LASSERT (status != 0 || wi->wi_state == SWI_STATE_DONE);
+       LASSERT(status != 0 || wi->swi_state == SWI_STATE_DONE);
 
-        spin_lock(&rpc->crpc_lock);
+       spin_lock(&rpc->crpc_lock);
 
         rpc->crpc_closed = 1;
         if (rpc->crpc_status == 0)
@@ -1070,29 +1194,26 @@ srpc_client_rpc_done (srpc_client_rpc_t *rpc, int status)
 
         srpc_del_client_rpc_timer(rpc);
 
-        CDEBUG ((status == 0) ? D_NET : D_NETERROR,
+        CDEBUG_LIMIT ((status == 0) ? D_NET : D_NETERROR,
                 "Client RPC done: service %d, peer %s, status %s:%d:%d\n",
                 rpc->crpc_service, libcfs_id2str(rpc->crpc_dest),
-                swi_state2str(wi->wi_state), rpc->crpc_aborted, status);
+                swi_state2str(wi->swi_state), rpc->crpc_aborted, status);
 
         /*
          * No one can schedule me now since:
          * - RPC timer has been defused.
          * - all LNet events have been fired.
-         * - crpc_closed has been set, preventing srpc_abort_rpc from 
+         * - crpc_closed has been set, preventing srpc_abort_rpc from
          *   scheduling me.
          * Cancel pending schedules and prevent future schedule attempts:
          */
         LASSERT (!srpc_event_pending(rpc));
-        swi_kill_workitem(wi);
+       swi_exit_workitem(wi);
 
-        spin_unlock(&rpc->crpc_lock);
+       spin_unlock(&rpc->crpc_lock);
 
-        (*rpc->crpc_done) (rpc);
-
-        if (peer != NULL)
-                srpc_check_sends(peer, 1);
-        return;
+       (*rpc->crpc_done)(rpc);
 }
 
 /* sends an outgoing RPC */
@@ -1100,23 +1221,30 @@ int
 srpc_send_rpc (swi_workitem_t *wi)
 {
         int                rc = 0;
-        srpc_client_rpc_t *rpc = wi->wi_data;
-        srpc_msg_t        *reply = &rpc->crpc_replymsg;
-        int                do_bulk = rpc->crpc_bulk.bk_niov > 0;
+       srpc_client_rpc_t *rpc;
+       srpc_msg_t        *reply;
+       int                do_bulk;
+
+       LASSERT(wi != NULL);
+
+       rpc = wi->swi_workitem.wi_data;
 
         LASSERT (rpc != NULL);
         LASSERT (wi == &rpc->crpc_wi);
 
-        spin_lock(&rpc->crpc_lock);
+       reply = &rpc->crpc_replymsg;
+       do_bulk = rpc->crpc_bulk.bk_niov > 0;
 
-        if (rpc->crpc_aborted) {
-                spin_unlock(&rpc->crpc_lock);
-                goto abort;
-        }
+       spin_lock(&rpc->crpc_lock);
 
-        spin_unlock(&rpc->crpc_lock);
+       if (rpc->crpc_aborted) {
+               spin_unlock(&rpc->crpc_lock);
+               goto abort;
+       }
 
-        switch (wi->wi_state) {
+       spin_unlock(&rpc->crpc_lock);
+
+        switch (wi->swi_state) {
         default:
                 LBUG ();
         case SWI_STATE_NEWBORN:
@@ -1131,20 +1259,20 @@ srpc_send_rpc (swi_workitem_t *wi)
                 rc = srpc_prepare_bulk(rpc);
                 if (rc != 0) break;
 
-                wi->wi_state = SWI_STATE_REQUEST_SUBMITTED;
+                wi->swi_state = SWI_STATE_REQUEST_SUBMITTED;
                 rc = srpc_send_request(rpc);
                 break;
 
         case SWI_STATE_REQUEST_SUBMITTED:
                 /* CAVEAT EMPTOR: rqtev, rpyev, and bulkev may come in any
-                 * order; however, they're processed in a strict order: 
+                 * order; however, they're processed in a strict order:
                  * rqt, rpy, and bulk. */
                 if (!rpc->crpc_reqstev.ev_fired) break;
 
                 rc = rpc->crpc_reqstev.ev_status;
                 if (rc != 0) break;
 
-                wi->wi_state = SWI_STATE_REQUEST_SENT;
+                wi->swi_state = SWI_STATE_REQUEST_SENT;
                 /* perhaps more events, fall thru */
         case SWI_STATE_REQUEST_SENT: {
                 srpc_msg_type_t type = srpc_service2reply(rpc->crpc_service);
@@ -1154,10 +1282,10 @@ srpc_send_rpc (swi_workitem_t *wi)
                 rc = rpc->crpc_replyev.ev_status;
                 if (rc != 0) break;
 
-                if ((reply->msg_type != type && 
-                     reply->msg_type != __swab32(type)) ||
-                    (reply->msg_magic != SRPC_MSG_MAGIC &&
-                     reply->msg_magic != __swab32(SRPC_MSG_MAGIC))) {
+               srpc_unpack_msg_hdr(reply);
+               if (reply->msg_type != type ||
+                   (reply->msg_magic != SRPC_MSG_MAGIC &&
+                    reply->msg_magic != __swab32(SRPC_MSG_MAGIC))) {
                         CWARN ("Bad message from %s: type %u (%d expected),"
                                " magic %u (%d expected).\n",
                                libcfs_id2str(rpc->crpc_dest),
@@ -1175,7 +1303,7 @@ srpc_send_rpc (swi_workitem_t *wi)
                         LNetMDUnlink(rpc->crpc_bulk.bk_mdh);
                 }
 
-                wi->wi_state = SWI_STATE_REPLY_RECEIVED;
+                wi->swi_state = SWI_STATE_REPLY_RECEIVED;
         }
         case SWI_STATE_REPLY_RECEIVED:
                 if (do_bulk && !rpc->crpc_bulkev.ev_fired) break;
@@ -1190,16 +1318,16 @@ srpc_send_rpc (swi_workitem_t *wi)
                     rpc->crpc_status == 0 && reply->msg_body.reply.status != 0)
                         rc = 0;
 
-                wi->wi_state = SWI_STATE_DONE;
+                wi->swi_state = SWI_STATE_DONE;
                 srpc_client_rpc_done(rpc, rc);
                 return 1;
         }
 
-        if (rc != 0) {
-                spin_lock(&rpc->crpc_lock);
-                srpc_abort_rpc(rpc, rc);
-                spin_unlock(&rpc->crpc_lock);
-        }
+       if (rc != 0) {
+               spin_lock(&rpc->crpc_lock);
+               srpc_abort_rpc(rpc, rc);
+               spin_unlock(&rpc->crpc_lock);
+       }
 
 abort:
         if (rpc->crpc_aborted) {
@@ -1223,7 +1351,7 @@ srpc_create_client_rpc (lnet_process_id_t peer, int service,
 {
         srpc_client_rpc_t *rpc;
 
-       LIBCFS_ALLOC(rpc, offsetof(srpc_client_rpc_t,
+        LIBCFS_ALLOC(rpc, offsetof(srpc_client_rpc_t,
                                    crpc_bulk.bk_iovs[nbulkiov]));
         if (rpc == NULL)
                 return NULL;
@@ -1234,38 +1362,9 @@ srpc_create_client_rpc (lnet_process_id_t peer, int service,
 }
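
srpc_send_rpc() above is a resumable state machine: each run of the workitem advances through swi_state as far as already-fired events allow, then returns to be rescheduled when the next LNet event arrives. A compact standalone sketch of that pattern, with invented names:

/* sketch only: resumable event-driven state machine */
#include <stdio.h>

enum sketch_state { NEWBORN, REQUEST_SUBMITTED, REQUEST_SENT, DONE };

struct sketch_rpc {
	enum sketch_state state;
	int reqst_fired;	/* crpc_reqstev.ev_fired analogue */
	int reply_fired;	/* crpc_replyev.ev_fired analogue */
};

/* returns 1 when the RPC is finished, 0 when waiting for events */
static int sketch_send_rpc(struct sketch_rpc *rpc)
{
	switch (rpc->state) {
	case NEWBORN:
		rpc->state = REQUEST_SUBMITTED;
		/* perhaps the event fired already, fall thru */
	case REQUEST_SUBMITTED:
		if (!rpc->reqst_fired)
			return 0;
		rpc->state = REQUEST_SENT;
		/* fall thru */
	case REQUEST_SENT:
		if (!rpc->reply_fired)
			return 0;
		rpc->state = DONE;
		/* fall thru */
	case DONE:
		return 1;
	}
	return 0;
}

int main(void)
{
	struct sketch_rpc rpc = { NEWBORN, 0, 0 };

	printf("done=%d\n", sketch_send_rpc(&rpc));	/* 0: waiting */
	rpc.reqst_fired = 1;
	rpc.reply_fired = 1;	/* events may arrive in a batch */
	printf("done=%d\n", sketch_send_rpc(&rpc));	/* 1: complete */
	return 0;
}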
 
 /* called with rpc->crpc_lock held */
-static inline void
-srpc_queue_rpc (srpc_peer_t *peer, srpc_client_rpc_t *rpc)
-{
-        int service = rpc->crpc_service;
-
-        LASSERT (peer->stp_nid == rpc->crpc_dest.nid);
-        LASSERT (srpc_data.rpc_state == SRPC_STATE_RUNNING);
-
-        rpc->crpc_peer = peer;
-
-        spin_lock(&peer->stp_lock);
-
-        /* Framework RPCs that alter session state shall take precedence
-         * over test RPCs and framework query RPCs */
-        if (service <= SRPC_FRAMEWORK_SERVICE_MAX_ID &&
-            service != SRPC_SERVICE_DEBUG &&
-            service != SRPC_SERVICE_QUERY_STAT)
-                list_add_tail(&rpc->crpc_privl, &peer->stp_ctl_rpcq);
-        else
-                list_add_tail(&rpc->crpc_privl, &peer->stp_rpcq);
-
-        srpc_client_rpc_addref(rpc); /* ++ref for peer->*rpcq */
-        spin_unlock(&peer->stp_lock);
-        return;
-}
-
-/* called with rpc->crpc_lock held */
 void
 srpc_abort_rpc (srpc_client_rpc_t *rpc, int why)
 {
-        srpc_peer_t *peer = rpc->crpc_peer;
-
         LASSERT (why != 0);
 
         if (rpc->crpc_aborted || /* already aborted */
@@ -1275,23 +1374,10 @@ srpc_abort_rpc (srpc_client_rpc_t *rpc, int why)
         CDEBUG (D_NET,
                 "Aborting RPC: service %d, peer %s, state %s, why %d\n",
                 rpc->crpc_service, libcfs_id2str(rpc->crpc_dest),
-                swi_state2str(rpc->crpc_wi.wi_state), why);
+                swi_state2str(rpc->crpc_wi.swi_state), why);
 
         rpc->crpc_aborted = 1;
         rpc->crpc_status  = why;
-
-        if (peer != NULL) {
-                spin_lock(&peer->stp_lock);
-
-                if (!list_empty(&rpc->crpc_privl)) { /* still queued */
-                        list_del_init(&rpc->crpc_privl);
-                        srpc_client_rpc_decref(rpc); /* --ref for peer->*rpcq */
-                        rpc->crpc_peer = NULL;       /* no credit taken */
-                }
-
-                spin_unlock(&peer->stp_lock);
-        }
-
         swi_schedule_workitem(&rpc->crpc_wi);
         return;
 }
@@ -1300,59 +1386,44 @@ srpc_abort_rpc (srpc_client_rpc_t *rpc, int why)
 void
 srpc_post_rpc (srpc_client_rpc_t *rpc)
 {
-        srpc_peer_t *peer;
-
         LASSERT (!rpc->crpc_aborted);
-        LASSERT (rpc->crpc_peer == NULL);
         LASSERT (srpc_data.rpc_state == SRPC_STATE_RUNNING);
-        LASSERT ((rpc->crpc_bulk.bk_len & ~CFS_PAGE_MASK) == 0);
 
         CDEBUG (D_NET, "Posting RPC: peer %s, service %d, timeout %d\n",
                 libcfs_id2str(rpc->crpc_dest), rpc->crpc_service,
                 rpc->crpc_timeout);
 
         srpc_add_client_rpc_timer(rpc);
-
-        peer = srpc_nid2peer(rpc->crpc_dest.nid);
-        if (peer == NULL) {
-                srpc_abort_rpc(rpc, -ENOMEM);
-                return;
-        }
-
-        srpc_queue_rpc(peer, rpc);
-
-        spin_unlock(&rpc->crpc_lock);
-        srpc_check_sends(peer, 0);
-        spin_lock(&rpc->crpc_lock);
+        swi_schedule_workitem(&rpc->crpc_wi);
         return;
 }
 
 
 int
-srpc_send_reply (srpc_server_rpc_t *rpc)
+srpc_send_reply(struct srpc_server_rpc *rpc)
 {
-        srpc_event_t   *ev = &rpc->srpc_ev;
-        srpc_msg_t     *msg = &rpc->srpc_replymsg;
-        srpc_buffer_t  *buffer = rpc->srpc_reqstbuf;
-        srpc_service_t *sv = rpc->srpc_service;
-        __u64           rpyid;
-        int             rc;
-
-        LASSERT (buffer != NULL);
-        rpyid = buffer->buf_msg.msg_body.reqst.rpyid;
-
-        spin_lock(&sv->sv_lock);
-
-        if (!sv->sv_shuttingdown &&
-            sv->sv_id > SRPC_FRAMEWORK_SERVICE_MAX_ID) {
-                /* Repost buffer before replying since test client
-                 * might send me another RPC once it gets the reply */
-                if (srpc_service_post_buffer(sv, buffer) != 0)
-                        CWARN ("Failed to repost %s buffer\n", sv->sv_name);
-                rpc->srpc_reqstbuf = NULL;
-        }
+       srpc_event_t            *ev = &rpc->srpc_ev;
+       struct srpc_msg         *msg = &rpc->srpc_replymsg;
+       struct srpc_buffer      *buffer = rpc->srpc_reqstbuf;
+       struct srpc_service_cd  *scd = rpc->srpc_scd;
+       struct srpc_service     *sv = scd->scd_svc;
+       __u64                   rpyid;
+       int                     rc;
 
-        spin_unlock(&sv->sv_lock);
+       LASSERT(buffer != NULL);
+       rpyid = buffer->buf_msg.msg_body.reqst.rpyid;
+
+       spin_lock(&scd->scd_lock);
+
+       if (!sv->sv_shuttingdown && !srpc_serv_is_framework(sv)) {
+               /* Repost buffer before replying since test client
+                * might send me another RPC once it gets the reply */
+               if (srpc_service_post_buffer(scd, buffer) != 0)
+                       CWARN("Failed to repost %s buffer\n", sv->sv_name);
+               rpc->srpc_reqstbuf = NULL;
+       }
+
+       spin_unlock(&scd->scd_lock);
 
         ev->ev_fired = 0;
         ev->ev_data  = rpc;
@@ -1372,61 +1443,79 @@ srpc_send_reply (srpc_server_rpc_t *rpc)
 }
 
 /* when in kernel always called with LNET_LOCK() held, and in thread context */
-void 
-srpc_lnet_ev_handler (lnet_event_t *ev)
+static void
+srpc_lnet_ev_handler(lnet_event_t *ev)
 {
-        srpc_event_t      *rpcev = ev->md.user_ptr;
-        srpc_client_rpc_t *crpc;
-        srpc_server_rpc_t *srpc;
-        srpc_buffer_t     *buffer;
-        srpc_service_t    *sv;
-        srpc_msg_t        *msg;
-        srpc_msg_type_t    type;
-
-        LASSERT (!in_interrupt());
-
-        if (ev->status != 0) {
-                spin_lock(&srpc_data.rpc_glock);
-                srpc_data.rpc_counters.errors++;
-                spin_unlock(&srpc_data.rpc_glock);
-        }
+       struct srpc_service_cd  *scd;
+       srpc_event_t      *rpcev = ev->md.user_ptr;
+       srpc_client_rpc_t *crpc;
+       srpc_server_rpc_t *srpc;
+       srpc_buffer_t     *buffer;
+       srpc_service_t    *sv;
+       srpc_msg_t        *msg;
+       srpc_msg_type_t    type;
+
+       LASSERT(!in_interrupt());
+
+       if (ev->status != 0) {
+               __u32 errors;
+
+               spin_lock(&srpc_data.rpc_glock);
+               if (ev->status != -ECANCELED) /* cancellation is not an error */
+                       srpc_data.rpc_counters.errors++;
+               errors = srpc_data.rpc_counters.errors;
+               spin_unlock(&srpc_data.rpc_glock);
+
+               CNETERR("LNet event status %d type %d, RPC errors %u\n",
+                       ev->status, ev->type, errors);
+       }
 
         rpcev->ev_lnet = ev->type;
 
         switch (rpcev->ev_type) {
         default:
+               CERROR("Unknown event: status %d, type %d, lnet %d\n",
+                      rpcev->ev_status, rpcev->ev_type, rpcev->ev_lnet);
                 LBUG ();
         case SRPC_REQUEST_SENT:
                 if (ev->status == 0 && ev->type != LNET_EVENT_UNLINK) {
-                        spin_lock(&srpc_data.rpc_glock);
-                        srpc_data.rpc_counters.rpcs_sent++;
-                        spin_unlock(&srpc_data.rpc_glock);
+                       spin_lock(&srpc_data.rpc_glock);
+                       srpc_data.rpc_counters.rpcs_sent++;
+                       spin_unlock(&srpc_data.rpc_glock);
                 }
         case SRPC_REPLY_RCVD:
         case SRPC_BULK_REQ_RCVD:
                 crpc = rpcev->ev_data;
 
-                LASSERT (rpcev == &crpc->crpc_reqstev ||
-                         rpcev == &crpc->crpc_replyev ||
-                         rpcev == &crpc->crpc_bulkev);
+               if (rpcev != &crpc->crpc_reqstev &&
+                   rpcev != &crpc->crpc_replyev &&
+                   rpcev != &crpc->crpc_bulkev) {
+                       CERROR("rpcev %p, crpc %p, reqstev %p, replyev %p, bulkev %p\n",
+                              rpcev, crpc, &crpc->crpc_reqstev,
+                              &crpc->crpc_replyev, &crpc->crpc_bulkev);
+                       CERROR("Bad event: status %d, type %d, lnet %d\n",
+                              rpcev->ev_status, rpcev->ev_type, rpcev->ev_lnet);
+                       LBUG();
+               }
 
-                spin_lock(&crpc->crpc_lock);
+               spin_lock(&crpc->crpc_lock);
 
-                LASSERT (rpcev->ev_fired == 0);
-                rpcev->ev_fired  = 1;
-                rpcev->ev_status = (ev->type == LNET_EVENT_UNLINK) ? 
-                                                -EINTR : ev->status;
-                swi_schedule_workitem(&crpc->crpc_wi);
+               LASSERT(rpcev->ev_fired == 0);
+               rpcev->ev_fired  = 1;
+               rpcev->ev_status = (ev->type == LNET_EVENT_UNLINK) ?
+                                               -EINTR : ev->status;
+               swi_schedule_workitem(&crpc->crpc_wi);
 
-                spin_unlock(&crpc->crpc_lock);
-                break;
+               spin_unlock(&crpc->crpc_lock);
+               break;
 
-        case SRPC_REQUEST_RCVD:
-                sv = rpcev->ev_data;
+       case SRPC_REQUEST_RCVD:
+               scd = rpcev->ev_data;
+               sv = scd->scd_svc;
 
-                LASSERT (rpcev == &sv->sv_ev);
+               LASSERT(rpcev == &scd->scd_ev);
 
-                spin_lock(&sv->sv_lock);
+               spin_lock(&scd->scd_lock);
 
                 LASSERT (ev->unlinked);
                 LASSERT (ev->type == LNET_EVENT_PUT ||
@@ -1438,22 +1527,37 @@ srpc_lnet_ev_handler (lnet_event_t *ev)
                 buffer->buf_peer = ev->initiator;
                 buffer->buf_self = ev->target.nid;
 
-                sv->sv_nposted_msg--;
-                LASSERT (sv->sv_nposted_msg >= 0);
-
-                if (sv->sv_shuttingdown) {
-                        /* Leave buffer on sv->sv_posted_msgq since 
-                         * srpc_finish_service needs to traverse it. */
-                        spin_unlock(&sv->sv_lock);
-                        break;
-                }
-
-                list_del(&buffer->buf_list); /* from sv->sv_posted_msgq */
-                msg = &buffer->buf_msg;
-                type = srpc_service2request(sv->sv_id);
+               LASSERT(scd->scd_buf_nposted > 0);
+               scd->scd_buf_nposted--;
+
+               if (sv->sv_shuttingdown) {
+                       /* Leave buffer on scd->scd_buf_posted since
+                        * srpc_finish_service needs to traverse it. */
+                       spin_unlock(&scd->scd_lock);
+                       break;
+               }
+
+               if (scd->scd_buf_err_stamp != 0 &&
+                   scd->scd_buf_err_stamp < cfs_time_current_sec()) {
+                       /* re-enable adding buffer */
+                       scd->scd_buf_err_stamp = 0;
+                       scd->scd_buf_err = 0;
+               }
+
+               if (scd->scd_buf_err == 0 && /* adding buffer is enabled */
+                   scd->scd_buf_adjust == 0 &&
+                   scd->scd_buf_nposted < scd->scd_buf_low) {
+                       scd->scd_buf_adjust = MAX(scd->scd_buf_total / 2,
+                                                 SFW_TEST_WI_MIN);
+                       swi_schedule_workitem(&scd->scd_buf_wi);
+               }
+
+               list_del(&buffer->buf_list); /* from scd->scd_buf_posted */
+               msg = &buffer->buf_msg;
+               type = srpc_service2request(sv->sv_id);
 
                 if (ev->status != 0 || ev->mlength != sizeof(*msg) ||
-                    (msg->msg_type != type && 
+                    (msg->msg_type != type &&
                      msg->msg_type != __swab32(type)) ||
                     (msg->msg_magic != SRPC_MSG_MAGIC &&
                      msg->msg_magic != __swab32(SRPC_MSG_MAGIC))) {
@@ -1463,39 +1567,32 @@ srpc_lnet_ev_handler (lnet_event_t *ev)
                                 ev->status, ev->mlength,
                                 msg->msg_type, msg->msg_magic);
 
-                        /* NB might drop sv_lock in srpc_service_recycle_buffer,
-                         * sv_nposted_msg++ as an implicit reference to prevent
-                         * sv from disappearing under me */
-                        sv->sv_nposted_msg++;
-                        srpc_service_recycle_buffer(sv, buffer);
-                        sv->sv_nposted_msg--;
-                        spin_unlock(&sv->sv_lock);
-
-                        if (ev->status == 0) { /* status!=0 counted already */
-                                spin_lock(&srpc_data.rpc_glock);
-                                srpc_data.rpc_counters.errors++;
-                                spin_unlock(&srpc_data.rpc_glock);
-                        }
-                        break;
+                        /* NB can't call srpc_service_recycle_buffer here since
+                         * it may call LNetM[DE]Attach. The invalid magic tells
+                         * srpc_handle_rpc to drop this RPC */
+                        msg->msg_magic = 0;
                 }
 
-                if (!list_empty(&sv->sv_free_rpcq)) {
-                        srpc = list_entry(sv->sv_free_rpcq.next,
-                                          srpc_server_rpc_t, srpc_list);
-                        list_del(&srpc->srpc_list);
-
-                        srpc_init_server_rpc(srpc, sv, buffer);
-                        list_add_tail(&srpc->srpc_list, &sv->sv_active_rpcq);
-                        srpc_schedule_server_rpc(srpc);
-                } else {
-                        list_add_tail(&buffer->buf_list, &sv->sv_blocked_msgq);
-                }
-
-                spin_unlock(&sv->sv_lock);
-
-                spin_lock(&srpc_data.rpc_glock);
-                srpc_data.rpc_counters.rpcs_rcvd++;
-                spin_unlock(&srpc_data.rpc_glock);
+               if (!list_empty(&scd->scd_rpc_free)) {
+                       srpc = list_entry(scd->scd_rpc_free.next,
+                                         struct srpc_server_rpc,
+                                         srpc_list);
+                       list_del(&srpc->srpc_list);
+
+                       srpc_init_server_rpc(srpc, scd, buffer);
+                       list_add_tail(&srpc->srpc_list,
+                                     &scd->scd_rpc_active);
+                       swi_schedule_workitem(&srpc->srpc_wi);
+               } else {
+                       list_add_tail(&buffer->buf_list,
+                                     &scd->scd_buf_blocked);
+               }
+
+               spin_unlock(&scd->scd_lock);
+
+               spin_lock(&srpc_data.rpc_glock);
+               srpc_data.rpc_counters.rpcs_rcvd++;
+               spin_unlock(&srpc_data.rpc_glock);
                 break;
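
The SRPC_REQUEST_RCVD case above also implements a low-water refill policy: when posted receive buffers drop below scd_buf_low, and growing is neither pending nor disabled by a recent allocation error, it asks the buffer workitem for max(total / 2, SFW_TEST_WI_MIN) more. A small standalone sketch of that policy with assumed field names (SFW_TEST_WI_MIN below is a stand-in constant):

/* sketch only: low-water receive-buffer refill */
#include <stdio.h>

#define SFW_TEST_WI_MIN 16

struct sketch_cd {
	int buf_nposted;	/* receive buffers currently posted */
	int buf_total;		/* buffers owned in total */
	int buf_low;		/* low-water mark */
	int buf_adjust;		/* pending grow request */
	int buf_err;		/* nonzero: adding buffers disabled */
};

static int sketch_maybe_grow(struct sketch_cd *cd)
{
	if (cd->buf_err == 0 && cd->buf_adjust == 0 &&
	    cd->buf_nposted < cd->buf_low) {
		cd->buf_adjust = cd->buf_total / 2 > SFW_TEST_WI_MIN ?
				 cd->buf_total / 2 : SFW_TEST_WI_MIN;
		return 1;	/* caller schedules the buffer workitem */
	}
	return 0;
}

int main(void)
{
	struct sketch_cd cd = { .buf_nposted = 3, .buf_total = 64,
				.buf_low = 8 };

	if (sketch_maybe_grow(&cd))
		printf("grow by %d buffers\n", cd.buf_adjust); /* 32 */
	return 0;
}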
 
         case SRPC_BULK_GET_RPLD:
@@ -1503,37 +1600,36 @@ srpc_lnet_ev_handler (lnet_event_t *ev)
                          ev->type == LNET_EVENT_REPLY ||
                          ev->type == LNET_EVENT_UNLINK);
 
-                if (ev->type == LNET_EVENT_SEND && 
-                    ev->status == 0 && !ev->unlinked)
-                        break; /* wait for the final LNET_EVENT_REPLY */
+                if (!ev->unlinked)
+                        break; /* wait for final event */
 
         case SRPC_BULK_PUT_SENT:
                 if (ev->status == 0 && ev->type != LNET_EVENT_UNLINK) {
-                        spin_lock(&srpc_data.rpc_glock);
+                       spin_lock(&srpc_data.rpc_glock);
 
-                        if (rpcev->ev_type == SRPC_BULK_GET_RPLD)
-                                srpc_data.rpc_counters.bulk_get += ev->mlength;
-                        else
-                                srpc_data.rpc_counters.bulk_put += ev->mlength;
+                       if (rpcev->ev_type == SRPC_BULK_GET_RPLD)
+                               srpc_data.rpc_counters.bulk_get += ev->mlength;
+                       else
+                               srpc_data.rpc_counters.bulk_put += ev->mlength;
 
-                        spin_unlock(&srpc_data.rpc_glock);
-                }
-        case SRPC_REPLY_SENT:
-                srpc = rpcev->ev_data;
-                sv = srpc->srpc_service;
-
-                LASSERT (rpcev == &srpc->srpc_ev);
-
-                spin_lock(&sv->sv_lock);
-                rpcev->ev_fired  = 1;
-                rpcev->ev_status = (ev->type == LNET_EVENT_UNLINK) ? 
-                                                -EINTR : ev->status;
-                srpc_schedule_server_rpc(srpc);
-                spin_unlock(&sv->sv_lock);
-                break;
-        }
+                       spin_unlock(&srpc_data.rpc_glock);
+               }
+       case SRPC_REPLY_SENT:
+               srpc = rpcev->ev_data;
+               scd  = srpc->srpc_scd;
 
-        return;
+               LASSERT(rpcev == &srpc->srpc_ev);
+
+               spin_lock(&scd->scd_lock);
+
+               rpcev->ev_fired  = 1;
+               rpcev->ev_status = (ev->type == LNET_EVENT_UNLINK) ?
+                                  -EINTR : ev->status;
+               swi_schedule_workitem(&srpc->srpc_wi);
+
+               spin_unlock(&scd->scd_lock);
+               break;
+       }
 }
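
The counter change in this handler is the heart of LU-4181: LNet delivers events with status -ECANCELED when an MD is unlinked during normal shutdown, and counting those as errors made lst report bogus failures. A standalone sketch of the filtering rule (the counter below stands in for rpc_counters.errors):

/* sketch only: don't count cancelled events as errors */
#include <errno.h>
#include <stdio.h>

static unsigned int rpc_errors;

static void count_event_status(int status)
{
	if (status == 0)
		return;
	if (status != -ECANCELED)	/* cancellation is not an error */
		rpc_errors++;
	fprintf(stderr, "event status %d, RPC errors %u\n",
		status, rpc_errors);
}

int main(void)
{
	count_event_status(-EIO);	/* real failure: counted */
	count_event_status(-ECANCELED);	/* shutdown unlink: logged only */
	printf("errors=%u\n", rpc_errors);	/* 1, not 2 */
	return 0;
}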
 
 #ifndef __KERNEL__
@@ -1548,15 +1644,15 @@ srpc_check_event (int timeout)
         rc = LNetEQPoll(&srpc_data.rpc_lnet_eq, 1,
                         timeout * 1000, &ev, &i);
         if (rc == 0) return 0;
-        
+
         LASSERT (rc == -EOVERFLOW || rc == 1);
-        
+
         /* We can't afford to miss any events... */
         if (rc == -EOVERFLOW) {
                 CERROR ("Dropped an event!!!\n");
                 abort();
         }
-                
+
         srpc_lnet_ev_handler(&ev);
         return 1;
 }
@@ -1566,11 +1662,10 @@ srpc_check_event (int timeout)
 int
 srpc_startup (void)
 {
-        int i;
-        int rc;
+       int rc;
 
-        memset(&srpc_data, 0, sizeof(struct smoketest_rpc));
-        spin_lock_init(&srpc_data.rpc_glock);
+       memset(&srpc_data, 0, sizeof(struct smoketest_rpc));
+       spin_lock_init(&srpc_data.rpc_glock);
 
         /* 1 second pause to avoid timestamp reuse */
         cfs_pause(cfs_time_seconds(1));
@@ -1578,33 +1673,24 @@ srpc_startup (void)
 
         srpc_data.rpc_state = SRPC_STATE_NONE;
 
-        LIBCFS_ALLOC(srpc_data.rpc_peers,
-                     sizeof(struct list_head) * SRPC_PEER_HASH_SIZE);
-        if (srpc_data.rpc_peers == NULL) {
-                CERROR ("Failed to alloc peer hash.\n");
-                return -ENOMEM;
-        }
-
-        for (i = 0; i < SRPC_PEER_HASH_SIZE; i++)
-                CFS_INIT_LIST_HEAD(&srpc_data.rpc_peers[i]);
-
 #ifdef __KERNEL__
-        rc = LNetNIInit(LUSTRE_SRV_LNET_PID);
+       rc = LNetNIInit(LNET_PID_LUSTRE);
 #else
-        rc = LNetNIInit(getpid());
+       if (the_lnet.ln_server_mode_flag)
+               rc = LNetNIInit(LNET_PID_LUSTRE);
+       else
+               rc = LNetNIInit(getpid() | LNET_PID_USERFLAG);
 #endif
         if (rc < 0) {
                 CERROR ("LNetNIInit() has failed: %d\n", rc);
-                LIBCFS_FREE(srpc_data.rpc_peers,
-                            sizeof(struct list_head) * SRPC_PEER_HASH_SIZE);
-                return rc;
+               return rc;
         }
 
         srpc_data.rpc_state = SRPC_STATE_NI_INIT;
 
-        srpc_data.rpc_lnet_eq = LNET_EQ_NONE;
+       LNetInvalidateHandle(&srpc_data.rpc_lnet_eq);
 #ifdef __KERNEL__
-        rc = LNetEQAlloc(16, srpc_lnet_ev_handler, &srpc_data.rpc_lnet_eq);
+       rc = LNetEQAlloc(0, srpc_lnet_ev_handler, &srpc_data.rpc_lnet_eq);
 #else
         rc = LNetEQAlloc(10240, LNET_EQ_HANDLER_NONE, &srpc_data.rpc_lnet_eq);
 #endif
@@ -1613,17 +1699,13 @@ srpc_startup (void)
                 goto bail;
         }
 
-        rc = LNetSetLazyPortal(SRPC_FRAMEWORK_REQUEST_PORTAL);
-        LASSERT (rc == 0);
+       rc = LNetSetLazyPortal(SRPC_FRAMEWORK_REQUEST_PORTAL);
+       LASSERT(rc == 0);
+       rc = LNetSetLazyPortal(SRPC_REQUEST_PORTAL);
+       LASSERT(rc == 0);
 
         srpc_data.rpc_state = SRPC_STATE_EQ_INIT;
 
-        rc = swi_startup();
-        if (rc != 0)
-                goto bail;
-
-        srpc_data.rpc_state = SRPC_STATE_WI_INIT;
-
         rc = stt_startup();
 
 bail:
@@ -1649,7 +1731,7 @@ srpc_shutdown (void)
         default:
                 LBUG ();
         case SRPC_STATE_RUNNING:
-                spin_lock(&srpc_data.rpc_glock);
+               spin_lock(&srpc_data.rpc_glock);
 
                 for (i = 0; i <= SRPC_SERVICE_MAX_ID; i++) {
                         srpc_service_t *sv = srpc_data.rpc_services[i];
@@ -1659,42 +1741,20 @@ srpc_shutdown (void)
                                   i, sv->sv_name);
                 }
 
-                spin_unlock(&srpc_data.rpc_glock);
+               spin_unlock(&srpc_data.rpc_glock);
 
                 stt_shutdown();
 
-        case SRPC_STATE_WI_INIT:
-                swi_shutdown();
-
         case SRPC_STATE_EQ_INIT:
                 rc = LNetClearLazyPortal(SRPC_FRAMEWORK_REQUEST_PORTAL);
+               LASSERT(rc == 0);
+               rc = LNetClearLazyPortal(SRPC_REQUEST_PORTAL);
                 LASSERT (rc == 0);
                 rc = LNetEQFree(srpc_data.rpc_lnet_eq);
                 LASSERT (rc == 0); /* the EQ should have no user by now */
 
         case SRPC_STATE_NI_INIT:
                 LNetNIFini();
-                break;
-        }
-
-        /* srpc_peer_t's are kept in hash until shutdown */
-        for (i = 0; i < SRPC_PEER_HASH_SIZE; i++) {
-                srpc_peer_t *peer;
-
-                while (!list_empty(&srpc_data.rpc_peers[i])) {
-                        peer = list_entry(srpc_data.rpc_peers[i].next,
-                                          srpc_peer_t, stp_list);
-                        list_del(&peer->stp_list);
-
-                        LASSERT (list_empty(&peer->stp_rpcq));
-                        LASSERT (list_empty(&peer->stp_ctl_rpcq));
-                        LASSERT (peer->stp_credits == SRPC_PEER_CREDITS);
-
-                        LIBCFS_FREE(peer, sizeof(srpc_peer_t));
-                }
         }
 
-        LIBCFS_FREE(srpc_data.rpc_peers,
-                    sizeof(struct list_head) * SRPC_PEER_HASH_SIZE);
         return;
 }
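
srpc_shutdown() above uses the classic reverse-order teardown switch: it enters at whatever state startup reached and falls through each earlier stage, so a partially completed startup is unwound exactly. A minimal sketch of the idiom with illustrative stage names:

/* sketch only: fall-through teardown keyed on startup state */
#include <stdio.h>

enum sketch_state { ST_NONE, ST_NI_INIT, ST_EQ_INIT, ST_RUNNING };

static void sketch_shutdown(enum sketch_state state)
{
	switch (state) {
	default:
		fprintf(stderr, "bad state %d\n", state);
		return;
	case ST_RUNNING:
		puts("remove services, stop timer thread");
		/* fall thru */
	case ST_EQ_INIT:
		puts("clear lazy portals, free event queue");
		/* fall thru */
	case ST_NI_INIT:
		puts("LNetNIFini()");
		/* fall thru */
	case ST_NONE:
		break;
	}
}

int main(void)
{
	/* e.g. startup failed right after allocating the EQ:
	 * only the EQ and NI stages are unwound */
	sketch_shutdown(ST_EQ_INIT);
	return 0;
}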