*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2012, Intel Corporation.
+ * Copyright (c) 2012, 2017, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
#include "selftest.h"
-typedef enum {
+enum srpc_state {
SRPC_STATE_NONE,
SRPC_STATE_NI_INIT,
SRPC_STATE_EQ_INIT,
SRPC_STATE_RUNNING,
SRPC_STATE_STOPPING,
-} srpc_state_t;
+};
-struct smoketest_rpc {
+static struct smoketest_rpc {
spinlock_t rpc_glock; /* global lock */
- srpc_service_t *rpc_services[SRPC_SERVICE_MAX_ID + 1];
- lnet_handle_eq_t rpc_lnet_eq; /* _the_ LNet event queue */
- srpc_state_t rpc_state;
- srpc_counters_t rpc_counters;
- __u64 rpc_matchbits; /* matchbits counter */
+ struct srpc_service *rpc_services[SRPC_SERVICE_MAX_ID + 1];
+ struct lnet_handle_eq rpc_lnet_eq; /* _the_ LNet event queue */
+ enum srpc_state rpc_state;
+ struct srpc_counters rpc_counters;
+ __u64 rpc_matchbits; /* matchbits counter */
} srpc_data;
static inline int
}
/* forward ref's */
-int srpc_handle_rpc (swi_workitem_t *wi);
+static int srpc_handle_rpc(struct swi_workitem *wi);
-void srpc_get_counters (srpc_counters_t *cnt)
+void srpc_get_counters(struct srpc_counters *cnt)
{
spin_lock(&srpc_data.rpc_glock);
*cnt = srpc_data.rpc_counters;
spin_unlock(&srpc_data.rpc_glock);
}
-void srpc_set_counters (const srpc_counters_t *cnt)
+void srpc_set_counters(const struct srpc_counters *cnt)
{
spin_lock(&srpc_data.rpc_glock);
srpc_data.rpc_counters = *cnt;
spin_unlock(&srpc_data.rpc_glock);
}
-int
-srpc_add_bulk_page(srpc_bulk_t *bk, struct page *pg, int i, int nob)
+static int
+srpc_add_bulk_page(struct srpc_bulk *bk, struct page *pg, int i, int off,
+ int nob)
{
- nob = min(nob, (int)PAGE_CACHE_SIZE);
-
- LASSERT(nob > 0);
- LASSERT(i >= 0 && i < bk->bk_niov);
+ LASSERT(off < PAGE_SIZE);
+ LASSERT(nob > 0 && nob <= PAGE_SIZE);
-#ifdef __KERNEL__
- bk->bk_iovs[i].kiov_offset = 0;
+ bk->bk_iovs[i].kiov_offset = off;
bk->bk_iovs[i].kiov_page = pg;
bk->bk_iovs[i].kiov_len = nob;
-#else
- LASSERT(bk->bk_pages != NULL);
-
- bk->bk_pages[i] = pg;
- bk->bk_iovs[i].iov_len = nob;
- bk->bk_iovs[i].iov_base = page_address(pg);
-#endif
return nob;
}
void
-srpc_free_bulk (srpc_bulk_t *bk)
+srpc_free_bulk(struct srpc_bulk *bk)
{
int i;
struct page *pg;
LASSERT (bk != NULL);
-#ifndef __KERNEL__
- LASSERT (bk->bk_pages != NULL);
-#endif
for (i = 0; i < bk->bk_niov; i++) {
-#ifdef __KERNEL__
pg = bk->bk_iovs[i].kiov_page;
-#else
- pg = bk->bk_pages[i];
-#endif
if (pg == NULL) break;
__free_page(pg);
}
-#ifndef __KERNEL__
- LIBCFS_FREE(bk->bk_pages, sizeof(struct page *) * bk->bk_niov);
-#endif
- LIBCFS_FREE(bk, offsetof(srpc_bulk_t, bk_iovs[bk->bk_niov]));
+ LIBCFS_FREE(bk, offsetof(struct srpc_bulk, bk_iovs[bk->bk_niov]));
return;
}
-srpc_bulk_t *
-srpc_alloc_bulk(int cpt, unsigned bulk_npg, unsigned bulk_len, int sink)
+struct srpc_bulk *
+srpc_alloc_bulk(int cpt, unsigned bulk_off, unsigned bulk_npg,
+ unsigned bulk_len, int sink)
{
- srpc_bulk_t *bk;
+ struct srpc_bulk *bk;
int i;
LASSERT(bulk_npg > 0 && bulk_npg <= LNET_MAX_IOV);
LIBCFS_CPT_ALLOC(bk, lnet_cpt_table(), cpt,
- offsetof(srpc_bulk_t, bk_iovs[bulk_npg]));
+ offsetof(struct srpc_bulk, bk_iovs[bulk_npg]));
if (bk == NULL) {
CERROR("Can't allocate descriptor for %d pages\n", bulk_npg);
return NULL;
}
- memset(bk, 0, offsetof(srpc_bulk_t, bk_iovs[bulk_npg]));
+ memset(bk, 0, offsetof(struct srpc_bulk, bk_iovs[bulk_npg]));
bk->bk_sink = sink;
bk->bk_len = bulk_len;
bk->bk_niov = bulk_npg;
-#ifndef __KERNEL__
- {
- struct page **pages;
-
- LIBCFS_CPT_ALLOC(pages, lnet_cpt_table(), cpt,
- sizeof(struct page *) * bulk_npg);
- if (pages == NULL) {
- LIBCFS_FREE(bk, offsetof(srpc_bulk_t,
- bk_iovs[bulk_npg]));
- CERROR("Can't allocate page array for %d pages\n",
- bulk_npg);
- return NULL;
- }
-
- memset(pages, 0, sizeof(struct page *) * bulk_npg);
- bk->bk_pages = pages;
- }
-#endif
for (i = 0; i < bulk_npg; i++) {
struct page *pg;
int nob;
- pg = cfs_page_cpt_alloc(lnet_cpt_table(), cpt, GFP_IOFS);
+ pg = cfs_page_cpt_alloc(lnet_cpt_table(), cpt, GFP_KERNEL);
if (pg == NULL) {
CERROR("Can't allocate page %d of %d\n", i, bulk_npg);
srpc_free_bulk(bk);
return NULL;
}
- nob = srpc_add_bulk_page(bk, pg, i, bulk_len);
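+ /* the first page may start at bulk_off; later pages start at offset 0 */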
+ nob = min_t(unsigned, bulk_off + bulk_len, PAGE_SIZE) -
+ bulk_off;
+
+ srpc_add_bulk_page(bk, pg, i, bulk_off, nob);
bulk_len -= nob;
+ bulk_off = 0;
}
return bk;
return id;
}
-void
+static void
srpc_init_server_rpc(struct srpc_server_rpc *rpc,
struct srpc_service_cd *scd,
struct srpc_buffer *buffer)
rpc->srpc_reqstbuf = buffer;
rpc->srpc_peer = buffer->buf_peer;
rpc->srpc_self = buffer->buf_self;
- LNetInvalidateHandle(&rpc->srpc_replymdh);
+ LNetInvalidateMDHandle(&rpc->srpc_replymdh);
}
static void
}
int
-srpc_remove_service (srpc_service_t *sv)
+srpc_remove_service(struct srpc_service *sv)
{
int id = sv->sv_id;
return 0;
}
-int
+static int
srpc_post_passive_rdma(int portal, int local, __u64 matchbits, void *buf,
- int len, int options, lnet_process_id_t peer,
- lnet_handle_md_t *mdh, srpc_event_t *ev)
+ int len, int options, struct lnet_process_id peer,
+ struct lnet_handle_md *mdh, struct srpc_event *ev)
{
int rc;
- lnet_md_t md;
- lnet_handle_me_t meh;
+ struct lnet_md md;
+ struct lnet_handle_me meh;
rc = LNetMEAttach(portal, peer, matchbits, 0, LNET_UNLINK,
local ? LNET_INS_LOCAL : LNET_INS_AFTER, &meh);
}
CDEBUG (D_NET,
- "Posted passive RDMA: peer %s, portal %d, matchbits "LPX64"\n",
+ "Posted passive RDMA: peer %s, portal %d, matchbits %#llx\n",
libcfs_id2str(peer), portal, matchbits);
return 0;
}
-int
+static int
srpc_post_active_rdma(int portal, __u64 matchbits, void *buf, int len,
- int options, lnet_process_id_t peer, lnet_nid_t self,
- lnet_handle_md_t *mdh, srpc_event_t *ev)
+ int options, struct lnet_process_id peer,
+ lnet_nid_t self, struct lnet_handle_md *mdh,
+ struct srpc_event *ev)
{
- int rc;
- lnet_md_t md;
+ int rc;
+ struct lnet_md md;
md.user_ptr = ev;
md.start = buf;
}
if (rc != 0) {
- CERROR ("LNet%s(%s, %d, "LPD64") failed: %d\n",
+ CERROR ("LNet%s(%s, %d, %lld) failed: %d\n",
((options & LNET_MD_OP_PUT) != 0) ? "Put" : "Get",
libcfs_id2str(peer), portal, matchbits, rc);
LASSERT (rc == 0);
} else {
CDEBUG (D_NET,
- "Posted active RDMA: peer %s, portal %u, matchbits "LPX64"\n",
+ "Posted active RDMA: peer %s, portal %u, matchbits %#llx\n",
libcfs_id2str(peer), portal, matchbits);
}
return 0;
}
-int
-srpc_post_active_rqtbuf(lnet_process_id_t peer, int service, void *buf,
- int len, lnet_handle_md_t *mdh, srpc_event_t *ev)
-{
- return srpc_post_active_rdma(srpc_serv_portal(service), service,
- buf, len, LNET_MD_OP_PUT, peer,
- LNET_NID_ANY, mdh, ev);
-}
-
-int
+static int
srpc_post_passive_rqtbuf(int service, int local, void *buf, int len,
- lnet_handle_md_t *mdh, srpc_event_t *ev)
+ struct lnet_handle_md *mdh, struct srpc_event *ev)
{
- lnet_process_id_t any = {0};
+ struct lnet_process_id any = {0};
any.nid = LNET_NID_ANY;
any.pid = LNET_PID_ANY;
LNET_MD_OP_PUT, any, mdh, ev);
}
-int
+static int
srpc_service_post_buffer(struct srpc_service_cd *scd, struct srpc_buffer *buf)
+__must_hold(&scd->scd_lock)
{
struct srpc_service *sv = scd->scd_svc;
struct srpc_msg *msg = &buf->buf_msg;
int rc;
- LNetInvalidateHandle(&buf->buf_mdh);
+ LNetInvalidateMDHandle(&buf->buf_mdh);
list_add(&buf->buf_list, &scd->scd_buf_posted);
scd->scd_buf_nposted++;
spin_unlock(&scd->scd_lock);
}
if (rc != 0) {
- scd->scd_buf_err_stamp = cfs_time_current_sec();
+ scd->scd_buf_err_stamp = ktime_get_real_seconds();
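+ /* record when posting failed; the error state is cleared once this stamp is in the past */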
scd->scd_buf_err = rc;
LASSERT(scd->scd_buf_posting > 0);
}
if (scd->scd_buf_nposted > 0) {
- CDEBUG(D_NET, "waiting for %d posted buffers to unlink",
- scd->scd_buf_nposted);
+ CDEBUG(D_NET, "waiting for %d posted buffers to "
+ "unlink\n", scd->scd_buf_nposted);
spin_unlock(&scd->scd_lock);
return 0;
}
}
-/* called with sv->sv_lock held */
+/* called with scd->scd_lock held */
-void
-srpc_service_recycle_buffer(struct srpc_service_cd *scd, srpc_buffer_t *buf)
+static void
+srpc_service_recycle_buffer(struct srpc_service_cd *scd,
+ struct srpc_buffer *buf)
+__must_hold(&scd->scd_lock)
{
if (!scd->scd_svc->sv_shuttingdown && scd->scd_buf_adjust >= 0) {
if (srpc_service_post_buffer(scd, buf) != 0) {
}
void
-srpc_shutdown_service(srpc_service_t *sv)
+srpc_shutdown_service(struct srpc_service *sv)
{
struct srpc_service_cd *scd;
struct srpc_server_rpc *rpc;
- srpc_buffer_t *buf;
+ struct srpc_buffer *buf;
int i;
CDEBUG(D_NET, "Shutting down service: id %d, name %s\n",
}
}
-int
-srpc_send_request (srpc_client_rpc_t *rpc)
+static int
+srpc_send_request(struct srpc_client_rpc *rpc)
{
- srpc_event_t *ev = &rpc->crpc_reqstev;
+ struct srpc_event *ev = &rpc->crpc_reqstev;
int rc;
ev->ev_fired = 0;
ev->ev_data = rpc;
ev->ev_type = SRPC_REQUEST_SENT;
- rc = srpc_post_active_rqtbuf(rpc->crpc_dest, rpc->crpc_service,
- &rpc->crpc_reqstmsg, sizeof(srpc_msg_t),
- &rpc->crpc_reqstmdh, ev);
+ rc = srpc_post_active_rdma(srpc_serv_portal(rpc->crpc_service),
+ rpc->crpc_service, &rpc->crpc_reqstmsg,
+ sizeof(struct srpc_msg), LNET_MD_OP_PUT,
+ rpc->crpc_dest, LNET_NID_ANY,
+ &rpc->crpc_reqstmdh, ev);
if (rc != 0) {
LASSERT (rc == -ENOMEM);
ev->ev_fired = 1; /* no more event expected */
return rc;
}
-int
-srpc_prepare_reply (srpc_client_rpc_t *rpc)
+static int
+srpc_prepare_reply(struct srpc_client_rpc *rpc)
{
- srpc_event_t *ev = &rpc->crpc_replyev;
- __u64 *id = &rpc->crpc_reqstmsg.msg_body.reqst.rpyid;
- int rc;
+ struct srpc_event *ev = &rpc->crpc_replyev;
+ u64 *id = &rpc->crpc_reqstmsg.msg_body.reqst.rpyid;
+ int rc;
ev->ev_fired = 0;
ev->ev_data = rpc;
*id = srpc_next_id();
rc = srpc_post_passive_rdma(SRPC_RDMA_PORTAL, 0, *id,
- &rpc->crpc_replymsg, sizeof(srpc_msg_t),
+ &rpc->crpc_replymsg,
+ sizeof(struct srpc_msg),
LNET_MD_OP_PUT, rpc->crpc_dest,
&rpc->crpc_replymdh, ev);
if (rc != 0) {
return rc;
}
-int
-srpc_prepare_bulk (srpc_client_rpc_t *rpc)
+static int
+srpc_prepare_bulk(struct srpc_client_rpc *rpc)
{
- srpc_bulk_t *bk = &rpc->crpc_bulk;
- srpc_event_t *ev = &rpc->crpc_bulkev;
+ struct srpc_bulk *bk = &rpc->crpc_bulk;
+ struct srpc_event *ev = &rpc->crpc_bulkev;
__u64 *id = &rpc->crpc_reqstmsg.msg_body.reqst.bulkid;
int rc;
int opt;
if (bk->bk_niov == 0) return 0; /* nothing to do */
opt = bk->bk_sink ? LNET_MD_OP_PUT : LNET_MD_OP_GET;
-#ifdef __KERNEL__
opt |= LNET_MD_KIOV;
-#else
- opt |= LNET_MD_IOVEC;
-#endif
ev->ev_fired = 0;
ev->ev_data = rpc;
return rc;
}
-int
-srpc_do_bulk (srpc_server_rpc_t *rpc)
+static int
+srpc_do_bulk(struct srpc_server_rpc *rpc)
{
- srpc_event_t *ev = &rpc->srpc_ev;
- srpc_bulk_t *bk = rpc->srpc_bulk;
+ struct srpc_event *ev = &rpc->srpc_ev;
+ struct srpc_bulk *bk = rpc->srpc_bulk;
__u64 id = rpc->srpc_reqstbuf->buf_msg.msg_body.reqst.bulkid;
int rc;
int opt;
LASSERT (bk != NULL);
opt = bk->bk_sink ? LNET_MD_OP_GET : LNET_MD_OP_PUT;
-#ifdef __KERNEL__
opt |= LNET_MD_KIOV;
-#else
- opt |= LNET_MD_IOVEC;
-#endif
ev->ev_fired = 0;
ev->ev_data = rpc;
}
/* only called from srpc_handle_rpc */
-void
-srpc_server_rpc_done(srpc_server_rpc_t *rpc, int status)
+static void
+srpc_server_rpc_done(struct srpc_server_rpc *rpc, int status)
{
struct srpc_service_cd *scd = rpc->srpc_scd;
struct srpc_service *sv = scd->scd_svc;
- srpc_buffer_t *buffer;
+ struct srpc_buffer *buffer;
LASSERT (status != 0 || rpc->srpc_wi.swi_state == SWI_STATE_DONE);
if (!sv->sv_shuttingdown && !list_empty(&scd->scd_buf_blocked)) {
buffer = list_entry(scd->scd_buf_blocked.next,
- srpc_buffer_t, buf_list);
+ struct srpc_buffer, buf_list);
list_del(&buffer->buf_list);
srpc_init_server_rpc(rpc, scd, buffer);
}
/* handles an incoming RPC */
-int
-srpc_handle_rpc(swi_workitem_t *wi)
+static int
+srpc_handle_rpc(struct swi_workitem *wi)
{
struct srpc_server_rpc *rpc = wi->swi_workitem.wi_data;
struct srpc_service_cd *scd = rpc->srpc_scd;
struct srpc_service *sv = scd->scd_svc;
- srpc_event_t *ev = &rpc->srpc_ev;
+ struct srpc_event *ev = &rpc->srpc_ev;
int rc = 0;
LASSERT(wi == &rpc->srpc_wi);
default:
LBUG ();
case SWI_STATE_NEWBORN: {
- srpc_msg_t *msg;
- srpc_generic_reply_t *reply;
+ struct srpc_msg *msg;
+ struct srpc_generic_reply *reply;
msg = &rpc->srpc_reqstbuf->buf_msg;
reply = &rpc->srpc_replymsg.msg_body.reply;
return 0;
}
-void
+static void
srpc_client_rpc_expired (void *data)
{
- srpc_client_rpc_t *rpc = data;
+ struct srpc_client_rpc *rpc = data;
CWARN ("Client RPC expired: service %d, peer %s, timeout %d.\n",
rpc->crpc_service, libcfs_id2str(rpc->crpc_dest),
spin_unlock(&srpc_data.rpc_glock);
}
-inline void
-srpc_add_client_rpc_timer(srpc_client_rpc_t *rpc)
+static void
+srpc_add_client_rpc_timer(struct srpc_client_rpc *rpc)
{
- stt_timer_t *timer = &rpc->crpc_timer;
+ struct stt_timer *timer = &rpc->crpc_timer;
if (rpc->crpc_timeout == 0)
return;
INIT_LIST_HEAD(&timer->stt_list);
timer->stt_data = rpc;
timer->stt_func = srpc_client_rpc_expired;
- timer->stt_expires = cfs_time_add(rpc->crpc_timeout,
- cfs_time_current_sec());
+ timer->stt_expires = ktime_get_real_seconds() + rpc->crpc_timeout;
stt_add_timer(timer);
return;
}
*
* Upon exit the RPC expiry timer is not queued and the handler is not
* running on any CPU. */
-void
-srpc_del_client_rpc_timer (srpc_client_rpc_t *rpc)
+static void
+srpc_del_client_rpc_timer(struct srpc_client_rpc *rpc)
{
/* timer not planted or already exploded */
if (rpc->crpc_timeout == 0)
return;
- /* timer sucessfully defused */
+ /* timer successfully defused */
if (stt_del_timer(&rpc->crpc_timer))
return;
-#ifdef __KERNEL__
/* timer detonated, wait for it to explode */
while (rpc->crpc_timeout != 0) {
spin_unlock(&rpc->crpc_lock);
spin_lock(&rpc->crpc_lock);
}
-#else
- LBUG(); /* impossible in single-threaded runtime */
-#endif
}
-void
-srpc_client_rpc_done (srpc_client_rpc_t *rpc, int status)
+static void
+srpc_client_rpc_done(struct srpc_client_rpc *rpc, int status)
{
- swi_workitem_t *wi = &rpc->crpc_wi;
+ struct swi_workitem *wi = &rpc->crpc_wi;
LASSERT(status != 0 || wi->swi_state == SWI_STATE_DONE);
/* sends an outgoing RPC */
int
-srpc_send_rpc (swi_workitem_t *wi)
+srpc_send_rpc(struct swi_workitem *wi)
{
int rc = 0;
- srpc_client_rpc_t *rpc;
- srpc_msg_t *reply;
+ struct srpc_client_rpc *rpc;
+ struct srpc_msg *reply;
int do_bulk;
LASSERT(wi != NULL);
wi->swi_state = SWI_STATE_REQUEST_SENT;
/* perhaps more events, fall thru */
case SWI_STATE_REQUEST_SENT: {
- srpc_msg_type_t type = srpc_service2reply(rpc->crpc_service);
+ enum srpc_msg_type type;
+
+ type = srpc_service2reply(rpc->crpc_service);
if (!rpc->crpc_replyev.ev_fired) break;
return 0;
}
-srpc_client_rpc_t *
-srpc_create_client_rpc (lnet_process_id_t peer, int service,
- int nbulkiov, int bulklen,
- void (*rpc_done)(srpc_client_rpc_t *),
- void (*rpc_fini)(srpc_client_rpc_t *), void *priv)
+struct srpc_client_rpc *
+srpc_create_client_rpc(struct lnet_process_id peer, int service,
+ int nbulkiov, int bulklen,
+ void (*rpc_done)(struct srpc_client_rpc *),
+ void (*rpc_fini)(struct srpc_client_rpc *), void *priv)
{
- srpc_client_rpc_t *rpc;
+ struct srpc_client_rpc *rpc;
- LIBCFS_ALLOC(rpc, offsetof(srpc_client_rpc_t,
+ LIBCFS_ALLOC(rpc, offsetof(struct srpc_client_rpc,
crpc_bulk.bk_iovs[nbulkiov]));
if (rpc == NULL)
return NULL;
/* called with rpc->crpc_lock held */
void
-srpc_abort_rpc (srpc_client_rpc_t *rpc, int why)
+srpc_abort_rpc(struct srpc_client_rpc *rpc, int why)
{
LASSERT (why != 0);
/* called with rpc->crpc_lock held */
void
-srpc_post_rpc (srpc_client_rpc_t *rpc)
+srpc_post_rpc(struct srpc_client_rpc *rpc)
{
LASSERT (!rpc->crpc_aborted);
LASSERT (srpc_data.rpc_state == SRPC_STATE_RUNNING);
int
srpc_send_reply(struct srpc_server_rpc *rpc)
{
- srpc_event_t *ev = &rpc->srpc_ev;
+ struct srpc_event *ev = &rpc->srpc_ev;
struct srpc_msg *msg = &rpc->srpc_replymsg;
struct srpc_buffer *buffer = rpc->srpc_reqstbuf;
struct srpc_service_cd *scd = rpc->srpc_scd;
}
-/* when in kernel always called with LNET_LOCK() held, and in thread context */
+/* always called with LNET_LOCK() held, and in thread context */
-void
-srpc_lnet_ev_handler(lnet_event_t *ev)
+static void
+srpc_lnet_ev_handler(struct lnet_event *ev)
{
struct srpc_service_cd *scd;
- srpc_event_t *rpcev = ev->md.user_ptr;
- srpc_client_rpc_t *crpc;
- srpc_server_rpc_t *srpc;
- srpc_buffer_t *buffer;
- srpc_service_t *sv;
- srpc_msg_t *msg;
- srpc_msg_type_t type;
+ struct srpc_event *rpcev = ev->md.user_ptr;
+ struct srpc_client_rpc *crpc;
+ struct srpc_server_rpc *srpc;
+ struct srpc_buffer *buffer;
+ struct srpc_service *sv;
+ struct srpc_msg *msg;
+ enum srpc_msg_type type;
LASSERT (!in_interrupt());
if (ev->status != 0) {
+ __u32 errors;
+
spin_lock(&srpc_data.rpc_glock);
- srpc_data.rpc_counters.errors++;
+ if (ev->status != -ECANCELED) /* cancellation is not an error */
+ srpc_data.rpc_counters.errors++;
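+ /* snapshot the error count under the lock for the console message below */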
+ errors = srpc_data.rpc_counters.errors;
spin_unlock(&srpc_data.rpc_glock);
+
+ CNETERR("LNet event status %d type %d, RPC errors %u\n",
+ ev->status, ev->type, errors);
}
rpcev->ev_lnet = ev->type;
LASSERT (ev->type != LNET_EVENT_UNLINK ||
sv->sv_shuttingdown);
- buffer = container_of(ev->md.start, srpc_buffer_t, buf_msg);
- buffer->buf_peer = ev->initiator;
+ buffer = container_of(ev->md.start, struct srpc_buffer,
+ buf_msg);
+ buffer->buf_peer = ev->source;
buffer->buf_self = ev->target.nid;
LASSERT(scd->scd_buf_nposted > 0);
}
if (scd->scd_buf_err_stamp != 0 &&
- scd->scd_buf_err_stamp < cfs_time_current_sec()) {
+ scd->scd_buf_err_stamp < ktime_get_real_seconds()) {
/* re-enable adding buffer */
scd->scd_buf_err_stamp = 0;
scd->scd_buf_err = 0;
}
}
-#ifndef __KERNEL__
-
-int
-srpc_check_event (int timeout)
-{
- lnet_event_t ev;
- int rc;
- int i;
-
- rc = LNetEQPoll(&srpc_data.rpc_lnet_eq, 1,
- timeout * 1000, &ev, &i);
- if (rc == 0) return 0;
-
- LASSERT (rc == -EOVERFLOW || rc == 1);
-
- /* We can't affort to miss any events... */
- if (rc == -EOVERFLOW) {
- CERROR ("Dropped an event!!!\n");
- abort();
- }
-
- srpc_lnet_ev_handler(&ev);
- return 1;
-}
-
-#endif
int
srpc_startup (void)
memset(&srpc_data, 0, sizeof(struct smoketest_rpc));
spin_lock_init(&srpc_data.rpc_glock);
- /* 1 second pause to avoid timestamp reuse */
- cfs_pause(cfs_time_seconds(1));
- srpc_data.rpc_matchbits = ((__u64) cfs_time_current_sec()) << 48;
+ /* 1 second pause to avoid timestamp reuse */
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1));
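+ /* timestamp in the top 16 bits; the low 48 bits form the matchbits counter */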
+ srpc_data.rpc_matchbits = ((__u64) ktime_get_real_seconds()) << 48;
- srpc_data.rpc_state = SRPC_STATE_NONE;
+ srpc_data.rpc_state = SRPC_STATE_NONE;
-#ifdef __KERNEL__
- rc = LNetNIInit(LUSTRE_SRV_LNET_PID);
-#else
- if (the_lnet.ln_server_mode_flag)
- rc = LNetNIInit(LUSTRE_SRV_LNET_PID);
- else
- rc = LNetNIInit(getpid() | LNET_PID_USERFLAG);
-#endif
+ rc = LNetNIInit(LNET_PID_LUSTRE);
if (rc < 0) {
CERROR ("LNetNIInit() has failed: %d\n", rc);
return rc;
srpc_data.rpc_state = SRPC_STATE_NI_INIT;
- LNetInvalidateHandle(&srpc_data.rpc_lnet_eq);
-#ifdef __KERNEL__
+ LNetInvalidateEQHandle(&srpc_data.rpc_lnet_eq);
rc = LNetEQAlloc(0, srpc_lnet_ev_handler, &srpc_data.rpc_lnet_eq);
-#else
- rc = LNetEQAlloc(10240, LNET_EQ_HANDLER_NONE, &srpc_data.rpc_lnet_eq);
-#endif
if (rc != 0) {
CERROR("LNetEQAlloc() has failed: %d\n", rc);
goto bail;
spin_lock(&srpc_data.rpc_glock);
for (i = 0; i <= SRPC_SERVICE_MAX_ID; i++) {
- srpc_service_t *sv = srpc_data.rpc_services[i];
+ struct srpc_service *sv = srpc_data.rpc_services[i];
LASSERTF (sv == NULL,
"service not empty: id %d, name %s\n",