* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2012, Intel Corporation.
+ * Copyright (c) 2012, 2015, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
SRPC_STATE_STOPPING,
} srpc_state_t;
-struct smoketest_rpc {
+static struct smoketest_rpc {
spinlock_t rpc_glock; /* global lock */
srpc_service_t *rpc_services[SRPC_SERVICE_MAX_ID + 1];
lnet_handle_eq_t rpc_lnet_eq; /* _the_ LNet event queue */
spin_unlock(&srpc_data.rpc_glock);
}
-int
+static int
srpc_add_bulk_page(srpc_bulk_t *bk, struct page *pg, int i, int nob)
{
nob = min(nob, (int)PAGE_CACHE_SIZE);
LASSERT(nob > 0);
LASSERT(i >= 0 && i < bk->bk_niov);
-#ifdef __KERNEL__
bk->bk_iovs[i].kiov_offset = 0;
bk->bk_iovs[i].kiov_page = pg;
bk->bk_iovs[i].kiov_len = nob;
-#else
- LASSERT(bk->bk_pages != NULL);
-
- bk->bk_pages[i] = pg;
- bk->bk_iovs[i].iov_len = nob;
- bk->bk_iovs[i].iov_base = page_address(pg);
-#endif
return nob;
}
struct page *pg;
LASSERT (bk != NULL);
-#ifndef __KERNEL__
- LASSERT (bk->bk_pages != NULL);
-#endif
for (i = 0; i < bk->bk_niov; i++) {
-#ifdef __KERNEL__
pg = bk->bk_iovs[i].kiov_page;
-#else
- pg = bk->bk_pages[i];
-#endif
if (pg == NULL) break;
__free_page(pg);
}
-#ifndef __KERNEL__
- LIBCFS_FREE(bk->bk_pages, sizeof(struct page *) * bk->bk_niov);
-#endif
LIBCFS_FREE(bk, offsetof(srpc_bulk_t, bk_iovs[bk->bk_niov]));
return;
}
bk->bk_sink = sink;
bk->bk_len = bulk_len;
bk->bk_niov = bulk_npg;
-#ifndef __KERNEL__
- {
- struct page **pages;
-
- LIBCFS_CPT_ALLOC(pages, lnet_cpt_table(), cpt,
- sizeof(struct page *) * bulk_npg);
- if (pages == NULL) {
- LIBCFS_FREE(bk, offsetof(srpc_bulk_t,
- bk_iovs[bulk_npg]));
- CERROR("Can't allocate page array for %d pages\n",
- bulk_npg);
- return NULL;
- }
-
- memset(pages, 0, sizeof(struct page *) * bulk_npg);
- bk->bk_pages = pages;
- }
-#endif
for (i = 0; i < bulk_npg; i++) {
struct page *pg;
int nob;
- pg = cfs_page_cpt_alloc(lnet_cpt_table(), cpt, GFP_IOFS);
+ pg = cfs_page_cpt_alloc(lnet_cpt_table(), cpt, GFP_KERNEL);
if (pg == NULL) {
CERROR("Can't allocate page %d of %d\n", i, bulk_npg);
srpc_free_bulk(bk);
return id;
}
-void
+static void
srpc_init_server_rpc(struct srpc_server_rpc *rpc,
struct srpc_service_cd *scd,
struct srpc_buffer *buffer)
return 0;
}
-int
+static int
srpc_post_passive_rdma(int portal, int local, __u64 matchbits, void *buf,
int len, int options, lnet_process_id_t peer,
lnet_handle_md_t *mdh, srpc_event_t *ev)
}
CDEBUG (D_NET,
- "Posted passive RDMA: peer %s, portal %d, matchbits "LPX64"\n",
+ "Posted passive RDMA: peer %s, portal %d, matchbits %#llx\n",
libcfs_id2str(peer), portal, matchbits);
return 0;
}
-int
+static int
srpc_post_active_rdma(int portal, __u64 matchbits, void *buf, int len,
int options, lnet_process_id_t peer, lnet_nid_t self,
lnet_handle_md_t *mdh, srpc_event_t *ev)
}
if (rc != 0) {
- CERROR ("LNet%s(%s, %d, "LPD64") failed: %d\n",
+ CERROR ("LNet%s(%s, %d, %lld) failed: %d\n",
((options & LNET_MD_OP_PUT) != 0) ? "Put" : "Get",
libcfs_id2str(peer), portal, matchbits, rc);
LASSERT (rc == 0);
} else {
CDEBUG (D_NET,
- "Posted active RDMA: peer %s, portal %u, matchbits "LPX64"\n",
+ "Posted active RDMA: peer %s, portal %u, matchbits %#llx\n",
libcfs_id2str(peer), portal, matchbits);
}
return 0;
}
-int
-srpc_post_active_rqtbuf(lnet_process_id_t peer, int service, void *buf,
- int len, lnet_handle_md_t *mdh, srpc_event_t *ev)
-{
- return srpc_post_active_rdma(srpc_serv_portal(service), service,
- buf, len, LNET_MD_OP_PUT, peer,
- LNET_NID_ANY, mdh, ev);
-}
-
-int
+static int
srpc_post_passive_rqtbuf(int service, int local, void *buf, int len,
lnet_handle_md_t *mdh, srpc_event_t *ev)
{
LNET_MD_OP_PUT, any, mdh, ev);
}
-int
+static int
srpc_service_post_buffer(struct srpc_service_cd *scd, struct srpc_buffer *buf)
__must_hold(&scd->scd_lock)
{
}
/* called with sv->sv_lock held */
-void
+static void
srpc_service_recycle_buffer(struct srpc_service_cd *scd, srpc_buffer_t *buf)
__must_hold(&scd->scd_lock)
{
}
}
-int
+static int
srpc_send_request (srpc_client_rpc_t *rpc)
{
srpc_event_t *ev = &rpc->crpc_reqstev;
ev->ev_data = rpc;
ev->ev_type = SRPC_REQUEST_SENT;
- rc = srpc_post_active_rqtbuf(rpc->crpc_dest, rpc->crpc_service,
- &rpc->crpc_reqstmsg, sizeof(srpc_msg_t),
- &rpc->crpc_reqstmdh, ev);
+ rc = srpc_post_active_rdma(srpc_serv_portal(rpc->crpc_service),
+ rpc->crpc_service, &rpc->crpc_reqstmsg,
+ sizeof(srpc_msg_t), LNET_MD_OP_PUT,
+ rpc->crpc_dest, LNET_NID_ANY,
+ &rpc->crpc_reqstmdh, ev);
if (rc != 0) {
LASSERT (rc == -ENOMEM);
ev->ev_fired = 1; /* no more event expected */
return rc;
}
-int
+static int
srpc_prepare_reply (srpc_client_rpc_t *rpc)
{
srpc_event_t *ev = &rpc->crpc_replyev;
return rc;
}
-int
+static int
srpc_prepare_bulk (srpc_client_rpc_t *rpc)
{
srpc_bulk_t *bk = &rpc->crpc_bulk;
if (bk->bk_niov == 0) return 0; /* nothing to do */
opt = bk->bk_sink ? LNET_MD_OP_PUT : LNET_MD_OP_GET;
-#ifdef __KERNEL__
opt |= LNET_MD_KIOV;
-#else
- opt |= LNET_MD_IOVEC;
-#endif
ev->ev_fired = 0;
ev->ev_data = rpc;
return rc;
}
-int
+static int
srpc_do_bulk (srpc_server_rpc_t *rpc)
{
srpc_event_t *ev = &rpc->srpc_ev;
LASSERT (bk != NULL);
opt = bk->bk_sink ? LNET_MD_OP_GET : LNET_MD_OP_PUT;
-#ifdef __KERNEL__
opt |= LNET_MD_KIOV;
-#else
- opt |= LNET_MD_IOVEC;
-#endif
ev->ev_fired = 0;
ev->ev_data = rpc;
}
/* only called from srpc_handle_rpc */
-void
+static void
srpc_server_rpc_done(srpc_server_rpc_t *rpc, int status)
{
struct srpc_service_cd *scd = rpc->srpc_scd;
return 0;
}
-void
+static void
srpc_client_rpc_expired (void *data)
{
srpc_client_rpc_t *rpc = data;
spin_unlock(&srpc_data.rpc_glock);
}
-inline void
+static void
srpc_add_client_rpc_timer(srpc_client_rpc_t *rpc)
{
stt_timer_t *timer = &rpc->crpc_timer;
*
* Upon exit the RPC expiry timer is not queued and the handler is not
* running on any CPU. */
-void
+static void
srpc_del_client_rpc_timer (srpc_client_rpc_t *rpc)
{
/* timer not planted or already exploded */
if (rpc->crpc_timeout == 0)
return;
- /* timer sucessfully defused */
+ /* timer successfully defused */
if (stt_del_timer(&rpc->crpc_timer))
return;
-#ifdef __KERNEL__
/* timer detonated, wait for it to explode */
while (rpc->crpc_timeout != 0) {
spin_unlock(&rpc->crpc_lock);
spin_lock(&rpc->crpc_lock);
}
-#else
- LBUG(); /* impossible in single-threaded runtime */
-#endif
}
-void
+static void
srpc_client_rpc_done (srpc_client_rpc_t *rpc, int status)
{
swi_workitem_t *wi = &rpc->crpc_wi;
}
/* when in kernel always called with LNET_LOCK() held, and in thread context */
-void
+static void
srpc_lnet_ev_handler(lnet_event_t *ev)
{
struct srpc_service_cd *scd;
LASSERT (!in_interrupt());
if (ev->status != 0) {
+ __u32 errors;
+
spin_lock(&srpc_data.rpc_glock);
- srpc_data.rpc_counters.errors++;
+ if (ev->status != -ECANCELED) /* cancellation is not an error */
+ srpc_data.rpc_counters.errors++;
+ errors = srpc_data.rpc_counters.errors;
spin_unlock(&srpc_data.rpc_glock);
+
+ CNETERR("LNet event status %d type %d, RPC errors %u\n",
+ ev->status, ev->type, errors);
}
rpcev->ev_lnet = ev->type;
}
}
-#ifndef __KERNEL__
-
-int
-srpc_check_event (int timeout)
-{
- lnet_event_t ev;
- int rc;
- int i;
-
- rc = LNetEQPoll(&srpc_data.rpc_lnet_eq, 1,
- timeout * 1000, &ev, &i);
- if (rc == 0) return 0;
-
- LASSERT (rc == -EOVERFLOW || rc == 1);
-
- /* We can't affort to miss any events... */
- if (rc == -EOVERFLOW) {
- CERROR ("Dropped an event!!!\n");
- abort();
- }
-
- srpc_lnet_ev_handler(&ev);
- return 1;
-}
-
-#endif
int
srpc_startup (void)
memset(&srpc_data, 0, sizeof(struct smoketest_rpc));
spin_lock_init(&srpc_data.rpc_glock);
- /* 1 second pause to avoid timestamp reuse */
- cfs_pause(cfs_time_seconds(1));
- srpc_data.rpc_matchbits = ((__u64) cfs_time_current_sec()) << 48;
+ /* 1 second pause to avoid timestamp reuse */
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1));
+ srpc_data.rpc_matchbits = ((__u64) cfs_time_current_sec()) << 48;
srpc_data.rpc_state = SRPC_STATE_NONE;
-#ifdef __KERNEL__
rc = LNetNIInit(LNET_PID_LUSTRE);
-#else
- if (the_lnet.ln_server_mode_flag)
- rc = LNetNIInit(LNET_PID_LUSTRE);
- else
- rc = LNetNIInit(getpid() | LNET_PID_USERFLAG);
-#endif
if (rc < 0) {
CERROR ("LNetNIInit() has failed: %d\n", rc);
return rc;
srpc_data.rpc_state = SRPC_STATE_NI_INIT;
LNetInvalidateHandle(&srpc_data.rpc_lnet_eq);
-#ifdef __KERNEL__
rc = LNetEQAlloc(0, srpc_lnet_ev_handler, &srpc_data.rpc_lnet_eq);
-#else
- rc = LNetEQAlloc(10240, LNET_EQ_HANDLER_NONE, &srpc_data.rpc_lnet_eq);
-#endif
if (rc != 0) {
CERROR("LNetEQAlloc() has failed: %d\n", rc);
goto bail;