X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lnet%2Fselftest%2Frpc.c;h=8d817929a78485ac09471d9525b6a97f7ce6c883;hb=f21b415d15eda21aa7f489242b0eb844b968be7a;hp=91c7009f262eb9ee06d2d5af52fef933470f180e;hpb=bf5db236dd09f4e671f64123a7b54c115b626853;p=fs%2Flustre-release.git

diff --git a/lnet/selftest/rpc.c b/lnet/selftest/rpc.c
index 91c7009..8d81792 100644
--- a/lnet/selftest/rpc.c
+++ b/lnet/selftest/rpc.c
@@ -27,7 +27,7 @@
  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  *
- * Copyright (c) 2012, Intel Corporation.
+ * Copyright (c) 2012, 2015, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -54,7 +54,7 @@ typedef enum {
 	SRPC_STATE_STOPPING,
 } srpc_state_t;
 
-struct smoketest_rpc {
+static struct smoketest_rpc {
 	spinlock_t	 rpc_glock;	/* global lock */
 	srpc_service_t	*rpc_services[SRPC_SERVICE_MAX_ID + 1];
 	lnet_handle_eq_t rpc_lnet_eq;	/* _the_ LNet event queue */
@@ -87,7 +87,7 @@ void srpc_set_counters (const srpc_counters_t *cnt)
 	spin_unlock(&srpc_data.rpc_glock);
 }
 
-int
+static int
 srpc_add_bulk_page(srpc_bulk_t *bk, struct page *pg, int i, int nob)
 {
 	nob = min(nob, (int)PAGE_CACHE_SIZE);
@@ -95,17 +95,9 @@ srpc_add_bulk_page(srpc_bulk_t *bk, struct page *pg, int i, int nob)
 	LASSERT(nob > 0);
 	LASSERT(i >= 0 && i < bk->bk_niov);
 
-#ifdef __KERNEL__
 	bk->bk_iovs[i].kiov_offset = 0;
 	bk->bk_iovs[i].kiov_page   = pg;
 	bk->bk_iovs[i].kiov_len    = nob;
-#else
-	LASSERT(bk->bk_pages != NULL);
-
-	bk->bk_pages[i] = pg;
-	bk->bk_iovs[i].iov_len  = nob;
-	bk->bk_iovs[i].iov_base = page_address(pg);
-#endif
 	return nob;
 }
 
@@ -116,24 +108,14 @@ srpc_free_bulk (srpc_bulk_t *bk)
 	struct page *pg;
 
 	LASSERT (bk != NULL);
-#ifndef __KERNEL__
-	LASSERT (bk->bk_pages != NULL);
-#endif
 
 	for (i = 0; i < bk->bk_niov; i++) {
-#ifdef __KERNEL__
 		pg = bk->bk_iovs[i].kiov_page;
-#else
-		pg = bk->bk_pages[i];
-#endif
 		if (pg == NULL) break;
 
 		__free_page(pg);
 	}
 
-#ifndef __KERNEL__
-	LIBCFS_FREE(bk->bk_pages, sizeof(struct page *) * bk->bk_niov);
-#endif
 	LIBCFS_FREE(bk, offsetof(srpc_bulk_t, bk_iovs[bk->bk_niov]));
 	return;
 }
@@ -157,24 +139,6 @@ srpc_alloc_bulk(int cpt, unsigned bulk_npg, unsigned bulk_len, int sink)
 	bk->bk_sink   = sink;
 	bk->bk_len    = bulk_len;
 	bk->bk_niov   = bulk_npg;
-#ifndef __KERNEL__
-	{
-		struct page **pages;
-
-		LIBCFS_CPT_ALLOC(pages, lnet_cpt_table(), cpt,
-				 sizeof(struct page *) * bulk_npg);
-		if (pages == NULL) {
-			LIBCFS_FREE(bk, offsetof(srpc_bulk_t,
-				    bk_iovs[bulk_npg]));
-			CERROR("Can't allocate page array for %d pages\n",
-			       bulk_npg);
-			return NULL;
-		}
-
-		memset(pages, 0, sizeof(struct page *) * bulk_npg);
-		bk->bk_pages = pages;
-	}
-#endif
 
 	for (i = 0; i < bulk_npg; i++) {
 		struct page *pg;
@@ -205,7 +169,7 @@ srpc_next_id (void)
 	return id;
 }
 
-void
+static void
 srpc_init_server_rpc(struct srpc_server_rpc *rpc,
 		     struct srpc_service_cd *scd,
 		     struct srpc_buffer *buffer)
@@ -386,7 +350,7 @@ srpc_remove_service (srpc_service_t *sv)
 	return 0;
 }
 
-int
+static int
 srpc_post_passive_rdma(int portal, int local, __u64 matchbits, void *buf,
 		       int len, int options, lnet_process_id_t peer,
 		       lnet_handle_md_t *mdh, srpc_event_t *ev)
@@ -426,7 +390,7 @@ srpc_post_passive_rdma(int portal, int local, __u64 matchbits, void *buf,
 	return 0;
 }
 
-int
+static int
 srpc_post_active_rdma(int portal, __u64 matchbits, void *buf, int len,
 		      int options, lnet_process_id_t peer, lnet_nid_t self,
 		      lnet_handle_md_t *mdh, srpc_event_t *ev)
@@ -478,16 +442,7 @@ srpc_post_active_rdma(int portal, __u64 matchbits, void *buf, int len,
 	return 0;
 }
 
-int
-srpc_post_active_rqtbuf(lnet_process_id_t peer, int service, void *buf,
-			int len, lnet_handle_md_t *mdh, srpc_event_t *ev)
-{
-	return srpc_post_active_rdma(srpc_serv_portal(service), service,
-				     buf, len, LNET_MD_OP_PUT, peer,
-				     LNET_NID_ANY, mdh, ev);
-}
-
-int
+static int
 srpc_post_passive_rqtbuf(int service, int local, void *buf, int len,
 			 lnet_handle_md_t *mdh, srpc_event_t *ev)
 {
@@ -501,7 +456,7 @@ srpc_post_passive_rqtbuf(int service, int local, void *buf, int len,
 				      LNET_MD_OP_PUT, any, mdh, ev);
 }
 
-int
+static int
 srpc_service_post_buffer(struct srpc_service_cd *scd, struct srpc_buffer *buf)
 __must_hold(&scd->scd_lock)
 {
@@ -733,7 +688,7 @@ srpc_finish_service(struct srpc_service *sv)
 }
 
 /* called with sv->sv_lock held */
-void
+static void
 srpc_service_recycle_buffer(struct srpc_service_cd *scd, srpc_buffer_t *buf)
 __must_hold(&scd->scd_lock)
 {
@@ -824,7 +779,7 @@ srpc_shutdown_service(srpc_service_t *sv)
 	}
 }
 
-int
+static int
 srpc_send_request (srpc_client_rpc_t *rpc)
 {
 	srpc_event_t *ev = &rpc->crpc_reqstev;
@@ -834,9 +789,11 @@ srpc_send_request (srpc_client_rpc_t *rpc)
 	ev->ev_data = rpc;
 	ev->ev_type = SRPC_REQUEST_SENT;
 
-	rc = srpc_post_active_rqtbuf(rpc->crpc_dest, rpc->crpc_service,
-				     &rpc->crpc_reqstmsg, sizeof(srpc_msg_t),
-				     &rpc->crpc_reqstmdh, ev);
+	rc = srpc_post_active_rdma(srpc_serv_portal(rpc->crpc_service),
+				   rpc->crpc_service, &rpc->crpc_reqstmsg,
+				   sizeof(srpc_msg_t), LNET_MD_OP_PUT,
+				   rpc->crpc_dest, LNET_NID_ANY,
+				   &rpc->crpc_reqstmdh, ev);
 	if (rc != 0) {
 		LASSERT (rc == -ENOMEM);
 		ev->ev_fired = 1;  /* no more event expected */
@@ -844,7 +801,7 @@
 	return rc;
 }
 
-int
+static int
 srpc_prepare_reply (srpc_client_rpc_t *rpc)
 {
 	srpc_event_t *ev = &rpc->crpc_replyev;
@@ -868,7 +825,7 @@
 	return rc;
 }
 
-int
+static int
 srpc_prepare_bulk (srpc_client_rpc_t *rpc)
 {
 	srpc_bulk_t *bk = &rpc->crpc_bulk;
@@ -882,11 +839,7 @@ srpc_prepare_bulk (srpc_client_rpc_t *rpc)
 	if (bk->bk_niov == 0) return 0; /* nothing to do */
 
 	opt = bk->bk_sink ? LNET_MD_OP_PUT : LNET_MD_OP_GET;
-#ifdef __KERNEL__
 	opt |= LNET_MD_KIOV;
-#else
-	opt |= LNET_MD_IOVEC;
-#endif
 
 	ev->ev_fired = 0;
 	ev->ev_data  = rpc;
@@ -904,7 +857,7 @@
 	return rc;
 }
 
-int
+static int
 srpc_do_bulk (srpc_server_rpc_t *rpc)
 {
 	srpc_event_t *ev = &rpc->srpc_ev;
@@ -916,11 +869,7 @@ srpc_do_bulk (srpc_server_rpc_t *rpc)
 	LASSERT (bk != NULL);
 
 	opt = bk->bk_sink ? LNET_MD_OP_GET : LNET_MD_OP_PUT;
-#ifdef __KERNEL__
 	opt |= LNET_MD_KIOV;
-#else
-	opt |= LNET_MD_IOVEC;
-#endif
 
 	ev->ev_fired = 0;
 	ev->ev_data  = rpc;
@@ -936,7 +885,7 @@
 }
 
 /* only called from srpc_handle_rpc */
-void
+static void
 srpc_server_rpc_done(srpc_server_rpc_t *rpc, int status)
 {
 	struct srpc_service_cd *scd = rpc->srpc_scd;
@@ -1111,7 +1060,7 @@ srpc_handle_rpc(swi_workitem_t *wi)
 	return 0;
 }
 
-void
+static void
 srpc_client_rpc_expired (void *data)
 {
 	srpc_client_rpc_t *rpc = data;
@@ -1132,7 +1081,7 @@
 	spin_unlock(&srpc_data.rpc_glock);
 }
 
-inline void
+static void
 srpc_add_client_rpc_timer(srpc_client_rpc_t *rpc)
 {
 	stt_timer_t *timer = &rpc->crpc_timer;
@@ -1154,18 +1103,17 @@
  *
  * Upon exit the RPC expiry timer is not queued and the handler is not
  * running on any CPU.
  */
-void
+static void
 srpc_del_client_rpc_timer (srpc_client_rpc_t *rpc)
 {
 	/* timer not planted or already exploded */
 	if (rpc->crpc_timeout == 0) return;
 
-	/* timer sucessfully defused */
+	/* timer successfully defused */
 	if (stt_del_timer(&rpc->crpc_timer)) return;
 
-#ifdef __KERNEL__
 	/* timer detonated, wait for it to explode */
 	while (rpc->crpc_timeout != 0) {
 		spin_unlock(&rpc->crpc_lock);
 
@@ -1174,12 +1122,9 @@ srpc_del_client_rpc_timer (srpc_client_rpc_t *rpc)
 
 		spin_lock(&rpc->crpc_lock);
 	}
-#else
-	LBUG(); /* impossible in single-threaded runtime */
-#endif
 }
 
-void
+static void
 srpc_client_rpc_done (srpc_client_rpc_t *rpc, int status)
 {
 	swi_workitem_t *wi = &rpc->crpc_wi;
@@ -1443,7 +1388,7 @@ srpc_send_reply(struct srpc_server_rpc *rpc)
 }
 
 /* when in kernel always called with LNET_LOCK() held, and in thread context */
-void
+static void
 srpc_lnet_ev_handler(lnet_event_t *ev)
 {
 	struct srpc_service_cd *scd;
@@ -1458,9 +1403,16 @@
 	LASSERT (!in_interrupt());
 
 	if (ev->status != 0) {
+		__u32 errors;
+
 		spin_lock(&srpc_data.rpc_glock);
-		srpc_data.rpc_counters.errors++;
+		if (ev->status != -ECANCELED) /* cancellation is not error */
+			srpc_data.rpc_counters.errors++;
+		errors = srpc_data.rpc_counters.errors;
 		spin_unlock(&srpc_data.rpc_glock);
+
+		CNETERR("LNet event status %d type %d, RPC errors %u\n",
+			ev->status, ev->type, errors);
 	}
 
 	rpcev->ev_lnet = ev->type;
@@ -1625,32 +1577,6 @@
 	}
 }
 
-#ifndef __KERNEL__
-
-int
-srpc_check_event (int timeout)
-{
-	lnet_event_t ev;
-	int	     rc;
-	int	     i;
-
-	rc = LNetEQPoll(&srpc_data.rpc_lnet_eq, 1,
-			timeout * 1000, &ev, &i);
-	if (rc == 0) return 0;
-
-	LASSERT (rc == -EOVERFLOW || rc == 1);
-
-	/* We can't affort to miss any events... */
-	if (rc == -EOVERFLOW) {
-		CERROR ("Dropped an event!!!\n");
-		abort();
-	}
-
-	srpc_lnet_ev_handler(&ev);
-	return 1;
-}
-
-#endif
 
 int
 srpc_startup (void)
@@ -1660,20 +1586,14 @@
 	memset(&srpc_data, 0, sizeof(struct smoketest_rpc));
 	spin_lock_init(&srpc_data.rpc_glock);
 
-	/* 1 second pause to avoid timestamp reuse */
-	cfs_pause(cfs_time_seconds(1));
-	srpc_data.rpc_matchbits = ((__u64) cfs_time_current_sec()) << 48;
+	/* 1 second pause to avoid timestamp reuse */
+	set_current_state(TASK_UNINTERRUPTIBLE);
+	schedule_timeout(cfs_time_seconds(1));
+	srpc_data.rpc_matchbits = ((__u64) cfs_time_current_sec()) << 48;
 
 	srpc_data.rpc_state = SRPC_STATE_NONE;
 
-#ifdef __KERNEL__
 	rc = LNetNIInit(LNET_PID_LUSTRE);
-#else
-	if (the_lnet.ln_server_mode_flag)
-		rc = LNetNIInit(LNET_PID_LUSTRE);
-	else
-		rc = LNetNIInit(getpid() | LNET_PID_USERFLAG);
-#endif
 	if (rc < 0) {
 		CERROR ("LNetNIInit() has failed: %d\n", rc);
 		return rc;
@@ -1682,11 +1602,7 @@
 	srpc_data.rpc_state = SRPC_STATE_NI_INIT;
 
 	LNetInvalidateHandle(&srpc_data.rpc_lnet_eq);
-#ifdef __KERNEL__
 	rc = LNetEQAlloc(0, srpc_lnet_ev_handler, &srpc_data.rpc_lnet_eq);
-#else
-	rc = LNetEQAlloc(10240, LNET_EQ_HANDLER_NONE, &srpc_data.rpc_lnet_eq);
-#endif
 	if (rc != 0) {
 		CERROR("LNetEQAlloc() has failed: %d\n", rc);
 		goto bail;
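
Note on the srpc_startup() hunk above: the match-bits seed is the epoch
seconds shifted left by 48, so the low 16 bits of the timestamp land in the
top 16 bits of rpc_matchbits and the low 48 bits are free for a per-RPC
counter; the 1-second pause before sampling the clock keeps a quickly
restarted instance from reusing the previous run's seed. Below is a minimal
standalone C sketch of that scheme, not the kernel code itself: the names
mirror the patch, a pthread mutex stands in for rpc_glock, and the
assumption that srpc_next_id() post-increments the counter under the lock
is inferred from the "return id;" tail visible in the @@ -205,7 +169,7 @@
hunk, which the diff does not show in full.

/* build: cc -pthread matchbits.c -o matchbits (hypothetical file name) */
#include <inttypes.h>
#include <pthread.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static pthread_mutex_t rpc_glock = PTHREAD_MUTEX_INITIALIZER; /* stand-in for the spinlock */
static uint64_t rpc_matchbits;

static void srpc_startup_matchbits(void)
{
	sleep(1);                                   /* mirrors the 1-second anti-reuse pause */
	rpc_matchbits = (uint64_t)time(NULL) << 48; /* seconds seed the top 16 bits */
}

static uint64_t srpc_next_id(void)
{
	uint64_t id;

	pthread_mutex_lock(&rpc_glock);
	id = rpc_matchbits++;                       /* low 48 bits count up per RPC */
	pthread_mutex_unlock(&rpc_glock);
	return id;
}

int main(void)
{
	srpc_startup_matchbits();
	for (int i = 0; i < 3; i++)
		printf("matchbits %#" PRIx64 "\n", srpc_next_id());
	return 0;
}

With this layout, two instances started in different seconds hand out
disjoint IDs until 2^48 RPCs have been issued, and the 16-bit seed only
repeats for instances started exactly 65536 seconds (about 18.2 hours)
apart.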