X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lnet%2Fselftest%2Fselftest.h;h=ab5ea8f2204ce4b680d189a6ee4f3ff8fbe02034;hb=ef1d121c2e68e368dae72a5994d3d1fd3b35c2b3;hp=786501dce5016e5d99d8f1c6c03d4aa1cf543766;hpb=c03783fce46ae0b40db0680388df6e2d6fca5008;p=fs%2Flustre-release.git diff --git a/lnet/selftest/selftest.h b/lnet/selftest/selftest.h index 786501d..ab5ea8f 100644 --- a/lnet/selftest/selftest.h +++ b/lnet/selftest/selftest.h @@ -27,6 +27,8 @@ /* * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. * Use is subject to license terms. + * + * Copyright (c) 2012, 2013, Intel Corporation. */ /* * This file is part of Lustre, http://www.lustre.org/ @@ -168,7 +170,7 @@ typedef struct { #ifdef __KERNEL__ lnet_kiov_t bk_iovs[0]; #else - cfs_page_t **bk_pages; + struct page **bk_pages; lnet_md_iovec_t bk_iovs[0]; #endif } srpc_bulk_t; /* bulk descriptor */ @@ -213,10 +215,10 @@ typedef struct srpc_server_rpc { /* client-side state of a RPC */ typedef struct srpc_client_rpc { - cfs_list_t crpc_list; /* chain on user's lists */ - cfs_spinlock_t crpc_lock; /* serialize */ + cfs_list_t crpc_list; /* chain on user's lists */ + spinlock_t crpc_lock; /* serialize */ int crpc_service; - cfs_atomic_t crpc_refcount; + atomic_t crpc_refcount; int crpc_timeout; /* # seconds to wait for reply */ stt_timer_t crpc_timer; swi_workitem_t crpc_wi; @@ -251,18 +253,18 @@ offsetof(srpc_client_rpc_t, crpc_bulk.bk_iovs[(rpc)->crpc_bulk.bk_niov]) do { \ CDEBUG(D_NET, "RPC[%p] -> %s (%d)++\n", \ (rpc), libcfs_id2str((rpc)->crpc_dest), \ - cfs_atomic_read(&(rpc)->crpc_refcount)); \ - LASSERT(cfs_atomic_read(&(rpc)->crpc_refcount) > 0); \ - cfs_atomic_inc(&(rpc)->crpc_refcount); \ + atomic_read(&(rpc)->crpc_refcount)); \ + LASSERT(atomic_read(&(rpc)->crpc_refcount) > 0); \ + atomic_inc(&(rpc)->crpc_refcount); \ } while (0) #define srpc_client_rpc_decref(rpc) \ do { \ CDEBUG(D_NET, "RPC[%p] -> %s (%d)--\n", \ (rpc), libcfs_id2str((rpc)->crpc_dest), \ - cfs_atomic_read(&(rpc)->crpc_refcount)); \ - LASSERT(cfs_atomic_read(&(rpc)->crpc_refcount) > 0); \ - if (cfs_atomic_dec_and_test(&(rpc)->crpc_refcount)) \ + atomic_read(&(rpc)->crpc_refcount)); \ + LASSERT(atomic_read(&(rpc)->crpc_refcount) > 0); \ + if (atomic_dec_and_test(&(rpc)->crpc_refcount)) \ srpc_destroy_client_rpc(rpc); \ } while (0) @@ -273,7 +275,7 @@ do { \ /* CPU partition data of srpc service */ struct srpc_service_cd { /** serialize */ - cfs_spinlock_t scd_lock; + spinlock_t scd_lock; /** backref to service */ struct srpc_service *scd_svc; /** event buffer */ @@ -338,12 +340,13 @@ typedef struct { lst_sid_t sn_id; /* unique identifier */ unsigned int sn_timeout; /* # seconds' inactivity to expire */ int sn_timer_active; + unsigned int sn_features; stt_timer_t sn_timer; cfs_list_t sn_batches; /* list of batches */ char sn_name[LST_NAME_SIZE]; - cfs_atomic_t sn_refcount; - cfs_atomic_t sn_brw_errors; - cfs_atomic_t sn_ping_errors; + atomic_t sn_refcount; + atomic_t sn_brw_errors; + atomic_t sn_ping_errors; cfs_time_t sn_started; } sfw_session_t; @@ -355,7 +358,7 @@ typedef struct { lst_bid_t bat_id; /* batch id */ int bat_error; /* error code of batch */ sfw_session_t *bat_session; /* batch's session */ - cfs_atomic_t bat_nactive; /* # of active tests */ + atomic_t bat_nactive; /* # of active tests */ cfs_list_t bat_tests; /* test instances */ } sfw_batch_t; @@ -375,30 +378,31 @@ typedef struct sfw_test_instance { sfw_batch_t *tsi_batch; /* batch */ sfw_test_client_ops_t *tsi_ops; /* test client operations */ - /* public 
parameter for all test units */ - int tsi_is_client:1; /* is test client */ - int tsi_stoptsu_onerr:1; /* stop tsu on error */ + /* public parameter for all test units */ + unsigned int tsi_is_client:1; /* is test client */ + unsigned int tsi_stoptsu_onerr:1; /* stop tsu on error */ int tsi_concur; /* concurrency */ int tsi_loop; /* loop count */ - /* status of test instance */ - cfs_spinlock_t tsi_lock; /* serialize */ - int tsi_stopping:1; /* test is stopping */ - cfs_atomic_t tsi_nactive; /* # of active test unit */ - cfs_list_t tsi_units; /* test units */ - cfs_list_t tsi_free_rpcs; /* free rpcs */ - cfs_list_t tsi_active_rpcs; /* active rpcs */ - - union { - test_bulk_req_t bulk; /* bulk parameter */ - test_ping_req_t ping; /* ping parameter */ - } tsi_u; + /* status of test instance */ + spinlock_t tsi_lock; /* serialize */ + unsigned int tsi_stopping:1; /* test is stopping */ + atomic_t tsi_nactive; /* # of active test unit */ + cfs_list_t tsi_units; /* test units */ + cfs_list_t tsi_free_rpcs; /* free rpcs */ + cfs_list_t tsi_active_rpcs; /* active rpcs */ + + union { + test_ping_req_t ping; /* ping parameter */ + test_bulk_req_t bulk_v0; /* bulk parameter */ + test_bulk_req_v1_t bulk_v1; /* bulk v1 parameter */ + } tsi_u; } sfw_test_instance_t; -/* XXX: trailing (CFS_PAGE_SIZE % sizeof(lnet_process_id_t)) bytes at +/* XXX: trailing (PAGE_CACHE_SIZE % sizeof(lnet_process_id_t)) bytes at * the end of pages are not used */ #define SFW_MAX_CONCUR LST_MAX_CONCUR -#define SFW_ID_PER_PAGE (CFS_PAGE_SIZE / sizeof(lnet_process_id_packed_t)) +#define SFW_ID_PER_PAGE (PAGE_CACHE_SIZE / sizeof(lnet_process_id_packed_t)) #define SFW_MAX_NDESTS (LNET_MAX_IOV * SFW_ID_PER_PAGE) #define sfw_id_pages(n) (((n) + SFW_ID_PER_PAGE - 1) / SFW_ID_PER_PAGE) @@ -418,17 +422,20 @@ typedef struct sfw_test_case { } sfw_test_case_t; srpc_client_rpc_t * -sfw_create_rpc(lnet_process_id_t peer, int service, int nbulkiov, int bulklen, - void (*done) (srpc_client_rpc_t *), void *priv); -int sfw_create_test_rpc(sfw_test_unit_t *tsu, lnet_process_id_t peer, - int nblk, int blklen, srpc_client_rpc_t **rpc); +sfw_create_rpc(lnet_process_id_t peer, int service, + unsigned features, int nbulkiov, int bulklen, + void (*done) (srpc_client_rpc_t *), void *priv); +int sfw_create_test_rpc(sfw_test_unit_t *tsu, + lnet_process_id_t peer, unsigned features, + int nblk, int blklen, srpc_client_rpc_t **rpc); void sfw_abort_rpc(srpc_client_rpc_t *rpc); void sfw_post_rpc(srpc_client_rpc_t *rpc); void sfw_client_rpc_done(srpc_client_rpc_t *rpc); void sfw_unpack_message(srpc_msg_t *msg); void sfw_free_pages(srpc_server_rpc_t *rpc); -void sfw_add_bulk_page(srpc_bulk_t *bk, cfs_page_t *pg, int i); -int sfw_alloc_pages(srpc_server_rpc_t *rpc, int cpt, int npages, int sink); +void sfw_add_bulk_page(srpc_bulk_t *bk, struct page *pg, int i); +int sfw_alloc_pages(srpc_server_rpc_t *rpc, int cpt, int npages, int len, + int sink); int sfw_make_session (srpc_mksn_reqst_t *request, srpc_mksn_reply_t *reply); srpc_client_rpc_t * @@ -439,7 +446,8 @@ srpc_create_client_rpc(lnet_process_id_t peer, int service, void srpc_post_rpc(srpc_client_rpc_t *rpc); void srpc_abort_rpc(srpc_client_rpc_t *rpc, int why); void srpc_free_bulk(srpc_bulk_t *bk); -srpc_bulk_t *srpc_alloc_bulk(int cpt, int npages, int sink); +srpc_bulk_t *srpc_alloc_bulk(int cpt, unsigned bulk_npg, unsigned bulk_len, + int sink); int srpc_send_rpc(swi_workitem_t *wi); int srpc_send_reply(srpc_server_rpc_t *rpc); int srpc_add_service(srpc_service_t *sv); @@ -513,20 +521,20 @@ void 
srpc_shutdown(void); static inline void srpc_destroy_client_rpc (srpc_client_rpc_t *rpc) { - LASSERT (rpc != NULL); - LASSERT (!srpc_event_pending(rpc)); - LASSERT (cfs_atomic_read(&rpc->crpc_refcount) == 0); + LASSERT (rpc != NULL); + LASSERT (!srpc_event_pending(rpc)); + LASSERT (atomic_read(&rpc->crpc_refcount) == 0); #ifndef __KERNEL__ - LASSERT (rpc->crpc_bulk.bk_pages == NULL); + LASSERT (rpc->crpc_bulk.bk_pages == NULL); #endif - if (rpc->crpc_fini == NULL) { - LIBCFS_FREE(rpc, srpc_client_rpc_size(rpc)); - } else { - (*rpc->crpc_fini) (rpc); - } + if (rpc->crpc_fini == NULL) { + LIBCFS_FREE(rpc, srpc_client_rpc_size(rpc)); + } else { + (*rpc->crpc_fini) (rpc); + } - return; + return; } static inline void @@ -543,11 +551,11 @@ srpc_init_client_rpc (srpc_client_rpc_t *rpc, lnet_process_id_t peer, CFS_INIT_LIST_HEAD(&rpc->crpc_list); swi_init_workitem(&rpc->crpc_wi, rpc, srpc_send_rpc, lst_sched_test[lnet_cpt_of_nid(peer.nid)]); - cfs_spin_lock_init(&rpc->crpc_lock); - cfs_atomic_set(&rpc->crpc_refcount, 1); /* 1 ref for caller */ + spin_lock_init(&rpc->crpc_lock); + atomic_set(&rpc->crpc_refcount, 1); /* 1 ref for caller */ - rpc->crpc_dest = peer; - rpc->crpc_priv = priv; + rpc->crpc_dest = peer; + rpc->crpc_priv = priv; rpc->crpc_service = service; rpc->crpc_bulk.bk_len = bulklen; rpc->crpc_bulk.bk_niov = nbulkiov; @@ -587,8 +595,6 @@ swi_state2str (int state) #undef STATE2STR } -#define UNUSED(x) ( (void)(x) ) - #ifndef __KERNEL__ int stt_poll_interval(void); @@ -607,18 +613,18 @@ int selftest_wait_events(void); #endif -#define lst_wait_until(cond, lock, fmt, ...) \ -do { \ - int __I = 2; \ - while (!(cond)) { \ - CDEBUG(IS_PO2(++__I) ? D_WARNING : D_NET, \ - fmt, ## __VA_ARGS__); \ - cfs_spin_unlock(&(lock)); \ - \ - selftest_wait_events(); \ - \ - cfs_spin_lock(&(lock)); \ - } \ +#define lst_wait_until(cond, lock, fmt, ...) \ +do { \ + int __I = 2; \ + while (!(cond)) { \ + CDEBUG(IS_PO2(++__I) ? D_WARNING : D_NET, \ + fmt, ## __VA_ARGS__); \ + spin_unlock(&(lock)); \ + \ + selftest_wait_events(); \ + \ + spin_lock(&(lock)); \ + } \ } while (0) static inline void
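
The bulk of the hunks above swap the libcfs wrappers (cfs_atomic_t, cfs_spinlock_t, cfs_page_t) for the native kernel primitives without changing behaviour. The srpc_client_rpc_addref()/srpc_client_rpc_decref() macros, for instance, are an ordinary reference-count-with-destructor pattern: the last decref (atomic_dec_and_test() returning true) either frees the RPC with LIBCFS_FREE() or hands it to the caller-supplied crpc_fini callback, exactly as srpc_destroy_client_rpc() shows. Below is a minimal userspace sketch of that pattern using C11 <stdatomic.h> in place of the kernel's atomic_t; the demo_* names are illustrative only and not part of the Lustre selftest API.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Userspace analogue of srpc_client_rpc_t's reference counting. */
struct demo_rpc {
	atomic_int refcount;			/* mirrors crpc_refcount */
	void (*fini)(struct demo_rpc *);	/* mirrors crpc_fini, may be NULL */
};

static struct demo_rpc *demo_rpc_create(void (*fini)(struct demo_rpc *))
{
	struct demo_rpc *rpc = calloc(1, sizeof(*rpc));

	if (rpc == NULL)
		return NULL;
	atomic_init(&rpc->refcount, 1);		/* 1 ref for the caller, as in
						 * srpc_init_client_rpc() */
	rpc->fini = fini;
	return rpc;
}

static void demo_rpc_addref(struct demo_rpc *rpc)
{
	atomic_fetch_add(&rpc->refcount, 1);
}

static void demo_rpc_decref(struct demo_rpc *rpc)
{
	/* The kernel's atomic_dec_and_test() returns true when the counter
	 * reaches zero; fetch_sub returning 1 is the equivalent here. */
	if (atomic_fetch_sub(&rpc->refcount, 1) == 1) {
		if (rpc->fini == NULL)
			free(rpc);		/* LIBCFS_FREE() in the original */
		else
			rpc->fini(rpc);		/* caller reclaims the RPC */
	}
}

int main(void)
{
	struct demo_rpc *rpc = demo_rpc_create(NULL);

	if (rpc == NULL)
		return 1;
	demo_rpc_addref(rpc);			/* e.g. while the RPC is in flight */
	demo_rpc_decref(rpc);			/* completion path drops its ref */
	demo_rpc_decref(rpc);			/* caller's ref: frees the RPC */
	printf("rpc destroyed on last decref\n");
	return 0;
}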
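
Similarly, the reworked lst_wait_until() macro at the end of the diff polls a condition while temporarily dropping the caller's spinlock around selftest_wait_events(), and throttles its debug message to power-of-two poll counts via IS_PO2(++__I). The following is a rough userspace analogue of that drop-lock/poll/retake loop, assuming a pthread mutex in place of spinlock_t and a stub wait function; again the demo_* names are made up for illustration, not taken from the Lustre sources.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;
static bool demo_done;

/* Stand-in for selftest_wait_events(): let other threads make progress. */
static void demo_wait_events(void)
{
	usleep(1000);
}

static void *demo_worker(void *arg)
{
	(void)arg;
	usleep(50000);				/* pretend to do some work */
	pthread_mutex_lock(&demo_lock);
	demo_done = true;			/* condition the waiter polls for */
	pthread_mutex_unlock(&demo_lock);
	return NULL;
}

int main(void)
{
	pthread_t tid;
	int polls = 2;

	pthread_create(&tid, NULL, demo_worker, NULL);

	/* Like lst_wait_until(): enter with the lock held, drop it around the
	 * wait, and log only when the poll count is a power of two. */
	pthread_mutex_lock(&demo_lock);
	while (!demo_done) {
		polls++;
		if ((polls & (polls - 1)) == 0)
			fprintf(stderr, "still waiting (%d polls)\n", polls);
		pthread_mutex_unlock(&demo_lock);
		demo_wait_events();
		pthread_mutex_lock(&demo_lock);
	}
	pthread_mutex_unlock(&demo_lock);

	pthread_join(tid, NULL);
	printf("condition satisfied\n");
	return 0;
}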