X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lnet%2Fselftest%2Fselftest.h;h=5dc9481586c2e21a113e57f8eed812a644b27d02;hb=b4efa1b2cbfd45f85439e1bb0a4c4eb719540dcd;hp=63179ff1cd30233e9d71841de626f9fbc4c2ec04;hpb=e38aad0021f4ccd43f5a6659097c45eecb1ecf35;p=fs%2Flustre-release.git

diff --git a/lnet/selftest/selftest.h b/lnet/selftest/selftest.h
index 63179ff..5dc9481 100644
--- a/lnet/selftest/selftest.h
+++ b/lnet/selftest/selftest.h
@@ -1,25 +1,62 @@
 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
  * vim:expandtab:shiftwidth=8:tabstop=8:
  *
- * Copyright (C) 2001, 2002 Cluster File Systems, Inc.
- * Author: Isaac Huang
  *
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ * copy of GPLv2].
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lnet/selftest/selftest.h
+ *
+ * Author: Isaac Huang
  */

 #ifndef __SELFTEST_SELFTEST_H__
 #define __SELFTEST_SELFTEST_H__

 #define LNET_ONLY

-#include
+#ifndef __KERNEL__
+
+/* XXX workaround XXX */
+#ifdef HAVE_SYS_TYPES_H
+#include <sys/types.h>
+#endif
+
+#endif

 #include
 #include
+#include
 #include
 #include

-#ifndef __KERNEL__
-#include        /* userland spinlock_t and atomic_t */
-#endif
-
 #include "rpc.h"
 #include "timer.h"

@@ -50,11 +87,11 @@ struct sfw_test_instance;
  * serialized with respect to itself.
  * - no CPU affinity, a workitem does not necessarily run on the same CPU
  *   that schedules it. However, this might change in the future.
- * - if a workitem is scheduled again before it has a chance to run, it 
+ * - if a workitem is scheduled again before it has a chance to run, it
  *   runs only once.
- * - if a workitem is scheduled while it runs, it runs again after it 
- *   completes; this ensures that events occurring while other events are 
- *   being processed receive due attention. This behavior also allows a 
+ * - if a workitem is scheduled while it runs, it runs again after it
+ *   completes; this ensures that events occurring while other events are
+ *   being processed receive due attention. This behavior also allows a
  *   workitem to reschedule itself.
  *
  * Usage notes:
@@ -154,13 +191,13 @@ srpc_service2reply (int service)
 }

 typedef enum {
-        SRPC_BULK_REQ_RCVD   = 0, /* passive bulk request(PUT sink/GET source) received */
-        SRPC_BULK_PUT_SENT   = 1, /* active bulk PUT sent (source) */
-        SRPC_BULK_GET_RPLD   = 2, /* active bulk GET replied (sink) */
-        SRPC_REPLY_RCVD      = 3, /* incoming reply received */
-        SRPC_REPLY_SENT      = 4, /* outgoing reply sent */
-        SRPC_REQUEST_RCVD    = 5, /* incoming request received */
-        SRPC_REQUEST_SENT    = 6, /* outgoing request sent */
+        SRPC_BULK_REQ_RCVD   = 1, /* passive bulk request(PUT sink/GET source) received */
+        SRPC_BULK_PUT_SENT   = 2, /* active bulk PUT sent (source) */
+        SRPC_BULK_GET_RPLD   = 3, /* active bulk GET replied (sink) */
+        SRPC_REPLY_RCVD      = 4, /* incoming reply received */
+        SRPC_REPLY_SENT      = 5, /* outgoing reply sent */
+        SRPC_REQUEST_RCVD    = 6, /* incoming request received */
+        SRPC_REQUEST_SENT    = 7, /* outgoing request sent */
 } srpc_event_type_t;

 /* RPC event */
@@ -185,15 +222,6 @@ typedef struct {
 #endif
 } srpc_bulk_t; /* bulk descriptor */

-typedef struct srpc_peer {
-        struct list_head  stp_list;     /* chain on peer hash */
-        struct list_head  stp_rpcq;     /* q of non-control RPCs */
-        struct list_head  stp_ctl_rpcq; /* q of control RPCs */
-        spinlock_t        stp_lock;     /* serialize */
-        lnet_nid_t        stp_nid;
-        int               stp_credits;  /* available credits */
-} srpc_peer_t;
-
 /* message buffer descriptor */
 typedef struct {
         struct list_head  buf_list; /* chain on srpc_service::*_msgq */
@@ -216,6 +244,7 @@ typedef struct srpc_server_rpc {
         srpc_buffer_t      *srpc_reqstbuf;
         srpc_bulk_t        *srpc_bulk;

+        unsigned int        srpc_aborted; /* being given up */
         int                 srpc_status;
         void              (*srpc_done)(struct srpc_server_rpc *);
 } srpc_server_rpc_t;
@@ -223,7 +252,6 @@ typedef struct srpc_server_rpc {
 /* client-side state of a RPC */
 typedef struct srpc_client_rpc {
         struct list_head    crpc_list;   /* chain on user's lists */
-        struct list_head    crpc_privl;  /* chain on srpc_peer_t::*rpcq */
         spinlock_t          crpc_lock;   /* serialize */
         int                 crpc_service;
         atomic_t            crpc_refcount;
@@ -231,7 +259,6 @@ typedef struct srpc_client_rpc {
         stt_timer_t         crpc_timer;
         swi_workitem_t      crpc_wi;
         lnet_process_id_t   crpc_dest;
-        srpc_peer_t        *crpc_peer;

         void    (*crpc_done)(struct srpc_client_rpc *);
         void    (*crpc_fini)(struct srpc_client_rpc *);
@@ -304,7 +331,7 @@ typedef struct srpc_service {
         int     (*sv_bulk_ready) (srpc_server_rpc_t *, int);
 } srpc_service_t;

-#define SFW_POST_BUFFERS         8
+#define SFW_POST_BUFFERS         256
 #define SFW_SERVICE_CONCURRENCY  (SFW_POST_BUFFERS/2)

 typedef struct {
@@ -315,6 +342,7 @@ typedef struct {
         stt_timer_t       sn_timer;
         struct list_head  sn_batches; /* list of batches */
         char              sn_name[LST_NAME_SIZE];
+        atomic_t          sn_refcount;
         atomic_t          sn_brw_errors;
         atomic_t          sn_ping_errors;
 } sfw_session_t;
@@ -334,7 +362,7 @@ typedef struct {
 typedef struct {
         int  (*tso_init)(struct sfw_test_instance *tsi); /* intialize test client */
         void (*tso_fini)(struct sfw_test_instance *tsi); /* finalize test client */
-        int  (*tso_prep_rpc)(struct sfw_test_unit *tsu, 
+        int  (*tso_prep_rpc)(struct sfw_test_unit *tsu,
                              lnet_process_id_t dest,
                              srpc_client_rpc_t **rpc);    /* prep a tests rpc */
         void (*tso_done_rpc)(struct sfw_test_unit *tsu,
@@ -367,7 +395,7 @@ typedef struct sfw_test_instance {
         } tsi_u;
 } sfw_test_instance_t;

-/* XXX: trailing (CFS_PAGE_SIZE % sizeof(lnet_process_id_t)) bytes at 
+/* XXX: trailing (CFS_PAGE_SIZE % sizeof(lnet_process_id_t)) bytes at
  * the end of pages are not used */
 #define SFW_MAX_CONCUR     LST_MAX_CONCUR
 #define SFW_ID_PER_PAGE    (CFS_PAGE_SIZE / sizeof(lnet_process_id_t))
@@ -402,9 +430,10 @@ void sfw_unpack_message(srpc_msg_t *msg);
 void sfw_free_pages(srpc_server_rpc_t *rpc);
 void sfw_add_bulk_page(srpc_bulk_t *bk, cfs_page_t *pg, int i);
 int sfw_alloc_pages(srpc_server_rpc_t *rpc, int npages, int sink);
+int sfw_make_session (srpc_mksn_reqst_t *request, srpc_mksn_reply_t *reply);

 srpc_client_rpc_t *
-srpc_create_client_rpc(lnet_process_id_t peer, int service, 
+srpc_create_client_rpc(lnet_process_id_t peer, int service,
                        int nbulkiov, int bulklen,
                        void (*rpc_done)(srpc_client_rpc_t *),
                        void (*rpc_fini)(srpc_client_rpc_t *), void *priv);
@@ -417,6 +446,7 @@ int srpc_send_reply(srpc_server_rpc_t *rpc);
 int srpc_add_service(srpc_service_t *sv);
 int srpc_remove_service(srpc_service_t *sv);
 void srpc_shutdown_service(srpc_service_t *sv);
+void srpc_abort_service(srpc_service_t *sv);
 int srpc_finish_service(srpc_service_t *sv);
 int srpc_service_add_buffers(srpc_service_t *sv, int nbuffer);
 void srpc_service_remove_buffers(srpc_service_t *sv, int nbuffer);
@@ -438,7 +468,6 @@ srpc_destroy_client_rpc (srpc_client_rpc_t *rpc)
 {
         LASSERT (rpc != NULL);
         LASSERT (!srpc_event_pending(rpc));
-        LASSERT (list_empty(&rpc->crpc_privl));
         LASSERT (atomic_read(&rpc->crpc_refcount) == 0);
 #ifndef __KERNEL__
         LASSERT (rpc->crpc_bulk.bk_pages == NULL);
 #endif
@@ -465,7 +494,6 @@ srpc_init_client_rpc (srpc_client_rpc_t *rpc, lnet_process_id_t peer,
                                    crpc_bulk.bk_iovs[nbulkiov]));

         CFS_INIT_LIST_HEAD(&rpc->crpc_list);
-        CFS_INIT_LIST_HEAD(&rpc->crpc_privl);
         swi_init_workitem(&rpc->crpc_wi, rpc, srpc_send_rpc);
         spin_lock_init(&rpc->crpc_lock);
         atomic_set(&rpc->crpc_refcount, 1); /* 1 ref for caller */
@@ -477,9 +505,9 @@ srpc_init_client_rpc (srpc_client_rpc_t *rpc, lnet_process_id_t peer,
         rpc->crpc_bulk.bk_niov = nbulkiov;
         rpc->crpc_done = rpc_done;
         rpc->crpc_fini = rpc_fini;
-        rpc->crpc_reqstmdh =
-        rpc->crpc_replymdh =
-        rpc->crpc_bulk.bk_mdh = LNET_INVALID_HANDLE;
+        LNetInvalidateHandle(&rpc->crpc_reqstmdh);
+        LNetInvalidateHandle(&rpc->crpc_replymdh);
+        LNetInvalidateHandle(&rpc->crpc_bulk.bk_mdh);

         /* no event is expected at this point */
         rpc->crpc_bulkev.ev_fired =
@@ -492,12 +520,12 @@
         return;
 }

-static inline const char * 
+static inline const char *
 swi_state2str (int state)
 {
 #define STATE2STR(x) case x: return #x
         switch(state) {
-                default: 
+                default:
                         LBUG();
                         STATE2STR(SWI_STATE_NEWBORN);
                         STATE2STR(SWI_STATE_REPLY_SUBMITTED);
@@ -532,14 +560,12 @@ int selftest_wait_events(void);

 #endif

-#define lst_wait_until(cond, lock, fmt, a...)                   \
+#define lst_wait_until(cond, lock, fmt, ...)                    \
 do {                                                            \
         int __I = 2;                                            \
         while (!(cond)) {                                       \
-                __I++;                                          \
-                CDEBUG(((__I & (-__I)) == __I) ? D_WARNING :    \
-                       D_NET, /* 2**n? */                       \
-                       fmt, ## a);                              \
+                CDEBUG(IS_PO2(++__I) ? D_WARNING : D_NET,       \
+                       fmt, ## __VA_ARGS__);                    \
                 spin_unlock(&(lock));                           \
                                                                 \
                 selftest_wait_events();                         \
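
Notes on this patch (reviewer commentary, not part of the diff):

The workitem rules restated in the hunk at @@ -50,11 +87,11 @@ (an action is serialized with respect to itself; a double schedule before it runs coalesces into one run; a schedule during a run triggers exactly one re-run) are easy to get wrong, so here is a self-contained user-space model of them. All names below (struct workitem, wi_schedule, ping_action) are invented for illustration; the real scheduler keeps the pending/running bits under a spinlock and runs actions from dedicated threads.

    #include <stdio.h>

    /* toy, single-threaded model of a selftest workitem */
    struct workitem {
            int   wi_scheduled;                  /* schedule request pending? */
            int   wi_running;                    /* action currently executing? */
            void (*wi_action)(struct workitem *);
    };

    static void
    wi_schedule(struct workitem *wi)
    {
            if (wi->wi_scheduled)
                    return;                      /* coalesced: runs only once */

            wi->wi_scheduled = 1;
            if (wi->wi_running)
                    return;                      /* re-runs after the current run completes */

            while (wi->wi_scheduled) {           /* stands in for the scheduler thread */
                    wi->wi_scheduled = 0;
                    wi->wi_running   = 1;
                    wi->wi_action(wi);           /* may call wi_schedule(wi) itself */
                    wi->wi_running   = 0;
            }
    }

    static void
    ping_action(struct workitem *wi)
    {
            static int runs;

            printf("run %d\n", ++runs);
            if (runs == 1)
                    wi_schedule(wi);             /* schedule while running ... */
    }

    int
    main(void)
    {
            struct workitem wi = { .wi_action = ping_action };

            wi_schedule(&wi);                    /* ... prints "run 1" then "run 2" */
            return 0;
    }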
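
The hunk at @@ -477,9 +505,9 @@ swaps chained struct assignments of LNET_INVALID_HANDLE for LNetInvalidateHandle() calls, which write through a pointer and keep callers independent of the handle representation. The model below compiles on its own; the type and cookie layout are stand-ins invented here, not the definitions from the LNet headers.

    #include <stdio.h>

    /* stand-in for the opaque LNet MD handle: a wire cookie in a struct */
    typedef struct { unsigned long long cookie; } lnet_handle_md_t;

    #define WIRE_COOKIE_NONE (~0ULL)             /* "nothing posted" marker */

    /* the accessor style the patch adopts: the representation can change
     * without touching any caller */
    static void
    LNetInvalidateHandle(lnet_handle_md_t *h)
    {
            h->cookie = WIRE_COOKIE_NONE;
    }

    static int
    LNetHandleIsInvalid(lnet_handle_md_t h)
    {
            return h.cookie == WIRE_COOKIE_NONE;
    }

    int
    main(void)
    {
            lnet_handle_md_t mdh;

            /* replaces: mdh = LNET_INVALID_HANDLE; and lets the chained
             * form (a = b = c = LNET_INVALID_HANDLE) go away */
            LNetInvalidateHandle(&mdh);
            printf("invalid: %d\n", LNetHandleIsInvalid(mdh));
            return 0;
    }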
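
The rewritten lst_wait_until() at @@ -532,14 +560,12 @@ keeps the old behavior, logging every spin at D_NET but throttling the louder D_WARNING output exponentially; it folds the open-coded power-of-two test ((__I & (-__I)) == __I) into IS_PO2(++__I) and moves the macro from the GNU "a..." varargs extension to C99 __VA_ARGS__. The demo below uses the usual bit-trick definition of IS_PO2 (assumed here; the real macro lives in libcfs):

    #include <stdio.h>

    /* n & (n - 1) clears the lowest set bit, so the result is zero
     * exactly when at most one bit is set, i.e. n is 0 or a power of 2 */
    #define IS_PO2(n) (((n) & ((n) - 1)) == 0)

    int
    main(void)
    {
            int __I = 2;                 /* same start value as lst_wait_until() */
            int attempt;

            /* warnings fire on attempts 2, 6, 14, ... (when ++__I hits
             * 4, 8, 16, ...): exponential backoff for a stuck condition */
            for (attempt = 1; attempt <= 20; attempt++)
                    printf("attempt %2d -> %s\n", attempt,
                           IS_PO2(++__I) ? "D_WARNING" : "D_NET");
            return 0;
    }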