/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
* vim:expandtab:shiftwidth=8:tabstop=8:
*
- * Copyright (C) 2001, 2002 Cluster File Systems, Inc.
- * Author: Isaac Huang <isaac@clusterfs.com>
+ * GPL HEADER START
*
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf [a verbatim
+ * copy of GPLv2].
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lnet/selftest/selftest.h
+ *
+ * Author: Isaac Huang <isaac@clusterfs.com>
*/
#ifndef __SELFTEST_SELFTEST_H__
#define __SELFTEST_SELFTEST_H__
#define LNET_ONLY
-#include <libcfs/kp30.h>
+#ifndef __KERNEL__
+
+/* XXX workaround XXX */
+#ifdef HAVE_SYS_TYPES_H
+#include <sys/types.h>
+#endif
+
+#endif
#include <libcfs/libcfs.h>
#include <lnet/lnet.h>
+#include <lnet/lib-lnet.h>
#include <lnet/lib-types.h>
#include <lnet/lnetst.h>
-#ifndef __KERNEL__
-#include <liblustre.h> /* userland spinlock_t and atomic_t */
-#endif
-
#include "rpc.h"
#include "timer.h"
* serialized with respect to itself.
* - no CPU affinity, a workitem does not necessarily run on the same CPU
* that schedules it. However, this might change in the future.
- * - if a workitem is scheduled again before it has a chance to run, it
+ * - if a workitem is scheduled again before it has a chance to run, it
* runs only once.
- * - if a workitem is scheduled while it runs, it runs again after it
- * completes; this ensures that events occurring while other events are
- * being processed receive due attention. This behavior also allows a
+ * - if a workitem is scheduled while it runs, it runs again after it
+ * completes; this ensures that events occurring while other events are
+ * being processed receive due attention. This behavior also allows a
* workitem to reschedule itself.
*
* Usage notes:
}
typedef enum {
- SRPC_BULK_REQ_RCVD = 0, /* passive bulk request(PUT sink/GET source) received */
- SRPC_BULK_PUT_SENT = 1, /* active bulk PUT sent (source) */
- SRPC_BULK_GET_RPLD = 2, /* active bulk GET replied (sink) */
- SRPC_REPLY_RCVD = 3, /* incoming reply received */
- SRPC_REPLY_SENT = 4, /* outgoing reply sent */
- SRPC_REQUEST_RCVD = 5, /* incoming request received */
- SRPC_REQUEST_SENT = 6, /* outgoing request sent */
+ SRPC_BULK_REQ_RCVD = 1, /* passive bulk request(PUT sink/GET source) received */
+ SRPC_BULK_PUT_SENT = 2, /* active bulk PUT sent (source) */
+ SRPC_BULK_GET_RPLD = 3, /* active bulk GET replied (sink) */
+ SRPC_REPLY_RCVD = 4, /* incoming reply received */
+ SRPC_REPLY_SENT = 5, /* outgoing reply sent */
+ SRPC_REQUEST_RCVD = 6, /* incoming request received */
+ SRPC_REQUEST_SENT = 7, /* outgoing request sent */
} srpc_event_type_t;
/* RPC event */
#endif
} srpc_bulk_t; /* bulk descriptor */
-typedef struct srpc_peer {
- struct list_head stp_list; /* chain on peer hash */
- struct list_head stp_rpcq; /* q of non-control RPCs */
- struct list_head stp_ctl_rpcq; /* q of control RPCs */
- spinlock_t stp_lock; /* serialize */
- lnet_nid_t stp_nid;
- int stp_credits; /* available credits */
-} srpc_peer_t;
-
/* message buffer descriptor */
typedef struct {
struct list_head buf_list; /* chain on srpc_service::*_msgq */
srpc_buffer_t *srpc_reqstbuf;
srpc_bulk_t *srpc_bulk;
+ unsigned int srpc_aborted; /* being given up */
int srpc_status;
void (*srpc_done)(struct srpc_server_rpc *);
} srpc_server_rpc_t;
/* client-side state of a RPC */
typedef struct srpc_client_rpc {
struct list_head crpc_list; /* chain on user's lists */
- struct list_head crpc_privl; /* chain on srpc_peer_t::*rpcq */
spinlock_t crpc_lock; /* serialize */
int crpc_service;
atomic_t crpc_refcount;
stt_timer_t crpc_timer;
swi_workitem_t crpc_wi;
lnet_process_id_t crpc_dest;
- srpc_peer_t *crpc_peer;
void (*crpc_done)(struct srpc_client_rpc *);
void (*crpc_fini)(struct srpc_client_rpc *);
int (*sv_bulk_ready) (srpc_server_rpc_t *, int);
} srpc_service_t;
-#define SFW_POST_BUFFERS 8
+#define SFW_POST_BUFFERS 256
#define SFW_SERVICE_CONCURRENCY (SFW_POST_BUFFERS/2)
typedef struct {
stt_timer_t sn_timer;
struct list_head sn_batches; /* list of batches */
char sn_name[LST_NAME_SIZE];
+ atomic_t sn_refcount;
atomic_t sn_brw_errors;
+ atomic_t sn_ping_errors;
} sfw_session_t;
#define sfw_sid_equal(sid0, sid1) ((sid0).ses_nid == (sid1).ses_nid && \
typedef struct {
int (*tso_init)(struct sfw_test_instance *tsi); /* initialize test client */
void (*tso_fini)(struct sfw_test_instance *tsi); /* finalize test client */
- int (*tso_prep_rpc)(struct sfw_test_unit *tsu,
+ int (*tso_prep_rpc)(struct sfw_test_unit *tsu,
lnet_process_id_t dest,
srpc_client_rpc_t **rpc); /* prep a tests rpc */
void (*tso_done_rpc)(struct sfw_test_unit *tsu,
sfw_test_client_ops_t *tsi_ops; /* test client operations */
/* public parameter for all test units */
- int tsi_is_client:1; /* is test client */
- int tsi_stop_onerr:1; /* stop on error */
- int tsi_concur; /* concurrency */
- int tsi_loop; /* loop count */
+ int tsi_is_client:1; /* is test client */
+ int tsi_stoptsu_onerr:1; /* stop tsu on error */
+ int tsi_concur; /* concurrency */
+ int tsi_loop; /* loop count */
/* status of test instance */
spinlock_t tsi_lock; /* serialize */
} tsi_u;
} sfw_test_instance_t;
-/* XXX: trailing (CFS_PAGE_SIZE % sizeof(lnet_process_id_t)) bytes at
+/* XXX: trailing (CFS_PAGE_SIZE % sizeof(lnet_process_id_t)) bytes at
* the end of pages are not used */
#define SFW_MAX_CONCUR LST_MAX_CONCUR
#define SFW_ID_PER_PAGE (CFS_PAGE_SIZE / sizeof(lnet_process_id_t))
struct list_head tsu_list; /* chain on lst_test_instance */
lnet_process_id_t tsu_dest; /* id of dest node */
int tsu_loop; /* loop count of the test */
- int tsu_error; /* error code */
sfw_test_instance_t *tsu_instance; /* pointer to test instance */
void *tsu_private; /* private data */
swi_workitem_t tsu_worker; /* workitem of the test unit */
void sfw_free_pages(srpc_server_rpc_t *rpc);
void sfw_add_bulk_page(srpc_bulk_t *bk, cfs_page_t *pg, int i);
int sfw_alloc_pages(srpc_server_rpc_t *rpc, int npages, int sink);
+int sfw_make_session (srpc_mksn_reqst_t *request, srpc_mksn_reply_t *reply);
srpc_client_rpc_t *
-srpc_create_client_rpc(lnet_process_id_t peer, int service,
+srpc_create_client_rpc(lnet_process_id_t peer, int service,
int nbulkiov, int bulklen,
void (*rpc_done)(srpc_client_rpc_t *),
void (*rpc_fini)(srpc_client_rpc_t *), void *priv);
int srpc_add_service(srpc_service_t *sv);
int srpc_remove_service(srpc_service_t *sv);
void srpc_shutdown_service(srpc_service_t *sv);
+void srpc_abort_service(srpc_service_t *sv);
int srpc_finish_service(srpc_service_t *sv);
int srpc_service_add_buffers(srpc_service_t *sv, int nbuffer);
void srpc_service_remove_buffers(srpc_service_t *sv, int nbuffer);
{
LASSERT (rpc != NULL);
LASSERT (!srpc_event_pending(rpc));
- LASSERT (list_empty(&rpc->crpc_privl));
LASSERT (atomic_read(&rpc->crpc_refcount) == 0);
#ifndef __KERNEL__
LASSERT (rpc->crpc_bulk.bk_pages == NULL);
crpc_bulk.bk_iovs[nbulkiov]));
CFS_INIT_LIST_HEAD(&rpc->crpc_list);
- CFS_INIT_LIST_HEAD(&rpc->crpc_privl);
swi_init_workitem(&rpc->crpc_wi, rpc, srpc_send_rpc);
spin_lock_init(&rpc->crpc_lock);
atomic_set(&rpc->crpc_refcount, 1); /* 1 ref for caller */
rpc->crpc_bulk.bk_niov = nbulkiov;
rpc->crpc_done = rpc_done;
rpc->crpc_fini = rpc_fini;
- rpc->crpc_reqstmdh =
- rpc->crpc_replymdh =
- rpc->crpc_bulk.bk_mdh = LNET_INVALID_HANDLE;
+ LNetInvalidateHandle(&rpc->crpc_reqstmdh);
+ LNetInvalidateHandle(&rpc->crpc_replymdh);
+ LNetInvalidateHandle(&rpc->crpc_bulk.bk_mdh);
/* no event is expected at this point */
rpc->crpc_bulkev.ev_fired =
return;
}
-static inline const char *
+static inline const char *
swi_state2str (int state)
{
#define STATE2STR(x) case x: return #x
switch(state) {
- default:
+ default:
LBUG();
STATE2STR(SWI_STATE_NEWBORN);
STATE2STR(SWI_STATE_REPLY_SUBMITTED);
#ifndef __KERNEL__
int stt_poll_interval(void);
+int sfw_session_removed(void);
int stt_check_events(void);
int swi_check_events(void);
#endif
-#define lst_wait_until(cond, lock, fmt, a...) \
+#define lst_wait_until(cond, lock, fmt, ...) \
do { \
int __I = 2; \
while (!(cond)) { \
- __I++; \
- CDEBUG(((__I & (-__I)) == __I) ? D_WARNING : \
- D_NET, /* 2**n? */ \
- fmt, ## a); \
+ CDEBUG(IS_PO2(++__I) ? D_WARNING : D_NET, \
+ fmt, ## __VA_ARGS__); \
spin_unlock(&(lock)); \
\
selftest_wait_events(); \