/* Return non-zero when 'wh' is the distinguished "none" wire handle,
 * i.e. both 64-bit cookies carry the invalid-cookie sentinel.  The
 * sentinel is all-ones, so the test is byte-flip insensitive (works
 * regardless of on-wire endianness). */
static inline int lnet_is_wire_handle_none (lnet_handle_wire_t *wh)
{
- return (wh->wh_interface_cookie == LNET_WIRE_HANDLE_NONE.wh_interface_cookie &&
- wh->wh_object_cookie == LNET_WIRE_HANDLE_NONE.wh_object_cookie);
+ return (wh->wh_interface_cookie == LNET_WIRE_HANDLE_COOKIE_NONE &&
+ wh->wh_object_cookie == LNET_WIRE_HANDLE_COOKIE_NONE);
}
static inline int lnet_md_exhausted (lnet_libmd_t *md)
{
/* NEVER called with liblock held */
lnet_libmd_t *md;
- int size;
+ unsigned int size;
unsigned int niov;
if ((umd->options & LNET_MD_KIOV) != 0) {
lnet_md_free (lnet_libmd_t *md)
{
/* ALWAYS called with liblock held */
- int size;
+ unsigned int size;
if ((md->md_options & LNET_MD_KIOV) != 0)
size = offsetof(lnet_libmd_t, md_iov.kiov[md->md_niov]);
lnet_eq2handle (lnet_handle_eq_t *handle, lnet_eq_t *eq)
{
if (eq == NULL) {
- *handle = LNET_EQ_NONE;
+ LNetInvalidateHandle(handle);
return;
}
* one epoch (i.e. new cookie when the interface restarts or the node
* reboots). The object cookie only matches one object on that interface
* during that object's lifetime (i.e. no cookie re-use). */
+#include <libcfs/libcfs_pack.h>
typedef struct {
__u64 wh_interface_cookie;
__u64 wh_object_cookie;
} WIRE_ATTR lnet_handle_wire_t;
-
-/* byte-flip insensitive! */
-#define LNET_WIRE_HANDLE_NONE \
-((const lnet_handle_wire_t) {.wh_interface_cookie = -1, .wh_object_cookie = -1})
+#include <libcfs/libcfs_unpack.h>
typedef enum {
LNET_MSG_ACK = 0,
* byte boundary in the message header. Note that all types used in these
* wire structs MUST be fixed size and the smaller types are placed at the
* end. */
+#include <libcfs/libcfs_pack.h>
typedef struct lnet_ack {
lnet_handle_wire_t dst_wmd;
__u64 match_bits;
__u32 acr_version; /* protocol version */
__u64 acr_nid; /* target NID */
} WIRE_ATTR lnet_acceptor_connreq_t;
+#include <libcfs/libcfs_unpack.h>
#define LNET_PROTO_ACCEPTOR_VERSION 1
} lnet_libhandle_t;
#define lh_entry(ptr, type, member) \
- ((type *)((char *)(ptr)-(unsigned long)(&((type *)0)->member)))
+ ((type *)((char *)(ptr)-(char *)(&((type *)0)->member)))
typedef struct lnet_eq {
struct list_head eq_list;
lnet_kiov_t rb_kiov[0]; /* the buffer space */
} lnet_rtrbuf_t;
+#include <libcfs/libcfs_pack.h>
typedef struct {
__u32 msgs_alloc;
__u32 msgs_max;
__u64 recv_length;
__u64 route_length;
__u64 drop_length;
-} lnet_counters_t;
+} WIRE_ATTR lnet_counters_t;
+#include <libcfs/libcfs_unpack.h>
#define LNET_PEER_HASHSIZE 503 /* prime! */
__u64 ses_stamp; /* time stamp */
} lst_sid_t; /*** session id */
-#define LST_INVALID_SID ((const lst_sid_t){.ses_nid = LNET_NID_ANY,\
- .ses_stamp = -1})
+extern lst_sid_t LST_INVALID_SID;
typedef struct {
__u64 bat_id; /* unique id in session */
} lst_test_ping_param_t;
/* more tests */
-
+#include <libcfs/libcfs_pack.h>
typedef struct {
__u32 errors;
__u32 rpcs_sent;
__u32 rpcs_expired;
__u64 bulk_get;
__u64 bulk_put;
-} srpc_counters_t;
+} WIRE_ATTR srpc_counters_t;
typedef struct {
__u32 active_tests;
__u32 zombie_sessions;
__u32 brw_errors;
__u32 ping_errors;
-} sfw_counters_t;
+} WIRE_ATTR sfw_counters_t;
+#include <libcfs/libcfs_unpack.h>
#endif
#define SOCKLND_CONN_BULK_OUT 3
#define SOCKLND_CONN_NTYPES 4
+#include <libcfs/libcfs_pack.h>
typedef struct {
__u32 kshm_magic; /* magic number of socklnd message */
__u32 kshm_version; /* version of socklnd message */
typedef struct {
lnet_hdr_t ksnm_hdr; /* lnet hdr */
- char ksnm_payload[0];/* lnet payload */
+
+ /*
+ * ksnm_payload is removed because of a limitation of the WinNT
+ * compiler: a zero-sized array may only be placed at the tail of
+ * [nested] structure definitions. The lnet payload is stored
+ * immediately after the body of struct ksock_lnet_msg_t instead.
+ */
} WIRE_ATTR ksock_lnet_msg_t;
typedef struct {
} WIRE_ATTR ksm_u;
} WIRE_ATTR ksock_msg_t;
+#include <libcfs/libcfs_unpack.h>
+
#define KSOCK_MSG_NOOP 0xc0 /* ksm_u empty */
#define KSOCK_MSG_LNET 0xc1 /* lnet msg */
typedef lnet_handle_any_t lnet_handle_md_t;
typedef lnet_handle_any_t lnet_handle_me_t;
-#define LNET_INVALID_HANDLE \
- ((const lnet_handle_any_t){.cookie = -1})
-#define LNET_EQ_NONE LNET_INVALID_HANDLE
/* Sentinel cookie value (all-ones) marking an invalid/unset handle.
 * Replaces the old compound-literal LNET_INVALID_HANDLE / LNET_EQ_NONE
 * macros, which some compilers (e.g. the Windows build) cannot digest. */
+#define LNET_WIRE_HANDLE_COOKIE_NONE (-1)
+
/* Mark 'h' invalid by storing the sentinel cookie; pairs with
 * LNetHandleIsInvalid() below. */
+static inline void LNetInvalidateHandle(lnet_handle_any_t *h)
+{
+ h->cookie = LNET_WIRE_HANDLE_COOKIE_NONE;
+}
/* Two LNet handles are considered identical iff their underlying
 * cookie values match; returns non-zero on equality. */
static inline int LNetHandleIsEqual (lnet_handle_any_t h1, lnet_handle_any_t h2)
{
return (h1.cookie == h2.cookie);
}
/* Return non-zero when 'h' carries the invalid-cookie sentinel, i.e.
 * it was never initialized or was cleared via LNetInvalidateHandle(). */
+static inline int LNetHandleIsInvalid(lnet_handle_any_t h)
+{
+ return (LNET_WIRE_HANDLE_COOKIE_NONE == h.cookie);
+}
+
+
typedef struct {
lnet_nid_t nid;
lnet_pid_t pid; /* node id / process id */
#error Do not #include this file directly. #include <lnet/lib-types.h> instead
#endif
/* NOTE(review): the lib_ni_lock_t wrappers below are being removed
 * outright.  The deletion is correct — beyond being dead scaffolding,
 * the old code was buggy: lib_ni_lock() declared 'flags' as plain
 * 'int' (spin_lock_irqsave requires 'unsigned long'), and
 * lib_ni_unlock() restored a hard-coded flags value of 0 instead of
 * the value saved at lock time, which could corrupt the caller's
 * interrupt state. */
-#include <libcfs/libcfs.h>
-
-typedef struct {
- spinlock_t lock;
-} lib_ni_lock_t;
-
-static inline void lib_ni_lock_init(lib_ni_lock_t *l)
-{
- spin_lock_init(&l->lock);
-}
-
-static inline void lib_ni_lock_fini(lib_ni_lock_t *l)
-{}
-
-static inline void lib_ni_lock(lib_ni_lock_t *l)
-{
- int flags;
- spin_lock_irqsave(&l->lock, flags);
-}
-
-static inline void lib_ni_unlock(lib_ni_lock_t *l)
-{
- spin_unlock_irqrestore(&l->lock, 0);
-}
-
-
#endif
-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
+/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-
+ * vim:expandtab:shiftwidth=4:tabstop=4:
*
* GPL HEADER START
*
#endif
#ifdef __KERNEL__
-
-#include <libcfs/libcfs.h>
-#include <lnet/lib-lnet.h>
-
-/*
- * tdilnd routines
- */
-
-
-PUCHAR
-KsNtStatusToString (IN NTSTATUS Status);
-
-
-VOID
-KsPrintf(
- IN LONG DebugPrintLevel,
- IN PCHAR DebugMessage,
- IN ...
- );
-
-
-ksock_mdl_t *
-ks_lock_iovs(
- IN struct iovec *iov,
- IN int niov,
- IN int recv,
- IN int * len
- );
-
-ksock_mdl_t *
-ks_lock_kiovs(
- IN lnet_kiov_t * kiov,
- IN int nkiov,
- IN int recv,
- IN int * len
- );
-
-int
-ks_send_mdl(
- ksock_tconn_t * tconn,
- void * tx,
- ksock_mdl_t * mdl,
- int len,
- int flags
- );
-
-int
-ks_query_data(
- ksock_tconn_t * tconn,
- size_t * size,
- int bIsExpedited);
+#include <lnet/types.h>
int
-ks_recv_mdl(
- ksock_tconn_t * tconn,
- ksock_mdl_t * mdl,
- int size,
- int flags
- );
+ks_query_iovs_length(struct iovec *iov, int niov);
int
-ks_get_tcp_option (
- ksock_tconn_t * tconn,
- ULONG ID,
- PVOID OptionValue,
- PULONG Length
- );
-
-NTSTATUS
-ks_set_tcp_option (
- ksock_tconn_t * tconn,
- ULONG ID,
- PVOID OptionValue,
- ULONG Length
- );
+ks_query_kiovs_length(lnet_kiov_t *kiov, int nkiov);
int
-ks_bind_tconn (
- ksock_tconn_t * tconn,
- ksock_tconn_t * parent,
- ulong_ptr addr,
- unsigned short port
- );
+ks_send_buf(ks_tconn_t *, char *, int, int, int);
int
-ks_build_tconn(
- ksock_tconn_t * tconn,
- ulong_ptr addr,
- unsigned short port
- );
+ks_recv_buf(ks_tconn_t *, char *, int, int, int);
int
-ks_disconnect_tconn(
- ksock_tconn_t * tconn,
- ulong_ptr flags
- );
-
-void
-ks_abort_tconn(
- ksock_tconn_t * tconn
- );
+ks_send_iovs(ks_tconn_t *, struct iovec *, int, int, int);
int
-ks_query_local_ipaddr(
- ksock_tconn_t * tconn
- );
+ks_recv_iovs(ks_tconn_t *, struct iovec *, int, int, int);
int
-ks_tconn_write (ksock_tconn_t *tconn, void *buffer, int nob);
+ks_send_kiovs(ks_tconn_t *, lnet_kiov_t *, int, int, int);
int
-ks_tconn_read (ksock_tconn_t * tconn, void *buffer, int nob);
-
-NTSTATUS
-KsTcpCompletionRoutine(
- IN PDEVICE_OBJECT DeviceObject,
- IN PIRP Irp,
- IN PVOID Context
- );
-
-NTSTATUS
-KsDisconectCompletionRoutine (
- IN PDEVICE_OBJECT DeviceObject,
- IN PIRP Irp,
- IN PVOID Context
- );
-
-NTSTATUS
-KsTcpReceiveCompletionRoutine(
- IN PIRP Irp,
- IN PKS_TCP_COMPLETION_CONTEXT Context
- );
-
-NTSTATUS
-KsTcpSendCompletionRoutine(
- IN PIRP Irp,
- IN PKS_TCP_COMPLETION_CONTEXT Context
- );
-
-NTSTATUS
-KsAcceptCompletionRoutine(
- IN PDEVICE_OBJECT DeviceObject,
- IN PIRP Irp,
- IN PVOID Context
- );
-
-
-NTSTATUS
-KsConnectEventHandler(
- IN PVOID TdiEventContext,
- IN LONG RemoteAddressLength,
- IN PVOID RemoteAddress,
- IN LONG UserDataLength,
- IN PVOID UserData,
- IN LONG OptionsLength,
- IN PVOID Options,
- OUT CONNECTION_CONTEXT * ConnectionContext,
- OUT PIRP * AcceptIrp
- );
-
-NTSTATUS
-KsDisconnectEventHandler(
- IN PVOID TdiEventContext,
- IN CONNECTION_CONTEXT ConnectionContext,
- IN LONG DisconnectDataLength,
- IN PVOID DisconnectData,
- IN LONG DisconnectInformationLength,
- IN PVOID DisconnectInformation,
- IN ULONG DisconnectFlags
- );
-
-NTSTATUS
-KsTcpReceiveEventHandler(
- IN PVOID TdiEventContext,
- IN CONNECTION_CONTEXT ConnectionContext,
- IN ULONG ReceiveFlags,
- IN ULONG BytesIndicated,
- IN ULONG BytesAvailable,
- OUT ULONG * BytesTaken,
- IN PVOID Tsdu,
- OUT PIRP * IoRequestPacket
- );
-
-NTSTATUS
-KsTcpReceiveExpeditedEventHandler(
- IN PVOID TdiEventContext,
- IN CONNECTION_CONTEXT ConnectionContext,
- IN ULONG ReceiveFlags,
- IN ULONG BytesIndicated,
- IN ULONG BytesAvailable,
- OUT ULONG * BytesTaken,
- IN PVOID Tsdu,
- OUT PIRP * IoRequestPacket
- );
-
-NTSTATUS
-KsTcpChainedReceiveEventHandler (
- IN PVOID TdiEventContext, // the event context
- IN CONNECTION_CONTEXT ConnectionContext,
- IN ULONG ReceiveFlags,
- IN ULONG ReceiveLength,
- IN ULONG StartingOffset, // offset of start of client data in TSDU
- IN PMDL Tsdu, // TSDU data chain
- IN PVOID TsduDescriptor // for call to TdiReturnChainedReceives
- );
-
-NTSTATUS
-KsTcpChainedReceiveExpeditedEventHandler (
- IN PVOID TdiEventContext, // the event context
- IN CONNECTION_CONTEXT ConnectionContext,
- IN ULONG ReceiveFlags,
- IN ULONG ReceiveLength,
- IN ULONG StartingOffset, // offset of start of client data in TSDU
- IN PMDL Tsdu, // TSDU data chain
- IN PVOID TsduDescriptor // for call to TdiReturnChainedReceives
- );
-
-
-
-VOID
-KsDisconnectHelper(PKS_DISCONNECT_WORKITEM WorkItem);
-
-
-ULONG
-ks_tdi_send_flags(ULONG SockFlags);
-
-PIRP
-KsBuildTdiIrp(
- IN PDEVICE_OBJECT DeviceObject
- );
-
-NTSTATUS
-KsSubmitTdiIrp(
- IN PDEVICE_OBJECT DeviceObject,
- IN PIRP Irp,
- IN BOOLEAN bSynchronous,
- OUT PULONG Information
- );
-
-NTSTATUS
-KsOpenControl(
- IN PUNICODE_STRING DeviceName,
- OUT HANDLE * Handle,
- OUT PFILE_OBJECT * FileObject
- );
-
-NTSTATUS
-KsCloseControl(
- IN HANDLE Handle,
- IN PFILE_OBJECT FileObject
- );
-
-NTSTATUS
-KsOpenAddress(
- IN PUNICODE_STRING DeviceName,
- IN PTRANSPORT_ADDRESS pAddress,
- IN ULONG AddressLength,
- OUT HANDLE * Handle,
- OUT PFILE_OBJECT * FileObject
- );
-
-NTSTATUS
-KsCloseAddress(
- IN HANDLE Handle,
- IN PFILE_OBJECT FileObject
- );
-
-NTSTATUS
-KsOpenConnection(
- IN PUNICODE_STRING DeviceName,
- IN CONNECTION_CONTEXT ConnectionContext,
- OUT HANDLE * Handle,
- OUT PFILE_OBJECT * FileObject
- );
-
-NTSTATUS
-KsCloseConnection(
- IN HANDLE Handle,
- IN PFILE_OBJECT FileObject
- );
-
-NTSTATUS
-KsAssociateAddress(
- IN HANDLE AddressHandle,
- IN PFILE_OBJECT ConnectionObject
- );
-
-
-NTSTATUS
-KsDisassociateAddress(
- IN PFILE_OBJECT ConnectionObject
- );
-
-
-NTSTATUS
-KsSetEventHandlers(
- IN PFILE_OBJECT AddressObject,
- IN PVOID EventContext,
- IN PKS_EVENT_HANDLERS Handlers
- );
-
-
-NTSTATUS
-KsQueryProviderInfo(
- PWSTR TdiDeviceName,
- PTDI_PROVIDER_INFO ProviderInfo
- );
-
-NTSTATUS
-KsQueryAddressInfo(
- IN PFILE_OBJECT FileObject,
- OUT PTDI_ADDRESS_INFO AddressInfo,
- OUT PULONG AddressSize
- );
-
-NTSTATUS
-KsQueryConnectionInfo(
- IN PFILE_OBJECT ConnectionObject,
- OUT PTDI_CONNECTION_INFO ConnectionInfo,
- OUT PULONG ConnectionSize
- );
-
-ULONG
-KsInitializeTdiAddress(
- IN OUT PTA_IP_ADDRESS pTransportAddress,
- IN ULONG IpAddress,
- IN USHORT IpPort
- );
-
-ULONG
-KsQueryMdlsSize (IN PMDL Mdl);
-
-
-ULONG
-KsQueryTdiAddressLength(
- OUT PTRANSPORT_ADDRESS pTransportAddress
- );
-
-NTSTATUS
-KsQueryIpAddress(
- IN PFILE_OBJECT FileObject,
- OUT PVOID TdiAddress,
- OUT ULONG* AddressLength
- );
-
-
-NTSTATUS
-KsErrorEventHandler(
- IN PVOID TdiEventContext,
- IN NTSTATUS Status
- );
-
-int
-ks_set_handlers(
- ksock_tconn_t * tconn
- );
-
-
-VOID
-KsPrintProviderInfo(
- PWSTR DeviceName,
- PTDI_PROVIDER_INFO ProviderInfo
- );
-
-ksock_tconn_t *
-ks_create_tconn();
-
-void
-ks_free_tconn(
- ksock_tconn_t * tconn
- );
-
-void
-ks_init_listener(
- ksock_tconn_t * tconn
- );
-
-void
-ks_init_sender(
- ksock_tconn_t * tconn
- );
-
-void
-ks_init_child(
- ksock_tconn_t * tconn
- );
-
-void
-ks_get_tconn(
- ksock_tconn_t * tconn
- );
-
-void
-ks_put_tconn(
- ksock_tconn_t * tconn
- );
-
-int
-ks_reset_handlers(
- ksock_tconn_t * tconn
- );
-
-void
-ks_destroy_tconn(
- ksock_tconn_t * tconn
- );
-
-
-PKS_TSDU
-KsAllocateKsTsdu();
-
-VOID
-KsPutKsTsdu(
- PKS_TSDU KsTsdu
- );
-
-VOID
-KsFreeKsTsdu(
- PKS_TSDU KsTsdu
- );
-
-VOID
-KsInitializeKsTsdu(
- PKS_TSDU KsTsdu,
- ULONG Length
- );
-
-
-VOID
-KsInitializeKsTsduMgr(
- PKS_TSDUMGR TsduMgr
- );
-
-VOID
-KsInitializeKsChain(
- PKS_CHAIN KsChain
- );
-
-NTSTATUS
-KsCleanupTsduMgr(
- PKS_TSDUMGR KsTsduMgr
- );
-
-NTSTATUS
-KsCleanupKsChain(
- PKS_CHAIN KsChain
- );
-
-NTSTATUS
-KsCleanupTsdu(
- ksock_tconn_t * tconn
- );
-
-NTSTATUS
-KsCopyMdlChainToMdlChain(
- IN PMDL SourceMdlChain,
- IN ULONG SourceOffset,
- IN PMDL DestinationMdlChain,
- IN ULONG DestinationOffset,
- IN ULONG BytesTobecopied,
- OUT PULONG BytesCopied
- );
-
-ULONG
-KsQueryMdlsSize (PMDL Mdl);
-
-NTSTATUS
-KsLockUserBuffer (
- IN PVOID UserBuffer,
- IN BOOLEAN bPaged,
- IN ULONG Length,
- IN LOCK_OPERATION Operation,
- OUT PMDL * pMdl
- );
-
-PVOID
-KsMapMdlBuffer (PMDL Mdl);
-
-VOID
-KsReleaseMdl ( IN PMDL Mdl,
- IN int Paged );
-
-int
-ks_lock_buffer (
- void * buffer,
- int paged,
- int length,
- LOCK_OPERATION access,
- ksock_mdl_t ** kmdl
- );
-
-void *
-ks_map_mdl (ksock_mdl_t * mdl);
-
-void
-ks_release_mdl (ksock_mdl_t *mdl, int paged);
+ks_recv_kiovs(ks_tconn_t *, lnet_kiov_t *, int, int, int);
#endif /* __KERNEL__ */
-
#endif
#include "socklnd.h"
-lnd_t the_ksocklnd = {
- .lnd_type = SOCKLND,
- .lnd_startup = ksocknal_startup,
- .lnd_shutdown = ksocknal_shutdown,
- .lnd_ctl = ksocknal_ctl,
- .lnd_send = ksocknal_send,
- .lnd_recv = ksocknal_recv,
- .lnd_notify = ksocknal_notify,
- .lnd_accept = ksocknal_accept,
-};
-
+lnd_t the_ksocklnd;
ksock_nal_data_t ksocknal_data;
ksock_interface_t *
write_lock_bh (&ksocknal_data.ksnd_global_lock);
if (id.nid != LNET_NID_ANY)
- lo = hi = ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers;
+ lo = hi = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers);
else {
lo = 0;
hi = ksocknal_data.ksnd_peer_hash_size - 1;
if (irq != 0) { /* Hardware NIC */
info->ksni_valid = 1;
- info->ksni_sched = sched - ksocknal_data.ksnd_schedulers;
+ info->ksni_sched = (unsigned int)(sched - ksocknal_data.ksnd_schedulers);
/* no overflow... */
- LASSERT (info->ksni_sched == sched - ksocknal_data.ksnd_schedulers);
+ LASSERT (info->ksni_sched == (unsigned int)(sched - ksocknal_data.ksnd_schedulers));
}
return (sched);
{
ksock_route_t *route;
- list_for_each_entry (route, &peer->ksnp_routes, ksnr_list) {
+ cfs_list_for_each_entry_typed (route, &peer->ksnp_routes,
+ ksock_route_t, ksnr_list) {
if (route->ksnr_ipaddr == ipaddr)
return route->ksnr_connecting;
peer->ksnp_accepting == 0 &&
ksocknal_find_connecting_route_locked(peer) == NULL) {
notify = 1;
- last_alive = cfs_time_current_sec() -
+ last_alive = (time_t) (cfs_time_current_sec() -
cfs_duration_sec(cfs_time_current() -
- peer->ksnp_last_alive);
+ peer->ksnp_last_alive));
}
read_unlock (&ksocknal_data.ksnd_global_lock);
spin_lock(&peer->ksnp_lock);
- list_for_each_entry_safe(tx, tmp, &peer->ksnp_zc_req_list, tx_zc_list) {
+ cfs_list_for_each_entry_safe_typed(tx, tmp, &peer->ksnp_zc_req_list,
+ ksock_tx_t, tx_zc_list) {
if (tx->tx_conn != conn)
continue;
write_lock_bh (&ksocknal_data.ksnd_global_lock);
if (id.nid != LNET_NID_ANY)
- lo = hi = ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers;
+ lo = hi = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers);
else {
lo = 0;
hi = ksocknal_data.ksnd_peer_hash_size - 1;
{
/* The router is telling me she's been notified of a change in
* gateway state.... */
- lnet_process_id_t id = {.nid = gw_nid, .pid = LNET_PID_ANY};
+ lnet_process_id_t id = {0};
+
+ id.nid = gw_nid;
+ id.pid = LNET_PID_ANY;
CDEBUG (D_NET, "gw %s %s\n", libcfs_nid2str(gw_nid),
alive ? "up" : "down");
int
ksocknal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
{
+ lnet_process_id_t id = {0};
struct libcfs_ioctl_data *data = arg;
int rc;
read_lock (&ksocknal_data.ksnd_global_lock);
- if (data->ioc_count < 0 ||
- data->ioc_count >= net->ksnn_ninterfaces) {
+ if (data->ioc_count >= (__u32)net->ksnn_ninterfaces) {
rc = -ENOENT;
} else {
rc = 0;
data->ioc_u32[0]); /* IP address */
case IOC_LIBCFS_GET_PEER: {
- lnet_process_id_t id = {0,};
__u32 myip = 0;
__u32 ip = 0;
int port = 0;
return 0;
}
- case IOC_LIBCFS_ADD_PEER: {
- lnet_process_id_t id = {.nid = data->ioc_nid,
- .pid = LUSTRE_SRV_LNET_PID};
+ case IOC_LIBCFS_ADD_PEER:
+ id.nid = data->ioc_nid;
+ id.pid = LUSTRE_SRV_LNET_PID;
return ksocknal_add_peer (ni, id,
data->ioc_u32[0], /* IP */
data->ioc_u32[1]); /* port */
- }
- case IOC_LIBCFS_DEL_PEER: {
- lnet_process_id_t id = {.nid = data->ioc_nid,
- .pid = LNET_PID_ANY};
+
+ case IOC_LIBCFS_DEL_PEER:
+ id.nid = data->ioc_nid;
+ id.pid = LNET_PID_ANY;
return ksocknal_del_peer (ni, id,
data->ioc_u32[0]); /* IP */
- }
+
case IOC_LIBCFS_GET_CONN: {
int txmem;
int rxmem;
data->ioc_u32[1] = conn->ksnc_port;
data->ioc_u32[2] = conn->ksnc_myipaddr;
data->ioc_u32[3] = conn->ksnc_type;
- data->ioc_u32[4] = conn->ksnc_scheduler -
- ksocknal_data.ksnd_schedulers;
+ data->ioc_u32[4] = (__u32)(conn->ksnc_scheduler -
+ ksocknal_data.ksnd_schedulers);
data->ioc_u32[5] = rxmem;
data->ioc_u32[6] = conn->ksnc_peer->ksnp_id.pid;
ksocknal_conn_decref(conn);
return 0;
}
- case IOC_LIBCFS_CLOSE_CONNECTION: {
- lnet_process_id_t id = {.nid = data->ioc_nid,
- .pid = LNET_PID_ANY};
-
+ case IOC_LIBCFS_CLOSE_CONNECTION:
+ id.nid = data->ioc_nid;
+ id.pid = LNET_PID_ANY;
return ksocknal_close_matching_conns (id,
data->ioc_u32[0]);
- }
+
case IOC_LIBCFS_REGISTER_MYNID:
/* Ignore if this is a noop */
if (data->ioc_nid == ni->ni_nid)
libcfs_nid2str(ni->ni_nid));
return -EINVAL;
- case IOC_LIBCFS_PUSH_CONNECTION: {
- lnet_process_id_t id = {.nid = data->ioc_nid,
- .pid = LNET_PID_ANY};
-
+ case IOC_LIBCFS_PUSH_CONNECTION:
+ id.nid = data->ioc_nid;
+ id.pid = LNET_PID_ANY;
return ksocknal_push(ni, id);
- }
+
default:
return -EINVAL;
}
*ksocknal_tunables.ksnd_nconnds = 2;
for (i = 0; i < *ksocknal_tunables.ksnd_nconnds; i++) {
- rc = ksocknal_thread_start (ksocknal_connd, (void *)((long)i));
+ rc = ksocknal_thread_start (ksocknal_connd,
+ (void *)((ulong_ptr_t)i));
if (rc != 0) {
CERROR("Can't spawn socknal connd: %d\n", rc);
goto failed;
{
ksock_net_t *net = ni->ni_data;
int i;
- lnet_process_id_t anyid = {.nid = LNET_NID_ANY,
- .pid = LNET_PID_ANY};
+ lnet_process_id_t anyid = {0};
+
+ anyid.nid = LNET_NID_ANY;
+ anyid.pid = LNET_PID_ANY;
LASSERT(ksocknal_data.ksnd_init == SOCKNAL_INIT_ALL);
LASSERT(ksocknal_data.ksnd_nnets > 0);
ksocknal_module_fini (void)
{
lnet_unregister_lnd(&the_ksocklnd);
- ksocknal_lib_tunables_fini();
+ ksocknal_tunables_fini();
}
int __init
/* check ksnr_connected/connecting field large enough */
CLASSERT(SOCKLND_CONN_NTYPES <= 4);
- rc = ksocknal_lib_tunables_init();
+ /* initialize the_ksocklnd */
+ the_ksocklnd.lnd_type = SOCKLND;
+ the_ksocklnd.lnd_startup = ksocknal_startup;
+ the_ksocklnd.lnd_shutdown = ksocknal_shutdown;
+ the_ksocklnd.lnd_ctl = ksocknal_ctl;
+ the_ksocklnd.lnd_send = ksocknal_send;
+ the_ksocklnd.lnd_recv = ksocknal_recv;
+ the_ksocklnd.lnd_notify = ksocknal_notify;
+ the_ksocklnd.lnd_accept = ksocknal_accept;
+
+ rc = ksocknal_tunables_init();
if (rc != 0)
return rc;
} tx_frags;
} ksock_tx_t;
-#define KSOCK_NOOP_TX_SIZE offsetof(ksock_tx_t, tx_frags.paged.kiov[0])
+#define KSOCK_NOOP_TX_SIZE ((int)offsetof(ksock_tx_t, tx_frags.paged.kiov[0]))
/* network zero copy callback descriptor embedded in ksock_tx_t */
extern int ksocknal_lib_get_conn_tunables (ksock_conn_t *conn, int *txmem,
int *rxmem, int *nagle);
+extern int ksocknal_tunables_init(void);
+extern void ksocknal_tunables_fini(void);
extern int ksocknal_lib_tunables_init(void);
extern void ksocknal_lib_tunables_fini(void);
do {
LASSERT (tx->tx_niov > 0);
- if (nob < iov->iov_len) {
- iov->iov_base = (void *)(((unsigned long)(iov->iov_base)) + nob);
+ if (nob < (int) iov->iov_len) {
+ iov->iov_base = (void *)((char *)iov->iov_base + nob);
iov->iov_len -= nob;
return (rc);
}
do {
LASSERT(tx->tx_nkiov > 0);
- if (nob < kiov->kiov_len) {
+ if (nob < (int)kiov->kiov_len) {
kiov->kiov_offset += nob;
kiov->kiov_len -= nob;
return rc;
}
- nob -= kiov->kiov_len;
+ nob -= (int)kiov->kiov_len;
tx->tx_kiov = ++kiov;
tx->tx_nkiov--;
} while (nob != 0);
do {
LASSERT (conn->ksnc_rx_niov > 0);
- if (nob < iov->iov_len) {
+ if (nob < (int)iov->iov_len) {
iov->iov_len -= nob;
- iov->iov_base = (void *)(((unsigned long)iov->iov_base) + nob);
+ iov->iov_base = (void *)((char *)iov->iov_base + nob);
return (-EAGAIN);
}
do {
LASSERT (conn->ksnc_rx_nkiov > 0);
- if (nob < kiov->kiov_len) {
+ if (nob < (int) kiov->kiov_len) {
kiov->kiov_offset += nob;
kiov->kiov_len -= nob;
return -EAGAIN;
} else {
/* lnet packet */
hdr_nob = (c->ksnc_proto == &ksocknal_protocol_v2x)?
- offsetof(ksock_msg_t, ksm_u.lnetmsg.ksnm_payload):
- sizeof(lnet_hdr_t);
+ sizeof(ksock_msg_t) : sizeof(lnet_hdr_t);
}
switch (c->ksnc_type) {
* We always expect at least 1 mapped fragment containing the
* complete ksocknal message header. */
LASSERT (lnet_iov_nob (tx->tx_niov, tx->tx_iov) +
- lnet_kiov_nob (tx->tx_nkiov, tx->tx_kiov) == tx->tx_nob);
+ lnet_kiov_nob(tx->tx_nkiov, tx->tx_kiov) ==
+ (unsigned int)tx->tx_nob);
LASSERT (tx->tx_niov >= 1);
LASSERT (tx->tx_resid == tx->tx_nob);
ksock_tx_t *tx;
int rc;
int nloops = 0;
- int id = sched - ksocknal_data.ksnd_schedulers;
+ int id = (int)(sched - ksocknal_data.ksnd_schedulers);
char name[16];
snprintf (name, sizeof (name),"socknal_sd%02d", id);
nloops = 0;
if (!did_something) { /* wait for something to do */
- rc = wait_event_interruptible_exclusive(
+ cfs_wait_event_interruptible_exclusive(
sched->kss_waitq,
- !ksocknal_sched_cansleep(sched));
+ !ksocknal_sched_cansleep(sched), rc);
LASSERT (rc == 0);
} else {
our_cond_resched();
if (hello->kshm_nips == 0)
goto out;
- for (i = 0; i < hello->kshm_nips; i++) {
+ for (i = 0; i < (int) hello->kshm_nips; i++) {
hello->kshm_ips[i] = __cpu_to_le32 (hello->kshm_ips[i]);
}
goto out;
}
- for (i = 0; i < hello->kshm_nips; i++) {
+ for (i = 0; i < (int) hello->kshm_nips; i++) {
hello->kshm_ips[i] = __le32_to_cpu(hello->kshm_ips[i]);
if (hello->kshm_ips[i] == 0) {
return rc;
}
- for (i = 0; i < hello->kshm_nips; i++) {
+ for (i = 0; i < (int) hello->kshm_nips; i++) {
if (conn->ksnc_flip)
__swab32s(&hello->kshm_ips[i]);
LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
tx->tx_msg.ksm_u.lnetmsg.ksnm_hdr = tx->tx_lnetmsg->msg_hdr;
- tx->tx_iov[0].iov_len = offsetof(ksock_msg_t, ksm_u.lnetmsg.ksnm_payload);
- tx->tx_resid = tx->tx_nob = offsetof(ksock_msg_t, ksm_u.lnetmsg.ksnm_payload) +
- tx->tx_lnetmsg->msg_len;
+ tx->tx_iov[0].iov_len = sizeof(ksock_msg_t);
+ tx->tx_resid = tx->tx_nob = sizeof(ksock_msg_t) + tx->tx_lnetmsg->msg_len;
} else {
LASSERT(tx->tx_msg.ksm_type == KSOCK_MSG_NOOP);
/* CAVEAT EMPTOR: this byte flips 'ipaddrs' */
ksock_net_t *net = (ksock_net_t *)ni->ni_data;
- LASSERT (0 <= hello->kshm_nips && hello->kshm_nips <= LNET_MAX_INTERFACES);
+ LASSERT (hello->kshm_nips <= LNET_MAX_INTERFACES);
/* rely on caller to hold a ref on socket so it wouldn't disappear */
LASSERT (conn->ksnc_proto != NULL);
int
ksocknal_connd (void *arg)
{
- long id = (long)arg;
+ long id = (long)(long_ptr_t)arg;
char name[16];
ksock_connreq_t *cr;
ksock_route_t *route;
+ int rc = 0;
snprintf (name, sizeof (name), "socknal_cd%02ld", id);
cfs_daemonize (name);
spin_unlock_bh (&ksocknal_data.ksnd_connd_lock);
- wait_event_interruptible_exclusive(
+ cfs_wait_event_interruptible_exclusive(
ksocknal_data.ksnd_connd_waitq,
- ksocknal_connd_ready());
+ ksocknal_connd_ready(), rc);
spin_lock_bh (&ksocknal_data.ksnd_connd_lock);
}
}
/* print out warnings about stale ZC_REQs */
- list_for_each_entry(peer, peers, ksnp_list) {
+ cfs_list_for_each_entry_typed(peer, peers, ksock_peer_t, ksnp_list) {
ksock_tx_t *tx;
int n = 0;
- list_for_each_entry(tx, &peer->ksnp_zc_req_list, tx_zc_list) {
+ cfs_list_for_each_entry_typed(tx, &peer->ksnp_zc_req_list,
+ ksock_tx_t, tx_zc_list) {
if (!cfs_time_aftereq(cfs_time_current(),
tx->tx_deadline))
break;
#include "socklnd.h"
-# if CONFIG_SYSCTL && !CFS_SYSFS_MODULE_PARM
-static ctl_table ksocknal_ctl_table[18];
+# if defined(CONFIG_SYSCTL) && !CFS_SYSFS_MODULE_PARM
+static cfs_sysctl_table_t ksocknal_ctl_table[21];
-ctl_table ksocknal_top_ctl_table[] = {
- {200, "socknal", NULL, 0, 0555, ksocknal_ctl_table},
+cfs_sysctl_table_t ksocknal_top_ctl_table[] = {
+ {
+ /* ctl_name */ 200,
+ /* procname */ "socknal",
+ /* data */ NULL,
+ /* maxlen */ 0,
+ /* mode */ 0555,
+ /* child */ ksocknal_ctl_table
+ },
{ 0 }
};
int
-ksocknal_lib_tunables_init ()
+ksocknal_lib_tunables_init ()
{
- int i = 0;
- int j = 1;
-
- ksocknal_ctl_table[i++] = (ctl_table)
- {j++, "timeout", ksocknal_tunables.ksnd_timeout,
- sizeof (int), 0644, NULL, &proc_dointvec};
- ksocknal_ctl_table[i++] = (ctl_table)
- {j++, "credits", ksocknal_tunables.ksnd_credits,
- sizeof (int), 0444, NULL, &proc_dointvec};
- ksocknal_ctl_table[i++] = (ctl_table)
- {j++, "peer_credits", ksocknal_tunables.ksnd_peercredits,
- sizeof (int), 0444, NULL, &proc_dointvec};
- ksocknal_ctl_table[i++] = (ctl_table)
- {j++, "nconnds", ksocknal_tunables.ksnd_nconnds,
- sizeof (int), 0444, NULL, &proc_dointvec};
- ksocknal_ctl_table[i++] = (ctl_table)
- {j++, "min_reconnectms", ksocknal_tunables.ksnd_min_reconnectms,
- sizeof (int), 0444, NULL, &proc_dointvec};
- ksocknal_ctl_table[i++] = (ctl_table)
- {j++, "max_reconnectms", ksocknal_tunables.ksnd_max_reconnectms,
- sizeof (int), 0444, NULL, &proc_dointvec};
- ksocknal_ctl_table[i++] = (ctl_table)
- {j++, "eager_ack", ksocknal_tunables.ksnd_eager_ack,
- sizeof (int), 0644, NULL, &proc_dointvec};
-#if SOCKNAL_ZC
- ksocknal_ctl_table[i++] = (ctl_table)
- {j++, "zero_copy", ksocknal_tunables.ksnd_zc_min_frag,
- sizeof (int), 0644, NULL, &proc_dointvec};
-#endif
- ksocknal_ctl_table[i++] = (ctl_table)
- {j++, "typed", ksocknal_tunables.ksnd_typed_conns,
- sizeof (int), 0444, NULL, &proc_dointvec};
- ksocknal_ctl_table[i++] = (ctl_table)
- {j++, "min_bulk", ksocknal_tunables.ksnd_min_bulk,
- sizeof (int), 0644, NULL, &proc_dointvec};
- ksocknal_ctl_table[i++] = (ctl_table)
- {j++, "buffer_size", ksocknal_tunables.ksnd_buffer_size,
- sizeof(int), 0644, NULL, &proc_dointvec};
- ksocknal_ctl_table[i++] = (ctl_table)
- {j++, "nagle", ksocknal_tunables.ksnd_nagle,
- sizeof(int), 0644, NULL, &proc_dointvec};
+ int i = 0;
+ int j = 1;
+
+ ksocknal_ctl_table[i].ctl_name = j++;
+ ksocknal_ctl_table[i].procname = "timeout";
+ ksocknal_ctl_table[i].data = ksocknal_tunables.ksnd_timeout;
+ ksocknal_ctl_table[i].maxlen = sizeof (int);
+ ksocknal_ctl_table[i].mode = 0644;
+ ksocknal_ctl_table[i].proc_handler = &proc_dointvec;
+ i++;
+
+ ksocknal_ctl_table[i].ctl_name = j++;
+ ksocknal_ctl_table[i].procname = "credits";
+ ksocknal_ctl_table[i].data = ksocknal_tunables.ksnd_credits;
+ ksocknal_ctl_table[i].maxlen = sizeof (int);
+ ksocknal_ctl_table[i].mode = 0444;
+ ksocknal_ctl_table[i].proc_handler = &proc_dointvec;
+ i++;
+
+ ksocknal_ctl_table[i].ctl_name = j++;
+ ksocknal_ctl_table[i].procname = "peer_credits";
+ ksocknal_ctl_table[i].data = ksocknal_tunables.ksnd_peercredits;
+ ksocknal_ctl_table[i].maxlen = sizeof (int);
+ ksocknal_ctl_table[i].mode = 0444;
+ ksocknal_ctl_table[i].proc_handler = &proc_dointvec;
+ i++;
+
+ ksocknal_ctl_table[i].ctl_name = j++;
+ ksocknal_ctl_table[i].procname = "nconnds";
+ ksocknal_ctl_table[i].data = ksocknal_tunables.ksnd_nconnds;
+ ksocknal_ctl_table[i].maxlen = sizeof (int);
+ ksocknal_ctl_table[i].mode = 0444;
+ ksocknal_ctl_table[i].proc_handler = &proc_dointvec;
+ i++;
+
+
+ ksocknal_ctl_table[i].ctl_name = j++;
+ ksocknal_ctl_table[i].procname = "min_reconnectms";
+ ksocknal_ctl_table[i].data = ksocknal_tunables.ksnd_min_reconnectms;
+ ksocknal_ctl_table[i].maxlen = sizeof (int);
+ ksocknal_ctl_table[i].mode = 0444;
+ ksocknal_ctl_table[i].proc_handler = &proc_dointvec;
+ i++;
+
+ ksocknal_ctl_table[i].ctl_name = j++;
+ ksocknal_ctl_table[i].procname = "max_reconnectms";
+ ksocknal_ctl_table[i].data = ksocknal_tunables.ksnd_max_reconnectms;
+ ksocknal_ctl_table[i].maxlen = sizeof (int);
+ ksocknal_ctl_table[i].mode = 0444;
+ ksocknal_ctl_table[i].proc_handler = &proc_dointvec;
+ i++;
+
+ ksocknal_ctl_table[i].ctl_name = j++;
+ ksocknal_ctl_table[i].procname = "eager_ack";
+ ksocknal_ctl_table[i].data = ksocknal_tunables.ksnd_eager_ack;
+ ksocknal_ctl_table[i].maxlen = sizeof (int);
+ ksocknal_ctl_table[i].mode = 0644;
+ ksocknal_ctl_table[i].proc_handler = &proc_dointvec;
+ i++;
+
+ ksocknal_ctl_table[i].ctl_name = j++;
+ ksocknal_ctl_table[i].procname = "zero_copy";
+ ksocknal_ctl_table[i].data = ksocknal_tunables.ksnd_zc_min_frag;
+ ksocknal_ctl_table[i].maxlen = sizeof (int);
+ ksocknal_ctl_table[i].mode = 0644;
+ ksocknal_ctl_table[i].proc_handler = &proc_dointvec;
+ i++;
+
+ ksocknal_ctl_table[i].ctl_name = j++;
+ ksocknal_ctl_table[i].procname = "typed";
+ ksocknal_ctl_table[i].data = ksocknal_tunables.ksnd_typed_conns;
+ ksocknal_ctl_table[i].maxlen = sizeof (int);
+ ksocknal_ctl_table[i].mode = 0444;
+ ksocknal_ctl_table[i].proc_handler = &proc_dointvec;
+ i++;
+
+ ksocknal_ctl_table[i].ctl_name = j++;
+ ksocknal_ctl_table[i].procname = "min_bulk";
+ ksocknal_ctl_table[i].data = ksocknal_tunables.ksnd_min_bulk;
+ ksocknal_ctl_table[i].maxlen = sizeof (int);
+ ksocknal_ctl_table[i].mode = 0644;
+ ksocknal_ctl_table[i].proc_handler = &proc_dointvec;
+ i++;
+
+ ksocknal_ctl_table[i].ctl_name = j++;
+ ksocknal_ctl_table[i].procname = "rx_buffer_size";
+ ksocknal_ctl_table[i].data = ksocknal_tunables.ksnd_rx_buffer_size;
+ ksocknal_ctl_table[i].maxlen = sizeof(int);
+ ksocknal_ctl_table[i].mode = 0644;
+ ksocknal_ctl_table[i].proc_handler = &proc_dointvec;
+ i++;
+
+ ksocknal_ctl_table[i].ctl_name = j++;
+ ksocknal_ctl_table[i].procname = "tx_buffer_size";
+ ksocknal_ctl_table[i].data = ksocknal_tunables.ksnd_tx_buffer_size;
+ ksocknal_ctl_table[i].maxlen = sizeof(int);
+ ksocknal_ctl_table[i].mode = 0644;
+ ksocknal_ctl_table[i].proc_handler = &proc_dointvec;
+ i++;
+
+ ksocknal_ctl_table[i].ctl_name = j++;
+ ksocknal_ctl_table[i].procname = "nagle";
+ ksocknal_ctl_table[i].data = ksocknal_tunables.ksnd_nagle;
+ ksocknal_ctl_table[i].maxlen = sizeof(int);
+ ksocknal_ctl_table[i].mode = 0644;
+ ksocknal_ctl_table[i].proc_handler = &proc_dointvec;
+ i++;
+
#ifdef CPU_AFFINITY
- ksocknal_ctl_table[i++] = (ctl_table)
- {j++, "irq_affinity", ksocknal_tunables.ksnd_irq_affinity,
- sizeof(int), 0644, NULL, &proc_dointvec};
+ ksocknal_ctl_table[i].ctl_name = j++;
+ ksocknal_ctl_table[i].procname = "irq_affinity";
+ ksocknal_ctl_table[i].data = ksocknal_tunables.ksnd_irq_affinity;
+ ksocknal_ctl_table[i].maxlen = sizeof(int);
+ ksocknal_ctl_table[i].mode = 0644;
+ ksocknal_ctl_table[i].proc_handler = &proc_dointvec;
+ i++;
+#endif
+
+ ksocknal_ctl_table[i].ctl_name = j++;
+ ksocknal_ctl_table[i].procname = "keepalive_idle";
+ ksocknal_ctl_table[i].data = ksocknal_tunables.ksnd_keepalive_idle;
+ ksocknal_ctl_table[i].maxlen = sizeof(int);
+ ksocknal_ctl_table[i].mode = 0644;
+ ksocknal_ctl_table[i].proc_handler = &proc_dointvec;
+ i++;
+
+ ksocknal_ctl_table[i].ctl_name = j++;
+ ksocknal_ctl_table[i].procname = "keepalive_count";
+ ksocknal_ctl_table[i].data = ksocknal_tunables.ksnd_keepalive_count;
+ ksocknal_ctl_table[i].maxlen = sizeof(int);
+ ksocknal_ctl_table[i].mode = 0644;
+ ksocknal_ctl_table[i].proc_handler = &proc_dointvec;
+ i++;
+
+ ksocknal_ctl_table[i].ctl_name = j++;
+ ksocknal_ctl_table[i].procname = "keepalive_intvl";
+ ksocknal_ctl_table[i].data = ksocknal_tunables.ksnd_keepalive_intvl;
+ ksocknal_ctl_table[i].maxlen = sizeof(int);
+ ksocknal_ctl_table[i].mode = 0644;
+ ksocknal_ctl_table[i].proc_handler = &proc_dointvec;
+ i++;
+
+#ifdef SOCKNAL_BACKOFF
+ ksocknal_ctl_table[i].ctl_name = j++;
+ ksocknal_ctl_table[i].procname = "backoff_init";
+ ksocknal_ctl_table[i].data = ksocknal_tunables.ksnd_backoff_init;
+ ksocknal_ctl_table[i].maxlen = sizeof(int);
+ ksocknal_ctl_table[i].mode = 0644;
+ ksocknal_ctl_table[i].proc_handler = &proc_dointvec;
+ i++;
+
+ ksocknal_ctl_table[i].ctl_name = j++;
+ ksocknal_ctl_table[i].procname = "backoff_max";
+ ksocknal_ctl_table[i].data = ksocknal_tunables.ksnd_backoff_max;
+ ksocknal_ctl_table[i].maxlen = sizeof(int);
+ ksocknal_ctl_table[i].mode = 0644;
+ ksocknal_ctl_table[i].proc_handler = &proc_dointvec;
+ i++;
#endif
- ksocknal_ctl_table[i++] = (ctl_table)
- {j++, "keepalive_idle", ksocknal_tunables.ksnd_keepalive_idle,
- sizeof(int), 0644, NULL, &proc_dointvec};
- ksocknal_ctl_table[i++] = (ctl_table)
- {j++, "keepalive_count", ksocknal_tunables.ksnd_keepalive_count,
- sizeof(int), 0644, NULL, &proc_dointvec};
- ksocknal_ctl_table[i++] = (ctl_table)
- {j++, "keepalive_intvl", ksocknal_tunables.ksnd_keepalive_intvl,
- sizeof(int), 0644, NULL, &proc_dointvec};
-
- LASSERT (j == i+1);
- LASSERT (i < sizeof(ksocknal_ctl_table)/sizeof(ksocknal_ctl_table[0]));
+
+#if SOCKNAL_VERSION_DEBUG
+ ksocknal_ctl_table[i].ctl_name = j++;
+ ksocknal_ctl_table[i].procname = "protocol";
+ ksocknal_ctl_table[i].data = ksocknal_tunables.ksnd_protocol;
+ ksocknal_ctl_table[i].maxlen = sizeof(int);
+ ksocknal_ctl_table[i].mode = 0644;
+ ksocknal_ctl_table[i].proc_handler = &proc_dointvec;
+ i++;
+#endif
+
+ LASSERT (j == i + 1);
+ LASSERT (i <= sizeof(ksocknal_ctl_table)/sizeof(ksocknal_ctl_table[0]));
ksocknal_tunables.ksnd_sysctl =
- register_sysctl_table(ksocknal_top_ctl_table, 0);
+ cfs_register_sysctl_table(ksocknal_top_ctl_table, 0);
if (ksocknal_tunables.ksnd_sysctl == NULL)
- CWARN("Can't setup /proc tunables\n");
+ CWARN("Can't setup /proc tunables\n");
- return 0;
+ return 0;
}
void
-ksocknal_lib_tunables_fini ()
+ksocknal_lib_tunables_fini ()
{
if (ksocknal_tunables.ksnd_sysctl != NULL)
- unregister_sysctl_table(ksocknal_tunables.ksnd_sysctl);
+ cfs_unregister_sysctl_table(ksocknal_tunables.ksnd_sysctl);
}
#else
int
-ksocknal_lib_tunables_init ()
+ksocknal_lib_tunables_init ()
{
- return 0;
+ return 0;
}
-void
+void
ksocknal_lib_tunables_fini ()
{
}
-#endif
+#endif /* # if CONFIG_SYSCTL && !CFS_SYSFS_MODULE_PARM */
void
ksocknal_lib_bind_irq (unsigned int irq)
ksocknal_lib_get_conn_addrs (ksock_conn_t *conn)
{
int rc = libcfs_sock_getaddr(conn->ksnc_sock, 1,
- &conn->ksnc_ipaddr, &conn->ksnc_port);
+ &conn->ksnc_ipaddr,
+ &conn->ksnc_port);
/* Didn't need the {get,put}connsock dance to deref ksnc_sock... */
LASSERT (!conn->ksnc_closing);
unsigned int
ksocknal_lib_sock_irq (struct socket *sock)
{
- return 0;
-}
-
-#if (SOCKNAL_ZC && SOCKNAL_VADDR_ZC)
-static struct page *
-ksocknal_kvaddr_to_page (unsigned long vaddr)
-{
- struct page *page;
-
- if (vaddr >= VMALLOC_START &&
- vaddr < VMALLOC_END)
- page = vmalloc_to_page ((void *)vaddr);
-#ifdef CONFIG_HIGHMEM
- else if (vaddr >= PKMAP_BASE &&
- vaddr < (PKMAP_BASE + LAST_PKMAP * PAGE_SIZE))
- page = vmalloc_to_page ((void *)vaddr);
- /* in 2.4 ^ just walks the page tables */
-#endif
- else
- page = virt_to_page (vaddr);
-
- if (page == NULL ||
- !VALID_PAGE (page))
- return (NULL);
-
- return (page);
-}
-#endif
-
-/*
- * ks_lock_iovs
- * Lock the i/o vector buffers into MDL structure
- *
- * Arguments:
- * iov: the array of i/o vectors
- * niov: number of i/o vectors to be locked
- * len: the real length of the iov vectors
- *
- * Return Value:
- * ksock_mdl_t *: the Mdl of the locked buffers or
- * NULL pointer in failure case
- *
- * Notes:
- * N/A
- */
-
-ksock_mdl_t *
-ks_lock_iovs(
- IN struct iovec *iov,
- IN int niov,
- IN int recving,
- IN int * len )
-{
- int rc = 0;
-
- int i = 0;
- int total = 0;
- ksock_mdl_t * mdl = NULL;
- ksock_mdl_t * tail = NULL;
-
- LASSERT(iov != NULL);
- LASSERT(niov > 0);
- LASSERT(len != NULL);
-
- for (i=0; i < niov; i++) {
-
- ksock_mdl_t * Iovec = NULL;
-
- rc = ks_lock_buffer(
- iov[i].iov_base,
- FALSE,
- iov[i].iov_len,
- recving ? IoWriteAccess : IoReadAccess,
- &Iovec );
-
- if (rc < 0) {
- break;
- }
-
- if (tail) {
- tail->Next = Iovec;
- } else {
- mdl = Iovec;
- }
-
- tail = Iovec;
-
- total +=iov[i].iov_len;
- }
-
- if (rc >= 0) {
- *len = total;
- } else {
- if (mdl) {
- ks_release_mdl(mdl, FALSE);
- mdl = NULL;
- }
- }
-
- return mdl;
-}
-
-/*
- * ks_lock_kiovs
- * Lock the kiov pages into MDL structure
- *
- * Arguments:
- * kiov: the array of kiov pages
- * niov: number of kiov to be locked
- * len: the real length of the kiov arrary
- *
- * Return Value:
- * PMDL: the Mdl of the locked buffers or NULL
- * pointer in failure case
- *
- * Notes:
- * N/A
- */
-ksock_mdl_t *
-ks_lock_kiovs(
- IN lnet_kiov_t * kiov,
- IN int nkiov,
- IN int recving,
- IN int * len )
-{
- int rc = 0;
- int i = 0;
- int total = 0;
- ksock_mdl_t * mdl = NULL;
- ksock_mdl_t * tail = NULL;
-
- LASSERT(kiov != NULL);
- LASSERT(nkiov > 0);
- LASSERT(len != NULL);
-
- for (i=0; i < nkiov; i++) {
-
- ksock_mdl_t * Iovec = NULL;
-
-
- //
- // Lock the kiov page into Iovec ¡Â
- //
-
- rc = ks_lock_buffer(
- (PUCHAR)kiov[i].kiov_page->addr +
- kiov[i].kiov_offset,
- FALSE,
- kiov[i].kiov_len,
- recving ? IoWriteAccess : IoReadAccess,
- &Iovec
- );
-
- if (rc < 0) {
- break;
- }
-
- //
- // Attach the Iovec to the mdl chain
- //
-
- if (tail) {
- tail->Next = Iovec;
- } else {
- mdl = Iovec;
- }
-
- tail = Iovec;
-
- total += kiov[i].kiov_len;
-
- }
-
- if (rc >= 0) {
- *len = total;
- } else {
- if (mdl) {
- ks_release_mdl(mdl, FALSE);
- mdl = NULL;
- }
- }
-
- return mdl;
+ return 0;
}
-
int
ksocknal_lib_send_iov (ksock_conn_t *conn, ksock_tx_t *tx)
{
struct socket *sock = conn->ksnc_sock;
-#if (SOCKNAL_ZC && SOCKNAL_VADDR_ZC)
- unsigned long vaddr = (unsigned long)iov->iov_base
- int offset = vaddr & (PAGE_SIZE - 1);
- int zcsize = MIN (iov->iov_len, PAGE_SIZE - offset);
- struct page *page;
-#endif
+
int nob;
int rc;
- ksock_mdl_t * mdl;
+ int flags;
- /* NB we can't trust socket ops to either consume our iovs
- * or leave them alone. */
-#if (SOCKNAL_ZC && SOCKNAL_VADDR_ZC)
- if (zcsize >= ksocknal_data.ksnd_zc_min_frag &&
- (sock->sk->sk_route_caps & NETIF_F_SG) &&
- (sock->sk->sk_route_caps & (NETIF_F_IP_CSUM | NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)) &&
- (page = ksocknal_kvaddr_to_page (vaddr)) != NULL) {
- int msgflg = MSG_DONTWAIT;
+ if (*ksocknal_tunables.ksnd_enable_csum && /* checksum enabled */
+ conn->ksnc_proto == &ksocknal_protocol_v2x && /* V2.x connection */
+ tx->tx_nob == tx->tx_resid && /* first sending */
+ tx->tx_msg.ksm_csum == 0) /* not checksummed */
+ ksocknal_lib_csum_tx(tx);
- CDEBUG(D_NET, "vaddr %p, page %p->%p + offset %x for %d\n",
- (void *)vaddr, page, page_address(page), offset, zcsize);
+ nob = ks_query_iovs_length(tx->tx_iov, tx->tx_niov);
+ flags = (!list_empty (&conn->ksnc_tx_queue) || nob < tx->tx_resid) ?
+ (MSG_DONTWAIT | MSG_MORE) : MSG_DONTWAIT;
+ rc = ks_send_iovs(sock, tx->tx_iov, tx->tx_niov, flags, 0);
- if (!list_empty (&conn->ksnc_tx_queue) ||
- zcsize < tx->tx_resid)
- msgflg |= MSG_MORE;
-
- rc = tcp_sendpage_zccd(sock, page, offset, zcsize, msgflg, &tx->tx_zccd);
- } else
-#endif
- {
- /* lock the whole tx iovs into a single mdl chain */
- mdl = ks_lock_iovs(tx->tx_iov, tx->tx_niov, FALSE, &nob);
-
- if (mdl) {
- /* send the total mdl chain */
- rc = ks_send_mdl( conn->ksnc_sock, tx, mdl, nob,
- (!list_empty (&conn->ksnc_tx_queue) || nob < tx->tx_resid) ?
- (MSG_DONTWAIT | MSG_MORE) : MSG_DONTWAIT);
- } else {
- rc = -ENOMEM;
- }
- }
-
- return rc;
+ KsPrint((4, "ksocknal_lib_send_iov: conn %p sock %p rc %d\n",
+ conn, sock, rc));
+ return rc;
}
int
lnet_kiov_t *kiov = tx->tx_kiov;
int rc;
int nob;
- ksock_mdl_t * mdl;
+ int nkiov;
+ int flags;
- /* NB we can't trust socket ops to either consume our iovs
- * or leave them alone. */
-
-#if SOCKNAL_ZC
- if (kiov->kiov_len >= *ksocknal_tunables.ksnd_zc_min_frag &&
- (sock->sk->sk_route_caps & NETIF_F_SG) &&
- (sock->sk->sk_route_caps & (NETIF_F_IP_CSUM | NETIF_F_NO_CSUM | NETIF_F_HW_CSUM))) {
- struct page *page = kiov->kiov_page;
- int offset = kiov->kiov_offset;
- int fragsize = kiov->kiov_len;
- int msgflg = MSG_DONTWAIT;
-
- CDEBUG(D_NET, "page %p + offset %x for %d\n",
- page, offset, kiov->kiov_len);
-
- if (!list_empty(&conn->ksnc_tx_queue) ||
- fragsize < tx->tx_resid)
- msgflg |= MSG_MORE;
-
- rc = tcp_sendpage_zccd(sock, page, offset, fragsize, msgflg,
- &tx->tx_zccd);
- } else
-#endif
- {
- /* lock the whole tx kiovs into a single mdl chain */
- mdl = ks_lock_kiovs(tx->tx_kiov, tx->tx_nkiov, FALSE, &nob);
-
- if (mdl) {
- /* send the total mdl chain */
- rc = ks_send_mdl(
- conn->ksnc_sock, tx, mdl, nob,
- (!list_empty(&conn->ksnc_tx_queue) || nob < tx->tx_resid) ?
- (MSG_DONTWAIT | MSG_MORE) : MSG_DONTWAIT);
- } else {
- rc = -ENOMEM;
- }
- }
+ nkiov = tx->tx_nkiov;
+ nob = ks_query_kiovs_length(tx->tx_kiov, nkiov);
+ flags = (!list_empty (&conn->ksnc_tx_queue) || nob < tx->tx_resid) ?
+ (MSG_DONTWAIT | MSG_MORE) : MSG_DONTWAIT;
+ rc = ks_send_kiovs(sock, tx->tx_kiov, nkiov, flags, 0);
- return rc;
+ KsPrint((4, "ksocknal_lib_send_kiov: conn %p sock %p rc %d\n",
+ conn, sock, rc));
+ return rc;
}
-
int
ksocknal_lib_recv_iov (ksock_conn_t *conn)
{
struct iovec *iov = conn->ksnc_rx_iov;
int rc;
int size;
- ksock_mdl_t * mdl;
- /* lock the whole tx iovs into a single mdl chain */
- mdl = ks_lock_iovs(iov, conn->ksnc_rx_niov, TRUE, &size);
+ /* receive payload from tsdu queue */
+ rc = ks_recv_iovs (conn->ksnc_sock, iov, conn->ksnc_rx_niov,
+ MSG_DONTWAIT, 0);
- if (!mdl) {
- return (-ENOMEM);
- }
-
- LASSERT (size <= conn->ksnc_rx_nob_wanted);
+ /* calculate packet checksum */
+ if (rc > 0) {
+
+ int i;
+ int fragnob;
+ int sum;
+ __u32 saved_csum = 0;
+
+ if (conn->ksnc_proto == &ksocknal_protocol_v2x) {
+ saved_csum = conn->ksnc_msg.ksm_csum;
+ conn->ksnc_msg.ksm_csum = 0;
+ }
- /* try to request data for the whole mdl chain */
- rc = ks_recv_mdl (conn->ksnc_sock, mdl, size, MSG_DONTWAIT);
+ if (saved_csum != 0) {
+ /* accumulate checksum */
+ for (i = 0, sum = rc; sum > 0; i++, sum -= fragnob) {
+ LASSERT (i < conn->ksnc_rx_niov);
+
+ fragnob = iov[i].iov_len;
+ if (fragnob > sum)
+ fragnob = sum;
+
+ conn->ksnc_rx_csum = ksocknal_csum(conn->ksnc_rx_csum,
+ iov[i].iov_base, fragnob);
+ }
+ conn->ksnc_msg.ksm_csum = saved_csum;
+ }
+ }
+
+ KsPrint((4, "ksocknal_lib_recv_iov: conn %p sock %p rc %d.\n",
+ conn, conn->ksnc_sock, rc));
return rc;
}
ksocknal_lib_recv_kiov (ksock_conn_t *conn)
{
lnet_kiov_t *kiov = conn->ksnc_rx_kiov;
- int size;
int rc;
- ksock_mdl_t * mdl;
/* NB we can't trust socket ops to either consume our iovs
* or leave them alone, so we only receive 1 frag at a time. */
LASSERT (conn->ksnc_rx_nkiov > 0);
- /* lock the whole tx kiovs into a single mdl chain */
- mdl = ks_lock_kiovs(kiov, conn->ksnc_rx_nkiov, TRUE, &size);
+ /* receive payload from tsdu queue */
+ rc = ks_recv_kiovs (conn->ksnc_sock, kiov, conn->ksnc_rx_nkiov,
+ MSG_DONTWAIT, 0);
- if (!mdl) {
- rc = -ENOMEM;
- return (rc);
- }
-
- LASSERT (size <= conn->ksnc_rx_nob_wanted);
+ if (rc > 0 && conn->ksnc_msg.ksm_csum != 0) {
+
+ int i;
+ char *base;
+ int sum;
+ int fragnob;
+
+ for (i = 0, sum = rc; sum > 0; i++, sum -= fragnob) {
- /* try to request data for the whole mdl chain */
- rc = ks_recv_mdl (conn->ksnc_sock, mdl, size, MSG_DONTWAIT);
+ LASSERT (i < conn->ksnc_rx_nkiov);
+ base = (char *)(kiov[i].kiov_page->addr) + kiov[i].kiov_offset;
+ fragnob = kiov[i].kiov_len;
+ if (fragnob > sum)
+ fragnob = sum;
+
+ conn->ksnc_rx_csum = ksocknal_csum(conn->ksnc_rx_csum,
+ base, fragnob);
+ }
+ }
+
+ KsPrint((4, "ksocknal_lib_recv_kiov: conn %p sock %p rc %d.\n",
+ conn, conn->ksnc_sock, rc));
return rc;
}
int
ksocknal_lib_get_conn_tunables (ksock_conn_t *conn, int *txmem, int *rxmem, int *nagle)
{
- ksock_tconn_t * tconn = conn->ksnc_sock;
+ ks_tconn_t * tconn = conn->ksnc_sock;
int len;
int rc;
ks_get_tconn (tconn);
-
*txmem = *rxmem = 0;
-
len = sizeof(*nagle);
-
- rc = ks_get_tcp_option(
- tconn, TCP_SOCKET_NODELAY,
- (__u32 *)nagle, &len);
-
+ rc = ks_get_tcp_option(tconn, TCP_SOCKET_NODELAY, (__u32 *)nagle, &len);
ks_put_tconn (tconn);
- printk("ksocknal_get_conn_tunables: nodelay = %d rc = %d\n", *nagle, rc);
+ KsPrint((2, "ksocknal_get_conn_tunables: nodelay = %d rc = %d\n", *nagle, rc));
if (rc == 0)
*nagle = !*nagle;
}
int
-ksocknal_lib_buffersize (int current_sz, int tunable_sz)
-{
- /* ensure >= SOCKNAL_MIN_BUFFER */
- if (current_sz < SOCKNAL_MIN_BUFFER)
- return MAX(SOCKNAL_MIN_BUFFER, tunable_sz);
-
- if (tunable_sz > SOCKNAL_MIN_BUFFER)
- return tunable_sz;
-
- /* leave alone */
- return 0;
-}
-
-int
ksocknal_lib_setup_sock (struct socket *sock)
{
int rc;
__u32 option;
- /* set the window size */
-
#if 0
+ /* set the window size */
tconn->kstc_snd_wnd = ksocknal_tunables.ksnd_buffer_size;
tconn->kstc_rcv_wnd = ksocknal_tunables.ksnd_buffer_size;
#endif
sock, TCP_SOCKET_NODELAY,
&option, sizeof (option));
if (rc != 0) {
- printk ("Can't disable nagle: %d\n", rc);
+ CERROR ("Can't disable nagle: %d\n", rc);
return (rc);
}
}
void
ksocknal_lib_push_conn (ksock_conn_t *conn)
{
- ksock_tconn_t * tconn;
+ ks_tconn_t * tconn;
__u32 nagle;
__u32 val = 1;
int rc;
spin_lock(&tconn->kstc_lock);
if (tconn->kstc_type == kstt_sender) {
- nagle = tconn->sender.kstc_info.nagle;
- tconn->sender.kstc_info.nagle = 0;
+ nagle = tconn->sender.kstc_info.nagle;
+ tconn->sender.kstc_info.nagle = 0;
} else {
- LASSERT(tconn->kstc_type == kstt_child);
- nagle = tconn->child.kstc_info.nagle;
- tconn->child.kstc_info.nagle = 0;
+ LASSERT(tconn->kstc_type == kstt_child);
+ nagle = tconn->child.kstc_info.nagle;
+ tconn->child.kstc_info.nagle = 0;
}
spin_unlock(&tconn->kstc_lock);
spin_lock(&tconn->kstc_lock);
if (tconn->kstc_type == kstt_sender) {
- tconn->sender.kstc_info.nagle = nagle;
+ tconn->sender.kstc_info.nagle = nagle;
} else {
- LASSERT(tconn->kstc_type == kstt_child);
- tconn->child.kstc_info.nagle = nagle;
+ LASSERT(tconn->kstc_type == kstt_child);
+ tconn->child.kstc_info.nagle = nagle;
}
spin_unlock(&tconn->kstc_lock);
-
ks_put_tconn(tconn);
}
-/* @mode: 0: receiving mode / 1: sending mode */
void
-ksocknal_sched_conn (ksock_conn_t *conn, int mode, ksock_tx_t *tx)
+ksocknal_lib_csum_tx(ksock_tx_t *tx)
{
- int flags;
- ksock_sched_t * sched;
- ENTRY;
-
- /* interleave correctly with closing sockets... */
- read_lock (&ksocknal_data.ksnd_global_lock);
-
- sched = conn->ksnc_scheduler;
+ int i;
+ __u32 csum;
+ void *base;
- spin_lock_irqsave (&sched->kss_lock, flags);
+ LASSERT(tx->tx_iov[0].iov_base == (void *)&tx->tx_msg);
+ LASSERT(tx->tx_conn != NULL);
+ LASSERT(tx->tx_conn->ksnc_proto == &ksocknal_protocol_v2x);
- if (mode) { /* transmission can continue ... */
+ tx->tx_msg.ksm_csum = 0;
-#error "This is out of date - we should be calling ksocknal_write_callback()"
- conn->ksnc_tx_ready = 1;
+ csum = ksocknal_csum(~0, (void *)tx->tx_iov[0].iov_base,
+ tx->tx_iov[0].iov_len);
- if (tx) {
- /* Incomplete send: place tx on HEAD of tx_queue */
- list_add (&tx->tx_list, &conn->ksnc_tx_queue);
- }
-
- if ( !conn->ksnc_tx_scheduled &&
- !list_empty(&conn->ksnc_tx_queue)) { //packets to send
- list_add_tail (&conn->ksnc_tx_list,
- &sched->kss_tx_conns);
- conn->ksnc_tx_scheduled = 1;
- /* extra ref for scheduler */
- atomic_inc (&conn->ksnc_conn_refcount);
-
- cfs_waitq_signal (&sched->kss_waitq);
- }
- } else { /* receiving can continue ... */
-
- conn->ksnc_rx_ready = 1;
+ if (tx->tx_kiov != NULL) {
+ for (i = 0; i < tx->tx_nkiov; i++) {
+ base = (PUCHAR)(tx->tx_kiov[i].kiov_page->addr) +
+ tx->tx_kiov[i].kiov_offset;
- if ( !conn->ksnc_rx_scheduled) { /* not being progressed */
- list_add_tail(&conn->ksnc_rx_list,
- &sched->kss_rx_conns);
- conn->ksnc_rx_scheduled = 1;
- /* extra ref for scheduler */
- atomic_inc (&conn->ksnc_conn_refcount);
-
- cfs_waitq_signal (&sched->kss_waitq);
+ csum = ksocknal_csum(csum, base, tx->tx_kiov[i].kiov_len);
}
+ } else {
+ for (i = 1; i < tx->tx_niov; i++)
+ csum = ksocknal_csum(csum, tx->tx_iov[i].iov_base,
+ tx->tx_iov[i].iov_len);
}
- spin_unlock_irqrestore (&sched->kss_lock, flags);
- read_unlock (&ksocknal_data.ksnd_global_lock);
+ if (*ksocknal_tunables.ksnd_inject_csum_error) {
+ csum++;
+ *ksocknal_tunables.ksnd_inject_csum_error = 0;
+ }
- EXIT;
+ tx->tx_msg.ksm_csum = csum;
}
-void ksocknal_schedule_callback(struct socket*sock, int mode, void * tx, ulong_ptr bytes)
+void ksocknal_schedule_callback(struct socket*sock, int mode)
{
- ksock_conn_t * conn = (ksock_conn_t *) sock->kstc_conn;
+ ksock_conn_t * conn = (ksock_conn_t *) sock->kstc_conn;
- if (mode) {
- ksocknal_sched_conn(conn, mode, tx);
- } else {
- if ( CAN_BE_SCHED(bytes, (ulong_ptr)conn->ksnc_rx_nob_wanted )) {
- ksocknal_sched_conn(conn, mode, tx);
+ read_lock (&ksocknal_data.ksnd_global_lock);
+ if (mode) {
+ ksocknal_write_callback(conn);
+ } else {
+ ksocknal_read_callback(conn);
}
- }
+ read_unlock (&ksocknal_data.ksnd_global_lock);
}
-extern void
-ksocknal_tx_launched (ksock_tx_t *tx);
-
void
-ksocknal_fini_sending(ksock_tcpx_fini_t *tcpx)
-{
- ksocknal_tx_launched(tcpx->tx);
- cfs_free(tcpx);
-}
-
-void *
-ksocknal_update_tx(
- struct socket* tconn,
- void * txp,
- ulong_ptr rc
- )
+ksocknal_tx_fini_callback(ksock_conn_t * conn, ksock_tx_t * tx)
{
- ksock_tx_t * tx = (ksock_tx_t *)txp;
-
- /*
- * the transmission was done, we need update the tx
- */
-
- LASSERT(tx->tx_resid >= (int)rc);
- tx->tx_resid -= (int)rc;
-
- /*
- * just partial of tx is sent out, we need update
- * the fields of tx and schedule later transmission.
- */
-
- if (tx->tx_resid) {
-
- if (tx->tx_niov > 0) {
-
- /* if there's iov, we need process iov first */
- while (rc > 0 ) {
- if (rc < tx->tx_iov->iov_len) {
- /* didn't send whole iov entry... */
- tx->tx_iov->iov_base =
- (char *)(tx->tx_iov->iov_base) + rc;
- tx->tx_iov->iov_len -= rc;
- rc = 0;
- } else {
- /* the whole of iov was sent out */
- rc -= tx->tx_iov->iov_len;
- tx->tx_iov++;
- tx->tx_niov--;
- }
- }
-
- } else {
-
- /* now we need process the kiov queues ... */
-
- while (rc > 0 ) {
-
- if (rc < tx->tx_kiov->kiov_len) {
- /* didn't send whole kiov entry... */
- tx->tx_kiov->kiov_offset += rc;
- tx->tx_kiov->kiov_len -= rc;
- rc = 0;
- } else {
- /* whole kiov was sent out */
- rc -= tx->tx_kiov->kiov_len;
- tx->tx_kiov++;
- tx->tx_nkiov--;
- }
- }
+ /* remove tx/conn from conn's outgoing queue */
+ spin_lock_bh (&conn->ksnc_scheduler->kss_lock);
+ list_del(&tx->tx_list);
+ if (list_empty(&conn->ksnc_tx_queue)) {
+ list_del (&conn->ksnc_tx_list);
}
+ spin_unlock_bh (&conn->ksnc_scheduler->kss_lock);
- } else {
-
- ksock_tcpx_fini_t * tcpx =
- cfs_alloc(sizeof(ksock_tcpx_fini_t), CFS_ALLOC_ZERO);
-
- ASSERT(tx->tx_resid == 0);
-
- if (!tcpx) {
-
- ksocknal_tx_launched (tx);
-
- } else {
-
- tcpx->tx = tx;
- ExInitializeWorkItem(
- &(tcpx->item),
- ksocknal_fini_sending,
- tcpx
- );
- ExQueueWorkItem(
- &(tcpx->item),
- CriticalWorkQueue
- );
- }
-
- tx = NULL;
- }
-
- return (void *)tx;
+ /* complete send; tx -ref */
+ ksocknal_tx_decref (tx);
}
void
{
sock->kstc_conn = conn;
sock->kstc_sched_cb = ksocknal_schedule_callback;
- sock->kstc_update_tx = ksocknal_update_tx;
}
void
{
sock->kstc_conn = NULL;
sock->kstc_sched_cb = NULL;
- sock->kstc_update_tx = NULL;
+}
+
+int
+ksocknal_lib_zc_capable(struct socket *sock)
+{
+ return 0;
}
#endif
+static inline __u32 ksocknal_csum(__u32 crc, unsigned char const *p, size_t len)
+{
+ while (len-- > 0)
+ crc = ((crc + 0x100) & ~0xff) | ((crc + *p++) & 0xff) ;
+ return crc;
+}
+
+
#endif
"protocol version");
#endif
-ksock_tunables_t ksocknal_tunables = {
- .ksnd_timeout = &sock_timeout,
- .ksnd_credits = &credits,
- .ksnd_peercredits = &peer_credits,
- .ksnd_nconnds = &nconnds,
- .ksnd_min_reconnectms = &min_reconnectms,
- .ksnd_max_reconnectms = &max_reconnectms,
- .ksnd_eager_ack = &eager_ack,
- .ksnd_typed_conns = &typed_conns,
- .ksnd_min_bulk = &min_bulk,
- .ksnd_tx_buffer_size = &tx_buffer_size,
- .ksnd_rx_buffer_size = &rx_buffer_size,
- .ksnd_nagle = &nagle,
- .ksnd_keepalive_idle = &keepalive_idle,
- .ksnd_keepalive_count = &keepalive_count,
- .ksnd_keepalive_intvl = &keepalive_intvl,
- .ksnd_enable_csum = &enable_csum,
- .ksnd_inject_csum_error = &inject_csum_error,
- .ksnd_zc_min_frag = &zc_min_frag,
+ksock_tunables_t ksocknal_tunables;
+
+int ksocknal_tunables_init(void)
+{
+
+ /* initialize ksocknal_tunables structure */
+ ksocknal_tunables.ksnd_timeout = &sock_timeout;
+ ksocknal_tunables.ksnd_nconnds = &nconnds;
+ ksocknal_tunables.ksnd_min_reconnectms = &min_reconnectms;
+ ksocknal_tunables.ksnd_max_reconnectms = &max_reconnectms;
+ ksocknal_tunables.ksnd_eager_ack = &eager_ack;
+ ksocknal_tunables.ksnd_typed_conns = &typed_conns;
+ ksocknal_tunables.ksnd_min_bulk = &min_bulk;
+ ksocknal_tunables.ksnd_tx_buffer_size = &tx_buffer_size;
+ ksocknal_tunables.ksnd_rx_buffer_size = &rx_buffer_size;
+ ksocknal_tunables.ksnd_nagle = &nagle;
+ ksocknal_tunables.ksnd_keepalive_idle = &keepalive_idle;
+ ksocknal_tunables.ksnd_keepalive_count = &keepalive_count;
+ ksocknal_tunables.ksnd_keepalive_intvl = &keepalive_intvl;
+ ksocknal_tunables.ksnd_credits = &credits;
+ ksocknal_tunables.ksnd_peercredits = &peer_credits;
+ ksocknal_tunables.ksnd_enable_csum = &enable_csum;
+ ksocknal_tunables.ksnd_inject_csum_error = &inject_csum_error;
+ ksocknal_tunables.ksnd_zc_min_frag = &zc_min_frag;
+
#ifdef CPU_AFFINITY
- .ksnd_irq_affinity = &enable_irq_affinity,
+ ksocknal_tunables.ksnd_irq_affinity = &enable_irq_affinity;
#endif
+
#ifdef SOCKNAL_BACKOFF
- .ksnd_backoff_init = &backoff_init,
- .ksnd_backoff_max = &backoff_max,
+ ksocknal_tunables.ksnd_backoff_init = &backoff_init;
+ ksocknal_tunables.ksnd_backoff_max = &backoff_max;
#endif
+
#if SOCKNAL_VERSION_DEBUG
- .ksnd_protocol = &protocol,
+ ksocknal_tunables.ksnd_protocol = &protocol;
#endif
+
+#if defined(CONFIG_SYSCTL) && !CFS_SYSFS_MODULE_PARM
+ ksocknal_tunables.ksnd_sysctl = NULL;
+#endif
+
+ /* initialize platform-specific tunables */
+ return ksocknal_lib_tunables_init();
};
+void ksocknal_tunables_fini(void)
+{
+ ksocknal_lib_tunables_fini();
+}
__u32 magic;
__u32 peer_ip;
int peer_port;
- int secure = (int)((unsigned long)arg);
+ int secure = (int)((long_ptr_t)arg);
LASSERT (lnet_acceptor_state.pta_sock == NULL);
if (lnet_count_acceptor_nis() == 0) /* not required */
return 0;
- pid = cfs_kernel_thread(lnet_acceptor, (void *)secure, 0);
+ pid = cfs_kernel_thread(lnet_acceptor, (void *)(ulong_ptr_t)secure, 0);
if (pid < 0) {
CERROR("Can't start acceptor thread: %ld\n", pid);
return -ESRCH;
/* maybe we're waken up with libcfs_sock_abort_accept() */
if (lnet_acceptor_state.pta_shutdown) {
- close(newsock);
+ libcfs_sock_release(newsock);
break;
}
continue;
failed:
- close(newsock);
+ libcfs_sock_release(newsock);
}
- close(lnet_acceptor_state.pta_sock);
+ libcfs_sock_release(lnet_acceptor_state.pta_sock);
LCONSOLE(0,"Acceptor stopping\n");
/* unblock lnet_acceptor_stop() */
list_for_each (tmp, &the_lnet.ln_lnds) {
lnd = list_entry(tmp, lnd_t, lnd_list);
- if (lnd->lnd_type == type)
+ if ((int)lnd->lnd_type == type)
return lnd;
}
#ifdef __KERNEL__
int i;
- the_lnet.ln_nfinalizers = num_online_cpus();
+ the_lnet.ln_nfinalizers = (int) num_online_cpus();
LIBCFS_ALLOC(the_lnet.ln_finalizers,
the_lnet.ln_nfinalizers *
LNetCtl(unsigned int cmd, void *arg)
{
struct libcfs_ioctl_data *data = arg;
- lnet_process_id_t id;
+ lnet_process_id_t id = {0};
lnet_ni_t *ni;
int rc;
return 0;
case IOC_LIBCFS_PING:
- rc = lnet_ping((lnet_process_id_t) {.nid = data->ioc_nid,
- .pid = data->ioc_u32[0]},
- data->ioc_u32[1], /* timeout */
+ id.nid = data->ioc_nid;
+ id.pid = data->ioc_u32[0];
+ rc = lnet_ping(id, data->ioc_u32[1], /* timeout */
(lnet_process_id_t *)data->ioc_pbuf1,
data->ioc_plen1/sizeof(lnet_process_id_t));
if (rc < 0)
case IOC_LIBCFS_DEBUG_PEER: {
/* CAVEAT EMPTOR: this one designed for calling directly; not
* via an ioctl */
- lnet_process_id_t *id = arg;
+ id = *((lnet_process_id_t *) arg);
- lnet_debug_peer(id->nid);
+ lnet_debug_peer(id.nid);
- ni = lnet_net2ni(LNET_NIDNET(id->nid));
+ ni = lnet_net2ni(LNET_NIDNET(id.nid));
if (ni == NULL) {
- CDEBUG(D_WARNING, "No NI for %s\n", libcfs_id2str(*id));
+ CDEBUG(D_WARNING, "No NI for %s\n", libcfs_id2str(id));
} else {
if (ni->ni_lnd->lnd_ctl == NULL) {
CDEBUG(D_WARNING, "No ctl for %s\n",
- libcfs_id2str(*id));
+ libcfs_id2str(id));
} else {
(void)ni->ni_lnd->lnd_ctl(ni, cmd, arg);
}
{
lnet_handle_me_t meh;
lnet_process_id_t id;
+ lnet_md_t md = {0};
int rc;
int rc2;
int n;
- int infosz;
+ unsigned int infosz;
int i;
for (n = 0; ; n++) {
goto failed_0;
}
- rc = LNetMEAttach(LNET_RESERVED_PORTAL,
- (lnet_process_id_t){.nid = LNET_NID_ANY,
- .pid = LNET_PID_ANY},
- LNET_PROTO_PING_MATCHBITS, 0LL,
+ memset(&id, 0, sizeof(lnet_process_id_t));
+ id.nid = LNET_NID_ANY;
+ id.pid = LNET_PID_ANY;
+
+ rc = LNetMEAttach(LNET_RESERVED_PORTAL, id,
+ LNET_PROTO_PING_MATCHBITS, 0,
LNET_UNLINK, LNET_INS_AFTER,
&meh);
if (rc != 0) {
goto failed_1;
}
- rc = LNetMDAttach(meh,
- (lnet_md_t){.start = the_lnet.ln_ping_info,
- .length = infosz,
- .threshold = LNET_MD_THRESH_INF,
- .options = (LNET_MD_OP_GET |
- LNET_MD_TRUNCATE |
- LNET_MD_MANAGE_REMOTE),
- .eq_handle = the_lnet.ln_ping_target_eq},
+ /* initialize md content */
+ md.start = the_lnet.ln_ping_info;
+ md.length = infosz;
+ md.threshold = LNET_MD_THRESH_INF;
+ md.max_size = 0;
+ md.options = LNET_MD_OP_GET | LNET_MD_TRUNCATE |
+ LNET_MD_MANAGE_REMOTE;
+ md.user_ptr = NULL;
+ md.eq_handle = the_lnet.ln_ping_target_eq;
+
+ rc = LNetMDAttach(meh, md,
LNET_RETAIN,
&the_lnet.ln_ping_target_md);
if (rc != 0) {
lnet_handle_eq_t eqh;
lnet_handle_md_t mdh;
lnet_event_t event;
+ lnet_md_t md = {0};
int which;
int unlinked = 0;
int replied = 0;
goto out_0;
}
- rc = LNetMDBind((lnet_md_t){.start = info,
- .length = infosz,
- .threshold = 2, /* GET/REPLY */
- .options = LNET_MD_TRUNCATE,
- .eq_handle = eqh},
- LNET_UNLINK,
- &mdh);
+ /* initialize md content */
+ md.start = info;
+ md.length = infosz;
+ md.threshold = 2; /*GET/REPLY*/
+ md.max_size = 0;
+ md.options = LNET_MD_TRUNCATE;
+ md.user_ptr = NULL;
+ md.eq_handle = eqh;
+
+ rc = LNetMDBind(md, LNET_UNLINK, &mdh);
if (rc != 0) {
CERROR("Can't bind MD: %d\n", rc);
goto out_1;
__swab32s(&info->pi_version);
__swab32s(&info->pi_pid);
__swab32s(&info->pi_nnids);
- for (i = 0; i < info->pi_nnids && i < n_ids; i++)
+ for (i = 0; i < (int)info->pi_nnids && i < (int)n_ids; i++)
__swab64s(&info->pi_nid[i]);
} else if (info->pi_magic != LNET_PROTO_PING_MAGIC) {
goto out_1;
}
- if (nob < offsetof(lnet_ping_info_t, pi_nid[0])) {
+ if (nob < (int)offsetof(lnet_ping_info_t, pi_nid[0])) {
CERROR("%s: Short reply %d(%d min)\n", libcfs_id2str(id),
nob, (int)offsetof(lnet_ping_info_t, pi_nid[0]));
goto out_1;
}
- if (info->pi_nnids < n_ids)
+ if ((int) info->pi_nnids < n_ids)
n_ids = info->pi_nnids;
- if (nob < offsetof(lnet_ping_info_t, pi_nid[n_ids])) {
+ if (nob < (int)offsetof(lnet_ping_info_t, pi_nid[n_ids])) {
CERROR("%s: Short reply %d(%d expected)\n", libcfs_id2str(id),
nob, (int)offsetof(lnet_ping_info_t, pi_nid[n_ids]));
goto out_1;
the_lnet.ln_network_tokens = tokens;
the_lnet.ln_network_tokens_nob = tokensize;
memcpy (tokens, networks, tokensize);
- str = tokens;
+ str = tokens;
/* Add in the loopback network */
ni = lnet_new_ni(LNET_MKNET(LOLND, 0), nilist);
if (net == LNET_NIDNET(LNET_NID_ANY)) {
lnet_syntax("networks", networks,
- str - tokens, strlen(str));
+ (int)(str - tokens), strlen(str));
LCONSOLE_ERROR_MSG(0x113, "Unrecognised network"
" type\n");
goto failed;
net = libcfs_str2net(lnet_trimwhite(str));
if (net == LNET_NIDNET(LNET_NID_ANY)) {
lnet_syntax("networks", networks,
- str - tokens, strlen(str));
+ (int)(str - tokens), strlen(str));
goto failed;
}
bracket = strchr(iface, ')');
if (bracket == NULL) {
lnet_syntax("networks", networks,
- iface - tokens, strlen(iface));
+ (int)(iface - tokens), strlen(iface));
goto failed;
}
iface = lnet_trimwhite(iface);
if (*iface == 0) {
lnet_syntax("networks", networks,
- iface - tokens, strlen(iface));
+ (int)(iface - tokens), strlen(iface));
goto failed;
}
str = lnet_trimwhite(str);
if (*str != 0) {
lnet_syntax("networks", networks,
- str - tokens, strlen(str));
+ (int)(str - tokens), strlen(str));
goto failed;
}
str = comma + 1;
str = lnet_trimwhite(str);
if (*str != 0) {
lnet_syntax("networks", networks,
- str - tokens, strlen(str));
+ (int)(str - tokens), strlen(str));
goto failed;
}
}
if (lnet_issep(*sep) || *sep == '#')
break;
- nob = sep - str;
+ nob = (int)(sep - str);
if (nob > 0) {
ltb = lnet_new_text_buf(nob);
if (ltb == NULL) {
char *str, char *sep1, char *sep2,
char *item, int itemlen)
{
- int len1 = sep1 - str;
+ int len1 = (int)(sep1 - str);
int len2 = strlen(sep2 + 1);
lnet_text_buf_t *ltb;
/* simple string enumeration */
if (lnet_expand1tb(&pending, str, sep, sep2,
- parsed, enditem - parsed) != 0)
+ parsed, (int)(enditem - parsed)) != 0)
goto failed;
continue;
goto out;
token_error:
- lnet_syntax("routes", cmd, token - str, strlen(token));
+ lnet_syntax("routes", cmd, (int)(token - str), strlen(token));
out:
lnet_free_text_bufs(&nets);
lnet_free_text_bufs(&gateways);
rc = lnet_match_network_token(token, ipaddrs, nip);
if (rc < 0) {
lnet_syntax("ip2nets", net_entry,
- token - tokens, len);
+ (int)(token - tokens), len);
return rc;
}
bracket < sep) {
/* netspec lists interfaces... */
- offset2 = offset + (bracket - tb->ltb_text);
+ offset2 = offset + (int)(bracket - tb->ltb_text);
len = strlen(bracket);
bracket = strchr(bracket + 1, ')');
if (sep == NULL)
return 0;
- offset += sep - tb->ltb_text;
+ offset += (int)(sep - tb->ltb_text);
tb2 = lnet_new_text_buf(strlen(sep));
if (tb2 == NULL)
return -ENOMEM;
cfs_waitq_timedwait(&wl, CFS_TASK_INTERRUPTIBLE,
cfs_time_seconds(timeout_ms)/1000);
cfs_duration_usec(cfs_time_sub(cfs_time_current(), now),
- &tv);
- timeout_ms -= tv.tv_sec * 1000 + tv.tv_usec / 1000;
+ &tv);
+ timeout_ms -= (int)(tv.tv_sec * 1000 + tv.tv_usec / 1000);
if (timeout_ms < 0)
timeout_ms = 0;
}
* otherwise caller may only lnet_md_free() it.
*/
- if (!LNetHandleIsEqual (umd->eq_handle, LNET_EQ_NONE)) {
+ if (!LNetHandleIsInvalid (umd->eq_handle)) {
eq = lnet_handle2eq(&umd->eq_handle);
if (eq == NULL)
return -ENOENT;
memcpy(lmd->md_iov.iov, umd->start,
niov * sizeof (lmd->md_iov.iov[0]));
- for (i = 0; i < niov; i++) {
+ for (i = 0; i < (int)niov; i++) {
/* We take the base address on trust */
if (lmd->md_iov.iov[i].iov_len <= 0) /* invalid length */
return -EINVAL;
memcpy(lmd->md_iov.kiov, umd->start,
niov * sizeof (lmd->md_iov.kiov[0]));
- for (i = 0; i < niov; i++) {
+ for (i = 0; i < (int)niov; i++) {
/* We take the page pointer on trust */
if (lmd->md_iov.kiov[i].kiov_offset +
lmd->md_iov.kiov[i].kiov_len > CFS_PAGE_SIZE )
if ((umd->options & LNET_MD_MAX_SIZE) != 0 && /* max size used */
(umd->max_size < 0 ||
- umd->max_size > umd->length)) // illegal max_size
+ umd->max_size > (int)umd->length)) // illegal max_size
return -EINVAL;
}
LASSERT (the_lnet.ln_init);
LASSERT (the_lnet.ln_refcount > 0);
- if (portal >= the_lnet.ln_nportals)
+ if ((int)portal >= the_lnet.ln_nportals)
return -EINVAL;
me = lnet_me_alloc();
return LNET_MATCHMD_DROP;
}
- list_for_each_entry_safe (me, tmp, &ptl->ptl_ml, me_list) {
+ cfs_list_for_each_entry_safe_typed (me, tmp, &ptl->ptl_ml,
+ lnet_me_t, me_list) {
md = me->me_md;
/* ME attached but MD not attached yet */
niov = 1;
for (;;) {
LASSERT (src_niov > 0);
- LASSERT (niov <= dst_niov);
+ LASSERT ((int)niov <= dst_niov);
frag_len = src->iov_len - offset;
dst->iov_base = ((char *)src->iov_base) + offset;
niov = 1;
for (;;) {
LASSERT (src_niov > 0);
- LASSERT (niov <= dst_niov);
+ LASSERT ((int)niov <= dst_niov);
frag_len = src->kiov_len - offset;
dst->kiov_page = src->kiov_page;
lnet_rtrbufpool_t *rbp = &the_lnet.ln_rtrpools[0];
LASSERT (msg->msg_len <= LNET_MTU);
- while (msg->msg_len > rbp->rbp_npages * CFS_PAGE_SIZE) {
+ while (msg->msg_len > (unsigned int)rbp->rbp_npages * CFS_PAGE_SIZE) {
rbp++;
LASSERT (rbp < &the_lnet.ln_rtrpools[LNET_NRBPOOLS]);
}
static void
lnet_drop_delayed_put(lnet_msg_t *msg, char *reason)
{
+ lnet_process_id_t id = {0};
+
+ id.nid = msg->msg_hdr.src_nid;
+ id.pid = msg->msg_hdr.src_pid;
+
LASSERT (msg->msg_md == NULL);
LASSERT (msg->msg_delayed);
LASSERT (msg->msg_rxpeer != NULL);
CWARN("Dropping delayed PUT from %s portal %d match "LPU64
" offset %d length %d: %s\n",
- libcfs_id2str((lnet_process_id_t){
- .nid = msg->msg_hdr.src_nid,
- .pid = msg->msg_hdr.src_pid}),
+ libcfs_id2str(id),
msg->msg_hdr.msg.put.ptl_index,
msg->msg_hdr.msg.put.match_bits,
msg->msg_hdr.msg.put.offset,
lnet_me_t *me = md->md_me;
lnet_portal_t *ptl = &the_lnet.ln_portals[me->me_portal];
- LASSERT (me->me_portal < the_lnet.ln_nportals);
+ LASSERT (me->me_portal < (unsigned int)the_lnet.ln_nportals);
if ((ptl->ptl_options & LNET_PTL_LAZY) == 0) {
LASSERT (list_empty(&ptl->ptl_msgq));
unsigned int rlength = hdr->payload_length;
unsigned int mlength = 0;
unsigned int offset = 0;
- lnet_process_id_t src = {/* .nid = */ hdr->src_nid,
- /* .pid = */ hdr->src_pid};
+ lnet_process_id_t src= {0};
lnet_libmd_t *md;
+ src.nid = hdr->src_nid;
+ src.pid = hdr->src_pid;
+
/* Convert put fields to host byte order */
hdr->msg.put.match_bits = le64_to_cpu(hdr->msg.put.match_bits);
hdr->msg.put.ptl_index = le32_to_cpu(hdr->msg.put.ptl_index);
lnet_hdr_t *hdr = &msg->msg_hdr;
unsigned int mlength = 0;
unsigned int offset = 0;
- lnet_process_id_t src = {/* .nid = */ hdr->src_nid,
- /* .pid = */ hdr->src_pid};
+ lnet_process_id_t src = {0};
lnet_handle_wire_t reply_wmd;
lnet_libmd_t *md;
int rc;
+ src.nid = hdr->src_nid;
+ src.pid = hdr->src_pid;
+
/* Convert get fields to host byte order */
hdr->msg.get.match_bits = le64_to_cpu(hdr->msg.get.match_bits);
hdr->msg.get.ptl_index = le32_to_cpu(hdr->msg.get.ptl_index);
{
void *private = msg->msg_private;
lnet_hdr_t *hdr = &msg->msg_hdr;
- lnet_process_id_t src = {/* .nid = */ hdr->src_nid,
- /* .pid = */ hdr->src_pid};
+ lnet_process_id_t src = {0};
lnet_libmd_t *md;
int rlength;
int mlength;
LNET_LOCK();
+ src.nid = hdr->src_nid;
+ src.pid = hdr->src_pid;
+
/* NB handles only looked up by creator (no flips) */
md = lnet_wire_handle2md(&hdr->msg.reply.dst_wmd);
if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
LASSERT (md->md_offset == 0);
rlength = hdr->payload_length;
- mlength = MIN(rlength, md->md_length);
+ mlength = MIN(rlength, (int)md->md_length);
if (mlength < rlength &&
(md->md_options & LNET_MD_TRUNCATE) == 0) {
lnet_parse_ack(lnet_ni_t *ni, lnet_msg_t *msg)
{
lnet_hdr_t *hdr = &msg->msg_hdr;
- lnet_process_id_t src = {/* .nid = */ hdr->src_nid,
- /* .pid = */ hdr->src_pid};
- lnet_libmd_t *md;
+ lnet_process_id_t src = {0};
+ lnet_libmd_t *md;
+
+ src.nid = hdr->src_nid;
+ src.pid = hdr->src_pid;
/* Convert ack fields to host byte order */
hdr->msg.ack.match_bits = le64_to_cpu(hdr->msg.ack.match_bits);
void
lnet_print_hdr(lnet_hdr_t * hdr)
{
- lnet_process_id_t src = {/* .nid = */ hdr->src_nid,
- /* .pid = */ hdr->src_pid};
- lnet_process_id_t dst = {/* .nid = */ hdr->dest_nid,
- /* .pid = */ hdr->dest_pid};
+ lnet_process_id_t src = {0};
+ lnet_process_id_t dst = {0};
char *type_str = lnet_msgtyp2str (hdr->type);
+ src.nid = hdr->src_nid;
+ src.pid = hdr->src_pid;
+
+ dst.nid = hdr->dest_nid;
+ dst.pid = hdr->dest_pid;
+
CWARN("P3 Header at %p of type %s\n", hdr, type_str);
CWARN(" From %s\n", libcfs_id2str(src));
CWARN(" To %s\n", libcfs_id2str(dst));
case LNET_MSG_PUT:
case LNET_MSG_REPLY:
- if (payload_length > (for_me ? LNET_MAX_PAYLOAD : LNET_MTU)) {
+ if (payload_length > (__u32)(for_me ? LNET_MAX_PAYLOAD : LNET_MTU)) {
CERROR("%s, src %s: bad %s payload %d "
"(%d max expected)\n",
libcfs_nid2str(from_nid),
msg->msg_hdr.msg.put.ack_wmd.wh_object_cookie =
md->md_lh.lh_cookie;
} else {
- msg->msg_hdr.msg.put.ack_wmd = LNET_WIRE_HANDLE_NONE;
+ msg->msg_hdr.msg.put.ack_wmd.wh_interface_cookie =
+ LNET_WIRE_HANDLE_COOKIE_NONE;
+ msg->msg_hdr.msg.put.ack_wmd.wh_object_cookie =
+ LNET_WIRE_HANDLE_COOKIE_NONE;
}
msg->msg_ev.type = LNET_EVENT_SEND;
int rc;
lnet_handle_md_t mdh;
lnet_peer_t *rtr;
+ lnet_md_t md = {0};
struct list_head *entry;
time_t now;
lnet_process_id_t rtr_id;
LASSERT (the_lnet.ln_rc_state == LNET_RC_STATE_SHUTDOWN);
- rc = LNetMDBind((lnet_md_t){.start = &pinginfo,
- .length = sizeof(pinginfo),
- .threshold = LNET_MD_THRESH_INF,
- .options = LNET_MD_TRUNCATE,
- .eq_handle = the_lnet.ln_rc_eqh},
- LNET_UNLINK,
- &mdh);
+ /* initialize md content; md was declared "= {0}", so the explicit
+ * max_size/user_ptr zeroing below is redundant but kept for clarity */
+ md.start = &pinginfo;
+ md.length = sizeof(pinginfo);
+ md.threshold = LNET_MD_THRESH_INF;
+ md.max_size = 0;
+ md.options = LNET_MD_TRUNCATE; /* fix: was ',' (comma operator chaining into next stmt) */
+ md.user_ptr = NULL;
+ md.eq_handle = the_lnet.ln_rc_eqh;
+
+ rc = LNetMDBind(md, LNET_UNLINK, &mdh);
if (rc < 0) {
CERROR("Can't bind MD: %d\n", rc);
#if defined(__KERNEL__) && defined(LNET_ROUTER)
+#if defined(__linux__)
#include <linux/seq_file.h>
+#endif
/* this is really lnet_proc.c */
}
static struct seq_operations lnet_routes_sops = {
- .start = lnet_route_seq_start,
- .stop = lnet_route_seq_stop,
- .next = lnet_route_seq_next,
- .show = lnet_route_seq_show,
+ /* positional (C89) initializers replace C99 designated ones for
+ * portability; order must match struct seq_operations in
+ * linux/seq_file.h: start, stop, next, show */
+ /* start */ lnet_route_seq_start,
+ /* stop */ lnet_route_seq_stop,
+ /* next */ lnet_route_seq_next,
+ /* show */ lnet_route_seq_show,
};
static int
return rc;
}
-static struct file_operations lnet_routes_fops = {
- .owner = THIS_MODULE,
- .open = lnet_route_seq_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
+static struct file_operations lnet_routes_fops;
+
+/* fill lnet_routes_fops at runtime (called from lnet_proc_init) instead
+ * of using C99 designated initializers; fields not set here stay NULL
+ * because the object has static storage duration (zero-initialized) */
+static void
+lnet_init_routes_fops(void)
+{
+ lnet_routes_fops.owner = THIS_MODULE;
+ lnet_routes_fops.llseek = seq_lseek;
+ lnet_routes_fops.read = seq_read;
+ lnet_routes_fops.open = lnet_route_seq_open;
+ lnet_routes_fops.release = seq_release;
+}
typedef struct {
__u64 lrtrsi_version;
}
static struct seq_operations lnet_routers_sops = {
- .start = lnet_router_seq_start,
- .stop = lnet_router_seq_stop,
- .next = lnet_router_seq_next,
- .show = lnet_router_seq_show,
+ /* positional initializers: must match seq_operations field order
+ * (start, stop, next, show) */
+ /* start */ lnet_router_seq_start,
+ /* stop */ lnet_router_seq_stop,
+ /* next */ lnet_router_seq_next,
+ /* show */ lnet_router_seq_show,
};
static int
return rc;
}
-static struct file_operations lnet_routers_fops = {
- .owner = THIS_MODULE,
- .open = lnet_router_seq_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
+static struct file_operations lnet_routers_fops;
+
+/* runtime init of lnet_routers_fops (called from lnet_proc_init);
+ * unset fields remain NULL via static zero-initialization */
+static void
+lnet_init_routers_fops(void)
+{
+ lnet_routers_fops.owner = THIS_MODULE;
+ lnet_routers_fops.llseek = seq_lseek;
+ lnet_routers_fops.read = seq_read;
+ lnet_routers_fops.open = lnet_router_seq_open;
+ lnet_routers_fops.release = seq_release;
+}
typedef struct {
unsigned long long lpsi_version;
}
static struct seq_operations lnet_peer_sops = {
- .start = lnet_peer_seq_start,
- .stop = lnet_peer_seq_stop,
- .next = lnet_peer_seq_next,
- .show = lnet_peer_seq_show,
+ /* positional initializers: must match seq_operations field order
+ * (start, stop, next, show) */
+ /* start */ lnet_peer_seq_start,
+ /* stop */ lnet_peer_seq_stop,
+ /* next */ lnet_peer_seq_next,
+ /* show */ lnet_peer_seq_show,
};
static int
return rc;
}
-static struct file_operations lnet_peer_fops = {
- .owner = THIS_MODULE,
- .open = lnet_peer_seq_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
+static struct file_operations lnet_peer_fops;
+
+/* runtime init of lnet_peer_fops (called from lnet_proc_init);
+ * unset fields remain NULL via static zero-initialization */
+static void
+lnet_init_peer_fops(void)
+{
+ lnet_peer_fops.owner = THIS_MODULE;
+ lnet_peer_fops.llseek = seq_lseek;
+ lnet_peer_fops.read = seq_read;
+ lnet_peer_fops.open = lnet_peer_seq_open;
+ lnet_peer_fops.release = seq_release;
+}
typedef struct {
int lbsi_idx;
}
static struct seq_operations lnet_buffer_sops = {
- .start = lnet_buffer_seq_start,
- .stop = lnet_buffer_seq_stop,
- .next = lnet_buffer_seq_next,
- .show = lnet_buffer_seq_show,
+ /* positional initializers: must match seq_operations field order
+ * (start, stop, next, show) */
+ /* start */ lnet_buffer_seq_start,
+ /* stop */ lnet_buffer_seq_stop,
+ /* next */ lnet_buffer_seq_next,
+ /* show */ lnet_buffer_seq_show,
};
static int
return rc;
}
-static struct file_operations lnet_buffers_fops = {
- .owner = THIS_MODULE,
- .open = lnet_buffer_seq_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
+static struct file_operations lnet_buffers_fops;
+
+/* runtime init of lnet_buffers_fops (called from lnet_proc_init);
+ * unset fields remain NULL via static zero-initialization */
+static void
+lnet_init_buffers_fops(void)
+{
+ lnet_buffers_fops.owner = THIS_MODULE;
+ lnet_buffers_fops.llseek = seq_lseek;
+ lnet_buffers_fops.read = seq_read;
+ lnet_buffers_fops.open = lnet_buffer_seq_open;
+ lnet_buffers_fops.release = seq_release;
+}
typedef struct {
lnet_ni_t *lnsi_ni;
}
static struct seq_operations lnet_ni_sops = {
- .start = lnet_ni_seq_start,
- .stop = lnet_ni_seq_stop,
- .next = lnet_ni_seq_next,
- .show = lnet_ni_seq_show,
+ /* positional initializers: must match seq_operations field order
+ * (start, stop, next, show) */
+ /* start */ lnet_ni_seq_start,
+ /* stop */ lnet_ni_seq_stop,
+ /* next */ lnet_ni_seq_next,
+ /* show */ lnet_ni_seq_show,
};
static int
return rc;
}
-static struct file_operations lnet_ni_fops = {
- .owner = THIS_MODULE,
- .open = lnet_ni_seq_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
+static struct file_operations lnet_ni_fops;
+
+/* runtime init of lnet_ni_fops (called from lnet_proc_init);
+ * unset fields remain NULL via static zero-initialization */
+static void
+lnet_init_ni_fops(void)
+{
+ lnet_ni_fops.owner = THIS_MODULE;
+ lnet_ni_fops.llseek = seq_lseek;
+ lnet_ni_fops.read = seq_read;
+ lnet_ni_fops.open = lnet_ni_seq_open;
+ lnet_ni_fops.release = seq_release;
+}
void
lnet_proc_init(void)
return;
}
+ lnet_init_routes_fops();
pde->proc_fops = &lnet_routes_fops;
pde->data = NULL;
return;
}
+ lnet_init_routers_fops();
pde->proc_fops = &lnet_routers_fops;
pde->data = NULL;
return;
}
+ lnet_init_peer_fops();
pde->proc_fops = &lnet_peer_fops;
pde->data = NULL;
return;
}
+ lnet_init_buffers_fops();
pde->proc_fops = &lnet_buffers_fops;
pde->data = NULL;
return;
}
+ lnet_init_ni_fops();
pde->proc_fops = &lnet_ni_fops;
pde->data = NULL;
}
LASSERT (tsi->tsi_is_client);
- list_for_each_entry (tsu, &tsi->tsi_units, tsu_list) {
+ cfs_list_for_each_entry_typed (tsu, &tsi->tsi_units,
+ sfw_test_unit_t, tsu_list) {
bulk = tsu->tsu_private;
if (bulk == NULL) continue;
flags != LST_BRW_CHECK_FULL && flags != LST_BRW_CHECK_SIMPLE)
return -EINVAL;
- list_for_each_entry (tsu, &tsi->tsi_units, tsu_list) {
+ cfs_list_for_each_entry_typed (tsu, &tsi->tsi_units,
+ sfw_test_unit_t, tsu_list) {
bulk = srpc_alloc_bulk(npg, breq->blk_opc == LST_BRW_READ);
if (bulk == NULL) {
brw_client_fini(tsi);
if (reply->brw_status != 0) {
atomic_inc(&sn->sn_brw_errors);
- rpc->crpc_status = -reply->brw_status;
+ rpc->crpc_status = -(int)reply->brw_status;
goto out;
}
__swab64s(&reqst->brw_rpyid);
__swab64s(&reqst->brw_bulkid);
}
- LASSERT (reqstmsg->msg_type == srpc_service2request(sv->sv_id));
+ LASSERT (reqstmsg->msg_type == (__u32)srpc_service2request(sv->sv_id));
rpc->srpc_done = brw_server_rpc_done;
return 0;
}
-sfw_test_client_ops_t brw_test_client =
+sfw_test_client_ops_t brw_test_client;
+
+/* runtime initializer for the brw test client ops, replacing the C99
+ * designated-initializer struct for portability */
+void brw_init_test_client(void)
 {
- .tso_init = brw_client_init,
- .tso_fini = brw_client_fini,
- .tso_prep_rpc = brw_client_prep_rpc,
- .tso_done_rpc = brw_client_done_rpc,
+ brw_test_client.tso_init = brw_client_init;
+ brw_test_client.tso_fini = brw_client_fini;
+ brw_test_client.tso_prep_rpc = brw_client_prep_rpc;
+ brw_test_client.tso_done_rpc = brw_client_done_rpc;
-};
+}
-srpc_service_t brw_test_service =
+srpc_service_t brw_test_service;
+/* runtime initializer for the brw test service descriptor; fields not
+ * set here stay zero/NULL via static zero-initialization */
+void brw_init_test_service(void)
 {
- .sv_name = "brw test",
- .sv_handler = brw_server_handle,
- .sv_bulk_ready = brw_bulk_ready,
- .sv_id = SRPC_SERVICE_BRW,
-};
+ brw_test_service.sv_id = SRPC_SERVICE_BRW;
+ /* NOTE(review): sv_name changes from "brw test" to "brw_test" here —
+ * confirm nothing looks services up by the old name */
+ brw_test_service.sv_name = "brw_test";
+ brw_test_service.sv_handler = brw_server_handle;
+ brw_test_service.sv_bulk_ready = brw_bulk_ready;
+}
lstcon_rpc_trans_t *trans;
if (translist != NULL) {
- list_for_each_entry(trans, translist, tas_link) {
+ cfs_list_for_each_entry_typed(trans, translist,
+ lstcon_rpc_trans_t, tas_link) {
/* Can't enqueue two private transaction on
* the same object */
if ((trans->tas_opc & transop) == LST_TRANS_PRIVATE)
lstcon_rpc_t *crpc;
lstcon_node_t *nd;
- list_for_each_entry (crpc, &trans->tas_rpcs_list, crp_link) {
+ cfs_list_for_each_entry_typed (crpc, &trans->tas_rpcs_list,
+ lstcon_rpc_t, crp_link) {
rpc = crpc->crp_rpc;
spin_lock(&rpc->crpc_lock);
lstcon_rpc_trans_name(trans->tas_opc));
/* post all requests */
- list_for_each_entry (crpc, &trans->tas_rpcs_list, crp_link) {
+ cfs_list_for_each_entry_typed (crpc, &trans->tas_rpcs_list,
+ lstcon_rpc_t, crp_link) {
LASSERT (!crpc->crp_posted);
lstcon_rpc_post(crpc);
mutex_up(&console_session.ses_mutex);
- rc = cfs_waitq_wait_event_interruptible_timeout(trans->tas_waitq,
+ cfs_waitq_wait_event_interruptible_timeout(trans->tas_waitq,
lstcon_rpc_trans_check(trans),
- timeout * HZ);
+ cfs_time_seconds(timeout), rc);
rc = (rc > 0)? 0: ((rc < 0)? -EINTR: -ETIMEDOUT);
memset(stat, 0, sizeof(*stat));
- list_for_each_entry(crpc, &trans->tas_rpcs_list, crp_link) {
+ cfs_list_for_each_entry_typed(crpc, &trans->tas_rpcs_list,
+ lstcon_rpc_t, crp_link) {
lstcon_rpc_stat_total(stat, 1);
rpc = crpc->crp_rpc;
next = head_up;
- list_for_each_entry(crpc, &trans->tas_rpcs_list, crp_link) {
+ cfs_list_for_each_entry_typed(crpc, &trans->tas_rpcs_list,
+ lstcon_rpc_t, crp_link) {
if (copy_from_user(&tmp, next, sizeof(struct list_head)))
return -EFAULT;
nd = crpc->crp_node;
- dur = cfs_time_sub(crpc->crp_stamp,
- console_session.ses_id.ses_stamp);
+ dur = (cfs_duration_t)cfs_time_sub(crpc->crp_stamp,
+ (cfs_time_t)console_session.ses_id.ses_stamp);
cfs_duration_usec(dur, &tv);
if (copy_to_user(&ent->rpe_peer,
lstcon_rpc_t *tmp;
int count = 0;
- list_for_each_entry_safe(crpc, tmp,
- &trans->tas_rpcs_list, crp_link) {
+ cfs_list_for_each_entry_safe_typed(crpc, tmp,
+ &trans->tas_rpcs_list,
+ lstcon_rpc_t, crp_link) {
rpc = crpc->crp_rpc;
spin_lock(&rpc->crpc_lock);
start = ((idx / dist) * span) % grp->grp_nnode;
end = ((idx / dist) * span + span - 1) % grp->grp_nnode;
- list_for_each_entry(ndl, &grp->grp_ndl_list, ndl_link) {
+ cfs_list_for_each_entry_typed(ndl, &grp->grp_ndl_list,
+ lstcon_ndlink_t, ndl_link) {
nd = ndl->ndl_node;
if (i < start) {
i ++;
if (start <= end) /* done */
return 0;
- list_for_each_entry(ndl, &grp->grp_ndl_list, ndl_link) {
+ cfs_list_for_each_entry_typed(ndl, &grp->grp_ndl_list,
+ lstcon_ndlink_t, ndl_link) {
if (i > grp->grp_nnode + end)
break;
trq->tsr_bid = test->tes_hdr.tsb_id;
trq->tsr_concur = test->tes_concur;
trq->tsr_is_client = (transop == LST_TRANS_TSBCLIADD) ? 1 : 0;
- trq->tsr_stop_onerr = test->tes_stop_onerr;
+ trq->tsr_stop_onerr = !!test->tes_stop_onerr;
switch (test->tes_type) {
case LST_TEST_PING:
srpc_batch_reply_t *bat_rep;
srpc_test_reply_t *test_rep;
srpc_stat_reply_t *stat_rep;
- int errno = 0;
+ int rc = 0;
switch (transop) {
case LST_TRANS_SESNEW:
mksn_rep->mksn_status == EINVAL);
lstcon_sesop_stat_failure(stat, 1);
- errno = mksn_rep->mksn_status;
+ rc = mksn_rep->mksn_status;
break;
case LST_TRANS_SESEND:
rmsn_rep->rmsn_status == EINVAL);
lstcon_sesop_stat_failure(stat, 1);
- errno = rmsn_rep->rmsn_status;
+ rc = rmsn_rep->rmsn_status;
break;
case LST_TRANS_SESQRY:
}
lstcon_tsbop_stat_failure(stat, 1);
- errno = bat_rep->bar_status;
+ rc = bat_rep->bar_status;
break;
case LST_TRANS_TSBCLIQRY:
return;
lstcon_tsbqry_stat_failure(stat, 1);
- errno = bat_rep->bar_status;
+ rc = bat_rep->bar_status;
break;
case LST_TRANS_TSBCLIADD:
}
lstcon_tsbop_stat_failure(stat, 1);
- errno = test_rep->tsr_status;
+ rc = test_rep->tsr_status;
break;
case LST_TRANS_STATQRY:
}
lstcon_statqry_stat_failure(stat, 1);
- errno = stat_rep->str_status;
+ rc = stat_rep->str_status;
break;
default:
}
if (stat->trs_fwk_errno == 0)
- stat->trs_fwk_errno = errno;
+ stat->trs_fwk_errno = rc;
return;
}
return rc;
}
- list_for_each_entry(ndl, ndlist, ndl_link) {
+ cfs_list_for_each_entry_typed(ndl, ndlist, lstcon_ndlink_t, ndl_link) {
rc = condition == NULL ? 1 :
condition(transop, ndl->ndl_node, arg);
if (!console_session.ses_expired &&
cfs_time_current_sec() - console_session.ses_laststamp >
- console_session.ses_timeout)
+ (time_t)console_session.ses_timeout)
console_session.ses_expired = 1;
trans = console_session.ses_ping;
LASSERT (trans != NULL);
- list_for_each_entry(ndl, &console_session.ses_ndl_list, ndl_link) {
+ cfs_list_for_each_entry_typed(ndl, &console_session.ses_ndl_list,
+ lstcon_ndlink_t, ndl_link) {
nd = ndl->ndl_node;
if (console_session.ses_expired) {
intv = cfs_duration_sec(cfs_time_sub(cfs_time_current(),
nd->nd_stamp));
- if (intv < nd->nd_timeout / 2)
+ if (intv < (time_t)nd->nd_timeout / 2)
continue;
rc = lstcon_rpc_init(nd, SRPC_SERVICE_DEBUG, 0, 0, crpc);
CDEBUG(D_NET, "Ping %d nodes in session\n", count);
- ptimer->stt_expires = cfs_time_current_sec() + LST_PING_INTERVAL;
+ ptimer->stt_expires = (cfs_time_t)(cfs_time_current_sec() + LST_PING_INTERVAL);
stt_add_timer(ptimer);
mutex_up(&console_session.ses_mutex);
}
ptimer = &console_session.ses_ping_timer;
- ptimer->stt_expires = cfs_time_current_sec() + LST_PING_INTERVAL;
+ ptimer->stt_expires = (cfs_time_t)(cfs_time_current_sec() + LST_PING_INTERVAL);
stt_add_timer(ptimer);
LASSERT (id.nid != LNET_NID_ANY);
- list_for_each_entry(ndl, &console_session.ses_ndl_hash[idx], ndl_hlink) {
+ cfs_list_for_each_entry_typed(ndl, &console_session.ses_ndl_hash[idx],
+ lstcon_ndlink_t, ndl_hlink) {
if (ndl->ndl_node->nd_id.nid != id.nid ||
ndl->ndl_node->nd_id.pid != id.pid)
continue;
return -EINVAL;
/* search in hash */
- list_for_each_entry(ndl, &hash[idx], ndl_hlink) {
+ cfs_list_for_each_entry_typed(ndl, &hash[idx],
+ lstcon_ndlink_t, ndl_hlink) {
if (ndl->ndl_node->nd_id.nid != id.nid ||
ndl->ndl_node->nd_id.pid != id.pid)
continue;
lstcon_ndlink_t *ndl;
lstcon_ndlink_t *tmp;
- list_for_each_entry_safe(ndl, tmp, &grp->grp_ndl_list, ndl_link) {
+ cfs_list_for_each_entry_safe_typed(ndl, tmp, &grp->grp_ndl_list,
+ lstcon_ndlink_t, ndl_link) {
if ((ndl->ndl_node->nd_state & keep) == 0)
lstcon_group_ndlink_release(grp, ndl);
}
{
lstcon_group_t *grp;
- list_for_each_entry(grp, &console_session.ses_grp_list, grp_link) {
+ cfs_list_for_each_entry_typed(grp, &console_session.ses_grp_list,
+ lstcon_group_t, grp_link) {
if (strncmp(grp->grp_name, name, LST_NAME_SIZE) != 0)
continue;
LASSERT (index >= 0);
LASSERT (name_up != NULL);
- list_for_each_entry(grp, &console_session.ses_grp_list, grp_link) {
+ cfs_list_for_each_entry_typed(grp, &console_session.ses_grp_list,
+ lstcon_group_t, grp_link) {
if (index-- == 0) {
return copy_to_user(name_up, grp->grp_name, len) ?
-EFAULT : 0;
LASSERT (*index_p >= 0);
LASSERT (*count_p > 0);
- list_for_each_entry(ndl, head, ndl_link) {
+ cfs_list_for_each_entry_typed(ndl, head, lstcon_ndlink_t, ndl_link) {
if (index++ < *index_p)
continue;
memset(gentp, 0, sizeof(lstcon_ndlist_ent_t));
- list_for_each_entry(ndl, &grp->grp_ndl_list, ndl_link)
+ cfs_list_for_each_entry_typed(ndl, &grp->grp_ndl_list,
+ lstcon_ndlink_t, ndl_link)
LST_NODE_STATE_COUNTER(ndl->ndl_node, gentp);
rc = copy_to_user(gents_p, gentp,
{
lstcon_batch_t *bat;
- list_for_each_entry(bat, &console_session.ses_bat_list, bat_link) {
+ cfs_list_for_each_entry_typed(bat, &console_session.ses_bat_list,
+ lstcon_batch_t, bat_link) {
if (strncmp(bat->bat_name, name, LST_NAME_SIZE) == 0) {
*batpp = bat;
return 0;
LASSERT (name_up != NULL);
LASSERT (index >= 0);
- list_for_each_entry(bat, &console_session.ses_bat_list, bat_link) {
+ cfs_list_for_each_entry_typed(bat, &console_session.ses_bat_list,
+ lstcon_batch_t, bat_link) {
if (index-- == 0) {
return copy_to_user(name_up,bat->bat_name, len) ?
-EFAULT: 0;
if (testidx > 0) {
/* query test, test index start from 1 */
- list_for_each_entry(test, &bat->bat_test_list, tes_link) {
+ cfs_list_for_each_entry_typed(test, &bat->bat_test_list,
+ lstcon_test_t, tes_link) {
if (testidx-- == 1)
break;
}
entp->u.tbe_test.tse_concur = test->tes_concur;
}
- list_for_each_entry(ndl, clilst, ndl_link)
+ cfs_list_for_each_entry_typed(ndl, clilst, lstcon_ndlink_t, ndl_link)
LST_NODE_STATE_COUNTER(ndl->ndl_node, &entp->tbe_cli_nle);
- list_for_each_entry(ndl, srvlst, ndl_link)
+ cfs_list_for_each_entry_typed(ndl, srvlst, lstcon_ndlink_t, ndl_link)
LST_NODE_STATE_COUNTER(ndl->ndl_node, &entp->tbe_srv_nle);
rc = copy_to_user(ent_up, entp,
{
lstcon_test_t *test;
- list_for_each_entry(test, &batch->bat_test_list, tes_link) {
+ cfs_list_for_each_entry_typed(test, &batch->bat_test_list,
+ lstcon_test_t, tes_link) {
if (idx == test->tes_hdr.tsb_index) {
*testpp = test;
return 0;
memset(entp, 0, sizeof(*entp));
- list_for_each_entry(ndl, &console_session.ses_ndl_list, ndl_link)
+ cfs_list_for_each_entry_typed(ndl, &console_session.ses_ndl_list,
+ lstcon_ndlink_t, ndl_link)
LST_NODE_STATE_COUNTER(ndl->ndl_node, entp);
if (copy_to_user(sid_up, &console_session.ses_id, sizeof(lst_sid_t)) ||
return rc;
}
-srpc_service_t lstcon_acceptor_service =
+srpc_service_t lstcon_acceptor_service;
+/* runtime initializer for the console acceptor ("join session")
+ * service; sv_bulk_ready (previously set to NULL explicitly) now
+ * relies on static zero-initialization of the file-scope object */
+void lstcon_init_acceptor_service(void)
 {
- .sv_name = "join session",
- .sv_handler = lstcon_acceptor_handle,
- .sv_bulk_ready = NULL,
- .sv_id = SRPC_SERVICE_JOIN,
- .sv_concur = SFW_SERVICE_CONCURRENCY,
-};
+ /* initialize selftest console acceptor service table */
+ lstcon_acceptor_service.sv_name = "join session";
+ lstcon_acceptor_service.sv_handler = lstcon_acceptor_handle;
+ lstcon_acceptor_service.sv_id = SRPC_SERVICE_JOIN;
+ lstcon_acceptor_service.sv_concur = SFW_SERVICE_CONCURRENCY;
+}
extern int lstcon_ioctl_entry(unsigned int cmd, struct libcfs_ioctl_data *data);
for (i = 0; i < LST_GLOBAL_HASHSIZE; i++)
CFS_INIT_LIST_HEAD(&console_session.ses_ndl_hash[i]);
+
+ /* initialize acceptor service table */
+ lstcon_init_acceptor_service();
+
rc = srpc_add_service(&lstcon_acceptor_service);
LASSERT (rc != -EBUSY);
if (rc != 0) {
#include "selftest.h"
+lst_sid_t LST_INVALID_SID = {LNET_NID_ANY, -1};
+
int brw_inject_errors = 0;
CFS_MODULE_PARM(brw_inject_errors, "i", int, 0644,
"# data errors to inject randomly, zero by default");
LASSERT (id <= SRPC_SERVICE_MAX_ID);
LASSERT (id > SRPC_FRAMEWORK_SERVICE_MAX_ID);
- list_for_each_entry (tsc, &sfw_data.fw_tests, tsc_list) {
+ cfs_list_for_each_entry_typed (tsc, &sfw_data.fw_tests,
+ sfw_test_case_t, tsc_list) {
if (tsc->tsc_srv_service->sv_id == id)
return tsc;
}
atomic_inc(&sfw_data.fw_nzombies);
list_add(&sn->sn_list, &sfw_data.fw_zombie_sessions);
- list_for_each_entry (tsb, &sn->sn_batches, bat_list) {
+ cfs_list_for_each_entry_typed (tsb, &sn->sn_batches,
+ sfw_batch_t, bat_list) {
if (sfw_batch_active(tsb)) {
nactive++;
sfw_stop_batch(tsb, 1);
LASSERT (sn != NULL);
- list_for_each_entry (bat, &sn->sn_batches, bat_list) {
+ cfs_list_for_each_entry_typed (bat, &sn->sn_batches,
+ sfw_batch_t, bat_list) {
if (bat->bat_id.bat_id == bid.bat_id)
return bat;
}
cnt->zombie_sessions = atomic_read(&sfw_data.fw_nzombies);
cnt->active_tests = cnt->active_batches = 0;
- list_for_each_entry (bat, &sn->sn_batches, bat_list) {
+ cfs_list_for_each_entry_typed (bat, &sn->sn_batches,
+ sfw_batch_t, bat_list) {
int n = atomic_read(&bat->bat_nactive);
if (n > 0) {
#ifndef __KERNEL__
LASSERT (bk->bk_pages != NULL);
#endif
- LASSERT (bk->bk_niov * SFW_ID_PER_PAGE >= ndest);
- LASSERT (bk->bk_len >= sizeof(lnet_process_id_t) * ndest);
+ LASSERT (bk->bk_niov * SFW_ID_PER_PAGE >= (unsigned int)ndest);
+ LASSERT ((unsigned int)bk->bk_len >= sizeof(lnet_process_id_t) * ndest);
sfw_unpack_test_req(msg);
memcpy(&tsi->tsi_u, &req->tsr_u, sizeof(tsi->tsi_u));
LASSERT (!list_empty(&sn->sn_list)); /* I'm a zombie! */
- list_for_each_entry (tsb, &sn->sn_batches, bat_list) {
+ cfs_list_for_each_entry_typed (tsb, &sn->sn_batches,
+ sfw_batch_t, bat_list) {
if (sfw_batch_active(tsb)) {
spin_unlock(&sfw_data.fw_lock);
return;
return -EPERM;
}
- list_for_each_entry (tsi, &tsb->bat_tests, tsi_list) {
+ cfs_list_for_each_entry_typed (tsi, &tsb->bat_tests,
+ sfw_test_instance_t, tsi_list) {
if (!tsi->tsi_is_client) /* skip server instances */
continue;
atomic_inc(&tsb->bat_nactive);
- list_for_each_entry (tsu, &tsi->tsi_units, tsu_list) {
+ cfs_list_for_each_entry_typed (tsu, &tsi->tsi_units,
+ sfw_test_unit_t, tsu_list) {
atomic_inc(&tsi->tsi_nactive);
tsu->tsu_loop = tsi->tsi_loop;
wi = &tsu->tsu_worker;
if (!sfw_batch_active(tsb))
return -EPERM;
- list_for_each_entry (tsi, &tsb->bat_tests, tsi_list) {
+ cfs_list_for_each_entry_typed (tsi, &tsb->bat_tests,
+ sfw_test_instance_t, tsi_list) {
spin_lock(&tsi->tsi_lock);
if (!tsi->tsi_is_client ||
}
/* abort launched rpcs in the test */
- list_for_each_entry (rpc, &tsi->tsi_active_rpcs, crpc_list) {
+ cfs_list_for_each_entry_typed (rpc, &tsi->tsi_active_rpcs,
+ srpc_client_rpc_t, crpc_list) {
spin_lock(&rpc->crpc_lock);
srpc_abort_rpc(rpc, -EINTR);
return 0;
}
- list_for_each_entry (tsi, &tsb->bat_tests, tsi_list) {
+ cfs_list_for_each_entry_typed (tsi, &tsb->bat_tests,
+ sfw_test_instance_t, tsi_list) {
if (testidx-- > 1)
continue;
static srpc_service_t sfw_services[] =
{
+ /* positional initializers (sv_id, sv_name, zeroed remainder) replace
+ * designated ones; the table is terminated by an all-zero sentinel
+ * entry (NULL sv_name). This hunk also fixes the old "make sessin"
+ * typo. */
{
- .sv_name = "debug",
- .sv_id = SRPC_SERVICE_DEBUG,
+ /* sv_id */ SRPC_SERVICE_DEBUG,
+ /* sv_name */ "debug",
+ 0
},
{
- .sv_name = "query stats",
- .sv_id = SRPC_SERVICE_QUERY_STAT,
+ /* sv_id */ SRPC_SERVICE_QUERY_STAT,
+ /* sv_name */ "query stats",
+ 0
},
{
- .sv_name = "make sessin",
- .sv_id = SRPC_SERVICE_MAKE_SESSION,
+ /* sv_id */ SRPC_SERVICE_MAKE_SESSION,
+ /* sv_name */ "make session",
+ 0
},
{
- .sv_name = "remove session",
- .sv_id = SRPC_SERVICE_REMOVE_SESSION,
+ /* sv_id */ SRPC_SERVICE_REMOVE_SESSION,
+ /* sv_name */ "remove session",
+ 0
},
{
- .sv_name = "batch service",
- .sv_id = SRPC_SERVICE_BATCH,
+ /* sv_id */ SRPC_SERVICE_BATCH,
+ /* sv_name */ "batch service",
+ 0
},
{
- .sv_name = "test service",
- .sv_id = SRPC_SERVICE_TEST,
+ /* sv_id */ SRPC_SERVICE_TEST,
+ /* sv_name */ "test service",
+ 0
},
- { .sv_name = NULL, }
+ {
+ /* sv_id */ 0,
+ /* sv_name */ NULL,
+ 0
+ }
};
extern sfw_test_client_ops_t ping_test_client;
extern srpc_service_t ping_test_service;
+extern void ping_init_test_client(void);
+extern void ping_init_test_service(void);
extern sfw_test_client_ops_t brw_test_client;
extern srpc_service_t brw_test_service;
+extern void brw_init_test_client(void);
+extern void brw_init_test_service(void);
+
int
sfw_startup (void)
CFS_INIT_LIST_HEAD(&sfw_data.fw_zombie_rpcs);
CFS_INIT_LIST_HEAD(&sfw_data.fw_zombie_sessions);
+ brw_init_test_client();
+ brw_init_test_service();
rc = sfw_register_test(&brw_test_service, &brw_test_client);
LASSERT (rc == 0);
+
+ ping_init_test_client();
+ ping_init_test_service();
rc = sfw_register_test(&ping_test_service, &ping_test_client);
LASSERT (rc == 0);
error = 0;
- list_for_each_entry (tsc, &sfw_data.fw_tests, tsc_list) {
+ cfs_list_for_each_entry_typed (tsc, &sfw_data.fw_tests,
+ sfw_test_case_t, tsc_list) {
sv = tsc->tsc_srv_service;
sv->sv_concur = SFW_TEST_CONCURRENCY;
srpc_remove_service(sv);
}
- list_for_each_entry (tsc, &sfw_data.fw_tests, tsc_list) {
+ cfs_list_for_each_entry_typed (tsc, &sfw_data.fw_tests,
+ sfw_test_case_t, tsc_list) {
sv = tsc->tsc_srv_service;
srpc_shutdown_service(sv);
srpc_remove_service(sv);
return;
}
+
+/* Compile-time guards on the layout of selftest on-wire RPC structs:
+ * the asserted sizes and field offsets must be identical on every
+ * platform/compiler, or nodes cannot interoperate. CLASSERT fails the
+ * build if any packed-struct layout drifts. */
+void
+lnet_selftest_structure_assertion(void)
+{
+ CLASSERT(sizeof(srpc_msg_t) == 160);
+ CLASSERT(sizeof(srpc_test_reqst_t) == 70);
+ CLASSERT(offsetof(srpc_msg_t, msg_body.tes_reqst.tsr_concur) == 72);
+ CLASSERT(offsetof(srpc_msg_t, msg_body.tes_reqst.tsr_ndest) == 78);
+ CLASSERT(sizeof(srpc_stat_reply_t) == 136);
+ CLASSERT(sizeof(srpc_stat_reqst_t) == 28);
+}
+
int
lnet_selftest_init (void)
{
return 0;
}
-sfw_test_client_ops_t ping_test_client =
+sfw_test_client_ops_t ping_test_client;
+/* runtime initializer for the ping test client ops, replacing the C99
+ * designated-initializer struct for portability */
+void ping_init_test_client(void)
{
- .tso_init = ping_client_init,
- .tso_fini = ping_client_fini,
- .tso_prep_rpc = ping_client_prep_rpc,
- .tso_done_rpc = ping_client_done_rpc,
-};
+ ping_test_client.tso_init = ping_client_init;
+ ping_test_client.tso_fini = ping_client_fini;
+ ping_test_client.tso_prep_rpc = ping_client_prep_rpc;
+ ping_test_client.tso_done_rpc = ping_client_done_rpc;
+}
-srpc_service_t ping_test_service =
+srpc_service_t ping_test_service;
+/* runtime initializer for the ping test service descriptor; unset
+ * fields stay zero/NULL via static zero-initialization */
+void ping_init_test_service(void)
{
- .sv_name = "ping test",
- .sv_handler = ping_server_handle,
- .sv_id = SRPC_SERVICE_PING,
-};
+ ping_test_service.sv_id = SRPC_SERVICE_PING;
+ /* NOTE(review): sv_name changes from "ping test" to "ping_test" —
+ * confirm nothing looks services up by the old name */
+ ping_test_service.sv_name = "ping_test";
+ ping_test_service.sv_handler = ping_server_handle;
+}
LASSERT (nid != LNET_NID_ANY);
- list_for_each_entry (peer, peer_list, stp_list) {
+ cfs_list_for_each_entry_typed (peer, peer_list,
+ srpc_peer_t, stp_list) {
if (peer->stp_nid == nid)
return peer;
}
rpc->srpc_reqstbuf = buffer;
rpc->srpc_peer = buffer->buf_peer;
rpc->srpc_self = buffer->buf_self;
- rpc->srpc_replymdh = LNET_INVALID_HANDLE;
+ LNetInvalidateHandle(&rpc->srpc_replymdh);
}
int
{
int rc;
int portal;
- lnet_process_id_t any = {.nid = LNET_NID_ANY,
- .pid = LNET_PID_ANY};
+ lnet_process_id_t any = {0};
+
+ any.nid = LNET_NID_ANY;
+ any.pid = LNET_PID_ANY;
if (service > SRPC_FRAMEWORK_SERVICE_MAX_ID)
portal = SRPC_REQUEST_PORTAL;
LASSERT (!sv->sv_shuttingdown);
- buf->buf_mdh = LNET_INVALID_HANDLE;
+ LNetInvalidateHandle(&buf->buf_mdh);
list_add(&buf->buf_list, &sv->sv_posted_msgq);
sv->sv_nposted_msg++;
spin_unlock(&sv->sv_lock);
sv->sv_shuttingdown = 1; /* i.e. no new active RPC */
/* schedule in-flight RPCs to notice the shutdown */
- list_for_each_entry (rpc, &sv->sv_active_rpcq, srpc_list) {
+ cfs_list_for_each_entry_typed (rpc, &sv->sv_active_rpcq,
+ srpc_server_rpc_t, srpc_list) {
swi_schedule_workitem(&rpc->srpc_wi);
}
/* OK to traverse sv_posted_msgq without lock, since no one
* touches sv_posted_msgq now */
- list_for_each_entry (buf, &sv->sv_posted_msgq, buf_list)
+ cfs_list_for_each_entry_typed (buf, &sv->sv_posted_msgq,
+ srpc_buffer_t, buf_list)
LNetMDUnlink(buf->buf_mdh);
return;
srpc_data.rpc_state = SRPC_STATE_NI_INIT;
- srpc_data.rpc_lnet_eq = LNET_EQ_NONE;
+ LNetInvalidateHandle(&srpc_data.rpc_lnet_eq);
#ifdef __KERNEL__
rc = LNetEQAlloc(16, srpc_lnet_ev_handler, &srpc_data.rpc_lnet_eq);
#else
SRPC_MSG_JOIN_REPLY = 17,
} srpc_msg_type_t;
+#include <libcfs/libcfs_pack.h>
+
/* CAVEAT EMPTOR:
* All srpc_*_reqst_t's 1st field must be matchbits of reply buffer,
* and 2nd field matchbits of bulk buffer if any.
} msg_body;
} WIRE_ATTR srpc_msg_t;
+#include <libcfs/libcfs_unpack.h>
+
#endif /* __SELFTEST_RPC_H__ */
rpc->crpc_bulk.bk_niov = nbulkiov;
rpc->crpc_done = rpc_done;
rpc->crpc_fini = rpc_fini;
- rpc->crpc_reqstmdh =
- rpc->crpc_replymdh =
- rpc->crpc_bulk.bk_mdh = LNET_INVALID_HANDLE;
+ LNetInvalidateHandle(&rpc->crpc_reqstmdh);
+ LNetInvalidateHandle(&rpc->crpc_replymdh);
+ LNetInvalidateHandle(&rpc->crpc_bulk.bk_mdh);
/* no event is expected at this point */
rpc->crpc_bulkev.ev_fired =
#endif
-#define lst_wait_until(cond, lock, fmt, a...) \
+#define lst_wait_until(cond, lock, fmt, ...) \
do { \
int __I = 2; \
while (!(cond)) { \
- __I++; \
- CDEBUG(((__I & (-__I)) == __I) ? D_WARNING : \
- D_NET, /* 2**n? */ \
- fmt, ## a); \
+ CDEBUG(IS_PO2(++__I) ? D_WARNING : D_NET, \
+ fmt, ## __VA_ARGS__); \
spin_unlock(&(lock)); \
\
selftest_wait_events(); \
int
stt_timer_main (void *arg)
{
+ int rc = 0;
UNUSED(arg);
cfs_daemonize("st_timer");
cfs_waitq_wait_event_timeout(stt_data.stt_waitq,
stt_data.stt_shuttingdown,
- cfs_time_seconds(STTIMER_SLOTTIME));
+ cfs_time_seconds(STTIMER_SLOTTIME),
+ rc);
}
spin_lock(&stt_data.stt_lock);
--- /dev/null
+/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-
+ * vim:expandtab:shiftwidth=4:tabstop=4:
+ *
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * winnt selftest driver framework
+ *
+ */
+
+#define DEBUG_SUBSYSTEM S_LNET
+
+/*
+ * Included Headers
+ */
+
+
+#include <libcfs/libcfs.h>
+
+
+/* libcfs module init/exit routines */
+DECLARE_INIT(init_libcfs_module);
+DECLARE_EXIT(exit_libcfs_module);
+
+/* portal module init/exit routines */
+DECLARE_INIT(init_lnet);
+DECLARE_EXIT(fini_lnet);
+
+/* socklnd (tdi) module init/exit routines */
+DECLARE_INIT(ksocknal_module_init);
+DECLARE_EXIT(ksocknal_module_fini);
+
+/* selftest module init/exit routines */
+DECLARE_INIT(lnet_selftest_init);
+DECLARE_EXIT(lnet_selftest_fini);
+
+/*
+ * module info
+ */
+
+struct module libcfs_global_module = {"selftest"};
+
+/*
+ * structure definitions
+ */
+
+#define LNET_SELFTEST_VERSION 0x00010001 /* LNET selftest module version */
+
+#define LNET_SELFTEST_DEVICE L"\\Device\\Selftest" /* device object name */
+#define LNET_SELFTEST_SYMLNK L"\\DosDevices\\Selftest" /* user-visible name for the device*/
+
+typedef struct _DEVICE_EXTENSION {
+ BOOLEAN bProcFS;
+} DEVICE_EXTENSION, *PDEVICE_EXTENSION;
+
+/*
+ * global definitions
+ */
+
+PDEVICE_OBJECT SelfObject = NULL; /* lnet selftest object */
+PDEVICE_OBJECT ProcObject = NULL; /* procfs emulator device */
+
+
+/*
+ * common routines
+ */
+
+
+//
+// complete Irp request ...
+//
+
+NTSTATUS
+LstCompleteIrp(
+ PIRP Irp,
+ NTSTATUS Status,
+ ULONG Info
+ )
+{
+ /* Fill in the IoStatus block and complete the IRP; the caller's
+ * Status is handed back unchanged so call sites can simply write
+ * "return LstCompleteIrp(Irp, Status, Info);". */
+ Irp->IoStatus.Status = Status;
+ Irp->IoStatus.Information = Info;
+ IoCompleteRequest(Irp,IO_NO_INCREMENT);
+
+ return Status;
+}
+
+//
+// Open/Create Device ...
+//
+
+NTSTATUS
+LstCreate(
+ IN PDEVICE_OBJECT DeviceObject,
+ IN PIRP Irp
+ )
+{
+ /* IRP_MJ_CREATE on the selftest control device: no per-open state
+ * is kept, so just log and complete the request with success. */
+ KdPrint(("LstCreate: DeviceCreate ...\n"));
+
+ return LstCompleteIrp(Irp,STATUS_SUCCESS,0);
+}
+
+//
+// Close Device ...
+//
+
+NTSTATUS
+LstClose(
+ IN PDEVICE_OBJECT DeviceObject,
+ IN PIRP Irp)
+{
+ /* IRP_MJ_CLOSE on the selftest control device: nothing to tear
+ * down, so just log and complete the IRP with success.
+ *
+ * UNREFERENCED_PARAMETER used to sit after the return statement,
+ * i.e. in unreachable code (C4702); it belongs before it. */
+ UNREFERENCED_PARAMETER(DeviceObject);
+
+ KdPrint(("LstClose: Device Closed.\n"));
+
+ return LstCompleteIrp(Irp, STATUS_SUCCESS, 0);
+}
+
+
+//
+// computer is being shut down
+//
+
+NTSTATUS
+LstShutdown(
+ IN PDEVICE_OBJECT DeviceObject,
+ IN PIRP Irp
+ )
+{
+ /* IRP_MJ_SHUTDOWN: the machine is going down; nothing needs
+ * flushing here, so just log and complete the IRP.
+ *
+ * UNREFERENCED_PARAMETER used to sit after the return statement,
+ * i.e. in unreachable code (C4702); it belongs before it. */
+ UNREFERENCED_PARAMETER(DeviceObject);
+
+ KdPrint(("LstShutdown: ...\n"));
+
+ return LstCompleteIrp(Irp, STATUS_SUCCESS, 0);
+}
+
+//
+// device io control
+//
+
+
+NTSTATUS
+LstDeviceControl(
+ IN PDEVICE_OBJECT DeviceObject,
+ IN PIRP Irp
+ )
+{
+ /* IRP_MJ_DEVICE_CONTROL handler for the selftest control device.
+ * Only IOCTL_LIBCFS_VERSION is supported; anything else completes
+ * with STATUS_INVALID_DEVICE_REQUEST. */
+ NTSTATUS Status = STATUS_INVALID_DEVICE_REQUEST;
+ PIO_STACK_LOCATION IrpSp;
+
+ ULONG ControlCode;
+ ULONG InputLength;
+ ULONG OutputLength;
+
+ PVOID lpvInBuffer;
+
+ Irp->IoStatus.Information = 0;
+ IrpSp = IoGetCurrentIrpStackLocation(Irp);
+
+ ControlCode = IrpSp->Parameters.DeviceIoControl.IoControlCode;
+ InputLength = IrpSp->Parameters.DeviceIoControl.InputBufferLength;
+ OutputLength = IrpSp->Parameters.DeviceIoControl.OutputBufferLength;
+ lpvInBuffer = Irp->AssociatedIrp.SystemBuffer;
+
+ ASSERT (IrpSp->MajorFunction == IRP_MJ_DEVICE_CONTROL);
+
+ switch (ControlCode)
+ {
+ case IOCTL_LIBCFS_VERSION:
+
+ /* METHOD_BUFFERED: SystemBuffer is only guaranteed to be
+ * max(InputLength, OutputLength) bytes (and may be NULL if
+ * both are zero), so validate before writing the version. */
+ if (lpvInBuffer == NULL || OutputLength < sizeof(ULONG)) {
+ Status = STATUS_BUFFER_TOO_SMALL;
+ break;
+ }
+
+ *((ULONG *)lpvInBuffer) = (ULONG)(LNET_SELFTEST_VERSION);
+ Irp->IoStatus.Information = sizeof(ULONG);
+ Status = STATUS_SUCCESS;
+ break;
+
+ default:
+ break;
+ }
+
+ Irp->IoStatus.Status = Status;
+ IoCompleteRequest(Irp, IO_NO_INCREMENT);
+
+ return Status;
+}
+
+NTSTATUS
+ProcCreate(
+ IN PDEVICE_OBJECT DeviceObject,
+ IN PIRP Irp
+ )
+{
+ /* IRP_MJ_CREATE on the /proc emulator device: the emulated proc
+ * pathname arrives as the first extended-attribute name of the
+ * create request; open it via lustre_open_file() and stash the
+ * handle on the FileObject for later read/write/ioctl/close. */
+ NTSTATUS Status;
+ PIO_STACK_LOCATION IrpSp;
+
+ FILE_FULL_EA_INFORMATION * ea;
+ cfs_file_t * fp;
+
+ IrpSp = IoGetCurrentIrpStackLocation(Irp);
+ ea = (PFILE_FULL_EA_INFORMATION) Irp->AssociatedIrp.SystemBuffer;
+
+ if (!ea) {
+ /* no EA block means no pathname was supplied */
+ Status = STATUS_INVALID_PARAMETER;
+ } else {
+ fp = lustre_open_file(&ea->EaName[0]);
+ if (!fp) {
+ Status = STATUS_OBJECT_NAME_NOT_FOUND;
+ } else {
+ /* FsContext/FsContext2 carry the open file to the
+ * other dispatch routines */
+ IrpSp->FileObject->FsContext = fp;
+ IrpSp->FileObject->FsContext2 = fp->private_data;
+ Status = STATUS_SUCCESS;
+ }
+ }
+
+ return LstCompleteIrp(Irp, Status, 0);
+}
+
+//
+// Close Device ...
+//
+
+NTSTATUS
+ProcClose(
+ IN PDEVICE_OBJECT DeviceObject,
+ IN PIRP Irp)
+{
+ /* IRP_MJ_CLOSE on the /proc emulator device: recover the file
+ * opened by ProcCreate from FsContext and release it.
+ *
+ * UNREFERENCED_PARAMETER used to sit after the return statement,
+ * i.e. in unreachable code (C4702); it belongs before it. */
+ PIO_STACK_LOCATION IrpSp;
+
+ cfs_file_t * fp;
+
+ UNREFERENCED_PARAMETER(DeviceObject);
+
+ IrpSp = IoGetCurrentIrpStackLocation(Irp);
+ fp = (cfs_file_t *) IrpSp->FileObject->FsContext;
+ ASSERT(fp != NULL);
+ ASSERT(IrpSp->FileObject->FsContext2 == fp->private_data);
+
+ lustre_close_file(fp);
+
+ return LstCompleteIrp(Irp, STATUS_SUCCESS, 0);
+}
+
+/*
+ * proc frame routines
+ */
+
+NTSTATUS
+ProcDeviceControl(
+ IN PDEVICE_OBJECT DeviceObject,
+ IN PIRP Irp
+ )
+{
+ /* IRP_MJ_DEVICE_CONTROL handler for the /proc emulator device:
+ * supports the version query and the libcfs proc-entry ioctl. */
+ NTSTATUS Status = STATUS_INVALID_DEVICE_REQUEST;
+ PIO_STACK_LOCATION IrpSp;
+
+ ULONG ControlCode;
+ ULONG InputLength;
+ ULONG OutputLength;
+
+ PVOID lpvInBuffer;
+
+ Irp->IoStatus.Information = 0;
+ IrpSp = IoGetCurrentIrpStackLocation(Irp);
+
+ ControlCode = IrpSp->Parameters.DeviceIoControl.IoControlCode;
+ InputLength = IrpSp->Parameters.DeviceIoControl.InputBufferLength;
+ OutputLength = IrpSp->Parameters.DeviceIoControl.OutputBufferLength;
+ lpvInBuffer = Irp->AssociatedIrp.SystemBuffer;
+
+ ASSERT (IrpSp->MajorFunction == IRP_MJ_DEVICE_CONTROL);
+
+ switch (ControlCode)
+ {
+ case IOCTL_LIBCFS_VERSION:
+
+ /* METHOD_BUFFERED: SystemBuffer is only max(in,out) bytes
+ * and may be NULL; validate before writing the version */
+ if (lpvInBuffer == NULL || OutputLength < sizeof(ULONG)) {
+ Status = STATUS_BUFFER_TOO_SMALL;
+ break;
+ }
+
+ *((ULONG *)lpvInBuffer) = (ULONG)(LNET_SELFTEST_VERSION);
+ Irp->IoStatus.Information = sizeof(ULONG);
+
+ Status = STATUS_SUCCESS;
+
+ break;
+
+ case IOCTL_LIBCFS_ENTRY:
+ {
+ int rc = 0;
+ cfs_file_t * fp;
+
+ /* the ioctl payload is dereferenced below both as input and
+ * to store rc, so it must be present */
+ if (lpvInBuffer == NULL) {
+ Status = STATUS_INVALID_PARAMETER;
+ break;
+ }
+
+ fp = (cfs_file_t *) IrpSp->FileObject->FsContext;
+
+ if (!fp) {
+ rc = -EINVAL;
+ } else {
+ rc = lustre_ioctl_file(fp, (PCFS_PROC_IOCTL) lpvInBuffer);
+ }
+
+ ((PCFS_PROC_IOCTL) lpvInBuffer)->rc = rc;
+ Irp->IoStatus.Information = InputLength;
+ Status = STATUS_SUCCESS;
+ break;
+ }
+
+ default:
+ /* unknown control code: fall out with
+ * STATUS_INVALID_DEVICE_REQUEST */
+ break;
+ }
+
+ Irp->IoStatus.Status = Status;
+ IoCompleteRequest(Irp, IO_NO_INCREMENT);
+
+ return Status;
+}
+
+
+NTSTATUS
+ProcReadWrite (PDEVICE_OBJECT DeviceObject, PIRP Irp)
+{
+ /* IRP_MJ_READ / IRP_MJ_WRITE on the /proc emulator device:
+ * forward the transfer to lustre_read_file()/lustre_write_file()
+ * using the file stashed on FsContext by ProcCreate. */
+ PIO_STACK_LOCATION IrpSp;
+ NTSTATUS Status;
+
+ cfs_file_t * fp;
+ int rc = 0; /* was uninitialized: the fp == NULL path
+ * jumped to errorout and passed stack garbage
+ * to LstCompleteIrp as Information */
+ PCHAR buf;
+
+ IrpSp = IoGetCurrentIrpStackLocation(Irp);
+ if (Irp->MdlAddress) {
+ buf = MmGetSystemAddressForMdlSafe(
+ Irp->MdlAddress,
+ NormalPagePriority);
+ } else {
+ buf = Irp->AssociatedIrp.SystemBuffer;
+ }
+
+ if (buf == NULL) {
+ /* zero-length (or unmappable) transfer: nothing to do */
+ Status = STATUS_SUCCESS;
+ rc = 0;
+ } else {
+ fp = (cfs_file_t *) IrpSp->FileObject->FsContext;
+
+ if (!fp) {
+ Status = STATUS_INVALID_PARAMETER;
+ goto errorout;
+ }
+
+ if (IrpSp->MajorFunction == IRP_MJ_READ) {
+ rc = lustre_read_file(
+ fp, IrpSp->Parameters.Read.ByteOffset.LowPart,
+ IrpSp->Parameters.Read.Length, buf);
+ } else {
+ rc = lustre_write_file(
+ fp, IrpSp->Parameters.Write.ByteOffset.LowPart,
+ IrpSp->Parameters.Write.Length, buf);
+ }
+ if (rc < 0) {
+ cfs_enter_debugger();
+ Status = STATUS_UNSUCCESSFUL;
+ } else {
+ Status = STATUS_SUCCESS;
+ }
+ }
+
+
+errorout:
+ /* Information is a byte count: never report a negative rc */
+ return LstCompleteIrp(Irp, Status, rc < 0 ? 0 : rc);
+}
+
+
+//
+// common dispatch routines
+//
+
+NTSTATUS
+LstDispatchRequest(
+ IN PDEVICE_OBJECT DeviceObject,
+ IN PIRP Irp
+ )
+{
+ /* Single driver-wide dispatch routine registered for every major
+ * function in DriverEntry: route the IRP to the selftest control
+ * device (SelfObject) or the /proc emulator (ProcObject) handler
+ * depending on which device object it arrived at.
+ *
+ * NOTE(review): if a recognized major function arrives on a device
+ * object matching neither SelfObject nor ProcObject, the IRP is
+ * returned here without being completed -- confirm that this
+ * combination cannot occur, otherwise the IRP would leak. */
+ NTSTATUS Status;
+ PIO_STACK_LOCATION IrpSp;
+
+ Status = STATUS_INVALID_DEVICE_REQUEST;
+
+ __try {
+
+ IrpSp = IoGetCurrentIrpStackLocation(Irp);
+
+ switch (IrpSp->MajorFunction) {
+
+ case IRP_MJ_CREATE:
+ if (DeviceObject == SelfObject) {
+ Status = LstCreate(DeviceObject, Irp);
+ } else if (DeviceObject == ProcObject) {
+ Status = ProcCreate(DeviceObject, Irp);
+ }
+ break;
+
+ case IRP_MJ_CLOSE:
+ if (DeviceObject == SelfObject) {
+ Status = LstClose(DeviceObject, Irp);
+ } else if (DeviceObject == ProcObject) {
+ Status = ProcClose(DeviceObject, Irp);
+ }
+ break;
+
+ case IRP_MJ_READ:
+ case IRP_MJ_WRITE:
+ /* only the /proc emulator supports read/write */
+ if (DeviceObject == ProcObject) {
+ Status = ProcReadWrite(DeviceObject, Irp);
+ }
+ break;
+
+ case IRP_MJ_DEVICE_CONTROL:
+ if (DeviceObject == SelfObject) {
+ Status = LstDeviceControl(DeviceObject, Irp);
+ } else if (DeviceObject == ProcObject) {
+ Status = ProcDeviceControl(DeviceObject, Irp);
+ }
+ break;
+
+ case IRP_MJ_SHUTDOWN:
+ Status = LstShutdown(DeviceObject, Irp);
+ break;
+
+ default:
+
+ /* unsupported major function: complete the IRP here so it
+ * is not left pending */
+ KdPrint(("LstDispatchRequest: Major Function: %xh is not supported.\n",
+ IrpSp->MajorFunction));
+ LstCompleteIrp(Irp, Status, 0);
+ break;
+ }
+ }
+
+ __finally {
+ }
+
+ return Status;
+}
+
+//
+// create a device object and a dosdevice symbol link
+//
+
+PDEVICE_OBJECT
+LstCreateDevice(
+ IN PDRIVER_OBJECT DriverObject,
+ IN PWCHAR DeviceName,
+ IN PWCHAR SymlnkName,
+ IN BOOLEAN bProcFS
+ )
+{
+ /* Create a named device object plus its user-visible DosDevices
+ * symbolic link. Returns the device object on success, or NULL on
+ * any failure (the partially created device is deleted).
+ * bProcFS is recorded in the device extension to distinguish the
+ * /proc emulator device from the selftest control device. */
+ NTSTATUS Status;
+
+ UNICODE_STRING NtDevName;
+ UNICODE_STRING Win32DevName;
+
+ PDEVICE_EXTENSION DeviceExtension;
+ PDEVICE_OBJECT DeviceObject;
+
+ /* create the device object with the specified name */
+
+ RtlInitUnicodeString(&NtDevName, DeviceName);
+
+ Status = IoCreateDevice(
+ DriverObject,
+ sizeof(DEVICE_EXTENSION),
+ &NtDevName,
+ FILE_DEVICE_UNKNOWN,
+ 0,
+ FALSE,
+ &DeviceObject );
+
+ if (!NT_SUCCESS(Status)) {
+
+ cfs_enter_debugger();
+ return NULL;
+ }
+
+ /* create the symlink to make the device visible to user */
+
+ RtlInitUnicodeString(&Win32DevName, SymlnkName);
+ Status = IoCreateSymbolicLink(&Win32DevName, &NtDevName);
+
+ if (!NT_SUCCESS(Status)) {
+
+ /* undo the device creation so the caller sees a clean failure */
+ IoDeleteDevice(DeviceObject);
+ return NULL;
+ }
+
+ DeviceExtension = (PDEVICE_EXTENSION)DeviceObject->DeviceObjectExtension;
+ DeviceExtension->bProcFS = bProcFS;
+
+ /* buffered I/O: the I/O manager copies user buffers into
+ * SystemBuffer for us */
+ DeviceObject->AlignmentRequirement = 0;
+ DeviceObject->SectorSize = 0;
+ DeviceObject->Flags |= DO_BUFFERED_IO;
+ DeviceObject->Flags &= ~DO_DEVICE_INITIALIZING;
+
+ return DeviceObject;
+}
+
+//
+// DriverEntry
+//
+
+NTSTATUS DriverEntry(
+ IN PDRIVER_OBJECT DriverObject,
+ IN PUNICODE_STRING RegistryPath
+ )
+{
+ /* Driver load entry point: bring the stacked modules up in
+ * dependency order (libcfs -> lnet -> socklnd -> selftest), then
+ * create the two device objects and hook the dispatch table.
+ * On any failure the modules already initialized are unwound in
+ * reverse order. RegistryPath is unused. */
+ KdPrint(("LNet selftest: Build Time: " __DATE__ " " __TIME__ "\n"));
+ KdPrint(("LNet selftest: DriverEntry ... \n"));
+
+ /* initialize libcfs module */
+ if (module_init_libcfs_module() != 0) {
+ KdPrint(("selftest: failed to initialize module: libcfs ...\n"));
+ goto errorout;
+ }
+
+ /* initialize portals module */
+ if (module_init_lnet() != 0) {
+ KdPrint(("selftest: failed to initialize module: lnet ...\n"));
+ module_exit_libcfs_module();
+ goto errorout;
+ }
+
+ /* initialize socklnd (tdi) module */
+ if (module_ksocknal_module_init() != 0) {
+ KdPrint(("selftest: failed to initialize module: socklnd ...\n"));
+ module_fini_lnet();
+ module_exit_libcfs_module();
+ goto errorout;
+ }
+
+ /* initialize lnet selftest module */
+ if (module_lnet_selftest_init() != 0) {
+ KdPrint(("selftest: failed to initialize module: selftest ...\n"));
+ module_ksocknal_module_fini();
+ module_fini_lnet();
+ module_exit_libcfs_module();
+ goto errorout;
+ }
+
+ /* create lnet selftest device object */
+ SelfObject = LstCreateDevice(
+ DriverObject,
+ LNET_SELFTEST_DEVICE,
+ LNET_SELFTEST_SYMLNK,
+ FALSE );
+ if (!SelfObject) {
+ KdPrint(("selftest: failed to allocate DeviceObject ...\n"));
+ module_lnet_selftest_fini();
+ module_ksocknal_module_fini();
+ module_fini_lnet();
+ module_exit_libcfs_module();
+
+ return STATUS_INSUFFICIENT_RESOURCES;
+ }
+
+ /* create the libcfs proc fs emulator device object */
+ ProcObject = LstCreateDevice(
+ DriverObject,
+ LUSTRE_PROC_DEVICE,
+ LUSTRE_PROC_SYMLNK,
+ TRUE );
+ if (!ProcObject) {
+
+ KdPrint(("selftest: failed to allocate proc DeviceObject ...\n"));
+ /* remove Selftest DeviceObject */
+ IoDeleteDevice(SelfObject);
+ module_lnet_selftest_fini();
+ module_ksocknal_module_fini();
+ module_fini_lnet();
+ module_exit_libcfs_module();
+ return STATUS_INSUFFICIENT_RESOURCES;
+ }
+
+ /* initialize the driver callback routines: every supported major
+ * function funnels through LstDispatchRequest */
+
+ DriverObject->MajorFunction[IRP_MJ_CREATE] = LstDispatchRequest;
+ DriverObject->MajorFunction[IRP_MJ_CLOSE] = LstDispatchRequest;
+ DriverObject->MajorFunction[IRP_MJ_READ] = LstDispatchRequest;
+ DriverObject->MajorFunction[IRP_MJ_WRITE] = LstDispatchRequest;
+ DriverObject->MajorFunction[IRP_MJ_SHUTDOWN] = LstDispatchRequest;
+ DriverObject->MajorFunction[IRP_MJ_DEVICE_CONTROL] = LstDispatchRequest;
+
+ return STATUS_SUCCESS;
+
+errorout:
+
+ cfs_enter_debugger();
+
+ return STATUS_UNSUCCESSFUL;
+}
int
swi_scheduler_main (void *arg)
{
- int id = (long) arg;
+ int id = (int)(long_ptr_t) arg;
char name[16];
snprintf(name, sizeof(name), "swi_sd%03d", id);
spin_unlock(&swi_data.wi_lock);
if (nloops < SWI_RESCHED)
- wait_event_interruptible_exclusive(
+ cfs_wait_event_interruptible_exclusive(
swi_data.wi_waitq,
- !swi_sched_cansleep(&swi_data.wi_runq));
+ !swi_sched_cansleep(&swi_data.wi_runq), rc);
else
our_cond_resched();
spin_unlock(&swi_data.wi_lock);
if (nloops < SWI_RESCHED)
- wait_event_interruptible_exclusive(
+ cfs_wait_event_interruptible_exclusive(
swi_data.wi_serial_waitq,
- !swi_sched_cansleep(&swi_data.wi_serial_runq));
+ !swi_sched_cansleep(&swi_data.wi_serial_runq), rc);
else
our_cond_resched();
}
for (i = 0; i < num_online_cpus(); i++) {
- rc = swi_start_thread(swi_scheduler_main, (void *) (long) i);
+ rc = swi_start_thread(swi_scheduler_main,
+ (void *) (long_ptr_t) i);
if (rc != 0) {
CERROR ("Can't spawn workitem scheduler: %d\n", rc);
swi_shutdown();
tx->tx_size = size;
tx->tx_lnetmsg = lntmsg;
- tx->tx_resid = tx->tx_nob =
- offsetof(ksock_msg_t, ksm_u.lnetmsg.ksnm_payload) +
- payload_nob;
+ tx->tx_resid = tx->tx_nob = sizeof(ksock_msg_t) + payload_nob;
usocklnd_init_msg(&tx->tx_msg, KSOCK_MSG_LNET);
tx->tx_msg.ksm_u.lnetmsg.ksnm_hdr = lntmsg->msg_hdr;
tx->tx_iova[0].iov_base = (void *)&tx->tx_msg;
- tx->tx_iova[0].iov_len = offsetof(ksock_msg_t,
- ksm_u.lnetmsg.ksnm_payload);
+ tx->tx_iova[0].iov_len = sizeof(ksock_msg_t);
tx->tx_iov = tx->tx_iova;
tx->tx_niov = 1 +
if (the_lnet.ln_pid & LNET_PID_USERFLAG)
return SOCKLND_CONN_ANY;
- nob = offsetof(ksock_msg_t, ksm_u.lnetmsg.ksnm_payload) +
- lntmsg->msg_len;
+ nob = sizeof(ksock_msg_t) + lntmsg->msg_len;
if (nob >= usock_tuns.ut_min_bulk)
return SOCKLND_CONN_BULK_OUT;
#include <libcfs/libcfsutil.h>
#include <lnet/lnetctl.h>
-#include <sys/utsname.h>
static char rawbuf[8192];
static char *buf = rawbuf;
return (rc == 0 ? 0: 1);
}
+#elif defined(__WINNT__)
+
+#define DAEMON_CTL_NAME "/proc/sys/lnet/daemon_file"
+#define SUBSYS_DEBUG_CTL_NAME "/proc/sys/lnet/subsystem_debug"
+#define DEBUG_CTL_NAME "/proc/sys/lnet/debug"
+#define DUMP_KERNEL_CTL_NAME "/proc/sys/lnet/dump_kernel"
+
+static int
+dbg_open_ctlhandle(const char *str)
+{
+ /* Open one of the emulated /proc control files write-only via
+ * cfs_proc_open(); returns the descriptor, or -1 after printing
+ * the failure to stderr. */
+ int fd;
+ fd = cfs_proc_open((char *)str, (int)O_WRONLY);
+ if (fd < 0) {
+ fprintf(stderr, "open %s failed: %s\n", str,
+ strerror(errno));
+ return -1;
+ }
+ return fd;
+}
+
+static void
+dbg_close_ctlhandle(int fd)
+{
+ /* release a descriptor obtained from dbg_open_ctlhandle() */
+ cfs_proc_close(fd);
+}
+
+static int
+dbg_write_cmd(int fd, char *str, int len)
+{
+ int rc = cfs_proc_write(fd, str, len);
+
+ /* 0 on a complete write; 1 on error or short write */
+ return (rc == len ? 0 : 1);
+}
+
#else
#error - Unknown sysctl convention.
#endif
struct dbg_line *line = linev[i];
struct ptldebug_header *hdr = line->hdr;
- fprintf(out, "%08x:%08x:%u:%u.%06llu:%u:%u:%u:(%s:%u:%s()) %s",
+ fprintf(out, "%08x:%08x:%u:%u." LPU64 ":%u:%u:%u:(%s:%u:%s()) %s",
hdr->ph_subsys, hdr->ph_mask, hdr->ph_cpu_id,
hdr->ph_sec, (unsigned long long)hdr->ph_usec,
hdr->ph_stack, hdr->ph_pid, hdr->ph_extern_pid,
if (argc > 1 && raw)
strcpy(filename, argv[1]);
else
- sprintf(filename, "/tmp/lustre-log."CFS_TIME_T".%u",
- time(NULL),getpid());
+ sprintf(filename, "%s"CFS_TIME_T".%u",
+ DEBUG_FILE_PATH_DEFAULT, time(NULL), getpid());
if (stat(filename, &st) == 0 && S_ISREG(st.st_mode))
unlink(filename);
int main(int argc, char **argv)
{
- if (dbg_initialize(argc, argv) < 0)
- exit(2);
+ int rc = 0;
+
+ rc = libcfs_arch_init();
+ if (rc < 0)
+ return rc;
+
+ rc = dbg_initialize(argc, argv);
+ if (rc < 0)
+ goto errorout;
register_ioc_dev(LNET_DEV_ID, LNET_DEV_PATH,
LNET_DEV_MAJOR, LNET_DEV_MINOR);
Parser_init("debugctl > ", list);
- if (argc > 1)
- return Parser_execarg(argc - 1, &argv[1], list);
+ if (argc > 1) {
+ rc = Parser_execarg(argc - 1, &argv[1], list);
+ unregister_ioc_dev(LNET_DEV_ID);
+ goto errorout;
+ }
Parser_commands();
unregister_ioc_dev(LNET_DEV_ID);
- return 0;
+
+errorout:
+ libcfs_arch_cleanup();
+ return rc;
}
#define _GNU_SOURCE
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <getopt.h>
-#include <errno.h>
-#include <pwd.h>
+#include <libcfs/libcfsutil.h>
#include <lnet/lnetctl.h>
#include <lnet/lnetst.h>
-#include <libcfs/libcfsutil.h>
-static command_t lst_cmdlist[];
+
+lst_sid_t LST_INVALID_SID = {LNET_NID_ANY, -1};
static lst_sid_t session_id;
static int session_key;
static lstcon_trans_stat_t trans_stat;
{
lstcon_rpc_ent_t *ent;
- list_for_each_entry(ent, head, rpe_link) {
+ cfs_list_for_each_entry_typed(ent, head, lstcon_rpc_ent_t, rpe_link) {
ent->rpe_sid = LST_INVALID_SID;
ent->rpe_peer.nid = LNET_NID_ANY;
ent->rpe_peer.pid = LNET_PID_ANY;
{
lstcon_rpc_ent_t *ent;
- list_for_each_entry(ent, head, rpe_link) {
+ cfs_list_for_each_entry_typed(ent, head, lstcon_rpc_ent_t, rpe_link) {
if (ent->rpe_rpc_errno == 0 && ent->rpe_fwk_errno == 0)
continue;
int
lst_new_session_ioctl (char *name, int timeout, int force, lst_sid_t *sid)
{
- lstio_session_new_args_t args = {
- .lstio_ses_key = session_key,
- .lstio_ses_timeout = timeout,
- .lstio_ses_force = force,
- .lstio_ses_idp = sid,
- .lstio_ses_namep = name,
- .lstio_ses_nmlen = strlen(name),
- };
+ lstio_session_new_args_t args = {0};
+
+ args.lstio_ses_key = session_key;
+ args.lstio_ses_timeout = timeout;
+ args.lstio_ses_force = force;
+ args.lstio_ses_idp = sid;
+ args.lstio_ses_nmlen = strlen(name);
+ args.lstio_ses_namep = name;
return lst_ioctl (LSTIO_SESSION_NEW, &args, sizeof(args));
}
lst_session_info_ioctl(char *name, int len, int *key,
lst_sid_t *sid, lstcon_ndlist_ent_t *ndinfo)
{
- lstio_session_info_args_t args = {
- .lstio_ses_keyp = key,
- .lstio_ses_idp = sid,
- .lstio_ses_ndinfo = ndinfo,
- .lstio_ses_nmlen = len,
- .lstio_ses_namep = name,
- };
+ lstio_session_info_args_t args = {0};
+
+ args.lstio_ses_idp = sid;
+ args.lstio_ses_keyp = key;
+ args.lstio_ses_ndinfo = ndinfo;
+ args.lstio_ses_nmlen = len;
+ args.lstio_ses_namep = name;
return lst_ioctl(LSTIO_SESSION_INFO, &args, sizeof(args));
}
int
lst_end_session_ioctl(void)
{
- lstio_session_end_args_t args = {
- .lstio_ses_key = session_key,
- };
+ lstio_session_end_args_t args = {0};
+ args.lstio_ses_key = session_key;
return lst_ioctl (LSTIO_SESSION_END, &args, sizeof(args));
}
lst_ping_ioctl(char *str, int type, int timeout,
int count, lnet_process_id_t *ids, struct list_head *head)
{
- lstio_debug_args_t args = {
- .lstio_dbg_key = session_key,
- .lstio_dbg_type = type,
- .lstio_dbg_flags = 0,
- .lstio_dbg_timeout = timeout,
- .lstio_dbg_nmlen = (str == NULL) ? 0: strlen(str),
- .lstio_dbg_namep = str,
- .lstio_dbg_count = count,
- .lstio_dbg_idsp = ids,
- .lstio_dbg_resultp = head,
- };
+ lstio_debug_args_t args = {0};
+
+ args.lstio_dbg_key = session_key;
+ args.lstio_dbg_type = type;
+ args.lstio_dbg_flags = 0;
+ args.lstio_dbg_timeout = timeout;
+ args.lstio_dbg_nmlen = (str == NULL) ? 0: strlen(str);
+ args.lstio_dbg_namep = str;
+ args.lstio_dbg_count = count;
+ args.lstio_dbg_idsp = ids;
+ args.lstio_dbg_resultp = head;
return lst_ioctl (LSTIO_DEBUG, &args, sizeof(args));
}
}
/* ignore RPC errors and framwork errors */
- list_for_each_entry(ent, &head, rpe_link) {
+ cfs_list_for_each_entry_typed(ent, &head, lstcon_rpc_ent_t, rpe_link) {
fprintf(stdout, "\t%s: %s [session: %s id: %s]\n",
libcfs_id2str(ent->rpe_peer),
lst_node_state2str(ent->rpe_state),
lst_add_nodes_ioctl (char *name, int count, lnet_process_id_t *ids,
struct list_head *resultp)
{
- lstio_group_nodes_args_t args = {
- .lstio_grp_key = session_key,
- .lstio_grp_nmlen = strlen(name),
- .lstio_grp_namep = name,
- .lstio_grp_count = count,
- .lstio_grp_idsp = ids,
- .lstio_grp_resultp = resultp,
- };
+ lstio_group_nodes_args_t args = {0};
+
+ args.lstio_grp_key = session_key;
+ args.lstio_grp_nmlen = strlen(name);
+ args.lstio_grp_namep = name;
+ args.lstio_grp_count = count;
+ args.lstio_grp_idsp = ids;
+ args.lstio_grp_resultp = resultp;
return lst_ioctl(LSTIO_NODES_ADD, &args, sizeof(args));
}
int
lst_add_group_ioctl (char *name)
{
- lstio_group_add_args_t args = {
- .lstio_grp_key = session_key,
- .lstio_grp_nmlen = strlen(name),
- .lstio_grp_namep = name,
- };
+ lstio_group_add_args_t args = {0};
+
+ args.lstio_grp_key = session_key;
+ args.lstio_grp_nmlen = strlen(name);
+ args.lstio_grp_namep = name;
return lst_ioctl(LSTIO_GROUP_ADD, &args, sizeof(args));
}
int
lst_del_group_ioctl (char *name)
{
- lstio_group_del_args_t args = {
- .lstio_grp_key = session_key,
- .lstio_grp_nmlen = strlen(name),
- .lstio_grp_namep = name,
- };
+ lstio_group_del_args_t args = {0};
+
+ args.lstio_grp_key = session_key;
+ args.lstio_grp_nmlen = strlen(name);
+ args.lstio_grp_namep = name;
return lst_ioctl(LSTIO_GROUP_DEL, &args, sizeof(args));
}
lst_update_group_ioctl(int opc, char *name, int clean, int count,
lnet_process_id_t *ids, struct list_head *resultp)
{
- lstio_group_update_args_t args = {
- .lstio_grp_key = session_key,
- .lstio_grp_opc = opc,
- .lstio_grp_args = clean,
- .lstio_grp_nmlen = strlen(name),
- .lstio_grp_namep = name,
- .lstio_grp_count = count,
- .lstio_grp_idsp = ids,
- .lstio_grp_resultp = resultp,
- };
+ lstio_group_update_args_t args = {0};
+
+ args.lstio_grp_key = session_key;
+ args.lstio_grp_opc = opc;
+ args.lstio_grp_args = clean;
+ args.lstio_grp_nmlen = strlen(name);
+ args.lstio_grp_namep = name;
+ args.lstio_grp_count = count;
+ args.lstio_grp_idsp = ids;
+ args.lstio_grp_resultp = resultp;
return lst_ioctl(LSTIO_GROUP_UPDATE, &args, sizeof(args));
}
int
lst_list_group_ioctl(int len, char *name, int idx)
{
- lstio_group_list_args_t args = {
- .lstio_grp_key = session_key,
- .lstio_grp_idx = idx,
- .lstio_grp_nmlen = len,
- .lstio_grp_namep = name,
- };
+ lstio_group_list_args_t args = {0};
+
+ args.lstio_grp_key = session_key;
+ args.lstio_grp_idx = idx;
+ args.lstio_grp_nmlen = len;
+ args.lstio_grp_namep = name;
return lst_ioctl(LSTIO_GROUP_LIST, &args, sizeof(args));
}
lst_info_group_ioctl(char *name, lstcon_ndlist_ent_t *gent,
int *idx, int *count, lstcon_node_ent_t *dents)
{
- lstio_group_info_args_t args = {
- .lstio_grp_key = session_key,
- .lstio_grp_nmlen = strlen(name),
- .lstio_grp_namep = name,
- .lstio_grp_entp = gent,
- .lstio_grp_idxp = idx,
- .lstio_grp_ndentp = count,
- .lstio_grp_dentsp = dents,
- };
+ lstio_group_info_args_t args = {0};
+
+ args.lstio_grp_key = session_key;
+ args.lstio_grp_nmlen = strlen(name);
+ args.lstio_grp_namep = name;
+ args.lstio_grp_entp = gent;
+ args.lstio_grp_idxp = idx;
+ args.lstio_grp_ndentp = count;
+ args.lstio_grp_dentsp = dents;
return lst_ioctl(LSTIO_GROUP_INFO, &args, sizeof(args));
}
lst_stat_ioctl (char *name, int count, lnet_process_id_t *idsp,
int timeout, struct list_head *resultp)
{
- lstio_stat_args_t args = {
- .lstio_sta_key = session_key,
- .lstio_sta_timeout = timeout,
- .lstio_sta_nmlen = strlen(name),
- .lstio_sta_namep = name,
- .lstio_sta_count = count,
- .lstio_sta_idsp = idsp,
- .lstio_sta_resultp = resultp,
- };
+ lstio_stat_args_t args = {0};
+
+ args.lstio_sta_key = session_key;
+ args.lstio_sta_timeout = timeout;
+ args.lstio_sta_nmlen = strlen(name);
+ args.lstio_sta_namep = name;
+ args.lstio_sta_count = count;
+ args.lstio_sta_idsp = idsp;
+ args.lstio_sta_resultp = resultp;
return lst_ioctl (LSTIO_STAT_QUERY, &args, sizeof(args));
}
typedef struct {
/* TODO */
+ int foo;
} lst_srpc_stat_result;
#define LST_LNET_AVG 0
last = now;
- list_for_each_entry(srp, &head, srp_link) {
+ cfs_list_for_each_entry_typed(srp, &head, lst_stat_req_param_t,
+ srp_link) {
rc = lst_stat_ioctl(srp->srp_name,
srp->srp_count, srp->srp_ids,
timeout, &srp->srp_result[idx]);
list_add_tail(&srp->srp_link, &head);
}
- list_for_each_entry(srp, &head, srp_link) {
+ cfs_list_for_each_entry_typed(srp, &head, lst_stat_req_param_t,
+ srp_link) {
rc = lst_stat_ioctl(srp->srp_name, srp->srp_count,
srp->srp_ids, 5, &srp->srp_result[0]);
ecount = 0;
- list_for_each_entry(ent, &srp->srp_result[0], rpe_link) {
+ cfs_list_for_each_entry_typed(ent, &srp->srp_result[0],
+ lstcon_rpc_ent_t, rpe_link) {
if (ent->rpe_rpc_errno != 0) {
ecount ++;
fprintf(stderr, "RPC failure, can't show error on %s\n",
int
lst_add_batch_ioctl (char *name)
{
- lstio_batch_add_args_t args = {
- .lstio_bat_key = session_key,
- .lstio_bat_nmlen = strlen(name),
- .lstio_bat_namep = name,
- };
+ lstio_batch_add_args_t args = {0};
+
+ args.lstio_bat_key = session_key;
+ args.lstio_bat_nmlen = strlen(name);
+ args.lstio_bat_namep = name;
return lst_ioctl (LSTIO_BATCH_ADD, &args, sizeof(args));
}
int
lst_start_batch_ioctl (char *name, int timeout, struct list_head *resultp)
{
- lstio_batch_run_args_t args = {
- .lstio_bat_key = session_key,
- .lstio_bat_timeout = timeout,
- .lstio_bat_nmlen = strlen(name),
- .lstio_bat_namep = name,
- .lstio_bat_resultp = resultp,
- };
+ lstio_batch_run_args_t args = {0};
+
+ args.lstio_bat_key = session_key;
+ args.lstio_bat_timeout = timeout;
+ args.lstio_bat_nmlen = strlen(name);
+ args.lstio_bat_namep = name;
+ args.lstio_bat_resultp = resultp;
return lst_ioctl(LSTIO_BATCH_START, &args, sizeof(args));
}
int
lst_stop_batch_ioctl(char *name, int force, struct list_head *resultp)
{
- lstio_batch_stop_args_t args = {
- .lstio_bat_key = session_key,
- .lstio_bat_force = force,
- .lstio_bat_nmlen = strlen(name),
- .lstio_bat_namep = name,
- .lstio_bat_resultp = resultp,
- };
+ lstio_batch_stop_args_t args = {0};
+
+ args.lstio_bat_key = session_key;
+ args.lstio_bat_force = force;
+ args.lstio_bat_nmlen = strlen(name);
+ args.lstio_bat_namep = name;
+ args.lstio_bat_resultp = resultp;
return lst_ioctl(LSTIO_BATCH_STOP, &args, sizeof(args));
}
int
lst_list_batch_ioctl(int len, char *name, int index)
{
- lstio_batch_list_args_t args = {
- .lstio_bat_key = session_key,
- .lstio_bat_idx = index,
- .lstio_bat_nmlen = len,
- .lstio_bat_namep = name,
- };
+ lstio_batch_list_args_t args = {0};
+
+ args.lstio_bat_key = session_key;
+ args.lstio_bat_idx = index;
+ args.lstio_bat_nmlen = len;
+ args.lstio_bat_namep = name;
return lst_ioctl(LSTIO_BATCH_LIST, &args, sizeof(args));
}
lstcon_test_batch_ent_t *entp, int *idxp,
int *ndentp, lstcon_node_ent_t *dentsp)
{
- lstio_batch_info_args_t args = {
- .lstio_bat_key = session_key,
- .lstio_bat_nmlen = strlen(batch),
- .lstio_bat_namep = batch,
- .lstio_bat_server = server,
- .lstio_bat_testidx = test,
- .lstio_bat_entp = entp,
- .lstio_bat_idxp = idxp,
- .lstio_bat_ndentp = ndentp,
- .lstio_bat_dentsp = dentsp,
- };
+ lstio_batch_info_args_t args = {0};
+
+ args.lstio_bat_key = session_key;
+ args.lstio_bat_nmlen = strlen(batch);
+ args.lstio_bat_namep = batch;
+ args.lstio_bat_server = server;
+ args.lstio_bat_testidx = test;
+ args.lstio_bat_entp = entp;
+ args.lstio_bat_idxp = idxp;
+ args.lstio_bat_ndentp = ndentp;
+ args.lstio_bat_dentsp = dentsp;
return lst_ioctl(LSTIO_BATCH_INFO, &args, sizeof(args));
}
lst_query_batch_ioctl(char *batch, int test, int server,
int timeout, struct list_head *head)
{
- lstio_batch_query_args_t args = {
- .lstio_bat_key = session_key,
- .lstio_bat_testidx = test,
- .lstio_bat_client = !(server),
- .lstio_bat_timeout = timeout,
- .lstio_bat_nmlen = strlen(batch),
- .lstio_bat_namep = batch,
- .lstio_bat_resultp = head,
- };
+ lstio_batch_query_args_t args = {0};
+
+ args.lstio_bat_key = session_key;
+ args.lstio_bat_testidx = test;
+ args.lstio_bat_client = !(server);
+ args.lstio_bat_timeout = timeout;
+ args.lstio_bat_nmlen = strlen(batch);
+ args.lstio_bat_namep = batch;
+ args.lstio_bat_resultp = head;
return lst_ioctl(LSTIO_BATCH_QUERY, &args, sizeof(args));
}
{
lstcon_rpc_ent_t *ent;
- list_for_each_entry(ent, head, rpe_link) {
+ cfs_list_for_each_entry_typed(ent, head, lstcon_rpc_ent_t, rpe_link) {
if (ent->rpe_priv[0] == 0 && active)
continue;
int dist, int span, char *sgrp, char *dgrp,
void *param, int plen, int *retp, struct list_head *resultp)
{
- lstio_test_args_t args = {
- .lstio_tes_key = session_key,
- .lstio_tes_bat_nmlen = strlen(batch),
- .lstio_tes_bat_name = batch,
- .lstio_tes_type = type,
- .lstio_tes_loop = loop,
- .lstio_tes_concur = concur,
- .lstio_tes_dist = dist,
- .lstio_tes_span = span,
- .lstio_tes_sgrp_nmlen = strlen(sgrp),
- .lstio_tes_sgrp_name = sgrp,
- .lstio_tes_dgrp_nmlen = strlen(dgrp),
- .lstio_tes_dgrp_name = dgrp,
- .lstio_tes_param_len = plen,
- .lstio_tes_param = param,
- .lstio_tes_retp = retp,
- .lstio_tes_resultp = resultp,
- };
+ lstio_test_args_t args = {0};
+
+ args.lstio_tes_key = session_key;
+ args.lstio_tes_bat_nmlen = strlen(batch);
+ args.lstio_tes_bat_name = batch;
+ args.lstio_tes_type = type;
+ args.lstio_tes_oneside = 0;
+ args.lstio_tes_loop = loop;
+ args.lstio_tes_concur = concur;
+ args.lstio_tes_dist = dist;
+ args.lstio_tes_span = span;
+ args.lstio_tes_sgrp_nmlen = strlen(sgrp);
+ args.lstio_tes_sgrp_name = sgrp;
+ args.lstio_tes_dgrp_nmlen = strlen(dgrp);
+ args.lstio_tes_dgrp_name = dgrp;
+ args.lstio_tes_param_len = plen;
+ args.lstio_tes_param = param;
+ args.lstio_tes_retp = retp;
+ args.lstio_tes_resultp = resultp;
return lst_ioctl(LSTIO_TEST_ADD, &args, sizeof(args));
}
int
main(int argc, char **argv)
{
+ int rc = 0;
+
setlinebuf(stdout);
- if (lst_initialize() < 0)
- exit(0);
+ rc = libcfs_arch_init();
+ if (rc < 0)
+ return rc;
- if (ptl_initialize(argc, argv) < 0)
- exit(0);
+ rc = lst_initialize();
+ if (rc < 0)
+ goto errorout;
+ rc = ptl_initialize(argc, argv);
+ if (rc < 0)
+ goto errorout;
+
Parser_init("lst > ", lst_cmdlist);
- if (argc != 1)
- return Parser_execarg(argc - 1, argv + 1, lst_cmdlist);
+ if (argc != 1) {
+ rc = Parser_execarg(argc - 1, argv + 1, lst_cmdlist);
+ goto errorout;
+ }
Parser_commands();
- return 0;
+errorout:
+ libcfs_arch_cleanup();
+ return rc;
}
*
*/
-#include <stdio.h>
-#include <sys/types.h>
-#ifdef HAVE_NETDB_H
-#include <netdb.h>
-#endif
-#include <sys/socket.h>
-#ifdef HAVE_NETINET_TCP_H
-#include <netinet/tcp.h>
-#endif
-#include <stdlib.h>
-#include <string.h>
-#include <fcntl.h>
-#ifdef HAVE_SYS_IOCTL_H
-#include <sys/ioctl.h>
-#endif
-#ifndef _IOWR
-#include "ioctl.h"
-#endif
-#include <errno.h>
-#include <unistd.h>
-#include <time.h>
-#include <stdarg.h>
-#ifdef HAVE_ENDIAN_H
-#include <endian.h>
-#endif
-
#include <libcfs/libcfsutil.h>
#include <lnet/api-support.h>
#include <lnet/lnetctl.h>
return (1000.0);
}
+#define LWT_MAX_CPUS (32)
+
int
jt_ptl_lwt(int argc, char **argv)
{
- const int lwt_max_cpus = 32;
int ncpus;
int totalspace;
int nevents_per_cpu;
lwt_event_t *events;
- lwt_event_t *cpu_event[lwt_max_cpus + 1];
- lwt_event_t *next_event[lwt_max_cpus];
- lwt_event_t *first_event[lwt_max_cpus];
+ lwt_event_t *cpu_event[LWT_MAX_CPUS + 1];
+ lwt_event_t *next_event[LWT_MAX_CPUS];
+ lwt_event_t *first_event[LWT_MAX_CPUS];
int cpu;
lwt_event_t *e;
int rc;
if (lwt_snapshot(NULL, &ncpus, &totalspace, NULL, 0) != 0)
return (-1);
- if (ncpus > lwt_max_cpus) {
+ if (ncpus > LWT_MAX_CPUS) {
fprintf(stderr, "Too many cpus: %d (%d)\n",
- ncpus, lwt_max_cpus);
+ ncpus, LWT_MAX_CPUS);
return (-1);
}
int main(int argc, char **argv)
{
- if (ptl_initialize(argc, argv) < 0)
- exit(1);
+ int rc = 0;
+
+ rc = libcfs_arch_init();
+ if (rc < 0)
+ return rc;
+
+ rc = ptl_initialize(argc, argv);
+ if (rc < 0)
+ goto errorout;
Parser_init("ptlctl > ", list);
- if (argc > 1)
- return Parser_execarg(argc - 1, &argv[1], list);
+ if (argc > 1) {
+ rc = Parser_execarg(argc - 1, &argv[1], list);
+ goto errorout;
+ }
Parser_commands();
- return 0;
+errorout:
+ libcfs_arch_cleanup();
+ return rc;
}