#include <linux/sysctl.h>
#include <net/sock.h>
+#include <linux/in.h>
#define DEBUG_SUBSYSTEM S_NAL
#include <linux/kp30.h>
+#include <linux/portals_compat25.h>
#include <portals/p30.h>
#include <portals/lib-p30.h>
#include <portals/nal.h>
#define RANAL_NTX 64 /* # tx descs */
#define RANAL_NTX_NBLK 256 /* # reserved tx descs */
-#define RANAL_FMA_CQ_SIZE 8192 /* # entries in receive CQ
+#define RANAL_FMA_CQ_SIZE 8192 /* # entries in receive CQ
* (overflow is a performance hit) */
#define RANAL_RESCHED 100 /* # scheduler loops before reschedule */
/* default vals for runtime tunables */
#define RANAL_TIMEOUT 30 /* comms timeout (seconds) */
#define RANAL_LISTENER_TIMEOUT 5 /* listener timeout (seconds) */
+#define RANAL_BACKLOG 127 /* listener's backlog */
+#define RANAL_PORT 988 /* listener's port */
#define RANAL_MAX_IMMEDIATE (2<<10) /* immediate payload breakpoint */
-typedef struct
+typedef struct
{
int kra_timeout; /* comms timeout (seconds) */
int kra_listener_timeout; /* max time the listener can block */
- int kra_backlog; /* listener's backlog */
- int kra_port; /* listener's TCP/IP port */
+ int kra_backlog; /* listener's backlog */
+ int kra_port; /* listener's TCP/IP port */
int kra_max_immediate; /* immediate payload breakpoint */
struct ctl_table_header *kra_sysctl; /* sysctl interface */
typedef struct
{
RAP_PVOID rad_handle; /* device handle */
- RAP_PROTECTION_HANDLE rad_ptag; /* protection tag */
- RAP_CQ_HANDLE rad_fma_cq; /* FMA (small message) completion queue */
- RAP_CQ_HANDLE rad_rdma_cq; /* rdma completion queue */
+ RAP_PVOID rad_fma_cqh; /* FMA completion queue handle */
+ RAP_PVOID rad_rdma_cqh; /* rdma completion queue handle */
int rad_id; /* device id */
int rad_idx; /* index in kra_devices */
int rad_ready; /* set by device callback */
spinlock_t rad_lock; /* serialise */
void *rad_scheduler; /* scheduling thread */
} kra_device_t;
-
-typedef struct
+
+typedef struct
{
int kra_init; /* initialisation state */
int kra_shutdown; /* shut down? */
atomic_t kra_nthreads; /* # live threads */
struct semaphore kra_nid_mutex; /* serialise NID/listener ops */
- struct semaphore kra_listener_signal; /* block for listener startup/shutdown */
- struct socket *kra_listener_sock; /* listener's socket */
- int kra_listener_shutdown; /* ask listener to close */
+ struct semaphore kra_listener_signal; /* block for listener startup/shutdown */
+ struct socket *kra_listener_sock; /* listener's socket */
+ int kra_listener_shutdown; /* ask listener to close */
kra_device_t kra_devices[RANAL_MAXDEVS]; /* device/ptag/cq etc */
int kra_ndevs; /* # devices */
long kra_new_min_timeout; /* minimum timeout on any new conn */
wait_queue_head_t kra_reaper_waitq; /* reaper sleeps here */
spinlock_t kra_reaper_lock; /* serialise */
-
+
struct list_head kra_connd_peers; /* peers waiting for a connection */
+ struct list_head kra_connd_acceptq; /* accepted sockets to handshake */
wait_queue_head_t kra_connd_waitq; /* connection daemons sleep here */
spinlock_t kra_connd_lock; /* serialise */
#define RANAL_INIT_LIB 2
#define RANAL_INIT_ALL 3
+typedef struct kra_acceptsock /* accepted socket queued for connd */
+{
+ struct list_head ras_list; /* queue for attention */
+ struct socket *ras_sock; /* the accepted socket */
+} kra_acceptsock_t;
+
/************************************************************************
* Wire message structs. These are sent in sender's byte order
* (i.e. receiver checks magic and flips if required).
*/
-typedef struct kra_connreq /* connection request/response */
-{ /* (sent via socket) */
- __u32 racr_magic; /* I'm an ranal connreq */
- __u16 racr_version; /* this is my version number */
+typedef struct kra_connreq /* connection request/response */
+{ /* (sent via socket) */
+ __u32 racr_magic; /* I'm an ranal connreq */
+ __u16 racr_version; /* this is my version number */
__u16 racr_devid; /* sender's device ID */
- __u64 racr_nid; /* sender's NID */
+ __u64 racr_srcnid; /* sender's NID */
+ __u64 racr_dstnid; /* NID the sender expects to be listening */
__u64 racr_peerstamp; /* sender's instance stamp */
__u64 racr_connstamp; /* sender's connection stamp */
__u32 racr_timeout; /* sender's timeout */
- RAP_RI_PARAMETERS racr_riparams; /* sender's endpoint info */
+ RAP_RI_PARAMETERS racr_riparams; /* sender's endpoint info */
} kra_connreq_t;
typedef struct
typedef struct
{
- ptl_hdr_t raprm_hdr; /* portals header */
- __u64 raprm_cookie; /* opaque completion cookie */
+ ptl_hdr_t raprm_hdr; /* portals header */
+ __u64 raprm_cookie; /* opaque completion cookie */
} kra_putreq_msg_t;
typedef struct
{
- __u64 rapam_src_cookie; /* reflected completion cookie */
- __u64 rapam_dst_cookie; /* opaque completion cookie */
- kra_rdma_desc_t rapam_desc; /* sender's sink buffer */
+ __u64 rapam_src_cookie; /* reflected completion cookie */
+ __u64 rapam_dst_cookie; /* opaque completion cookie */
+ kra_rdma_desc_t rapam_desc; /* sender's sink buffer */
} kra_putack_msg_t;
typedef struct
typedef struct /* NB must fit in FMA "Prefix" */
{
- __u32 ram_magic; /* I'm an ranal message */
- __u16 ram_version; /* this is my version number */
- __u16 ram_type; /* msg type */
+ __u32 ram_magic; /* I'm an ranal message */
+ __u16 ram_version; /* this is my version number */
+ __u16 ram_type; /* msg type */
__u64 ram_srcnid; /* sender's NID */
__u64 ram_connstamp; /* sender's connection stamp */
union {
kra_immediate_msg_t immediate;
- kra_putreq_msg_t putreq;
- kra_putack_msg_t putack;
- kra_get_msg_t get;
+ kra_putreq_msg_t putreq;
+ kra_putack_msg_t putack;
+ kra_get_msg_t get;
kra_completion_msg_t completion;
} ram_u;
__u32 ram_seq; /* incrementing sequence number */
#define RANAL_MSG_NONE 0x00 /* illegal message */
#define RANAL_MSG_NOOP 0x01 /* empty ram_u (keepalive) */
#define RANAL_MSG_IMMEDIATE 0x02 /* ram_u.immediate */
-#define RANAL_MSG_PUT_REQ 0x03 /* ram_u.putreq (src->sink) */
-#define RANAL_MSG_PUT_NAK 0x04 /* ram_u.completion (no PUT match: sink->src) */
-#define RANAL_MSG_PUT_ACK 0x05 /* ram_u.putack (PUT matched: sink->src) */
-#define RANAL_MSG_PUT_DONE 0x86 /* ram_u.completion (src->sink) */
-#define RANAL_MSG_GET_REQ 0x07 /* ram_u.get (sink->src) */
+#define RANAL_MSG_PUT_REQ 0x03 /* ram_u.putreq (src->sink) */
+#define RANAL_MSG_PUT_NAK 0x04 /* ram_u.completion (no PUT match: sink->src) */
+#define RANAL_MSG_PUT_ACK 0x05 /* ram_u.putack (PUT matched: sink->src) */
+#define RANAL_MSG_PUT_DONE 0x86 /* ram_u.completion (src->sink) */
+#define RANAL_MSG_GET_REQ 0x07 /* ram_u.get (sink->src) */
#define RANAL_MSG_GET_NAK 0x08 /* ram_u.completion (no GET match: src->sink) */
-#define RANAL_MSG_GET_DONE 0x89 /* ram_u.completion (src->sink) */
+#define RANAL_MSG_GET_DONE 0x89 /* ram_u.completion (src->sink) */
#define RANAL_MSG_CLOSE 0x8a /* empty ram_u */
/***********************************************************************/
struct list_head tx_list; /* queue on idle_txs/rac_sendq/rac_waitq */
struct kra_conn *tx_conn; /* owning conn */
lib_msg_t *tx_libmsg[2]; /* lib msgs to finalize on completion */
- unsigned long tx_qtime; /* when tx started to wait for something */
+ unsigned long tx_qtime; /* when tx started to wait for something (jiffies) */
int tx_isnblk; /* I'm reserved for non-blocking sends */
int tx_nob; /* # bytes of payload */
int tx_buftype; /* payload buffer type */
#define RANAL_TX_GETT_DONE 0x52 /* GET target about to send GET_DONE */
typedef struct kra_conn
-{
+{
struct kra_peer *rac_peer; /* owning peer */
struct list_head rac_list; /* stash on peer's conn list */
struct list_head rac_hashlist; /* stash in connection hash table */
__u64 rac_peerstamp; /* peer's unique stamp */
__u64 rac_peer_connstamp; /* peer's unique connection stamp */
__u64 rac_my_connstamp; /* my unique connection stamp */
- unsigned long rac_last_tx; /* when I last sent an FMA message */
- unsigned long rac_last_rx; /* when I last received an FMA messages */
- long rac_keepalive; /* keepalive interval */
- long rac_timeout; /* infer peer death on (last_rx + timout > now) */
+ unsigned long rac_last_tx; /* when I last sent an FMA message (jiffies) */
+ unsigned long rac_last_rx; /* when I last received an FMA message (jiffies) */
+ long rac_keepalive; /* keepalive interval (seconds) */
+ long rac_timeout; /* infer peer death if no rx for this many seconds */
__u32 rac_cqid; /* my completion callback id (non-unique) */
__u32 rac_tx_seq; /* tx msg sequence number */
__u32 rac_rx_seq; /* rx msg sequence number */
unsigned int rac_scheduled; /* being attented to */
spinlock_t rac_lock; /* serialise */
kra_device_t *rac_device; /* which device */
- RAP_PVOID rac_rihandle; /* RA endpoint */
+ RAP_PVOID rac_rihandle; /* RA endpoint */
kra_msg_t *rac_rxmsg; /* incoming message (FMA prefix) */
kra_msg_t rac_msg; /* keepalive/CLOSE message buffer */
} kra_conn_t;
atomic_t rap_refcount; /* # users */
int rap_persistence; /* "known" peer refs */
int rap_connecting; /* connection forming */
- unsigned long rap_reconnect_time; /* CURRENT_TIME when reconnect OK */
+ unsigned long rap_reconnect_time; /* CURRENT_SECONDS when reconnect OK */
unsigned long rap_reconnect_interval; /* exponential backoff */
} kra_peer_t;
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0))
# define sk_allocation allocation
-# define sk_data_ready data_ready
+# define sk_data_ready data_ready
# define sk_write_space write_space
# define sk_user_data user_data
# define sk_prot prot
kranal_peer_addref(kra_peer_t *peer)
{
CDEBUG(D_NET, "%p->"LPX64"\n", peer, peer->rap_nid);
- LASSERT(atomic_read(&peer->rap_refcount) > 0);
- atomic_inc(&peer->rap_refcount);
+ LASSERT(atomic_read(&peer->rap_refcount) > 0);
+ atomic_inc(&peer->rap_refcount);
}
static inline void
kranal_peer_decref(kra_peer_t *peer)
{
CDEBUG(D_NET, "%p->"LPX64"\n", peer, peer->rap_nid);
- LASSERT(atomic_read(&peer->rap_refcount) > 0);
- if (atomic_dec_and_test(&peer->rap_refcount))
- kranal_destroy_peer(peer);
+ LASSERT(atomic_read(&peer->rap_refcount) > 0);
+ if (atomic_dec_and_test(&peer->rap_refcount))
+ kranal_destroy_peer(peer);
}
static inline struct list_head *
-kranal_nid2peerlist (ptl_nid_t nid)
+kranal_nid2peerlist (ptl_nid_t nid)
{
unsigned int hash = ((unsigned int)nid) % kranal_data.kra_peer_hash_size;
-
+
return (&kranal_data.kra_peers[hash]);
}
kranal_conn_addref(kra_conn_t *conn)
{
CDEBUG(D_NET, "%p->"LPX64"\n", conn, conn->rac_peer->rap_nid);
- LASSERT(atomic_read(&conn->rac_refcount) > 0);
- atomic_inc(&conn->rac_refcount);
+ LASSERT(atomic_read(&conn->rac_refcount) > 0);
+ atomic_inc(&conn->rac_refcount);
}
static inline void
kranal_conn_decref(kra_conn_t *conn)
{
CDEBUG(D_NET, "%p->"LPX64"\n", conn, conn->rac_peer->rap_nid);
- LASSERT(atomic_read(&conn->rac_refcount) > 0);
- if (atomic_dec_and_test(&conn->rac_refcount))
+ LASSERT(atomic_read(&conn->rac_refcount) > 0);
+ if (atomic_dec_and_test(&conn->rac_refcount))
kranal_destroy_conn(conn);
}
static inline struct list_head *
-kranal_cqid2connlist (__u32 cqid)
+kranal_cqid2connlist (__u32 cqid)
{
unsigned int hash = cqid % kranal_data.kra_conn_hash_size;
-
+
return (&kranal_data.kra_conns [hash]);
}
static inline kra_conn_t *
-kranal_cqid2conn_locked (__u32 cqid)
+kranal_cqid2conn_locked (__u32 cqid)
{
struct list_head *conns = kranal_cqid2connlist(cqid);
struct list_head *tmp;
kra_conn_t *conn;
-
+
list_for_each(tmp, conns) {
conn = list_entry(tmp, kra_conn_t, rac_hashlist);
-
+
if (conn->rac_cqid == cqid)
return conn;
}
-
+
return NULL;
}
tx->tx_buftype == RANAL_BUF_PHYS_MAPPED);
}
-#if CONFIG_X86
static inline __u64
kranal_page2phys (struct page *p)
{
- __u64 page_number = p - mem_map;
-
- return (page_number << PAGE_SHIFT);
+ return page_to_phys(p);
}
-#else
-# error "no page->phys"
-#endif
-extern int kranal_listener_procint(ctl_table *table,
- int write, struct file *filp,
- void *buffer, size_t *lenp);
-extern void kranal_update_reaper_timeout(long timeout);
+extern void kranal_free_acceptsock (kra_acceptsock_t *ras);
+extern int kranal_listener_procint (ctl_table *table,
+ int write, struct file *filp,
+ void *buffer, size_t *lenp);
+extern void kranal_update_reaper_timeout (long timeout);
extern void kranal_tx_done (kra_tx_t *tx, int completion);
extern void kranal_unlink_peer_locked (kra_peer_t *peer);
-extern void kranal_schedule_conn(kra_conn_t *conn);
+extern void kranal_schedule_conn (kra_conn_t *conn);
extern kra_peer_t *kranal_create_peer (ptl_nid_t nid);
extern kra_peer_t *kranal_find_peer_locked (ptl_nid_t nid);
extern void kranal_post_fma (kra_conn_t *conn, kra_tx_t *tx);
extern int kranal_del_peer (ptl_nid_t nid, int single_share);
-extern void kranal_device_callback(RAP_INT32 devid);
+extern void kranal_device_callback (RAP_INT32 devid, RAP_PVOID arg);
extern int kranal_thread_start (int(*fn)(void *arg), void *arg);
extern int kranal_connd (void *arg);
extern int kranal_reaper (void *arg);
extern void kranal_close_conn_locked (kra_conn_t *conn, int error);
extern void kranal_terminate_conn_locked (kra_conn_t *conn);
extern void kranal_connect (kra_peer_t *peer);
+extern int kranal_conn_handshake (struct socket *sock, kra_peer_t *peer);
+extern void kranal_pause(int ticks);