#ifndef EXPORT_SYMTAB
# define EXPORT_SYMTAB
#endif
-
+#ifndef AUTOCONF_INCLUDED
#include <linux/config.h>
+#endif
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <net/sock.h>
#include <linux/in.h>
-#define DEBUG_SUBSYSTEM S_NAL
+#define DEBUG_SUBSYSTEM S_LND
-#include <libcfs/kp30.h>
-#include <portals/p30.h>
-#include <portals/lib-p30.h>
-#include <portals/nal.h>
+#include <libcfs/libcfs.h>
+#include <lnet/lnet.h>
+#include <lnet/lib-lnet.h>
/* CPU_{L,B}E #defines needed by Voltaire headers */
#include <asm/byteorder.h>
/* GCC 3.2.2, miscompiles this driver.
* See http://gcc.gnu.org/bugzilla/show_bug.cgi?id=9853. */
#define GCC_VERSION ((__GNUC__*100 + __GNUC_MINOR__)*100 + __GNUC_PATCHLEVEL__)
-#if GCC_VERSION < 30203
-#error Invalid GCC version. Must use GCC >= 3.2.3
+#if (GCC_VERSION >= 30000) && (GCC_VERSION < 30203)
+# error Invalid GCC version. Must use GCC < 3.0.0 || GCC >= 3.2.3
#endif
-#if CONFIG_SMP
+#ifdef CONFIG_SMP
# define IBNAL_N_SCHED num_online_cpus() /* # schedulers */
#else
# define IBNAL_N_SCHED 1 /* # schedulers */
#endif
-/* sdp-connection.c */
+#define IBNAL_USE_FMR 1
+
+/* tunables fixed at compile time */
+#define IBNAL_PEER_HASH_SIZE 101 /* # peer lists */
+#define IBNAL_RESCHED 100 /* # scheduler loops before reschedule */
+#define IBNAL_MSG_QUEUE_SIZE 8 /* # messages/RDMAs in-flight */
+#define IBNAL_CREDIT_HIGHWATER 7 /* when to eagerly return credits */
+#define IBNAL_MSG_SIZE (4<<10) /* max size of queued messages (inc hdr) */
+
+/* constants derived from sdp-connection.c */
#define IBNAL_QKEY 0
#define IBNAL_PKEY 0xffff
#define IBNAL_PKEY_IDX 0
#define IBNAL_SGID_IDX 0
#define IBNAL_SERVICE_LEVEL 0
#define IBNAL_STATIC_RATE 0
-#define IBNAL_RETRY_CNT 7
-#define IBNAL_RNR_CNT 7
#define IBNAL_EE_FLOW_CNT 1
#define IBNAL_LOCAL_SUB 1
#define IBNAL_TRAFFIC_CLASS 0
#define IBNAL_SOURCE_PATH_BIT 0
-#define IBNAL_OUS_DST_RD 32
+#define IBNAL_OUS_DST_RD 1
#define IBNAL_IB_MTU vv_mtu_1024
-/* sdp-hca-params.h */
+/* constants derived from sdp-hca-params.h */
#define PATH_RATE_2_5GB 2
#define MLX_IPD_1x 1
#define MLX_IPD_4x 0
#define IBNAL_R_2_STATIC_RATE(r) ((r) == PATH_RATE_2_5GB ? MLX_IPD_1x : MLX_IPD_4x)
/* other low-level IB constants */
-#define IBNAL_LOCAL_ACK_TIMEOUT 0x12
#define IBNAL_PKT_LIFETIME 5
#define IBNAL_ARB_INITIATOR_DEPTH 0
#define IBNAL_ARB_RESP_RES 0
#define IBNAL_FAILOVER_ACCEPTED 0
-#define IBNAL_SERVICE_NUMBER 0x11b9a2 /* Fixed service number */
-
-#define IBNAL_MIN_RECONNECT_INTERVAL HZ /* first failed connection retry... */
-#define IBNAL_MAX_RECONNECT_INTERVAL (60*HZ) /* ...exponentially increasing to this */
-
-#define IBNAL_MSG_SIZE (4<<10) /* max size of queued messages (inc hdr) */
-
-#define IBNAL_MSG_QUEUE_SIZE 8 /* # messages/RDMAs in-flight */
-#define IBNAL_CREDIT_HIGHWATER 7 /* when to eagerly return credits */
-
-#define IBNAL_NTX 64 /* # tx descs */
-#define IBNAL_NTX_NBLK 128 /* # reserved tx descs */
-/* reduced from 256 to ensure we register < 255 pages per region.
- * this can change if we register all memory. */
-
-#define IBNAL_PEER_HASH_SIZE 101 /* # peer lists */
-
-#define IBNAL_RESCHED 100 /* # scheduler loops before reschedule */
-
-#define IBNAL_CONCURRENT_PEERS 1000 /* # nodes all talking at once to me */
-
-#define IBNAL_RDMA_BASE 0x0eeb0000
-#define IBNAL_CKSUM 0
-#define IBNAL_WHOLE_MEM 1
-#if !IBNAL_WHOLE_MEM
-# error "incompatible with voltaire adaptor-tavor (REGISTER_RAM_IN_ONE_PHY_MR)"
-#endif
-
-/* default vals for runtime tunables */
-#define IBNAL_IO_TIMEOUT 50 /* default comms timeout (seconds) */
/************************/
/* derived constants... */
/* TX messages (shared by all connections) */
-#define IBNAL_TX_MSGS (IBNAL_NTX + IBNAL_NTX_NBLK)
-#define IBNAL_TX_MSG_BYTES (IBNAL_TX_MSGS * IBNAL_MSG_SIZE)
-#define IBNAL_TX_MSG_PAGES ((IBNAL_TX_MSG_BYTES + PAGE_SIZE - 1)/PAGE_SIZE)
+#define IBNAL_TX_MSGS() (*kibnal_tunables.kib_ntx)
+#define IBNAL_TX_MSG_BYTES() (IBNAL_TX_MSGS() * IBNAL_MSG_SIZE)
+#define IBNAL_TX_MSG_PAGES() ((IBNAL_TX_MSG_BYTES() + PAGE_SIZE - 1)/PAGE_SIZE)
-#if IBNAL_WHOLE_MEM
-# define IBNAL_MAX_RDMA_FRAGS PTL_MD_MAX_IOV
-#else
+#if IBNAL_USE_FMR
# define IBNAL_MAX_RDMA_FRAGS 1
+# define IBNAL_CONCURRENT_SENDS IBNAL_RX_MSGS
+#else
+# define IBNAL_MAX_RDMA_FRAGS LNET_MAX_IOV
+# define IBNAL_CONCURRENT_SENDS IBNAL_MSG_QUEUE_SIZE
#endif
/* RX messages (per connection) */
-#define IBNAL_RX_MSGS IBNAL_MSG_QUEUE_SIZE
-#define IBNAL_RX_MSG_BYTES (IBNAL_RX_MSGS * IBNAL_MSG_SIZE)
-#define IBNAL_RX_MSG_PAGES ((IBNAL_RX_MSG_BYTES + PAGE_SIZE - 1)/PAGE_SIZE)
+#define IBNAL_RX_MSGS (IBNAL_MSG_QUEUE_SIZE*2)
+#define IBNAL_RX_MSG_BYTES (IBNAL_RX_MSGS * IBNAL_MSG_SIZE)
+#define IBNAL_RX_MSG_PAGES ((IBNAL_RX_MSG_BYTES + PAGE_SIZE - 1)/PAGE_SIZE)
-#define IBNAL_CQ_ENTRIES (IBNAL_TX_MSGS * (1 + IBNAL_MAX_RDMA_FRAGS) + \
- IBNAL_RX_MSGS * IBNAL_CONCURRENT_PEERS)
+#define IBNAL_CQ_ENTRIES() (IBNAL_TX_MSGS() * (1 + IBNAL_MAX_RDMA_FRAGS) + \
+ IBNAL_RX_MSGS * *kibnal_tunables.kib_concurrent_peers)
typedef struct
{
- int kib_io_timeout; /* comms timeout (seconds) */
- struct ctl_table_header *kib_sysctl; /* sysctl interface */
+ unsigned int *kib_service_number; /* IB service number */
+ int *kib_min_reconnect_interval; /* first failed connection retry... */
+ int *kib_max_reconnect_interval; /* ...exponentially increasing to this */
+ int *kib_concurrent_peers; /* max # nodes all talking to me */
+ int *kib_cksum; /* checksum kib_msg_t? */
+ int *kib_timeout; /* comms timeout (seconds) */
+ int *kib_ntx; /* # tx descs */
+ int *kib_credits; /* # concurrent sends */
+ int *kib_peercredits; /* # concurrent sends to 1 peer */
+ int *kib_arp_retries; /* # times to retry ARP */
+ char **kib_hca_basename; /* HCA base name */
+ char **kib_ipif_basename; /* IPoIB interface base name */
+ int *kib_local_ack_timeout; /* IB RC QP ack timeout... */
+ int *kib_retry_cnt; /* ...and retry */
+ int *kib_rnr_cnt; /* RNR retries... */
+ int *kib_rnr_nak_timer; /* ...and interval */
+ int *kib_keepalive; /* keepalive interval */
+ int *kib_concurrent_sends; /* send work queue sizing */
+#if IBNAL_USE_FMR
+ int *kib_fmr_remaps; /* # FMR maps before unmap required */
+#endif
+#if defined(CONFIG_SYSCTL) && !CFS_SYSFS_MODULE_PARM
+ cfs_sysctl_table_header_t *kib_sysctl; /* sysctl interface */
+#endif
} kib_tunables_t;
typedef struct
{
int ibp_npages; /* # pages */
- int ibp_mapped; /* mapped? */
- __u64 ibp_vaddr; /* mapped region vaddr */
- __u32 ibp_lkey; /* mapped region lkey */
- __u32 ibp_rkey; /* mapped region rkey */
- vv_mem_reg_h_t ibp_handle; /* mapped region handle */
struct page *ibp_pages[0];
} kib_pages_t;
+#if IBNAL_USE_FMR
typedef struct
{
- vv_mem_reg_h_t md_handle;
- __u32 md_lkey;
- __u32 md_rkey;
- __u64 md_addr;
+ vv_fmr_h_t md_fmrhandle; /* FMR handle */
+ int md_fmrcount; /* # mappings left */
+ int md_active; /* mapping in use? */
+ __u32 md_lkey; /* local key */
+ __u32 md_rkey; /* remote key */
+ __u64 md_addr; /* IO VM address */
} kib_md_t;
+#endif
typedef struct
{
__u64 kib_incarnation; /* which one am I */
int kib_shutdown; /* shut down? */
atomic_t kib_nthreads; /* # live threads */
+ lnet_ni_t *kib_ni; /* _the_ nal instance */
- __u64 kib_svc_id; /* service number I listen on */
vv_gid_t kib_port_gid; /* device/port GID */
vv_p_key_t kib_port_pkey; /* device/port pkey */
- struct semaphore kib_nid_mutex; /* serialise NID ops */
cm_cep_handle_t kib_listen_handle; /* IB listen handle */
rwlock_t kib_global_lock; /* stabilize peer/conn ops */
- spinlock_t kib_vverbs_lock; /* serialize vverbs calls */
int kib_ready; /* CQ callback fired */
int kib_checking_cq; /* a scheduler is checking the CQ */
spinlock_t kib_connd_lock; /* serialise */
wait_queue_head_t kib_sched_waitq; /* schedulers sleep here */
- struct list_head kib_sched_txq; /* tx requiring attention */
- struct list_head kib_sched_rxq; /* rx requiring attention */
spinlock_t kib_sched_lock; /* serialise */
struct kib_tx *kib_tx_descs; /* all the tx descriptors */
kib_pages_t *kib_tx_pages; /* premapped tx msg pages */
struct list_head kib_idle_txs; /* idle tx descriptors */
- struct list_head kib_idle_nblk_txs; /* idle reserved tx descriptors */
- wait_queue_head_t kib_idle_tx_waitq; /* block here for tx descriptor */
__u64 kib_next_tx_cookie; /* RDMA completion cookie */
spinlock_t kib_tx_lock; /* serialise */
#define IBNAL_INIT_CQ 7
#define IBNAL_INIT_ALL 8
-/************************************************************************
- * IB Wire message format.
- * These are sent in sender's byte order (i.e. receiver flips).
- */
-
-typedef struct kib_connparams
-{
- __u32 ibcp_queue_depth;
- __u32 ibcp_max_msg_size;
- __u32 ibcp_max_frags;
-} kib_connparams_t __attribute__((packed));
-
-typedef struct
-{
- ptl_hdr_t ibim_hdr; /* portals header */
- char ibim_payload[0]; /* piggy-backed payload */
-} kib_immediate_msg_t __attribute__((packed));
-
-/* YEUCH! the __u64 address is split into 2 __u32 fields to ensure proper
- * packing. Otherwise we can't fit enough frags into an IBNAL message (<=
- * smallest page size on any arch). */
-typedef struct
-{
- __u32 rf_nob; /* # of bytes */
- __u32 rf_addr_lo; /* lo 4 bytes of vaddr */
- __u32 rf_addr_hi; /* hi 4 bytes of vaddr */
-} kib_rdma_frag_t __attribute__((packed));
-
-typedef struct
-{
- __u32 rd_key; /* local/remote key */
- __u32 rd_nfrag; /* # fragments */
- kib_rdma_frag_t rd_frags[0]; /* buffer frags */
-} kib_rdma_desc_t __attribute__((packed));
-
-/* CAVEAT EMPTOR! We don't actually put ibprm_rd on the wire; it's just there
- * to remember the source buffers while we wait for the PUT_ACK */
-
-typedef struct
-{
- ptl_hdr_t ibprm_hdr; /* portals header */
- __u64 ibprm_cookie; /* opaque completion cookie */
- kib_rdma_frag_t ibprm_rd; /* source buffer */
-} kib_putreq_msg_t __attribute__((packed));
-
-typedef struct
-{
- __u64 ibpam_src_cookie; /* reflected completion cookie */
- __u64 ibpam_dst_cookie; /* opaque completion cookie */
- kib_rdma_desc_t ibpam_rd; /* sender's sink buffer */
-} kib_putack_msg_t __attribute__((packed));
-
-typedef struct
-{
- ptl_hdr_t ibgm_hdr; /* portals header */
- __u64 ibgm_cookie; /* opaque completion cookie */
- kib_rdma_desc_t ibgm_rd; /* rdma descriptor */
-} kib_get_msg_t __attribute__((packed));
-
-typedef struct
-{
- __u64 ibcm_cookie; /* opaque completion cookie */
- __s32 ibcm_status; /* < 0 failure: >= 0 length */
-} kib_completion_msg_t __attribute__((packed));
-
-typedef struct
-{
- /* First 2 fields fixed FOR ALL TIME */
- __u32 ibm_magic; /* I'm an openibnal message */
- __u16 ibm_version; /* this is my version number */
-
- __u8 ibm_type; /* msg type */
- __u8 ibm_credits; /* returned credits */
- __u32 ibm_nob; /* # bytes in whole message */
- __u32 ibm_cksum; /* checksum (0 == no checksum) */
- __u64 ibm_srcnid; /* sender's NID */
- __u64 ibm_srcstamp; /* sender's incarnation */
- __u64 ibm_dstnid; /* destination's NID */
- __u64 ibm_dststamp; /* destination's incarnation */
- __u64 ibm_seq; /* sequence number */
-
- union {
- kib_connparams_t connparams;
- kib_immediate_msg_t immediate;
- kib_putreq_msg_t putreq;
- kib_putack_msg_t putack;
- kib_get_msg_t get;
- kib_completion_msg_t completion;
- } ibm_u __attribute__((packed));
-} kib_msg_t __attribute__((packed));
-
-#define IBNAL_MSG_MAGIC 0x0be91b91 /* unique magic */
-#define IBNAL_MSG_VERSION 4 /* current protocol version */
-
-#define IBNAL_MSG_CONNREQ 0xc0 /* connection request */
-#define IBNAL_MSG_CONNACK 0xc1 /* connection acknowledge */
-#define IBNAL_MSG_NOOP 0xd0 /* nothing (just credits) */
-#define IBNAL_MSG_IMMEDIATE 0xd1 /* immediate */
-#define IBNAL_MSG_PUT_REQ 0xd2 /* putreq (src->sink) */
-#define IBNAL_MSG_PUT_NAK 0xd3 /* completion (sink->src) */
-#define IBNAL_MSG_PUT_ACK 0xd4 /* putack (sink->src) */
-#define IBNAL_MSG_PUT_DONE 0xd5 /* completion (src->sink) */
-#define IBNAL_MSG_GET_REQ 0xd6 /* getreq (sink->src) */
-#define IBNAL_MSG_GET_DONE 0xd7 /* completion (src->sink: all OK) */
+#include "viblnd_wire.h"
/***********************************************************************/
{
struct list_head rx_list; /* queue for attention */
struct kib_conn *rx_conn; /* owning conn */
- int rx_responded; /* responded to peer? */
- int rx_posted; /* posted? */
-#if IBNAL_WHOLE_MEM
+ int rx_nob; /* # bytes received (-1 while posted) */
vv_l_key_t rx_lkey; /* local key */
-#else
- __u64 rx_vaddr; /* pre-mapped buffer (hca vaddr) */
-#endif
kib_msg_t *rx_msg; /* pre-mapped buffer (host vaddr) */
vv_wr_t rx_wrq; /* receive work item */
vv_scatgat_t rx_gl; /* and its memory */
} kib_rx_t;
-#if IBNAL_WHOLE_MEM
-# define KIBNAL_RX_VADDR(rx) ((__u64)((unsigned long)((rx)->rx_msg)))
-# define KIBNAL_RX_LKEY(rx) ((rx)->rx_lkey)
-#else
-# define KIBNAL_RX_VADDR(rx) ((rx)->rx_vaddr)
-# define KIBNAL_RX_LKEY(rx) ((rx)->rx_conn->ibc_rx_pages->ibp_lkey)
-#endif
-
typedef struct kib_tx /* transmit message */
{
struct list_head tx_list; /* queue on idle_txs ibc_tx_queue etc. */
- int tx_isnblk; /* I'm reserved for non-blocking sends */
struct kib_conn *tx_conn; /* owning conn */
- int tx_mapped; /* mapped for RDMA? */
int tx_sending; /* # tx callbacks outstanding */
int tx_queued; /* queued for sending */
int tx_waiting; /* waiting for peer */
int tx_status; /* completion status */
unsigned long tx_deadline; /* completion deadline */
__u64 tx_cookie; /* completion cookie */
- lib_msg_t *tx_libmsg[2]; /* lib msgs to finalize on completion */
-#if IBNAL_WHOLE_MEM
+ lnet_msg_t *tx_lntmsg[2]; /* lnet msgs to finalize on completion */
vv_l_key_t tx_lkey; /* local key for message buffer */
-#else
- kib_md_t tx_md; /* RDMA mapping (active/passive) */
- __u64 tx_vaddr; /* pre-mapped buffer (hca vaddr) */
-#endif
kib_msg_t *tx_msg; /* message buffer (host vaddr) */
int tx_nwrq; /* # send work items */
+#if IBNAL_USE_FMR
+ vv_wr_t tx_wrq[2]; /* send work items... */
+ vv_scatgat_t tx_gl[2]; /* ...and their memory */
+ kib_rdma_desc_t tx_rd[1]; /* rdma descriptor */
+ kib_md_t tx_md; /* FMR mapping descriptor */
+ __u64 *tx_pages; /* page phys addrs */
+#else
vv_wr_t *tx_wrq; /* send work items... */
vv_scatgat_t *tx_gl; /* ...and their memory */
kib_rdma_desc_t *tx_rd; /* rdma descriptor (src buffers) */
-} kib_tx_t;
-
-#if IBNAL_WHOLE_MEM
-# define KIBNAL_TX_VADDR(tx) ((__u64)((unsigned long)((tx)->tx_msg)))
-# define KIBNAL_TX_LKEY(tx) ((tx)->tx_lkey)
-#else
-# define KIBNAL_TX_VADDR(tx) ((tx)->tx_vaddr)
-# define KIBNAL_TX_LKEY(tx) (kibnal_data.kib_tx_pages->ibp_lkey)
#endif
-
-#define KIB_TX_UNMAPPED 0
-#define KIB_TX_MAPPED 1
+} kib_tx_t;
/* Passive connection request (listener callback) queued for handling by connd */
typedef struct kib_pcreq
__u64 ibc_incarnation; /* which instance of the peer */
__u64 ibc_txseq; /* tx sequence number */
__u64 ibc_rxseq; /* rx sequence number */
+ __u32 ibc_version; /* peer protocol version */
atomic_t ibc_refcount; /* # users */
int ibc_state; /* what's happening */
- atomic_t ibc_nob; /* # bytes buffered */
int ibc_nsends_posted; /* # uncompleted sends */
int ibc_credits; /* # credits I have */
int ibc_outstanding_credits; /* # credits to return */
+ int ibc_reserved_credits; /* # credits for ACK/DONE msgs */
int ibc_disconnect; /* some disconnect callback fired */
int ibc_comms_error; /* set on comms error */
+ unsigned long ibc_last_send; /* time of last send */
struct list_head ibc_early_rxs; /* rxs completed before ESTABLISHED */
+ struct list_head ibc_tx_queue_nocred; /* sends that don't need a cred */
+ struct list_head ibc_tx_queue_rsrvd; /* sends that need a reserved cred */
struct list_head ibc_tx_queue; /* send queue */
struct list_head ibc_active_txs; /* active tx awaiting completion */
spinlock_t ibc_lock; /* serialise */
} kib_conn_t;
#define IBNAL_CONN_INIT_NOTHING 0 /* incomplete init */
-#define IBNAL_CONN_INIT 1 /* completed init */
-#define IBNAL_CONN_ACTIVE_ARP 2 /* active arping */
-#define IBNAL_CONN_ACTIVE_CONNECT 3 /* active sending req */
-#define IBNAL_CONN_ACTIVE_CHECK_REPLY 4 /* active checking reply */
-#define IBNAL_CONN_ACTIVE_RTU 5 /* active sending rtu */
-#define IBNAL_CONN_PASSIVE_WAIT 6 /* passive waiting for rtu */
-#define IBNAL_CONN_ESTABLISHED 7 /* connection established */
-#define IBNAL_CONN_DISCONNECT1 8 /* disconnect phase 1 */
-#define IBNAL_CONN_DISCONNECT2 9 /* disconnect phase 2 */
-#define IBNAL_CONN_DISCONNECTED 10 /* disconnect complete */
+#define IBNAL_CONN_INIT_QP 1 /* QP allocated */
+#define IBNAL_CONN_INIT 2 /* completed init */
+#define IBNAL_CONN_ACTIVE_ARP 3 /* active arping */
+#define IBNAL_CONN_ACTIVE_CONNECT 4 /* active sending req */
+#define IBNAL_CONN_ACTIVE_CHECK_REPLY 5 /* active checking reply */
+#define IBNAL_CONN_ACTIVE_RTU 6 /* active sending rtu */
+#define IBNAL_CONN_PASSIVE_WAIT 7 /* passive waiting for rtu */
+#define IBNAL_CONN_ESTABLISHED 8 /* connection established */
+#define IBNAL_CONN_DISCONNECT1 9 /* disconnect phase 1 */
+#define IBNAL_CONN_DISCONNECT2 10 /* disconnect phase 2 */
+#define IBNAL_CONN_DISCONNECTED 11 /* disconnect complete */
typedef struct kib_peer
{
struct list_head ibp_list; /* stash on global peer list */
struct list_head ibp_connd_list; /* schedule on kib_connd_peers */
- ptl_nid_t ibp_nid; /* who's on the other end(s) */
+ lnet_nid_t ibp_nid; /* who's on the other end(s) */
__u32 ibp_ip; /* IP to query for peer conn params */
int ibp_port; /* port to qery for peer conn params */
__u64 ibp_incarnation; /* peer's incarnation */
int ibp_persistence; /* "known" peer refs */
struct list_head ibp_conns; /* all active connections */
struct list_head ibp_tx_queue; /* msgs waiting for a conn */
- int ibp_connecting; /* connecting+accepting */
+ int ibp_connecting; /* current active connection attempts */
+ int ibp_accepting; /* current passive connection attempts */
+ int ibp_arp_count; /* # arp attempts */
unsigned long ibp_reconnect_time; /* when reconnect may be attempted */
unsigned long ibp_reconnect_interval; /* exponential backoff */
+ int ibp_error; /* errno on closing this peer */
+ cfs_time_t ibp_last_alive; /* when (in jiffies) I was last alive */
} kib_peer_t;
-extern lib_nal_t kibnal_lib;
extern kib_data_t kibnal_data;
extern kib_tunables_t kibnal_tunables;
+int kibnal_startup (lnet_ni_t *ni);
+void kibnal_shutdown (lnet_ni_t *ni);
+int kibnal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg);
+int kibnal_send (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg);
+extern int kibnal_eager_recv (lnet_ni_t *ni, void *private,
+ lnet_msg_t *lntmsg, void **new_private);
+int kibnal_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg,
+ int delayed, unsigned int niov,
+ struct iovec *iov, lnet_kiov_t *kiov,
+ unsigned int offset, unsigned int mlen, unsigned int rlen);
extern void kibnal_init_msg(kib_msg_t *msg, int type, int body_nob);
-extern void kibnal_pack_msg(kib_msg_t *msg, int credits, ptl_nid_t dstnid,
- __u64 dststamp, __u64 seq);
-extern int kibnal_unpack_msg(kib_msg_t *msg, int nob);
-extern kib_peer_t *kibnal_create_peer(ptl_nid_t nid);
+extern void kibnal_pack_msg(kib_msg_t *msg, __u32 version, int credits,
+ lnet_nid_t dstnid, __u64 dststamp, __u64 seq);
+extern int kibnal_unpack_msg(kib_msg_t *msg, __u32 expected_version, int nob);
+extern int kibnal_create_peer(kib_peer_t **peerp, lnet_nid_t nid);
extern void kibnal_destroy_peer(kib_peer_t *peer);
-extern int kibnal_del_peer(ptl_nid_t nid, int single_share);
-extern kib_peer_t *kibnal_find_peer_locked(ptl_nid_t nid);
+extern int kibnal_add_persistent_peer (lnet_nid_t nid, __u32 ip);
+extern int kibnal_del_peer(lnet_nid_t nid);
+extern kib_peer_t *kibnal_find_peer_locked(lnet_nid_t nid);
extern void kibnal_unlink_peer_locked(kib_peer_t *peer);
+extern void kibnal_peer_alive(kib_peer_t *peer);
extern int kibnal_close_stale_conns_locked(kib_peer_t *peer,
__u64 incarnation);
extern kib_conn_t *kibnal_create_conn(cm_cep_handle_t cep);
extern void kibnal_listen_callback(cm_cep_handle_t cep, cm_conn_data_t *info, void *arg);
-extern int kibnal_alloc_pages(kib_pages_t **pp, int npages, int access);
+extern int kibnal_alloc_pages(kib_pages_t **pp, int npages, int access);
extern void kibnal_free_pages(kib_pages_t *p);
extern void kibnal_check_sends(kib_conn_t *conn);
extern void kibnal_async_callback(vv_event_record_t ev);
extern void kibnal_cq_callback(unsigned long context);
extern void kibnal_passive_connreq(kib_pcreq_t *pcr, int reject);
-extern void kibnal_pause(int ticks);
+extern void kibnal_txlist_done (struct list_head *txlist, int status);
extern void kibnal_queue_tx(kib_tx_t *tx, kib_conn_t *conn);
extern int kibnal_init_rdma(kib_tx_t *tx, int type, int nob,
kib_rdma_desc_t *dstrd, __u64 dstcookie);
-
-static inline int
-wrq_signals_completion (vv_wr_t *wrq)
-{
- return wrq->completion_notification != 0;
-}
+extern int kibnal_tunables_init(void);
+extern void kibnal_tunables_fini(void);
#define kibnal_conn_addref(conn) \
do { \
#define kibnal_peer_addref(peer) \
do { \
- CDEBUG(D_NET, "peer[%p] -> "LPX64" (%d)++\n", \
- (peer), (peer)->ibp_nid, \
+ CDEBUG(D_NET, "peer[%p] -> %s (%d)++\n", \
+ (peer), libcfs_nid2str((peer)->ibp_nid), \
atomic_read (&(peer)->ibp_refcount)); \
LASSERT(atomic_read(&(peer)->ibp_refcount) > 0); \
atomic_inc(&(peer)->ibp_refcount); \
#define kibnal_peer_decref(peer) \
do { \
- CDEBUG(D_NET, "peer[%p] -> "LPX64" (%d)--\n", \
- (peer), (peer)->ibp_nid, \
+ CDEBUG(D_NET, "peer[%p] -> %s (%d)--\n", \
+ (peer), libcfs_nid2str((peer)->ibp_nid), \
atomic_read (&(peer)->ibp_refcount)); \
LASSERT(atomic_read(&(peer)->ibp_refcount) > 0); \
if (atomic_dec_and_test(&(peer)->ibp_refcount)) \
} while (0)
static inline struct list_head *
-kibnal_nid2peerlist (ptl_nid_t nid)
+kibnal_nid2peerlist (lnet_nid_t nid)
{
unsigned int hash = ((unsigned int)nid) % kibnal_data.kib_peer_hash_size;
static inline void
kibnal_queue_tx_locked (kib_tx_t *tx, kib_conn_t *conn)
{
- /* CAVEAT EMPTOR: tx takes caller's ref on conn */
-
+ struct list_head *q;
+
LASSERT (tx->tx_nwrq > 0); /* work items set up */
LASSERT (!tx->tx_queued); /* not queued for sending already */
+ tx->tx_queued = 1;
+ tx->tx_deadline = jiffies + (*kibnal_tunables.kib_timeout * HZ);
+
if (tx->tx_conn == NULL) {
kibnal_conn_addref(conn);
tx->tx_conn = conn;
+ LASSERT (tx->tx_msg->ibm_type != IBNAL_MSG_PUT_DONE);
} else {
LASSERT (tx->tx_conn == conn);
LASSERT (tx->tx_msg->ibm_type == IBNAL_MSG_PUT_DONE);
}
- tx->tx_queued = 1;
- tx->tx_deadline = jiffies + kibnal_tunables.kib_io_timeout * HZ;
- list_add_tail(&tx->tx_list, &conn->ibc_tx_queue);
+
+ if (conn->ibc_version == IBNAL_MSG_VERSION_RDMAREPLYNOTRSRVD) {
+ /* All messages have simple credit control */
+ q = &conn->ibc_tx_queue;
+ } else {
+ LASSERT (conn->ibc_version == IBNAL_MSG_VERSION);
+
+ switch (tx->tx_msg->ibm_type) {
+ case IBNAL_MSG_PUT_REQ:
+ case IBNAL_MSG_GET_REQ:
+ /* RDMA request: reserve a buffer for the RDMA reply
+ * before sending */
+ q = &conn->ibc_tx_queue_rsrvd;
+ break;
+
+ case IBNAL_MSG_PUT_NAK:
+ case IBNAL_MSG_PUT_ACK:
+ case IBNAL_MSG_PUT_DONE:
+ case IBNAL_MSG_GET_DONE:
+ /* RDMA reply/completion: no credits; peer has reserved
+ * a reply buffer */
+ q = &conn->ibc_tx_queue_nocred;
+ break;
+
+ case IBNAL_MSG_NOOP:
+ case IBNAL_MSG_IMMEDIATE:
+ /* Otherwise: consume a credit before sending */
+ q = &conn->ibc_tx_queue;
+ break;
+
+ default:
+ LBUG();
+ q = NULL;
+ }
+ }
+
+ list_add_tail(&tx->tx_list, q);
+}
+
+static inline int
+kibnal_send_keepalive(kib_conn_t *conn)
+{
+ return (*kibnal_tunables.kib_keepalive > 0) &&
+ time_after(jiffies, conn->ibc_last_send +
+ *kibnal_tunables.kib_keepalive*HZ);
}
+#ifndef IBNAL_VOIDSTAR_SGADDR
+# define IBNAL_VOIDSTAR_SGADDR 0
+#endif
+
+#if IBNAL_VOIDSTAR_SGADDR
+# if defined(CONFIG_HIGHMEM)
+# if defined(CONFIG_X86) && defined(CONFIG_HIGHMEM4G)
+ /* truncation to void* doesn't matter if 0 <= physmem < 4G
+ * so allow x86 with 32 bit phys addrs */
+# elif defined(CONFIG_IA64)
+ /* OK anyway on 64-bit arch */
+# else
+# error "Can't support HIGHMEM when vv_scatgat_t::v_address is void *"
+# endif
+# endif
+# define KIBNAL_ADDR2SG(a) ((void *)((unsigned long)(a)))
+# define KIBNAL_SG2ADDR(a) ((__u64)((unsigned long)(a)))
+static inline __u64 kibnal_addr2net (__u64 addr)
+{
+ void *netaddr;
+ vv_return_t vvrc = vv_va2advertise_addr(kibnal_data.kib_hca,
+ KIBNAL_ADDR2SG(addr),
+ &netaddr);
+ LASSERT (vvrc == vv_return_ok);
+ return KIBNAL_SG2ADDR(netaddr);
+}
+#else
+# define KIBNAL_ADDR2SG(a) a
+# define KIBNAL_SG2ADDR(a) a
+static inline __u64 kibnal_addr2net (__u64 addr)
+{
+ __u64 netaddr;
+ vv_return_t vvrc = vv_va2advertise_addr(kibnal_data.kib_hca,
+ addr,
+ &netaddr);
+ LASSERT (vvrc == vv_return_ok);
+ return netaddr;
+}
+#endif
+
/* CAVEAT EMPTOR: We rely on tx/rx descriptor alignment to allow us to use the
* lowest 2 bits of the work request id to stash the work item type (the op
* field is not valid when the wc completes in error). */
mb();
}
+#if IBNAL_USE_FMR
+
+static inline int
+kibnal_rd_size (kib_rdma_desc_t *rd)
+{
+ return rd->rd_nob;
+}
+
+#else
static inline __u64
kibnal_rf_addr (kib_rdma_frag_t *rf)
{
return size;
}
+#endif