/* need sane upper bound to limit copy overhead */
#define GNILND_MAX_IMMEDIATE (64<<10)
+/* allow for 4M transfers over gni. Note 2.5M used by DVS */
+#define GNILND_MAX_IOV 1024
/* Max number of connections to keep in purgatory per peer */
#define GNILND_PURGATORY_MAX 5
v2:
* - added checksum to FMA
 * moved seq before payload
- * WIRE_ATTR added for alignment
+ * __packed added for alignment
v3:
* added gnm_payload_len for FMA payload size
v4:
__u32 gnpr_host_id; /* ph. host ID of the NIC */
__u32 gnpr_cqid; /* cqid I want peer to use when sending events to me */
gni_smsg_attr_t gnpr_smsg_attr; /* my short msg. attributes */
-} WIRE_ATTR kgn_gniparams_t;
+} __packed kgn_gniparams_t;
typedef struct kgn_nak_data {
__s32 gnnd_errno; /* errno reason for NAK */
-} WIRE_ATTR kgn_nak_data_t;
+} __packed kgn_nak_data_t;
/* the first bits of the connreq struct CANNOT CHANGE FORM EVER
* without breaking the ability for us to properly NAK someone */
kgn_gniparams_t gncr_gnparams; /* sender's endpoint info */
kgn_nak_data_t gncr_nakdata; /* data (rc, etc) for NAK */
};
-} WIRE_ATTR kgn_connreq_t;
+} __packed kgn_connreq_t;
typedef struct {
gni_mem_handle_t gnrd_key;
__u64 gnrd_addr;
__u32 gnrd_nob;
-} WIRE_ATTR kgn_rdma_desc_t;
+} __packed kgn_rdma_desc_t;
typedef struct {
struct lnet_hdr gnim_hdr; /* LNet header */
/* LNet payload is in FMA "Message Data" */
-} WIRE_ATTR kgn_immediate_msg_t;
+} __packed kgn_immediate_msg_t;
typedef struct {
struct lnet_hdr gnprm_hdr; /* LNet header */
__u64 gnprm_cookie; /* opaque completion cookie */
-} WIRE_ATTR kgn_putreq_msg_t;
+} __packed kgn_putreq_msg_t;
typedef struct {
__u64 gnpam_src_cookie; /* reflected completion cookie */
__u64 gnpam_dst_cookie; /* opaque completion cookie */
__u16 gnpam_payload_cksum; /* checksum for get msg */
kgn_rdma_desc_t gnpam_desc; /* sender's sink buffer */
-} WIRE_ATTR kgn_putack_msg_t;
+} __packed kgn_putack_msg_t;
typedef struct {
struct lnet_hdr gngm_hdr; /* LNet header */
__u64 gngm_cookie; /* opaque completion cookie */
__u16 gngm_payload_cksum; /* checksum for put msg */
kgn_rdma_desc_t gngm_desc; /* sender's sink buffer */
-} WIRE_ATTR kgn_get_msg_t;
+} __packed kgn_get_msg_t;
typedef struct {
int gncm_retval; /* error on NAK, size on REQ */
__u64 gncm_cookie; /* reflected completion cookie */
-} WIRE_ATTR kgn_completion_msg_t;
+} __packed kgn_completion_msg_t;
typedef struct { /* NB must fit in FMA "Prefix" */
__u32 gnm_magic; /* I'm an gni message */
kgn_get_msg_t get;
kgn_completion_msg_t completion;
} gnm_u;
-} WIRE_ATTR kgn_msg_t;
+} __packed kgn_msg_t;
/************************************************************************
* runtime tunable data
{
void *ret;
if (*kgnilnd_tunables.kgn_vzalloc_noretry)
- ret = __vmalloc(size, __GFP_HIGHMEM | GFP_NOIO | __GFP_NORETRY |
- __GFP_ZERO,
- PAGE_KERNEL);
+ ret = __ll_vmalloc(size, __GFP_HIGHMEM | GFP_NOIO | __GFP_ZERO |
+ __GFP_NORETRY);
else
- ret = __vmalloc(size, __GFP_HIGHMEM | GFP_NOIO | __GFP_ZERO,
- PAGE_KERNEL);
+ ret = __ll_vmalloc(size, __GFP_HIGHMEM | GFP_NOIO | __GFP_ZERO);
LIBCFS_ALLOC_POST(ret, size);
return ret;
wake_up_var(&kgnilnd_data); \
}while (0)
-#define kgnilnd_net_addref(net) \
-do { \
- int val = atomic_inc_return(&net->gnn_refcount); \
- LASSERTF(val > 1, "net %p refcount %d\n", net, val); \
- CDEBUG(D_NETTRACE, "net %p->%s++ (%d)\n", net, \
- libcfs_nid2str(net->gnn_ni->ni_nid), val); \
+#define kgnilnd_net_addref(net) \
+do { \
+ int val = atomic_inc_return(&net->gnn_refcount); \
+ LASSERTF(val > 1, "net %p refcount %d\n", net, val); \
+ CDEBUG(D_NETTRACE, "net %p->%s++ (%d)\n", net, \
+ libcfs_nidstr(&net->gnn_ni->ni_nid), val); \
} while (0)
-#define kgnilnd_net_decref(net) \
-do { \
- int val = atomic_dec_return(&net->gnn_refcount); \
- LASSERTF(val >= 0, "net %p refcount %d\n", net, val); \
- CDEBUG(D_NETTRACE, "net %p->%s-- (%d)\n", net, \
- libcfs_nid2str(net->gnn_ni->ni_nid), val); \
+#define kgnilnd_net_decref(net) \
+do { \
+ int val = atomic_dec_return(&net->gnn_refcount); \
+ LASSERTF(val >= 0, "net %p refcount %d\n", net, val); \
+ CDEBUG(D_NETTRACE, "net %p->%s-- (%d)\n", net, \
+ libcfs_nidstr(&net->gnn_ni->ni_nid), val); \
} while (0)
#define kgnilnd_peer_addref(peer) \
if (conn->gnc_peer) {
loopback = conn->gnc_peer->gnp_nid ==
- conn->gnc_peer->gnp_net->gnn_ni->ni_nid;
+ lnet_nid_to_nid4(&conn->gnc_peer->gnp_net->gnn_ni->ni_nid);
} else {
/* short circuit - a conn that didn't complete
* setup never needs a purgatory hold */
return -ESHUTDOWN;
}
- list_for_each_entry(net, kgnilnd_netnum2netlist(LNET_NETNUM(LNET_NIDNET(nid))), gnn_list) {
- if (!net->gnn_shutdown && LNET_NIDNET(net->gnn_ni->ni_nid) == LNET_NIDNET(nid)) {
+ list_for_each_entry(net,
+ kgnilnd_netnum2netlist(LNET_NETNUM(LNET_NIDNET(nid))),
+ gnn_list) {
+ if (!net->gnn_shutdown &&
+ LNET_NID_NET(&net->gnn_ni->ni_nid) == LNET_NIDNET(nid)) {
kgnilnd_net_addref(net);
up_read(&kgnilnd_data.kgn_net_rw_sem);
*netp = net;
void kgnilnd_peer_increase_reconnect_locked(kgn_peer_t *peer);
void kgnilnd_queue_reply(kgn_conn_t *conn, kgn_tx_t *tx);
void kgnilnd_queue_tx(kgn_conn_t *conn, kgn_tx_t *tx);
-void kgnilnd_launch_tx(kgn_tx_t *tx, kgn_net_t *net, struct lnet_process_id *target);
+void kgnilnd_launch_tx(kgn_tx_t *tx, kgn_net_t *net,
+ struct lnet_processid *target);
int kgnilnd_send_mapped_tx(kgn_tx_t *tx, int try_map_if_full);
void kgnilnd_consume_rx(kgn_rx_t *rx);