{
struct ksock_net *net = peer_ni->ksnp_ni->ni_data;
- CDEBUG (D_NET, "peer_ni %s %p deleted\n",
- libcfs_idstr(&peer_ni->ksnp_id), peer_ni);
+ CDEBUG(D_NET, "peer_ni %s %p deleted\n",
+ libcfs_idstr(&peer_ni->ksnp_id), peer_ni);
LASSERT(refcount_read(&peer_ni->ksnp_refcount) == 0);
LASSERT(peer_ni->ksnp_accepting == 0);
conn->ksnc_conn_cb = NULL;
conn->ksnc_sock = sock;
/* 2 ref, 1 for conn, another extra ref prevents socket
- * being closed before establishment of connection */
+ * being closed before establishment of connection
+ */
refcount_set(&conn->ksnc_sock_refcount, 2);
conn->ksnc_type = type;
ksocknal_lib_save_callback(sock, conn);
conn->ksnc_tx_ready = 0;
conn->ksnc_tx_scheduled = 0;
conn->ksnc_tx_carrier = NULL;
- atomic_set (&conn->ksnc_tx_nob, 0);
+ atomic_set(&conn->ksnc_tx_nob, 0);
LIBCFS_ALLOC(hello, offsetof(struct ksock_hello_msg,
kshm_ips[LNET_INTERFACES_NUM]));
peer2 = ksocknal_find_peer_locked(ni, &peerid);
if (peer2 == NULL) {
/* NB this puts an "empty" peer_ni in the peer_ni
- * table (which takes my ref) */
+ * table (which takes my ref)
+ */
hash_add(ksocknal_data.ksnd_peers,
&peer_ni->ksnp_list, nidhash(&peerid.nid));
} else {
rc = -ESTALE;
warn = "peer_ni/conn_cb removed";
goto failed_2;
- }
+ }
if (peer_ni->ksnp_proto == NULL) {
/* Never connected before.
}
/* Refuse to duplicate an existing connection, unless this is a
- * loopback connection */
+ * loopback connection
+ */
if (!rpc_cmp_addr((struct sockaddr *)&conn->ksnc_peeraddr,
(struct sockaddr *)&conn->ksnc_myaddr)) {
list_for_each_entry(conn2, &peer_ni->ksnp_conns, ksnc_list) {
CERROR("no schedulers available. node is unhealthy\n");
goto failed_2;
}
- /*
- * The cpt might have changed if we ended up selecting a non cpt
+ /* The cpt might have changed if we ended up selecting a non cpt
* native scheduler. So use the scheduler's cpt instead.
*/
cpt = sched->kss_cpt;
write_unlock_bh(global_lock);
}
- /*
- * If we get here without an error code, just use -EALREADY.
+ /* If we get here without an error code, just use -EALREADY.
* Depending on how we got here, the error may be positive
* or negative. Normalize the value for ksocknal_txlist_done().
*/
void
ksocknal_close_conn_locked(struct ksock_conn *conn, int error)
{
- /* This just does the immmediate housekeeping, and queues the
- * connection for the reaper to terminate.
- * Caller holds ksnd_global_lock exclusively in irq context */
+ /* This just does the immediate housekeeping, and queues the
+ * connection for the reaper to terminate.
+ * Caller holds ksnd_global_lock exclusively in irq context
+ */
struct ksock_peer_ni *peer_ni = conn->ksnc_peer;
struct ksock_conn_cb *conn_cb;
struct ksock_conn *conn2;
LASSERT(conn->ksnc_proto == &ksocknal_protocol_v3x);
/* throw them to the last connection...,
- * these TXs will be send to /dev/null by scheduler */
+ * these TXs will be sent to /dev/null by scheduler
+ */
list_for_each_entry(tx, &peer_ni->ksnp_tx_queue,
tx_list)
ksocknal_tx_prep(conn, tx);
/* There has been a connection failure or comms error; but I'll only
* tell LNET I think the peer_ni is dead if it's to another kernel and
- * there are no connections or connection attempts in existence. */
-
+ * there are no connections or connection attempts in existence.
+ */
read_lock(&ksocknal_data.ksnd_global_lock);
if ((peer_ni->ksnp_id.pid & LNET_PID_USERFLAG) == 0 &&
LIST_HEAD(zlist);
/* NB safe to finalize TXs because closing of socket will
- * abort all buffered data */
+ * abort all buffered data
+ */
LASSERT(conn->ksnc_sock == NULL);
spin_lock(&peer_ni->ksnp_lock);
LASSERT(!conn->ksnc_rx_scheduled);
LASSERT(list_empty(&conn->ksnc_tx_queue));
- /* complete current receive if any */
- switch (conn->ksnc_rx_state) {
- case SOCKNAL_RX_LNET_PAYLOAD:
- last_rcv = conn->ksnc_rx_deadline -
+ /* complete current receive if any */
+ switch (conn->ksnc_rx_state) {
+ case SOCKNAL_RX_LNET_PAYLOAD:
+ last_rcv = conn->ksnc_rx_deadline -
ksocknal_timeout();
CERROR("Completing partial receive from %s[%d], ip %pIScp, with error, wanted: %d, left: %d, last alive is %lld secs ago\n",
libcfs_idstr(&conn->ksnc_peer->ksnp_id),
int i;
struct ksock_conn *conn;
- for (index = 0; ; index++) {
+ for (index = 0; ; index++) {
read_lock(&ksocknal_data.ksnd_global_lock);
- i = 0;
- conn = NULL;
+ i = 0;
+ conn = NULL;
list_for_each_entry(conn, &peer_ni->ksnp_conns, ksnc_list) {
- if (i++ == index) {
- ksocknal_conn_addref(conn);
- break;
- }
- }
+ if (i++ == index) {
+ ksocknal_conn_addref(conn);
+ break;
+ }
+ }
read_unlock(&ksocknal_data.ksnd_global_lock);
if (i <= index)
- break;
+ break;
- ksocknal_lib_push_conn (conn);
- ksocknal_conn_decref(conn);
- }
+ ksocknal_lib_push_conn(conn);
+ ksocknal_conn_decref(conn);
+ }
}
static int
struct libcfs_ioctl_data *data = arg;
int rc;
- switch(cmd) {
+ switch (cmd) {
case IOC_LIBCFS_GET_INTERFACE: {
struct ksock_net *net = ni->ni_data;
struct ksock_interface *iface;
read_unlock(&ksocknal_data.ksnd_global_lock);
return rc;
- }
+ }
case IOC_LIBCFS_GET_PEER: {
__u32 myip = 0;
id.pid = LNET_PID_ANY;
return ksocknal_del_peer(ni, &id);
- case IOC_LIBCFS_GET_CONN: {
- int txmem;
- int rxmem;
- int nagle;
+ case IOC_LIBCFS_GET_CONN: {
+ int txmem;
+ int rxmem;
+ int nagle;
struct ksock_conn *conn = ksocknal_get_conn_by_idx(ni, data->ioc_count);
struct sockaddr_in *psa = (void *)&conn->ksnc_peeraddr;
struct sockaddr_in *mysa = (void *)&conn->ksnc_myaddr;
- if (conn == NULL)
- return -ENOENT;
+ if (conn == NULL)
+ return -ENOENT;
- ksocknal_lib_get_conn_tunables(conn, &txmem, &rxmem, &nagle);
+ ksocknal_lib_get_conn_tunables(conn, &txmem, &rxmem, &nagle);
data->ioc_count = txmem;
data->ioc_nid = lnet_nid_to_nid4(&conn->ksnc_peer->ksnp_id.nid);
data->ioc_u32[2] = 0xFFFFFFFF;
data->ioc_u32[3] = conn->ksnc_type;
data->ioc_u32[4] = conn->ksnc_scheduler->kss_cpt;
- data->ioc_u32[5] = rxmem;
- data->ioc_u32[6] = conn->ksnc_peer->ksnp_id.pid;
- ksocknal_conn_decref(conn);
- return 0;
- }
+ data->ioc_u32[5] = rxmem;
+ data->ioc_u32[6] = conn->ksnc_peer->ksnp_id.pid;
+ ksocknal_conn_decref(conn);
+ return 0;
+ }
case IOC_LIBCFS_CLOSE_CONNECTION:
lnet_nid4_to_nid(data->ioc_nid, &id.nid);
}
static void
-ksocknal_free_buffers (void)
+ksocknal_free_buffers(void)
{
- LASSERT (atomic_read(&ksocknal_data.ksnd_nactive_txs) == 0);
+ LASSERT(atomic_read(&ksocknal_data.ksnd_nactive_txs) == 0);
if (ksocknal_data.ksnd_schedulers != NULL)
cfs_percpt_free(ksocknal_data.ksnd_schedulers);
if (dev->reg_state != NETREG_REGISTERED)
continue;
/* A registration just happened: save the new index for
- * the device */
+ * the device
+ */
ksi->ksni_index = ifindex;
goto out;
}
if (dev->reg_state == NETREG_UNREGISTERING) {
/* Device is being unregistered, we need to clear the
- * index, it can change when device will be back */
+ * index, it can change when device will be back
+ */
ksi->ksni_index = -1;
goto out;
}
CDEBUG(D_MALLOC, "before NAL cleanup: kmem %lld\n",
libcfs_kmem_read());
- LASSERT (ksocknal_data.ksnd_nnets == 0);
+ LASSERT(ksocknal_data.ksnd_nnets == 0);
if (ksocknal_data.ksnd_init == SOCKNAL_INIT_ALL) {
unregister_netdevice_notifier(&ksocknal_dev_notifier_block);
cfs_percpt_for_each(sched, i, ksocknal_data.ksnd_schedulers) {
int nthrs;
- /*
- * make sure not to allocate more threads than there are
+ /* make sure not to allocate more threads than there are
* cores/CPUs in teh CPT
*/
nthrs = cfs_cpt_weight(lnet_cpt_table(), i);
if (*ksocknal_tunables.ksnd_nscheds > 0) {
nthrs = min(nthrs, *ksocknal_tunables.ksnd_nscheds);
} else {
- /*
- * max to half of CPUs, assume another half should be
+ /* max to half of CPUs, assume another half should be
* reserved for upper layer modules
*/
nthrs = min(max(SOCKNAL_NSCHEDS, nthrs >> 1), nthrs);
INIT_LIST_HEAD(&sched->kss_tx_conns);
INIT_LIST_HEAD(&sched->kss_zombie_noop_txs);
init_waitqueue_head(&sched->kss_waitq);
- }
+ }
- ksocknal_data.ksnd_connd_starting = 0;
- ksocknal_data.ksnd_connd_failed_stamp = 0;
+ ksocknal_data.ksnd_connd_starting = 0;
+ ksocknal_data.ksnd_connd_failed_stamp = 0;
ksocknal_data.ksnd_connd_starting_stamp = ktime_get_real_seconds();
- /* must have at least 2 connds to remain responsive to accepts while
- * connecting */
- if (*ksocknal_tunables.ksnd_nconnds < SOCKNAL_CONND_RESV + 1)
- *ksocknal_tunables.ksnd_nconnds = SOCKNAL_CONND_RESV + 1;
-
- if (*ksocknal_tunables.ksnd_nconnds_max <
- *ksocknal_tunables.ksnd_nconnds) {
- ksocknal_tunables.ksnd_nconnds_max =
- ksocknal_tunables.ksnd_nconnds;
- }
-
- for (i = 0; i < *ksocknal_tunables.ksnd_nconnds; i++) {
+ /* must have at least 2 connds to remain responsive to accepts while
+ * connecting
+ */
+ if (*ksocknal_tunables.ksnd_nconnds < SOCKNAL_CONND_RESV + 1)
+ *ksocknal_tunables.ksnd_nconnds = SOCKNAL_CONND_RESV + 1;
+
+ if (*ksocknal_tunables.ksnd_nconnds_max <
+ *ksocknal_tunables.ksnd_nconnds) {
+ ksocknal_tunables.ksnd_nconnds_max =
+ ksocknal_tunables.ksnd_nconnds;
+ }
+
+ for (i = 0; i < *ksocknal_tunables.ksnd_nconnds; i++) {
spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
ksocknal_data.ksnd_connd_starting++;
spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
ksocknal_data.ksnd_connd_starting--;
spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
- CERROR("Can't spawn socknal connd: %d\n", rc);
- goto failed;
- }
- }
+ CERROR("Can't spawn socknal connd: %d\n", rc);
+ goto failed;
+ }
+ }
rc = ksocknal_thread_start(ksocknal_reaper, NULL, "socknal_reaper");
- if (rc != 0) {
- CERROR ("Can't spawn socknal reaper: %d\n", rc);
- goto failed;
- }
+ if (rc != 0) {
+ CERROR("Can't spawn socknal reaper: %d\n", rc);
+ goto failed;
+ }
register_netdevice_notifier(&ksocknal_dev_notifier_block);
register_inetaddr_notifier(&ksocknal_inetaddr_notifier_block);
#if IS_ENABLED(CONFIG_IPV6)
register_inet6addr_notifier(&ksocknal_inet6addr_notifier_block);
#endif
- /* flag everything initialised */
- ksocknal_data.ksnd_init = SOCKNAL_INIT_ALL;
+ /* flag everything initialised */
+ ksocknal_data.ksnd_init = SOCKNAL_INIT_ALL;
- return 0;
+ return 0;
- failed:
- ksocknal_base_shutdown();
- return -ENETDOWN;
+failed:
+ ksocknal_base_shutdown();
+ return -ENETDOWN;
}
static int
int rc, if_idx;
int dev_status;
- LASSERT (ni->ni_net->net_lnd == &the_ksocklnd);
+ LASSERT(ni->ni_net->net_lnd == &the_ksocklnd);
if (ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING) {
rc = ksocknal_base_startup();
if (rc != 0)
ksi->ksni_index = ifaces[if_idx].li_index;
if (ifaces[if_idx].li_size == sizeof(struct in6_addr)) {
struct sockaddr_in6 *sa;
+
sa = (void *)&ksi->ksni_addr;
memset(sa, 0, sizeof(*sa));
sa->sin6_family = AF_INET6;
sizeof(struct in6_addr));
} else {
struct sockaddr_in *sa;
+
sa = (void *)&ksi->ksni_addr;
memset(sa, 0, sizeof(*sa));
sa->sin_family = AF_INET;
#define SOCKNAL_NSCHEDS_HIGH (SOCKNAL_NSCHEDS << 1)
#define SOCKNAL_PEER_HASH_BITS 7 /* log2 of # peer_ni lists */
-#define SOCKNAL_INSANITY_RECONN 5000 /* connd is trying on reconn infinitely */
+#define SOCKNAL_INSANITY_RECONN 5000 /* connd trying on reconn infinitely */
#define SOCKNAL_ENOMEM_RETRY 1 /* seconds between retries */
#define SOCKNAL_SINGLE_FRAG_TX 0 /* disable multi-fragment sends */
#define SOCKNAL_VERSION_DEBUG 0 /* enable protocol version debugging */
/* risk kmap deadlock on multi-frag I/O (backs off to single-frag if disabled).
- * no risk if we're not running on a CONFIG_HIGHMEM platform. */
+ * no risk if we're not running on a CONFIG_HIGHMEM platform.
+ */
#ifdef CONFIG_HIGHMEM
# define SOCKNAL_RISK_KMAP_DEADLOCK 0
#else
enum ksocklnd_ni_lnd_tunables_attr {
LNET_NET_SOCKLND_TUNABLES_ATTR_UNSPEC = 0,
-
LNET_NET_SOCKLND_TUNABLES_ATTR_CONNS_PER_PEER,
LNET_NET_SOCKLND_TUNABLES_ATTR_LND_TIMEOUT,
LNET_NET_SOCKLND_TUNABLES_ATTR_LND_TOS,
struct ksock_tunables {
/* "stuck" socket timeout (seconds) */
- int *ksnd_timeout;
+ int *ksnd_timeout;
/* # scheduler threads in each pool while starting */
- int *ksnd_nscheds;
- int *ksnd_nconnds; /* # connection daemons */
- int *ksnd_nconnds_max; /* max # connection daemons */
- int *ksnd_min_reconnectms; /* first connection retry after (ms)... */
- int *ksnd_max_reconnectms; /* ...exponentially increasing to this */
- int *ksnd_eager_ack; /* make TCP ack eagerly? */
- int *ksnd_typed_conns; /* drive sockets by type? */
- int *ksnd_min_bulk; /* smallest "large" message */
- int *ksnd_tx_buffer_size; /* socket tx buffer size */
- int *ksnd_rx_buffer_size; /* socket rx buffer size */
- int *ksnd_nagle; /* enable NAGLE? */
- int *ksnd_round_robin; /* round robin for multiple interfaces */
- int *ksnd_keepalive; /* # secs for sending keepalive NOOP */
- int *ksnd_keepalive_idle; /* # idle secs before 1st probe */
- int *ksnd_keepalive_count; /* # probes */
- int *ksnd_keepalive_intvl; /* time between probes */
- int *ksnd_credits; /* # concurrent sends */
- int *ksnd_peertxcredits; /* # concurrent sends to 1 peer_ni */
- int *ksnd_peerrtrcredits; /* # per-peer_ni router buffer credits */
- int *ksnd_peertimeout; /* seconds to consider peer_ni dead */
- int *ksnd_enable_csum; /* enable check sum */
- int *ksnd_inject_csum_error; /* set non-zero to inject checksum error */
- int *ksnd_nonblk_zcack; /* always send zc-ack on non-blocking connection */
- unsigned int *ksnd_zc_min_payload; /* minimum zero copy payload size */
- int *ksnd_zc_recv; /* enable ZC receive (for Chelsio TOE) */
- int *ksnd_zc_recv_min_nfrags; /* minimum # of fragments to enable ZC receive */
- int *ksnd_irq_affinity; /* enable IRQ affinity? */
+ int *ksnd_nscheds;
+ int *ksnd_nconnds; /* # connection daemons */
+ int *ksnd_nconnds_max; /* max # connection daemons */
+ int *ksnd_min_reconnectms; /* first connection retry after (ms)... */
+ int *ksnd_max_reconnectms; /* ...exponentially increasing to this */
+ int *ksnd_eager_ack; /* make TCP ack eagerly? */
+ int *ksnd_typed_conns; /* drive sockets by type? */
+ int *ksnd_min_bulk; /* smallest "large" message */
+ int *ksnd_tx_buffer_size; /* socket tx buffer size */
+ int *ksnd_rx_buffer_size; /* socket rx buffer size */
+ int *ksnd_nagle; /* enable NAGLE? */
+ int *ksnd_round_robin; /* round robin for multiple interfaces */
+ int *ksnd_keepalive; /* # secs for sending keepalive NOOP */
+ int *ksnd_keepalive_idle; /* # idle secs before 1st probe */
+ int *ksnd_keepalive_count; /* # probes */
+ int *ksnd_keepalive_intvl; /* time between probes */
+ int *ksnd_credits; /* # concurrent sends */
+ int *ksnd_peertxcredits; /* # concurrent sends to 1 peer_ni */
+ int *ksnd_peerrtrcredits; /* # per-peer_ni router buffer credits */
+ int *ksnd_peertimeout; /* seconds to consider peer_ni dead */
+ int *ksnd_enable_csum; /* enable check sum */
+ int *ksnd_inject_csum_error; /* set non-zero to inject checksum error */
+ int *ksnd_nonblk_zcack; /* always send zc-ack on non-blocking connection */
+ unsigned int *ksnd_zc_min_payload; /* minimum zero copy payload size */
+ int *ksnd_zc_recv; /* enable ZC receive (for Chelsio TOE) */
+ int *ksnd_zc_recv_min_nfrags; /* minimum # of fragments to enable ZC receive */
+ int *ksnd_irq_affinity; /* enable IRQ affinity? */
#ifdef SOCKNAL_BACKOFF
- int *ksnd_backoff_init; /* initial TCP backoff */
- int *ksnd_backoff_max; /* maximum TCP backoff */
+ int *ksnd_backoff_init; /* initial TCP backoff */
+ int *ksnd_backoff_max; /* maximum TCP backoff */
#endif
#if SOCKNAL_VERSION_DEBUG
- int *ksnd_protocol; /* protocol version */
+ int *ksnd_protocol; /* protocol version */
#endif
- int *ksnd_conns_per_peer; /* for typed mode, yields:
- * 1 + 2*conns_per_peer total
- * for untyped:
- * conns_per_peer total
- */
+ int *ksnd_conns_per_peer; /* for typed mode, yields:
+ * 1 + 2*conns_per_peer total
+ * for untyped:
+ * conns_per_peer total
+ */
};
struct ksock_net {
/* schedulers information */
struct ksock_sched **ksnd_schedulers;
- atomic_t ksnd_nactive_txs; /* #active txs */
+ atomic_t ksnd_nactive_txs; /* #active txs */
/* conns to close: reaper_lock*/
struct list_head ksnd_deathrow_conns;
/* conns to retry: reaper_lock*/
struct list_head ksnd_enomem_conns;
/* reaper sleeps here */
- wait_queue_head_t ksnd_reaper_waitq;
+ wait_queue_head_t ksnd_reaper_waitq;
/* when reaper will wake */
time64_t ksnd_reaper_waketime;
/* serialise */
- spinlock_t ksnd_reaper_lock;
+ spinlock_t ksnd_reaper_lock;
- int ksnd_enomem_tx; /* test ENOMEM sender */
- int ksnd_stall_tx; /* test sluggish sender */
- int ksnd_stall_rx; /* test sluggish receiver */
+ int ksnd_enomem_tx; /* test ENOMEM sender */
+ int ksnd_stall_tx; /* test sluggish sender */
+ int ksnd_stall_rx; /* test sluggish receiver */
/* incoming connection requests */
struct list_head ksnd_connd_connreqs;
/** time stamp of the last failed connecting attempt */
time64_t ksnd_connd_failed_stamp;
/** # starting connd */
- unsigned ksnd_connd_starting;
+ unsigned int ksnd_connd_starting;
/** time stamp of the last starting connd */
time64_t ksnd_connd_starting_stamp;
/** # running connd */
- unsigned ksnd_connd_running;
+ unsigned int ksnd_connd_running;
/* serialise */
spinlock_t ksnd_connd_lock;
#define KSOCK_NOOP_TX_SIZE ((int)offsetof(struct ksock_tx, tx_payload[0]))
/* space for the rx frag descriptors; we either read a single contiguous
- * header, or up to LNET_MAX_IOV frags of payload of either type. */
+ * header, or up to LNET_MAX_IOV frags of payload of either type.
+ */
union ksock_rxiovspace {
struct kvec iov[LNET_MAX_IOV];
struct bio_vec kiov[LNET_MAX_IOV];
/* READER */
/* where I enq waiting input or a forwarding descriptor */
- struct list_head ksnc_rx_list;
+ struct list_head ksnc_rx_list;
time64_t ksnc_rx_deadline; /* when (in seconds) receive times out */
- __u8 ksnc_rx_started; /* started receiving a message */
- __u8 ksnc_rx_ready; /* data ready to read */
- __u8 ksnc_rx_scheduled;/* being progressed */
- __u8 ksnc_rx_state; /* what is being read */
- int ksnc_rx_nob_left; /* # bytes to next hdr/body */
- int ksnc_rx_nob_wanted; /* bytes actually wanted */
- int ksnc_rx_niov; /* # kvec frags */
- struct kvec *ksnc_rx_iov; /* the kvec frags */
- int ksnc_rx_nkiov; /* # page frags */
- struct bio_vec *ksnc_rx_kiov; /* the page frags */
- union ksock_rxiovspace ksnc_rx_iov_space;/* space for frag descriptors */
- __u32 ksnc_rx_csum; /* partial checksum for incoming
- * data */
- struct lnet_msg *ksnc_lnet_msg; /* rx lnet_finalize arg*/
+ __u8 ksnc_rx_started; /* started receiving a msg */
+ __u8 ksnc_rx_ready; /* data ready to read */
+ __u8 ksnc_rx_scheduled;/* being progressed */
+ __u8 ksnc_rx_state; /* what is being read */
+ int ksnc_rx_nob_left; /* # bytes to next hdr/body */
+ int ksnc_rx_nob_wanted; /* bytes actually wanted */
+ int ksnc_rx_niov; /* # kvec frags */
+ struct kvec *ksnc_rx_iov; /* the kvec frags */
+ int ksnc_rx_nkiov; /* # page frags */
+ struct bio_vec *ksnc_rx_kiov; /* the page frags */
+ union ksock_rxiovspace ksnc_rx_iov_space;/* for frag descriptors */
+ __u32 ksnc_rx_csum; /* partial checksum for
+ * incoming data */
+ struct lnet_msg *ksnc_lnet_msg; /* rx lnet_finalize arg */
struct ksock_msg ksnc_msg; /* incoming message buffer:
* V2.x message takes the
* whole struct
#define SOCKNAL_MATCH_MAY 2 /* TX can be sent on the connection, but not preferred */
struct ksock_proto {
- int pro_version; /* version number of protocol */
- int (*pro_send_hello)(struct ksock_conn *, struct ksock_hello_msg *); /* handshake function */
- int (*pro_recv_hello)(struct ksock_conn *, struct ksock_hello_msg *, int);/* handshake function */
- void (*pro_pack)(struct ksock_tx *); /* message pack */
- void (*pro_unpack)(struct ksock_msg *, struct lnet_hdr *); /* message unpack */
- struct ksock_tx *(*pro_queue_tx_msg)(struct ksock_conn *, struct ksock_tx *); /* queue tx on the connection */
- int (*pro_queue_tx_zcack)(struct ksock_conn *, struct ksock_tx *, __u64); /* queue ZC ack on the connection */
- int (*pro_handle_zcreq)(struct ksock_conn *, __u64, int); /* handle ZC request */
- int (*pro_handle_zcack)(struct ksock_conn *, __u64, __u64); /* handle ZC ACK */
- int (*pro_match_tx)(struct ksock_conn *, struct ksock_tx *, int); /* msg type matches the connection type:
- * return value:
- * return MATCH_NO : no
- * return MATCH_YES : matching type
- * return MATCH_MAY : can be backup */
+ int pro_version; /* version number of protocol */
+ /* handshake function */
+ int (*pro_send_hello)(struct ksock_conn *conn,
+ struct ksock_hello_msg *hello);
+ /* handshake function */
+ int (*pro_recv_hello)(struct ksock_conn *conn,
+ struct ksock_hello_msg *hello, int timeout);
+ void (*pro_pack)(struct ksock_tx *tx);
+ /* message unpack */
+ void (*pro_unpack)(struct ksock_msg *tx_msg, struct lnet_hdr *hdr);
+ /* queue tx on the connection */
+ struct ksock_tx *(*pro_queue_tx_msg)(struct ksock_conn *conn,
+ struct ksock_tx *tx);
+ /* queue ZC ack on the connection */
+ int (*pro_queue_tx_zcack)(struct ksock_conn *conn, struct ksock_tx *tx,
+ __u64 cookie);
+ /* handle ZC request */
+ int (*pro_handle_zcreq)(struct ksock_conn *conn, __u64 cookie,
+ int noblock);
+ /* handle ZC ACK */
+ int (*pro_handle_zcack)(struct ksock_conn *conn, __u64 cookie,
+ __u64 noblock);
+ /* msg type matches the connection type:
+ * return value:
+ * return MATCH_NO : no
+ * return MATCH_YES : matching type
+ * return MATCH_MAY : can be backup
+ */
+ int (*pro_match_tx)(struct ksock_conn *conn, struct ksock_tx *tx,
+ int noblock);
};
extern const struct ksock_proto ksocknal_protocol_v1x;
static inline __u32 ksocknal_csum(__u32 crc, unsigned char const *p, size_t len)
{
-#if 1
return crc32_le(crc, p, len);
-#else
- while (len-- > 0)
- crc = ((crc + 0x100) & ~0xff) | ((crc + *p++) & 0xff) ;
-
- return crc;
-#endif
}
static inline int
}
read_unlock(&ksocknal_data.ksnd_global_lock);
- return (rc);
+ return rc;
}
static inline void
ksocknal_connsock_decref(struct ksock_conn *conn)
{
if (refcount_dec_and_test(&conn->ksnc_sock_refcount)) {
- LASSERT (conn->ksnc_closing);
+ LASSERT(conn->ksnc_closing);
sock_release(conn->ksnc_sock);
conn->ksnc_sock = NULL;
ksocknal_finalize_zcreq(conn);
}
extern void ksocknal_tx_prep(struct ksock_conn *, struct ksock_tx *tx);
-extern void ksocknal_tx_done(struct lnet_ni *ni, struct ksock_tx *tx, int error);
+extern void ksocknal_tx_done(struct lnet_ni *ni, struct ksock_tx *tx,
+ int error);
static inline void
ksocknal_tx_decref(struct ksock_tx *tx)
int ksocknal_ctl(struct lnet_ni *ni, unsigned int cmd, void *arg);
int ksocknal_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg);
int ksocknal_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg,
- int delayed, unsigned int niov,
- struct bio_vec *kiov,
- unsigned int offset, unsigned int mlen, unsigned int rlen);
+ int delayed, unsigned int niov, struct bio_vec *kiov,
+ unsigned int offset, unsigned int mlen, unsigned int rlen);
int ksocknal_accept(struct lnet_ni *ni, struct socket *sock);
unsigned int ksocknal_get_conn_count_by_type(struct ksock_conn_cb *conn_cb,