if (tx == NULL)
return NULL;
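+ /* tx_refcount is a refcount_t, so later increments and decrements are
+ * checked by the kernel for overflow and underflow */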
- atomic_set(&tx->tx_refcount, 1);
+ refcount_set(&tx->tx_refcount, 1);
tx->tx_zc_aborted = 0;
tx->tx_zc_capable = 0;
tx->tx_zc_checked = 0;
tx->tx_lnetmsg = NULL;
tx->tx_kiov = NULL;
tx->tx_nkiov = 0;
- tx->tx_iov = &tx->tx_hdr;
tx->tx_niov = 1;
tx->tx_nonblk = nonblk;
}
static int
-ksocknal_send_iov(struct ksock_conn *conn, struct ksock_tx *tx,
+ksocknal_send_hdr(struct ksock_conn *conn, struct ksock_tx *tx,
struct kvec *scratch_iov)
{
- struct kvec *iov = tx->tx_iov;
+ struct kvec *iov = &tx->tx_hdr;
int nob;
int rc;
LASSERT(tx->tx_niov > 0);
- /* Never touch tx->tx_iov inside ksocknal_lib_send_iov() */
- rc = ksocknal_lib_send_iov(conn, tx, scratch_iov);
+ /* Never touch tx->tx_hdr inside ksocknal_lib_send_hdr() */
+ rc = ksocknal_lib_send_hdr(conn, tx, scratch_iov);
if (rc <= 0) /* sent nothing? */
return rc;
nob = rc; /* bytes actually sent */
tx->tx_resid -= nob;
/* "consume" iov */
- do {
- LASSERT(tx->tx_niov > 0);
+ LASSERT(tx->tx_niov == 1);
- if (nob < (int) iov->iov_len) {
- iov->iov_base += nob;
- iov->iov_len -= nob;
- return rc;
- }
+ if (nob < (int) iov->iov_len) {
+ iov->iov_base += nob;
+ iov->iov_len -= nob;
+ return rc;
+ }
- nob -= iov->iov_len;
- tx->tx_iov = ++iov;
- tx->tx_niov--;
- } while (nob != 0);
+ LASSERT(nob == iov->iov_len);
+ tx->tx_niov--;
return rc;
}
ksocknal_data.ksnd_enomem_tx--;
rc = -EAGAIN;
} else if (tx->tx_niov != 0) {
- rc = ksocknal_send_iov(conn, tx, scratch_iov);
+ rc = ksocknal_send_hdr(conn, tx, scratch_iov);
} else {
rc = ksocknal_send_kiov(conn, tx, scratch_iov);
}
/* allocated send buffer bytes < computed; infer
* something got ACKed */
conn->ksnc_tx_deadline = ktime_get_seconds() +
- lnet_get_lnd_timeout();
+ ksocknal_timeout();
conn->ksnc_peer->ksnp_last_alive = ktime_get_seconds();
conn->ksnc_tx_bufnob = bufnob;
smp_mb();
conn->ksnc_peer->ksnp_last_alive = ktime_get_seconds();
conn->ksnc_rx_deadline = ktime_get_seconds() +
- lnet_get_lnd_timeout();
+ ksocknal_timeout();
smp_mb(); /* order with setting rx_started */
conn->ksnc_rx_started = 1;
conn->ksnc_peer->ksnp_last_alive = ktime_get_seconds();
conn->ksnc_rx_deadline = ktime_get_seconds() +
- lnet_get_lnd_timeout();
+ ksocknal_timeout();
smp_mb(); /* order with setting rx_started */
conn->ksnc_rx_started = 1;
tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
}
- LASSERT(atomic_read(&tx->tx_refcount) == 1);
+ LASSERT(refcount_read(&tx->tx_refcount) == 1);
ksocknal_tx_done(ni, tx, error);
}
}
/* ZC_REQ is going to be pinned to the peer_ni */
tx->tx_deadline = ktime_get_seconds() +
- lnet_get_lnd_timeout();
+ ksocknal_timeout();
LASSERT (tx->tx_msg.ksm_zc_cookies[0] == 0);
counter++; /* exponential backoff warnings */
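+ /* (counter & -counter) == counter only for powers of two, so the
+ * warning fires on the 1st, 2nd, 4th, 8th, ... occurrence */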
if ((counter & (-counter)) == counter)
- CWARN("%u ENOMEM tx %p (%u allocated)\n",
- counter, conn, atomic_read(&libcfs_kmemory));
+ CWARN("%u ENOMEM tx %p (%lld allocated)\n",
+ counter, conn, libcfs_kmem_read());
/* Queue on ksnd_enomem_conns for retry after a timeout */
spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
ksocknal_tx_prep(conn, tx);
- /* Ensure the frags we've been given EXACTLY match the number of
- * bytes we want to send. Many TCP/IP stacks disregard any total
+ /* Ensure the frags we've been given EXACTLY match the number of
+ * bytes we want to send. Many TCP/IP stacks disregard any total
* size parameters passed to them and just look at the frags.
- *
- * We always expect at least 1 mapped fragment containing the
- * complete ksocknal message header. */
- LASSERT (lnet_iov_nob (tx->tx_niov, tx->tx_iov) +
- lnet_kiov_nob(tx->tx_nkiov, tx->tx_kiov) ==
- (unsigned int)tx->tx_nob);
- LASSERT (tx->tx_niov >= 1);
- LASSERT (tx->tx_resid == tx->tx_nob);
+ *
+ * We always expect at least 1 mapped fragment containing the
+ * complete ksocknal message header.
+ */
+ LASSERT(lnet_iov_nob(tx->tx_niov, &tx->tx_hdr) +
+ lnet_kiov_nob(tx->tx_nkiov, tx->tx_kiov) ==
+ (unsigned int)tx->tx_nob);
+ LASSERT(tx->tx_niov >= 1);
+ LASSERT(tx->tx_resid == tx->tx_nob);
CDEBUG (D_NET, "Packet %p type %d, nob %d niov %d nkiov %d\n",
tx, (tx->tx_lnetmsg != NULL) ? tx->tx_lnetmsg->msg_hdr.type:
if (list_empty(&conn->ksnc_tx_queue) && bufnob == 0) {
/* First packet starts the timeout */
conn->ksnc_tx_deadline = ktime_get_seconds() +
- lnet_get_lnd_timeout();
+ ksocknal_timeout();
if (conn->ksnc_tx_bufnob > 0) /* something got ACKed */
conn->ksnc_peer->ksnp_last_alive = ktime_get_seconds();
conn->ksnc_tx_bufnob = 0;
ksocknal_find_connecting_route_locked (peer_ni) != NULL) {
/* the message is going to be pinned to the peer_ni */
tx->tx_deadline = ktime_get_seconds() +
- lnet_get_lnd_timeout();
+ ksocknal_timeout();
/* Queue the message until a connection is established */
list_add_tail(&tx->tx_list, &peer_ni->ksnp_tx_queue);
tx->tx_lnetmsg = lntmsg;
tx->tx_niov = 1;
- tx->tx_iov = &tx->tx_hdr;
tx->tx_kiov = tx->tx_payload;
tx->tx_nkiov = lnet_extract_kiov(payload_niov, tx->tx_kiov,
payload_niov, payload_kiov,
int
ksocknal_thread_start(int (*fn)(void *arg), void *arg, char *name)
{
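+ /* kthread_run() takes a printf-style name format; "%s" keeps any '%'
+ * in the caller-supplied name from being parsed as a conversion */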
- struct task_struct *task = kthread_run(fn, arg, name);
+ struct task_struct *task = kthread_run(fn, arg, "%s", name);
if (IS_ERR(task))
return PTR_ERR(task);
- write_lock_bh(&ksocknal_data.ksnd_global_lock);
- ksocknal_data.ksnd_nthreads++;
- write_unlock_bh(&ksocknal_data.ksnd_global_lock);
+ atomic_inc(&ksocknal_data.ksnd_nthreads);
return 0;
}
void
ksocknal_thread_fini (void)
{
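+ /* the last thread to exit wakes any wait_var_event() waiter (e.g.
+ * shutdown waiting for ksnd_nthreads to drop to zero) */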
- write_lock_bh(&ksocknal_data.ksnd_global_lock);
- if (--ksocknal_data.ksnd_nthreads == 0)
+ if (atomic_dec_and_test(&ksocknal_data.ksnd_nthreads))
wake_up_var(&ksocknal_data.ksnd_nthreads);
- write_unlock_bh(&ksocknal_data.ksnd_global_lock);
}
int
struct lnet_process_id *id;
int rc;
- LASSERT (atomic_read(&conn->ksnc_conn_refcount) > 0);
+ LASSERT(refcount_read(&conn->ksnc_conn_refcount) > 0);
/* NB: sched lock NOT held */
/* SOCKNAL_RX_LNET_HEADER is here for backward compatibility */
struct ksock_conn *conn;
struct ksock_tx *tx;
int rc;
- int nloops = 0;
long id = (long)arg;
struct page **rx_scratch_pgs;
struct kvec *scratch_iov;
did_something = 1;
}
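+ /* need_resched() means the scheduler wants this CPU back, so drop
+ * kss_lock and yield even if more work is queued */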
- if (!did_something || /* nothing to do */
- ++nloops == SOCKNAL_RESCHED) { /* hogging CPU? */
+ if (!did_something || /* nothing to do */
+ need_resched()) { /* hogging CPU? */
spin_unlock_bh(&sched->kss_lock);
- nloops = 0;
-
if (!did_something) { /* wait for something to do */
rc = wait_event_interruptible_exclusive(
sched->kss_waitq,
/* socket type set on active connections - not set on passive */
LASSERT(!active == !(conn->ksnc_type != SOCKLND_CONN_NONE));
- timeout = active ? lnet_get_lnd_timeout() :
+ timeout = active ? ksocknal_timeout() :
lnet_acceptor_timeout();
rc = lnet_sock_read(sock, &hello->kshm_magic,
int retry_later = 0;
int rc = 0;
- deadline = ktime_get_seconds() + lnet_get_lnd_timeout();
+ deadline = ktime_get_seconds() + ksocknal_timeout();
write_lock_bh(&ksocknal_data.ksnd_global_lock);
spinlock_t *connd_lock = &ksocknal_data.ksnd_connd_lock;
struct ksock_connreq *cr;
wait_queue_entry_t wait;
- int nloops = 0;
int cons_retry = 0;
init_waitqueue_entry(&wait, current);
}
if (dropped_lock) {
- if (++nloops < SOCKNAL_RESCHED)
+ if (!need_resched())
continue;
spin_unlock_bh(connd_lock);
- nloops = 0;
cond_resched();
spin_lock_bh(connd_lock);
continue;
add_wait_queue_exclusive(&ksocknal_data.ksnd_connd_waitq, &wait);
spin_unlock_bh(connd_lock);
- nloops = 0;
schedule_timeout(timeout);
remove_wait_queue(&ksocknal_data.ksnd_connd_waitq, &wait);
* times the timeout interval.
*/
- lnd_timeout = lnet_get_lnd_timeout();
+ lnd_timeout = ksocknal_timeout();
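+ /* shrink the per-pass chunk so the whole peer table is still checked n
+ * times within one timeout interval when a pass runs every p seconds */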
if (lnd_timeout > n * p)
chunk = (chunk * n * p) / lnd_timeout;
if (chunk == 0)