if (tx == NULL)
return NULL;
- atomic_set(&tx->tx_refcount, 1);
+ refcount_set(&tx->tx_refcount, 1);
tx->tx_zc_aborted = 0;
tx->tx_zc_capable = 0;
tx->tx_zc_checked = 0;
/* allocated send buffer bytes < computed; infer
* something got ACKed */
conn->ksnc_tx_deadline = ktime_get_seconds() +
- lnet_get_lnd_timeout();
+ ksocknal_timeout();
conn->ksnc_peer->ksnp_last_alive = ktime_get_seconds();
conn->ksnc_tx_bufnob = bufnob;
smp_mb();
conn->ksnc_peer->ksnp_last_alive = ktime_get_seconds();
conn->ksnc_rx_deadline = ktime_get_seconds() +
- lnet_get_lnd_timeout();
+ ksocknal_timeout();
smp_mb(); /* order with setting rx_started */
conn->ksnc_rx_started = 1;
conn->ksnc_peer->ksnp_last_alive = ktime_get_seconds();
conn->ksnc_rx_deadline = ktime_get_seconds() +
- lnet_get_lnd_timeout();
+ ksocknal_timeout();
smp_mb(); /* order with setting rx_started */
conn->ksnc_rx_started = 1;
tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
}
- LASSERT(atomic_read(&tx->tx_refcount) == 1);
+ LASSERT(refcount_read(&tx->tx_refcount) == 1);
ksocknal_tx_done(ni, tx, error);
}
}
/* ZC_REQ is going to be pinned to the peer_ni */
tx->tx_deadline = ktime_get_seconds() +
- lnet_get_lnd_timeout();
+ ksocknal_timeout();
LASSERT (tx->tx_msg.ksm_zc_cookies[0] == 0);
counter++; /* exponential backoff warnings */
if ((counter & (-counter)) == counter)
- CWARN("%u ENOMEM tx %p (%u allocated)\n",
- counter, conn, atomic_read(&libcfs_kmemory));
+ CWARN("%u ENOMEM tx %p (%lld allocated)\n",
+ counter, conn, libcfs_kmem_read());
/* Queue on ksnd_enomem_conns for retry after a timeout */
spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
if (list_empty(&conn->ksnc_tx_queue) && bufnob == 0) {
/* First packet starts the timeout */
conn->ksnc_tx_deadline = ktime_get_seconds() +
- lnet_get_lnd_timeout();
+ ksocknal_timeout();
if (conn->ksnc_tx_bufnob > 0) /* something got ACKed */
conn->ksnc_peer->ksnp_last_alive = ktime_get_seconds();
conn->ksnc_tx_bufnob = 0;
ksocknal_find_connecting_route_locked (peer_ni) != NULL) {
/* the message is going to be pinned to the peer_ni */
tx->tx_deadline = ktime_get_seconds() +
- lnet_get_lnd_timeout();
+ ksocknal_timeout();
/* Queue the message until a connection is established */
list_add_tail(&tx->tx_list, &peer_ni->ksnp_tx_queue);
int
ksocknal_thread_start(int (*fn)(void *arg), void *arg, char *name)
{
	/* Spawn a socklnd worker thread running @fn(@arg).
	 *
	 * NB: pass @name through a "%s" format rather than using it as the
	 * format string itself — a '%' in the caller-supplied name must not
	 * be interpreted by kthread_run() (format-string safety).
	 *
	 * Returns 0 on success or the PTR_ERR() from kthread_run() on
	 * failure; on failure no thread is accounted.
	 */
	struct task_struct *task = kthread_run(fn, arg, "%s", name);

	if (IS_ERR(task))
		return PTR_ERR(task);

	/* Count live threads atomically; ksocknal_thread_fini() decrements
	 * and wakes the shutdown waiter when the count reaches zero, so no
	 * global lock is needed for this accounting. */
	atomic_inc(&ksocknal_data.ksnd_nthreads);
	return 0;
}
void
ksocknal_thread_fini (void)
{
	/* Called by each worker thread on exit: drop the live-thread count
	 * and, if this was the last thread, wake whoever is waiting in
	 * module shutdown on ksnd_nthreads reaching zero.
	 * atomic_dec_and_test() makes the decrement-and-check a single
	 * atomic step, so no lock is required. */
	if (atomic_dec_and_test(&ksocknal_data.ksnd_nthreads))
		wake_up_var(&ksocknal_data.ksnd_nthreads);
}
int
struct lnet_process_id *id;
int rc;
- LASSERT (atomic_read(&conn->ksnc_conn_refcount) > 0);
+ LASSERT(refcount_read(&conn->ksnc_conn_refcount) > 0);
/* NB: sched lock NOT held */
/* SOCKNAL_RX_LNET_HEADER is here for backward compatibility */
struct ksock_conn *conn;
struct ksock_tx *tx;
int rc;
- int nloops = 0;
long id = (long)arg;
struct page **rx_scratch_pgs;
struct kvec *scratch_iov;
did_something = 1;
}
- if (!did_something || /* nothing to do */
- ++nloops == SOCKNAL_RESCHED) { /* hogging CPU? */
+ if (!did_something || /* nothing to do */
+ need_resched()) { /* hogging CPU? */
spin_unlock_bh(&sched->kss_lock);
- nloops = 0;
-
if (!did_something) { /* wait for something to do */
rc = wait_event_interruptible_exclusive(
sched->kss_waitq,
/* socket type set on active connections - not set on passive */
LASSERT(!active == !(conn->ksnc_type != SOCKLND_CONN_NONE));
- timeout = active ? lnet_get_lnd_timeout() :
+ timeout = active ? ksocknal_timeout() :
lnet_acceptor_timeout();
rc = lnet_sock_read(sock, &hello->kshm_magic,
int retry_later = 0;
int rc = 0;
- deadline = ktime_get_seconds() + lnet_get_lnd_timeout();
+ deadline = ktime_get_seconds() + ksocknal_timeout();
write_lock_bh(&ksocknal_data.ksnd_global_lock);
spinlock_t *connd_lock = &ksocknal_data.ksnd_connd_lock;
struct ksock_connreq *cr;
wait_queue_entry_t wait;
- int nloops = 0;
int cons_retry = 0;
init_waitqueue_entry(&wait, current);
}
if (dropped_lock) {
- if (++nloops < SOCKNAL_RESCHED)
+ if (!need_resched())
continue;
spin_unlock_bh(connd_lock);
- nloops = 0;
cond_resched();
spin_lock_bh(connd_lock);
continue;
add_wait_queue_exclusive(&ksocknal_data.ksnd_connd_waitq, &wait);
spin_unlock_bh(connd_lock);
- nloops = 0;
schedule_timeout(timeout);
remove_wait_queue(&ksocknal_data.ksnd_connd_waitq, &wait);
* times the timeout interval.
*/
- lnd_timeout = lnet_get_lnd_timeout();
+ lnd_timeout = ksocknal_timeout();
if (lnd_timeout > n * p)
chunk = (chunk * n * p) / lnd_timeout;
if (chunk == 0)