if (tx == NULL)
return NULL;
- atomic_set(&tx->tx_refcount, 1);
+ refcount_set(&tx->tx_refcount, 1);
tx->tx_zc_aborted = 0;
tx->tx_zc_capable = 0;
tx->tx_zc_checked = 0;
tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
}
- LASSERT(atomic_read(&tx->tx_refcount) == 1);
+ LASSERT(refcount_read(&tx->tx_refcount) == 1);
ksocknal_tx_done(ni, tx, error);
}
}
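
The atomic_t to refcount_t switch above follows the standard kernel conversion: refcount_t saturates instead of wrapping and WARNs on increment-from-zero or underflow, so refcounting bugs surface as loud diagnostics rather than silent use-after-free. A minimal sketch of the pattern on a hypothetical object (not the ksock_tx code itself):

#include <linux/refcount.h>
#include <linux/slab.h>

struct obj {
        refcount_t refs;        /* starts at 1 via refcount_set() */
};

static struct obj *obj_get(struct obj *o)
{
        refcount_inc(&o->refs);         /* WARNs if refs was already 0 */
        return o;
}

static void obj_put(struct obj *o)
{
        if (refcount_dec_and_test(&o->refs))    /* WARNs on underflow */
                kfree(o);
}
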
counter++; /* exponential backoff warnings */
if ((counter & (-counter)) == counter)
- CWARN("%u ENOMEM tx %p (%u allocated)\n",
- counter, conn, atomic_read(&libcfs_kmemory));
+ CWARN("%u ENOMEM tx %p (%lld allocated)\n",
+ counter, conn, libcfs_kmem_read());
/* Queue on ksnd_enomem_conns for retry after a timeout */
spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
int
ksocknal_thread_start(int (*fn)(void *arg), void *arg, char *name)
{
- struct task_struct *task = kthread_run(fn, arg, name);
+ struct task_struct *task = kthread_run(fn, arg, "%s", name);
if (IS_ERR(task))
return PTR_ERR(task);
- write_lock_bh(&ksocknal_data.ksnd_global_lock);
- ksocknal_data.ksnd_nthreads++;
- write_unlock_bh(&ksocknal_data.ksnd_global_lock);
+ atomic_inc(&ksocknal_data.ksnd_nthreads);
return 0;
}
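
The "%s" fix above closes a format-string hazard: kthread_run()'s third parameter is a printk-style format, so passing the caller-supplied name directly means any '%' in it would be parsed as a conversion specifier and read garbage varargs. A sketch of the two call shapes, with a hypothetical dynamically built name:

#include <linux/err.h>
#include <linux/kthread.h>

static int demo_fn(void *arg)
{
        return 0;
}

static int demo_spawn(const char *name)        /* name built at run time */
{
        struct task_struct *task;

        /* Unsafe: name itself is used as the format.
         * task = kthread_run(demo_fn, NULL, name);
         */

        /* Safe: name is data for a literal "%s" format. */
        task = kthread_run(demo_fn, NULL, "%s", name);
        if (IS_ERR(task))
                return PTR_ERR(task);
        return 0;
}
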
void
ksocknal_thread_fini (void)
{
- write_lock_bh(&ksocknal_data.ksnd_global_lock);
- if (--ksocknal_data.ksnd_nthreads == 0)
+ if (atomic_dec_and_test(&ksocknal_data.ksnd_nthreads))
wake_up_var(&ksocknal_data.ksnd_nthreads);
- write_unlock_bh(&ksocknal_data.ksnd_global_lock);
}
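
Replacing the ksnd_global_lock critical section with an atomic_t works because the thread count needs atomicity only, not ordering against other shared state, and atomic_dec_and_test() lets the last exiting thread issue the wake-up. wake_up_var() hashes on the variable's address, so the shutdown path presumably blocks in wait_var_event() on the same &ksocknal_data.ksnd_nthreads. A minimal sketch of the pairing, with hypothetical names:

#include <linux/atomic.h>
#include <linux/wait_bit.h>

static atomic_t nthreads;       /* incremented as each worker starts */

/* Called by each worker as it exits; the last one wakes the waiter. */
static void demo_thread_fini(void)
{
        if (atomic_dec_and_test(&nthreads))
                wake_up_var(&nthreads);
}

/* Shutdown path: sleep until every worker has gone.
 * wait_var_event()/wake_up_var() match on the address, so both
 * sides must name the same variable. */
static void demo_wait_all(void)
{
        wait_var_event(&nthreads, atomic_read(&nthreads) == 0);
}
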
int
struct lnet_process_id *id;
int rc;
- LASSERT (atomic_read(&conn->ksnc_conn_refcount) > 0);
+ LASSERT(refcount_read(&conn->ksnc_conn_refcount) > 0);
/* NB: sched lock NOT held */
/* SOCKNAL_RX_LNET_HEADER is here for backward compatibility */