}
wait_var_event_warning(&ksocknal_data.ksnd_nthreads,
- ksocknal_data.ksnd_nthreads == 0,
+ atomic_read(&ksocknal_data.ksnd_nthreads) == 0,
"waiting for %d threads to terminate\n",
- ksocknal_data.ksnd_nthreads);
+ atomic_read(&ksocknal_data.ksnd_nthreads));
ksocknal_free_buffers();
/* hash table of all my known peers */
DECLARE_HASHTABLE(ksnd_peers, SOCKNAL_PEER_HASH_BITS);
- int ksnd_nthreads; /* # live threads */
+ atomic_t ksnd_nthreads; /* # live threads */
int ksnd_shuttingdown; /* tell threads to exit */
/* schedulers information */
struct ksock_sched **ksnd_schedulers;
if (IS_ERR(task))
return PTR_ERR(task);
- write_lock_bh(&ksocknal_data.ksnd_global_lock);
- ksocknal_data.ksnd_nthreads++;
- write_unlock_bh(&ksocknal_data.ksnd_global_lock);
+ atomic_inc(&ksocknal_data.ksnd_nthreads);
return 0;
}
void
ksocknal_thread_fini (void)
{
- write_lock_bh(&ksocknal_data.ksnd_global_lock);
- if (--ksocknal_data.ksnd_nthreads == 0)
+ /* atomic_dec_and_test() replaces the lock-protected decrement: it is
+  * a single atomic RMW, so ksnd_global_lock is no longer needed here.
+  * The last exiting thread wakes the wait_var_event_warning() waiter
+  * that sleeps on &ksocknal_data.ksnd_nthreads until it reaches zero.
+  */
+ if (atomic_dec_and_test(&ksocknal_data.ksnd_nthreads))
wake_up_var(&ksocknal_data.ksnd_nthreads);
- write_unlock_bh(&ksocknal_data.ksnd_global_lock);
}
int