list_add_tail (&ltx->ltx_tx.tx_list, ltx->ltx_idle);
/* normal tx desc => wakeup anyone blocking for one */
- if (ltx->ltx_idle == &ksocknal_data.ksnd_idle_ltx_list &&
- waitqueue_active (&ksocknal_data.ksnd_idle_ltx_waitq))
+ if (ltx->ltx_idle == &ksocknal_data.ksnd_idle_ltx_list)
wake_up (&ksocknal_data.ksnd_idle_ltx_waitq);
spin_unlock_irqrestore (&ksocknal_data.ksnd_idle_ltx_lock, flags);
spin_lock_irqsave (&sched->kss_lock, flags);
list_add_tail (&tx->tx_list, &sched->kss_zctxdone_list);
- if (waitqueue_active (&sched->kss_waitq))
- wake_up (&sched->kss_waitq);
+ wake_up (&sched->kss_waitq);
spin_unlock_irqrestore (&sched->kss_lock, flags);
EXIT;
list_add_tail (&route->ksnr_connect_list,
&ksocknal_data.ksnd_autoconnectd_routes);
-
- if (waitqueue_active (&ksocknal_data.ksnd_autoconnectd_waitq))
- wake_up (&ksocknal_data.ksnd_autoconnectd_waitq);
+ wake_up (&ksocknal_data.ksnd_autoconnectd_waitq);
spin_unlock_irqrestore (&ksocknal_data.ksnd_autoconnectd_lock, flags);
}
list_add_tail (&conn->ksnc_tx_list,
&sched->kss_tx_conns);
conn->ksnc_tx_scheduled = 1;
- if (waitqueue_active (&sched->kss_waitq))
- wake_up (&sched->kss_waitq);
+ wake_up (&sched->kss_waitq);
}
spin_unlock_irqrestore (&sched->kss_lock, flags);
spin_lock_irqsave (&sched->kss_lock, flags);
list_add_tail (&conn->ksnc_rx_list, &sched->kss_rx_conns);
-
- if (waitqueue_active (&sched->kss_waitq))
- wake_up (&sched->kss_waitq);
+ wake_up (&sched->kss_waitq);
spin_unlock_irqrestore (&sched->kss_lock, flags);
}
/* extra ref for scheduler */
atomic_inc (&conn->ksnc_refcount);
- if (waitqueue_active (&sched->kss_waitq))
- wake_up (&sched->kss_waitq);
+ wake_up (&sched->kss_waitq);
}
spin_unlock_irqrestore (&sched->kss_lock, flags);
/* extra ref for scheduler */
atomic_inc (&conn->ksnc_refcount);
- if (waitqueue_active (&sched->kss_waitq))
- wake_up (&sched->kss_waitq);
+ wake_up (&sched->kss_waitq);
}
spin_unlock_irqrestore (&sched->kss_lock, flags);