spin_lock(&(ks_data.ksnd_tsdu_lock));
- if (!cfs_list_empty (&(ks_data.ksnd_freetsdus))) {
+ if (!list_empty (&(ks_data.ksnd_freetsdus))) {
LASSERT(ks_data.ksnd_nfreetsdus > 0);
- KsTsdu = cfs_list_entry(ks_data.ksnd_freetsdus.next, KS_TSDU, Link);
- cfs_list_del(&(KsTsdu->Link));
+ KsTsdu = list_entry(ks_data.ksnd_freetsdus.next, KS_TSDU, Link);
+ list_del(&(KsTsdu->Link));
ks_data.ksnd_nfreetsdus--;
} else {
if (ks_data.ksnd_nfreetsdus > 128) {
KsFreeKsTsdu(KsTsdu);
} else {
- cfs_list_add_tail( &(KsTsdu->Link), &(ks_data.ksnd_freetsdus));
+ list_add_tail( &(KsTsdu->Link), &(ks_data.ksnd_freetsdus));
ks_data.ksnd_nfreetsdus++;
}
spin_unlock(&(ks_data.ksnd_tsdu_lock));
*Length = 0;
- cfs_list_for_each_entry_typed(KsTsdu,
- &TsduMgr->TsduList,KS_TSDU, Link) {
+ list_for_each_entry(KsTsdu, &TsduMgr->TsduList, Link) {
ULONG start = 0;
LASSERT(TsduMgr->TotalBytes >= length);
- while (!cfs_list_empty(&TsduMgr->TsduList)) {
+ while (!list_empty(&TsduMgr->TsduList)) {
ULONG start = 0;
- KsTsdu = cfs_list_entry(TsduMgr->TsduList.next, KS_TSDU, Link);
+ KsTsdu = list_entry(TsduMgr->TsduList.next, KS_TSDU, Link);
LASSERT(KsTsdu->Magic == KS_TSDU_MAGIC);
start = KsTsdu->StartOffset;
if (KsTsdu->StartOffset >= KsTsdu->LastOffset) {
/* remove KsTsdu from list */
- cfs_list_del(&KsTsdu->Link);
+ list_del(&KsTsdu->Link);
TsduMgr->NumOfTsdu--;
KsPutKsTsdu(KsTsdu);
}
/* retrieve the latest Tsdu buffer from the TsduMgr
   list if the list is not empty. */
- if (cfs_list_empty(&(TsduMgr->TsduList))) {
+ if (list_empty(&(TsduMgr->TsduList))) {
LASSERT(TsduMgr->NumOfTsdu == 0);
KsTsdu = NULL;
} else {
LASSERT(TsduMgr->NumOfTsdu > 0);
- KsTsdu = cfs_list_entry(TsduMgr->TsduList.prev, KS_TSDU, Link);
+ KsTsdu = list_entry(TsduMgr->TsduList.prev, KS_TSDU, Link);
/* if this Tsdu does not contain enough space, we need
   to allocate a new Tsdu queue. */
if (NULL == KsTsdu) {
KsTsdu = KsAllocateKsTsdu();
if (NULL != KsTsdu) {
- cfs_list_add_tail(&(KsTsdu->Link), &(TsduMgr->TsduList));
+ list_add_tail(&(KsTsdu->Link), &(TsduMgr->TsduList));
TsduMgr->NumOfTsdu++;
}
}
} else {
- KsTsdu = cfs_list_entry(TsduMgr->TsduList.next, KS_TSDU, Link);
+ KsTsdu = list_entry(TsduMgr->TsduList.next, KS_TSDU, Link);
LASSERT(KsTsdu->Magic == KS_TSDU_MAGIC);
/* remove the KsTsdu from TsduMgr list to release the lock */
- cfs_list_del(&(KsTsdu->Link));
+ list_del(&(KsTsdu->Link));
TsduMgr->NumOfTsdu--;
while (length > BytesRecved) {
KsTsdu = NULL;
} else {
TsduMgr->NumOfTsdu++;
- cfs_list_add(&(KsTsdu->Link), &(TsduMgr->TsduList));
+ list_add(&(KsTsdu->Link), &(TsduMgr->TsduList));
}
}
FALSE
);
- CFS_INIT_LIST_HEAD(
+ INIT_LIST_HEAD(
&(TsduMgr->TsduList)
);
KsRemoveTdiEngine(TsduMgr);
KeSetEvent(&(TsduMgr->Event), 0, FALSE);
- while (!cfs_list_empty(&TsduMgr->TsduList)) {
+ while (!list_empty(&TsduMgr->TsduList)) {
- KsTsdu = cfs_list_entry(TsduMgr->TsduList.next, KS_TSDU, Link);
+ KsTsdu = list_entry(TsduMgr->TsduList.next, KS_TSDU, Link);
LASSERT(KsTsdu->Magic == KS_TSDU_MAGIC);
if (KsTsdu->StartOffset == KsTsdu->LastOffset) {
// KsTsdu is empty now, we need free it ...
//
- cfs_list_del(&(KsTsdu->Link));
+ list_del(&(KsTsdu->Link));
TsduMgr->NumOfTsdu--;
KsFreeKsTsdu(KsTsdu);
LASSERT(parent->kstc_type == kstt_listener);
LASSERT(parent->kstc_state == ksts_listening);
- if (cfs_list_empty(&(parent->listener.kstc_listening.list))) {
+ if (list_empty(&(parent->listener.kstc_listening.list))) {
child = NULL;
} else {
- cfs_list_t * tmp;
+ struct list_head * tmp;
/* check the listening queue and try to get a free connection */
- cfs_list_for_each(tmp, &(parent->listener.kstc_listening.list)) {
- child = cfs_list_entry (tmp, ks_tconn_t, child.kstc_link);
+ list_for_each(tmp, &(parent->listener.kstc_listening.list)) {
+ child = list_entry (tmp, ks_tconn_t, child.kstc_link);
spin_lock(&(child->kstc_lock));
if (!child->child.kstc_busy) {
/* attach it into global list in ks_data */
- cfs_list_add(&(tconn->kstc_list), &(ks_data.ksnd_tconns));
+ list_add(&(tconn->kstc_list), &(ks_data.ksnd_tconns));
ks_data.ksnd_ntconns++;
spin_unlock(&(ks_data.ksnd_tconn_lock));
spin_lock(&(ks_data.ksnd_tconn_lock));
/* remove it from the global list */
- cfs_list_del(&tconn->kstc_list);
+ list_del(&tconn->kstc_list);
ks_data.ksnd_ntconns--;
/* if this is the last tconn, it would be safe for
RtlInitUnicodeString(&(tconn->kstc_dev), TCP_DEVICE_NAME);
- CFS_INIT_LIST_HEAD(&(tconn->listener.kstc_listening.list));
- CFS_INIT_LIST_HEAD(&(tconn->listener.kstc_accepted.list));
+ INIT_LIST_HEAD(&(tconn->listener.kstc_listening.list));
+ INIT_LIST_HEAD(&(tconn->listener.kstc_accepted.list));
cfs_init_event( &(tconn->listener.kstc_accept_event),
TRUE,
if (tconn->child.kstc_queued) {
- cfs_list_del(&(tconn->child.kstc_link));
+ list_del(&(tconn->child.kstc_link));
if (tconn->child.kstc_queueno) {
if (!engs->queued) {
spin_lock(&engm->lock);
if (!engs->queued) {
- cfs_list_add_tail(&engs->link, &engm->list);
+ list_add_tail(&engs->link, &engm->list);
engs->queued = TRUE;
engs->tconn = tconn;
engs->emgr = engm;
LASSERT(engm != NULL);
spin_lock(&engm->lock);
if (engs->queued) {
- cfs_list_del(&engs->link);
+ list_del(&engs->link);
engs->queued = FALSE;
engs->tconn = NULL;
engs->emgr = NULL;
tflags = TDI_SEND_NON_BLOCKING;
}
- if (cfs_list_empty(&TsduMgr->TsduList)) {
+ if (list_empty(&TsduMgr->TsduList)) {
LASSERT(TsduMgr->TotalBytes == 0);
ks_unlock_tsdumgr(TsduMgr);
goto errorout;
{
ks_engine_mgr_t * engm = context;
ks_engine_slot_t * engs;
- cfs_list_t * list;
+ struct list_head * list;
ks_tconn_t * tconn;
cfs_set_thread_priority(31);
cfs_wait_event_internal(&engm->start, 0);
spin_lock(&engm->lock);
- if (cfs_list_empty(&engm->list)) {
+ if (list_empty(&engm->list)) {
spin_unlock(&engm->lock);
continue;
}
list = engm->list.next;
- cfs_list_del(list);
- engs = cfs_list_entry(list, ks_engine_slot_t, link);
+ list_del(list);
+ engs = list_entry(list, ks_engine_slot_t, link);
LASSERT(engs->emgr == engm);
LASSERT(engs->queued);
engs->emgr = NULL;
RtlZeroMemory(&ks_data, sizeof(ks_tdi_data_t));
spin_lock_init(&ks_data.ksnd_tconn_lock);
- CFS_INIT_LIST_HEAD(&ks_data.ksnd_tconns);
+ INIT_LIST_HEAD(&ks_data.ksnd_tconns);
cfs_init_event(&ks_data.ksnd_tconn_exit, TRUE, FALSE);
ks_data.ksnd_tconn_slab = kmem_cache_create("tcon", sizeof(ks_tconn_t),
/* initialize tsdu related globals */
spin_lock_init(&ks_data.ksnd_tsdu_lock);
- CFS_INIT_LIST_HEAD(&ks_data.ksnd_freetsdus);
+ INIT_LIST_HEAD(&ks_data.ksnd_freetsdus);
ks_data.ksnd_tsdu_size = TDINAL_TSDU_DEFAULT_SIZE; /* 64k */
ks_data.ksnd_tsdu_slab = kmem_cache_create("tsdu", ks_data.ksnd_tsdu_size,
0, 0, NULL);
spin_lock_init(&ks_data.ksnd_engine_mgr[i].lock);
cfs_init_event(&ks_data.ksnd_engine_mgr[i].start, TRUE, FALSE);
cfs_init_event(&ks_data.ksnd_engine_mgr[i].exit, TRUE, FALSE);
- CFS_INIT_LIST_HEAD(&ks_data.ksnd_engine_mgr[i].list);
+ INIT_LIST_HEAD(&ks_data.ksnd_engine_mgr[i].list);
kthread_run(KsDeliveryEngineThread, &ks_data.ksnd_engine_mgr[i], "");
}
ks_fini_tdi_data()
{
PKS_TSDU KsTsdu = NULL;
- cfs_list_t * list = NULL;
+ struct list_head * list = NULL;
int i;
/* clean up the pnp handler and address slots */
/* we need wait until all the tconn are freed */
spin_lock(&(ks_data.ksnd_tconn_lock));
- if (cfs_list_empty(&(ks_data.ksnd_tconns))) {
+ if (list_empty(&(ks_data.ksnd_tconns))) {
cfs_wake_event(&ks_data.ksnd_tconn_exit);
}
spin_unlock(&(ks_data.ksnd_tconn_lock));
/* clean up all the tsdu buffers in the free list */
spin_lock(&(ks_data.ksnd_tsdu_lock));
- cfs_list_for_each (list, &ks_data.ksnd_freetsdus) {
- KsTsdu = cfs_list_entry (list, KS_TSDU, Link);
+ list_for_each (list, &ks_data.ksnd_freetsdus) {
+ KsTsdu = list_entry (list, KS_TSDU, Link);
kmem_cache_free(
ks_data.ksnd_tsdu_slab,
if (backlog) {
spin_lock(&backlog->kstc_lock);
/* attach it into the listening list of the daemon */
- cfs_list_add( &backlog->child.kstc_link,
+ list_add( &backlog->child.kstc_link,
&parent->listener.kstc_listening.list );
parent->listener.kstc_listening.num++;
void
ks_stop_listen(ks_tconn_t *tconn)
{
- cfs_list_t * list;
+ struct list_head * list;
ks_tconn_t * backlog;
/* reset all tdi event callbacks to NULL */
cfs_clear_flag(tconn->kstc_flags, KS_TCONN_DAEMON_STARTED);
/* cleanup all the listening backlog child connections */
- cfs_list_for_each (list, &(tconn->listener.kstc_listening.list)) {
- backlog = cfs_list_entry(list, ks_tconn_t, child.kstc_link);
+ list_for_each (list, &(tconn->listener.kstc_listening.list)) {
+ backlog = list_entry(list, ks_tconn_t, child.kstc_link);
/* destroy and free it */
ks_put_tconn(backlog);
ks_tconn_t ** child
)
{
- cfs_list_t * tmp;
+ struct list_head * tmp;
ks_tconn_t * backlog = NULL;
ks_replenish_backlogs(parent, parent->listener.nbacklog);
/* check the listening queue and try to search the accepted connection */
- cfs_list_for_each(tmp, &(parent->listener.kstc_listening.list)) {
- backlog = cfs_list_entry (tmp, ks_tconn_t, child.kstc_link);
+ list_for_each(tmp, &(parent->listener.kstc_listening.list)) {
+ backlog = list_entry (tmp, ks_tconn_t, child.kstc_link);
spin_lock(&(backlog->kstc_lock));
LASSERT(backlog->kstc_state == ksts_connected);
LASSERT(backlog->child.kstc_busy);
- cfs_list_del(&(backlog->child.kstc_link));
- cfs_list_add(&(backlog->child.kstc_link),
+ list_del(&(backlog->child.kstc_link));
+ list_add(&(backlog->child.kstc_link),
&(parent->listener.kstc_accepted.list));
parent->listener.kstc_accepted.num++;
parent->listener.kstc_listening.num--;