spin_lock(&(ks_data.ksnd_tsdu_lock));
- if (!cfs_list_empty (&(ks_data.ksnd_freetsdus))) {
+ if (!list_empty (&(ks_data.ksnd_freetsdus))) {
LASSERT(ks_data.ksnd_nfreetsdus > 0);
- KsTsdu = cfs_list_entry(ks_data.ksnd_freetsdus.next, KS_TSDU, Link);
- cfs_list_del(&(KsTsdu->Link));
+ KsTsdu = list_entry(ks_data.ksnd_freetsdus.next, KS_TSDU, Link);
+ list_del(&(KsTsdu->Link));
ks_data.ksnd_nfreetsdus--;
} else {
- KsTsdu = (PKS_TSDU) cfs_mem_cache_alloc(
+ KsTsdu = (PKS_TSDU) kmem_cache_alloc(
ks_data.ksnd_tsdu_slab, 0);
}
PKS_TSDU KsTsdu
)
{
- cfs_mem_cache_free(
+ kmem_cache_free(
ks_data.ksnd_tsdu_slab,
KsTsdu );
}
if (ks_data.ksnd_nfreetsdus > 128) {
KsFreeKsTsdu(KsTsdu);
} else {
- cfs_list_add_tail( &(KsTsdu->Link), &(ks_data.ksnd_freetsdus));
+ list_add_tail( &(KsTsdu->Link), &(ks_data.ksnd_freetsdus));
ks_data.ksnd_nfreetsdus++;
}
spin_unlock(&(ks_data.ksnd_tsdu_lock));
*Length = 0;
- cfs_list_for_each_entry_typed(KsTsdu,
- &TsduMgr->TsduList,KS_TSDU, Link) {
+ list_for_each_entry(KsTsdu, &TsduMgr->TsduList, Link) {
ULONG start = 0;
LASSERT(TsduMgr->TotalBytes >= length);
- while (!cfs_list_empty(&TsduMgr->TsduList)) {
+ while (!list_empty(&TsduMgr->TsduList)) {
ULONG start = 0;
- KsTsdu = cfs_list_entry(TsduMgr->TsduList.next, KS_TSDU, Link);
+ KsTsdu = list_entry(TsduMgr->TsduList.next, KS_TSDU, Link);
LASSERT(KsTsdu->Magic == KS_TSDU_MAGIC);
start = KsTsdu->StartOffset;
if (KsTsdu->StartOffset >= KsTsdu->LastOffset) {
/* remove KsTsdu from list */
- cfs_list_del(&KsTsdu->Link);
+ list_del(&KsTsdu->Link);
TsduMgr->NumOfTsdu--;
KsPutKsTsdu(KsTsdu);
}
/* retrieve the latest Tsdu buffer form TsduMgr
list if the list is not empty. */
- if (cfs_list_empty(&(TsduMgr->TsduList))) {
+ if (list_empty(&(TsduMgr->TsduList))) {
LASSERT(TsduMgr->NumOfTsdu == 0);
KsTsdu = NULL;
} else {
LASSERT(TsduMgr->NumOfTsdu > 0);
- KsTsdu = cfs_list_entry(TsduMgr->TsduList.prev, KS_TSDU, Link);
+ KsTsdu = list_entry(TsduMgr->TsduList.prev, KS_TSDU, Link);
/* if this Tsdu does not contain enough space, we need
allocate a new Tsdu queue. */
if (NULL == KsTsdu) {
KsTsdu = KsAllocateKsTsdu();
if (NULL != KsTsdu) {
- cfs_list_add_tail(&(KsTsdu->Link), &(TsduMgr->TsduList));
+ list_add_tail(&(KsTsdu->Link), &(TsduMgr->TsduList));
TsduMgr->NumOfTsdu++;
}
}
} else {
- KsTsdu = cfs_list_entry(TsduMgr->TsduList.next, KS_TSDU, Link);
+ KsTsdu = list_entry(TsduMgr->TsduList.next, KS_TSDU, Link);
LASSERT(KsTsdu->Magic == KS_TSDU_MAGIC);
/* remove the KsTsdu from TsduMgr list to release the lock */
- cfs_list_del(&(KsTsdu->Link));
+ list_del(&(KsTsdu->Link));
TsduMgr->NumOfTsdu--;
while (length > BytesRecved) {
KsTsdu = NULL;
} else {
TsduMgr->NumOfTsdu++;
- cfs_list_add(&(KsTsdu->Link), &(TsduMgr->TsduList));
+ list_add(&(KsTsdu->Link), &(TsduMgr->TsduList));
}
}
FALSE
);
- CFS_INIT_LIST_HEAD(
+ INIT_LIST_HEAD(
&(TsduMgr->TsduList)
);
KsRemoveTdiEngine(TsduMgr);
KeSetEvent(&(TsduMgr->Event), 0, FALSE);
- while (!cfs_list_empty(&TsduMgr->TsduList)) {
+ while (!list_empty(&TsduMgr->TsduList)) {
- KsTsdu = cfs_list_entry(TsduMgr->TsduList.next, KS_TSDU, Link);
+ KsTsdu = list_entry(TsduMgr->TsduList.next, KS_TSDU, Link);
LASSERT(KsTsdu->Magic == KS_TSDU_MAGIC);
if (KsTsdu->StartOffset == KsTsdu->LastOffset) {
// KsTsdu is empty now, we need free it ...
//
- cfs_list_del(&(KsTsdu->Link));
+ list_del(&(KsTsdu->Link));
TsduMgr->NumOfTsdu--;
KsFreeKsTsdu(KsTsdu);
list = RemoveHeadList(&ks_data.ksnd_addrs_list);
slot = CONTAINING_RECORD(list, ks_addr_slot_t, link);
- cfs_free(slot);
+ kfree(slot);
ks_data.ksnd_naddrs--;
}
return;
}
- slot = cfs_alloc(sizeof(ks_addr_slot_t) + DeviceName->Length, CFS_ALLOC_ZERO);
+ slot = kmalloc(sizeof(ks_addr_slot_t) + DeviceName->Length, __GFP_ZERO);
if (slot != NULL) {
spin_lock(&ks_data.ksnd_addrs_lock);
InsertTailList(&ks_data.ksnd_addrs_list, &slot->link);
LASSERT(parent->kstc_type == kstt_listener);
LASSERT(parent->kstc_state == ksts_listening);
- if (cfs_list_empty(&(parent->listener.kstc_listening.list))) {
+ if (list_empty(&(parent->listener.kstc_listening.list))) {
child = NULL;
} else {
- cfs_list_t * tmp;
+ struct list_head * tmp;
/* check the listening queue and try to get a free connecton */
- cfs_list_for_each(tmp, &(parent->listener.kstc_listening.list)) {
- child = cfs_list_entry (tmp, ks_tconn_t, child.kstc_link);
+ list_for_each(tmp, &(parent->listener.kstc_listening.list)) {
+ child = list_entry (tmp, ks_tconn_t, child.kstc_link);
spin_lock(&(child->kstc_lock));
if (!child->child.kstc_busy) {
/* free the Context structure... */
ASSERT(Context->Magic == KS_TCP_CONTEXT_MAGIC);
Context->Magic = 'CDAB';
- cfs_free(Context);
+ kfree(Context);
}
/* free the Irp */
if (context) {
ASSERT(context->Magic == KS_TCP_CONTEXT_MAGIC);
context->Magic = 'CDAB';
- cfs_free(context);
+ kfree(context);
}
/* free the Irp structure */
/* there's still data in tdi internal queue, we need issue a new
Irp to receive all of them. first allocate the tcp context */
- context = cfs_alloc(sizeof(KS_TCP_COMPLETION_CONTEXT), 0);
+ context = kmalloc(sizeof(KS_TCP_COMPLETION_CONTEXT), 0);
if (!context) {
status = STATUS_INSUFFICIENT_RESOURCES;
goto errorout;
if (context) {
ASSERT(context->Magic == KS_TCP_CONTEXT_MAGIC);
context->Magic = 'CDAB';
- cfs_free(context);
+ kfree(context);
}
ks_abort_tconn(tconn);
ks_tconn_t * tconn = NULL;
/* allocate ksoc_tconn_t from the slab cache memory */
- tconn = (ks_tconn_t *)cfs_mem_cache_alloc(
- ks_data.ksnd_tconn_slab, CFS_ALLOC_ZERO);
+ tconn = (ks_tconn_t *)kmem_cache_alloc(
+ ks_data.ksnd_tconn_slab, __GFP_ZERO);
if (tconn) {
/* attach it into global list in ks_data */
- cfs_list_add(&(tconn->kstc_list), &(ks_data.ksnd_tconns));
+ list_add(&(tconn->kstc_list), &(ks_data.ksnd_tconns));
ks_data.ksnd_ntconns++;
spin_unlock(&(ks_data.ksnd_tconn_lock));
void
ks_free_tconn(ks_tconn_t * tconn)
{
- LASSERT(cfs_atomic_read(&(tconn->kstc_refcount)) == 0);
+ LASSERT(atomic_read(&(tconn->kstc_refcount)) == 0);
spin_lock(&(ks_data.ksnd_tconn_lock));
/* remove it from the global list */
- cfs_list_del(&tconn->kstc_list);
+ list_del(&tconn->kstc_list);
ks_data.ksnd_ntconns--;
/* if this is the last tconn, it would be safe for
spin_unlock(&(ks_data.ksnd_tconn_lock));
/* free the structure memory */
- cfs_mem_cache_free(ks_data.ksnd_tconn_slab, tconn);
+ kmem_cache_free(ks_data.ksnd_tconn_slab, tconn);
KsPrint((3, "ks_free_tconn: tconn %p is freed.\n", tconn));
}
RtlInitUnicodeString(&(tconn->kstc_dev), TCP_DEVICE_NAME);
- CFS_INIT_LIST_HEAD(&(tconn->listener.kstc_listening.list));
- CFS_INIT_LIST_HEAD(&(tconn->listener.kstc_accepted.list));
+ INIT_LIST_HEAD(&(tconn->listener.kstc_listening.list));
+ INIT_LIST_HEAD(&(tconn->listener.kstc_accepted.list));
cfs_init_event( &(tconn->listener.kstc_accept_event),
TRUE,
ks_tconn_t * tconn
)
{
- cfs_atomic_inc(&(tconn->kstc_refcount));
+ atomic_inc(&(tconn->kstc_refcount));
}
/*
ks_tconn_t *tconn
)
{
- if (cfs_atomic_dec_and_test(&(tconn->kstc_refcount))) {
+ if (atomic_dec_and_test(&(tconn->kstc_refcount))) {
spin_lock(&(tconn->kstc_lock));
if (tconn->child.kstc_queued) {
- cfs_list_del(&(tconn->child.kstc_link));
+ list_del(&(tconn->child.kstc_link));
if (tconn->child.kstc_queueno) {
if (!engs->queued) {
spin_lock(&engm->lock);
if (!engs->queued) {
- cfs_list_add_tail(&engs->link, &engm->list);
+ list_add_tail(&engs->link, &engm->list);
engs->queued = TRUE;
engs->tconn = tconn;
engs->emgr = engm;
LASSERT(engm != NULL);
spin_lock(&engm->lock);
if (engs->queued) {
- cfs_list_del(&engs->link);
+ list_del(&engs->link);
engs->queued = FALSE;
engs->tconn = NULL;
engs->emgr = NULL;
length = KsQueryMdlsSize(mdl);
/* we need allocate the ks_tx_t structure from memory pool. */
- context = cfs_alloc(sizeof(ks_tdi_tx_t), 0);
+ context = kmalloc(sizeof(ks_tdi_tx_t), 0);
if (!context) {
status = STATUS_INSUFFICIENT_RESOURCES;
goto errorout;
if (context) {
ASSERT(context->Magic == KS_TCP_CONTEXT_MAGIC);
context->Magic = 'CDAB';
- cfs_free(context);
+ kfree(context);
}
/* here need free the Irp. */
tflags = TDI_SEND_NON_BLOCKING;
}
- if (cfs_list_empty(&TsduMgr->TsduList)) {
+ if (list_empty(&TsduMgr->TsduList)) {
LASSERT(TsduMgr->TotalBytes == 0);
ks_unlock_tsdumgr(TsduMgr);
goto errorout;
{
ks_engine_mgr_t * engm = context;
ks_engine_slot_t * engs;
- cfs_list_t * list;
+ struct list_head * list;
ks_tconn_t * tconn;
cfs_set_thread_priority(31);
cfs_wait_event_internal(&engm->start, 0);
spin_lock(&engm->lock);
- if (cfs_list_empty(&engm->list)) {
+ if (list_empty(&engm->list)) {
spin_unlock(&engm->lock);
continue;
}
list = engm->list.next;
- cfs_list_del(list);
- engs = cfs_list_entry(list, ks_engine_slot_t, link);
+ list_del(list);
+ engs = list_entry(list, ks_engine_slot_t, link);
LASSERT(engs->emgr == engm);
LASSERT(engs->queued);
engs->emgr = NULL;
RtlZeroMemory(&ks_data, sizeof(ks_tdi_data_t));
spin_lock_init(&ks_data.ksnd_tconn_lock);
- CFS_INIT_LIST_HEAD(&ks_data.ksnd_tconns);
+ INIT_LIST_HEAD(&ks_data.ksnd_tconns);
cfs_init_event(&ks_data.ksnd_tconn_exit, TRUE, FALSE);
- ks_data.ksnd_tconn_slab = cfs_mem_cache_create(
- "tcon", sizeof(ks_tconn_t) , 0, 0);
+ ks_data.ksnd_tconn_slab = kmem_cache_create("tcon", sizeof(ks_tconn_t),
+ 0, 0, NULL);
if (!ks_data.ksnd_tconn_slab) {
rc = -ENOMEM;
/* initialize tsdu related globals */
spin_lock_init(&ks_data.ksnd_tsdu_lock);
- CFS_INIT_LIST_HEAD(&ks_data.ksnd_freetsdus);
+ INIT_LIST_HEAD(&ks_data.ksnd_freetsdus);
ks_data.ksnd_tsdu_size = TDINAL_TSDU_DEFAULT_SIZE; /* 64k */
- ks_data.ksnd_tsdu_slab = cfs_mem_cache_create(
- "tsdu", ks_data.ksnd_tsdu_size, 0, 0);
+ ks_data.ksnd_tsdu_slab = kmem_cache_create("tsdu", ks_data.ksnd_tsdu_size,
+ 0, 0, NULL);
if (!ks_data.ksnd_tsdu_slab) {
rc = -ENOMEM;
}
/* initialize engine threads list */
- ks_data.ksnd_engine_nums = cfs_num_online_cpus();
+ ks_data.ksnd_engine_nums = num_online_cpus();
if (ks_data.ksnd_engine_nums < 4) {
ks_data.ksnd_engine_nums = 4;
}
- ks_data.ksnd_engine_mgr = cfs_alloc(sizeof(ks_engine_mgr_t) *
- ks_data.ksnd_engine_nums,CFS_ALLOC_ZERO);
+ ks_data.ksnd_engine_mgr = kmalloc(sizeof(ks_engine_mgr_t) *
+ ks_data.ksnd_engine_nums, __GFP_ZERO);
if (ks_data.ksnd_engine_mgr == NULL) {
rc = -ENOMEM;
goto errorout;
}
for (i = 0; i < ks_data.ksnd_engine_nums; i++) {
spin_lock_init(&ks_data.ksnd_engine_mgr[i].lock);
- cfs_init_event(&ks_data.ksnd_engine_mgr[i].start, TRUE, FALSE);
- cfs_init_event(&ks_data.ksnd_engine_mgr[i].exit, TRUE, FALSE);
- CFS_INIT_LIST_HEAD(&ks_data.ksnd_engine_mgr[i].list);
- cfs_create_thread(KsDeliveryEngineThread, &ks_data.ksnd_engine_mgr[i], 0);
+ cfs_init_event(&ks_data.ksnd_engine_mgr[i].start, TRUE, FALSE);
+ cfs_init_event(&ks_data.ksnd_engine_mgr[i].exit, TRUE, FALSE);
+ INIT_LIST_HEAD(&ks_data.ksnd_engine_mgr[i].list);
+ kthread_run(KsDeliveryEngineThread, &ks_data.ksnd_engine_mgr[i], "");
}
/* register pnp handlers to watch network condition */
/* do cleanup in case we get failures */
if (rc < 0) {
if (ks_data.ksnd_tconn_slab) {
- cfs_mem_cache_destroy(ks_data.ksnd_tconn_slab);
+kmem_cache_destroy(ks_data.ksnd_tconn_slab);
ks_data.ksnd_tconn_slab = NULL;
}
}
ks_fini_tdi_data()
{
PKS_TSDU KsTsdu = NULL;
- cfs_list_t * list = NULL;
+ struct list_head * list = NULL;
int i;
/* clean up the pnp handler and address slots */
/* we need wait until all the tconn are freed */
spin_lock(&(ks_data.ksnd_tconn_lock));
- if (cfs_list_empty(&(ks_data.ksnd_tconns))) {
+ if (list_empty(&(ks_data.ksnd_tconns))) {
cfs_wake_event(&ks_data.ksnd_tconn_exit);
}
spin_unlock(&(ks_data.ksnd_tconn_lock));
cfs_wait_event_internal(&ks_data.ksnd_tconn_exit, 0);
/* it's safe to delete the tconn slab ... */
- cfs_mem_cache_destroy(ks_data.ksnd_tconn_slab);
+kmem_cache_destroy(ks_data.ksnd_tconn_slab);
ks_data.ksnd_tconn_slab = NULL;
/* clean up all the tsud buffers in the free list */
spin_lock(&(ks_data.ksnd_tsdu_lock));
- cfs_list_for_each (list, &ks_data.ksnd_freetsdus) {
- KsTsdu = cfs_list_entry (list, KS_TSDU, Link);
+ list_for_each (list, &ks_data.ksnd_freetsdus) {
+ KsTsdu = list_entry (list, KS_TSDU, Link);
- cfs_mem_cache_free(
+ kmem_cache_free(
ks_data.ksnd_tsdu_slab,
KsTsdu );
}
spin_unlock(&(ks_data.ksnd_tsdu_lock));
/* it's safe to delete the tsdu slab ... */
- cfs_mem_cache_destroy(ks_data.ksnd_tsdu_slab);
+kmem_cache_destroy(ks_data.ksnd_tsdu_slab);
ks_data.ksnd_tsdu_slab = NULL;
/* good! it's smooth to do the cleaning up...*/
if (backlog) {
spin_lock(&backlog->kstc_lock);
/* attch it into the listing list of daemon */
- cfs_list_add( &backlog->child.kstc_link,
+ list_add( &backlog->child.kstc_link,
&parent->listener.kstc_listening.list );
parent->listener.kstc_listening.num++;
void
ks_stop_listen(ks_tconn_t *tconn)
{
- cfs_list_t * list;
+ struct list_head * list;
ks_tconn_t * backlog;
/* reset all tdi event callbacks to NULL */
cfs_clear_flag(tconn->kstc_flags, KS_TCONN_DAEMON_STARTED);
/* cleanup all the listening backlog child connections */
- cfs_list_for_each (list, &(tconn->listener.kstc_listening.list)) {
- backlog = cfs_list_entry(list, ks_tconn_t, child.kstc_link);
+ list_for_each (list, &(tconn->listener.kstc_listening.list)) {
+ backlog = list_entry(list, ks_tconn_t, child.kstc_link);
/* destory and free it */
ks_put_tconn(backlog);
ks_tconn_t ** child
)
{
- cfs_list_t * tmp;
+ struct list_head * tmp;
ks_tconn_t * backlog = NULL;
ks_replenish_backlogs(parent, parent->listener.nbacklog);
/* check the listening queue and try to search the accepted connecton */
- cfs_list_for_each(tmp, &(parent->listener.kstc_listening.list)) {
- backlog = cfs_list_entry (tmp, ks_tconn_t, child.kstc_link);
+ list_for_each(tmp, &(parent->listener.kstc_listening.list)) {
+ backlog = list_entry (tmp, ks_tconn_t, child.kstc_link);
spin_lock(&(backlog->kstc_lock));
LASSERT(backlog->kstc_state == ksts_connected);
LASSERT(backlog->child.kstc_busy);
- cfs_list_del(&(backlog->child.kstc_link));
- cfs_list_add(&(backlog->child.kstc_link),
+ list_del(&(backlog->child.kstc_link));
+ list_add(&(backlog->child.kstc_link),
&(parent->listener.kstc_accepted.list));
parent->listener.kstc_accepted.num++;
parent->listener.kstc_listening.num--;
spin_lock(&ks_data.ksnd_addrs_lock);
- *names = cfs_alloc(sizeof(char *) * ks_data.ksnd_naddrs, CFS_ALLOC_ZERO);
+ *names = kmalloc(sizeof(char *) * ks_data.ksnd_naddrs, __GFP_ZERO);
if (*names == NULL) {
goto errorout;
}
/*
 * libcfs_ipif_free_enumeration
 *   Release the interface-name pointer array previously returned by the
 *   enumeration routine.
 *
 *   names: pointer array to free; may be NULL.
 *   n:     number of entries in the array (unused here — the array was
 *          allocated as a single contiguous block, so one kfree suffices).
 *
 *   Note: kfree(NULL) is a documented no-op, so the former
 *   "if (names)" guard was redundant and has been removed.
 */
void libcfs_ipif_free_enumeration(char **names, int n)
{
    kfree(names);
}