struct trace_page *tage;
page = cfs_alloc_page(gfp);
- if (page != NULL) {
- tage = cfs_alloc(sizeof *tage, gfp);
- if (tage == NULL)
- cfs_free_page(page);
- tage->page = page;
- } else
- tage = NULL;
+ if (page == NULL)
+ return NULL;
+
+ tage = cfs_alloc(sizeof(*tage), gfp);
+ if (tage == NULL) {
+ cfs_free_page(page);
+ return NULL;
+ }
+
+ tage->page = page;
return tage;
}
static void tage_free(struct trace_page *tage)
{
LASSERT(tage != NULL);
+ LASSERT(tage->page != NULL);
- if (tage->page != NULL)
- cfs_free_page(tage->page);
+ cfs_free_page(tage->page);
cfs_free(tage);
}
static int tage_invariant(struct trace_page *tage)
{
- return
- tage != NULL &&
+ return (tage != NULL &&
+ tage->page != NULL &&
tage->used <= CFS_PAGE_SIZE &&
- cfs_page_count(tage->page) > 0;
+ cfs_page_count(tage->page) > 0);
}
/* return a page that has 'len' bytes left at the end */
* to using the last page in the ring buffer. */
goto ring_buffer;
}
+
tage->used = 0;
tage->cpu = smp_processor_id();
list_add_tail(&tage->linkage, &tcd->tcd_pages);
tcd->tcd_cur_pages--;
}
put_pages_on_daemon_list_on_cpu(&pc);
+
+ LASSERT(!list_empty(&tcd->tcd_pages));
}
- LASSERT(!list_empty(&tcd->tcd_pages));
+
+ if (list_empty(&tcd->tcd_pages))
+ return NULL;
tage = tage_from_list(tcd->tcd_pages.next);
tage->used = 0;
tcd = trace_get_tcd(flags);
spin_lock(&pc->pc_lock);
- list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
+ list_splice(&tcd->tcd_pages, &pc->pc_pages);
+ CFS_INIT_LIST_HEAD(&tcd->tcd_pages);
tcd->tcd_cur_pages = 0;
if (pc->pc_want_daemon_pages) {
- list_splice_init(&tcd->tcd_daemon_pages, &pc->pc_pages);
+ list_splice(&tcd->tcd_daemon_pages, &pc->pc_pages);
+ CFS_INIT_LIST_HEAD(&tcd->tcd_daemon_pages);
tcd->tcd_cur_daemon_pages = 0;
}
spin_unlock(&pc->pc_lock);
}
}
CFS_MMSPACE_CLOSE;
- cfs_filp_close(filp);
+ cfs_filp_close(filp);
put_pages_on_daemon_list(&pc);
}
complete(&tctl->tctl_stop);