        ras->ras_consecutive_requests++;
        rar->lrr_reader = current;
-       cfs_list_add(&rar->lrr_linkage, &ras->ras_read_beads);
+       list_add(&rar->lrr_linkage, &ras->ras_read_beads);
        spin_unlock(&ras->ras_lock);
}
        ras = ll_ras_get(f);
        spin_lock(&ras->ras_lock);
-       cfs_list_del_init(&rar->lrr_linkage);
+       list_del_init(&rar->lrr_linkage);
        spin_unlock(&ras->ras_lock);
}
{
        struct ll_ra_read *scan;

-       cfs_list_for_each_entry(scan, &ras->ras_read_beads, lrr_linkage) {
+       list_for_each_entry(scan, &ras->ras_read_beads, lrr_linkage) {
                if (scan->lrr_reader == current)
                        return scan;
        }
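
The three hunks above are a mechanical conversion of the per-reader tracking
list from the libcfs wrappers to the native <linux/list.h> calls: a reader
registers itself on ras_read_beads under ras_lock, is found again by matching
lrr_reader against current, and unlinks itself when the read ends. The sketch
below reproduces that register/lookup/unregister pattern in userspace so it
can be compiled and run; the list primitives are re-implemented locally, a
pthread mutex stands in for the spinlock, and pthread_self() stands in for
current, so everything beyond the pattern itself is illustrative, not Lustre
code.

#include <stdio.h>
#include <stddef.h>
#include <pthread.h>

/* Local stand-ins for <linux/list.h> with the same semantics as the calls
 * used in the patch: INIT_LIST_HEAD(), list_add(), list_del_init(),
 * list_for_each_entry(). */
struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_add(struct list_head *n, struct list_head *head)
{
        n->next = head->next;
        n->prev = head;
        head->next->prev = n;
        head->next = n;
}

static void list_del_init(struct list_head *e)
{
        e->prev->next = e->next;
        e->next->prev = e->prev;
        INIT_LIST_HEAD(e);
}

#define list_entry(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

#define list_for_each_entry(pos, head, member)                          \
        for (pos = list_entry((head)->next, __typeof__(*pos), member);  \
             &pos->member != (head);                                    \
             pos = list_entry(pos->member.next, __typeof__(*pos), member))

/* Miniature ll_readahead_state / ll_ra_read: one list of active readers,
 * keyed by the reading thread (pthread_self() instead of current). */
struct ra_state {
        pthread_mutex_t  ras_lock;              /* spin_lock in the original */
        struct list_head ras_read_beads;
};

struct ra_read {
        pthread_t        lrr_reader;
        struct list_head lrr_linkage;
};

static void ra_read_in(struct ra_state *ras, struct ra_read *rar)
{
        rar->lrr_reader = pthread_self();
        pthread_mutex_lock(&ras->ras_lock);
        list_add(&rar->lrr_linkage, &ras->ras_read_beads);
        pthread_mutex_unlock(&ras->ras_lock);
}

/* Caller holds ras_lock, like ll_ra_read_get_locked() in the patch. */
static struct ra_read *ra_read_get_locked(struct ra_state *ras)
{
        struct ra_read *scan;

        list_for_each_entry(scan, &ras->ras_read_beads, lrr_linkage)
                if (pthread_equal(scan->lrr_reader, pthread_self()))
                        return scan;
        return NULL;
}

static void ra_read_ex(struct ra_state *ras, struct ra_read *rar)
{
        pthread_mutex_lock(&ras->ras_lock);
        list_del_init(&rar->lrr_linkage);
        pthread_mutex_unlock(&ras->ras_lock);
}

int main(void)
{
        struct ra_state ras = { PTHREAD_MUTEX_INITIALIZER };
        struct ra_read rar;
        int found;

        INIT_LIST_HEAD(&ras.ras_read_beads);

        ra_read_in(&ras, &rar);
        pthread_mutex_lock(&ras.ras_lock);
        found = ra_read_get_locked(&ras) == &rar;
        pthread_mutex_unlock(&ras.ras_lock);
        printf("registered reader found: %d\n", found);

        ra_read_ex(&ras, &rar);
        pthread_mutex_lock(&ras.ras_lock);
        found = ra_read_get_locked(&ras) == NULL;
        pthread_mutex_unlock(&ras.ras_lock);
        printf("reader gone after unregister: %d\n", found);
        return 0;
}
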
        if (!cp->cpg_defer_uptodate && !PageUptodate(vmpage)) {
                CDEBUG(D_READA, "page index %lu, max_index: %lu\n",
                       ccc_index(cp), *max_index);
-               if (*max_index == 0 || ccc_index(cp) > *max_index)
-                       rc = cl_page_is_under_lock(env, io, page, max_index);
+               /* Disable the optimization on prefetching maximum readahead
+                * index because there is a race with lock cancellation. This
+                * optimization will be revived later.
+                * if (*max_index == 0 || ccc_index(cp) > *max_index) */
+               rc = cl_page_is_under_lock(env, io, page, max_index);
                if (rc == 0) {
                        cp->cpg_defer_uptodate = 1;
                        cp->cpg_ra_used = 0;
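
The branch removed above cached the highest page index known to be covered by
a lock and skipped cl_page_is_under_lock() for pages at or below it; as the
new comment says, that cache can go stale if the covering lock is cancelled
after *max_index was recorded. A small single-threaded sketch of the hazard
follows; check_under_lock() and lock_valid are hypothetical stand-ins, not
Lustre API.

#include <stdio.h>
#include <stdbool.h>

static bool lock_valid = true;          /* flips to false on cancellation */
static unsigned long covered_to = 128;  /* extent the lock covers */

/* Hypothetical stand-in for cl_page_is_under_lock(): returns 0 if some
 * lock covers the page, in which case *max_index is the covered bound. */
static int check_under_lock(unsigned long index, unsigned long *max_index)
{
        if (!lock_valid || index > covered_to)
                return -1;
        *max_index = covered_to;
        return 0;
}

int main(void)
{
        unsigned long max_index = 0;

        /* First page populates the cached bound. */
        (void)check_under_lock(10, &max_index);

        /* The lock is cancelled concurrently... */
        lock_valid = false;

        /* Cached path (the disabled optimization): 20 <= max_index, so
         * the check is skipped and page 20 is wrongly assumed covered. */
        if (!(max_index == 0 || 20 > max_index))
                printf("stale cache: page 20 assumed under lock\n");

        /* Safe path (what the patch does): always re-check. */
        if (check_under_lock(20, &max_index) != 0)
                printf("re-check: page 20 is not under lock\n");
        return 0;
}
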
        spin_lock_init(&ras->ras_lock);
        ras_reset(inode, ras, 0);
        ras->ras_requests = 0;
-       CFS_INIT_LIST_HEAD(&ras->ras_read_beads);
+       INIT_LIST_HEAD(&ras->ras_read_beads);
}
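
The final hunk completes the same libcfs-to-native conversion at readahead
init time. The full mapping applied by this patch is mechanical:

cfs_list_add()             ->  list_add()
cfs_list_del_init()        ->  list_del_init()
cfs_list_for_each_entry()  ->  list_for_each_entry()
CFS_INIT_LIST_HEAD()       ->  INIT_LIST_HEAD()
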
/*