* Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2013, Intel Corporation.
+ * Copyright (c) 2011, 2014, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
#define DEBUG_SUBSYSTEM S_CLASS
+#include <linux/sched.h>
#include <obd_class.h>
#include <obd_support.h>
#include <lustre_fid.h>
*/
#define cl_io_for_each(slice, io) \
- cfs_list_for_each_entry((slice), &io->ci_layers, cis_linkage)
+ list_for_each_entry((slice), &io->ci_layers, cis_linkage)
#define cl_io_for_each_reverse(slice, io) \
- cfs_list_for_each_entry_reverse((slice), &io->ci_layers, cis_linkage)
+ list_for_each_entry_reverse((slice), &io->ci_layers, cis_linkage)
static inline int cl_io_type_is_valid(enum cl_io_type type)
{
{
return cl_env_info(env)->clt_current_io != NULL;
}
-EXPORT_SYMBOL(cl_io_is_going);
/**
* cl_io invariant that holds at all times when exported cl_io_*() functions
LINVRNT(cl_io_invariant(io));
ENTRY;
- while (!cfs_list_empty(&io->ci_layers)) {
+ while (!list_empty(&io->ci_layers)) {
slice = container_of(io->ci_layers.prev, struct cl_io_slice,
cis_linkage);
- cfs_list_del_init(&slice->cis_linkage);
+ list_del_init(&slice->cis_linkage);
if (slice->cis_iop->op[io->ci_type].cio_fini != NULL)
slice->cis_iop->op[io->ci_type].cio_fini(env, slice);
/*
ENTRY;
io->ci_type = iot;
- CFS_INIT_LIST_HEAD(&io->ci_lockset.cls_todo);
- CFS_INIT_LIST_HEAD(&io->ci_lockset.cls_curr);
- CFS_INIT_LIST_HEAD(&io->ci_lockset.cls_done);
- CFS_INIT_LIST_HEAD(&io->ci_layers);
+ INIT_LIST_HEAD(&io->ci_lockset.cls_todo);
+ INIT_LIST_HEAD(&io->ci_lockset.cls_done);
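+ /* note: only cls_todo and cls_done remain; cl_lockset_lock() moves
+ * granted locks straight from cls_todo to cls_done */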
+ INIT_LIST_HEAD(&io->ci_layers);
result = 0;
cl_object_for_each(scan, obj) {
}
EXPORT_SYMBOL(cl_io_rw_init);
-static inline const struct lu_fid *
-cl_lock_descr_fid(const struct cl_lock_descr *descr)
-{
- return lu_object_fid(&descr->cld_obj->co_lu);
-}
-
static int cl_lock_descr_sort(const struct cl_lock_descr *d0,
const struct cl_lock_descr *d1)
{
- return lu_fid_cmp(cl_lock_descr_fid(d0), cl_lock_descr_fid(d1)) ?:
- __diff_normalize(d0->cld_start, d1->cld_start);
-}
-
-static int cl_lock_descr_cmp(const struct cl_lock_descr *d0,
- const struct cl_lock_descr *d1)
-{
- int ret;
-
- ret = lu_fid_cmp(cl_lock_descr_fid(d0), cl_lock_descr_fid(d1));
- if (ret)
- return ret;
- if (d0->cld_end < d1->cld_start)
- return -1;
- if (d0->cld_start > d0->cld_end)
- return 1;
- return 0;
-}
-
-static void cl_lock_descr_merge(struct cl_lock_descr *d0,
- const struct cl_lock_descr *d1)
-{
- d0->cld_start = min(d0->cld_start, d1->cld_start);
- d0->cld_end = max(d0->cld_end, d1->cld_end);
-
- if (d1->cld_mode == CLM_WRITE && d0->cld_mode != CLM_WRITE)
- d0->cld_mode = CLM_WRITE;
-
- if (d1->cld_mode == CLM_GROUP && d0->cld_mode != CLM_GROUP)
- d0->cld_mode = CLM_GROUP;
+ return lu_fid_cmp(lu_object_fid(&d0->cld_obj->co_lu),
+ lu_object_fid(&d1->cld_obj->co_lu));
}
/*
done = 1;
prev = NULL;
- cfs_list_for_each_entry_safe(curr, temp,
- &io->ci_lockset.cls_todo,
- cill_linkage) {
- if (prev != NULL) {
- switch (cl_lock_descr_sort(&prev->cill_descr,
- &curr->cill_descr)) {
- case 0:
- /*
- * IMPOSSIBLE: Identical locks are
- * already removed at
- * this point.
- */
- default:
- LBUG();
- case +1:
- cfs_list_move_tail(&curr->cill_linkage,
- &prev->cill_linkage);
- done = 0;
- continue; /* don't change prev: it's
- * still "previous" */
- case -1: /* already in order */
- break;
- }
- }
- prev = curr;
- }
- } while (!done);
- EXIT;
-}
-
-/**
- * Check whether \a queue contains locks matching \a need.
- *
- * \retval +ve there is a matching lock in the \a queue
- * \retval 0 there are no matching locks in the \a queue
- */
-int cl_queue_match(const cfs_list_t *queue,
- const struct cl_lock_descr *need)
-{
- struct cl_io_lock_link *scan;
-
- ENTRY;
- cfs_list_for_each_entry(scan, queue, cill_linkage) {
- if (cl_lock_descr_match(&scan->cill_descr, need))
- RETURN(+1);
- }
- RETURN(0);
+ list_for_each_entry_safe(curr, temp, &io->ci_lockset.cls_todo,
+ cill_linkage) {
+ if (prev != NULL) {
+ switch (cl_lock_descr_sort(&prev->cill_descr,
+ &curr->cill_descr)) {
+ case 0:
+ /*
+ * IMPOSSIBLE: Identical locks are
+ * already removed at this point.
+ */
+ default:
+ LBUG();
+ case +1:
+ list_move_tail(&curr->cill_linkage,
+ &prev->cill_linkage);
+ done = 0;
+ continue; /* don't change prev: it's
+ * still "previous" */
+ case -1: /* already in order */
+ break;
+ }
+ }
+ prev = curr;
+ }
+ } while (!done);
+ EXIT;
}
-EXPORT_SYMBOL(cl_queue_match);
-static int cl_queue_merge(const cfs_list_t *queue,
- const struct cl_lock_descr *need)
+static void cl_lock_descr_merge(struct cl_lock_descr *d0,
+ const struct cl_lock_descr *d1)
{
- struct cl_io_lock_link *scan;
-
- ENTRY;
- cfs_list_for_each_entry(scan, queue, cill_linkage) {
- if (cl_lock_descr_cmp(&scan->cill_descr, need))
- continue;
- cl_lock_descr_merge(&scan->cill_descr, need);
- CDEBUG(D_VFSTRACE, "lock: %d: [%lu, %lu]\n",
- scan->cill_descr.cld_mode, scan->cill_descr.cld_start,
- scan->cill_descr.cld_end);
- RETURN(+1);
- }
- RETURN(0);
+ d0->cld_start = min(d0->cld_start, d1->cld_start);
+ d0->cld_end = max(d0->cld_end, d1->cld_end);
-}
+ if (d1->cld_mode == CLM_WRITE && d0->cld_mode != CLM_WRITE)
+ d0->cld_mode = CLM_WRITE;
-static int cl_lockset_match(const struct cl_lockset *set,
- const struct cl_lock_descr *need)
-{
- return cl_queue_match(&set->cls_curr, need) ||
- cl_queue_match(&set->cls_done, need);
+ if (d1->cld_mode == CLM_GROUP && d0->cld_mode != CLM_GROUP)
+ d0->cld_mode = CLM_GROUP;
}
static int cl_lockset_merge(const struct cl_lockset *set,
- const struct cl_lock_descr *need)
+ const struct cl_lock_descr *need)
{
- return cl_queue_merge(&set->cls_todo, need) ||
- cl_lockset_match(set, need);
-}
-
-static int cl_lockset_lock_one(const struct lu_env *env,
- struct cl_io *io, struct cl_lockset *set,
- struct cl_io_lock_link *link)
-{
- struct cl_lock *lock;
- int result;
-
- ENTRY;
-
- lock = cl_lock_request(env, io, &link->cill_descr, "io", io);
+ struct cl_io_lock_link *scan;
- if (!IS_ERR(lock)) {
- link->cill_lock = lock;
- cfs_list_move(&link->cill_linkage, &set->cls_curr);
- if (!(link->cill_descr.cld_enq_flags & CEF_ASYNC)) {
- result = cl_wait(env, lock);
- if (result == 0)
- cfs_list_move(&link->cill_linkage,
- &set->cls_done);
- } else
- result = 0;
- } else
- result = PTR_ERR(lock);
- RETURN(result);
-}
-
-static void cl_lock_link_fini(const struct lu_env *env, struct cl_io *io,
- struct cl_io_lock_link *link)
-{
- struct cl_lock *lock = link->cill_lock;
+ ENTRY;
+ list_for_each_entry(scan, &set->cls_todo, cill_linkage) {
+ if (!cl_object_same(scan->cill_descr.cld_obj, need->cld_obj))
+ continue;
- ENTRY;
- cfs_list_del_init(&link->cill_linkage);
- if (lock != NULL) {
- cl_lock_release(env, lock, "io", io);
- link->cill_lock = NULL;
- }
- if (link->cill_fini != NULL)
- link->cill_fini(env, link);
- EXIT;
+ /* Merge locks for the same object, because the LDLM server may
+ * expand the lock extent; otherwise there is a deadlock case if
+ * two conflicting locks are queued for the same object and the
+ * server expands one lock to overlap the other.
+ * The side effect is that it can generate a multi-stripe lock
+ * that may cause a cascading problem. */
+ cl_lock_descr_merge(&scan->cill_descr, need);
+ CDEBUG(D_VFSTRACE, "lock: %d: [%lu, %lu]\n",
+ scan->cill_descr.cld_mode, scan->cill_descr.cld_start,
+ scan->cill_descr.cld_end);
+ RETURN(+1);
+ }
+ RETURN(0);
}
static int cl_lockset_lock(const struct lu_env *env, struct cl_io *io,
- struct cl_lockset *set)
+ struct cl_lockset *set)
{
- struct cl_io_lock_link *link;
- struct cl_io_lock_link *temp;
- struct cl_lock *lock;
- int result;
+ struct cl_io_lock_link *link;
+ struct cl_io_lock_link *temp;
+ int result;
- ENTRY;
- result = 0;
- cfs_list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage) {
- if (!cl_lockset_match(set, &link->cill_descr)) {
- /* XXX some locking to guarantee that locks aren't
- * expanded in between. */
- result = cl_lockset_lock_one(env, io, set, link);
- if (result != 0)
- break;
- } else
- cl_lock_link_fini(env, io, link);
- }
- if (result == 0) {
- cfs_list_for_each_entry_safe(link, temp,
- &set->cls_curr, cill_linkage) {
- lock = link->cill_lock;
- result = cl_wait(env, lock);
- if (result == 0)
- cfs_list_move(&link->cill_linkage,
- &set->cls_done);
- else
- break;
- }
- }
- RETURN(result);
+ ENTRY;
+ result = 0;
+ list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage) {
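+ /* cl_lock_request() is expected to return with the lock granted
+ * (or an error), so the link can move straight to cls_done */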
+ result = cl_lock_request(env, io, &link->cill_lock);
+ if (result < 0)
+ break;
+
+ list_move(&link->cill_linkage, &set->cls_done);
+ }
+ RETURN(result);
}
/**
ENTRY;
set = &io->ci_lockset;
- cfs_list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage)
- cl_lock_link_fini(env, io, link);
+ list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage) {
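+ /* links still on cls_todo never acquired a lock; just detach
+ * and finalize them */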
+ list_del_init(&link->cill_linkage);
+ if (link->cill_fini != NULL)
+ link->cill_fini(env, link);
+ }
- cfs_list_for_each_entry_safe(link, temp, &set->cls_curr, cill_linkage)
- cl_lock_link_fini(env, io, link);
+ list_for_each_entry_safe(link, temp, &set->cls_done, cill_linkage) {
+ list_del_init(&link->cill_linkage);
+ cl_lock_release(env, &link->cill_lock);
+ if (link->cill_fini != NULL)
+ link->cill_fini(env, link);
+ }
- cfs_list_for_each_entry_safe(link, temp, &set->cls_done, cill_linkage) {
- cl_unuse(env, link->cill_lock);
- cl_lock_link_fini(env, io, link);
- }
- cl_io_for_each_reverse(scan, io) {
- if (scan->cis_iop->op[io->ci_type].cio_unlock != NULL)
- scan->cis_iop->op[io->ci_type].cio_unlock(env, scan);
- }
- io->ci_state = CIS_UNLOCKED;
- LASSERT(!cl_env_info(env)->clt_counters[CNL_TOP].ctc_nr_locks_acquired);
- EXIT;
+ cl_io_for_each_reverse(scan, io) {
+ if (scan->cis_iop->op[io->ci_type].cio_unlock != NULL)
+ scan->cis_iop->op[io->ci_type].cio_unlock(env, scan);
+ }
+ io->ci_state = CIS_UNLOCKED;
+ LASSERT(!cl_env_info(env)->clt_counters[CNL_TOP].ctc_nr_locks_acquired);
+ EXIT;
}
EXPORT_SYMBOL(cl_io_unlock);
}
EXIT;
}
-EXPORT_SYMBOL(cl_io_rw_advance);
/**
* Adds a lock to a lockset.
if (cl_lockset_merge(&io->ci_lockset, &link->cill_descr))
result = +1;
else {
- cfs_list_add(&link->cill_linkage, &io->ci_lockset.cls_todo);
+ list_add(&link->cill_linkage, &io->ci_lockset.cls_todo);
result = 0;
}
RETURN(result);
int cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io,
struct cl_lock_descr *descr)
{
- struct cl_io_lock_link *link;
- int result;
+ struct cl_io_lock_link *link;
+ int result;
- ENTRY;
- OBD_ALLOC_PTR(link);
- if (link != NULL) {
- link->cill_descr = *descr;
- link->cill_fini = cl_free_io_lock_link;
- result = cl_io_lock_add(env, io, link);
- if (result) /* lock match */
- link->cill_fini(env, link);
- } else
- result = -ENOMEM;
+ ENTRY;
+ OBD_ALLOC_PTR(link);
+ if (link != NULL) {
+ link->cill_descr = *descr;
+ link->cill_fini = cl_free_io_lock_link;
+ result = cl_io_lock_add(env, io, link);
+ if (result) /* lock match */
+ link->cill_fini(env, link);
+ } else
+ result = -ENOMEM;
- RETURN(result);
+ RETURN(result);
}
EXPORT_SYMBOL(cl_io_lock_alloc_add);
}
EXPORT_SYMBOL(cl_io_end);
-static const struct cl_page_slice *
-cl_io_slice_page(const struct cl_io_slice *ios, struct cl_page *page)
-{
- const struct cl_page_slice *slice;
-
- slice = cl_page_at(page, ios->cis_obj->co_lu.lo_dev->ld_type);
- LINVRNT(slice != NULL);
- return slice;
-}
-
/**
- * Called by read io, when page has to be read from the server.
+ * Called by read io to decide the readahead extent.
*
- * \see cl_io_operations::cio_read_page()
+ * \see cl_io_operations::cio_read_ahead()
*/
-int cl_io_read_page(const struct lu_env *env, struct cl_io *io,
- struct cl_page *page)
+int cl_io_read_ahead(const struct lu_env *env, struct cl_io *io,
+ pgoff_t start, struct cl_read_ahead *ra)
{
- const struct cl_io_slice *scan;
- struct cl_2queue *queue;
- int result = 0;
-
- LINVRNT(io->ci_type == CIT_READ || io->ci_type == CIT_FAULT);
- LINVRNT(cl_page_is_owned(page, io));
- LINVRNT(io->ci_state == CIS_IO_GOING || io->ci_state == CIS_LOCKED);
- LINVRNT(cl_io_invariant(io));
- ENTRY;
+ const struct cl_io_slice *scan;
+ int result = 0;
- queue = &io->ci_queue;
+ LINVRNT(io->ci_type == CIT_READ || io->ci_type == CIT_FAULT);
+ LINVRNT(io->ci_state == CIS_IO_GOING || io->ci_state == CIS_LOCKED);
+ LINVRNT(cl_io_invariant(io));
+ ENTRY;
- cl_2queue_init(queue);
- /*
- * ->cio_read_page() methods called in the loop below are supposed to
- * never block waiting for network (the only subtle point is the
- * creation of new pages for read-ahead that might result in cache
- * shrinking, but currently only clean pages are shrunk and this
- * requires no network io).
- *
- * Should this ever starts blocking, retry loop would be needed for
- * "parallel io" (see CLO_REPEAT loops in cl_lock.c).
- */
- cl_io_for_each(scan, io) {
- if (scan->cis_iop->cio_read_page != NULL) {
- const struct cl_page_slice *slice;
+ cl_io_for_each(scan, io) {
+ if (scan->cis_iop->cio_read_ahead == NULL)
+ continue;
- slice = cl_io_slice_page(scan, page);
- LINVRNT(slice != NULL);
- result = scan->cis_iop->cio_read_page(env, scan, slice);
- if (result != 0)
- break;
- }
- }
- if (result == 0 && queue->c2_qin.pl_nr > 0)
- result = cl_io_submit_rw(env, io, CRT_READ, queue);
- /*
- * Unlock unsent pages in case of error.
- */
- cl_page_list_disown(env, io, &queue->c2_qin);
- cl_2queue_fini(env, queue);
- RETURN(result);
+ result = scan->cis_iop->cio_read_ahead(env, scan, start, ra);
+ if (result != 0)
+ break;
+ }
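+ /* a positive result stops the scan (the layer has presumably filled
+ * *ra) and is reported as success */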
+ RETURN(result > 0 ? 0 : result);
}
-EXPORT_SYMBOL(cl_io_read_page);
+EXPORT_SYMBOL(cl_io_read_ahead);
/**
* Commit a list of contiguous pages into writeback cache.
/*
* If ->cio_submit() failed, no pages were sent.
*/
- LASSERT(ergo(result != 0, cfs_list_empty(&queue->c2_qout.pl_pages)));
+ LASSERT(ergo(result != 0, list_empty(&queue->c2_qout.pl_pages)));
RETURN(result);
}
EXPORT_SYMBOL(cl_io_submit_rw);
* If \a timeout is zero, it means to wait for the IO unconditionally.
*/
int cl_io_submit_sync(const struct lu_env *env, struct cl_io *io,
- enum cl_req_type iot, struct cl_2queue *queue,
+ enum cl_req_type iot, struct cl_2queue *queue,
long timeout)
{
- struct cl_sync_io *anchor = &cl_env_info(env)->clt_anchor;
- struct cl_page *pg;
- int rc;
+ struct cl_sync_io *anchor = &cl_env_info(env)->clt_anchor;
+ struct cl_page *pg;
+ int rc;
- cl_page_list_for_each(pg, &queue->c2_qin) {
- LASSERT(pg->cp_sync_io == NULL);
- pg->cp_sync_io = anchor;
- }
+ cl_page_list_for_each(pg, &queue->c2_qin) {
+ LASSERT(pg->cp_sync_io == NULL);
+ pg->cp_sync_io = anchor;
+ }
- cl_sync_io_init(anchor, queue->c2_qin.pl_nr);
+ cl_sync_io_init(anchor, queue->c2_qin.pl_nr, &cl_sync_io_end);
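+ /* cl_sync_io_end() below simply wakes up waiters and clears
+ * csi_barrier */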
rc = cl_io_submit_rw(env, io, iot, queue);
- if (rc == 0) {
- /*
- * If some pages weren't sent for any reason (e.g.,
- * read found up-to-date pages in the cache, or write found
- * clean pages), count them as completed to avoid infinite
- * wait.
- */
- cl_page_list_for_each(pg, &queue->c2_qin) {
- pg->cp_sync_io = NULL;
- cl_sync_io_note(anchor, +1);
- }
-
- /* wait for the IO to be finished. */
- rc = cl_sync_io_wait(env, io, &queue->c2_qout,
- anchor, timeout);
- } else {
- LASSERT(cfs_list_empty(&queue->c2_qout.pl_pages));
- cl_page_list_for_each(pg, &queue->c2_qin)
- pg->cp_sync_io = NULL;
- }
- return rc;
+ if (rc == 0) {
+ /*
+ * If some pages weren't sent for any reason (e.g.,
+ * read found up-to-date pages in the cache, or write found
+ * clean pages), count them as completed to avoid infinite
+ * wait.
+ */
+ cl_page_list_for_each(pg, &queue->c2_qin) {
+ pg->cp_sync_io = NULL;
+ cl_sync_io_note(env, anchor, 1);
+ }
+
+ /* wait for the IO to be finished. */
+ rc = cl_sync_io_wait(env, anchor, timeout);
+ cl_page_list_assume(env, io, &queue->c2_qout);
+ } else {
+ LASSERT(list_empty(&queue->c2_qout.pl_pages));
+ cl_page_list_for_each(pg, &queue->c2_qin)
+ pg->cp_sync_io = NULL;
+ }
+ return rc;
}
EXPORT_SYMBOL(cl_io_submit_sync);
}
return result;
}
-EXPORT_SYMBOL(cl_io_cancel);
/**
* Main io loop.
struct cl_object *obj,
const struct cl_io_operations *ops)
{
- cfs_list_t *linkage = &slice->cis_linkage;
+ struct list_head *linkage = &slice->cis_linkage;
LASSERT((linkage->prev == NULL && linkage->next == NULL) ||
- cfs_list_empty(linkage));
+ list_empty(linkage));
ENTRY;
- cfs_list_add_tail(linkage, &io->ci_layers);
+ list_add_tail(linkage, &io->ci_layers);
slice->cis_io = io;
slice->cis_obj = obj;
slice->cis_iop = ops;
{
ENTRY;
plist->pl_nr = 0;
- CFS_INIT_LIST_HEAD(&plist->pl_pages);
+ INIT_LIST_HEAD(&plist->pl_pages);
plist->pl_owner = current;
EXIT;
}
LASSERT(page->cp_owner != NULL);
LINVRNT(plist->pl_owner == current);
- LASSERT(cfs_list_empty(&page->cp_batch));
- cfs_list_add_tail(&page->cp_batch, &plist->pl_pages);
+ LASSERT(list_empty(&page->cp_batch));
+ list_add_tail(&page->cp_batch, &plist->pl_pages);
++plist->pl_nr;
lu_ref_add_at(&page->cp_reference, &page->cp_queue_ref, "queue", plist);
cl_page_get(page);
LINVRNT(plist->pl_owner == current);
ENTRY;
- cfs_list_del_init(&page->cp_batch);
+ list_del_init(&page->cp_batch);
--plist->pl_nr;
lu_ref_del_at(&page->cp_reference, &page->cp_queue_ref, "queue", plist);
cl_page_put(env, page);
LINVRNT(src->pl_owner == current);
ENTRY;
- cfs_list_move_tail(&page->cp_batch, &dst->pl_pages);
+ list_move_tail(&page->cp_batch, &dst->pl_pages);
--src->pl_nr;
++dst->pl_nr;
lu_ref_set_at(&page->cp_reference, &page->cp_queue_ref, "queue",
LINVRNT(src->pl_owner == current);
ENTRY;
- cfs_list_move(&page->cp_batch, &dst->pl_pages);
+ list_move(&page->cp_batch, &dst->pl_pages);
--src->pl_nr;
++dst->pl_nr;
lu_ref_set_at(&page->cp_reference, &page->cp_queue_ref, "queue",
}
EXPORT_SYMBOL(cl_page_list_splice);
-void cl_page_disown0(const struct lu_env *env,
- struct cl_io *io, struct cl_page *pg);
-
/**
* Disowns pages in a queue.
*/
cl_page_list_for_each_safe(page, temp, plist) {
LASSERT(plist->pl_nr > 0);
- cfs_list_del_init(&page->cp_batch);
+ list_del_init(&page->cp_batch);
--plist->pl_nr;
/*
* cl_page_disown0 rather than usual cl_page_disown() is used,
}
RETURN(result);
}
-EXPORT_SYMBOL(cl_page_list_own);
/**
* Assumes all pages in a queue.
cl_page_list_for_each(page, plist)
cl_page_assume(env, io, page);
}
-EXPORT_SYMBOL(cl_page_list_assume);
/**
* Discards all pages in a queue.
cl_page_discard(env, io, page);
EXIT;
}
-EXPORT_SYMBOL(cl_page_list_discard);
/**
* Initialize dual page queue.
cl_page_list_assume(env, io, &queue->c2_qin);
cl_page_list_assume(env, io, &queue->c2_qout);
}
-EXPORT_SYMBOL(cl_2queue_assume);
/**
* Finalize both page lists of a 2-queue.
const struct cl_req_operations *ops)
{
ENTRY;
- cfs_list_add_tail(&slice->crs_linkage, &req->crq_layers);
+ list_add_tail(&slice->crs_linkage, &req->crq_layers);
slice->crs_dev = dev;
slice->crs_ops = ops;
slice->crs_req = req;
{
unsigned i;
- LASSERT(cfs_list_empty(&req->crq_pages));
+ LASSERT(list_empty(&req->crq_pages));
LASSERT(req->crq_nrpages == 0);
- LINVRNT(cfs_list_empty(&req->crq_layers));
+ LINVRNT(list_empty(&req->crq_layers));
LINVRNT(equi(req->crq_nrobjs > 0, req->crq_o != NULL));
ENTRY;
ENTRY;
result = 0;
- cfs_list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
+ list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
dev = lu2cl_dev(slice->cpl_obj->co_lu.lo_dev);
if (dev->cd_ops->cdo_req_init != NULL) {
result = dev->cd_ops->cdo_req_init(env,
/*
* for the lack of list_for_each_entry_reverse_safe()...
*/
- while (!cfs_list_empty(&req->crq_layers)) {
- slice = cfs_list_entry(req->crq_layers.prev,
- struct cl_req_slice, crs_linkage);
- cfs_list_del_init(&slice->crs_linkage);
+ while (!list_empty(&req->crq_layers)) {
+ slice = list_entry(req->crq_layers.prev,
+ struct cl_req_slice, crs_linkage);
+ list_del_init(&slice->crs_linkage);
if (slice->crs_ops->cro_completion != NULL)
slice->crs_ops->cro_completion(env, slice, rc);
}
{
struct cl_object *obj;
struct cl_req_obj *rqo;
- int i;
+ unsigned int i;
ENTRY;
- LASSERT(cfs_list_empty(&page->cp_flight));
+ LASSERT(list_empty(&page->cp_flight));
LASSERT(page->cp_req == NULL);
CL_PAGE_DEBUG(D_PAGE, env, page, "req %p, %d, %u\n",
req, req->crq_type, req->crq_nrpages);
- cfs_list_add_tail(&page->cp_flight, &req->crq_pages);
+ list_add_tail(&page->cp_flight, &req->crq_pages);
++req->crq_nrpages;
page->cp_req = req;
obj = cl_object_top(page->cp_obj);
ENTRY;
- LASSERT(!cfs_list_empty(&page->cp_flight));
+ LASSERT(!list_empty(&page->cp_flight));
LASSERT(req->crq_nrpages > 0);
- cfs_list_del_init(&page->cp_flight);
+ list_del_init(&page->cp_flight);
--req->crq_nrpages;
page->cp_req = NULL;
EXIT;
*/
int cl_req_prep(const struct lu_env *env, struct cl_req *req)
{
- int i;
+ unsigned int i;
int result;
const struct cl_req_slice *slice;
LASSERT(req->crq_o[i].ro_obj != NULL);
result = 0;
- cfs_list_for_each_entry(slice, &req->crq_layers, crs_linkage) {
+ list_for_each_entry(slice, &req->crq_layers, crs_linkage) {
if (slice->crs_ops->cro_prep != NULL) {
result = slice->crs_ops->cro_prep(env, slice);
if (result != 0)
* for the same request.
*/
void cl_req_attr_set(const struct lu_env *env, struct cl_req *req,
- struct cl_req_attr *attr, obd_valid flags)
+ struct cl_req_attr *attr, u64 flags)
{
const struct cl_req_slice *slice;
struct cl_page *page;
- int i;
+ unsigned int i;
- LASSERT(!cfs_list_empty(&req->crq_pages));
+ LASSERT(!list_empty(&req->crq_pages));
ENTRY;
/* Take any page to use as a model. */
- page = cfs_list_entry(req->crq_pages.next, struct cl_page, cp_flight);
+ page = list_entry(req->crq_pages.next, struct cl_page, cp_flight);
for (i = 0; i < req->crq_nrobjs; ++i) {
- cfs_list_for_each_entry(slice, &req->crq_layers, crs_linkage) {
+ list_for_each_entry(slice, &req->crq_layers, crs_linkage) {
const struct cl_page_slice *scan;
const struct cl_object *obj;
}
EXPORT_SYMBOL(cl_req_attr_set);
-/* XXX complete(), init_completion(), and wait_for_completion(), until they are
- * implemented in libcfs. */
-#ifdef __KERNEL__
-# include <linux/sched.h>
-#else /* __KERNEL__ */
-# include <liblustre.h>
-#endif
+/* cl_sync_io_end() assumes the caller will call cl_sync_io_wait() to
+ * wait for the IO to finish. */
+void cl_sync_io_end(const struct lu_env *env, struct cl_sync_io *anchor)
+{
+ wake_up_all(&anchor->csi_waitq);
+
+ /* it's safe to nuke or reuse anchor now */
+ atomic_set(&anchor->csi_barrier, 0);
+}
+EXPORT_SYMBOL(cl_sync_io_end);
/**
- * Initialize synchronous io wait anchor, for transfer of \a nrpages pages.
+ * Initialize synchronous io wait \a anchor for \a nr entities.
*/
-void cl_sync_io_init(struct cl_sync_io *anchor, int nrpages)
+void cl_sync_io_init(struct cl_sync_io *anchor, int nr,
+ void (*end)(const struct lu_env *, struct cl_sync_io *))
{
ENTRY;
+ memset(anchor, 0, sizeof(*anchor));
init_waitqueue_head(&anchor->csi_waitq);
- atomic_set(&anchor->csi_sync_nr, nrpages);
- atomic_set(&anchor->csi_barrier, nrpages > 0);
+ atomic_set(&anchor->csi_sync_nr, nr);
+ atomic_set(&anchor->csi_barrier, nr > 0);
anchor->csi_sync_rc = 0;
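+ /* the end callback runs once csi_sync_nr drops to zero in
+ * cl_sync_io_note() */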
+ anchor->csi_end_io = end;
+ LASSERT(end != NULL);
EXIT;
}
EXPORT_SYMBOL(cl_sync_io_init);
/**
- * Wait until all transfer completes. Transfer completion routine has to call
- * cl_sync_io_note() for every page.
+ * Wait until all IO completes. Transfer completion routine has to call
+ * cl_sync_io_note() for every entity.
*/
-int cl_sync_io_wait(const struct lu_env *env, struct cl_io *io,
- struct cl_page_list *queue, struct cl_sync_io *anchor,
- long timeout)
+int cl_sync_io_wait(const struct lu_env *env, struct cl_sync_io *anchor,
+ long timeout)
{
- struct l_wait_info lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(timeout),
- NULL, NULL, NULL);
- int rc;
- ENTRY;
+ struct l_wait_info lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(timeout),
+ NULL, NULL, NULL);
+ int rc;
+ ENTRY;
- LASSERT(timeout >= 0);
+ LASSERT(timeout >= 0);
- rc = l_wait_event(anchor->csi_waitq,
+ rc = l_wait_event(anchor->csi_waitq,
atomic_read(&anchor->csi_sync_nr) == 0,
- &lwi);
- if (rc < 0) {
- CERROR("SYNC IO failed with error: %d, try to cancel "
- "%d remaining pages\n",
+ &lwi);
+ if (rc < 0) {
+ CERROR("IO failed: %d, still wait for %d remaining entries\n",
rc, atomic_read(&anchor->csi_sync_nr));
- (void)cl_io_cancel(env, io, queue);
-
- lwi = (struct l_wait_info) { 0 };
- (void)l_wait_event(anchor->csi_waitq,
+ lwi = (struct l_wait_info) { 0 };
+ (void)l_wait_event(anchor->csi_waitq,
atomic_read(&anchor->csi_sync_nr) == 0,
- &lwi);
- } else {
- rc = anchor->csi_sync_rc;
- }
+ &lwi);
+ } else {
+ rc = anchor->csi_sync_rc;
+ }
LASSERT(atomic_read(&anchor->csi_sync_nr) == 0);
- cl_page_list_assume(env, io, queue);
/* wait until cl_sync_io_note() has done wakeup */
while (unlikely(atomic_read(&anchor->csi_barrier) != 0)) {
-#ifdef __KERNEL__
cpu_relax();
-#endif
}
-
- POISON(anchor, 0x5a, sizeof *anchor);
RETURN(rc);
}
EXPORT_SYMBOL(cl_sync_io_wait);
/**
* Indicate that transfer of a single page completed.
*/
-void cl_sync_io_note(struct cl_sync_io *anchor, int ioret)
+void cl_sync_io_note(const struct lu_env *env, struct cl_sync_io *anchor,
+ int ioret)
{
- ENTRY;
- if (anchor->csi_sync_rc == 0 && ioret < 0)
- anchor->csi_sync_rc = ioret;
- /*
- * Synchronous IO done without releasing page lock (e.g., as a part of
- * ->{prepare,commit}_write(). Completion is used to signal the end of
- * IO.
- */
+ ENTRY;
+ if (anchor->csi_sync_rc == 0 && ioret < 0)
+ anchor->csi_sync_rc = ioret;
+ /*
+ * Synchronous IO done without releasing page lock (e.g., as a part of
+ * ->{prepare,commit}_write(). Completion is used to signal the end of
+ * IO.
+ */
LASSERT(atomic_read(&anchor->csi_sync_nr) > 0);
if (atomic_dec_and_test(&anchor->csi_sync_nr)) {
- wake_up_all(&anchor->csi_waitq);
- /* it's safe to nuke or reuse anchor now */
- atomic_set(&anchor->csi_barrier, 0);
+ LASSERT(anchor->csi_end_io != NULL);
+ anchor->csi_end_io(env, anchor);
+ /* Can't access anchor any more */
}
EXIT;
}