* Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2012, Intel Corporation.
+ * Copyright (c) 2011, 2013, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
* Client IO.
*
* Author: Nikita Danilov <nikita.danilov@sun.com>
+ * Author: Jinshan Xiong <jinshan.xiong@intel.com>
*/
#define DEBUG_SUBSYSTEM S_CLASS
switch(io->ci_type) {
case CIT_READ:
case CIT_WRITE:
+ break;
case CIT_FAULT:
case CIT_FSYNC:
LASSERT(!io->ci_need_restart);
ENTRY;
- if (io->ci_lockreq == CILR_PEEK) {
- lock = cl_lock_peek(env, io, &link->cill_descr, "io", io);
- if (lock == NULL)
- lock = ERR_PTR(-ENODATA);
- } else
- lock = cl_lock_request(env, io, &link->cill_descr, "io", io);
+ lock = cl_lock_request(env, io, &link->cill_descr, "io", io);
if (!IS_ERR(lock)) {
link->cill_lock = lock;
*/
void cl_page_list_init(struct cl_page_list *plist)
{
- ENTRY;
- plist->pl_nr = 0;
- CFS_INIT_LIST_HEAD(&plist->pl_pages);
- plist->pl_owner = cfs_current();
- EXIT;
+ ENTRY;
+ /* Start with an empty list owned by the calling task; the
+ * portability wrapper cfs_current() is replaced by the kernel's
+ * native current macro. */
+ plist->pl_nr = 0;
+ CFS_INIT_LIST_HEAD(&plist->pl_pages);
+ plist->pl_owner = current;
+ EXIT;
}
EXPORT_SYMBOL(cl_page_list_init);
*/
void cl_page_list_add(struct cl_page_list *plist, struct cl_page *page)
{
- ENTRY;
- /* it would be better to check that page is owned by "current" io, but
- * it is not passed here. */
- LASSERT(page->cp_owner != NULL);
- LINVRNT(plist->pl_owner == cfs_current());
+ ENTRY;
+ /* it would be better to check that page is owned by "current" io, but
+ * it is not passed here. */
+ LASSERT(page->cp_owner != NULL);
+ LINVRNT(plist->pl_owner == current);
lockdep_off();
mutex_lock(&page->cp_mutex);
lockdep_on();
- LASSERT(cfs_list_empty(&page->cp_batch));
- cfs_list_add_tail(&page->cp_batch, &plist->pl_pages);
- ++plist->pl_nr;
- page->cp_queue_ref = lu_ref_add(&page->cp_reference, "queue", plist);
- cl_page_get(page);
- EXIT;
+ LASSERT(cfs_list_empty(&page->cp_batch));
+ cfs_list_add_tail(&page->cp_batch, &plist->pl_pages);
+ ++plist->pl_nr;
+ /* lu_ref_add_at() initializes the link in place instead of
+ * returning one — assumes cp_queue_ref changed from a pointer to an
+ * embedded struct lu_ref_link in the header; TODO confirm. */
+ lu_ref_add_at(&page->cp_reference, &page->cp_queue_ref, "queue", plist);
+ cl_page_get(page);
+ EXIT;
}
EXPORT_SYMBOL(cl_page_list_add);
* Removes a page from a page list.
*/
void cl_page_list_del(const struct lu_env *env,
- struct cl_page_list *plist, struct cl_page *page)
+ struct cl_page_list *plist, struct cl_page *page)
{
- LASSERT(plist->pl_nr > 0);
- LINVRNT(plist->pl_owner == cfs_current());
+ LASSERT(plist->pl_nr > 0);
+ LINVRNT(plist->pl_owner == current);
- ENTRY;
- cfs_list_del_init(&page->cp_batch);
+ ENTRY;
+ cfs_list_del_init(&page->cp_batch);
lockdep_off();
mutex_unlock(&page->cp_mutex);
lockdep_on();
- --plist->pl_nr;
- lu_ref_del_at(&page->cp_reference, page->cp_queue_ref, "queue", plist);
- cl_page_put(env, page);
- EXIT;
+ --plist->pl_nr;
+ /* _at variant now takes the address of the embedded link, matching
+ * the lu_ref_add_at() call in cl_page_list_add() */
+ lu_ref_del_at(&page->cp_reference, &page->cp_queue_ref, "queue", plist);
+ cl_page_put(env, page);
+ EXIT;
}
EXPORT_SYMBOL(cl_page_list_del);
* Moves a page from one page list to another.
*/
void cl_page_list_move(struct cl_page_list *dst, struct cl_page_list *src,
- struct cl_page *page)
-{
- LASSERT(src->pl_nr > 0);
- LINVRNT(dst->pl_owner == cfs_current());
- LINVRNT(src->pl_owner == cfs_current());
-
- ENTRY;
- cfs_list_move_tail(&page->cp_batch, &dst->pl_pages);
- --src->pl_nr;
- ++dst->pl_nr;
- lu_ref_set_at(&page->cp_reference,
- page->cp_queue_ref, "queue", src, dst);
- EXIT;
+ struct cl_page *page)
+{
+ LASSERT(src->pl_nr > 0);
+ LINVRNT(dst->pl_owner == current);
+ LINVRNT(src->pl_owner == current);
+
+ ENTRY;
+ /* the page keeps its "queue" reference across the move; only the
+ * reference's tag is switched from src to dst */
+ cfs_list_move_tail(&page->cp_batch, &dst->pl_pages);
+ --src->pl_nr;
+ ++dst->pl_nr;
+ lu_ref_set_at(&page->cp_reference, &page->cp_queue_ref, "queue",
+ src, dst);
+ EXIT;
}
EXPORT_SYMBOL(cl_page_list_move);
*/
void cl_page_list_splice(struct cl_page_list *list, struct cl_page_list *head)
{
- struct cl_page *page;
- struct cl_page *tmp;
+ struct cl_page *page;
+ struct cl_page *tmp;
- LINVRNT(list->pl_owner == cfs_current());
- LINVRNT(head->pl_owner == cfs_current());
+ LINVRNT(list->pl_owner == current);
+ LINVRNT(head->pl_owner == current);
- ENTRY;
- cl_page_list_for_each_safe(page, tmp, list)
- cl_page_list_move(head, list, page);
- EXIT;
+ ENTRY;
+ /* safe iterator: cl_page_list_move() unlinks pages as we go */
+ cl_page_list_for_each_safe(page, tmp, list)
+ cl_page_list_move(head, list, page);
+ EXIT;
}
EXPORT_SYMBOL(cl_page_list_splice);
* Disowns pages in a queue.
*/
void cl_page_list_disown(const struct lu_env *env,
- struct cl_io *io, struct cl_page_list *plist)
+ struct cl_io *io, struct cl_page_list *plist)
{
- struct cl_page *page;
- struct cl_page *temp;
+ struct cl_page *page;
+ struct cl_page *temp;
- LINVRNT(plist->pl_owner == cfs_current());
+ LINVRNT(plist->pl_owner == current);
- ENTRY;
- cl_page_list_for_each_safe(page, temp, plist) {
- LASSERT(plist->pl_nr > 0);
+ ENTRY;
+ cl_page_list_for_each_safe(page, temp, plist) {
+ LASSERT(plist->pl_nr > 0);
- cfs_list_del_init(&page->cp_batch);
+ cfs_list_del_init(&page->cp_batch);
lockdep_off();
mutex_unlock(&page->cp_mutex);
lockdep_on();
- --plist->pl_nr;
- /*
- * cl_page_disown0 rather than usual cl_page_disown() is used,
- * because pages are possibly in CPS_FREEING state already due
- * to the call to cl_page_list_discard().
- */
- /*
- * XXX cl_page_disown0() will fail if page is not locked.
- */
- cl_page_disown0(env, io, page);
- lu_ref_del(&page->cp_reference, "queue", plist);
- cl_page_put(env, page);
- }
- EXIT;
+ --plist->pl_nr;
+ /*
+ * cl_page_disown0 rather than usual cl_page_disown() is used,
+ * because pages are possibly in CPS_FREEING state already due
+ * to the call to cl_page_list_discard().
+ */
+ /*
+ * XXX cl_page_disown0() will fail if page is not locked.
+ */
+ cl_page_disown0(env, io, page);
+ /* drop the per-queue reference taken in cl_page_list_add(),
+ * using the _at variant to match the embedded link */
+ lu_ref_del_at(&page->cp_reference, &page->cp_queue_ref, "queue",
+ plist);
+ cl_page_put(env, page);
+ }
+ EXIT;
}
EXPORT_SYMBOL(cl_page_list_disown);
*/
void cl_page_list_fini(const struct lu_env *env, struct cl_page_list *plist)
{
- struct cl_page *page;
- struct cl_page *temp;
+ struct cl_page *page;
+ struct cl_page *temp;
- LINVRNT(plist->pl_owner == cfs_current());
+ LINVRNT(plist->pl_owner == current);
- ENTRY;
- cl_page_list_for_each_safe(page, temp, plist)
- cl_page_list_del(env, plist, page);
- LASSERT(plist->pl_nr == 0);
- EXIT;
+ ENTRY;
+ /* release every page; the queue must be empty afterwards */
+ cl_page_list_for_each_safe(page, temp, plist)
+ cl_page_list_del(env, plist, page);
+ LASSERT(plist->pl_nr == 0);
+ EXIT;
}
EXPORT_SYMBOL(cl_page_list_fini);
* Owns all pages in a queue.
*/
int cl_page_list_own(const struct lu_env *env,
- struct cl_io *io, struct cl_page_list *plist)
-{
- struct cl_page *page;
- struct cl_page *temp;
- pgoff_t index = 0;
- int result;
-
- LINVRNT(plist->pl_owner == cfs_current());
-
- ENTRY;
- result = 0;
- cl_page_list_for_each_safe(page, temp, plist) {
- LASSERT(index <= page->cp_index);
- index = page->cp_index;
- if (cl_page_own(env, io, page) == 0)
- result = result ?: page->cp_error;
- else
- cl_page_list_del(env, plist, page);
- }
- RETURN(result);
+ struct cl_io *io, struct cl_page_list *plist)
+{
+ struct cl_page *page;
+ struct cl_page *temp;
+ pgoff_t index = 0;
+ int result;
+
+ LINVRNT(plist->pl_owner == current);
+
+ ENTRY;
+ result = 0;
+ cl_page_list_for_each_safe(page, temp, plist) {
+ /* pages are expected in non-decreasing cp_index order */
+ LASSERT(index <= page->cp_index);
+ index = page->cp_index;
+ if (cl_page_own(env, io, page) == 0)
+ /* owned: latch the first page error seen, if any */
+ result = result ?: page->cp_error;
+ else
+ /* could not own: drop the page from the queue */
+ cl_page_list_del(env, plist, page);
+ }
+ RETURN(result);
}
EXPORT_SYMBOL(cl_page_list_own);
* Assumes all pages in a queue.
*/
void cl_page_list_assume(const struct lu_env *env,
- struct cl_io *io, struct cl_page_list *plist)
+ struct cl_io *io, struct cl_page_list *plist)
{
- struct cl_page *page;
+ struct cl_page *page;
- LINVRNT(plist->pl_owner == cfs_current());
+ LINVRNT(plist->pl_owner == current);
- cl_page_list_for_each(page, plist)
- cl_page_assume(env, io, page);
+ /* assume ownership of each queued page on behalf of this io */
+ cl_page_list_for_each(page, plist)
+ cl_page_assume(env, io, page);
}
EXPORT_SYMBOL(cl_page_list_assume);
* Discards all pages in a queue.
*/
void cl_page_list_discard(const struct lu_env *env, struct cl_io *io,
- struct cl_page_list *plist)
+ struct cl_page_list *plist)
{
- struct cl_page *page;
+ struct cl_page *page;
- LINVRNT(plist->pl_owner == cfs_current());
- ENTRY;
- cl_page_list_for_each(page, plist)
- cl_page_discard(env, io, page);
- EXIT;
+ LINVRNT(plist->pl_owner == current);
+ ENTRY;
+ /* discard each page; pages remain linked on the queue */
+ cl_page_list_for_each(page, plist)
+ cl_page_discard(env, io, page);
+ EXIT;
}
EXPORT_SYMBOL(cl_page_list_discard);
/**
- * Unmaps all pages in a queue from user virtual memory.
- */
-int cl_page_list_unmap(const struct lu_env *env, struct cl_io *io,
- struct cl_page_list *plist)
-{
- struct cl_page *page;
- int result;
-
- LINVRNT(plist->pl_owner == cfs_current());
- ENTRY;
- result = 0;
- cl_page_list_for_each(page, plist) {
- result = cl_page_unmap(env, io, page);
- if (result != 0)
- break;
- }
- RETURN(result);
-}
-EXPORT_SYMBOL(cl_page_list_unmap);
-
-/**
* Initialize dual page queue.
*/
void cl_2queue_init(struct cl_2queue *queue)
if (req->crq_o != NULL) {
for (i = 0; i < req->crq_nrobjs; ++i) {
struct cl_object *obj = req->crq_o[i].ro_obj;
- if (obj != NULL) {
- lu_object_ref_del_at(&obj->co_lu,
- req->crq_o[i].ro_obj_ref,
- "cl_req", req);
- cl_object_put(env, obj);
- }
+ if (obj != NULL) {
+ lu_object_ref_del_at(&obj->co_lu,
+ &req->crq_o[i].ro_obj_ref,
+ "cl_req", req);
+ cl_object_put(env, obj);
+ }
}
OBD_FREE(req->crq_o, req->crq_nrobjs * sizeof req->crq_o[0]);
}
if (rqo->ro_obj == NULL) {
rqo->ro_obj = obj;
cl_object_get(obj);
- rqo->ro_obj_ref = lu_object_ref_add(&obj->co_lu,
- "cl_req", req);
+ lu_object_ref_add_at(&obj->co_lu, &rqo->ro_obj_ref,
+ "cl_req", req);
break;
}
}
*/
void cl_sync_io_init(struct cl_sync_io *anchor, int nrpages)
{
- ENTRY;
- cfs_waitq_init(&anchor->csi_waitq);
- cfs_atomic_set(&anchor->csi_sync_nr, nrpages);
- anchor->csi_sync_rc = 0;
- EXIT;
+ ENTRY;
+ init_waitqueue_head(&anchor->csi_waitq);
+ cfs_atomic_set(&anchor->csi_sync_nr, nrpages);
+ /* csi_barrier stays non-zero until cl_sync_io_note() finishes the
+ * final wakeup, telling the waiter when the anchor may be reused */
+ cfs_atomic_set(&anchor->csi_barrier, nrpages > 0);
+ anchor->csi_sync_rc = 0;
+ EXIT;
}
EXPORT_SYMBOL(cl_sync_io_init);
}
LASSERT(cfs_atomic_read(&anchor->csi_sync_nr) == 0);
cl_page_list_assume(env, io, queue);
- POISON(anchor, 0x5a, sizeof *anchor);
- RETURN(rc);
+
+ /* wait until cl_sync_io_note() has done wakeup */
+ while (unlikely(cfs_atomic_read(&anchor->csi_barrier) != 0)) {
+#ifdef __KERNEL__
+ cpu_relax();
+#endif
+ }
+
+ POISON(anchor, 0x5a, sizeof *anchor);
+ RETURN(rc);
}
EXPORT_SYMBOL(cl_sync_io_wait);
* IO.
*/
LASSERT(cfs_atomic_read(&anchor->csi_sync_nr) > 0);
- if (cfs_atomic_dec_and_test(&anchor->csi_sync_nr))
- cfs_waitq_broadcast(&anchor->csi_waitq);
- EXIT;
+ if (cfs_atomic_dec_and_test(&anchor->csi_sync_nr)) {
+ wake_up_all(&anchor->csi_waitq);
+ /* it's safe to nuke or reuse anchor now */
+ cfs_atomic_set(&anchor->csi_barrier, 0);
+ }
+ EXIT;
}
EXPORT_SYMBOL(cl_sync_io_note);