#include <lustre_fid.h>
#include <cl_object.h>
#include "cl_internal.h"
+#include <libcfs/crypto/llcrypt.h>
/*****************************************************************************
*
case CIT_GLIMPSE:
break;
case CIT_LADVISE:
+ case CIT_LSEEK:
break;
default:
LBUG();
*/
int cl_io_loop(const struct lu_env *env, struct cl_io *io)
{
- int result = 0;
+ int result = 0;
+ int rc = 0;
LINVRNT(cl_io_is_loopable(io));
ENTRY;
}
}
cl_io_iter_fini(env, io);
- } while (result == 0 && io->ci_continue);
+ if (result)
+ rc = result;
+ } while ((result == 0 || result == -EIOCBQUEUED) &&
+ io->ci_continue);
+
+ if (rc && !result)
+ result = rc;
if (result == -EWOULDBLOCK && io->ci_ndelay) {
io->ci_need_restart = 1;
ENTRY;
plist->pl_nr = 0;
INIT_LIST_HEAD(&plist->pl_pages);
- plist->pl_owner = current;
EXIT;
}
EXPORT_SYMBOL(cl_page_list_init);
/* it would be better to check that page is owned by "current" io, but
* it is not passed here. */
LASSERT(page->cp_owner != NULL);
- LINVRNT(plist->pl_owner == current);
LASSERT(list_empty(&page->cp_batch));
list_add_tail(&page->cp_batch, &plist->pl_pages);
{
LASSERT(plist->pl_nr > 0);
LASSERT(cl_page_is_vmlocked(env, page));
- LINVRNT(plist->pl_owner == current);
ENTRY;
list_del_init(&page->cp_batch);
struct cl_page *page)
{
LASSERT(src->pl_nr > 0);
- LINVRNT(dst->pl_owner == current);
- LINVRNT(src->pl_owner == current);
ENTRY;
list_move_tail(&page->cp_batch, &dst->pl_pages);
struct cl_page *page)
{
LASSERT(src->pl_nr > 0);
- LINVRNT(dst->pl_owner == current);
- LINVRNT(src->pl_owner == current);
ENTRY;
list_move(&page->cp_batch, &dst->pl_pages);
struct cl_page *page;
struct cl_page *tmp;
- LINVRNT(list->pl_owner == current);
- LINVRNT(head->pl_owner == current);
ENTRY;
cl_page_list_for_each_safe(page, tmp, list)
struct cl_page *page;
struct cl_page *temp;
- LINVRNT(plist->pl_owner == current);
ENTRY;
cl_page_list_for_each_safe(page, temp, plist) {
struct cl_page *page;
struct cl_page *temp;
- LINVRNT(plist->pl_owner == current);
ENTRY;
cl_page_list_for_each_safe(page, temp, plist)
{
struct cl_page *page;
- LINVRNT(plist->pl_owner == current);
cl_page_list_for_each(page, plist)
cl_page_assume(env, io, page);
{
struct cl_page *page;
- LINVRNT(plist->pl_owner == current);
ENTRY;
cl_page_list_for_each(page, plist)
cl_page_discard(env, io, page);
/* release pages */
while (aio->cda_pages.pl_nr > 0) {
struct cl_page *page = cl_page_list_first(&aio->cda_pages);
+ struct page *vmpage = cl_page_vmpage(page);
+ struct inode *inode = vmpage ? page2inode(vmpage) : NULL;
cl_page_get(page);
+ /* We end up here in case of Direct IO only. For encrypted file,
+ * mapping was set on pages in ll_direct_rw_pages(), so it has
+ * to be cleared now before page cleanup.
+ * PageChecked flag was also set there, so we clean up here.
+ */
+ if (inode && IS_ENCRYPTED(inode)) {
+ vmpage->mapping = NULL;
+ ClearPageChecked(vmpage);
+ }
cl_page_list_del(env, &aio->cda_pages, page);
cl_page_delete(env, page);
cl_page_put(env, page);
}
- if (!is_sync_kiocb(aio->cda_iocb))
+ if (!is_sync_kiocb(aio->cda_iocb) && !aio->cda_no_aio_complete)
aio_complete(aio->cda_iocb, ret ?: aio->cda_bytes, 0);
EXIT;
NULL : aio, cl_aio_end);
cl_page_list_init(&aio->cda_pages);
aio->cda_iocb = iocb;
+ aio->cda_no_aio_complete = 0;
}
return aio;
}
EXPORT_SYMBOL(cl_aio_alloc);
+/**
+ * Free a cl_dio_aio previously obtained from cl_aio_alloc().
+ *
+ * Returns the structure to the cl_dio_aio_kmem slab cache.
+ * A NULL \a aio is silently ignored, so callers need not guard.
+ *
+ * \param[in] aio	aio structure to free, may be NULL
+ */
+void cl_aio_free(struct cl_dio_aio *aio)
+{
+	if (aio)
+		OBD_SLAB_FREE_PTR(aio, cl_dio_aio_kmem);
+}
+EXPORT_SYMBOL(cl_aio_free);
+
/**
* Indicate that transfer of a single page completed.
* If anchor->csi_aio is set, we are responsible for freeing
* memory here rather than when cl_sync_io_wait() completes.
*/
- if (aio)
- OBD_SLAB_FREE_PTR(aio, cl_dio_aio_kmem);
+ cl_aio_free(aio);
}
EXIT;
}