#define DEBUG_SUBSYSTEM S_LLITE
-#include <lustre_lite.h>
#include "llite_internal.h"
#include <linux/lustre_compat25.h>
 * aligned truncate). Lustre leaves the partially truncated page in the cache,
 * relying on struct inode::i_size to limit further accesses.
*/
-static void ll_invalidatepage(struct page *vmpage, unsigned long offset)
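+/* Kernels with HAVE_INVALIDATE_RANGE (presumably a configure-time compat
+ * check) pass a byte range to ->invalidatepage(); older kernels pass only
+ * the starting offset. */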
+static void ll_invalidatepage(struct page *vmpage,
+#ifdef HAVE_INVALIDATE_RANGE
+ unsigned int offset, unsigned int length
+#else
+ unsigned long offset
+#endif
+ )
{
struct inode *inode;
struct lu_env *env;
LASSERT(PageLocked(vmpage));
LASSERT(!PageWriteback(vmpage));
- /*
- * It is safe to not check anything in invalidatepage/releasepage
- * below because they are run with page locked and all our io is
- * happening with locked page too
- */
- if (offset == 0) {
+ /*
+ * It is safe to not check anything in invalidatepage/releasepage
+ * below because they are run with page locked and all our io is
+ * happening with locked page too
+ */
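+	/* Only a full-page invalidation detaches the cl_page; a partial range
+	 * leaves the page in cache, bounded by inode::i_size as noted above. */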
+#ifdef HAVE_INVALIDATE_RANGE
+ if (offset == 0 && length == PAGE_CACHE_SIZE) {
+#else
+ if (offset == 0) {
+#endif
env = cl_env_get(&refcheck);
if (!IS_ERR(env)) {
inode = vmpage->mapping->host;
 * If this page holds the last reference of the cl_object, the following
* call path may cause reschedule:
* cl_page_put -> cl_page_free -> cl_object_put ->
- * lu_object_put -> lu_object_free -> lov_delete_raid0 ->
- * cl_locks_prune.
+ * lu_object_put -> lu_object_free -> lov_delete_raid0.
*
* However, the kernel can't get rid of this inode until all pages have
* been cleaned up. Now that we hold page lock here, it's pretty safe
return result;
}
-static int ll_set_page_dirty(struct page *vmpage)
-{
-#if 0
- struct cl_page *page = vvp_vmpage_page_transient(vmpage);
- struct vvp_object *obj = cl_inode2vvp(vmpage->mapping->host);
- struct vvp_page *cpg;
-
- /*
- * XXX should page method be called here?
- */
- LASSERT(&obj->co_cl == page->cp_obj);
- cpg = cl2vvp_page(cl_page_at(page, &vvp_device_type));
- /*
- * XXX cannot do much here, because page is possibly not locked:
- * sys_munmap()->...
- * ->unmap_page_range()->zap_pte_range()->set_page_dirty().
- */
- vvp_write_pending(obj, cpg);
-#endif
- RETURN(__set_page_dirty_nobuffers(vmpage));
-}
-
#define MAX_DIRECTIO_SIZE 2*1024*1024*1024UL
static inline int ll_get_user_pages(int rw, unsigned long user_addr,
int rw, struct inode *inode,
struct ll_dio_pages *pv)
{
- struct cl_page *clp;
- struct cl_2queue *queue;
- struct cl_object *obj = io->ci_obj;
- int i;
- ssize_t rc = 0;
- loff_t file_offset = pv->ldp_start_offset;
- long size = pv->ldp_size;
- int page_count = pv->ldp_nr;
- struct page **pages = pv->ldp_pages;
- long page_size = cl_page_size(obj);
- bool do_io;
- int io_pages = 0;
- ENTRY;
+ struct cl_page *clp;
+ struct cl_2queue *queue;
+ struct cl_object *obj = io->ci_obj;
+ int i;
+ ssize_t rc = 0;
+ loff_t file_offset = pv->ldp_start_offset;
+ size_t size = pv->ldp_size;
+ int page_count = pv->ldp_nr;
+ struct page **pages = pv->ldp_pages;
+ size_t page_size = cl_page_size(obj);
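+	/* byte counts are size_t now, presumably to keep sizes unsigned and
+	 * consistent across 32- and 64-bit kernels */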
+ bool do_io;
+ int io_pages = 0;
+ ENTRY;
queue = &io->ci_queue;
cl_2queue_init(queue);
		/* check the page type: if the page is a host page, then write
		 * to it directly */
if (clp->cp_type == CPT_CACHEABLE) {
- struct page *vmpage = cl_page_vmpage(env, clp);
+ struct page *vmpage = cl_page_vmpage(clp);
struct page *src_page;
struct page *dst_page;
void *src;
struct cl_io *io;
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
- struct ccc_object *obj = cl_inode2ccc(inode);
long count = iov_length(iov, nr_segs);
long tot_bytes = 0, result = 0;
struct ll_inode_info *lli = ll_i2info(inode);
io = ccc_env_io(env)->cui_cl.cis_io;
LASSERT(io != NULL);
- /* 0. Need locking between buffered and direct access. and race with
- * size changing by concurrent truncates and writes.
- * 1. Need inode mutex to operate transient pages.
- */
- if (rw == READ)
- mutex_lock(&inode->i_mutex);
-
- LASSERT(obj->cob_transient_pages == 0);
for (seg = 0; seg < nr_segs; seg++) {
long iov_left = iov[seg].iov_len;
unsigned long user_addr = (unsigned long)iov[seg].iov_base;
}
}
out:
- LASSERT(obj->cob_transient_pages == 0);
- if (rw == READ)
- mutex_unlock(&inode->i_mutex);
-
if (tot_bytes > 0) {
struct ccc_io *cio = ccc_env_io(env);
static int ll_prepare_partial_page(const struct lu_env *env, struct cl_io *io,
struct cl_page *pg)
{
- struct cl_object *obj = io->ci_obj;
struct cl_attr *attr = ccc_env_thread_attr(env);
- loff_t offset = cl_offset(obj, pg->cp_index);
+ struct cl_object *obj = io->ci_obj;
+ struct ccc_page *cp = cl_object_page_slice(obj, pg);
+ loff_t offset = cl_offset(obj, ccc_index(cp));
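+	/* the offset is now derived from the ccc page slice index instead of
+	 * pg->cp_index, so look the slice up before fetching attributes */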
int result;
cl_object_attr_lock(obj);
result = cl_object_attr_get(env, obj, attr);
cl_object_attr_unlock(obj);
if (result == 0) {
- struct ccc_page *cp;
-
- cp = cl2ccc_page(cl_page_at(pg, &vvp_device_type));
-
/*
	 * If we are writing to a new page, no need to read old data.
* The extent locking will have updated the KMS, and for our
struct page **pagep, void **fsdata)
{
struct ll_cl_context *lcc;
- struct lu_env *env;
+ const struct lu_env *env;
struct cl_io *io;
struct cl_page *page;
CDEBUG(D_VFSTRACE, "Writing %lu of %d to %d bytes\n", index, from, len);
- lcc = ll_cl_init(file, NULL);
- if (IS_ERR(lcc))
- GOTO(out, result = PTR_ERR(lcc));
+ lcc = ll_cl_find(file);
+ if (lcc == NULL)
+ GOTO(out, result = -EIO);
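+	/* the cl context is presumably set up earlier for the whole I/O; it is
+	 * only looked up here, so the per-page ll_cl_init()/ll_cl_fini() pair
+	 * is gone */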
env = lcc->lcc_env;
io = lcc->lcc_io;
/* To avoid deadlock, try to lock page first. */
vmpage = grab_cache_page_nowait(mapping, index);
- if (unlikely(vmpage == NULL || PageDirty(vmpage))) {
+
+ if (unlikely(vmpage == NULL ||
+ PageDirty(vmpage) || PageWriteback(vmpage))) {
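+		/* treat a page under writeback like a dirty one: presumably it
+		 * cannot be reused while its I/O is in flight, so fall through
+		 * to the commit path below */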
struct ccc_io *cio = ccc_env_io(env);
struct cl_page_list *plist = &cio->u.write.cui_queue;
	 * because it holds the page lock of a dirty page and requests
	 * more grants. It's okay for the dirty page to be the first
* one in commit page list, though. */
- if (vmpage != NULL && PageDirty(vmpage) && plist->pl_nr > 0) {
+ if (vmpage != NULL && plist->pl_nr > 0) {
unlock_page(vmpage);
page_cache_release(vmpage);
vmpage = NULL;
unlock_page(vmpage);
page_cache_release(vmpage);
}
- if (!IS_ERR(lcc))
- ll_cl_fini(lcc);
} else {
*pagep = vmpage;
*fsdata = lcc;
struct page *vmpage, void *fsdata)
{
struct ll_cl_context *lcc = fsdata;
- struct lu_env *env;
+ const struct lu_env *env;
struct cl_io *io;
struct ccc_io *cio;
struct cl_page *page;
LASSERT(from == 0);
cio->u.write.cui_to = from + copied;
+	/* balance_dirty_pages() may write this dirty page back in the same
+	 * thread, which could deadlock; commit the queued pages right away. */
+ if (PageDirty(vmpage))
+ unplug = true;
+
/* We may have one full RPC, commit it soon */
if (plist->pl_nr >= PTLRPC_MAX_BRW_PAGES)
unplug = true;
} else {
cl_page_disown(env, io, page);
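+		/* the page is not queued for a later commit, so drop the
+		 * reference presumably taken on it in ll_write_begin() */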
+ lcc->lcc_page = NULL;
+ lu_ref_del(&page->cp_reference, "cl_io", io);
+ cl_page_put(env, page);
+
/* page list is not contiguous now, commit it now */
unplug = true;
}
-
if (unplug ||
file->f_flags & O_SYNC || IS_SYNC(file->f_dentry->d_inode))
result = vvp_io_write_commit(env, io);
- ll_cl_fini(lcc);
RETURN(result >= 0 ? copied : result);
}
#ifdef CONFIG_MIGRATION
-int ll_migratepage(struct address_space *mapping,
- struct page *newpage, struct page *page
+static int ll_migratepage(struct address_space *mapping,
+ struct page *newpage, struct page *page
#ifdef HAVE_MIGRATEPAGE_4ARGS
- , enum migrate_mode mode
+ , enum migrate_mode mode
#endif
- )
+ )
{
/* Always fail page migration until we have a proper implementation */
return -EIO;
#endif
#ifndef MS_HAS_NEW_AOPS
-struct address_space_operations ll_aops = {
- .readpage = ll_readpage,
-// .readpages = ll_readpages,
+const struct address_space_operations ll_aops = {
+ .readpage = ll_readpage,
.direct_IO = ll_direct_IO_26,
.writepage = ll_writepage,
.writepages = ll_writepages,
- .set_page_dirty = ll_set_page_dirty,
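+	/* ll_set_page_dirty() was a plain pass-through (its cl_page hooks were
+	 * dead "#if 0" code), so use the generic helper directly */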
+ .set_page_dirty = __set_page_dirty_nobuffers,
.write_begin = ll_write_begin,
.write_end = ll_write_end,
.invalidatepage = ll_invalidatepage,
#ifdef CONFIG_MIGRATION
.migratepage = ll_migratepage,
#endif
- .bmap = NULL
};
#else
-struct address_space_operations_ext ll_aops = {
+const struct address_space_operations_ext ll_aops = {
.orig_aops.readpage = ll_readpage,
.orig_aops.direct_IO = ll_direct_IO_26,
.orig_aops.writepage = ll_writepage,
.orig_aops.writepages = ll_writepages,
- .orig_aops.set_page_dirty = ll_set_page_dirty,
+ .orig_aops.set_page_dirty = __set_page_dirty_nobuffers,
.orig_aops.invalidatepage = ll_invalidatepage,
.orig_aops.releasepage = ll_releasepage,
#ifdef CONFIG_MIGRATION
.orig_aops.migratepage = ll_migratepage,
#endif
- .orig_aops.bmap = NULL,
.write_begin = ll_write_begin,
.write_end = ll_write_end
};