'PAGE_CACHE_SIZE', 'PAGE_SIZE',
'PAGE_CACHE_SHIFT', 'PAGE_SHIFT',
+ 'page_cache_release', 'put_page',
+ 'page_cache_get', 'get_page',
+
'cfs_proc_dir_entry_t', 'struct proc_dir_entry',
'cfs_rcu_head_t', 'struct rcu_head',
]) # LIBCFS_STACKTRACE_OPS_ADDRESS_RETURN_INT
#
+# Kernel version 4.6 removed both the struct task_struct and struct mm_struct
+# arguments from get_user_pages()
+#
+AC_DEFUN([LIBCFS_GET_USER_PAGES_6ARG], [
+LB_CHECK_COMPILE([if 'get_user_pages()' takes 6 arguments],
+get_user_pages_6arg, [
+ #include <linux/mm.h>
+],[
+ int rc;
+
+ rc = get_user_pages(0, 0, 0, 0, NULL, NULL);
+],[
+ AC_DEFINE(HAVE_GET_USER_PAGES_6ARG, 1,
+ [get_user_pages takes 6 arguments])
+])
+]) # LIBCFS_GET_USER_PAGES_6ARG
+
+#
# LIBCFS_PROG_LINUX
#
# LibCFS linux kernel checks
LIBCFS_CRYPTO_HASH_HELPERS
# 4.6
LIBCFS_STACKTRACE_OPS_ADDRESS_RETURN_INT
+LIBCFS_GET_USER_PAGES_6ARG
]) # LIBCFS_PROG_LINUX
#
int bytes, rc, offset;
void *maddr;
- rc = get_user_pages(tsk, mm, addr, 1,
- write, 1, &page, &vma);
+#ifdef HAVE_GET_USER_PAGES_6ARG
+ rc = get_user_pages(addr, 1, write, 1, &page, &vma);
+#else
+ rc = get_user_pages(tsk, mm, addr, 1, write, 1, &page, &vma);
+#endif
if (rc <= 0)
break;
buf, maddr + offset, bytes);
}
kunmap(page);
- page_cache_release(page);
+ put_page(page);
len -= bytes;
buf += bytes;
addr += bytes;
int i;
for (i = 0; i < desc->bd_iov_count ; i++)
- page_cache_release(BD_GET_KIOV(desc, i).kiov_page);
+ put_page(BD_GET_KIOV(desc, i).kiov_page);
}
static inline void ptlrpc_release_bulk_noop(struct ptlrpc_bulk_desc *desc)
static inline void ll_delete_from_page_cache(struct page *page)
{
ll_remove_from_page_cache(page);
- page_cache_release(page);
+ put_page(page);
}
#else /* HAVE_DELETE_FROM_PAGE_CACHE */
#define ll_delete_from_page_cache(page) delete_from_page_cache(page)
truncate_complete_page(page->mapping, page);
unlock_page(page);
}
- page_cache_release(page);
+ put_page(page);
}
/**
vmpage = vio->u.fault.ft_vmpage;
if (result != 0 && vmpage != NULL) {
- page_cache_release(vmpage);
+ put_page(vmpage);
vmf->page = NULL;
}
}
lock_page(vmpage);
if (unlikely(vmpage->mapping == NULL)) { /* unlucky */
unlock_page(vmpage);
- page_cache_release(vmpage);
+ put_page(vmpage);
vmf->page = NULL;
if (!printed && ++count > 16) {
if (vmpage != NULL) {
if (rc != 0)
unlock_page(vmpage);
- page_cache_release(vmpage);
+ put_page(vmpage);
}
if (msg != NULL) {
ll_ra_stats_inc(inode, which);
break;
if (do_dirty)
set_page_dirty_lock(pages[i]);
- page_cache_release(pages[i]);
+ put_page(pages[i]);
}
#if defined(HAVE_DIRECTIO_ITER) || defined(HAVE_IOV_ITER_RW)
* one in commit page list, though. */
if (vmpage != NULL && plist->pl_nr > 0) {
unlock_page(vmpage);
- page_cache_release(vmpage);
+ put_page(vmpage);
vmpage = NULL;
}
if (result < 0) {
if (vmpage != NULL) {
unlock_page(vmpage);
- page_cache_release(vmpage);
+ put_page(vmpage);
}
if (io)
io->ci_result = result;
int result = 0;
ENTRY;
- page_cache_release(vmpage);
+ put_page(vmpage);
LASSERT(lcc != NULL);
env = lcc->lcc_env;
id.vpi_index = vmpage->index;
/* Cant support over 16T file */
nr = !(vmpage->index > 0xffffffff);
- page_cache_release(vmpage);
+ put_page(vmpage);
}
lu_object_ref_del(&clob->co_lu, "dump", current);
page = cl_vmpage_page(vmpage, clob);
unlock_page(vmpage);
- page_cache_release(vmpage);
+ put_page(vmpage);
}
seq_printf(f, "%8x@"DFID": ", id.vpi_index,
struct page *vmpage = vpg->vpg_page;
LASSERT(vmpage != NULL);
- page_cache_release(vmpage);
+ put_page(vmpage);
}
static void vvp_page_fini(const struct lu_env *env,
CLOBINVRNT(env, obj, vvp_object_invariant(obj));
vpg->vpg_page = vmpage;
- page_cache_get(vmpage);
+ get_page(vmpage);
if (page->cp_type == CPT_CACHEABLE) {
/* in cache, decref in vvp_page_delete */
stripe_hash = le64_to_cpu(dp->ldp_hash_end);
kunmap(page);
- page_cache_release(page);
+ put_page(page);
page = NULL;
/* reach the end of current stripe, go to next stripe */
le64_to_cpu(ent->lde_hash)) {
min_ent = ent;
kunmap(min_page);
- page_cache_release(min_page);
+ put_page(min_page);
min_idx = i;
min_page = page;
} else {
kunmap(page);
- page_cache_release(page);
+ put_page(page);
page = NULL;
}
} else {
out:
if (*ppage != NULL) {
kunmap(*ppage);
- page_cache_release(*ppage);
+ put_page(*ppage);
}
*stripe_offset = min_idx;
*entp = min_ent;
out:
if (min_ent_page != NULL) {
kunmap(min_ent_page);
- page_cache_release(min_ent_page);
+ put_page(min_ent_page);
}
if (unlikely(rc != 0)) {
truncate_complete_page(page->mapping, page);
unlock_page(page);
}
- page_cache_release(page);
+ put_page(page);
}
static struct page *mdc_page_locate(struct address_space *mapping, __u64 *hash,
if (found > 0 && !radix_tree_exceptional_entry(page)) {
struct lu_dirpage *dp;
- page_cache_get(page);
+ get_page(page);
spin_unlock_irq(&mapping->tree_lock);
/*
* In contrast to find_lock_page() we are sure that directory
page = NULL;
}
} else {
- page_cache_release(page);
+ put_page(page);
page = ERR_PTR(-EIO);
}
} else {
static inline void delete_from_page_cache(struct page *page)
{
remove_from_page_cache(page);
- page_cache_release(page);
+ put_page(page);
}
#endif
page = page_pool[i];
if (rc < 0 || i >= rd_pgs) {
- page_cache_release(page);
+ put_page(page);
continue;
}
else
CDEBUG(D_VFSTRACE, "page %lu add to page cache failed:"
" rc = %d\n", offset, ret);
- page_cache_release(page);
+ put_page(page);
}
if (page_pool != &page0)
ENTRY;
atomic_dec(&eco->eo_npages);
- page_cache_release(slice->cpl_page->cp_vmpage);
+ put_page(slice->cpl_page->cp_vmpage);
EXIT;
}
struct echo_object *eco = cl2echo_obj(obj);
ENTRY;
- page_cache_get(page->cp_vmpage);
+ get_page(page->cp_vmpage);
mutex_init(&ep->ep_lock);
cl_page_slice_add(page, &ep->ep_cl, obj, index, &echo_page_ops);
atomic_inc(&eco->eo_npages);
continue;
LASSERT(PageLocked(lnb[i].lnb_page));
unlock_page(lnb[i].lnb_page);
- page_cache_release(lnb[i].lnb_page);
+ put_page(lnb[i].lnb_page);
lu_object_put(env, &dt->do_lu);
lnb[i].lnb_page = NULL;
}
desc->bd_nob += len;
if (pin)
- page_cache_get(page);
+ get_page(page);
kiov->kiov_page = page;
kiov->kiov_offset = pageoffset;
void tgt_mod_exit(void)
{
if (tgt_page_to_corrupt != NULL)
- page_cache_release(tgt_page_to_corrupt);
+ put_page(tgt_page_to_corrupt);
lu_context_key_degister(&tgt_thread_key);
lu_context_key_degister(&tgt_session_key);