# Kernel version 4.6 adds lock_page_memcg(page)
# Linux commit v5.15-12273-gab2f9d2d3626
# mm: unexport {,un}lock_page_memcg
+# and removed in v6.4-rc4-327-g6c77b607ee26
+# mm: kill lock|unlock_page_memcg()
#
AC_DEFUN([LC_SRC_LOCK_PAGE_MEMCG], [
LB2_LINUX_TEST_SRC([lock_page_memcg], [
]) # LC_HAVE_SECURITY_DENTRY_INIT_WITH_XATTR_NAME_ARG
#
+# LC_FOLIO_MEMCG_LOCK
+#
+# kernel v5.15-rc3-45-gf70ad4487415
+# mm/memcg: Add folio_memcg_lock() and folio_memcg_unlock()
+# Use folio_memcg_[un]lock when [un]lock_page_memcg is removed.
+#
+AC_DEFUN([LC_SRC_FOLIO_MEMCG_LOCK], [
+ LB2_LINUX_TEST_SRC([folio_memcg_lock], [
+ #include <linux/memcontrol.h>
+ ],[
+ folio_memcg_lock(NULL);
+ ],[-Werror])
+])
+AC_DEFUN([LC_FOLIO_MEMCG_LOCK], [
+ AC_MSG_CHECKING([if 'folio_memcg_lock' is defined])
+ LB2_LINUX_TEST_RESULT([folio_memcg_lock], [
+ AC_DEFINE(HAVE_FOLIO_MEMCG_LOCK, 1, [folio_memcg_lock is defined])
+ ])
+]) # LC_FOLIO_MEMCG_LOCK
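As a reference for what this probe exercises: it only requires folio_memcg_lock() to be declared in <linux/memcontrol.h>; whether the symbol is actually usable from a module is checked separately below by LC_FOLIO_MEMCG_LOCK_EXPORTED. A rough sketch of the kind of compile-only test source the LB2 harness builds with -Werror (the exact generated conftest differs):

/* compile-only probe; passing NULL is fine since nothing is executed */
#include <linux/memcontrol.h>

static int __attribute__((unused)) conftest_folio_memcg_lock(void)
{
	folio_memcg_lock(NULL);
	return 0;
}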
+
+#
# LC_HAVE_KIOCB_COMPLETE_2ARGS
#
# kernel v5.15-rc6-145-g6b19b766e8f0
]) # LC_HAVE_KIOCB_COMPLETE_2ARGS
#
+# LC_FOLIO_MEMCG_LOCK_EXPORTED
+#
+# Linux commit v5.15-12272-g913ffbdd9985
+# mm: unexport folio_memcg_{,un}lock
+#
+AC_DEFUN([LC_FOLIO_MEMCG_LOCK_EXPORTED], [
+LB_CHECK_EXPORT([folio_memcg_lock], [mm/memcontrol.c],
+ [AC_DEFINE(FOLIO_MEMCG_LOCK_EXPORTED, 1,
+ [folio_memcg_{,un}lock are exported])])
+]) # LC_FOLIO_MEMCG_LOCK_EXPORTED
+
+#
# LC_EXPORTS_DELETE_FROM_PAGE_CACHE
#
# Linux commit v5.16-rc4-44-g452e9e6992fe
]) # LC_HAVE_GET_USER_PAGES_WITHOUT_VMA
#
+# LC_HAVE_FOLIO_BATCH_REINIT
+#
+# linux kernel v6.2-rc4-254-g811561288397
+# mm: pagevec: add folio_batch_reinit()
+#
+AC_DEFUN([LC_SRC_HAVE_FOLIO_BATCH_REINIT], [
+ LB2_LINUX_TEST_SRC([folio_batch_reinit_exists], [
+ #include <linux/pagevec.h>
+ ],[
+ struct folio_batch fbatch __attribute__ ((unused));
+
+ folio_batch_reinit(&fbatch);
+ ],[-Werror])
+])
+AC_DEFUN([LC_HAVE_FOLIO_BATCH_REINIT], [
+ AC_MSG_CHECKING([if 'folio_batch_reinit' is available])
+ LB2_LINUX_TEST_RESULT([folio_batch_reinit_exists], [
+ AC_DEFINE(HAVE_FOLIO_BATCH_REINIT, 1,
+ ['folio_batch_reinit' is available])
+ ])
+]) # LC_HAVE_FOLIO_BATCH_REINIT
+
+#
+# LC_HAVE_FOLIO_BATCH
+#
+# linux kernel v5.16-rc4-36-g10331795fb79
+# pagevec: Add folio_batch
+#
+AC_DEFUN([LC_SRC_HAVE_FOLIO_BATCH], [
+ LB2_LINUX_TEST_SRC([struct_folio_batch_exists], [
+ #include <linux/pagevec.h>
+ ],[
+ struct folio_batch fbatch __attribute__ ((unused));
+
+ folio_batch_init(&fbatch);
+ ],[-Werror])
+])
+AC_DEFUN([LC_HAVE_FOLIO_BATCH], [
+ AC_MSG_CHECKING([if 'struct folio_batch' is available])
+ LB2_LINUX_TEST_RESULT([struct_folio_batch_exists], [
+ AC_DEFINE(HAVE_FOLIO_BATCH, 1,
+ ['struct folio_batch' is available])
+ ])
+]) # LC_HAVE_FOLIO_BATCH
+
+#
+# LC_HAVE_STRUCT_PAGEVEC
+#
+# linux kernel v6.4-rc4-438-g1e0877d58b1e
+# mm: remove struct pagevec
+#
+AC_DEFUN([LC_SRC_HAVE_STRUCT_PAGEVEC], [
+ LB2_LINUX_TEST_SRC([struct_pagevec_exists], [
+ #include <linux/pagevec.h>
+ ],[
+		struct pagevec pvec __attribute__ ((unused));
+ ],[-Werror])
+])
+AC_DEFUN([LC_HAVE_STRUCT_PAGEVEC], [
+ AC_MSG_CHECKING([if 'struct pagevec' is available])
+ LB2_LINUX_TEST_RESULT([struct_pagevec_exists], [
+ AC_DEFINE(HAVE_PAGEVEC, 1,
+ ['struct pagevec' is available])
+ ])
+]) # LC_HAVE_STRUCT_PAGEVEC
+
+#
# LC_PROG_LINUX
#
# Lustre linux kernel checks
# 5.16
LC_SRC_HAVE_SECURITY_DENTRY_INIT_WITH_XATTR_NAME_ARG
+ LC_SRC_FOLIO_MEMCG_LOCK
LC_SRC_HAVE_KIOCB_COMPLETE_2ARGS
# 5.17
LC_SRC_HAVE_MNT_IDMAP_ARG
LC_SRC_HAVE_LOCKS_LOCK_FILE_WAIT_IN_FILELOCK
LC_SRC_HAVE_U64_CAPABILITY
+ LC_SRC_HAVE_FOLIO_BATCH_REINIT
# 6.5
LC_SRC_HAVE_FILEMAP_SPLICE_READ
LC_SRC_HAVE_ENUM_ITER_PIPE
LC_SRC_HAVE_GET_USER_PAGES_WITHOUT_VMA
+ LC_SRC_HAVE_FOLIO_BATCH
+ LC_SRC_HAVE_STRUCT_PAGEVEC
# kernel patch to extend integrity interface
LC_SRC_BIO_INTEGRITY_PREP_FN
# 5.16
LC_HAVE_SECURITY_DENTRY_INIT_WITH_XATTR_NAME_ARG
+ LC_FOLIO_MEMCG_LOCK
LC_HAVE_KIOCB_COMPLETE_2ARGS
+ LC_FOLIO_MEMCG_LOCK_EXPORTED
LC_EXPORTS_DELETE_FROM_PAGE_CACHE
# 5.17
LC_HAVE_MNT_IDMAP_ARG
LC_HAVE_LOCKS_LOCK_FILE_WAIT_IN_FILELOCK
LC_HAVE_U64_CAPABILITY
+ LC_HAVE_FOLIO_BATCH_REINIT
# 6.5
LC_HAVE_FILEMAP_SPLICE_READ
LC_HAVE_ENUM_ITER_PIPE
LC_HAVE_GET_USER_PAGES_WITHOUT_VMA
+ LC_HAVE_FOLIO_BATCH
+ LC_HAVE_STRUCT_PAGEVEC
# kernel patch to extend integrity interface
LC_BIO_INTEGRITY_PREP_FN
#include <linux/pagevec.h>
#include <libcfs/linux/linux-misc.h>
#include <lustre_dlm.h>
+#include <lustre_compat.h>
struct obd_info;
struct inode;
};
typedef void (*cl_commit_cbt)(const struct lu_env *, struct cl_io *,
- struct pagevec *);
+ struct folio_batch *);
struct cl_read_ahead {
/* Maximum page index the readahead window will end.
void cl_page_get (struct cl_page *page);
void cl_page_put (const struct lu_env *env,
struct cl_page *page);
-void cl_pagevec_put (const struct lu_env *env,
- struct cl_page *page,
- struct pagevec *pvec);
+void cl_batch_put(const struct lu_env *env, struct cl_page *page,
+ struct folio_batch *fbatch);
void cl_page_print (const struct lu_env *env, void *cookie,
lu_printer_t printer,
const struct cl_page *pg);
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/security.h>
+#include <linux/pagevec.h>
#include <linux/workqueue.h>
#include <libcfs/linux/linux-fs.h>
#include <obd_support.h>
#define smp_store_mb(var, value) set_mb(var, value)
#endif
-#ifdef HAVE_PAGEVEC_INIT_ONE_PARAM
-#define ll_pagevec_init(pvec, n) pagevec_init(pvec)
-#else
-#define ll_pagevec_init(pvec, n) pagevec_init(pvec, n)
-#endif
-
#ifdef HAVE_D_COUNT
# define ll_d_count(d) d_count(d)
#else
#define ll_xa_unlock_irqrestore(lockp, flags) spin_unlock_irqrestore(lockp, flags)
#endif
-/* Linux commit v5.15-12273-gab2f9d2d3626
- * mm: unexport {,un}lock_page_memcg
- *
- * Note that the functions are still defined or declared breaking
- * the simple approach of just defining the missing functions here
- */
-#ifdef HAVE_LOCK_PAGE_MEMCG
-#define vvp_lock_page_memcg(page) lock_page_memcg((page))
-#define vvp_unlock_page_memcg(page) unlock_page_memcg((page))
-#else
-#define vvp_lock_page_memcg(page)
-#define vvp_unlock_page_memcg(page)
-#endif
-
#ifndef KMEM_CACHE_USERCOPY
#define kmem_cache_create_usercopy(name, size, align, flags, useroffset, \
usersize, ctor) \
#endif /* HAVE_READ_CACHE_PAGE_WANTS_FILE */
}
+#ifdef HAVE_FOLIO_BATCH
+# define ll_folio_batch_init(batch, n) folio_batch_init(batch)
+# define fbatch_at(fbatch, f) ((fbatch)->folios[(f)])
+# define fbatch_at_npgs(fbatch, f) folio_nr_pages((fbatch)->folios[(f)])
+# define fbatch_at_pg(fbatch, f, pg) folio_page((fbatch)->folios[(f)], (pg))
+# define folio_batch_add_page(fbatch, page) \
+ folio_batch_add(fbatch, page_folio(page))
+# ifndef HAVE_FOLIO_BATCH_REINIT
+static inline void folio_batch_reinit(struct folio_batch *fbatch)
+{
+ fbatch->nr = 0;
+}
+# endif /* HAVE_FOLIO_BATCH_REINIT */
+
+#else /* !HAVE_FOLIO_BATCH */
+
+# ifdef HAVE_PAGEVEC
+# define folio_batch pagevec
+# endif
+# define folio_batch_init(pvec) pagevec_init(pvec)
+# define folio_batch_reinit(pvec) pagevec_reinit(pvec)
+# define folio_batch_count(pvec) pagevec_count(pvec)
+# define folio_batch_space(pvec) pagevec_space(pvec)
+# define folio_batch_add_page(pvec, page) \
+ pagevec_add(pvec, page)
+# define folio_batch_release(pvec) \
+ pagevec_release(((struct pagevec *)pvec))
+# ifdef HAVE_PAGEVEC_INIT_ONE_PARAM
+# define ll_folio_batch_init(pvec, n) pagevec_init(pvec)
+# else
+# define ll_folio_batch_init(pvec, n) pagevec_init(pvec, n)
+# endif
+# define fbatch_at(pvec, n) ((pvec)->pages[(n)])
+# define fbatch_at_npgs(pvec, n) 1
+# define fbatch_at_pg(pvec, n, pg) ((pvec)->pages[(n)])
+#endif /* HAVE_FOLIO_BATCH */
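With the block above, batch walkers can be written once and work whether the kernel provides struct folio_batch or only struct pagevec; on pagevec kernels each slot holds exactly one page, so fbatch_at_npgs() is simply 1. A minimal sketch assuming only the macros above (ll_batch_for_each_page() and process_page() are hypothetical names, not part of this patch):

static inline void ll_batch_for_each_page(struct folio_batch *fbatch,
					  void (*process_page)(struct page *))
{
	int i, pg, npgs;

	for (i = 0; i < folio_batch_count(fbatch); i++) {
		/* 1 on pagevec kernels, folio_nr_pages() otherwise */
		npgs = fbatch_at_npgs(fbatch, i);
		for (pg = 0; pg < npgs; pg++)
			process_page(fbatch_at_pg(fbatch, i, pg));
	}
}

This is the iteration shape vvp_set_batch_dirty() and write_commit_callback() below follow.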
+
#ifndef HAVE_FLUSH___WORKQUEUE
#define __flush_workqueue(wq) flush_scheduled_work()
#endif
union ldlm_policy_data oti_policy;
struct cl_attr oti_attr;
struct cl_io oti_io;
- struct pagevec oti_pagevec;
+ struct folio_batch oti_fbatch;
void *oti_pvec[OTI_PVEC_SIZE];
/**
* Fields used by cl_lock_discard_pages().
unsigned int (*vvp_account_page_dirtied)(struct page *page,
struct address_space *mapping);
+#if !defined(FOLIO_MEMCG_LOCK_EXPORTED) && defined(HAVE_FOLIO_MEMCG_LOCK) && \
+ defined(HAVE_KALLSYMS_LOOKUP_NAME)
+void (*vvp_folio_memcg_lock)(struct folio *folio);
+void (*vvp_folio_memcg_unlock)(struct folio *folio);
+#endif
/**
* A mutex serializing calls to vvp_inode_fini() under extreme memory
#endif
#endif
+#if !defined(FOLIO_MEMCG_LOCK_EXPORTED) && defined(HAVE_FOLIO_MEMCG_LOCK) && \
+ defined(HAVE_KALLSYMS_LOOKUP_NAME)
+ vvp_folio_memcg_lock = (void *)
+ cfs_kallsyms_lookup_name("folio_memcg_lock");
+ LASSERT(vvp_folio_memcg_lock);
+
+ vvp_folio_memcg_unlock = (void *)
+ cfs_kallsyms_lookup_name("folio_memcg_unlock");
+ LASSERT(vvp_folio_memcg_unlock);
+#endif
+
return 0;
out_kmem:
struct address_space *mapping);
#endif
+#ifdef HAVE_FOLIO_MEMCG_LOCK
+#ifdef FOLIO_MEMCG_LOCK_EXPORTED
+#define folio_memcg_lock_page(page) folio_memcg_lock(page_folio((page)))
+#define folio_memcg_unlock_page(page) folio_memcg_unlock(page_folio((page)))
+#elif defined(HAVE_KALLSYMS_LOOKUP_NAME)
+/* Use kallsyms_lookup_name to acquire folio_memcg_[un]lock */
+extern void (*vvp_folio_memcg_lock)(struct folio *folio);
+extern void (*vvp_folio_memcg_unlock)(struct folio *folio);
+#define folio_memcg_lock_page(page) \
+ vvp_folio_memcg_lock(page_folio((page)))
+#define folio_memcg_unlock_page(page) \
+ vvp_folio_memcg_unlock(page_folio((page)))
+#endif
+#elif defined(HAVE_LOCK_PAGE_MEMCG)
+#define folio_memcg_lock_page(page) lock_page_memcg((page))
+#define folio_memcg_unlock_page(page) unlock_page_memcg((page))
+#else
+#define folio_memcg_lock_page(page)
+#define folio_memcg_unlock_page(page)
+#endif
+
extern const struct file_operations vvp_dump_pgcache_file_ops;
#endif /* VVP_INTERNAL_H */
#include "llite_internal.h"
#include "vvp_internal.h"
+#include <lustre_compat.h>
#include <libcfs/linux/linux-misc.h>
static struct vvp_io *cl2vvp_io(const struct lu_env *env,
* Backwards compat for 3.x, 5.x kernels relating to memcg handling
* & rename of radix tree to xarray.
*/
-void vvp_set_pagevec_dirty(struct pagevec *pvec)
+void vvp_set_batch_dirty(struct folio_batch *fbatch)
{
- struct page *page = pvec->pages[0];
- int count = pagevec_count(pvec);
+ struct page *page = fbatch_at_pg(fbatch, 0, 0);
+ int count = folio_batch_count(fbatch);
int i;
+#if !defined(HAVE_FOLIO_BATCH) || defined(HAVE_KALLSYMS_LOOKUP_NAME)
+ int pg, npgs;
+#endif
#ifdef HAVE_KALLSYMS_LOOKUP_NAME
struct address_space *mapping = page->mapping;
unsigned long flags;
unsigned long skip_pages = 0;
+ int pgno;
int dirtied = 0;
#endif
*/
#ifndef HAVE_ACCOUNT_PAGE_DIRTIED_EXPORT
if (!vvp_account_page_dirtied) {
- for (i = 0; i < count; i++)
- __set_page_dirty_nobuffers(pvec->pages[i]);
+ for (i = 0; i < count; i++) {
+#ifdef HAVE_FOLIO_BATCH
+ filemap_dirty_folio(page->mapping, fbatch->folios[i]);
+#else
+ npgs = fbatch_at_npgs(fbatch, i);
+ for (pg = 0; pg < npgs; pg++) {
+ page = fbatch_at_pg(fbatch, i, pg);
+ __set_page_dirty_nobuffers(page);
+ }
+#endif
+ }
EXIT;
}
#endif
+ /* account_page_dirtied is available directly or via kallsyms */
#ifdef HAVE_KALLSYMS_LOOKUP_NAME
- for (i = 0; i < count; i++) {
- page = pvec->pages[i];
+ for (pgno = i = 0; i < count; i++) {
+ npgs = fbatch_at_npgs(fbatch, i);
+ for (pg = 0; pg < npgs; pg++) {
+ page = fbatch_at_pg(fbatch, i, pg);
- ClearPageReclaim(page);
+ ClearPageReclaim(page);
- vvp_lock_page_memcg(page);
- if (TestSetPageDirty(page)) {
- /* page is already dirty .. no extra work needed
- * set a flag for the i'th page to be skipped
- */
- vvp_unlock_page_memcg(page);
- skip_pages |= (1 << i);
+			folio_memcg_lock_page(page);
+			if (TestSetPageDirty(page)) {
+				/* page is already dirty .. no extra work
+				 * needed, just flag the pgno'th page to be
+				 * skipped in the accounting loop below
+				 */
+				folio_memcg_unlock_page(page);
+				skip_pages |= (1ul << pgno);
+			}
+			pgno++;
+			LASSERTF(pgno <= BITS_PER_LONG,
+				 "Limit exceeded pgno: %d/%d\n", pgno,
+				 BITS_PER_LONG);
}
}
* dirty_nobuffers should be impossible because we hold the page lock.)
* 4. All mappings are the same because i/o is only to one file.
*/
- for (i = 0; i < count; i++) {
- page = pvec->pages[i];
- /* if the i'th page was unlocked above, skip it here */
- if ((skip_pages >> i) & 1)
- continue;
-
- LASSERTF(page->mapping == mapping,
- "all pages must have the same mapping. page %p, mapping %p, first mapping %p\n",
- page, page->mapping, mapping);
- WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
- ll_account_page_dirtied(page, mapping);
- dirtied++;
- vvp_unlock_page_memcg(page);
+ for (pgno = i = 0; i < count; i++) {
+ npgs = fbatch_at_npgs(fbatch, i);
+ for (pg = 0; pg < npgs; pg++) {
+ page = fbatch_at_pg(fbatch, i, pg);
+			/* skip pages that were already dirty and unlocked above */
+ if ((skip_pages >> pgno++) & 1)
+ continue;
+
+ LASSERTF(page->mapping == mapping,
+ "all pages must have the same mapping. page %px, mapping %px, first mapping %px\n",
+ page, page->mapping, mapping);
+ WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
+ ll_account_page_dirtied(page, mapping);
+ dirtied++;
+ folio_memcg_unlock_page(page);
+ }
}
ll_xa_unlock_irqrestore(&mapping->i_pages, flags);
}
static void write_commit_callback(const struct lu_env *env, struct cl_io *io,
- struct pagevec *pvec)
+ struct folio_batch *fbatch)
{
+ struct page *vmpage;
+ struct cl_page *page;
+ int pg, npgs;
int count = 0;
int i = 0;
ENTRY;
- count = pagevec_count(pvec);
+ count = folio_batch_count(fbatch);
LASSERT(count > 0);
for (i = 0; i < count; i++) {
- struct page *vmpage = pvec->pages[i];
- SetPageUptodate(vmpage);
+ npgs = fbatch_at_npgs(fbatch, i);
+ for (pg = 0; pg < npgs; pg++)
+ SetPageUptodate(fbatch_at_pg(fbatch, i, pg));
}
- vvp_set_pagevec_dirty(pvec);
+ vvp_set_batch_dirty(fbatch);
for (i = 0; i < count; i++) {
- struct page *vmpage = pvec->pages[i];
- struct cl_page *page = (struct cl_page *) vmpage->private;
- cl_page_disown(env, io, page);
- lu_ref_del(&page->cp_reference, "cl_io", cl_io_top(io));
- cl_page_put(env, page);
+ npgs = fbatch_at_npgs(fbatch, i);
+ for (pg = 0; pg < npgs; pg++) {
+ vmpage = fbatch_at_pg(fbatch, i, pg);
+ page = (struct cl_page *) vmpage->private;
+ cl_page_disown(env, io, page);
+ lu_ref_del(&page->cp_reference, "cl_io", cl_io_top(io));
+ cl_page_put(env, page);
+ }
}
EXIT;
}
static void mkwrite_commit_callback(const struct lu_env *env, struct cl_io *io,
- struct pagevec *pvec)
+ struct folio_batch *fbatch)
{
- vvp_set_pagevec_dirty(pvec);
+ vvp_set_batch_dirty(fbatch);
}
static int vvp_io_fault_start(const struct lu_env *env,
}
static void cl_page_free(const struct lu_env *env, struct cl_page *cp,
- struct pagevec *pvec)
+ struct folio_batch *fbatch)
{
struct cl_object *obj = cp->cp_obj;
unsigned short bufsize = cl_object_header(obj)->coh_page_bufsize;
LASSERT(vmpage != NULL);
LASSERT((struct cl_page *)vmpage->private != cp);
- if (pvec != NULL) {
- if (!pagevec_add(pvec, vmpage))
- pagevec_release(pvec);
+ if (fbatch != NULL) {
+ if (!folio_batch_add_page(fbatch, vmpage))
+ folio_batch_release(fbatch);
} else {
put_page(vmpage);
}
EXPORT_SYMBOL(cl_page_get);
/**
- * Releases a reference to a page, use the pagevec to release the pages
+ * Releases a reference to a page, use the folio_batch to release the pages
* in batch if provided.
*
- * Users need to do a final pagevec_release() to release any trailing pages.
+ * Users need to do a final folio_batch_release() to release any trailing pages.
*/
-void cl_pagevec_put(const struct lu_env *env, struct cl_page *page,
- struct pagevec *pvec)
+void cl_batch_put(const struct lu_env *env, struct cl_page *page,
+ struct folio_batch *fbatch)
{
ENTRY;
CL_PAGE_HEADER(D_TRACE, env, page, "%d\n",
* Page is no longer reachable by other threads. Tear
* it down.
*/
- cl_page_free(env, page, pvec);
+ cl_page_free(env, page, fbatch);
}
EXIT;
}
-EXPORT_SYMBOL(cl_pagevec_put);
+EXPORT_SYMBOL(cl_batch_put);
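A typical calling pattern, sketched under the assumption that the caller already holds an env and an array of cl_pages (put_page_array() is a hypothetical helper, not part of this patch):

static void put_page_array(const struct lu_env *env,
			   struct cl_page **pages, int npages)
{
	struct folio_batch fbatch;
	int i;

	ll_folio_batch_init(&fbatch, 0);
	for (i = 0; i < npages; i++)
		cl_batch_put(env, pages[i], &fbatch);
	/* vmpages queued by cl_page_free() still need a final release */
	folio_batch_release(&fbatch);
}

The osc truncate and LRU-discard paths below follow this same init/put/release sequence.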
/**
- * Releases a reference to a page, wrapper to cl_pagevec_put
+ * Releases a reference to a page; a wrapper around cl_batch_put()
*
* When last reference is released, page is returned to the cache, unless it
* is in cl_page_state::CPS_FREEING state, in which case it is immediately
*/
void cl_page_put(const struct lu_env *env, struct cl_page *page)
{
- cl_pagevec_put(env, page, NULL);
+ cl_batch_put(env, page, NULL);
}
EXPORT_SYMBOL(cl_page_put);
struct client_obd *cli = osc_cli(obj);
struct osc_async_page *oap;
struct osc_async_page *tmp;
- struct pagevec *pvec;
+ struct folio_batch *fbatch;
int pages_in_chunk = 0;
int ppc_bits = cli->cl_chunkbits -
PAGE_SHIFT;
io = osc_env_thread_io(env);
io->ci_obj = cl_object_top(osc2cl(obj));
io->ci_ignore_layout = 1;
- pvec = &osc_env_info(env)->oti_pagevec;
- ll_pagevec_init(pvec, 0);
+ fbatch = &osc_env_info(env)->oti_fbatch;
+ ll_folio_batch_init(fbatch, 0);
rc = cl_io_init(env, io, CIT_MISC, io->ci_obj);
if (rc < 0)
GOTO(out, rc);
}
lu_ref_del(&page->cp_reference, "truncate", current);
- cl_pagevec_put(env, page, pvec);
+ cl_batch_put(env, page, fbatch);
--ext->oe_nr_pages;
++nr_pages;
}
- pagevec_release(pvec);
+ folio_batch_release(fbatch);
EASSERTF(ergo(ext->oe_start >= trunc_index + !!partial,
ext->oe_nr_pages == 0),
struct osc_async_page *oap = &ops->ops_oap;
struct osc_object *osc = oap->oap_obj;
struct client_obd *cli = osc_cli(osc);
- struct pagevec *pvec = &osc_env_info(env)->oti_pagevec;
+ struct folio_batch *fbatch = &osc_env_info(env)->oti_fbatch;
pgoff_t index;
unsigned int tmp;
unsigned int grants = 0;
/* We must not hold a page lock while we do osc_enter_cache()
* or osc_extent_find(), so we must mark dirty & unlock
- * any pages in the write commit pagevec. */
- if (pagevec_count(pvec)) {
- cb(env, io, pvec);
- pagevec_reinit(pvec);
+ * any pages in the write commit folio_batch.
+ */
+ if (folio_batch_count(fbatch)) {
+ cb(env, io, fbatch);
+ folio_batch_reinit(fbatch);
}
if (grants == 0) {
osc_page_gang_cbt cb, void *cbdata)
{
struct osc_page *ops;
- struct pagevec *pagevec;
+ struct folio_batch *fbatch;
void **pvec;
pgoff_t idx;
unsigned int nr;
idx = start;
pvec = osc_env_info(env)->oti_pvec;
- pagevec = &osc_env_info(env)->oti_pagevec;
- ll_pagevec_init(pagevec, 0);
+ fbatch = &osc_env_info(env)->oti_fbatch;
+ ll_folio_batch_init(fbatch, 0);
spin_lock(&osc->oo_tree_lock);
while ((nr = radix_tree_gang_lookup(&osc->oo_tree, pvec,
idx, OTI_PVEC_SIZE)) > 0) {
ops = pvec[i];
page = ops->ops_cl.cpl_page;
lu_ref_del(&page->cp_reference, "gang_lookup", current);
- cl_pagevec_put(env, page, pagevec);
+ cl_batch_put(env, page, fbatch);
}
- pagevec_release(pagevec);
+ folio_batch_release(fbatch);
if (nr < OTI_PVEC_SIZE || end_of_region)
break;
struct cl_page_list *qin, int from, int to,
cl_commit_cbt cb)
{
- struct cl_io *io = ios->cis_io;
- struct osc_io *oio = cl2osc_io(env, ios);
+ struct cl_io *io = ios->cis_io;
+ struct osc_io *oio = cl2osc_io(env, ios);
struct osc_object *osc = cl2osc(ios->cis_obj);
- struct cl_page *page;
- struct cl_page *last_page;
+ struct cl_page *page;
+ struct cl_page *last_page;
struct osc_page *opg;
- struct pagevec *pvec = &osc_env_info(env)->oti_pagevec;
+ struct folio_batch *fbatch = &osc_env_info(env)->oti_fbatch;
int result = 0;
ENTRY;
}
}
- ll_pagevec_init(pvec, 0);
+ ll_folio_batch_init(fbatch, 0);
while (qin->pl_nr > 0) {
struct osc_async_page *oap;
cl_page_list_del(env, qin, page);
/* if there are no more slots, do the callback & reinit */
- if (pagevec_add(pvec, page->cp_vmpage) == 0) {
- (*cb)(env, io, pvec);
- pagevec_reinit(pvec);
+ if (!folio_batch_add_page(fbatch, page->cp_vmpage)) {
+ (*cb)(env, io, fbatch);
+ folio_batch_reinit(fbatch);
}
}
/* The shrink interval is in seconds, so we can update it once per
osc_update_next_shrink(osc_cli(osc));
- /* Clean up any partially full pagevecs */
- if (pagevec_count(pvec) != 0)
- (*cb)(env, io, pvec);
+ /* Clean up any partially full folio_batches */
+ if (folio_batch_count(fbatch) != 0)
+ (*cb)(env, io, fbatch);
/* Can't access these pages any more. Page can be in transfer and
* complete at any time. */
}
}
-static void discard_pagevec(const struct lu_env *env, struct cl_io *io,
- struct cl_page **pvec, int max_index)
+static void discard_cl_pages(const struct lu_env *env, struct cl_io *io,
+ struct cl_page **pvec, int max_index)
{
- struct pagevec *pagevec = &osc_env_info(env)->oti_pagevec;
+ struct folio_batch *fbatch = &osc_env_info(env)->oti_fbatch;
int i;
- ll_pagevec_init(pagevec, 0);
+ ll_folio_batch_init(fbatch, 0);
for (i = 0; i < max_index; i++) {
struct cl_page *page = pvec[i];
cl_page_delete(env, page);
cl_page_discard(env, io, page);
cl_page_disown(env, io, page);
- cl_pagevec_put(env, page, pagevec);
+ cl_batch_put(env, page, fbatch);
pvec[i] = NULL;
}
- pagevec_release(pagevec);
+ folio_batch_release(fbatch);
}
/**
spin_unlock(&cli->cl_lru_list_lock);
if (clobj != NULL) {
- discard_pagevec(env, io, pvec, index);
+ discard_cl_pages(env, io, pvec, index);
index = 0;
cl_io_fini(env, io);
pvec[index++] = page;
if (unlikely(index == OTI_PVEC_SIZE)) {
spin_unlock(&cli->cl_lru_list_lock);
- discard_pagevec(env, io, pvec, index);
+ discard_cl_pages(env, io, pvec, index);
index = 0;
spin_lock(&cli->cl_lru_list_lock);
spin_unlock(&cli->cl_lru_list_lock);
if (clobj != NULL) {
- discard_pagevec(env, io, pvec, index);
+ discard_cl_pages(env, io, pvec, index);
cl_io_fini(env, io);
cl_object_put(env, clobj);
struct niobuf_local *lnb, int npages)
{
struct osd_thread_info *oti = osd_oti_get(env);
- struct pagevec pvec;
+ struct folio_batch fbatch;
int i;
- ll_pagevec_init(&pvec, 0);
+ ll_folio_batch_init(&fbatch, 0);
for (i = 0; i < npages; i++) {
struct page *page = lnb[i].lnb_page;
} else {
if (lnb[i].lnb_locked)
unlock_page(page);
- if (pagevec_add(&pvec, page) == 0)
- pagevec_release(&pvec);
+ if (folio_batch_add_page(&fbatch, page) == 0)
+ folio_batch_release(&fbatch);
}
lnb[i].lnb_page = NULL;
LASSERTF(oti->oti_dio_pages_used == 0, "%d\n", oti->oti_dio_pages_used);
- /* Release any partial pagevec */
- pagevec_release(&pvec);
+ /* Release any partial folio_batch */
+ folio_batch_release(&fbatch);
RETURN(0);
}