]) # LC_SECURITY_DENTRY_INIT_SECURTY_WITH_CTX
#
+# LC_HAVE_FILEMAP_GET_FOLIOS
+#
+# Linux commit v5.19-rc3-342-gbe0ced5e9cb8
+# filemap: Add filemap_get_folios()
+#
+AC_DEFUN([LC_SRC_HAVE_FILEMAP_GET_FOLIOS], [
+ LB2_LINUX_TEST_SRC([filemap_get_folios], [
+ #include <linux/pagemap.h>
+ ],[
+ struct address_space *m = NULL;
+ pgoff_t start = 0;
+ struct folio_batch *fbatch = NULL;
+ (void)filemap_get_folios(m, &start, ULONG_MAX, fbatch);
+ ],[-Werror])
+])
+AC_DEFUN([LC_HAVE_FILEMAP_GET_FOLIOS], [
+ AC_MSG_CHECKING([if filemap_get_folios() exists])
+ LB2_LINUX_TEST_RESULT([filemap_get_folios], [
+ AC_DEFINE(HAVE_FILEMAP_GET_FOLIOS, 1,
+ [filemap_get_folios() exists])
+ ])
+]) # LC_HAVE_FILEMAP_GET_FOLIOS
+
+#
# LC_HAVE_ADDRESS_SPACE_OPERATIONS_MIGRATE_FOLIO
#
# Linux commit v5.19-rc3-392-g5490da4f06d1
])
AC_DEFUN([LC_HAVE_FILEMAP_GET_FOLIOS_CONTIG], [
AC_MSG_CHECKING([if filemap_get_folios_contig() is available])
- LB2_LINUX_TEST_RESULT([filldir_ctx_return_bool], [
+ LB2_LINUX_TEST_RESULT([filemap_get_folios_contig], [
AC_DEFINE(HAVE_FILEMAP_GET_FOLIOS_CONTIG, 1,
[filemap_get_folios_contig() is available])
])
]) # LC_HAVE_LOCKS_LOCK_FILE_WAIT_IN_FILELOCK
#
+# LC_HAVE_FOLIO_BATCH_REINIT
+#
+# linux kernel v6.2-rc4-254-g811561288397
+# mm: pagevec: add folio_batch_reinit()
+#
+AC_DEFUN([LC_SRC_HAVE_FOLIO_BATCH_REINIT], [
+ LB2_LINUX_TEST_SRC([folio_batch_reinit_exists], [
+ #include <linux/pagevec.h>
+ ],[
+ struct folio_batch fbatch __attribute__ ((unused));
+
+ folio_batch_reinit(&fbatch);
+ ],[-Werror])
+])
+AC_DEFUN([LC_HAVE_FOLIO_BATCH_REINIT], [
+ AC_MSG_CHECKING([if 'folio_batch_reinit' is available])
+ LB2_LINUX_TEST_RESULT([folio_batch_reinit_exists], [
+ AC_DEFINE(HAVE_FOLIO_BATCH_REINIT, 1,
+ ['folio_batch_reinit' is available])
+ ])
+]) # LC_HAVE_FOLIO_BATCH_REINIT
+
+#
# LC_HAVE_IOV_ITER_IOVEC
#
# linux kernel v6.3-rc4-32-g6eb203e1a868
]) # LC_HAVE_GET_USER_PAGES_WITHOUT_VMA
#
+# LC_HAVE_FOLIO_BATCH
+#
+# linux kernel v5.16-rc4-36-g10331795fb79
+# pagevec: Add folio_batch
+#
+AC_DEFUN([LC_SRC_HAVE_FOLIO_BATCH], [
+ LB2_LINUX_TEST_SRC([struct_folio_batch_exists], [
+ #include <linux/pagevec.h>
+ ],[
+ struct folio_batch fbatch __attribute__ ((unused));
+
+ folio_batch_init(&fbatch);
+ ],[-Werror])
+])
+AC_DEFUN([LC_HAVE_FOLIO_BATCH], [
+ AC_MSG_CHECKING([if 'struct folio_batch' is available])
+ LB2_LINUX_TEST_RESULT([struct_folio_batch_exists], [
+ AC_DEFINE(HAVE_FOLIO_BATCH, 1,
+ ['struct folio_batch' is available])
+ ])
+]) # LC_HAVE_FOLIO_BATCH
+
+#
+# LC_HAVE_STRUCT_PAGEVEC
+#
+# linux kernel v6.4-rc4-438-g1e0877d58b1e
+# mm: remove struct pagevec
+#
+AC_DEFUN([LC_SRC_HAVE_STRUCT_PAGEVEC], [
+ LB2_LINUX_TEST_SRC([struct_pagevec_exists], [
+ #include <linux/pagevec.h>
+ ],[
+ struct pagevec *pvec = NULL;
+ (void)pvec;
+ ],[-Werror])
+])
+AC_DEFUN([LC_HAVE_STRUCT_PAGEVEC], [
+ AC_MSG_CHECKING([if 'struct pagevec' is available])
+ LB2_LINUX_TEST_RESULT([struct_pagevec_exists], [
+ AC_DEFINE(HAVE_PAGEVEC, 1,
+ ['struct pagevec' is available])
+ ])
+]) # LC_HAVE_STRUCT_PAGEVEC
+
+#
# LC_PROG_LINUX
#
# Lustre linux kernel checks
LC_SRC_HAVE_ADDRESS_SPACE_OPERATIONS_RELEASE_FOLIO
LC_SRC_HAVE_LSMCONTEXT_INIT
LC_SRC_SECURITY_DENTRY_INIT_SECURTY_WITH_CTX
+ LC_SRC_HAVE_FILEMAP_GET_FOLIOS
# 6.0
LC_SRC_HAVE_ADDRESS_SPACE_OPERATIONS_MIGRATE_FOLIO
LC_SRC_HAVE_U64_CAPABILITY
LC_SRC_HAVE_MNT_IDMAP_ARG
LC_SRC_HAVE_LOCKS_LOCK_FILE_WAIT_IN_FILELOCK
+ LC_SRC_HAVE_FOLIO_BATCH_REINIT
# 6.4
LC_SRC_HAVE_IOV_ITER_IOVEC
LC_SRC_HAVE_FILEMAP_SPLICE_READ
LC_SRC_HAVE_ENUM_ITER_PIPE
LC_SRC_HAVE_GET_USER_PAGES_WITHOUT_VMA
+ LC_SRC_HAVE_FOLIO_BATCH
+ LC_SRC_HAVE_STRUCT_PAGEVEC
# kernel patch to extend integrity interface
LC_SRC_BIO_INTEGRITY_PREP_FN
LC_HAVE_ADDRESS_SPACE_OPERATIONS_RELEASE_FOLIO
LC_HAVE_LSMCONTEXT_INIT
LC_SECURITY_DENTRY_INIT_SECURTY_WITH_CTX
+ LC_HAVE_FILEMAP_GET_FOLIOS
# 6.0
LC_HAVE_ADDRESS_SPACE_OPERATIONS_MIGRATE_FOLIO
LC_HAVE_U64_CAPABILITY
LC_HAVE_MNT_IDMAP_ARG
LC_HAVE_LOCKS_LOCK_FILE_WAIT_IN_FILELOCK
+ LC_HAVE_FOLIO_BATCH_REINIT
# 6.4
LC_HAVE_IOV_ITER_IOVEC
LC_HAVE_FILEMAP_SPLICE_READ
LC_HAVE_ENUM_ITER_PIPE
LC_HAVE_GET_USER_PAGES_WITHOUT_VMA
+ LC_HAVE_FOLIO_BATCH
+ LC_HAVE_STRUCT_PAGEVEC
# kernel patch to extend integrity interface
LC_BIO_INTEGRITY_PREP_FN
* See comments below for the description of i/o, page, and dlm-locking
* design.
*
- * @{
*/
/*
#include <linux/pagevec.h>
#include <libcfs/linux/linux-misc.h>
#include <lustre_dlm.h>
+#include <lustre_compat.h>
struct obd_info;
struct inode;
struct lu_device cd_lu_dev;
};
-/** \addtogroup cl_object cl_object
- * @{ */
+/* cl_object */
/**
* "Data attributes" of cl_object. Data attributes can be updated
* independently for a sub-object, and top-object's attributes are calculated
};
enum {
- /** configure layout, set up a new stripe, must be called while
- * holding layout lock. */
+ /** configure layout, set up a new stripe, must hold layout lock. */
OBJECT_CONF_SET = 0,
- /** invalidate the current stripe configuration due to losing
- * layout lock. */
+ /** invalidate the current stripe config when losing layout lock. */
OBJECT_CONF_INVALIDATE = 1,
- /** wait for old layout to go away so that new layout can be
- * set up. */
+ /** wait for old layout to go away so that new layout can be set up. */
OBJECT_CONF_WAIT = 2
};
* cl_object_operations::coo_attr_get() is used.
*/
int (*coo_attr_update)(const struct lu_env *env, struct cl_object *obj,
- const struct cl_attr *attr, unsigned valid);
+ const struct cl_attr *attr, unsigned int valid);
/**
* Mark the inode dirty. By this way, the inode will add into the
* writeback list of the corresponding @bdi_writeback, and then it will
* Extended header for client object.
*/
struct cl_object_header {
- /** Standard lu_object_header. cl_object::co_lu::lo_header points
- * here. */
+ /* Standard lu_object_header. cl_object::co_lu::lo_header points here.*/
struct lu_object_header coh_lu;
/**
&(obj)->co_lu.lo_header->loh_layers,\
co_lu.lo_linkage)
-/** @} cl_object */
-
#define CL_PAGE_EOF ((pgoff_t)~0ull)
-/** \addtogroup cl_page cl_page
- * @{ */
-
/** \struct cl_page
* Layered client page.
*
};
enum cl_page_type {
- /** Host page, the page is from the host inode which the cl_page
- * belongs to. */
+ /** Host page, the page is from the host inode which the cl_page
+ * belongs to.
+ */
CPT_CACHEABLE = 1,
- /** Transient page, the transient cl_page is used to bind a cl_page
- * to vmpage which is not belonging to the same object of cl_page.
- * it is used in DirectIO and lockless IO. */
+ /** Transient page, the transient cl_page is used to bind a cl_page
+ * to a vmpage that does not belong to the same object as the cl_page.
+ * It is used in DirectIO and lockless IO.
+ */
CPT_TRANSIENT,
CPT_NR
};
*
* Transfer methods.
*
- * @{
*/
/**
* Request type dependent vector of operations.
*
* \see cl_page_clip()
*/
- void (*cpo_clip)(const struct lu_env *env,
- const struct cl_page_slice *slice,
- int from, int to);
+ void (*cpo_clip)(const struct lu_env *env,
+ const struct cl_page_slice *slice, int from, int to);
/**
* Write out a page by kernel. This is only called by ll_writepage
* right now.
int (*cpo_flush)(const struct lu_env *env,
const struct cl_page_slice *slice,
struct cl_io *io);
- /** @} transfer */
};
/**
*/
#define cl_page_in_use_noref(pg) __page_in_use(pg, 0)
-/** @} cl_page */
-
-/** \addtogroup cl_lock cl_lock
- * @{ */
/** \struct cl_lock
*
* Extent locking on the client.
*/
struct cl_lock_slice {
struct cl_lock *cls_lock;
- /** Object slice corresponding to this lock slice. Immutable after
- * creation. */
+ /** Object slice corresponding to this lock slice. Immutable after
+ * creation.
+ */
struct cl_object *cls_obj;
const struct cl_lock_operations *cls_ops;
/** Linkage into cl_lock::cll_layers. Immutable after creation. */
* \see lov_lock_ops, osc_lock_ops
*/
struct cl_lock_operations {
- /** @{ */
/**
* Attempts to enqueue the lock. Called top-to-bottom.
*
*/
void (*clo_cancel)(const struct lu_env *env,
const struct cl_lock_slice *slice);
- /** @} */
/**
* Destructor. Frees resources and the slice.
*
/**
* Optional debugging helper. Prints given lock slice.
*/
- int (*clo_print)(const struct lu_env *env,
- void *cookie, lu_printer_t p,
- const struct cl_lock_slice *slice);
+ int (*clo_print)(const struct lu_env *env, void *cookie,
+ lu_printer_t p, const struct cl_lock_slice *slice);
};
#define CL_LOCK_DEBUG(mask, env, lock, format, ...) \
LBUG(); \
} while (0)
-/** @} cl_lock */
-
/** \addtogroup cl_page_list cl_page_list
* Page list used to perform collective operations on a group of pages.
*
*
* \todo XXX concurrency control.
*
- * @{
*/
struct cl_page_list {
- unsigned pl_nr;
+ unsigned int pl_nr;
struct list_head pl_pages;
};
struct cl_page_list c2_qout;
};
-/** @} cl_page_list */
-
-/** \addtogroup cl_io cl_io
- * @{ */
/** \struct cl_io
* I/O
*
};
typedef void (*cl_commit_cbt)(const struct lu_env *, struct cl_io *,
- struct pagevec *);
+ struct folio_batch *);
struct cl_read_ahead {
/* Maximum page index the readahead window will end.
* This is determined DLM lock coverage, RPC and stripe boundary.
- * cra_end is included. */
+ * cra_end is included.
+ */
pgoff_t cra_end_idx;
/* optimal RPC size for this read, by pages */
unsigned long cra_rpc_pages;
/* Release callback. If readahead holds resources underneath, this
- * function should be called to release it. */
+ * function should be called to release it.
+ */
void (*cra_release)(const struct lu_env *env,
struct cl_read_ahead *ra);
* executed on them.
*/
int (*cio_submit)(const struct lu_env *env,
- const struct cl_io_slice *slice,
- enum cl_req_type crt,
- struct cl_2queue *queue);
+ const struct cl_io_slice *slice,
+ enum cl_req_type crt, struct cl_2queue *queue);
/**
* Queue async page for write.
* The difference between cio_submit and cio_queue is that
/** discard all of dirty pages in a specific file range */
CL_FSYNC_DISCARD = 2,
/** start writeback and make sure they have reached storage before
- * return. OST_SYNC RPC must be issued and finished */
+ * return. OST_SYNC RPC must be issued and finished
+ */
CL_FSYNC_ALL = 3,
/** start writeback, thus the kernel can reclaim some memory */
CL_FSYNC_RECLAIM = 4,
* How many times the read has retried before this one.
* Set by the top level and consumed by the LOV.
*/
- unsigned ci_ndelay_tried;
+ unsigned int ci_ndelay_tried;
/**
* Designated mirror index for this I/O.
*/
- unsigned ci_designated_mirror;
+ unsigned int ci_designated_mirror;
/**
* Number of pages owned by this IO. For invariant checking.
*/
- unsigned ci_owned_nr;
+ unsigned int ci_owned_nr;
/**
* Range of write intent. Valid if ci_need_write_intent is set.
*/
struct lu_extent ci_write_intent;
};
-/** @} cl_io */
-
/**
* Per-transfer attributes.
*/
/** how many entities are in the cache right now */
CS_total,
/** how many entities in the cache are actively used (and cannot be
- * evicted) right now */
+ * evicted) right now
+ */
CS_busy,
/** how many entities were created at all */
CS_create,
*
* Type conversion and accessory functions.
*/
-/** @{ */
static inline struct cl_site *lu2cl_site(const struct lu_site *site)
{
const struct cl_lock_operations *ops);
void cl_io_slice_add(struct cl_io *io, struct cl_io_slice *slice,
struct cl_object *obj, const struct cl_io_operations *ops);
-/** @} helpers */
-/** \defgroup cl_object cl_object
- * @{ */
struct cl_object *cl_object_top (struct cl_object *o);
struct cl_object *cl_object_find(const struct lu_env *env, struct cl_device *cd,
const struct lu_fid *fid,
int cl_object_attr_get(const struct lu_env *env, struct cl_object *obj,
struct cl_attr *attr);
int cl_object_attr_update(const struct lu_env *env, struct cl_object *obj,
- const struct cl_attr *attr, unsigned valid);
+ const struct cl_attr *attr, unsigned int valid);
void cl_object_dirty_for_sync(const struct lu_env *env, struct cl_object *obj);
int cl_object_glimpse (const struct lu_env *env, struct cl_object *obj,
struct ost_lvb *lvb);
static inline int cl_object_refc(struct cl_object *clob)
{
struct lu_object_header *header = clob->co_lu.lo_header;
+
return atomic_read(&header->loh_ref);
}
-/** @} cl_object */
-
-/** \defgroup cl_page cl_page
- * @{ */
+/* cl_page */
struct cl_page *cl_page_find (const struct lu_env *env,
struct cl_object *obj,
pgoff_t idx, struct page *vmpage,
void cl_page_get (struct cl_page *page);
void cl_page_put (const struct lu_env *env,
struct cl_page *page);
-void cl_pagevec_put (const struct lu_env *env,
+void cl_batch_put (const struct lu_env *env,
struct cl_page *page,
- struct pagevec *pvec);
+ struct folio_batch *fbatch);
void cl_page_print (const struct lu_env *env, void *cookie,
lu_printer_t printer,
const struct cl_page *pg);
*
* Functions dealing with the ownership of page by io.
*/
-/** @{ */
int cl_page_own (const struct lu_env *env,
struct cl_io *io, struct cl_page *page);
struct cl_io *io, struct cl_page *page);
int cl_page_is_owned (const struct cl_page *pg, const struct cl_io *io);
-/** @} ownership */
-
/**
* \name transfer
*
* Functions dealing with the preparation of a page for a transfer, and
* tracking transfer state.
*/
-/** @{ */
int cl_page_prep (const struct lu_env *env, struct cl_io *io,
struct cl_page *pg, enum cl_req_type crt);
void cl_page_completion (const struct lu_env *env,
int cl_page_flush (const struct lu_env *env, struct cl_io *io,
struct cl_page *pg);
-/** @} transfer */
-
-
/**
* \name helper routines
* Functions to discard, delete and export a cl_page.
*/
-/** @{ */
void cl_page_discard(const struct lu_env *env, struct cl_io *io,
struct cl_page *pg);
void cl_page_delete(const struct lu_env *env, struct cl_page *pg);
loff_t cl_offset(const struct cl_object *obj, pgoff_t idx);
pgoff_t cl_index(const struct cl_object *obj, loff_t offset);
size_t cl_page_size(const struct cl_object *obj);
-
void cl_lock_print(const struct lu_env *env, void *cookie,
lu_printer_t printer, const struct cl_lock *lock);
void cl_lock_descr_print(const struct lu_env *env, void *cookie,
lu_printer_t printer,
const struct cl_lock_descr *descr);
-/* @} helper */
/**
* Data structure managing a client's cached pages. A count of
void cl_cache_incref(struct cl_client_cache *cache);
void cl_cache_decref(struct cl_client_cache *cache);
-/** @} cl_page */
-
-/** \defgroup cl_lock cl_lock
- * @{ */
+/* cl_lock */
int cl_lock_request(const struct lu_env *env, struct cl_io *io,
struct cl_lock *lock);
int cl_lock_init(const struct lu_env *env, struct cl_lock *lock,
struct cl_lock *lock, struct cl_sync_io *anchor);
void cl_lock_cancel(const struct lu_env *env, struct cl_lock *lock);
-/** @} cl_lock */
-
-/** \defgroup cl_io cl_io
- * @{ */
-
+/* cl_io */
int cl_io_init (const struct lu_env *env, struct cl_io *io,
enum cl_io_type iot, struct cl_object *obj);
int cl_io_sub_init (const struct lu_env *env, struct cl_io *io,
#define CL_IO_SLICE_CLEAN(obj, base) memset_startat(obj, 0, base)
-/** @} cl_io */
-
-/** \defgroup cl_page_list cl_page_list
- * @{ */
-
+/* cl_page_list */
/**
* Last page in the page list.
*/
void cl_2queue_fini (const struct lu_env *env, struct cl_2queue *queue);
void cl_2queue_init_page(struct cl_2queue *queue, struct cl_page *page);
-/** @} cl_page_list */
-
void cl_req_attr_set(const struct lu_env *env, struct cl_object *obj,
struct cl_req_attr *attr);
-/** \defgroup cl_sync_io cl_sync_io
- * @{ */
-
+/* cl_sync_io */
struct cl_sync_io;
struct cl_dio_aio;
struct cl_sub_dio;
void ll_release_user_pages(struct page **pages, int npages);
-/** @} cl_sync_io */
-
/** \defgroup cl_env cl_env
*
* lu_env handling for a client.
* longer used environments instead of destroying them;
*
* \see lu_env, lu_context, lu_context_key
- * @{ */
+ */
+/* cl_env */
struct lu_env *cl_env_get(__u16 *refcheck);
struct lu_env *cl_env_alloc(__u16 *refcheck, __u32 tags);
void cl_env_put(struct lu_env *env, __u16 *refcheck);
-unsigned cl_env_cache_purge(unsigned nr);
+unsigned int cl_env_cache_purge(unsigned int nr);
struct lu_env *cl_env_percpu_get(void);
void cl_env_percpu_put(struct lu_env *env);
-/** @} cl_env */
-
/*
* Misc
*/
struct cl_device *cl_type_setup(const struct lu_env *env, struct lu_site *site,
struct lu_device_type *ldt,
struct lu_device *next);
-/** @} clio */
int cl_global_init(void);
void cl_global_fini(void);
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/security.h>
+#include <linux/pagevec.h>
#include <libcfs/linux/linux-fs.h>
#include <obd_support.h>
#define smp_store_mb(var, value) set_mb(var, value)
#endif
-#ifdef HAVE_PAGEVEC_INIT_ONE_PARAM
-#define ll_pagevec_init(pvec, n) pagevec_init(pvec)
-#else
-#define ll_pagevec_init(pvec, n) pagevec_init(pvec, n)
-#endif
-
#ifdef HAVE_D_COUNT
# define ll_d_count(d) d_count(d)
#else
#endif /* HAVE_READ_CACHE_PAGE_WANTS_FILE */
}
+#if defined(HAVE_FOLIO_BATCH) && defined(HAVE_FILEMAP_GET_FOLIOS)
+# define ll_folio_batch_init(batch, n) folio_batch_init(batch)
+# define ll_filemap_get_folios(m, s, e, fbatch) \
+ filemap_get_folios(m, &s, e, fbatch)
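+/* fbatch_at*() address entry f of the batch and page pg within that folio */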
+# define fbatch_at(fbatch, f) ((fbatch)->folios[(f)])
+# define fbatch_at_npgs(fbatch, f) folio_nr_pages((fbatch)->folios[(f)])
+# define fbatch_at_pg(fbatch, f, pg) folio_page((fbatch)->folios[(f)], (pg))
+# define folio_batch_add_page(fbatch, page) \
+ folio_batch_add(fbatch, page_folio(page))
+# ifndef HAVE_FOLIO_BATCH_REINIT
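+/* fallback for kernels without folio_batch_reinit(): forget the cached
+ * entries without dropping their page references
+ */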
+static inline void folio_batch_reinit(struct folio_batch *fbatch)
+{
+ fbatch->nr = 0;
+}
+# endif /* HAVE_FOLIO_BATCH_REINIT */
+
+#else /* !(HAVE_FOLIO_BATCH && HAVE_FILEMAP_GET_FOLIOS) */
+
+# ifdef HAVE_PAGEVEC
+# define folio_batch pagevec
+# endif
+# define folio_batch_init(pvec) pagevec_init(pvec)
+# define folio_batch_reinit(pvec) pagevec_reinit(pvec)
+# define folio_batch_count(pvec) pagevec_count(pvec)
+# define folio_batch_space(pvec) pagevec_space(pvec)
+# define folio_batch_add_page(pvec, page) \
+ pagevec_add(pvec, page)
+# define folio_batch_release(pvec) \
+ pagevec_release(((struct pagevec *)pvec))
+# ifdef HAVE_PAGEVEC_INIT_ONE_PARAM
+# define ll_folio_batch_init(pvec, n) pagevec_init(pvec)
+# else
+# define ll_folio_batch_init(pvec, n) pagevec_init(pvec, n)
+# endif
+# ifdef HAVE_PAGEVEC_LOOKUP_THREE_PARAM
+# define ll_filemap_get_folios(m, s, e, pvec) \
+ pagevec_lookup(pvec, m, &s)
+# else
+# define ll_filemap_get_folios(m, s, e, pvec) \
+ pagevec_lookup(pvec, m, s, PAGEVEC_SIZE)
+# endif
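+/* a pagevec entry is always a single page, so each "folio" spans one page */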
+# define fbatch_at(pvec, n) ((pvec)->pages[(n)])
+# define fbatch_at_npgs(pvec, n) 1
+# define fbatch_at_pg(pvec, n, pg) ((pvec)->pages[(n)])
+#endif /* HAVE_FOLIO_BATCH && HAVE_FILEMAP_GET_FOLIOS */
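+
+/*
+ * Illustrative sketch (not compiled here) of how the wrappers above are
+ * expected to be used; 'mapping' stands for whatever address_space the
+ * caller walks:
+ *
+ *	struct folio_batch fbatch;
+ *	pgoff_t index = 0;
+ *	unsigned int i, nr;
+ *
+ *	ll_folio_batch_init(&fbatch, 0);
+ *	while ((nr = ll_filemap_get_folios(mapping, index, ~0UL,
+ *					   &fbatch)) != 0) {
+ *		for (i = 0; i < nr; i++)
+ *			index = fbatch_at_pg(&fbatch, i, 0)->index + 1;
+ *		folio_batch_release(&fbatch);
+ *	}
+ */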
+
#endif /* _LUSTRE_COMPAT_H */
union ldlm_policy_data oti_policy;
struct cl_attr oti_attr;
struct cl_io oti_io;
- struct pagevec oti_pagevec;
+ struct folio_batch oti_fbatch;
void *oti_pvec[OTI_PVEC_SIZE];
/**
* Fields used by cl_lock_discard_pages().
struct inode *pcc_inode)
{
#ifdef HAVE_ADD_TO_PAGE_CACHE_LOCKED
- struct pagevec pvec;
+ struct folio_batch fbatch;
pgoff_t index = 0;
- int nr_pages;
+ unsigned int nr;
int rc = 0;
- ll_pagevec_init(&pvec, 0);
+ ll_folio_batch_init(&fbatch, 0);
for ( ; ; ) {
struct page *page;
int i;
-#ifdef HAVE_PAGEVEC_LOOKUP_THREE_PARAM
- nr_pages = pagevec_lookup(&pvec, pcc_inode->i_mapping, &index);
-#else
- nr_pages = pagevec_lookup(&pvec, pcc_inode->i_mapping, index,
- PAGEVEC_SIZE);
-#endif
- if (nr_pages <= 0)
+ nr = ll_filemap_get_folios(pcc_inode->i_mapping,
+ index, ~0UL, &fbatch);
+ if (nr == 0)
break;
- for (i = 0; i < nr_pages; i++) {
- page = pvec.pages[i];
+ for (i = 0; i < nr; i++) {
+#if defined(HAVE_FOLIO_BATCH) && defined(HAVE_FILEMAP_GET_FOLIOS)
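+ /* operate on the folio's head page */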
+ page = &fbatch.folios[i]->page;
+#else
+ page = fbatch.pages[i];
+#endif
lock_page(page);
wait_on_page_writeback(page);
page->index, GFP_KERNEL);
if (rc) {
unlock_page(page);
- pagevec_release(&pvec);
+ folio_batch_release(&fbatch);
return rc;
}
}
index = page->index + 1;
- pagevec_release(&pvec);
+ folio_batch_release(&fbatch);
cond_resched();
}
#include "llite_internal.h"
#include "vvp_internal.h"
+#include <lustre_compat.h>
#include <libcfs/linux/linux-misc.h>
static struct vvp_io *cl2vvp_io(const struct lu_env *env,
* Backwards compat for 3.x, 5.x kernels relating to memcg handling
* & rename of radix tree to xarray.
*/
-void vvp_set_pagevec_dirty(struct pagevec *pvec)
+void vvp_set_batch_dirty(struct folio_batch *fbatch)
{
- struct page *page = pvec->pages[0];
- int count = pagevec_count(pvec);
+ struct page *page = fbatch_at_pg(fbatch, 0, 0);
+ int count = folio_batch_count(fbatch);
int i;
+#if !defined(HAVE_FILEMAP_GET_FOLIOS) || defined(HAVE_KALLSYMS_LOOKUP_NAME)
+ int pg, npgs;
+#endif
#ifdef HAVE_KALLSYMS_LOOKUP_NAME
struct address_space *mapping = page->mapping;
unsigned long flags;
unsigned long skip_pages = 0;
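+ /* a bit set in skip_pages marks a page found already dirty, so the
+ * accounting loop below can skip it (at most BITS_PER_LONG pages)
+ */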
+ int pgno;
int dirtied = 0;
#endif
BUILD_BUG_ON(PAGEVEC_SIZE > BITS_PER_LONG);
LASSERTF(page->mapping,
- "mapping must be set. page %p, page->private (cl_page) %p\n",
+ "mapping must be set. page %px, page->private (cl_page) %px\n",
page, (void *) page->private);
/*
*/
#ifndef HAVE_ACCOUNT_PAGE_DIRTIED_EXPORT
if (!vvp_account_page_dirtied) {
- for (i = 0; i < count; i++)
- __set_page_dirty_nobuffers(pvec->pages[i]);
+ for (i = 0; i < count; i++) {
+#if defined(HAVE_FOLIO_BATCH) && defined(HAVE_FILEMAP_GET_FOLIOS)
+ filemap_dirty_folio(page->mapping, fbatch->folios[i]);
+#else
+ npgs = fbatch_at_npgs(fbatch, i);
+ for (pg = 0; pg < npgs; pg++) {
+ page = fbatch_at_pg(fbatch, i, pg);
+ __set_page_dirty_nobuffers(page);
+ }
+#endif
+ }
EXIT;
}
#endif
+ /* account_page_dirtied is available directly or via kallsyms */
#ifdef HAVE_KALLSYMS_LOOKUP_NAME
- for (i = 0; i < count; i++) {
- page = pvec->pages[i];
+ for (pgno = i = 0; i < count; i++) {
+ npgs = fbatch_at_npgs(fbatch, i);
+ for (pg = 0; pg < npgs; pg++) {
+ page = fbatch_at_pg(fbatch, i, pg);
- ClearPageReclaim(page);
+ ClearPageReclaim(page);
- vvp_lock_page_memcg(page);
- if (TestSetPageDirty(page)) {
- /* page is already dirty .. no extra work needed
- * set a flag for the i'th page to be skipped
- */
- vvp_unlock_page_memcg(page);
- skip_pages |= (1 << i);
+ vvp_lock_page_memcg(page);
+ if (TestSetPageDirty(page)) {
+ /* page is already dirty, so no extra work is needed;
+ * set a flag so this page is skipped below
+ */
+ vvp_unlock_page_memcg(page);
+ skip_pages |= (1ul << pgno++);
+ LASSERTF(pgno <= BITS_PER_LONG,
+ "Limit exceeded pgno: %d/%d\n", pgno,
+ BITS_PER_LONG);
+ }
}
}
* dirty_nobuffers should be impossible because we hold the page lock.)
* 4. All mappings are the same because i/o is only to one file.
*/
- for (i = 0; i < count; i++) {
- page = pvec->pages[i];
- /* if the i'th page was unlocked above, skip it here */
- if ((skip_pages >> i) & 1)
- continue;
-
- LASSERTF(page->mapping == mapping,
- "all pages must have the same mapping. page %p, mapping %p, first mapping %p\n",
- page, page->mapping, mapping);
- WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
- ll_account_page_dirtied(page, mapping);
- dirtied++;
- vvp_unlock_page_memcg(page);
+ for (pgno = i = 0; i < count; i++) {
+ npgs = fbatch_at_npgs(fbatch, i);
+ for (pg = 0; pg < npgs; pg++) {
+ page = fbatch_at_pg(fbatch, i, pg);
+ /* if this page was unlocked above, skip it here */
+ if ((skip_pages >> pgno++) & 1)
+ continue;
+
+ LASSERTF(page->mapping == mapping,
+ "all pages must have the same mapping. page %px, mapping %px, first mapping %px\n",
+ page, page->mapping, mapping);
+ WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
+ ll_account_page_dirtied(page, mapping);
+ dirtied++;
+ vvp_unlock_page_memcg(page);
+ }
}
ll_xa_unlock_irqrestore(&mapping->i_pages, flags);
}
static void write_commit_callback(const struct lu_env *env, struct cl_io *io,
- struct pagevec *pvec)
+ struct folio_batch *fbatch)
{
+ struct page *vmpage;
+ struct cl_page *page;
+ int pg, npgs;
int count = 0;
int i = 0;
ENTRY;
- count = pagevec_count(pvec);
+ count = folio_batch_count(fbatch);
LASSERT(count > 0);
for (i = 0; i < count; i++) {
- struct page *vmpage = pvec->pages[i];
- SetPageUptodate(vmpage);
+ npgs = fbatch_at_npgs(fbatch, i);
+ for (pg = 0; pg < npgs; pg++)
+ SetPageUptodate(fbatch_at_pg(fbatch, i, pg));
}
- vvp_set_pagevec_dirty(pvec);
+ vvp_set_batch_dirty(fbatch);
for (i = 0; i < count; i++) {
- struct page *vmpage = pvec->pages[i];
- struct cl_page *page = (struct cl_page *) vmpage->private;
- cl_page_disown(env, io, page);
- lu_ref_del(&page->cp_reference, "cl_io", cl_io_top(io));
- cl_page_put(env, page);
+ npgs = fbatch_at_npgs(fbatch, i);
+ for (pg = 0; pg < npgs; pg++) {
+ vmpage = fbatch_at_pg(fbatch, i, pg);
+ page = (struct cl_page *) vmpage->private;
+ cl_page_disown(env, io, page);
+ lu_ref_del(&page->cp_reference, "cl_io", cl_io_top(io));
+ cl_page_put(env, page);
+ }
}
EXIT;
}
static void mkwrite_commit_callback(const struct lu_env *env, struct cl_io *io,
- struct pagevec *pvec)
+ struct folio_batch *fbatch)
{
- vvp_set_pagevec_dirty(pvec);
+ vvp_set_batch_dirty(fbatch);
}
static int vvp_io_fault_start(const struct lu_env *env,
}
static void cl_page_free(const struct lu_env *env, struct cl_page *cp,
- struct pagevec *pvec)
+ struct folio_batch *fbatch)
{
struct cl_object *obj = cp->cp_obj;
unsigned short bufsize = cl_object_header(obj)->coh_page_bufsize;
LASSERT(vmpage != NULL);
LASSERT((struct cl_page *)vmpage->private != cp);
- if (pvec != NULL) {
- if (!pagevec_add(pvec, vmpage))
- pagevec_release(pvec);
+ if (fbatch != NULL) {
+ if (!folio_batch_add_page(fbatch, vmpage))
+ folio_batch_release(fbatch);
} else {
put_page(vmpage);
}
EXPORT_SYMBOL(cl_page_get);
/**
- * Releases a reference to a page, use the pagevec to release the pages
+ * Releases a reference to a page, using the folio_batch to release the pages
* in batch if provided.
*
- * Users need to do a final pagevec_release() to release any trailing pages.
+ * Users need to do a final folio_batch_release() to release any trailing pages.
*/
-void cl_pagevec_put(const struct lu_env *env, struct cl_page *page,
- struct pagevec *pvec)
+void cl_batch_put(const struct lu_env *env, struct cl_page *page,
+ struct folio_batch *fbatch)
{
ENTRY;
CL_PAGE_HEADER(D_TRACE, env, page, "%d\n",
* Page is no longer reachable by other threads. Tear
* it down.
*/
- cl_page_free(env, page, pvec);
+ cl_page_free(env, page, fbatch);
}
EXIT;
}
-EXPORT_SYMBOL(cl_pagevec_put);
+EXPORT_SYMBOL(cl_batch_put);
/**
- * Releases a reference to a page, wrapper to cl_pagevec_put
+ * Releases a reference to a page; wrapper around cl_batch_put()
*
* When last reference is released, page is returned to the cache, unless it
* is in cl_page_state::CPS_FREEING state, in which case it is immediately
*/
void cl_page_put(const struct lu_env *env, struct cl_page *page)
{
- cl_pagevec_put(env, page, NULL);
+ cl_batch_put(env, page, NULL);
}
EXPORT_SYMBOL(cl_page_put);
struct client_obd *cli = osc_cli(obj);
struct osc_async_page *oap;
struct osc_async_page *tmp;
- struct pagevec *pvec;
+ struct folio_batch *fbatch;
int pages_in_chunk = 0;
int ppc_bits = cli->cl_chunkbits -
PAGE_SHIFT;
io = osc_env_thread_io(env);
io->ci_obj = cl_object_top(osc2cl(obj));
io->ci_ignore_layout = 1;
- pvec = &osc_env_info(env)->oti_pagevec;
- ll_pagevec_init(pvec, 0);
+ fbatch = &osc_env_info(env)->oti_fbatch;
+ ll_folio_batch_init(fbatch, 0);
rc = cl_io_init(env, io, CIT_MISC, io->ci_obj);
if (rc < 0)
GOTO(out, rc);
}
lu_ref_del(&page->cp_reference, "truncate", current);
- cl_pagevec_put(env, page, pvec);
+ cl_batch_put(env, page, fbatch);
--ext->oe_nr_pages;
++nr_pages;
}
- pagevec_release(pvec);
+ folio_batch_release(fbatch);
EASSERTF(ergo(ext->oe_start >= trunc_index + !!partial,
ext->oe_nr_pages == 0),
struct osc_async_page *oap = &ops->ops_oap;
struct client_obd *cli = oap->oap_cli;
struct osc_object *osc = oap->oap_obj;
- struct pagevec *pvec = &osc_env_info(env)->oti_pagevec;
+ struct folio_batch *fbatch = &osc_env_info(env)->oti_fbatch;
pgoff_t index;
unsigned int tmp;
unsigned int grants = 0;
/* We must not hold a page lock while we do osc_enter_cache()
* or osc_extent_find(), so we must mark dirty & unlock
- * any pages in the write commit pagevec. */
- if (pagevec_count(pvec)) {
- cb(env, io, pvec);
- pagevec_reinit(pvec);
+ * any pages in the write commit folio_batch.
+ */
+ if (folio_batch_count(fbatch)) {
+ cb(env, io, fbatch);
+ folio_batch_reinit(fbatch);
}
if (grants == 0) {
osc_page_gang_cbt cb, void *cbdata)
{
struct osc_page *ops;
- struct pagevec *pagevec;
+ struct folio_batch *fbatch;
void **pvec;
pgoff_t idx;
unsigned int nr;
idx = start;
pvec = osc_env_info(env)->oti_pvec;
- pagevec = &osc_env_info(env)->oti_pagevec;
- ll_pagevec_init(pagevec, 0);
+ fbatch = &osc_env_info(env)->oti_fbatch;
+ ll_folio_batch_init(fbatch, 0);
spin_lock(&osc->oo_tree_lock);
while ((nr = radix_tree_gang_lookup(&osc->oo_tree, pvec,
idx, OTI_PVEC_SIZE)) > 0) {
ops = pvec[i];
page = ops->ops_cl.cpl_page;
lu_ref_del(&page->cp_reference, "gang_lookup", current);
- cl_pagevec_put(env, page, pagevec);
+ cl_batch_put(env, page, fbatch);
}
- pagevec_release(pagevec);
+ folio_batch_release(fbatch);
if (nr < OTI_PVEC_SIZE || end_of_region)
break;
struct cl_page_list *qin, int from, int to,
cl_commit_cbt cb)
{
- struct cl_io *io = ios->cis_io;
- struct osc_io *oio = cl2osc_io(env, ios);
+ struct cl_io *io = ios->cis_io;
+ struct osc_io *oio = cl2osc_io(env, ios);
struct osc_object *osc = cl2osc(ios->cis_obj);
- struct cl_page *page;
- struct cl_page *last_page;
+ struct cl_page *page;
+ struct cl_page *last_page;
struct osc_page *opg;
- struct pagevec *pvec = &osc_env_info(env)->oti_pagevec;
+ struct folio_batch *fbatch = &osc_env_info(env)->oti_fbatch;
int result = 0;
ENTRY;
}
}
- ll_pagevec_init(pvec, 0);
+ ll_folio_batch_init(fbatch, 0);
while (qin->pl_nr > 0) {
struct osc_async_page *oap;
cl_page_list_del(env, qin, page);
/* if there are no more slots, do the callback & reinit */
- if (pagevec_add(pvec, page->cp_vmpage) == 0) {
- (*cb)(env, io, pvec);
- pagevec_reinit(pvec);
+ if (!folio_batch_add_page(fbatch, page->cp_vmpage)) {
+ (*cb)(env, io, fbatch);
+ folio_batch_reinit(fbatch);
}
}
/* The shrink interval is in seconds, so we can update it once per
osc_update_next_shrink(osc_cli(osc));
- /* Clean up any partially full pagevecs */
- if (pagevec_count(pvec) != 0)
- (*cb)(env, io, pvec);
+ /* Clean up any partially full folio_batches */
+ if (folio_batch_count(fbatch) != 0)
+ (*cb)(env, io, fbatch);
/* Can't access these pages any more. Page can be in transfer and
* complete at any time. */
}
}
-static void discard_pagevec(const struct lu_env *env, struct cl_io *io,
- struct cl_page **pvec, int max_index)
+static void discard_cl_pages(const struct lu_env *env, struct cl_io *io,
+ struct cl_page **pvec, int max_index)
{
- struct pagevec *pagevec = &osc_env_info(env)->oti_pagevec;
+ struct folio_batch *fbatch = &osc_env_info(env)->oti_fbatch;
int i;
- ll_pagevec_init(pagevec, 0);
+ ll_folio_batch_init(fbatch, 0);
for (i = 0; i < max_index; i++) {
struct cl_page *page = pvec[i];
cl_page_delete(env, page);
cl_page_discard(env, io, page);
cl_page_disown(env, io, page);
- cl_pagevec_put(env, page, pagevec);
+ cl_batch_put(env, page, fbatch);
pvec[i] = NULL;
}
- pagevec_release(pagevec);
+ folio_batch_release(fbatch);
}
/**
spin_unlock(&cli->cl_lru_list_lock);
if (clobj != NULL) {
- discard_pagevec(env, io, pvec, index);
+ discard_cl_pages(env, io, pvec, index);
index = 0;
cl_io_fini(env, io);
pvec[index++] = page;
if (unlikely(index == OTI_PVEC_SIZE)) {
spin_unlock(&cli->cl_lru_list_lock);
- discard_pagevec(env, io, pvec, index);
+ discard_cl_pages(env, io, pvec, index);
index = 0;
spin_lock(&cli->cl_lru_list_lock);
spin_unlock(&cli->cl_lru_list_lock);
if (clobj != NULL) {
- discard_pagevec(env, io, pvec, index);
+ discard_cl_pages(env, io, pvec, index);
cl_io_fini(env, io);
cl_object_put(env, clobj);
struct niobuf_local *lnb, int npages)
{
struct osd_thread_info *oti = osd_oti_get(env);
- struct pagevec pvec;
+ struct folio_batch fbatch;
int i;
- ll_pagevec_init(&pvec, 0);
+ ll_folio_batch_init(&fbatch, 0);
for (i = 0; i < npages; i++) {
struct page *page = lnb[i].lnb_page;
} else {
if (lnb[i].lnb_locked)
unlock_page(page);
- if (pagevec_add(&pvec, page) == 0)
- pagevec_release(&pvec);
+ if (folio_batch_add_page(&fbatch, page) == 0)
+ folio_batch_release(&fbatch);
}
lnb[i].lnb_page = NULL;
}
- /* Release any partial pagevec */
- pagevec_release(&pvec);
+ /* Release any partial folio_batch */
+ folio_batch_release(&fbatch);
RETURN(0);
}