From da6abb9e18436edf7ce27089904975f35263f689 Mon Sep 17 00:00:00 2001
From: Shaun Tancheff
Date: Wed, 6 Mar 2024 11:33:09 -0800
Subject: [PATCH] LU-17081 build: Prefer folio_batch to pagevec

Linux commit v5.16-rc4-36-g10331795fb79
  pagevec: Add folio_batch
Linux commit v6.2-rc4-254-g811561288397
  mm: pagevec: add folio_batch_reinit()
Linux commit v6.4-rc4-438-g1e0877d58b1e
  mm: remove struct pagevec

Use folio_batch, and provide wrappers so that older kernels fall back
to pagevec handling; conditionally provide folio_batch_reinit() where
the kernel lacks it.

Add macros to ease adding pages to a folio_batch, as well as unwinding
batches of struct folio where a struct page is needed.

Lustre-change: https://review.whamcloud.com/52259
Lustre-commit: TBD (from 81c567481b7be1d9d4655a47027918f7a8d16ff8)

HPE-bug-id: LUS-11811
Signed-off-by: Shaun Tancheff
Change-Id: Ie70e4851df00a73f194aaa6631678b54b5d128a1
Reviewed-on: https://review.whamcloud.com/c/ex/lustre-release/+/54074
Tested-by: jenkins
Tested-by: Maloo
Reviewed-by: Yang Sheng
Reviewed-by: Andreas Dilger
---
 lustre/autoconf/lustre-core.m4 | 102 ++++++++++++++++++++++++++-
 lustre/include/cl_object.h     | 154 +++++++++++++----------------------------
 lustre/include/lustre_compat.h |  53 ++++++++++++--
 lustre/include/lustre_osc.h    |   2 +-
 lustre/llite/pcc.c             |  28 ++++----
 lustre/llite/vvp_io.c          | 113 +++++++++++++++++++-----------
 lustre/obdclass/cl_page.c      |  24 +++----
 lustre/osc/osc_cache.c         |  31 +++++----
 lustre/osc/osc_io.c            |  24 +++----
 lustre/osc/osc_page.c          |  18 ++---
 lustre/osd-ldiskfs/osd_io.c    |  12 ++--
 11 files changed, 338 insertions(+), 223 deletions(-)

diff --git a/lustre/autoconf/lustre-core.m4 b/lustre/autoconf/lustre-core.m4
index f29b7ba..7c77960 100644
--- a/lustre/autoconf/lustre-core.m4
+++ b/lustre/autoconf/lustre-core.m4
@@ -3305,6 +3305,30 @@ AC_DEFUN([LC_SECURITY_DENTRY_INIT_SECURTY_WITH_CTX], [
 ]) # LC_SECURITY_DENTRY_INIT_SECURTY_WITH_CTX
 
 #
+# LC_HAVE_FILEMAP_GET_FOLIOS
+#
+# Linux commit v5.19-rc3-342-gbe0ced5e9cb8
+#   filemap: Add filemap_get_folios()
+#
+AC_DEFUN([LC_SRC_HAVE_FILEMAP_GET_FOLIOS], [
+	LB2_LINUX_TEST_SRC([filemap_get_folios], [
+		#include <linux/pagemap.h>
+	],[
+		struct address_space *m = NULL;
+		pgoff_t start = 0;
+		struct folio_batch *fbatch = NULL;
+		(void)filemap_get_folios(m, &start, ULONG_MAX, fbatch);
+	],[-Werror])
+])
+AC_DEFUN([LC_HAVE_FILEMAP_GET_FOLIOS], [
+	AC_MSG_CHECKING([if filemap_get_folios() exists])
+	LB2_LINUX_TEST_RESULT([filemap_get_folios], [
+		AC_DEFINE(HAVE_FILEMAP_GET_FOLIOS, 1,
+			[filemap_get_folios() exists])
+	])
+]) # LC_HAVE_FILEMAP_GET_FOLIOS
+
+#
 # LC_HAVE_ADDRESS_SPACE_OPERATIONS_MIGRATE_FOLIO
 #
 # Linux commit v5.19-rc3-392-g5490da4f06d1
@@ -3508,7 +3532,7 @@ AC_DEFUN([LC_SRC_HAVE_FILEMAP_GET_FOLIOS_CONTIG], [
 ])
 AC_DEFUN([LC_HAVE_FILEMAP_GET_FOLIOS_CONTIG], [
 	AC_MSG_CHECKING([if filemap_get_folios_contig() is available])
-	LB2_LINUX_TEST_RESULT([filldir_ctx_return_bool], [
+	LB2_LINUX_TEST_RESULT([filemap_get_folios_contig], [
 		AC_DEFINE(HAVE_FILEMAP_GET_FOLIOS_CONTIG, 1,
 			[filemap_get_folios_contig() is available])
 	])
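For reference, the API these probes detect is used roughly as follows; a
minimal sketch against a v6.x kernel (the mapping walk and the page-count
bookkeeping are illustrative only, not part of this patch):

    #include <linux/pagemap.h>
    #include <linux/pagevec.h>
    #include <linux/limits.h>
    #include <linux/sched.h>

    /* Visit every cached folio of @mapping, then drop the references
     * taken by filemap_get_folios().
     */
    static unsigned long visit_mapping(struct address_space *mapping)
    {
            struct folio_batch fbatch;
            unsigned long pages = 0;
            pgoff_t start = 0;
            unsigned int i, nr;

            folio_batch_init(&fbatch);
            while ((nr = filemap_get_folios(mapping, &start, ULONG_MAX,
                                            &fbatch)) != 0) {
                    for (i = 0; i < nr; i++)
                            pages += folio_nr_pages(fbatch.folios[i]);
                    folio_batch_release(&fbatch);  /* put refs, reset count */
                    cond_resched();
            }
            return pages;
    }
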
@@ -3641,6 +3665,29 @@ AC_DEFUN([LC_HAVE_LOCKS_LOCK_FILE_WAIT_IN_FILELOCK], [
 ]) # LC_HAVE_LOCKS_LOCK_FILE_WAIT_IN_FILELOCK
 
 #
+# LC_HAVE_FOLIO_BATCH_REINIT
+#
+# linux kernel v6.2-rc4-254-g811561288397
+#   mm: pagevec: add folio_batch_reinit()
+#
+AC_DEFUN([LC_SRC_HAVE_FOLIO_BATCH_REINIT], [
+	LB2_LINUX_TEST_SRC([folio_batch_reinit_exists], [
+		#include <linux/pagevec.h>
+	],[
+		struct folio_batch fbatch __attribute__ ((unused));
+
+		folio_batch_reinit(&fbatch);
+	],[-Werror])
+])
+AC_DEFUN([LC_HAVE_FOLIO_BATCH_REINIT], [
+	AC_MSG_CHECKING([if 'folio_batch_reinit' is available])
+	LB2_LINUX_TEST_RESULT([folio_batch_reinit_exists], [
+		AC_DEFINE(HAVE_FOLIO_BATCH_REINIT, 1,
+			['folio_batch_reinit' is available])
+	])
+]) # LC_HAVE_FOLIO_BATCH_REINIT
+
+#
 # LC_HAVE_IOV_ITER_IOVEC
 #
 # linux kernel v6.3-rc4-32-g6eb203e1a868
@@ -3800,6 +3847,51 @@ AC_DEFUN([LC_HAVE_GET_USER_PAGES_WITHOUT_VMA], [
 ]) # LC_HAVE_GET_USER_PAGES_WITHOUT_VMA
 
 #
+# LC_HAVE_FOLIO_BATCH
+#
+# linux kernel v5.16-rc4-36-g10331795fb79
+#   pagevec: Add folio_batch
+#
+AC_DEFUN([LC_SRC_HAVE_FOLIO_BATCH], [
+	LB2_LINUX_TEST_SRC([struct_folio_batch_exists], [
+		#include <linux/pagevec.h>
+	],[
+		struct folio_batch fbatch __attribute__ ((unused));
+
+		folio_batch_init(&fbatch);
+	],[-Werror])
+])
+AC_DEFUN([LC_HAVE_FOLIO_BATCH], [
+	AC_MSG_CHECKING([if 'struct folio_batch' is available])
+	LB2_LINUX_TEST_RESULT([struct_folio_batch_exists], [
+		AC_DEFINE(HAVE_FOLIO_BATCH, 1,
+			['struct folio_batch' is available])
+	])
+]) # LC_HAVE_FOLIO_BATCH
+
+#
+# LC_HAVE_STRUCT_PAGEVEC
+#
+# linux kernel v6.4-rc4-438-g1e0877d58b1e
+#   mm: remove struct pagevec
+#
+AC_DEFUN([LC_SRC_HAVE_STRUCT_PAGEVEC], [
+	LB2_LINUX_TEST_SRC([struct_pagevec_exists], [
+		#include <linux/pagevec.h>
+	],[
+		struct pagevec *pvec = NULL;
+		(void)pvec;
+	],[-Werror])
+])
+AC_DEFUN([LC_HAVE_STRUCT_PAGEVEC], [
+	AC_MSG_CHECKING([if 'struct pagevec' is available])
+	LB2_LINUX_TEST_RESULT([struct_pagevec_exists], [
+		AC_DEFINE(HAVE_PAGEVEC, 1,
+			['struct pagevec' is available])
+	])
+]) # LC_HAVE_STRUCT_PAGEVEC
+
+#
 # LC_PROG_LINUX
 #
 # Lustre linux kernel checks
@@ -4006,6 +4098,7 @@ AC_DEFUN([LC_PROG_LINUX_SRC], [
 	LC_SRC_HAVE_ADDRESS_SPACE_OPERATIONS_RELEASE_FOLIO
 	LC_SRC_HAVE_LSMCONTEXT_INIT
 	LC_SRC_SECURITY_DENTRY_INIT_SECURTY_WITH_CTX
+	LC_SRC_HAVE_FILEMAP_GET_FOLIOS
 
 	# 6.0
 	LC_SRC_HAVE_ADDRESS_SPACE_OPERATIONS_MIGRATE_FOLIO
@@ -4026,6 +4119,7 @@ AC_DEFUN([LC_PROG_LINUX_SRC], [
 	LC_SRC_HAVE_U64_CAPABILITY
 	LC_SRC_HAVE_MNT_IDMAP_ARG
 	LC_SRC_HAVE_LOCKS_LOCK_FILE_WAIT_IN_FILELOCK
+	LC_SRC_HAVE_FOLIO_BATCH_REINIT
 
 	# 6.4
 	LC_SRC_HAVE_IOV_ITER_IOVEC
@@ -4036,6 +4130,8 @@ AC_DEFUN([LC_PROG_LINUX_SRC], [
 	LC_SRC_HAVE_FILEMAP_SPLICE_READ
 	LC_SRC_HAVE_ENUM_ITER_PIPE
 	LC_SRC_HAVE_GET_USER_PAGES_WITHOUT_VMA
+	LC_SRC_HAVE_FOLIO_BATCH
+	LC_SRC_HAVE_STRUCT_PAGEVEC
 
 	# kernel patch to extend integrity interface
 	LC_SRC_BIO_INTEGRITY_PREP_FN
@@ -4268,6 +4364,7 @@ AC_DEFUN([LC_PROG_LINUX_RESULTS], [
 	LC_HAVE_ADDRESS_SPACE_OPERATIONS_RELEASE_FOLIO
 	LC_HAVE_LSMCONTEXT_INIT
 	LC_SECURITY_DENTRY_INIT_SECURTY_WITH_CTX
+	LC_HAVE_FILEMAP_GET_FOLIOS
 
 	# 6.0
 	LC_HAVE_ADDRESS_SPACE_OPERATIONS_MIGRATE_FOLIO
@@ -4289,6 +4386,7 @@ AC_DEFUN([LC_PROG_LINUX_RESULTS], [
 	LC_HAVE_U64_CAPABILITY
 	LC_HAVE_MNT_IDMAP_ARG
 	LC_HAVE_LOCKS_LOCK_FILE_WAIT_IN_FILELOCK
+	LC_HAVE_FOLIO_BATCH_REINIT
 
 	# 6.4
 	LC_HAVE_IOV_ITER_IOVEC
@@ -4299,6 +4397,8 @@ AC_DEFUN([LC_PROG_LINUX_RESULTS], [
 	LC_HAVE_FILEMAP_SPLICE_READ
 	LC_HAVE_ENUM_ITER_PIPE
 	LC_HAVE_GET_USER_PAGES_WITHOUT_VMA
+	LC_HAVE_FOLIO_BATCH
+	LC_HAVE_STRUCT_PAGEVEC
 
 	# kernel patch to extend integrity interface
 	LC_BIO_INTEGRITY_PREP_FN
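Each LB2_LINUX_TEST_SRC above is compiled as a tiny conftest with -Werror;
the struct_folio_batch_exists probe, for example, amounts to compiling
something like the following (a sketch; the wrapper function is an
assumption about the test harness, not Lustre code):

    #include <linux/pagevec.h>

    /* conftest body for the HAVE_FOLIO_BATCH probe (hypothetical wrapper) */
    static int conftest_folio_batch(void)
    {
            struct folio_batch fbatch __attribute__((unused));

            folio_batch_init(&fbatch);
            return 0;
    }
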
diff --git a/lustre/include/cl_object.h b/lustre/include/cl_object.h
index e280455..2063218 100644
--- a/lustre/include/cl_object.h
+++ b/lustre/include/cl_object.h
@@ -82,7 +82,6 @@
  * See comments below for the description of i/o, page, and dlm-locking
  * design.
  *
- * @{
  */
 
 /*
@@ -101,6 +100,7 @@
 #include
 #include
 #include
+#include <linux/pagevec.h>
 
 struct obd_info;
 struct inode;
@@ -132,8 +132,7 @@ struct cl_device {
 	struct lu_device	cd_lu_dev;
 };
 
-/** \addtogroup cl_object cl_object
- * @{ */
+/* cl_object */
 /**
  * "Data attributes" of cl_object. Data attributes can be updated
  * independently for a sub-object, and top-object's attributes are calculated
@@ -272,14 +271,11 @@ struct cl_object_conf {
 };
 
 enum {
-	/** configure layout, set up a new stripe, must be called while
-	 * holding layout lock. */
+	/** configure layout, new stripe, must be holding layout lock. */
 	OBJECT_CONF_SET = 0,
-	/** invalidate the current stripe configuration due to losing
-	 * layout lock. */
+	/** invalidate the current stripe config when losing layout lock. */
 	OBJECT_CONF_INVALIDATE = 1,
-	/** wait for old layout to go away so that new layout can be
-	 * set up. */
+	/** wait for old layout to go away so that new layout can be set up. */
 	OBJECT_CONF_WAIT = 2
 };
 
@@ -379,7 +375,7 @@ struct cl_object_operations {
 	 * cl_object_operations::coo_attr_get() is used.
 	 */
 	int (*coo_attr_update)(const struct lu_env *env, struct cl_object *obj,
-			       const struct cl_attr *attr, unsigned valid);
+			       const struct cl_attr *attr, unsigned int valid);
 	/**
 	 * Mark the inode dirty. By this way, the inode will add into the
 	 * writeback list of the corresponding @bdi_writeback, and then it will
@@ -456,8 +452,7 @@ struct cl_object_operations {
  * Extended header for client object.
 */
 struct cl_object_header {
-	/** Standard lu_object_header. cl_object::co_lu::lo_header points
-	 * here. */
+	/* Standard lu_object_header. cl_object::co_lu::lo_header points here.*/
 	struct lu_object_header coh_lu;
 
 	/**
@@ -505,13 +500,8 @@ struct cl_object_header {
 					&(obj)->co_lu.lo_header->loh_layers,\
 					co_lu.lo_linkage)
 
-/** @} cl_object */
-
 #define CL_PAGE_EOF ((pgoff_t)~0ull)
 
-/** \addtogroup cl_page cl_page
- * @{ */
-
 /** \struct cl_page
 * Layered client page.
 *
@@ -732,13 +722,15 @@ enum cl_page_state {
 };
 
 enum cl_page_type {
-	/** Host page, the page is from the host inode which the cl_page
-	 * belongs to. */
+	/** Host page, the page is from the host inode which the cl_page
+	 * belongs to.
+	 */
 	CPT_CACHEABLE = 1,
 
-	/** Transient page, the transient cl_page is used to bind a cl_page
-	 * to vmpage which is not belonging to the same object of cl_page.
-	 * it is used in DirectIO and lockless IO. */
+	/** Transient page, the transient cl_page is used to bind a cl_page
+	 * to vmpage which is not belonging to the same object of cl_page.
+	 * it is used in DirectIO and lockless IO.
+	 */
 	CPT_TRANSIENT,
 	CPT_NR
 };
@@ -910,7 +902,6 @@ struct cl_page_operations {
 	 *
 	 * Transfer methods.
 	 *
-	 * @{
 	 */
 	/**
 	 * Request type dependent vector of operations.
@@ -978,9 +969,8 @@ struct cl_page_operations {
 	 *
 	 * \see cl_page_clip()
 	 */
-	void (*cpo_clip)(const struct lu_env *env,
-			 const struct cl_page_slice *slice,
-			 int from, int to);
+	void (*cpo_clip)(const struct lu_env *env,
			 const struct cl_page_slice *slice, int from, int to);
 	/**
 	 * Write out a page by kernel. This is only called by ll_writepage
 	 * right now.
@@ -990,7 +980,6 @@ struct cl_page_operations {
 	int (*cpo_flush)(const struct lu_env *env,
 			 const struct cl_page_slice *slice,
 			 struct cl_io *io);
-	/** @} transfer */
 };
 
 /**
@@ -1043,10 +1032,6 @@ static inline bool __page_in_use(const struct cl_page *page, int refc)
 */
 #define cl_page_in_use_noref(pg)  __page_in_use(pg, 0)
 
-/** @} cl_page */
-
-/** \addtogroup cl_lock cl_lock
- * @{ */
 /** \struct cl_lock
 *
 * Extent locking on the client.
@@ -1192,8 +1177,9 @@ struct cl_lock {
 */
 struct cl_lock_slice {
 	struct cl_lock		*cls_lock;
-	/** Object slice corresponding to this lock slice. Immutable after
-	 * creation. */
+	/** Object slice corresponding to this lock slice. Immutable after
+	 * creation.
+	 */
 	struct cl_object	*cls_obj;
 	const struct cl_lock_operations *cls_ops;
 	/** Linkage into cl_lock::cll_layers. Immutable after creation. */
@@ -1205,7 +1191,6 @@ struct cl_lock_slice {
 * \see lov_lock_ops, osc_lock_ops
 */
 struct cl_lock_operations {
-	/** @{ */
 	/**
 	 * Attempts to enqueue the lock. Called top-to-bottom.
 	 *
@@ -1225,7 +1210,6 @@ struct cl_lock_operations {
 	 */
 	void (*clo_cancel)(const struct lu_env *env,
 			   const struct cl_lock_slice *slice);
-	/** @} */
 	/**
 	 * Destructor. Frees resources and the slice.
 	 *
@@ -1235,9 +1219,8 @@ struct cl_lock_operations {
 	/**
 	 * Optional debugging helper. Prints given lock slice.
 	 */
-	int (*clo_print)(const struct lu_env *env,
-			 void *cookie, lu_printer_t p,
-			 const struct cl_lock_slice *slice);
+	int (*clo_print)(const struct lu_env *env, void *cookie,
+			 lu_printer_t p, const struct cl_lock_slice *slice);
 };
 
 #define CL_LOCK_DEBUG(mask, env, lock, format, ...)			\
@@ -1257,8 +1240,6 @@ do {									\
 		LBUG();							\
 } while (0)
 
-/** @} cl_lock */
-
 /** \addtogroup cl_page_list cl_page_list
 * Page list used to perform collective operations on a group of pages.
 *
@@ -1276,10 +1257,9 @@
 *
 * \todo XXX concurrency control.
 *
- * @{
 */
 struct cl_page_list {
-	unsigned		pl_nr;
+	unsigned int		pl_nr;
 	struct list_head	pl_pages;
 };
 
@@ -1292,10 +1272,6 @@ struct cl_2queue {
 	struct cl_page_list	c2_qout;
 };
 
-/** @} cl_page_list */
-
-/** \addtogroup cl_io cl_io
- * @{ */
 /** \struct cl_io
 * I/O
 *
@@ -1445,17 +1421,19 @@ struct cl_io_slice {
 };
 
 typedef void (*cl_commit_cbt)(const struct lu_env *, struct cl_io *,
-			      struct pagevec *);
+			      struct folio_batch *);
 
 struct cl_read_ahead {
 	/* Maximum page index the readahead window will end.
 	 * This is determined DLM lock coverage, RPC and stripe boundary.
-	 * cra_end is included. */
+	 * cra_end is included.
+	 */
 	pgoff_t		cra_end_idx;
 	/* optimal RPC size for this read, by pages */
 	unsigned long	cra_rpc_pages;
 	/* Release callback. If readahead holds resources underneath, this
-	 * function should be called to release it. */
+	 * function should be called to release it.
+	 */
 	void		(*cra_release)(const struct lu_env *env,
 				       struct cl_read_ahead *ra);
 
@@ -1569,9 +1547,8 @@ struct cl_io_operations {
 	 * executed on them.
 	 */
 	int  (*cio_submit)(const struct lu_env *env,
-			   const struct cl_io_slice *slice,
-			   enum cl_req_type crt,
-			   struct cl_2queue *queue);
+			   const struct cl_io_slice *slice,
+			   enum cl_req_type crt, struct cl_2queue *queue);
 	/**
 	 * Queue async page for write.
 	 * The difference between cio_submit and cio_queue is that
@@ -1749,7 +1726,8 @@ enum cl_fsync_mode {
 	/** discard all of dirty pages in a specific file range */
 	CL_FSYNC_DISCARD = 2,
 	/** start writeback and make sure they have reached storage before
-	 * return. OST_SYNC RPC must be issued and finished */
+	 * return. OST_SYNC RPC must be issued and finished
+	 */
 	CL_FSYNC_ALL = 3,
 	/** start writeback, thus the kernel can reclaim some memory */
 	CL_FSYNC_RECLAIM = 4,
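With cl_commit_cbt now taking a struct folio_batch, a conforming callback
iterates batch entries and then sub-pages, since one entry may be a
multi-page folio on newer kernels. A skeletal sketch (my_commit_cb is
hypothetical; the fbatch_at_* helpers are added to lustre_compat.h by this
patch):

    static void my_commit_cb(const struct lu_env *env, struct cl_io *io,
                             struct folio_batch *fbatch)
    {
            int i, pg;

            for (i = 0; i < folio_batch_count(fbatch); i++)
                    for (pg = 0; pg < fbatch_at_npgs(fbatch, i); pg++)
                            SetPageUptodate(fbatch_at_pg(fbatch, i, pg));
    }
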
@@ -2010,23 +1988,21 @@ struct cl_io {
 	 * How many times the read has retried before this one.
 	 * Set by the top level and consumed by the LOV.
 	 */
-	unsigned	     ci_ndelay_tried;
+	unsigned int	     ci_ndelay_tried;
 	/**
 	 * Designated mirror index for this I/O.
 	 */
-	unsigned	     ci_designated_mirror;
+	unsigned int	     ci_designated_mirror;
 	/**
 	 * Number of pages owned by this IO. For invariant checking.
 	 */
-	unsigned	     ci_owned_nr;
+	unsigned int	     ci_owned_nr;
 	/**
 	 * Range of write intent. Valid if ci_need_write_intent is set.
 	 */
 	struct lu_extent	ci_write_intent;
 };
 
-/** @} cl_io */
-
 /**
 * Per-transfer attributes.
 */
@@ -2048,7 +2024,8 @@ enum cache_stats_item {
 	/** how many entities are in the cache right now */
 	CS_total,
 	/** how many entities in the cache are actively used (and cannot be
-	 * evicted) right now */
+	 * evicted) right now
+	 */
 	CS_busy,
 	/** how many entities were created at all */
 	CS_create,
@@ -2103,7 +2080,6 @@ int cl_site_stats_print(const struct cl_site *site, struct seq_file *m);
 *
 * Type conversion and accessory functions.
 */
-/** @{ */
 
 static inline struct cl_site *lu2cl_site(const struct lu_site *site)
 {
@@ -2172,10 +2148,7 @@ void cl_lock_slice_add(struct cl_lock *lock, struct cl_lock_slice *slice,
 		       const struct cl_lock_operations *ops);
 void cl_io_slice_add(struct cl_io *io, struct cl_io_slice *slice,
 		     struct cl_object *obj, const struct cl_io_operations *ops);
-/** @} helpers */
 
-/** \defgroup cl_object cl_object
- * @{ */
 struct cl_object *cl_object_top (struct cl_object *o);
 struct cl_object *cl_object_find(const struct lu_env *env, struct cl_device *cd,
 				 const struct lu_fid *fid,
@@ -2190,7 +2163,7 @@
 int  cl_object_attr_get(const struct lu_env *env, struct cl_object *obj,
 			struct cl_attr *attr);
 int  cl_object_attr_update(const struct lu_env *env, struct cl_object *obj,
-			   const struct cl_attr *attr, unsigned valid);
+			   const struct cl_attr *attr, unsigned int valid);
 void cl_object_dirty_for_sync(const struct lu_env *env, struct cl_object *obj);
 int  cl_object_glimpse	(const struct lu_env *env, struct cl_object *obj,
 			 struct ost_lvb *lvb);
@@ -2239,13 +2212,11 @@ static inline void *cl_object_page_slice(struct cl_object *clob,
 static inline int cl_object_refc(struct cl_object *clob)
 {
 	struct lu_object_header *header = clob->co_lu.lo_header;
+
 	return atomic_read(&header->loh_ref);
 }
 
-/** @} cl_object */
-
-/** \defgroup cl_page cl_page
- * @{ */
+/* cl_page */
 
 struct cl_page *cl_page_find	(const struct lu_env *env,
 				 struct cl_object *obj,
 				 pgoff_t idx, struct page *vmpage,
@@ -2259,9 +2230,9 @@ struct cl_page *cl_page_alloc	(const struct lu_env *env,
 void cl_page_get	(struct cl_page *page);
 void cl_page_put	(const struct lu_env *env,
 			 struct cl_page *page);
-void cl_pagevec_put	(const struct lu_env *env,
+void cl_batch_put	(const struct lu_env *env,
 			 struct cl_page *page,
-			 struct pagevec *pvec);
+			 struct folio_batch *fbatch);
 void cl_page_print	(const struct lu_env *env, void *cookie,
 			 lu_printer_t printer, const struct cl_page *pg);
@@ -2279,7 +2250,6 @@ const struct cl_page_slice *cl_page_at(const struct cl_page *page,
 *
 * Functions dealing with the ownership of page by io.
 */
-/** @{ */
 
 int  cl_page_own	(const struct lu_env *env,
 			 struct cl_io *io, struct cl_page *page);
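The cl_pagevec_put() to cl_batch_put() rename keeps the old contract: the
caller owns the batch and must flush whatever is still queued when it is
done. A minimal sketch of a caller (drop_pages is hypothetical):

    /* Drop a set of cl_pages with batched vmpage puts. */
    static void drop_pages(const struct lu_env *env, struct cl_page **pages,
                           int count)
    {
            struct folio_batch fbatch;
            int i;

            ll_folio_batch_init(&fbatch, 0);
            for (i = 0; i < count; i++)
                    cl_batch_put(env, pages[i], &fbatch);
            /* the batch may still hold vmpages not yet released */
            folio_batch_release(&fbatch);
    }
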
@@ -2293,15 +2263,12 @@ void cl_page_disown	(const struct lu_env *env,
 			 struct cl_io *io, struct cl_page *page);
 int  cl_page_is_owned	(const struct cl_page *pg, const struct cl_io *io);
 
-/** @} ownership */
-
 /**
 * \name transfer
 *
 * Functions dealing with the preparation of a page for a transfer, and
 * tracking transfer state.
 */
-/** @{ */
 int  cl_page_prep	(const struct lu_env *env, struct cl_io *io,
 			 struct cl_page *pg, enum cl_req_type crt);
 void cl_page_completion	(const struct lu_env *env,
@@ -2315,14 +2282,10 @@ void cl_page_clip	(const struct lu_env *env, struct cl_page *pg,
 int  cl_page_flush	(const struct lu_env *env, struct cl_io *io,
 			 struct cl_page *pg);
 
-/** @} transfer */
-
-
 /**
 * \name helper routines
 * Functions to discard, delete and export a cl_page.
 */
-/** @{ */
 void cl_page_discard(const struct lu_env *env, struct cl_io *io,
 		     struct cl_page *pg);
 void cl_page_delete(const struct lu_env *env, struct cl_page *pg);
@@ -2331,13 +2294,11 @@ void cl_page_touch(const struct lu_env *env, const struct cl_page *pg,
 loff_t cl_offset(const struct cl_object *obj, pgoff_t idx);
 pgoff_t cl_index(const struct cl_object *obj, loff_t offset);
 size_t cl_page_size(const struct cl_object *obj);
-
 void cl_lock_print(const struct lu_env *env, void *cookie,
 		   lu_printer_t printer, const struct cl_lock *lock);
 void cl_lock_descr_print(const struct lu_env *env, void *cookie,
 			 lu_printer_t printer,
 			 const struct cl_lock_descr *descr);
-/* @} helper */
 
 /**
 * Data structure managing a client's cached pages. A count of
@@ -2396,10 +2357,7 @@ struct cl_client_cache *cl_cache_init(unsigned long lru_page_max);
 void cl_cache_incref(struct cl_client_cache *cache);
 void cl_cache_decref(struct cl_client_cache *cache);
 
-/** @} cl_page */
-
-/** \defgroup cl_lock cl_lock
- * @{ */
+/* cl_lock */
 
 int cl_lock_request(const struct lu_env *env, struct cl_io *io,
 		    struct cl_lock *lock);
 int cl_lock_init(const struct lu_env *env, struct cl_lock *lock,
@@ -2413,11 +2371,7 @@ int cl_lock_enqueue(const struct lu_env *env, struct cl_io *io,
 		    struct cl_lock *lock, struct cl_sync_io *anchor);
 void cl_lock_cancel(const struct lu_env *env, struct cl_lock *lock);
 
-/** @} cl_lock */
-
-/** \defgroup cl_io cl_io
- * @{ */
-
+/* cl_io */
 int   cl_io_init	(const struct lu_env *env, struct cl_io *io,
 			 enum cl_io_type iot, struct cl_object *obj);
 int   cl_io_sub_init	(const struct lu_env *env, struct cl_io *io,
@@ -2493,11 +2447,7 @@ void cl_io_print(const struct lu_env *env, void *cookie,
 
 #define CL_IO_SLICE_CLEAN(obj, base) memset_startat(obj, 0, base)
 
-/** @} cl_io */
-
-/** \defgroup cl_page_list cl_page_list
- * @{ */
-
+/* cl_page_list */
 /**
 * Last page in the page list.
 */
@@ -2553,14 +2503,10 @@ void cl_2queue_discard	(const struct lu_env *env,
 void cl_2queue_fini	(const struct lu_env *env, struct cl_2queue *queue);
 void cl_2queue_init_page(struct cl_2queue *queue, struct cl_page *page);
 
-/** @} cl_page_list */
-
 void cl_req_attr_set(const struct lu_env *env, struct cl_object *obj,
 		     struct cl_req_attr *attr);
 
-/** \defgroup cl_sync_io cl_sync_io
- * @{ */
-
+/* cl_sync_io */
 struct cl_sync_io;
 struct cl_dio_aio;
 struct cl_sub_dio;
@@ -2646,8 +2592,6 @@ struct cl_sub_dio {
 
 void ll_release_user_pages(struct page **pages, int npages);
 
-/** @} cl_sync_io */
-
 /** \defgroup cl_env cl_env
 *
 * lu_env handling for a client.
@@ -2672,17 +2616,16 @@ void ll_release_user_pages(struct page **pages, int npages);
 * longer used environments instead of destroying them;
 *
 * \see lu_env, lu_context, lu_context_key
- * @{ */
+ */
+/* cl_env */
 struct lu_env *cl_env_get(__u16 *refcheck);
 struct lu_env *cl_env_alloc(__u16 *refcheck, __u32 tags);
 void cl_env_put(struct lu_env *env, __u16 *refcheck);
-unsigned cl_env_cache_purge(unsigned nr);
+unsigned int cl_env_cache_purge(unsigned int nr);
 struct lu_env *cl_env_percpu_get(void);
 void cl_env_percpu_put(struct lu_env *env);
 
-/** @} cl_env */
-
 /*
 * Misc
 */
@@ -2692,7 +2635,6 @@ void cl_lvb2attr(struct cl_attr *attr, const struct ost_lvb *lvb);
 struct cl_device *cl_type_setup(const struct lu_env *env, struct lu_site *site,
 				struct lu_device_type *ldt,
 				struct lu_device *next);
-/** @} clio */
 
 int  cl_global_init(void);
 void cl_global_fini(void);
diff --git a/lustre/include/lustre_compat.h b/lustre/include/lustre_compat.h
index 447e687..882c673 100644
--- a/lustre/include/lustre_compat.h
+++ b/lustre/include/lustre_compat.h
@@ -44,6 +44,7 @@
 #include
 #include
 #include
+#include <linux/pagevec.h>
 #include
 #include
 
@@ -430,12 +431,6 @@ static inline struct timespec current_time(struct inode *inode)
 #define smp_store_mb(var, value)	set_mb(var, value)
 #endif
 
-#ifdef HAVE_PAGEVEC_INIT_ONE_PARAM
-#define ll_pagevec_init(pvec, n) pagevec_init(pvec)
-#else
-#define ll_pagevec_init(pvec, n) pagevec_init(pvec, n)
-#endif
-
 #ifdef HAVE_D_COUNT
 #  define ll_d_count(d)		d_count(d)
 #else
@@ -662,4 +657,50 @@ static inline struct page *ll_read_cache_page(struct address_space *mapping,
 #endif /* HAVE_READ_CACHE_PAGE_WANTS_FILE */
 }
 
+#if defined(HAVE_FOLIO_BATCH) && defined(HAVE_FILEMAP_GET_FOLIOS)
+# define ll_folio_batch_init(batch, n)	folio_batch_init(batch)
+# define ll_filemap_get_folios(m, s, e, fbatch) \
+	filemap_get_folios(m, &s, e, fbatch)
+# define fbatch_at(fbatch, f)		((fbatch)->folios[(f)])
+# define fbatch_at_npgs(fbatch, f)	folio_nr_pages((fbatch)->folios[(f)])
+# define fbatch_at_pg(fbatch, f, pg)	folio_page((fbatch)->folios[(f)], (pg))
+# define folio_batch_add_page(fbatch, page) \
+	folio_batch_add(fbatch, page_folio(page))
+# ifndef HAVE_FOLIO_BATCH_REINIT
+static inline void folio_batch_reinit(struct folio_batch *fbatch)
+{
+	fbatch->nr = 0;
+}
+# endif /* HAVE_FOLIO_BATCH_REINIT */
+
+#else /* !HAVE_FOLIO_BATCH || !HAVE_FILEMAP_GET_FOLIOS */
+
+# ifdef HAVE_PAGEVEC
+#  define folio_batch			pagevec
+# endif
+# define folio_batch_init(pvec)		pagevec_init(pvec)
+# define folio_batch_reinit(pvec)	pagevec_reinit(pvec)
+# define folio_batch_count(pvec)	pagevec_count(pvec)
+# define folio_batch_space(pvec)	pagevec_space(pvec)
+# define folio_batch_add_page(pvec, page) \
+	pagevec_add(pvec, page)
+# define folio_batch_release(pvec) \
+	pagevec_release(((struct pagevec *)pvec))
+# ifdef HAVE_PAGEVEC_INIT_ONE_PARAM
+#  define ll_folio_batch_init(pvec, n)	pagevec_init(pvec)
+# else
+#  define ll_folio_batch_init(pvec, n)	pagevec_init(pvec, n)
+# endif
+# ifdef HAVE_PAGEVEC_LOOKUP_THREE_PARAM
+#  define ll_filemap_get_folios(m, s, e, pvec) \
+	pagevec_lookup(pvec, m, &s)
+# else
+#  define ll_filemap_get_folios(m, s, e, pvec) \
+	pagevec_lookup(pvec, m, s, PAGEVEC_SIZE)
+# endif
+# define fbatch_at(pvec, n)		((pvec)->pages[(n)])
+# define fbatch_at_npgs(pvec, n)	1
+# define fbatch_at_pg(pvec, n, pg)	((pvec)->pages[(n)])
+#endif /* HAVE_FOLIO_BATCH && HAVE_FILEMAP_GET_FOLIOS */
+
 #endif /* _LUSTRE_COMPAT_H */
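The net effect of the compat block above is that callers iterate a batch
the same way on every kernel; fbatch_at_npgs() collapses to 1 on pagevec
kernels, so the inner loop simply degenerates. A usage sketch
(count_batch_pages is hypothetical):

    /* Count the small pages addressed by a batch on any supported kernel;
     * fbatch_at_pg(fbatch, i, pg) would yield each page in turn. */
    static unsigned long count_batch_pages(struct folio_batch *fbatch)
    {
            unsigned long pages = 0;
            unsigned int i;

            for (i = 0; i < folio_batch_count(fbatch); i++)
                    pages += fbatch_at_npgs(fbatch, i); /* 1 per entry on
                                                         * pagevec kernels */
            return pages;
    }
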
diff --git a/lustre/include/lustre_osc.h b/lustre/include/lustre_osc.h
index eaedc62..f99619b 100644
--- a/lustre/include/lustre_osc.h
+++ b/lustre/include/lustre_osc.h
@@ -164,7 +164,7 @@ struct osc_thread_info {
 	union ldlm_policy_data	oti_policy;
 	struct cl_attr		oti_attr;
 	struct cl_io		oti_io;
-	struct pagevec		oti_pagevec;
+	struct folio_batch	oti_fbatch;
 	void			*oti_pvec[OTI_PVEC_SIZE];
 	/**
 	 * Fields used by cl_lock_discard_pages().
diff --git a/lustre/llite/pcc.c b/lustre/llite/pcc.c
index 5d56093..895dccd 100644
--- a/lustre/llite/pcc.c
+++ b/lustre/llite/pcc.c
@@ -3071,27 +3071,27 @@ static int pcc_mmap_pages_convert(struct inode *inode,
 				  struct inode *pcc_inode)
 {
 #ifdef HAVE_ADD_TO_PAGE_CACHE_LOCKED
-	struct pagevec pvec;
+	struct folio_batch fbatch;
 	pgoff_t index = 0;
-	int nr_pages;
+	unsigned int nr;
 	int rc = 0;
 
-	ll_pagevec_init(&pvec, 0);
+	ll_folio_batch_init(&fbatch, 0);
 	for ( ; ; ) {
 		struct page *page;
 		int i;
 
-#ifdef HAVE_PAGEVEC_LOOKUP_THREE_PARAM
-		nr_pages = pagevec_lookup(&pvec, pcc_inode->i_mapping, &index);
-#else
-		nr_pages = pagevec_lookup(&pvec, pcc_inode->i_mapping, index,
-					  PAGEVEC_SIZE);
-#endif
-		if (nr_pages <= 0)
+		nr = ll_filemap_get_folios(pcc_inode->i_mapping,
+					   index, ~0UL, &fbatch);
+		if (nr == 0)
 			break;
 
-		for (i = 0; i < nr_pages; i++) {
-			page = pvec.pages[i];
+		for (i = 0; i < nr; i++) {
+#if defined(HAVE_FOLIO_BATCH) && defined(HAVE_FILEMAP_GET_FOLIOS)
+			page = &fbatch.folios[i]->page;
+#else
+			page = fbatch.pages[i];
+#endif
 			lock_page(page);
 			wait_on_page_writeback(page);
 
@@ -3108,7 +3108,7 @@ static int pcc_mmap_pages_convert(struct inode *inode,
 						page->index, GFP_KERNEL);
 			if (rc) {
 				unlock_page(page);
-				pagevec_release(&pvec);
+				folio_batch_release(&fbatch);
 				return rc;
 			}
 
@@ -3116,7 +3116,7 @@ static int pcc_mmap_pages_convert(struct inode *inode,
 		}
 
 		index = page->index + 1;
-		pagevec_release(&pvec);
+		folio_batch_release(&fbatch);
 		cond_resched();
 	}
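On folio kernels the batch entries are struct folio, so the conversion loop
above takes the head page explicitly; the open-coded '&fbatch.folios[i]->page'
is equivalent to folio_page(folio, 0), as this sketch shows (batch_head_page
is hypothetical):

    /* Head page of batch entry @i on a folio kernel. */
    static struct page *batch_head_page(struct folio_batch *fbatch,
                                        unsigned int i)
    {
            return folio_page(fbatch->folios[i], 0);
    }
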
diff --git a/lustre/llite/vvp_io.c b/lustre/llite/vvp_io.c
index 94436cb..b320572 100644
--- a/lustre/llite/vvp_io.c
+++ b/lustre/llite/vvp_io.c
@@ -44,6 +44,7 @@
 
 #include "llite_internal.h"
 #include "vvp_internal.h"
+#include <linux/pagevec.h>
 #include
 
 static struct vvp_io *cl2vvp_io(const struct lu_env *env,
@@ -1077,15 +1078,19 @@ static inline void ll_account_page_dirtied(struct page *page,
 * Backwards compat for 3.x, 5.x kernels relating to memcg handling
 * & rename of radix tree to xarray.
 */
-void vvp_set_pagevec_dirty(struct pagevec *pvec)
+void vvp_set_batch_dirty(struct folio_batch *fbatch)
 {
-	struct page *page = pvec->pages[0];
-	int count = pagevec_count(pvec);
+	struct page *page = fbatch_at_pg(fbatch, 0, 0);
+	int count = folio_batch_count(fbatch);
 	int i;
+#if !defined(HAVE_FILEMAP_GET_FOLIOS) || defined(HAVE_KALLSYMS_LOOKUP_NAME)
+	int pg, npgs;
+#endif
 #ifdef HAVE_KALLSYMS_LOOKUP_NAME
 	struct address_space *mapping = page->mapping;
 	unsigned long flags;
 	unsigned long skip_pages = 0;
+	int pgno;
 	int dirtied = 0;
 #endif
 
@@ -1093,7 +1098,7 @@ void vvp_set_pagevec_dirty(struct pagevec *pvec)
 	BUILD_BUG_ON(PAGEVEC_SIZE > BITS_PER_LONG);
 	LASSERTF(page->mapping,
-		 "mapping must be set. page %p, page->private (cl_page) %p\n",
+		 "mapping must be set. page %px, page->private (cl_page) %px\n",
 		 page, (void *) page->private);
 
 	/*
@@ -1104,25 +1109,41 @@ void vvp_set_pagevec_dirty(struct pagevec *pvec)
 	 */
 #ifndef HAVE_ACCOUNT_PAGE_DIRTIED_EXPORT
 	if (!vvp_account_page_dirtied) {
-		for (i = 0; i < count; i++)
-			__set_page_dirty_nobuffers(pvec->pages[i]);
+		for (i = 0; i < count; i++) {
+#if defined(HAVE_FOLIO_BATCH) && defined(HAVE_FILEMAP_GET_FOLIOS)
+			filemap_dirty_folio(page->mapping, fbatch->folios[i]);
+#else
+			npgs = fbatch_at_npgs(fbatch, i);
+			for (pg = 0; pg < npgs; pg++) {
+				page = fbatch_at_pg(fbatch, i, pg);
+				__set_page_dirty_nobuffers(page);
+			}
+#endif
+		}
 		EXIT;
 	}
 #endif
 
+	/* account_page_dirtied is available directly or via kallsyms */
 #ifdef HAVE_KALLSYMS_LOOKUP_NAME
-	for (i = 0; i < count; i++) {
-		page = pvec->pages[i];
+	for (pgno = i = 0; i < count; i++) {
+		npgs = fbatch_at_npgs(fbatch, i);
+		for (pg = 0; pg < npgs; pg++) {
+			page = fbatch_at_pg(fbatch, i, pg);
 
-		ClearPageReclaim(page);
+			ClearPageReclaim(page);
 
-		vvp_lock_page_memcg(page);
-		if (TestSetPageDirty(page)) {
-			/* page is already dirty .. no extra work needed
-			 * set a flag for the i'th page to be skipped
-			 */
-			vvp_unlock_page_memcg(page);
-			skip_pages |= (1 << i);
+			vvp_lock_page_memcg(page);
+			if (TestSetPageDirty(page)) {
+				/* page is already dirty .. no extra work
+				 * needed, set a flag so the pgno'th page
+				 * is skipped below
+				 */
+				vvp_unlock_page_memcg(page);
+				skip_pages |= (1ul << pgno);
+			}
+			pgno++;
+			LASSERTF(pgno <= BITS_PER_LONG,
+				 "Limit exceeded pgno: %d/%d\n", pgno,
+				 BITS_PER_LONG);
 		}
 	}
 
@@ -1137,19 +1158,22 @@ void vvp_set_pagevec_dirty(struct pagevec *pvec)
 	 * dirty_nobuffers should be impossible because we hold the page lock.)
 	 * 4. All mappings are the same because i/o is only to one file.
 	 */
-	for (i = 0; i < count; i++) {
-		page = pvec->pages[i];
-		/* if the i'th page was unlocked above, skip it here */
-		if ((skip_pages >> i) & 1)
-			continue;
-
-		LASSERTF(page->mapping == mapping,
-			 "all pages must have the same mapping. page %p, mapping %p, first mapping %p\n",
-			 page, page->mapping, mapping);
-		WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
-		ll_account_page_dirtied(page, mapping);
-		dirtied++;
-		vvp_unlock_page_memcg(page);
+	for (pgno = i = 0; i < count; i++) {
+		npgs = fbatch_at_npgs(fbatch, i);
+		for (pg = 0; pg < npgs; pg++) {
+			page = fbatch_at_pg(fbatch, i, pg);
+			/* if the pgno'th page was unlocked above, skip it */
+			if ((skip_pages >> pgno++) & 1)
+				continue;
+
+			LASSERTF(page->mapping == mapping,
+				 "all pages must have the same mapping. page %px, mapping %px, first mapping %px\n",
+				 page, page->mapping, mapping);
+			WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
+			ll_account_page_dirtied(page, mapping);
+			dirtied++;
+			vvp_unlock_page_memcg(page);
+		}
 	}
 
 	ll_xa_unlock_irqrestore(&mapping->i_pages, flags);
@@ -1165,29 +1189,36 @@ void vvp_set_pagevec_dirty(struct pagevec *pvec)
 }
 
 static void write_commit_callback(const struct lu_env *env, struct cl_io *io,
-				  struct pagevec *pvec)
+				  struct folio_batch *fbatch)
 {
+	struct page *vmpage;
+	struct cl_page *page;
+	int pg, npgs;
 	int count = 0;
 	int i = 0;
 
 	ENTRY;
 
-	count = pagevec_count(pvec);
+	count = folio_batch_count(fbatch);
 	LASSERT(count > 0);
 
 	for (i = 0; i < count; i++) {
-		struct page *vmpage = pvec->pages[i];
-		SetPageUptodate(vmpage);
+		npgs = fbatch_at_npgs(fbatch, i);
+		for (pg = 0; pg < npgs; pg++)
+			SetPageUptodate(fbatch_at_pg(fbatch, i, pg));
 	}
 
-	vvp_set_pagevec_dirty(pvec);
+	vvp_set_batch_dirty(fbatch);
 
 	for (i = 0; i < count; i++) {
-		struct page *vmpage = pvec->pages[i];
-		struct cl_page *page = (struct cl_page *) vmpage->private;
-		cl_page_disown(env, io, page);
-		lu_ref_del(&page->cp_reference, "cl_io", cl_io_top(io));
-		cl_page_put(env, page);
+		npgs = fbatch_at_npgs(fbatch, i);
+		for (pg = 0; pg < npgs; pg++) {
+			vmpage = fbatch_at_pg(fbatch, i, pg);
+			page = (struct cl_page *) vmpage->private;
+			cl_page_disown(env, io, page);
+			lu_ref_del(&page->cp_reference, "cl_io", cl_io_top(io));
+			cl_page_put(env, page);
+		}
 	}
 
 	EXIT;
@@ -1515,9 +1546,9 @@ static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
 }
 
 static void mkwrite_commit_callback(const struct lu_env *env, struct cl_io *io,
-				    struct pagevec *pvec)
+				    struct folio_batch *fbatch)
 {
-	vvp_set_pagevec_dirty(pvec);
+	vvp_set_batch_dirty(fbatch);
 }
 
 static int vvp_io_fault_start(const struct lu_env *env,
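The new-kernel branch of vvp_set_batch_dirty() leans on filemap_dirty_folio(),
which dirties and accounts a whole (possibly multi-page) folio in one call;
reduced to a sketch (dirty_batch is hypothetical, and assumes a kernel with
HAVE_FILEMAP_GET_FOLIOS, where that function is known to exist):

    #include <linux/pagemap.h>
    #include <linux/pagevec.h>

    /* Dirty every folio in @fbatch; filemap_dirty_folio() handles the
     * memcg and writeback accounting that the legacy path open-codes.
     */
    static void dirty_batch(struct address_space *mapping,
                            struct folio_batch *fbatch)
    {
            unsigned int i;

            for (i = 0; i < folio_batch_count(fbatch); i++)
                    filemap_dirty_folio(mapping, fbatch->folios[i]);
    }
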
diff --git a/lustre/obdclass/cl_page.c b/lustre/obdclass/cl_page.c
index 3af7753..368373b 100644
--- a/lustre/obdclass/cl_page.c
+++ b/lustre/obdclass/cl_page.c
@@ -183,7 +183,7 @@ static void __cl_page_free(struct cl_page *cl_page, unsigned short bufsize)
 }
 
 static void cl_page_free(const struct lu_env *env, struct cl_page *cp,
-			 struct pagevec *pvec)
+			 struct folio_batch *fbatch)
 {
 	struct cl_object *obj = cp->cp_obj;
 	unsigned short bufsize = cl_object_header(obj)->coh_page_bufsize;
@@ -201,9 +201,9 @@ static void cl_page_free(const struct lu_env *env, struct cl_page *cp,
 		LASSERT(vmpage != NULL);
 		LASSERT((struct cl_page *)vmpage->private != cp);
 
-		if (pvec != NULL) {
-			if (!pagevec_add(pvec, vmpage))
-				pagevec_release(pvec);
+		if (fbatch != NULL) {
+			if (!folio_batch_add_page(fbatch, vmpage))
+				folio_batch_release(fbatch);
 		} else {
 			put_page(vmpage);
 		}
@@ -488,13 +488,13 @@ void cl_page_get(struct cl_page *page)
 EXPORT_SYMBOL(cl_page_get);
 
 /**
- * Releases a reference to a page, use the pagevec to release the pages
+ * Releases a reference to a page, use the folio_batch to release the pages
 * in batch if provided.
 *
- * Users need to do a final pagevec_release() to release any trailing pages.
+ * Users need to do a final folio_batch_release() to release any trailing
+ * pages.
 */
-void cl_pagevec_put(const struct lu_env *env, struct cl_page *page,
-		    struct pagevec *pvec)
+void cl_batch_put(const struct lu_env *env, struct cl_page *page,
+		  struct folio_batch *fbatch)
 {
 	ENTRY;
 	CL_PAGE_HEADER(D_TRACE, env, page, "%d\n",
@@ -510,15 +510,15 @@ void cl_pagevec_put(const struct lu_env *env, struct cl_page *page,
 		 * Page is no longer reachable by other threads. Tear
 		 * it down.
 		 */
-		cl_page_free(env, page, pvec);
+		cl_page_free(env, page, fbatch);
 	}
 
 	EXIT;
 }
-EXPORT_SYMBOL(cl_pagevec_put);
+EXPORT_SYMBOL(cl_batch_put);
 
 /**
- * Releases a reference to a page, wrapper to cl_pagevec_put
+ * Releases a reference to a page, wrapper to cl_batch_put
 *
 * When last reference is released, page is returned to the cache, unless it
 * is in cl_page_state::CPS_FREEING state, in which case it is immediately
@@ -528,7 +528,7 @@ EXPORT_SYMBOL(cl_pagevec_put);
 */
 void cl_page_put(const struct lu_env *env, struct cl_page *page)
 {
-	cl_pagevec_put(env, page, NULL);
+	cl_batch_put(env, page, NULL);
 }
 EXPORT_SYMBOL(cl_page_put);
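cl_page_free() above relies on pagevec_add() and folio_batch_add() sharing
the same return convention: both return the number of slots left after the
add, so a zero return means the batch just filled and must be flushed. The
pattern in isolation (queue_vmpage_put is hypothetical):

    /* Queue one vmpage put, flushing the batch when it fills; the caller
     * still owes a final folio_batch_release() for any tail.
     */
    static void queue_vmpage_put(struct folio_batch *fbatch,
                                 struct page *vmpage)
    {
            if (!folio_batch_add_page(fbatch, vmpage))
                    folio_batch_release(fbatch);
    }
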
diff --git a/lustre/osc/osc_cache.c b/lustre/osc/osc_cache.c
index 27c320d..3ac863c 100644
--- a/lustre/osc/osc_cache.c
+++ b/lustre/osc/osc_cache.c
@@ -973,7 +973,7 @@ static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index,
 	struct client_obd	*cli = osc_cli(obj);
 	struct osc_async_page	*oap;
 	struct osc_async_page	*tmp;
-	struct pagevec		*pvec;
+	struct folio_batch	*fbatch;
 	int			pages_in_chunk = 0;
 	int			ppc_bits = cli->cl_chunkbits -
 					   PAGE_SHIFT;
@@ -998,8 +998,8 @@ static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index,
 	io  = osc_env_thread_io(env);
 	io->ci_obj = cl_object_top(osc2cl(obj));
 	io->ci_ignore_layout = 1;
-	pvec = &osc_env_info(env)->oti_pagevec;
-	ll_pagevec_init(pvec, 0);
+	fbatch = &osc_env_info(env)->oti_fbatch;
+	ll_folio_batch_init(fbatch, 0);
 	rc = cl_io_init(env, io, CIT_MISC, io->ci_obj);
 	if (rc < 0)
 		GOTO(out, rc);
@@ -1037,12 +1037,12 @@ static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index,
 		}
 
 		lu_ref_del(&page->cp_reference, "truncate", current);
-		cl_pagevec_put(env, page, pvec);
+		cl_batch_put(env, page, fbatch);
 
 		--ext->oe_nr_pages;
 		++nr_pages;
 	}
-	pagevec_release(pvec);
+	folio_batch_release(fbatch);
 
 	EASSERTF(ergo(ext->oe_start >= trunc_index + !!partial,
 		      ext->oe_nr_pages == 0),
@@ -2311,7 +2311,7 @@ int osc_queue_async_io(const struct lu_env *env, struct cl_io *io,
 	struct osc_async_page *oap = &ops->ops_oap;
 	struct client_obd     *cli = oap->oap_cli;
 	struct osc_object     *osc = oap->oap_obj;
-	struct pagevec        *pvec = &osc_env_info(env)->oti_pagevec;
+	struct folio_batch    *fbatch = &osc_env_info(env)->oti_fbatch;
 	pgoff_t index;
 	unsigned int tmp;
 	unsigned int grants = 0;
@@ -2442,10 +2442,11 @@ int osc_queue_async_io(const struct lu_env *env, struct cl_io *io,
 
 		/* We must not hold a page lock while we do osc_enter_cache()
 		 * or osc_extent_find(), so we must mark dirty & unlock
-		 * any pages in the write commit pagevec. */
-		if (pagevec_count(pvec)) {
-			cb(env, io, pvec);
-			pagevec_reinit(pvec);
+		 * any pages in the write commit folio_batch.
+		 */
+		if (folio_batch_count(fbatch)) {
+			cb(env, io, fbatch);
+			folio_batch_reinit(fbatch);
 		}
 
 		if (grants == 0) {
@@ -3096,7 +3097,7 @@ bool osc_page_gang_lookup(const struct lu_env *env, struct cl_io *io,
 			  osc_page_gang_cbt cb, void *cbdata)
 {
 	struct osc_page *ops;
-	struct pagevec	*pagevec;
+	struct folio_batch *fbatch;
 	void            **pvec;
 	pgoff_t         idx;
 	unsigned int    nr;
@@ -3108,8 +3109,8 @@ bool osc_page_gang_lookup(const struct lu_env *env, struct cl_io *io,
 	idx = start;
 	pvec = osc_env_info(env)->oti_pvec;
-	pagevec = &osc_env_info(env)->oti_pagevec;
-	ll_pagevec_init(pagevec, 0);
+	fbatch = &osc_env_info(env)->oti_fbatch;
+	ll_folio_batch_init(fbatch, 0);
 	spin_lock(&osc->oo_tree_lock);
 	while ((nr = radix_tree_gang_lookup(&osc->oo_tree, pvec,
 					    idx, OTI_PVEC_SIZE)) > 0) {
@@ -3155,9 +3156,9 @@ bool osc_page_gang_lookup(const struct lu_env *env, struct cl_io *io,
 			ops = pvec[i];
 			page = ops->ops_cl.cpl_page;
 			lu_ref_del(&page->cp_reference, "gang_lookup", current);
-			cl_pagevec_put(env, page, pagevec);
+			cl_batch_put(env, page, fbatch);
 		}
-		pagevec_release(pagevec);
+		folio_batch_release(fbatch);
 
 		if (nr < OTI_PVEC_SIZE || end_of_region)
 			break;
diff --git a/lustre/osc/osc_io.c b/lustre/osc/osc_io.c
index 665c30f..03617f0 100644
--- a/lustre/osc/osc_io.c
+++ b/lustre/osc/osc_io.c
@@ -301,13 +301,13 @@ int osc_io_commit_async(const struct lu_env *env,
 			struct cl_page_list *qin, int from, int to,
 			cl_commit_cbt cb)
 {
-	struct cl_io	*io = ios->cis_io;
-	struct osc_io	*oio = cl2osc_io(env, ios);
+	struct cl_io	  *io = ios->cis_io;
+	struct osc_io	  *oio = cl2osc_io(env, ios);
 	struct osc_object *osc = cl2osc(ios->cis_obj);
-	struct cl_page	*page;
-	struct cl_page	*last_page;
+	struct cl_page	  *page;
+	struct cl_page	  *last_page;
 	struct osc_page   *opg;
-	struct pagevec	*pvec = &osc_env_info(env)->oti_pagevec;
+	struct folio_batch *fbatch = &osc_env_info(env)->oti_fbatch;
 	int result = 0;
 	ENTRY;
 
@@ -327,7 +327,7 @@ int osc_io_commit_async(const struct lu_env *env,
 		}
 	}
 
-	ll_pagevec_init(pvec, 0);
+	ll_folio_batch_init(fbatch, 0);
 	while (qin->pl_nr > 0) {
 		struct osc_async_page *oap;
 
@@ -359,9 +359,9 @@ int osc_io_commit_async(const struct lu_env *env,
 		cl_page_list_del(env, qin, page);
 
 		/* if there are no more slots, do the callback & reinit */
-		if (pagevec_add(pvec, page->cp_vmpage) == 0) {
-			(*cb)(env, io, pvec);
-			pagevec_reinit(pvec);
+		if (!folio_batch_add_page(fbatch, page->cp_vmpage)) {
+			(*cb)(env, io, fbatch);
+			folio_batch_reinit(fbatch);
 		}
 	}
 	/* The shrink interval is in seconds, so we can update it once per
@@ -370,9 +370,9 @@ int osc_io_commit_async(const struct lu_env *env,
 
 	osc_update_next_shrink(osc_cli(osc));
 
-	/* Clean up any partially full pagevecs */
-	if (pagevec_count(pvec) != 0)
-		(*cb)(env, io, pvec);
+	/* Clean up any partially full folio_batches */
+	if (folio_batch_count(fbatch) != 0)
+		(*cb)(env, io, fbatch);
 
 	/* Can't access these pages any more. Page can be in transfer and
 	 * complete at any time.
 	 */
 }
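Note that osc_io_commit_async() pairs the commit callback with
folio_batch_reinit() rather than folio_batch_release(): the callback
consumes the page references, so only the fill level needs resetting. As a
sketch (flush_batch is hypothetical):

    static void flush_batch(const struct lu_env *env, struct cl_io *io,
                            struct folio_batch *fbatch, cl_commit_cbt cb)
    {
            if (folio_batch_count(fbatch) == 0)
                    return;
            (*cb)(env, io, fbatch);      /* consumes the page references */
            folio_batch_reinit(fbatch);  /* reset count; no extra puts */
    }
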
diff --git a/lustre/osc/osc_page.c b/lustre/osc/osc_page.c
index 99873da..633072d 100644
--- a/lustre/osc/osc_page.c
+++ b/lustre/osc/osc_page.c
@@ -505,13 +505,13 @@ static void osc_lru_use(struct client_obd *cli, struct osc_page *opg)
 	}
 }
 
-static void discard_pagevec(const struct lu_env *env, struct cl_io *io,
-			    struct cl_page **pvec, int max_index)
+static void discard_cl_pages(const struct lu_env *env, struct cl_io *io,
+			     struct cl_page **pvec, int max_index)
 {
-	struct pagevec *pagevec = &osc_env_info(env)->oti_pagevec;
+	struct folio_batch *fbatch = &osc_env_info(env)->oti_fbatch;
 	int i;
 
-	ll_pagevec_init(pagevec, 0);
+	ll_folio_batch_init(fbatch, 0);
 	for (i = 0; i < max_index; i++) {
 		struct cl_page *page = pvec[i];
 
@@ -519,11 +519,11 @@ static void discard_pagevec(const struct lu_env *env, struct cl_io *io,
 		cl_page_delete(env, page);
 		cl_page_discard(env, io, page);
 		cl_page_disown(env, io, page);
-		cl_pagevec_put(env, page, pagevec);
+		cl_batch_put(env, page, fbatch);
 
 		pvec[i] = NULL;
 	}
-	pagevec_release(pagevec);
+	folio_batch_release(fbatch);
 }
 
 /**
@@ -615,7 +615,7 @@ long osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
 		spin_unlock(&cli->cl_lru_list_lock);
 
 		if (clobj != NULL) {
-			discard_pagevec(env, io, pvec, index);
+			discard_cl_pages(env, io, pvec, index);
 			index = 0;
 
 			cl_io_fini(env, io);
@@ -660,7 +660,7 @@ long osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
 		pvec[index++] = page;
 		if (unlikely(index == OTI_PVEC_SIZE)) {
 			spin_unlock(&cli->cl_lru_list_lock);
-			discard_pagevec(env, io, pvec, index);
+			discard_cl_pages(env, io, pvec, index);
 			index = 0;
 
 			spin_lock(&cli->cl_lru_list_lock);
@@ -672,7 +672,7 @@ long osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
 	spin_unlock(&cli->cl_lru_list_lock);
 
 	if (clobj != NULL) {
-		discard_pagevec(env, io, pvec, index);
+		discard_cl_pages(env, io, pvec, index);
 
 		cl_io_fini(env, io);
 		cl_object_put(env, clobj);
diff --git a/lustre/osd-ldiskfs/osd_io.c b/lustre/osd-ldiskfs/osd_io.c
index 166f641..8d3ccc5 100644
--- a/lustre/osd-ldiskfs/osd_io.c
+++ b/lustre/osd-ldiskfs/osd_io.c
@@ -805,10 +805,10 @@ static int osd_bufs_put(const struct lu_env *env, struct dt_object *dt,
 			struct niobuf_local *lnb, int npages)
 {
 	struct osd_thread_info *oti = osd_oti_get(env);
-	struct pagevec pvec;
+	struct folio_batch fbatch;
 	int i;
 
-	ll_pagevec_init(&pvec, 0);
+	ll_folio_batch_init(&fbatch, 0);
 
 	for (i = 0; i < npages; i++) {
 		struct page *page = lnb[i].lnb_page;
@@ -824,15 +824,15 @@ static int osd_bufs_put(const struct lu_env *env, struct dt_object *dt,
 		} else {
 			if (lnb[i].lnb_locked)
 				unlock_page(page);
-			if (pagevec_add(&pvec, page) == 0)
-				pagevec_release(&pvec);
+			if (folio_batch_add_page(&fbatch, page) == 0)
+				folio_batch_release(&fbatch);
 		}
 
 		lnb[i].lnb_page = NULL;
 	}
 
-	/* Release any partial pagevec */
-	pagevec_release(&pvec);
+	/* Release any partial folio_batch */
+	folio_batch_release(&fbatch);
 
 	RETURN(0);
 }
-- 
1.8.3.1
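For completeness, on an old kernel with the two-argument pagevec_init() and
four-argument pagevec_lookup(), the wrappers in lustre_compat.h reduce the
common lookup loop to plain pagevec code, roughly as follows (illustrative
only; walk_mapping_old is hypothetical):

    #include <linux/pagemap.h>
    #include <linux/pagevec.h>

    static void walk_mapping_old(struct address_space *mapping)
    {
            struct pagevec batch;  /* what 'struct folio_batch' resolves to */
            pgoff_t index = 0;
            unsigned int nr;

            pagevec_init(&batch, 0);             /* ll_folio_batch_init() */
            do {
                    nr = pagevec_lookup(&batch, mapping, index,
                                        PAGEVEC_SIZE);
                                            /* ll_filemap_get_folios() */
                    if (nr)
                            index = batch.pages[nr - 1]->index + 1;
                    pagevec_release(&batch);     /* folio_batch_release() */
            } while (nr);
    }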