From: Shaun Tancheff
Date: Tue, 5 Mar 2024 03:15:54 +0000 (+0700)
Subject: LU-17081 build: Prefer folio_batch to pagevec
X-Git-Tag: 2.15.62~57
X-Git-Url: https://git.whamcloud.com/?a=commitdiff_plain;h=b82eab822c078b584fadefd419bfa74df0edebcb;p=fs%2Flustre-release.git

LU-17081 build: Prefer folio_batch to pagevec

Linux commit v5.16-rc4-36-g10331795fb79
  pagevec: Add folio_batch

Linux commit v6.2-rc4-254-g811561288397
  mm: pagevec: add folio_batch_reinit()

Linux commit v6.4-rc4-438-g1e0877d58b1e
  mm: remove struct pagevec

Use folio_batch and provide wrappers for older kernels to fall back
to pagevec handling; conditionally provide a folio_batch_reinit()
where the kernel lacks one.

Add macros to ease adding pages to folio_batch(es), as well as
unwinding batches of struct folio where struct page is needed.

HPE-bug-id: LUS-11811
Signed-off-by: Shaun Tancheff
Change-Id: Ie70e4851df00a73f194aaa6631678b54b5d128a1
Reviewed-on: https://review.whamcloud.com/c/fs/lustre-release/+/52259
Tested-by: Maloo
Tested-by: jenkins
Reviewed-by: Oleg Drokin
Reviewed-by: Andreas Dilger
Reviewed-by: Jian Yu
Reviewed-by: Yang Sheng
Reviewed-by: James Simmons
---

diff --git a/lustre/autoconf/lustre-core.m4 b/lustre/autoconf/lustre-core.m4
index 324f6d8..d944ced 100644
--- a/lustre/autoconf/lustre-core.m4
+++ b/lustre/autoconf/lustre-core.m4
@@ -3990,6 +3990,29 @@ AC_DEFUN([LC_HAVE_LOCKS_LOCK_FILE_WAIT_IN_FILELOCK], [
 ]) # LC_HAVE_LOCKS_LOCK_FILE_WAIT_IN_FILELOCK
 
 #
+# LC_HAVE_FOLIO_BATCH_REINIT
+#
+# linux kernel v6.2-rc4-254-g811561288397
+# mm: pagevec: add folio_batch_reinit()
+#
+AC_DEFUN([LC_SRC_HAVE_FOLIO_BATCH_REINIT], [
+	LB2_LINUX_TEST_SRC([folio_batch_reinit_exists], [
+		#include <linux/pagevec.h>
+	],[
+		struct folio_batch fbatch __attribute__ ((unused));
+
+		folio_batch_reinit(&fbatch);
+	],[-Werror])
+])
+AC_DEFUN([LC_HAVE_FOLIO_BATCH_REINIT], [
+	LB2_MSG_LINUX_TEST_RESULT([if 'folio_batch_reinit' is available],
+	[folio_batch_reinit_exists], [
+		AC_DEFINE(HAVE_FOLIO_BATCH_REINIT, 1,
+			['folio_batch_reinit' is available])
+	])
+]) # LC_HAVE_FOLIO_BATCH_REINIT
+
+#
 # LC_HAVE_IOV_ITER_IOVEC
 #
 # linux kernel v6.3-rc4-32-g6eb203e1a868
@@ -4149,6 +4172,51 @@ AC_DEFUN([LC_HAVE_GET_USER_PAGES_WITHOUT_VMA], [
 ]) # LC_HAVE_GET_USER_PAGES_WITHOUT_VMA
 
 #
+# LC_HAVE_FOLIO_BATCH
+#
+# linux kernel v5.16-rc4-36-g10331795fb79
+# pagevec: Add folio_batch
+#
+AC_DEFUN([LC_SRC_HAVE_FOLIO_BATCH], [
+	LB2_LINUX_TEST_SRC([struct_folio_batch_exists], [
+		#include <linux/pagevec.h>
+	],[
+		struct folio_batch fbatch __attribute__ ((unused));
+
+		folio_batch_init(&fbatch);
+	],[-Werror])
+])
+AC_DEFUN([LC_HAVE_FOLIO_BATCH], [
+	LB2_MSG_LINUX_TEST_RESULT([if 'struct folio_batch' is available],
+	[struct_folio_batch_exists], [
+		AC_DEFINE(HAVE_FOLIO_BATCH, 1,
+			['struct folio_batch' is available])
+	])
+]) # LC_HAVE_FOLIO_BATCH
+
+#
+# LC_HAVE_STRUCT_PAGEVEC
+#
+# linux kernel v6.4-rc4-438-g1e0877d58b1e
+# mm: remove struct pagevec
+#
+AC_DEFUN([LC_SRC_HAVE_STRUCT_PAGEVEC], [
+	LB2_LINUX_TEST_SRC([struct_pagevec_exists], [
+		#include <linux/pagevec.h>
+	],[
+		struct pagevec *pvec = NULL;
+		(void)pvec;
+	],[-Werror])
+])
+AC_DEFUN([LC_HAVE_STRUCT_PAGEVEC], [
+	LB2_MSG_LINUX_TEST_RESULT([if 'struct pagevec' is available],
+	[struct_pagevec_exists], [
+		AC_DEFINE(HAVE_PAGEVEC, 1,
+			['struct pagevec' is available])
+	])
+]) # LC_HAVE_STRUCT_PAGEVEC
+
+#
 # LC_PROG_LINUX
 #
 # Lustre linux kernel checks
@@ -4407,6 +4475,7 @@ AC_DEFUN([LC_PROG_LINUX_SRC], [
 	LC_SRC_HAVE_MNT_IDMAP_ARG
 	LC_SRC_HAVE_LOCKS_LOCK_FILE_WAIT_IN_FILELOCK
 	LC_SRC_HAVE_U64_CAPABILITY
+	LC_SRC_HAVE_FOLIO_BATCH_REINIT
 
 	# 6.4
 	LC_SRC_HAVE_IOV_ITER_IOVEC
@@ -4417,6 +4486,8 @@ AC_DEFUN([LC_PROG_LINUX_SRC], [
 	LC_SRC_HAVE_FILEMAP_SPLICE_READ
 	LC_SRC_HAVE_ENUM_ITER_PIPE
 	LC_SRC_HAVE_GET_USER_PAGES_WITHOUT_VMA
+	LC_SRC_HAVE_FOLIO_BATCH
+	LC_SRC_HAVE_STRUCT_PAGEVEC
 
 	# kernel patch to extend integrity interface
 	LC_SRC_BIO_INTEGRITY_PREP_FN
@@ -4697,6 +4768,7 @@ AC_DEFUN([LC_PROG_LINUX_RESULTS], [
 	LC_HAVE_MNT_IDMAP_ARG
 	LC_HAVE_LOCKS_LOCK_FILE_WAIT_IN_FILELOCK
 	LC_HAVE_U64_CAPABILITY
+	LC_HAVE_FOLIO_BATCH_REINIT
 
 	# 6.4
 	LC_HAVE_IOV_ITER_IOVEC
@@ -4707,6 +4779,8 @@ AC_DEFUN([LC_PROG_LINUX_RESULTS], [
 	LC_HAVE_FILEMAP_SPLICE_READ
 	LC_HAVE_ENUM_ITER_PIPE
 	LC_HAVE_GET_USER_PAGES_WITHOUT_VMA
+	LC_HAVE_FOLIO_BATCH
+	LC_HAVE_STRUCT_PAGEVEC
 
 	# kernel patch to extend integrity interface
 	LC_BIO_INTEGRITY_PREP_FN
diff --git a/lustre/include/cl_object.h b/lustre/include/cl_object.h
index 233455d..281d7e3 100644
--- a/lustre/include/cl_object.h
+++ b/lustre/include/cl_object.h
@@ -99,6 +99,7 @@
 #include
 #include
 #include
+#include <linux/pagevec.h>
 
 struct obd_info;
 struct inode;
@@ -1388,7 +1389,7 @@ struct cl_io_slice {
 };
 
 typedef void (*cl_commit_cbt)(const struct lu_env *, struct cl_io *,
-			      struct pagevec *);
+			      struct folio_batch *);
 
 struct cl_read_ahead {
 	/* Maximum page index the readahead window will end.
@@ -2204,8 +2205,8 @@ struct cl_page *cl_page_alloc(const struct lu_env *env,
 void cl_page_get(struct cl_page *page);
 void cl_page_put(const struct lu_env *env,
 		 struct cl_page *page);
-void cl_pagevec_put(const struct lu_env *env,
-		    struct cl_page *page, struct pagevec *pvec);
+void cl_batch_put(const struct lu_env *env, struct cl_page *page,
+		  struct folio_batch *fbatch);
 void cl_page_print(const struct lu_env *env, void *cookie,
 		   lu_printer_t printer, const struct cl_page *pg);
 void cl_page_header_print(const struct lu_env *env, void *cookie,
diff --git a/lustre/include/lustre_compat.h b/lustre/include/lustre_compat.h
index 3148b78..ac9c70c 100644
--- a/lustre/include/lustre_compat.h
+++ b/lustre/include/lustre_compat.h
@@ -44,6 +44,7 @@
 #include
 #include
 #include
+#include <linux/pagevec.h>
 #include
 #ifdef HAVE_XARRAY_SUPPORT
 #include
@@ -486,12 +487,6 @@ static inline struct timespec current_time(struct inode *inode)
 #define smp_store_mb(var, value)	set_mb(var, value) /* set full mem barrier */
 #endif
 
-#ifdef HAVE_PAGEVEC_INIT_ONE_PARAM
-#define ll_pagevec_init(pvec, n) pagevec_init(pvec)
-#else
-#define ll_pagevec_init(pvec, n) pagevec_init(pvec, n)
-#endif
-
 #ifdef HAVE_D_COUNT
 # define ll_d_count(d)		d_count(d)
 #else
@@ -750,4 +745,41 @@ static inline struct page *ll_read_cache_page(struct address_space *mapping,
 #endif /* HAVE_READ_CACHE_PAGE_WANTS_FILE */
 }
 
+#ifdef HAVE_FOLIO_BATCH
+# define ll_folio_batch_init(batch, n)	folio_batch_init(batch)
+# define fbatch_at(fbatch, f)		((fbatch)->folios[(f)])
+# define fbatch_at_npgs(fbatch, f)	folio_nr_pages((fbatch)->folios[(f)])
+# define fbatch_at_pg(fbatch, f, pg)	folio_page((fbatch)->folios[(f)], (pg))
+# define folio_batch_add_page(fbatch, page) \
+	folio_batch_add(fbatch, page_folio(page))
+# ifndef HAVE_FOLIO_BATCH_REINIT
+static inline void folio_batch_reinit(struct folio_batch *fbatch)
+{
+	fbatch->nr = 0;
+}
+# endif /* HAVE_FOLIO_BATCH_REINIT */
+
+#else /* !HAVE_FOLIO_BATCH */
+
+# ifdef HAVE_PAGEVEC
+# define folio_batch			pagevec
+# endif
+# define folio_batch_init(pvec)		pagevec_init(pvec)
+# define folio_batch_reinit(pvec)	pagevec_reinit(pvec)
+# define folio_batch_count(pvec)	pagevec_count(pvec)
+# define folio_batch_space(pvec)	pagevec_space(pvec)
+# define folio_batch_add_page(pvec, page) \
+	pagevec_add(pvec, page)
+# define folio_batch_release(pvec) \
+	pagevec_release(((struct pagevec *)pvec))
+# ifdef HAVE_PAGEVEC_INIT_ONE_PARAM
+# define ll_folio_batch_init(pvec, n)	pagevec_init(pvec)
+# else
+# define ll_folio_batch_init(pvec, n)	pagevec_init(pvec, n)
+# endif
+# define fbatch_at(pvec, n)		((pvec)->pages[(n)])
+# define fbatch_at_npgs(pvec, n)	1
+# define fbatch_at_pg(pvec, n, pg)	((pvec)->pages[(n)])
+#endif /* HAVE_FOLIO_BATCH */
+
 #endif /* _LUSTRE_COMPAT_H */
diff --git a/lustre/include/lustre_osc.h b/lustre/include/lustre_osc.h
index b2da558..29f2c19 100644
--- a/lustre/include/lustre_osc.h
+++ b/lustre/include/lustre_osc.h
@@ -164,7 +164,7 @@ struct osc_thread_info {
 	union ldlm_policy_data	oti_policy;
 	struct cl_attr		oti_attr;
 	struct cl_io		oti_io;
-	struct pagevec		oti_pagevec;
+	struct folio_batch	oti_fbatch;
 	void			*oti_pvec[OTI_PVEC_SIZE];
 	/**
 	 * Fields used by cl_lock_discard_pages().
diff --git a/lustre/llite/vvp_io.c b/lustre/llite/vvp_io.c
index 0fa77d0..5a2477a 100644
--- a/lustre/llite/vvp_io.c
+++ b/lustre/llite/vvp_io.c
@@ -43,6 +43,7 @@
 #include "llite_internal.h"
 #include "vvp_internal.h"
+#include <linux/pagevec.h>
 #include
 
 static struct vvp_io *cl2vvp_io(const struct lu_env *env,
@@ -1029,15 +1030,19 @@ static inline void ll_account_page_dirtied(struct page *page,
  * Backwards compat for 3.x, 5.x kernels relating to memcg handling
  * & rename of radix tree to xarray.
  */
-static void vvp_set_pagevec_dirty(struct pagevec *pvec)
+static void vvp_set_batch_dirty(struct folio_batch *fbatch)
 {
-	struct page *page = pvec->pages[0];
-	int count = pagevec_count(pvec);
+	struct page *page = fbatch_at_pg(fbatch, 0, 0);
+	int count = folio_batch_count(fbatch);
 	int i;
+#if !defined(HAVE_FOLIO_BATCH) || defined(HAVE_KALLSYMS_LOOKUP_NAME)
+	int pg, npgs;
+#endif
 #ifdef HAVE_KALLSYMS_LOOKUP_NAME
 	struct address_space *mapping = page->mapping;
 	unsigned long flags;
 	unsigned long skip_pages = 0;
+	int pgno;
 	int dirtied = 0;
 #endif
 
@@ -1056,25 +1061,41 @@ static void vvp_set_pagevec_dirty(struct pagevec *pvec)
 	 */
 #ifndef HAVE_ACCOUNT_PAGE_DIRTIED_EXPORT
 	if (!vvp_account_page_dirtied) {
-		for (i = 0; i < count; i++)
-			__set_page_dirty_nobuffers(pvec->pages[i]);
+		for (i = 0; i < count; i++) {
+#ifdef HAVE_FOLIO_BATCH
+			filemap_dirty_folio(page->mapping, fbatch->folios[i]);
+#else
+			npgs = fbatch_at_npgs(fbatch, i);
+			for (pg = 0; pg < npgs; pg++) {
+				page = fbatch_at_pg(fbatch, i, pg);
+				__set_page_dirty_nobuffers(page);
+			}
+#endif
+		}
 		EXIT;
 	}
 #endif
 
+	/* account_page_dirtied is available directly or via kallsyms */
 #ifdef HAVE_KALLSYMS_LOOKUP_NAME
-	for (i = 0; i < count; i++) {
-		page = pvec->pages[i];
+	for (pgno = i = 0; i < count; i++) {
+		npgs = fbatch_at_npgs(fbatch, i);
+		for (pg = 0; pg < npgs; pg++) {
+			page = fbatch_at_pg(fbatch, i, pg);
 
-		ClearPageReclaim(page);
+			ClearPageReclaim(page);
 
-		vvp_lock_page_memcg(page);
-		if (TestSetPageDirty(page)) {
-			/* page is already dirty .. no extra work needed
-			 * set a flag for the i'th page to be skipped
-			 */
-			vvp_unlock_page_memcg(page);
-			skip_pages |= (1 << i);
+			vvp_lock_page_memcg(page);
+			if (TestSetPageDirty(page)) {
+				/* page is already dirty .. no extra work
+				 * needed; flag the pgno'th page to be skipped
+				 */
+				vvp_unlock_page_memcg(page);
+				skip_pages |= (1ul << pgno);
+			}
+			/* pgno must advance for every page so the skip bits
+			 * line up with the accounting loop below
+			 */
+			pgno++;
+			LASSERTF(pgno <= BITS_PER_LONG,
+				 "Limit exceeded pgno: %d/%d\n", pgno,
+				 BITS_PER_LONG);
 		}
 	}
 
@@ -1089,19 +1110,22 @@ static void vvp_set_batch_dirty(struct folio_batch *fbatch)
 	 * dirty_nobuffers should be impossible because we hold the page lock.)
 	 * 4. All mappings are the same because i/o is only to one file.
 	 */
-	for (i = 0; i < count; i++) {
-		page = pvec->pages[i];
-		/* if the i'th page was unlocked above, skip it here */
-		if ((skip_pages >> i) & 1)
-			continue;
-
-		LASSERTF(page->mapping == mapping,
-			 "all pages must have the same mapping. page %px, mapping %px, first mapping %px\n",
-			 page, page->mapping, mapping);
-		WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
-		ll_account_page_dirtied(page, mapping);
-		dirtied++;
-		vvp_unlock_page_memcg(page);
+	for (pgno = i = 0; i < count; i++) {
+		npgs = fbatch_at_npgs(fbatch, i);
+		for (pg = 0; pg < npgs; pg++) {
+			page = fbatch_at_pg(fbatch, i, pg);
+			/* if the pgno'th page was unlocked above, skip it */
+			if ((skip_pages >> pgno++) & 1)
+				continue;
+
+			LASSERTF(page->mapping == mapping,
+				 "all pages must have the same mapping. page %px, mapping %px, first mapping %px\n",
+				 page, page->mapping, mapping);
+			WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
+			ll_account_page_dirtied(page, mapping);
+			dirtied++;
+			vvp_unlock_page_memcg(page);
+		}
 	}
 
 	ll_xa_unlock_irqrestore(&mapping->i_pages, flags);
@@ -1117,31 +1141,36 @@ static void vvp_set_batch_dirty(struct folio_batch *fbatch)
 }
 
 static void write_commit_callback(const struct lu_env *env, struct cl_io *io,
-				  struct pagevec *pvec)
+				  struct folio_batch *fbatch)
 {
+	struct page *vmpage;
+	struct cl_page *page;
+	int pg, npgs;
 	int count = 0;
 	int i = 0;
 
 	ENTRY;
-	count = pagevec_count(pvec);
+	count = folio_batch_count(fbatch);
 	LASSERT(count > 0);
 
 	for (i = 0; i < count; i++) {
-		struct page *vmpage = pvec->pages[i];
-
-		SetPageUptodate(vmpage);
+		npgs = fbatch_at_npgs(fbatch, i);
+		for (pg = 0; pg < npgs; pg++)
+			SetPageUptodate(fbatch_at_pg(fbatch, i, pg));
 	}
 
-	vvp_set_pagevec_dirty(pvec);
+	vvp_set_batch_dirty(fbatch);
 
 	for (i = 0; i < count; i++) {
-		struct page *vmpage = pvec->pages[i];
-		struct cl_page *page = (struct cl_page *) vmpage->private;
-
-		cl_page_disown(env, io, page);
-		lu_ref_del(&page->cp_reference, "cl_io", cl_io_top(io));
-		cl_page_put(env, page);
+		npgs = fbatch_at_npgs(fbatch, i);
+		for (pg = 0; pg < npgs; pg++) {
+			vmpage = fbatch_at_pg(fbatch, i, pg);
+			page = (struct cl_page *) vmpage->private;
+			cl_page_disown(env, io, page);
+			lu_ref_del(&page->cp_reference, "cl_io", cl_io_top(io));
+			cl_page_put(env, page);
+		}
 	}
 
 	EXIT;
@@ -1477,9 +1506,9 @@ static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
 }
 
 static void mkwrite_commit_callback(const struct lu_env *env, struct cl_io *io,
-				    struct pagevec *pvec)
+				    struct folio_batch *fbatch)
 {
-	vvp_set_pagevec_dirty(pvec);
+	vvp_set_batch_dirty(fbatch);
 }
 
 static int vvp_io_fault_start(const struct lu_env *env,
diff --git a/lustre/obdclass/cl_page.c b/lustre/obdclass/cl_page.c
index 5ffd8a5..631d66b 100644
--- a/lustre/obdclass/cl_page.c
+++ b/lustre/obdclass/cl_page.c
@@ -159,7 +159,7 @@ static void __cl_page_free(struct cl_page *cl_page, unsigned short bufsize)
 }
 
 static void cl_page_free(const struct lu_env *env, struct cl_page *cp,
-			 struct pagevec *pvec)
+			 struct folio_batch *fbatch)
 {
 	struct cl_object *obj = cp->cp_obj;
 	unsigned short bufsize = cl_object_header(obj)->coh_page_bufsize;
@@ -178,9 +178,9 @@ static void cl_page_free(const struct lu_env *env, struct cl_page *cp,
 		LASSERT(vmpage != NULL);
 		LASSERT((struct cl_page *)vmpage->private != cp);
 
-		if (pvec != NULL) {
-			if (!pagevec_add(pvec, vmpage))
-				pagevec_release(pvec);
+		if (fbatch != NULL) {
+			if (!folio_batch_add_page(fbatch, vmpage))
+				folio_batch_release(fbatch);
 		} else {
 			put_page(vmpage);
 		}
@@ -452,13 +452,13 @@ void cl_page_get(struct cl_page *page)
 EXPORT_SYMBOL(cl_page_get);
 
 /**
- * Releases a reference to a page, use the pagevec to release the pages
+ * Releases a reference to a page, use the folio_batch to release the pages
  * in batch if provided.
  *
- * Users need to do a final pagevec_release() to release any trailing pages.
+ * Users need to do a final folio_batch_release() to release any trailing pages.
  */
-void cl_pagevec_put(const struct lu_env *env, struct cl_page *page,
-		    struct pagevec *pvec)
+void cl_batch_put(const struct lu_env *env, struct cl_page *page,
+		  struct folio_batch *fbatch)
 {
 	ENTRY;
 	CL_PAGE_HEADER(D_TRACE, env, page, "%d\n",
@@ -471,15 +471,15 @@ void cl_pagevec_put(const struct lu_env *env, struct cl_page *page,
 		PASSERT(env, page, page->cp_owner == NULL);
 		PASSERT(env, page, list_empty(&page->cp_batch));
 		/* Page is no longer reachable by other threads. Tear it down */
-		cl_page_free(env, page, pvec);
+		cl_page_free(env, page, fbatch);
 	}
 
 	EXIT;
 }
-EXPORT_SYMBOL(cl_pagevec_put);
+EXPORT_SYMBOL(cl_batch_put);
 
 /**
- * Releases a reference to a page, wrapper to cl_pagevec_put
+ * Releases a reference to a page, wrapper to cl_batch_put
  *
  * When last reference is released, page is returned to the cache, unless it
  * is in cl_page_state::CPS_FREEING state, in which case it is immediately
@@ -489,7 +489,7 @@ EXPORT_SYMBOL(cl_pagevec_put);
  */
 void cl_page_put(const struct lu_env *env, struct cl_page *page)
 {
-	cl_pagevec_put(env, page, NULL);
+	cl_batch_put(env, page, NULL);
 }
 EXPORT_SYMBOL(cl_page_put);
 
diff --git a/lustre/osc/osc_cache.c b/lustre/osc/osc_cache.c
index 0f54582..91d37f7 100644
--- a/lustre/osc/osc_cache.c
+++ b/lustre/osc/osc_cache.c
@@ -951,7 +951,7 @@ static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index,
 	struct client_obd *cli = osc_cli(obj);
 	struct osc_async_page *oap;
 	struct osc_async_page *tmp;
-	struct pagevec *pvec;
+	struct folio_batch *fbatch;
 	int pages_in_chunk = 0;
 	int ppc_bits = cli->cl_chunkbits - PAGE_SHIFT;
@@ -976,8 +976,8 @@ static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index,
 	io  = osc_env_thread_io(env);
 	io->ci_obj = cl_object_top(osc2cl(obj));
 	io->ci_ignore_layout = 1;
-	pvec = &osc_env_info(env)->oti_pagevec;
-	ll_pagevec_init(pvec, 0);
+	fbatch = &osc_env_info(env)->oti_fbatch;
+	ll_folio_batch_init(fbatch, 0);
 	rc = cl_io_init(env, io, CIT_MISC, io->ci_obj);
 	if (rc < 0)
 		GOTO(out, rc);
@@ -1015,12 +1015,12 @@ static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index,
 		}
 
 		lu_ref_del(&page->cp_reference, "truncate", current);
-		cl_pagevec_put(env, page, pvec);
+		cl_batch_put(env, page, fbatch);
 
 		--ext->oe_nr_pages;
 		++nr_pages;
 	}
-	pagevec_release(pvec);
+	folio_batch_release(fbatch);
 
 	EASSERTF(ergo(ext->oe_start >= trunc_index + !!partial,
 		      ext->oe_nr_pages == 0),
@@ -2242,7 +2242,7 @@ int osc_queue_async_io(const struct lu_env *env, struct cl_io *io,
 	struct osc_extent *ext = NULL;
 	struct osc_async_page *oap = &ops->ops_oap;
 	struct client_obd *cli = osc_cli(osc);
-	struct pagevec *pvec = &osc_env_info(env)->oti_pagevec;
+	struct folio_batch *fbatch = &osc_env_info(env)->oti_fbatch;
 	pgoff_t index;
 	unsigned int tmp;
 	unsigned int grants = 0;
@@ -2370,10 +2370,11 @@ int osc_queue_async_io(const struct lu_env *env, struct cl_io *io,
 
 	/* We must not hold a page lock while we do osc_enter_cache()
 	 * or osc_extent_find(), so we must mark dirty & unlock
-	 * any pages in the write commit pagevec. */
-	if (pagevec_count(pvec)) {
-		cb(env, io, pvec);
-		pagevec_reinit(pvec);
+	 * any pages in the write commit folio_batch.
+	 */
+	if (folio_batch_count(fbatch)) {
+		cb(env, io, fbatch);
+		folio_batch_reinit(fbatch);
 	}
 
 	if (grants == 0) {
@@ -3021,7 +3022,7 @@ bool osc_page_gang_lookup(const struct lu_env *env, struct cl_io *io,
 			  osc_page_gang_cbt cb, void *cbdata)
 {
 	struct osc_page *ops;
-	struct pagevec	*pagevec;
+	struct folio_batch *fbatch;
 	void **pvec;
 	pgoff_t idx;
 	unsigned int nr;
@@ -3033,8 +3034,8 @@ bool osc_page_gang_lookup(const struct lu_env *env, struct cl_io *io,
 	idx = start;
 	pvec = osc_env_info(env)->oti_pvec;
-	pagevec = &osc_env_info(env)->oti_pagevec;
-	ll_pagevec_init(pagevec, 0);
+	fbatch = &osc_env_info(env)->oti_fbatch;
+	ll_folio_batch_init(fbatch, 0);
 	spin_lock(&osc->oo_tree_lock);
 	while ((nr = radix_tree_gang_lookup(&osc->oo_tree, pvec,
 					    idx, OTI_PVEC_SIZE)) > 0) {
@@ -3080,9 +3081,9 @@ bool osc_page_gang_lookup(const struct lu_env *env, struct cl_io *io,
 			ops = pvec[i];
 			page = ops->ops_cl.cpl_page;
 			lu_ref_del(&page->cp_reference, "gang_lookup", current);
-			cl_pagevec_put(env, page, pagevec);
+			cl_batch_put(env, page, fbatch);
 		}
-		pagevec_release(pagevec);
+		folio_batch_release(fbatch);
 
 		if (nr < OTI_PVEC_SIZE || end_of_region)
 			break;
diff --git a/lustre/osc/osc_io.c b/lustre/osc/osc_io.c
index 2ce268f..947af47 100644
--- a/lustre/osc/osc_io.c
+++ b/lustre/osc/osc_io.c
@@ -300,13 +300,13 @@ int osc_io_commit_async(const struct lu_env *env,
 			struct cl_page_list *qin, int from, int to,
 			cl_commit_cbt cb)
 {
-	struct cl_io	*io  = ios->cis_io;
-	struct osc_io	*oio = cl2osc_io(env, ios);
+	struct cl_io *io = ios->cis_io;
+	struct osc_io *oio = cl2osc_io(env, ios);
 	struct osc_object *osc = cl2osc(ios->cis_obj);
-	struct cl_page	*page;
-	struct cl_page	*last_page;
+	struct cl_page *page;
+	struct cl_page *last_page;
 	struct osc_page *opg;
-	struct pagevec	*pvec = &osc_env_info(env)->oti_pagevec;
+	struct folio_batch *fbatch = &osc_env_info(env)->oti_fbatch;
 	int result = 0;
 	ENTRY;
@@ -326,7 +326,7 @@ int osc_io_commit_async(const struct lu_env *env,
 		}
 	}
 
-	ll_pagevec_init(pvec, 0);
+	ll_folio_batch_init(fbatch, 0);
 	while (qin->pl_nr > 0) {
 		struct osc_async_page *oap;
 
@@ -355,9 +355,9 @@ int osc_io_commit_async(const struct lu_env *env,
 		cl_page_list_del(env, qin, page, true);
 
 		/* if there are no more slots, do the callback & reinit */
-		if (pagevec_add(pvec, page->cp_vmpage) == 0) {
-			(*cb)(env, io, pvec);
-			pagevec_reinit(pvec);
+		if (!folio_batch_add_page(fbatch, page->cp_vmpage)) {
+			(*cb)(env, io, fbatch);
+			folio_batch_reinit(fbatch);
 		}
 	}
 
 	/* The shrink interval is in seconds, so we can update it once per
@@ -366,9 +366,9 @@ int osc_io_commit_async(const struct lu_env *env,
 
 	osc_update_next_shrink(osc_cli(osc));
 
-	/* Clean up any partially full pagevecs */
-	if (pagevec_count(pvec) != 0)
-		(*cb)(env, io, pvec);
+	/* Clean up any partially full folio_batches */
+	if (folio_batch_count(fbatch) != 0)
+		(*cb)(env, io, fbatch);
 
 	/* Can't access these pages any more. Page can be in transfer and
 	 * complete at any time. */
 }
 
diff --git a/lustre/osc/osc_page.c b/lustre/osc/osc_page.c
index 9729275..7be22e5 100644
--- a/lustre/osc/osc_page.c
+++ b/lustre/osc/osc_page.c
@@ -488,24 +488,24 @@ static void osc_lru_use(struct client_obd *cli, struct osc_page *opg)
 	}
 }
 
-static void discard_pagevec(const struct lu_env *env, struct cl_io *io,
-			    struct cl_page **pvec, int max_index)
+static void discard_cl_pages(const struct lu_env *env, struct cl_io *io,
+			     struct cl_page **pvec, int max_index)
 {
-	struct pagevec *pagevec = &osc_env_info(env)->oti_pagevec;
+	struct folio_batch *fbatch = &osc_env_info(env)->oti_fbatch;
 	int i;
 
-	ll_pagevec_init(pagevec, 0);
+	ll_folio_batch_init(fbatch, 0);
 	for (i = 0; i < max_index; i++) {
 		struct cl_page *page = pvec[i];
 
 		LASSERT(cl_page_is_owned(page, io));
 		cl_page_discard(env, io, page);
 		cl_page_disown(env, io, page);
-		cl_pagevec_put(env, page, pagevec);
+		cl_batch_put(env, page, fbatch);
 
 		pvec[i] = NULL;
 	}
-	pagevec_release(pagevec);
+	folio_batch_release(fbatch);
 }
 
 /**
@@ -597,7 +597,7 @@ long osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
 		spin_unlock(&cli->cl_lru_list_lock);
 
 		if (clobj != NULL) {
-			discard_pagevec(env, io, pvec, index);
+			discard_cl_pages(env, io, pvec, index);
 			index = 0;
 
 			cl_io_fini(env, io);
@@ -642,7 +642,7 @@ long osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
 		pvec[index++] = page;
 		if (unlikely(index == OTI_PVEC_SIZE)) {
 			spin_unlock(&cli->cl_lru_list_lock);
-			discard_pagevec(env, io, pvec, index);
+			discard_cl_pages(env, io, pvec, index);
 			index = 0;
 
 			spin_lock(&cli->cl_lru_list_lock);
@@ -654,7 +654,7 @@ long osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
 	spin_unlock(&cli->cl_lru_list_lock);
 
 	if (clobj != NULL) {
-		discard_pagevec(env, io, pvec, index);
+		discard_cl_pages(env, io, pvec, index);
 
 		cl_io_fini(env, io);
 		cl_object_put(env, clobj);
diff --git a/lustre/osd-ldiskfs/osd_io.c b/lustre/osd-ldiskfs/osd_io.c
index 7b253bd..c093c40 100644
--- a/lustre/osd-ldiskfs/osd_io.c
+++ b/lustre/osd-ldiskfs/osd_io.c
@@ -663,11 +663,11 @@ static int osd_bufs_put(const struct lu_env *env, struct dt_object *dt,
 	struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
 	struct osd_thread_info *oti = osd_oti_get(env);
 	struct osd_iobuf *iobuf = &oti->oti_iobuf;
-	struct pagevec pvec;
+	struct folio_batch fbatch;
 	int i;
 
 	osd_brw_stats_update(osd, iobuf);
-	ll_pagevec_init(&pvec, 0);
+	ll_folio_batch_init(&fbatch, 0);
 
 	for (i = 0; i < npages; i++) {
 		struct page *page = lnb[i].lnb_page;
@@ -683,8 +683,8 @@ static int osd_bufs_put(const struct lu_env *env, struct dt_object *dt,
 		} else {
 			if (lnb[i].lnb_locked)
 				unlock_page(page);
-			if (pagevec_add(&pvec, page) == 0)
-				pagevec_release(&pvec);
+			if (folio_batch_add_page(&fbatch, page) == 0)
+				folio_batch_release(&fbatch);
 		}
 
 		lnb[i].lnb_page = NULL;
@@ -692,8 +692,8 @@ static int osd_bufs_put(const struct lu_env *env, struct dt_object *dt,
 	LASSERTF(oti->oti_dio_pages_used == 0, "%d\n", oti->oti_dio_pages_used);
 
-	/* Release any partial pagevec */
-	pagevec_release(&pvec);
+	/* Release any partial folio_batch */
+	folio_batch_release(&fbatch);
 	RETURN(0);
 }
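
For reviewers, a summary of how the compat wrappers added to lustre_compat.h
resolve on each kernel generation (derived from the #ifdef block in this
patch; no new behavior is described here):

  wrapper                      HAVE_FOLIO_BATCH kernels            pagevec-only kernels
  ll_folio_batch_init(b, n)    folio_batch_init(b)                 pagevec_init(b) or pagevec_init(b, n)
  folio_batch_add_page(b, p)   folio_batch_add(b, page_folio(p))   pagevec_add(b, p)
  folio_batch_reinit(b)        kernel's own, else b->nr = 0        pagevec_reinit(b)
  fbatch_at(b, i)              (b)->folios[(i)]                    (b)->pages[(i)]
  fbatch_at_npgs(b, i)         folio_nr_pages((b)->folios[(i)])    always 1
  fbatch_at_pg(b, i, pg)       folio_page((b)->folios[(i)], (pg))  (b)->pages[(i)]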
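
A minimal sketch of the iteration pattern the fbatch_at_* macros enable,
mirroring the loop shape of vvp_set_batch_dirty() and write_commit_callback()
above. The function name is illustrative only, not code from this patch:

	#include <linux/mm.h>
	#include <linux/pagevec.h>
	#include <lustre_compat.h>	/* fbatch_at_npgs()/fbatch_at_pg() */

	/* Visit every struct page held in a batch.  On folio_batch kernels
	 * one slot may cover a multi-page folio (fbatch_at_npgs() > 1); on
	 * pagevec-only kernels each slot is exactly one page.
	 */
	static void demo_mark_batch_uptodate(struct folio_batch *fbatch)
	{
		int count = folio_batch_count(fbatch);
		int i, pg, npgs;

		for (i = 0; i < count; i++) {
			npgs = fbatch_at_npgs(fbatch, i);
			for (pg = 0; pg < npgs; pg++)
				SetPageUptodate(fbatch_at_pg(fbatch, i, pg));
		}
	}

The double loop is what lets callers keep page-granular logic while the batch
itself becomes folio-granular on newer kernels.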
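
Likewise, a sketch of the add/drain pattern used by cl_page_free() and
osd_bufs_put(): folio_batch_add_page() (like pagevec_add() before it) returns
the space left in the batch, so a zero return means the batch just filled and
must be drained with folio_batch_release(); a final release catches any
trailing, partially filled batch. demo_put_pages() is a hypothetical caller,
not part of the patch:

	#include <linux/mm.h>
	#include <linux/pagevec.h>
	#include <lustre_compat.h>	/* ll_folio_batch_init() etc. */

	static void demo_put_pages(struct page **pages, int npages)
	{
		struct folio_batch fbatch;
		int i;

		ll_folio_batch_init(&fbatch, 0);
		for (i = 0; i < npages; i++) {
			/* the batch takes over the caller's page reference */
			if (folio_batch_add_page(&fbatch, pages[i]) == 0)
				folio_batch_release(&fbatch); /* batch full */
		}
		/* drop references held by any partially filled batch */
		folio_batch_release(&fbatch);
	}

Batching the puts this way amortizes the cost of release_pages() instead of
calling put_page() once per page, which is why the patch threads a batch
through cl_batch_put() and friends.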