From: James Simmons
Date: Sun, 28 Aug 2016 23:52:15 +0000 (-0400)
Subject: LU-8560 libcfs: handle PAGE_CACHE_* removal in newer kernels
X-Git-Tag: 2.8.58~32
X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=commitdiff_plain;h=2104ed0f0da3651f0cb4ab0c78a1037891d7cb4f

LU-8560 libcfs: handle PAGE_CACHE_* removal in newer kernels

Starting with Linux kernel 4.6, all of the PAGE_CACHE_* defines have
been removed and PAGE_* must be used instead. This is a simple blanket
rename, since PAGE_CACHE_* was always defined to the same value as
PAGE_*.
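For reference, the removed names were simple aliases in the kernel's
include/linux/pagemap.h. The snippet below paraphrases the pre-4.6
definitions (it is not part of this patch) to show why a mechanical
rename is behavior-preserving:

    /*
     * Paraphrased from pre-4.6 include/linux/pagemap.h: the
     * PAGE_CACHE_* macros were one-to-one aliases of PAGE_*, so a
     * mechanical PAGE_CACHE_* -> PAGE_* rename never changes a
     * computed size, shift, or mask.
     */
    #define PAGE_CACHE_SHIFT  PAGE_SHIFT
    #define PAGE_CACHE_SIZE   PAGE_SIZE
    #define PAGE_CACHE_MASK   PAGE_MASK

Change-Id: I3ba8954d44969e2473afa939bbb8b8b5b1345446
Signed-off-by: James Simmons
Reviewed-on: http://review.whamcloud.com/22206
Tested-by: Maloo
Tested-by: Jenkins
Reviewed-by: Frank Zago
Reviewed-by: Andreas Dilger
Reviewed-by: John L. Hammond
Reviewed-by: Dmitry Eremin
Reviewed-by: Oleg Drokin
---
diff --git a/contrib/scripts/checkpatch.pl b/contrib/scripts/checkpatch.pl index 668c243..20db5d5 100755 --- a/contrib/scripts/checkpatch.pl +++ b/contrib/scripts/checkpatch.pl @@ -432,8 +432,11 @@ my %dep_functions = ( 'cfs_list_splice_tail', 'list_splice_tail', 'cfs_list_t', 'struct list_head', - 'CFS_PAGE_MASK', 'PAGE_CACHE_MASK or PAGE_MASK', - 'CFS_PAGE_SIZE', 'PAGE_CACHE_SIZE or PAGE_SIZE', + 'CFS_PAGE_MASK', 'PAGE_MASK', + 'CFS_PAGE_SIZE', 'PAGE_SIZE', + 'PAGE_CACHE_MASK', 'PAGE_MASK', + 'PAGE_CACHE_SIZE', 'PAGE_SIZE', + 'PAGE_CACHE_SHIFT', 'PAGE_SHIFT', 'cfs_proc_dir_entry_t', 'struct proc_dir_entry', diff --git a/libcfs/include/libcfs/libcfs_prim.h b/libcfs/include/libcfs/libcfs_prim.h index 06ae853..3bc447e 100644 --- a/libcfs/include/libcfs/libcfs_prim.h +++ b/libcfs/include/libcfs/libcfs_prim.h @@ -60,7 +60,7 @@ cfs_time_t cfs_timer_deadline(struct timer_list *t); #if BITS_PER_LONG == 32 /* limit to lowmem on 32-bit systems */ # define NUM_CACHEPAGES \ - min(totalram_pages, 1UL << (30 - PAGE_CACHE_SHIFT) * 3 / 4) + min(totalram_pages, 1UL << (30 - PAGE_SHIFT) * 3 / 4) #else # define NUM_CACHEPAGES totalram_pages #endif diff --git a/libcfs/include/libcfs/libcfs_private.h b/libcfs/include/libcfs/libcfs_private.h index 47197b3..6d7ba72 100644 --- a/libcfs/include/libcfs/libcfs_private.h +++ b/libcfs/include/libcfs/libcfs_private.h @@ -151,7 +151,7 @@ do { \ #endif /* LIBCFS_DEBUG */ #ifndef LIBCFS_VMALLOC_SIZE -#define LIBCFS_VMALLOC_SIZE (2 << PAGE_CACHE_SHIFT) /* 2 pages */ +#define LIBCFS_VMALLOC_SIZE (2 << PAGE_SHIFT) /* 2 pages */ #endif #define LIBCFS_ALLOC_PRE(size, mask) \ diff --git a/libcfs/libcfs/debug.c b/libcfs/libcfs/debug.c index 673dc9b..51cba40 100644 --- a/libcfs/libcfs/debug.c +++ b/libcfs/libcfs/debug.c @@ -302,7 +302,7 @@ int libcfs_debug_init(unsigned long bufsize) max = TCD_MAX_PAGES; } else { max = (max / num_possible_cpus()); - max = (max << (20 - PAGE_CACHE_SHIFT)); + max = (max << (20 - PAGE_SHIFT)); } rc = cfs_tracefile_init(max); diff --git a/libcfs/libcfs/linux/linux-curproc.c b/libcfs/libcfs/linux/linux-curproc.c index 47f94d5..d9c06ac 100644 --- a/libcfs/libcfs/linux/linux-curproc.c +++ b/libcfs/libcfs/linux/linux-curproc.c @@ -188,7 +188,7 @@ int cfs_get_environ(const char *key, char *value, int *val_len) { struct mm_struct *mm; char *buffer; - int buf_len = PAGE_CACHE_SIZE; + int buf_len = PAGE_SIZE; int key_len = strlen(key); unsigned long addr; int rc; diff --git a/libcfs/libcfs/tracefile.c b/libcfs/libcfs/tracefile.c index 28f7f18..71a912a 100644 --- a/libcfs/libcfs/tracefile.c +++ b/libcfs/libcfs/tracefile.c @@ -145,7 +145,7 @@ cfs_trace_get_tage_try(struct cfs_trace_cpu_data *tcd, unsigned long len) if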
(tcd->tcd_cur_pages > 0) { __LASSERT(!list_empty(&tcd->tcd_pages)); tage = cfs_tage_from_list(tcd->tcd_pages.prev); - if (tage->used + len <= PAGE_CACHE_SIZE) + if (tage->used + len <= PAGE_SIZE) return tage; } @@ -224,7 +224,7 @@ static struct cfs_trace_page *cfs_trace_get_tage(struct cfs_trace_cpu_data *tcd, * from here: this will lead to infinite recursion. */ - if (len > PAGE_CACHE_SIZE) { + if (len > PAGE_SIZE) { printk(KERN_ERR "cowardly refusing to write %lu bytes in a page\n", len); return NULL; @@ -314,7 +314,7 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata, for (i = 0; i < 2; i++) { tage = cfs_trace_get_tage(tcd, needed + known_size + 1); if (tage == NULL) { - if (needed + known_size > PAGE_CACHE_SIZE) + if (needed + known_size > PAGE_SIZE) mask |= D_ERROR; cfs_trace_put_tcd(tcd); @@ -325,7 +325,7 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata, string_buf = (char *)page_address(tage->page) + tage->used + known_size; - max_nob = PAGE_CACHE_SIZE - tage->used - known_size; + max_nob = PAGE_SIZE - tage->used - known_size; if (max_nob <= 0) { printk(KERN_EMERG "negative max_nob: %d\n", max_nob); @@ -383,7 +383,7 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata, __LASSERT(debug_buf == string_buf); tage->used += needed; - __LASSERT(tage->used <= PAGE_CACHE_SIZE); + __LASSERT(tage->used <= PAGE_SIZE); console: if ((mask & libcfs_printk) == 0) { @@ -790,7 +790,7 @@ EXPORT_SYMBOL(cfs_trace_copyout_string); int cfs_trace_allocate_string_buffer(char **str, int nob) { - if (nob > 2 * PAGE_CACHE_SIZE) /* string must be "sensible" */ + if (nob > 2 * PAGE_SIZE) /* string must be "sensible" */ return -EINVAL; *str = kmalloc(nob, GFP_KERNEL | __GFP_ZERO); @@ -903,7 +903,7 @@ int cfs_trace_set_debug_mb(int mb) } mb /= num_possible_cpus(); - pages = mb << (20 - PAGE_CACHE_SHIFT); + pages = mb << (20 - PAGE_SHIFT); cfs_tracefile_write_lock(); @@ -941,7 +941,7 @@ int cfs_trace_get_debug_mb(void) cfs_tracefile_read_unlock(); - return (total_pages >> (20 - PAGE_CACHE_SHIFT)) + 1; + return (total_pages >> (20 - PAGE_SHIFT)) + 1; } static int tracefiled(void *arg) diff --git a/libcfs/libcfs/tracefile.h b/libcfs/libcfs/tracefile.h index 7c570fa..e786c29 100644 --- a/libcfs/libcfs/tracefile.h +++ b/libcfs/libcfs/tracefile.h @@ -96,7 +96,7 @@ extern void libcfs_unregister_panic_notifier(void); extern int libcfs_panic_in_progress; extern int cfs_trace_max_debug_mb(void); -#define TCD_MAX_PAGES (5 << (20 - PAGE_CACHE_SHIFT)) +#define TCD_MAX_PAGES (5 << (20 - PAGE_SHIFT)) #define TCD_STOCK_PAGES (TCD_MAX_PAGES) #define CFS_TRACEFILE_SIZE (500 << 20) @@ -105,7 +105,7 @@ extern int cfs_trace_max_debug_mb(void); /* * Private declare for tracefile */ -#define TCD_MAX_PAGES (5 << (20 - PAGE_CACHE_SHIFT)) +#define TCD_MAX_PAGES (5 << (20 - PAGE_SHIFT)) #define TCD_STOCK_PAGES (TCD_MAX_PAGES) #define CFS_TRACEFILE_SIZE (500 << 20) @@ -316,7 +316,7 @@ do { \ do { \ __LASSERT(tage != NULL); \ __LASSERT(tage->page != NULL); \ - __LASSERT(tage->used <= PAGE_CACHE_SIZE); \ + __LASSERT(tage->used <= PAGE_SIZE); \ __LASSERT(page_count(tage->page) > 0); \ } while (0) diff --git a/lnet/include/lnet/types.h b/lnet/include/lnet/types.h index ebc997c..4c67b27 100644 --- a/lnet/include/lnet/types.h +++ b/lnet/include/lnet/types.h @@ -489,7 +489,7 @@ typedef struct { /** * Starting offset of the fragment within the page. Note that the * end of the fragment must not pass the end of the page; i.e., - * kiov_len + kiov_offset <= PAGE_CACHE_SIZE. + * kiov_len + kiov_offset <= PAGE_SIZE. 
*/ unsigned int kiov_offset; } lnet_kiov_t; diff --git a/lnet/klnds/socklnd/socklnd_lib.c b/lnet/klnds/socklnd/socklnd_lib.c index e26db71..db244c8 100644 --- a/lnet/klnds/socklnd/socklnd_lib.c +++ b/lnet/klnds/socklnd/socklnd_lib.c @@ -287,7 +287,7 @@ ksocknal_lib_kiov_vmap(lnet_kiov_t *kiov, int niov, for (nob = i = 0; i < niov; i++) { if ((kiov[i].kiov_offset != 0 && i > 0) || (kiov[i].kiov_offset + kiov[i].kiov_len != - PAGE_CACHE_SIZE && i < niov - 1)) + PAGE_SIZE && i < niov - 1)) return NULL; pages[i] = kiov[i].kiov_page; diff --git a/lnet/lnet/lib-md.c b/lnet/lnet/lib-md.c index 4c6fb17..c78221c 100644 --- a/lnet/lnet/lib-md.c +++ b/lnet/lnet/lib-md.c @@ -134,7 +134,7 @@ lnet_md_build(lnet_libmd_t *lmd, lnet_md_t *umd, int unlink) for (i = 0; i < (int)niov; i++) { /* We take the page pointer on trust */ if (lmd->md_iov.kiov[i].kiov_offset + - lmd->md_iov.kiov[i].kiov_len > PAGE_CACHE_SIZE) + lmd->md_iov.kiov[i].kiov_len > PAGE_SIZE) return -EINVAL; /* invalid length */ total_length += lmd->md_iov.kiov[i].kiov_len; diff --git a/lnet/lnet/lib-move.c b/lnet/lnet/lib-move.c index cacc500..cbbc888 100644 --- a/lnet/lnet/lib-move.c +++ b/lnet/lnet/lib-move.c @@ -544,12 +544,12 @@ lnet_extract_kiov(int dst_niov, lnet_kiov_t *dst, if (len <= frag_len) { dst->kiov_len = len; - LASSERT (dst->kiov_offset + dst->kiov_len <= PAGE_CACHE_SIZE); + LASSERT(dst->kiov_offset + dst->kiov_len <= PAGE_SIZE); return niov; } dst->kiov_len = frag_len; - LASSERT (dst->kiov_offset + dst->kiov_len <= PAGE_CACHE_SIZE); + LASSERT(dst->kiov_offset + dst->kiov_len <= PAGE_SIZE); len -= frag_len; dst++; @@ -883,7 +883,7 @@ lnet_msg2bufpool(lnet_msg_t *msg) rbp = &the_lnet.ln_rtrpools[cpt][0]; LASSERT(msg->msg_len <= LNET_MTU); - while (msg->msg_len > (unsigned int)rbp->rbp_npages * PAGE_CACHE_SIZE) { + while (msg->msg_len > (unsigned int)rbp->rbp_npages * PAGE_SIZE) { rbp++; LASSERT(rbp < &the_lnet.ln_rtrpools[cpt][LNET_NRBPOOLS]); } diff --git a/lnet/lnet/lib-socket.c b/lnet/lnet/lib-socket.c index 9392f4f..b9515c2 100644 --- a/lnet/lnet/lib-socket.c +++ b/lnet/lnet/lib-socket.c @@ -202,9 +202,9 @@ lnet_ipif_enumerate(char ***namesp) nalloc = 16; /* first guess at max interfaces */ toobig = 0; for (;;) { - if (nalloc * sizeof(*ifr) > PAGE_CACHE_SIZE) { + if (nalloc * sizeof(*ifr) > PAGE_SIZE) { toobig = 1; - nalloc = PAGE_CACHE_SIZE/sizeof(*ifr); + nalloc = PAGE_SIZE / sizeof(*ifr); CWARN("Too many interfaces: only enumerating " "first %d\n", nalloc); } diff --git a/lnet/lnet/router.c b/lnet/lnet/router.c index 5a1e56f..ea2fb68 100644 --- a/lnet/lnet/router.c +++ b/lnet/lnet/router.c @@ -30,8 +30,8 @@ #define LNET_NRB_SMALL_PAGES 1 #define LNET_NRB_LARGE_MIN 256 /* min value for each CPT */ #define LNET_NRB_LARGE (LNET_NRB_LARGE_MIN * 4) -#define LNET_NRB_LARGE_PAGES ((LNET_MTU + PAGE_CACHE_SIZE - 1) >> \ - PAGE_CACHE_SHIFT) +#define LNET_NRB_LARGE_PAGES ((LNET_MTU + PAGE_SIZE - 1) >> \ + PAGE_SHIFT) static char *forwarding = ""; module_param(forwarding, charp, 0444); @@ -1332,7 +1332,7 @@ lnet_new_rtrbuf(lnet_rtrbufpool_t *rbp, int cpt) return NULL; } - rb->rb_kiov[i].kiov_len = PAGE_CACHE_SIZE; + rb->rb_kiov[i].kiov_len = PAGE_SIZE; rb->rb_kiov[i].kiov_offset = 0; rb->rb_kiov[i].kiov_page = page; } diff --git a/lnet/selftest/brw_test.c b/lnet/selftest/brw_test.c index 2b57088..1379c53 100644 --- a/lnet/selftest/brw_test.c +++ b/lnet/selftest/brw_test.c @@ -88,7 +88,7 @@ brw_client_init (sfw_test_instance_t *tsi) npg = breq->blk_npg; /* NB: this is not going to work for variable page size, * but we have to keep 
it for compatibility */ - len = npg * PAGE_CACHE_SIZE; + len = npg * PAGE_SIZE; } else { test_bulk_req_v1_t *breq = &tsi->tsi_u.bulk_v1; @@ -100,7 +100,7 @@ brw_client_init (sfw_test_instance_t *tsi) opc = breq->blk_opc; flags = breq->blk_flags; len = breq->blk_len; - npg = (len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; + npg = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; } if (npg > LNET_MAX_IOV || npg <= 0) @@ -159,13 +159,13 @@ brw_fill_page(struct page *pg, int pattern, __u64 magic) if (pattern == LST_BRW_CHECK_SIMPLE) { memcpy(addr, &magic, BRW_MSIZE); - addr += PAGE_CACHE_SIZE - BRW_MSIZE; + addr += PAGE_SIZE - BRW_MSIZE; memcpy(addr, &magic, BRW_MSIZE); return; } if (pattern == LST_BRW_CHECK_FULL) { - for (i = 0; i < PAGE_CACHE_SIZE / BRW_MSIZE; i++) + for (i = 0; i < PAGE_SIZE / BRW_MSIZE; i++) memcpy(addr + i * BRW_MSIZE, &magic, BRW_MSIZE); return; } @@ -190,7 +190,7 @@ brw_check_page(struct page *pg, int pattern, __u64 magic) data = *((__u64 *) addr); if (data != magic) goto bad_data; - addr += PAGE_CACHE_SIZE - BRW_MSIZE; + addr += PAGE_SIZE - BRW_MSIZE; data = *((__u64 *) addr); if (data != magic) goto bad_data; @@ -198,7 +198,7 @@ brw_check_page(struct page *pg, int pattern, __u64 magic) } if (pattern == LST_BRW_CHECK_FULL) { - for (i = 0; i < PAGE_CACHE_SIZE / BRW_MSIZE; i++) { + for (i = 0; i < PAGE_SIZE / BRW_MSIZE; i++) { data = *(((__u64 *) addr) + i); if (data != magic) goto bad_data; } @@ -268,7 +268,7 @@ brw_client_prep_rpc (sfw_test_unit_t *tsu, opc = breq->blk_opc; flags = breq->blk_flags; npg = breq->blk_npg; - len = npg * PAGE_CACHE_SIZE; + len = npg * PAGE_SIZE; } else { test_bulk_req_v1_t *breq = &tsi->tsi_u.bulk_v1; @@ -280,7 +280,7 @@ brw_client_prep_rpc (sfw_test_unit_t *tsu, opc = breq->blk_opc; flags = breq->blk_flags; len = breq->blk_len; - npg = (len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; + npg = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; } rc = sfw_create_test_rpc(tsu, dest, sn->sn_features, npg, len, &rpc); @@ -453,10 +453,10 @@ brw_server_handle(struct srpc_server_rpc *rpc) reply->brw_status = EINVAL; return 0; } - npg = reqst->brw_len >> PAGE_CACHE_SHIFT; + npg = reqst->brw_len >> PAGE_SHIFT; } else { - npg = (reqst->brw_len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; + npg = (reqst->brw_len + PAGE_SIZE - 1) >> PAGE_SHIFT; } replymsg->msg_ses_feats = reqstmsg->msg_ses_feats; diff --git a/lnet/selftest/conctl.c b/lnet/selftest/conctl.c index 804a7a0..a738a6b 100644 --- a/lnet/selftest/conctl.c +++ b/lnet/selftest/conctl.c @@ -745,7 +745,7 @@ static int lst_test_add_ioctl(lstio_test_args_t *args) if (args->lstio_tes_param != NULL && (args->lstio_tes_param_len <= 0 || args->lstio_tes_param_len > - PAGE_CACHE_SIZE - sizeof(lstcon_test_t))) + PAGE_SIZE - sizeof(lstcon_test_t))) return -EINVAL; LIBCFS_ALLOC(batch_name, args->lstio_tes_bat_nmlen + 1); @@ -823,7 +823,7 @@ lstcon_ioctl_entry(unsigned int cmd, struct libcfs_ioctl_hdr *hdr) opc = data->ioc_u32[0]; - if (data->ioc_plen1 > PAGE_CACHE_SIZE) + if (data->ioc_plen1 > PAGE_SIZE) return -EINVAL; LIBCFS_ALLOC(buf, data->ioc_plen1); diff --git a/lnet/selftest/conrpc.c b/lnet/selftest/conrpc.c index b00b80a..8b372a2 100644 --- a/lnet/selftest/conrpc.c +++ b/lnet/selftest/conrpc.c @@ -789,8 +789,8 @@ lstcon_bulkrpc_v0_prep(lst_test_bulk_param_t *param, srpc_test_reqst_t *req) test_bulk_req_t *brq = &req->tsr_u.bulk_v0; brq->blk_opc = param->blk_opc; - brq->blk_npg = (param->blk_size + PAGE_CACHE_SIZE - 1) / - PAGE_CACHE_SIZE; + brq->blk_npg = (param->blk_size + PAGE_SIZE - 1) / + PAGE_SIZE; brq->blk_flags = 
param->blk_flags; return 0; @@ -825,7 +825,7 @@ lstcon_testrpc_prep(lstcon_node_t *nd, int transop, unsigned feats, if (transop == LST_TRANS_TSBCLIADD) { npg = sfw_id_pages(test->tes_span); nob = (feats & LST_FEAT_BULK_LEN) == 0 ? - npg * PAGE_CACHE_SIZE : + npg * PAGE_SIZE : sizeof(lnet_process_id_packed_t) * test->tes_span; } @@ -852,7 +852,7 @@ lstcon_testrpc_prep(lstcon_node_t *nd, int transop, unsigned feats, LASSERT(nob > 0); len = (feats & LST_FEAT_BULK_LEN) == 0 ? - PAGE_CACHE_SIZE : min_t(int, nob, PAGE_CACHE_SIZE); + PAGE_SIZE : min_t(int, nob, PAGE_SIZE); nob -= len; bulk->bk_iovs[i].kiov_offset = 0; diff --git a/lnet/selftest/framework.c b/lnet/selftest/framework.c index 07a2812..1999962 100644 --- a/lnet/selftest/framework.c +++ b/lnet/selftest/framework.c @@ -1181,7 +1181,7 @@ sfw_add_test (srpc_server_rpc_t *rpc) int len; if ((sn->sn_features & LST_FEAT_BULK_LEN) == 0) { - len = npg * PAGE_CACHE_SIZE; + len = npg * PAGE_SIZE; } else { len = sizeof(lnet_process_id_packed_t) * diff --git a/lnet/selftest/rpc.c b/lnet/selftest/rpc.c index eb9e9dc..bec99df 100644 --- a/lnet/selftest/rpc.c +++ b/lnet/selftest/rpc.c @@ -90,7 +90,7 @@ void srpc_set_counters (const srpc_counters_t *cnt) static int srpc_add_bulk_page(srpc_bulk_t *bk, struct page *pg, int i, int nob) { - nob = min(nob, (int)PAGE_CACHE_SIZE); + nob = min_t(int, nob, (int)PAGE_SIZE); LASSERT(nob > 0); LASSERT(i >= 0 && i < bk->bk_niov); diff --git a/lnet/selftest/selftest.h b/lnet/selftest/selftest.h index 386da3b..29b1af26 100644 --- a/lnet/selftest/selftest.h +++ b/lnet/selftest/selftest.h @@ -389,10 +389,10 @@ typedef struct sfw_test_instance { } tsi_u; } sfw_test_instance_t; -/* XXX: trailing (PAGE_CACHE_SIZE % sizeof(lnet_process_id_t)) bytes at +/* XXX: trailing (PAGE_SIZE % sizeof(lnet_process_id_t)) bytes at * the end of pages are not used */ #define SFW_MAX_CONCUR LST_MAX_CONCUR -#define SFW_ID_PER_PAGE (PAGE_CACHE_SIZE / sizeof(lnet_process_id_packed_t)) +#define SFW_ID_PER_PAGE (PAGE_SIZE / sizeof(lnet_process_id_packed_t)) #define SFW_MAX_NDESTS (LNET_MAX_IOV * SFW_ID_PER_PAGE) #define sfw_id_pages(n) (((n) + SFW_ID_PER_PAGE - 1) / SFW_ID_PER_PAGE) diff --git a/lustre/fld/fld_handler.c b/lustre/fld/fld_handler.c index 902f42b..e7631f2 100644 --- a/lustre/fld/fld_handler.c +++ b/lustre/fld/fld_handler.c @@ -334,7 +334,7 @@ static int fld_handle_read(struct tgt_session_info *tsi) RETURN(err_serious(-EPROTO)); req_capsule_set_size(tsi->tsi_pill, &RMF_GENERIC_DATA, RCL_SERVER, - PAGE_CACHE_SIZE); + PAGE_SIZE); rc = req_capsule_server_pack(tsi->tsi_pill); if (unlikely(rc != 0)) @@ -343,7 +343,7 @@ static int fld_handle_read(struct tgt_session_info *tsi) data = req_capsule_server_get(tsi->tsi_pill, &RMF_GENERIC_DATA); rc = fld_server_read(tsi->tsi_env, lu_site2seq(site)->ss_server_fld, - in, data, PAGE_CACHE_SIZE); + in, data, PAGE_SIZE); RETURN(rc); } diff --git a/lustre/fld/fld_request.c b/lustre/fld/fld_request.c index 374d405..ad76857 100644 --- a/lustre/fld/fld_request.c +++ b/lustre/fld/fld_request.c @@ -398,7 +398,7 @@ again: RETURN(-ENOMEM); req_capsule_set_size(&req->rq_pill, &RMF_GENERIC_DATA, - RCL_SERVER, PAGE_CACHE_SIZE); + RCL_SERVER, PAGE_SIZE); break; default: rc = -EINVAL; diff --git a/lustre/include/lu_object.h b/lustre/include/lu_object.h index e8e4cf4..9eaba1d 100644 --- a/lustre/include/lu_object.h +++ b/lustre/include/lu_object.h @@ -1149,7 +1149,7 @@ struct lu_context_key { { \ type *value; \ \ - CLASSERT(PAGE_CACHE_SIZE >= sizeof (*value)); \ + CLASSERT(PAGE_SIZE >= sizeof(*value)); \ \ 
OBD_ALLOC_PTR(value); \ if (value == NULL) \ diff --git a/lustre/include/lustre/lustre_idl.h b/lustre/include/lustre/lustre_idl.h index 505b46c..e0a7171 100644 --- a/lustre/include/lustre/lustre_idl.h +++ b/lustre/include/lustre/lustre_idl.h @@ -1000,16 +1000,16 @@ static inline size_t lu_dirent_calc_size(size_t namelen, __u16 attr) * MDS_READPAGE page size * * This is the directory page size packed in MDS_READPAGE RPC. - * It's different than PAGE_CACHE_SIZE because the client needs to + * It's different than PAGE_SIZE because the client needs to * access the struct lu_dirpage header packed at the beginning of * the "page" and without this there isn't any way to know find the - * lu_dirpage header is if client and server PAGE_CACHE_SIZE differ. + * lu_dirpage header is if client and server PAGE_SIZE differ. */ #define LU_PAGE_SHIFT 12 #define LU_PAGE_SIZE (1UL << LU_PAGE_SHIFT) #define LU_PAGE_MASK (~(LU_PAGE_SIZE - 1)) -#define LU_PAGE_COUNT (1 << (PAGE_CACHE_SHIFT - LU_PAGE_SHIFT)) +#define LU_PAGE_COUNT (1 << (PAGE_SHIFT - LU_PAGE_SHIFT)) /** @} lu_dir */ diff --git a/lustre/include/lustre_debug.h b/lustre/include/lustre_debug.h index 770b3c7..412dbbc 100644 --- a/lustre/include/lustre_debug.h +++ b/lustre/include/lustre_debug.h @@ -52,9 +52,9 @@ #define ASSERT_MAX_SIZE_MB 60000ULL #define ASSERT_PAGE_INDEX(index, OP) \ -do { if (index > ASSERT_MAX_SIZE_MB << (20 - PAGE_CACHE_SHIFT)) { \ +do { if (index > ASSERT_MAX_SIZE_MB << (20 - PAGE_SHIFT)) { \ CERROR("bad page index %lu > %llu\n", index, \ - ASSERT_MAX_SIZE_MB << (20 - PAGE_CACHE_SHIFT)); \ + ASSERT_MAX_SIZE_MB << (20 - PAGE_SHIFT)); \ libcfs_debug = ~0UL; \ OP; \ }} while(0) diff --git a/lustre/include/lustre_disk.h b/lustre/include/lustre_disk.h index aea63f5..46aeaa0 100644 --- a/lustre/include/lustre_disk.h +++ b/lustre/include/lustre_disk.h @@ -280,14 +280,14 @@ struct lustre_mount_data { /* * This limit is arbitrary (131072 clients on x86), but it is convenient to use - * 2^n * PAGE_CACHE_SIZE * 8 for the number of bits that fit an order-n allocation. + * 2^n * PAGE_SIZE * 8 for the number of bits that fit an order-n allocation. * If we need more than 131072 clients (order-2 allocation on x86) then this * should become an array of single-page pointers that are allocated on demand. 
*/ -#if (128 * 1024UL) > (PAGE_CACHE_SIZE * 8) +#if (128 * 1024UL) > (PAGE_SIZE * 8) #define LR_MAX_CLIENTS (128 * 1024UL) #else -#define LR_MAX_CLIENTS (PAGE_CACHE_SIZE * 8) +#define LR_MAX_CLIENTS (PAGE_SIZE * 8) #endif /** COMPAT_146: this is an OST (temporary) */ diff --git a/lustre/include/lustre_idmap.h b/lustre/include/lustre_idmap.h index 0d80f5c..078ba9f 100644 --- a/lustre/include/lustre_idmap.h +++ b/lustre/include/lustre_idmap.h @@ -49,7 +49,7 @@ #include -#define CFS_NGROUPS_PER_BLOCK ((int)(PAGE_CACHE_SIZE / sizeof(gid_t))) +#define CFS_NGROUPS_PER_BLOCK ((int)(PAGE_SIZE / sizeof(gid_t))) #define CFS_GROUP_AT(gi, i) \ ((gi)->blocks[(i) / CFS_NGROUPS_PER_BLOCK][(i) % CFS_NGROUPS_PER_BLOCK]) diff --git a/lustre/include/lustre_net.h b/lustre/include/lustre_net.h index a76c426..99e4237 100644 --- a/lustre/include/lustre_net.h +++ b/lustre/include/lustre_net.h @@ -102,21 +102,21 @@ */ #define PTLRPC_MAX_BRW_BITS (LNET_MTU_BITS + PTLRPC_BULK_OPS_BITS) #define PTLRPC_MAX_BRW_SIZE (1 << PTLRPC_MAX_BRW_BITS) -#define PTLRPC_MAX_BRW_PAGES (PTLRPC_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT) +#define PTLRPC_MAX_BRW_PAGES (PTLRPC_MAX_BRW_SIZE >> PAGE_SHIFT) #define ONE_MB_BRW_SIZE (1 << LNET_MTU_BITS) #define MD_MAX_BRW_SIZE (1 << LNET_MTU_BITS) -#define MD_MAX_BRW_PAGES (MD_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT) +#define MD_MAX_BRW_PAGES (MD_MAX_BRW_SIZE >> PAGE_SHIFT) #define DT_MAX_BRW_SIZE PTLRPC_MAX_BRW_SIZE -#define DT_MAX_BRW_PAGES (DT_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT) +#define DT_MAX_BRW_PAGES (DT_MAX_BRW_SIZE >> PAGE_SHIFT) #define OFD_MAX_BRW_SIZE (1 << LNET_MTU_BITS) /* When PAGE_SIZE is a constant, we can check our arithmetic here with cpp! */ #if ((PTLRPC_MAX_BRW_PAGES & (PTLRPC_MAX_BRW_PAGES - 1)) != 0) # error "PTLRPC_MAX_BRW_PAGES isn't a power of two" #endif -#if (PTLRPC_MAX_BRW_SIZE != (PTLRPC_MAX_BRW_PAGES * PAGE_CACHE_SIZE)) -# error "PTLRPC_MAX_BRW_SIZE isn't PTLRPC_MAX_BRW_PAGES * PAGE_CACHE_SIZE" +#if (PTLRPC_MAX_BRW_SIZE != (PTLRPC_MAX_BRW_PAGES * PAGE_SIZE)) +# error "PTLRPC_MAX_BRW_SIZE isn't PTLRPC_MAX_BRW_PAGES * PAGE_SIZE" #endif #if (PTLRPC_MAX_BRW_SIZE > LNET_MTU * PTLRPC_BULK_OPS_COUNT) # error "PTLRPC_MAX_BRW_SIZE too big" @@ -457,7 +457,7 @@ */ /* depress threads factor for VM with small memory size */ #define OSS_THR_FACTOR min_t(int, 8, \ - NUM_CACHEPAGES >> (28 - PAGE_CACHE_SHIFT)) + NUM_CACHEPAGES >> (28 - PAGE_SHIFT)) #define OSS_NTHRS_INIT (PTLRPC_NTHRS_INIT + 1) #define OSS_NTHRS_BASE 64 diff --git a/lustre/include/lustre_patchless_compat.h b/lustre/include/lustre_patchless_compat.h index cefa869..58daecf 100644 --- a/lustre/include/lustre_patchless_compat.h +++ b/lustre/include/lustre_patchless_compat.h @@ -95,7 +95,7 @@ truncate_complete_page(struct address_space *mapping, struct page *page) if (PagePrivate(page)) #ifdef HAVE_INVALIDATE_RANGE - page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE); + page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE); #else page->mapping->a_ops->invalidatepage(page, 0); #endif diff --git a/lustre/include/obd.h b/lustre/include/obd.h index 460ce5b..349d660 100644 --- a/lustre/include/obd.h +++ b/lustre/include/obd.h @@ -213,7 +213,7 @@ struct client_obd { int cl_grant_shrink_interval; /* seconds */ /* A chunk is an optimal size used by osc_extent to determine - * the extent size. A chunk is max(PAGE_CACHE_SIZE, OST block size) */ + * the extent size. 
A chunk is max(PAGE_SIZE, OST block size) */ int cl_chunkbits; /* extent insertion metadata overhead to be accounted in grant, * in bytes */ @@ -1199,7 +1199,7 @@ bad_format: static inline int cli_brw_size(struct obd_device *obd) { LASSERT(obd != NULL); - return obd->u.cli.cl_max_pages_per_rpc << PAGE_CACHE_SHIFT; + return obd->u.cli.cl_max_pages_per_rpc << PAGE_SHIFT; } /* when RPC size or the max RPCs in flight is increased, the max dirty pages @@ -1212,7 +1212,7 @@ static inline void client_adjust_max_dirty(struct client_obd *cli) /* initializing */ if (cli->cl_dirty_max_pages <= 0) cli->cl_dirty_max_pages = (OSC_MAX_DIRTY_DEFAULT * 1024 * 1024) - >> PAGE_CACHE_SHIFT; + >> PAGE_SHIFT; else { unsigned long dirty_max = cli->cl_max_rpcs_in_flight * cli->cl_max_pages_per_rpc; diff --git a/lustre/include/obd_support.h b/lustre/include/obd_support.h index cc8a6ec..6dfd472 100644 --- a/lustre/include/obd_support.h +++ b/lustre/include/obd_support.h @@ -795,7 +795,7 @@ do { \ #endif #ifdef POISON_BULK -#define POISON_PAGE(page, val) do { memset(kmap(page), val, PAGE_CACHE_SIZE); \ +#define POISON_PAGE(page, val) do { memset(kmap(page), val, PAGE_SIZE); \ kunmap(page); } while (0) #else #define POISON_PAGE(page, val) do { } while (0) diff --git a/lustre/ldlm/ldlm_extent.c b/lustre/ldlm/ldlm_extent.c index b51265f..b38b14d 100644 --- a/lustre/ldlm/ldlm_extent.c +++ b/lustre/ldlm/ldlm_extent.c @@ -94,7 +94,7 @@ static void ldlm_extent_internal_policy_fixup(struct ldlm_lock *req, * the client requested. Also we need to make sure it's also server * page size aligned otherwise a server page can be covered by two * write locks. */ - mask = PAGE_CACHE_SIZE; + mask = PAGE_SIZE; req_align = (req_end + 1) | req_start; if (req_align != 0 && (req_align & (mask - 1)) == 0) { while ((req_align & mask) == 0) diff --git a/lustre/ldlm/ldlm_lib.c b/lustre/ldlm/ldlm_lib.c index 5a1e716..e05873c 100644 --- a/lustre/ldlm/ldlm_lib.c +++ b/lustre/ldlm/ldlm_lib.c @@ -404,17 +404,17 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg) * from OFD after connecting. */ cli->cl_max_pages_per_rpc = PTLRPC_MAX_BRW_PAGES; - /* set cl_chunkbits default value to PAGE_CACHE_SHIFT, + /* set cl_chunkbits default value to PAGE_SHIFT, * it will be updated at OSC connection time. 
*/ - cli->cl_chunkbits = PAGE_CACHE_SHIFT; + cli->cl_chunkbits = PAGE_SHIFT; if (!strcmp(name, LUSTRE_MDC_NAME)) { cli->cl_max_rpcs_in_flight = OBD_MAX_RIF_DEFAULT; - } else if (totalram_pages >> (20 - PAGE_CACHE_SHIFT) <= 128 /* MB */) { + } else if (totalram_pages >> (20 - PAGE_SHIFT) <= 128 /* MB */) { cli->cl_max_rpcs_in_flight = 2; - } else if (totalram_pages >> (20 - PAGE_CACHE_SHIFT) <= 256 /* MB */) { + } else if (totalram_pages >> (20 - PAGE_SHIFT) <= 256 /* MB */) { cli->cl_max_rpcs_in_flight = 3; - } else if (totalram_pages >> (20 - PAGE_CACHE_SHIFT) <= 512 /* MB */) { + } else if (totalram_pages >> (20 - PAGE_SHIFT) <= 512 /* MB */) { cli->cl_max_rpcs_in_flight = 4; } else { if (osc_on_mdt(obddev->obd_name)) diff --git a/lustre/ldlm/ldlm_lockd.c b/lustre/ldlm/ldlm_lockd.c index 98c3d80..ef3560b 100644 --- a/lustre/ldlm/ldlm_lockd.c +++ b/lustre/ldlm/ldlm_lockd.c @@ -206,7 +206,7 @@ static int expired_lock_main(void *arg) lock = list_entry(expired->next, struct ldlm_lock, l_pending_chain); - if ((void *)lock < LP_POISON + PAGE_CACHE_SIZE && + if ((void *)lock < LP_POISON + PAGE_SIZE && (void *)lock >= LP_POISON) { spin_unlock_bh(&waiting_locks_spinlock); CERROR("free lock on elt list %p\n", lock); @@ -214,7 +214,7 @@ static int expired_lock_main(void *arg) } list_del_init(&lock->l_pending_chain); if ((void *)lock->l_export < - LP_POISON + PAGE_CACHE_SIZE && + LP_POISON + PAGE_SIZE && (void *)lock->l_export >= LP_POISON) { CERROR("lock with free export on elt list %p\n", lock->l_export); diff --git a/lustre/ldlm/ldlm_pool.c b/lustre/ldlm/ldlm_pool.c index 4328274..5a882d3 100644 --- a/lustre/ldlm/ldlm_pool.c +++ b/lustre/ldlm/ldlm_pool.c @@ -109,7 +109,7 @@ /* * 50 ldlm locks for 1MB of RAM. */ -#define LDLM_POOL_HOST_L ((NUM_CACHEPAGES >> (20 - PAGE_CACHE_SHIFT)) * 50) +#define LDLM_POOL_HOST_L ((NUM_CACHEPAGES >> (20 - PAGE_SHIFT)) * 50) /* * Maximal possible grant step plan in %. 
diff --git a/lustre/ldlm/ldlm_reclaim.c b/lustre/ldlm/ldlm_reclaim.c index 7fda5cf..b551ea2 100644 --- a/lustre/ldlm/ldlm_reclaim.c +++ b/lustre/ldlm/ldlm_reclaim.c @@ -344,7 +344,7 @@ static inline __u64 ldlm_ratio2locknr(int ratio) { __u64 locknr; - locknr = ((__u64)NUM_CACHEPAGES << PAGE_CACHE_SHIFT) * ratio; + locknr = ((__u64)NUM_CACHEPAGES << PAGE_SHIFT) * ratio; do_div(locknr, 100 * sizeof(struct ldlm_lock)); return locknr; diff --git a/lustre/ldlm/ldlm_request.c b/lustre/ldlm/ldlm_request.c index f196849..8e34498 100644 --- a/lustre/ldlm/ldlm_request.c +++ b/lustre/ldlm/ldlm_request.c @@ -752,7 +752,7 @@ static inline int ldlm_req_handles_avail(int req_size, int off) { int avail; - avail = min_t(int, LDLM_MAXREQSIZE, PAGE_CACHE_SIZE - 512) - req_size; + avail = min_t(int, LDLM_MAXREQSIZE, PAGE_SIZE - 512) - req_size; if (likely(avail >= 0)) avail /= (int)sizeof(struct lustre_handle); else diff --git a/lustre/lfsck/lfsck_internal.h b/lustre/lfsck/lfsck_internal.h index 68bef40..fd3164c 100644 --- a/lustre/lfsck/lfsck_internal.h +++ b/lustre/lfsck/lfsck_internal.h @@ -1426,7 +1426,7 @@ static inline int lfsck_links_read(const struct lu_env *env, { ldata->ld_buf = lu_buf_check_and_alloc(&lfsck_env_info(env)->lti_linkea_buf, - PAGE_CACHE_SIZE); + PAGE_SIZE); return __lfsck_links_read(env, obj, ldata); } @@ -1437,7 +1437,7 @@ static inline int lfsck_links_read2(const struct lu_env *env, { ldata->ld_buf = lu_buf_check_and_alloc(&lfsck_env_info(env)->lti_linkea_buf2, - PAGE_CACHE_SIZE); + PAGE_SIZE); return __lfsck_links_read(env, obj, ldata); } diff --git a/lustre/lfsck/lfsck_layout.c b/lustre/lfsck/lfsck_layout.c index b80623a..8241ea8 100644 --- a/lustre/lfsck/lfsck_layout.c +++ b/lustre/lfsck/lfsck_layout.c @@ -397,7 +397,7 @@ static int lfsck_layout_verify_header(struct lov_mds_md_v1 *lmm) return 0; } -#define LFSCK_RBTREE_BITMAP_SIZE PAGE_CACHE_SIZE +#define LFSCK_RBTREE_BITMAP_SIZE PAGE_SIZE #define LFSCK_RBTREE_BITMAP_WIDTH (LFSCK_RBTREE_BITMAP_SIZE << 3) #define LFSCK_RBTREE_BITMAP_MASK (LFSCK_RBTREE_BITMAP_WIDTH - 1) diff --git a/lustre/llite/dir.c b/lustre/llite/dir.c index ea0c35c..03c0866 100644 --- a/lustre/llite/dir.c +++ b/lustre/llite/dir.c @@ -141,7 +141,7 @@ * a header lu_dirpage which describes the start/end hash, and whether this * page is empty (contains no dir entry) or hash collide with next page. * After client receives reply, several pages will be integrated into dir page - * in PAGE_CACHE_SIZE (if PAGE_CACHE_SIZE greater than LU_PAGE_SIZE), and the + * in PAGE_SIZE (if PAGE_SIZE greater than LU_PAGE_SIZE), and the * lu_dirpage for this integrated page will be adjusted. See * mdc_adjust_dirpages(). * @@ -1468,7 +1468,7 @@ out_rmdir: st.st_gid = body->mbo_gid; st.st_rdev = body->mbo_rdev; st.st_size = body->mbo_size; - st.st_blksize = PAGE_CACHE_SIZE; + st.st_blksize = PAGE_SIZE; st.st_blocks = body->mbo_blocks; st.st_atime = body->mbo_atime; st.st_mtime = body->mbo_mtime; diff --git a/lustre/llite/llite_internal.h b/lustre/llite/llite_internal.h index b87da82..a79d49a 100644 --- a/lustre/llite/llite_internal.h +++ b/lustre/llite/llite_internal.h @@ -315,10 +315,10 @@ static inline struct ll_inode_info *ll_i2info(struct inode *inode) } /* default to about 64M of readahead on a given system. 
*/ -#define SBI_DEFAULT_READAHEAD_MAX (64UL << (20 - PAGE_CACHE_SHIFT)) +#define SBI_DEFAULT_READAHEAD_MAX (64UL << (20 - PAGE_SHIFT)) /* default to read-ahead full files smaller than 2MB on the second read */ -#define SBI_DEFAULT_READAHEAD_WHOLE_MAX (2UL << (20 - PAGE_CACHE_SHIFT)) +#define SBI_DEFAULT_READAHEAD_WHOLE_MAX (2UL << (20 - PAGE_SHIFT)) enum ra_stat { RA_STAT_HIT = 0, @@ -1011,7 +1011,7 @@ struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr, static inline void ll_invalidate_page(struct page *vmpage) { struct address_space *mapping = vmpage->mapping; - loff_t offset = vmpage->index << PAGE_CACHE_SHIFT; + loff_t offset = vmpage->index << PAGE_SHIFT; LASSERT(PageLocked(vmpage)); if (mapping == NULL) @@ -1021,7 +1021,7 @@ static inline void ll_invalidate_page(struct page *vmpage) * truncate_complete_page() calls * a_ops->invalidatepage()->cl_page_delete()->vvp_page_delete(). */ - ll_teardown_mmaps(mapping, offset, offset + PAGE_CACHE_SIZE); + ll_teardown_mmaps(mapping, offset, offset + PAGE_SIZE); truncate_complete_page(mapping, vmpage); } diff --git a/lustre/llite/llite_lib.c b/lustre/llite/llite_lib.c index 3b790d3..f395747 100644 --- a/lustre/llite/llite_lib.c +++ b/lustre/llite/llite_lib.c @@ -304,15 +304,15 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt, valid != CLIENT_CONNECT_MDT_REQD) { char *buf; - OBD_ALLOC_WAIT(buf, PAGE_CACHE_SIZE); - obd_connect_flags2str(buf, PAGE_CACHE_SIZE, + OBD_ALLOC_WAIT(buf, PAGE_SIZE); + obd_connect_flags2str(buf, PAGE_SIZE, valid ^ CLIENT_CONNECT_MDT_REQD, 0, ","); LCONSOLE_ERROR_MSG(0x170, "Server %s does not support " "feature(s) needed for correct operation " "of this client (%s). Please upgrade " "server or downgrade client.\n", sbi->ll_md_exp->exp_obd->obd_name, buf); - OBD_FREE(buf, PAGE_CACHE_SIZE); + OBD_FREE(buf, PAGE_SIZE); GOTO(out_md_fid, err = -EPROTO); } @@ -356,7 +356,7 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt, sbi->ll_flags |= LL_SBI_64BIT_HASH; if (data->ocd_connect_flags & OBD_CONNECT_BRW_SIZE) - sbi->ll_md_brw_pages = data->ocd_brw_size >> PAGE_CACHE_SHIFT; + sbi->ll_md_brw_pages = data->ocd_brw_size >> PAGE_SHIFT; else sbi->ll_md_brw_pages = 1; diff --git a/lustre/llite/llite_mmap.c b/lustre/llite/llite_mmap.c index 94f2b12..d03e92f 100644 --- a/lustre/llite/llite_mmap.c +++ b/lustre/llite/llite_mmap.c @@ -60,7 +60,7 @@ void policy_from_vma(union ldlm_policy_data *policy, struct vm_area_struct *vma, unsigned long addr, size_t count) { policy->l_extent.start = ((addr - vma->vm_start) & PAGE_MASK) + - (vma->vm_pgoff << PAGE_CACHE_SHIFT); + (vma->vm_pgoff << PAGE_SHIFT); policy->l_extent.end = (policy->l_extent.start + count - 1) | ~PAGE_MASK; } @@ -477,7 +477,7 @@ int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last) LASSERTF(last > first, "last %llu first %llu\n", last, first); if (mapping_mapped(mapping)) { rc = 0; - unmap_mapping_range(mapping, first + PAGE_CACHE_SIZE - 1, + unmap_mapping_range(mapping, first + PAGE_SIZE - 1, last - first + 1, 0); } diff --git a/lustre/llite/lloop.c b/lustre/llite/lloop.c index 2144c7d..96e6c30 100644 --- a/lustre/llite/lloop.c +++ b/lustre/llite/lloop.c @@ -226,7 +226,7 @@ static int do_bio_lustrebacked(struct lloop_device *lo, struct bio *head) #endif bio_for_each_segment_all(bvec, bio, iter) { BUG_ON(bvec->bv_offset != 0); - BUG_ON(bvec->bv_len != PAGE_CACHE_SIZE); + BUG_ON(bvec->bv_len != PAGE_SIZE); pages[page_count] = bvec->bv_page; offsets[page_count] = offset; @@ 
-240,7 +240,7 @@ static int do_bio_lustrebacked(struct lloop_device *lo, struct bio *head) (rw == WRITE) ? LPROC_LL_BRW_WRITE : LPROC_LL_BRW_READ, page_count); - pvec->ldp_size = page_count << PAGE_CACHE_SHIFT; + pvec->ldp_size = page_count << PAGE_SHIFT; pvec->ldp_nr = page_count; /* FIXME: in ll_direct_rw_pages, it has to allocate many cl_page{}s to @@ -538,7 +538,7 @@ static int loop_set_fd(struct lloop_device *lo, struct file *unused, set_device_ro(bdev, (lo_flags & LO_FLAGS_READ_ONLY) != 0); - lo->lo_blocksize = PAGE_CACHE_SIZE; + lo->lo_blocksize = PAGE_SIZE; lo->lo_device = bdev; lo->lo_flags = lo_flags; lo->lo_backing_file = file; @@ -560,7 +560,7 @@ static int loop_set_fd(struct lloop_device *lo, struct file *unused, /* queue parameters */ blk_queue_max_hw_sectors(lo->lo_queue, - LLOOP_MAX_SEGMENTS << (PAGE_CACHE_SHIFT - 9)); + LLOOP_MAX_SEGMENTS << (PAGE_SHIFT - 9)); blk_queue_max_segments(lo->lo_queue, LLOOP_MAX_SEGMENTS); set_capacity(disks[lo->lo_number], size); diff --git a/lustre/llite/lproc_llite.c b/lustre/llite/lproc_llite.c index 34615bd..12a6622 100644 --- a/lustre/llite/lproc_llite.c +++ b/lustre/llite/lproc_llite.c @@ -259,7 +259,7 @@ static int ll_max_readahead_mb_seq_show(struct seq_file *m, void *v) pages_number = sbi->ll_ra_info.ra_max_pages; spin_unlock(&sbi->ll_lock); - mult = 1 << (20 - PAGE_CACHE_SHIFT); + mult = 1 << (20 - PAGE_SHIFT); return lprocfs_seq_read_frac_helper(m, pages_number, mult); } @@ -277,14 +277,14 @@ ll_max_readahead_mb_seq_write(struct file *file, const char __user *buffer, if (rc) return rc; - pages_number >>= PAGE_CACHE_SHIFT; + pages_number >>= PAGE_SHIFT; if (pages_number < 0 || pages_number > totalram_pages / 2) { /* 1/2 of RAM */ CERROR("%s: can't set max_readahead_mb=%lu > %luMB\n", ll_get_fsname(sb, NULL, 0), - (unsigned long)pages_number >> (20 - PAGE_CACHE_SHIFT), - totalram_pages >> (20 - PAGE_CACHE_SHIFT + 1)); + (unsigned long)pages_number >> (20 - PAGE_SHIFT), + totalram_pages >> (20 - PAGE_SHIFT + 1)); return -ERANGE; } @@ -306,7 +306,7 @@ static int ll_max_readahead_per_file_mb_seq_show(struct seq_file *m, void *v) pages_number = sbi->ll_ra_info.ra_max_pages_per_file; spin_unlock(&sbi->ll_lock); - mult = 1 << (20 - PAGE_CACHE_SHIFT); + mult = 1 << (20 - PAGE_SHIFT); return lprocfs_seq_read_frac_helper(m, pages_number, mult); } @@ -325,13 +325,13 @@ ll_max_readahead_per_file_mb_seq_write(struct file *file, if (rc) return rc; - pages_number >>= PAGE_CACHE_SHIFT; + pages_number >>= PAGE_SHIFT; if (pages_number < 0 || pages_number > sbi->ll_ra_info.ra_max_pages) { CERROR("%s: can't set max_readahead_per_file_mb=%lu > " "max_read_ahead_mb=%lu\n", ll_get_fsname(sb, NULL, 0), - (unsigned long)pages_number >> (20 - PAGE_CACHE_SHIFT), - sbi->ll_ra_info.ra_max_pages >> (20 - PAGE_CACHE_SHIFT)); + (unsigned long)pages_number >> (20 - PAGE_SHIFT), + sbi->ll_ra_info.ra_max_pages >> (20 - PAGE_SHIFT)); return -ERANGE; } @@ -353,7 +353,7 @@ static int ll_max_read_ahead_whole_mb_seq_show(struct seq_file *m, void *v) pages_number = sbi->ll_ra_info.ra_max_read_ahead_whole_pages; spin_unlock(&sbi->ll_lock); - mult = 1 << (20 - PAGE_CACHE_SHIFT); + mult = 1 << (20 - PAGE_SHIFT); return lprocfs_seq_read_frac_helper(m, pages_number, mult); } @@ -372,13 +372,13 @@ ll_max_read_ahead_whole_mb_seq_write(struct file *file, if (rc) return rc; - pages_number >>= PAGE_CACHE_SHIFT; + pages_number >>= PAGE_SHIFT; /* Cap this at the current max readahead window size, the readahead * algorithm does this anyway so it's pointless to set it larger. 
*/ if (pages_number < 0 || pages_number > sbi->ll_ra_info.ra_max_pages_per_file) { - int pages_shift = 20 - PAGE_CACHE_SHIFT; + int pages_shift = 20 - PAGE_SHIFT; CERROR("%s: can't set max_read_ahead_whole_mb=%lu > " "max_read_ahead_per_file_mb=%lu\n", ll_get_fsname(sb, NULL, 0), @@ -399,7 +399,7 @@ static int ll_max_cached_mb_seq_show(struct seq_file *m, void *v) struct super_block *sb = m->private; struct ll_sb_info *sbi = ll_s2sbi(sb); struct cl_client_cache *cache = sbi->ll_cache; - int shift = 20 - PAGE_CACHE_SHIFT; + int shift = 20 - PAGE_SHIFT; long max_cached_mb; long unused_mb; @@ -448,12 +448,12 @@ ll_max_cached_mb_seq_write(struct file *file, const char __user *buffer, if (rc) RETURN(rc); - pages_number >>= PAGE_CACHE_SHIFT; + pages_number >>= PAGE_SHIFT; if (pages_number < 0 || pages_number > totalram_pages) { CERROR("%s: can't set max cache more than %lu MB\n", ll_get_fsname(sb, NULL, 0), - totalram_pages >> (20 - PAGE_CACHE_SHIFT)); + totalram_pages >> (20 - PAGE_SHIFT)); RETURN(-ERANGE); } /* Allow enough cache so clients can make well-formed RPCs */ @@ -907,7 +907,7 @@ static int ll_unstable_stats_seq_show(struct seq_file *m, void *v) int mb; pages = atomic_long_read(&cache->ccc_unstable_nr); - mb = (pages * PAGE_CACHE_SIZE) >> 20; + mb = (pages * PAGE_SIZE) >> 20; seq_printf(m, "unstable_check: %8d\n" "unstable_pages: %12ld\n" diff --git a/lustre/llite/rw.c b/lustre/llite/rw.c index daf665c..15b0c3f 100644 --- a/lustre/llite/rw.c +++ b/lustre/llite/rw.c @@ -484,7 +484,7 @@ static int ll_readahead(const struct lu_env *env, struct cl_io *io, unsigned long end_index; /* Truncate RA window to end of file */ - end_index = (unsigned long)((kms - 1) >> PAGE_CACHE_SHIFT); + end_index = (unsigned long)((kms - 1) >> PAGE_SHIFT); if (end_index <= end) { end = end_index; ria->ria_eof = true; @@ -547,7 +547,7 @@ static int ll_readahead(const struct lu_env *env, struct cl_io *io, if (ria->ria_reserved != 0) ll_ra_count_put(ll_i2sbi(inode), ria->ria_reserved); - if (ra_end == end && ra_end == (kms >> PAGE_CACHE_SHIFT)) + if (ra_end == end && ra_end == (kms >> PAGE_SHIFT)) ll_ra_stats_inc(inode, RA_STAT_EOF); /* if we didn't get to the end of the region we reserved from @@ -764,8 +764,8 @@ static void ras_update(struct ll_sb_info *sbi, struct inode *inode, if (ras->ras_requests >= 2 && !ras->ras_request_index) { __u64 kms_pages; - kms_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> - PAGE_CACHE_SHIFT; + kms_pages = (i_size_read(inode) + PAGE_SIZE - 1) >> + PAGE_SHIFT; CDEBUG(D_READA, "kmsp %llu mwp %lu mp %lu\n", kms_pages, ra->ra_max_read_ahead_whole_pages, ra->ra_max_pages_per_file); @@ -953,7 +953,7 @@ int ll_writepage(struct page *vmpage, struct writeback_control *wbc) * breaking kernel which assumes ->writepage should mark * PageWriteback or clean the page. */ result = cl_sync_file_range(inode, offset, - offset + PAGE_CACHE_SIZE - 1, + offset + PAGE_SIZE - 1, CL_FSYNC_LOCAL, 1); if (result > 0) { /* actually we may have written more than one page. 
@@ -991,7 +991,7 @@ int ll_writepages(struct address_space *mapping, struct writeback_control *wbc) ENTRY; if (wbc->range_cyclic) { - start = mapping->writeback_index << PAGE_CACHE_SHIFT; + start = mapping->writeback_index << PAGE_SHIFT; end = OBD_OBJECT_EOF; } else { start = wbc->range_start; @@ -1025,7 +1025,7 @@ int ll_writepages(struct address_space *mapping, struct writeback_control *wbc) if (end == OBD_OBJECT_EOF) mapping->writeback_index = 0; else - mapping->writeback_index = (end >> PAGE_CACHE_SHIFT) +1; + mapping->writeback_index = (end >> PAGE_SHIFT) + 1; } RETURN(result); } diff --git a/lustre/llite/rw26.c b/lustre/llite/rw26.c index f2ee2b8..7830a83 100644 --- a/lustre/llite/rw26.c +++ b/lustre/llite/rw26.c @@ -99,7 +99,7 @@ static void ll_invalidatepage(struct page *vmpage, * happening with locked page too */ #ifdef HAVE_INVALIDATE_RANGE - if (offset == 0 && length == PAGE_CACHE_SIZE) { + if (offset == 0 && length == PAGE_SIZE) { #else if (offset == 0) { #endif @@ -336,7 +336,7 @@ static void ll_free_user_pages(struct page **pages, int npages, int do_dirty) * representing PAGE_SIZE worth of user data, into a single buffer, and * then truncate this to be a full-sized RPC. For 4kB PAGE_SIZE this is * up to 22MB for 128kB kmalloc and up to 682MB for 4MB kmalloc. */ -#define MAX_DIO_SIZE ((MAX_MALLOC / sizeof(struct brw_page) * PAGE_CACHE_SIZE) & \ +#define MAX_DIO_SIZE ((MAX_MALLOC / sizeof(struct brw_page) * PAGE_SIZE) & \ ~(DT_MAX_BRW_SIZE - 1)) #ifndef HAVE_IOV_ITER_RW @@ -368,8 +368,8 @@ ll_direct_IO( CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), size=%zd (max %lu), " "offset=%lld=%llx, pages %zd (max %lu)\n", PFID(ll_inode2fid(inode)), inode, count, MAX_DIO_SIZE, - file_offset, file_offset, count >> PAGE_CACHE_SHIFT, - MAX_DIO_SIZE >> PAGE_CACHE_SHIFT); + file_offset, file_offset, count >> PAGE_SHIFT, + MAX_DIO_SIZE >> PAGE_SHIFT); /* Check that all user buffers are aligned as well */ if (iov_iter_alignment(iter) & ~PAGE_MASK) @@ -422,8 +422,8 @@ ll_direct_IO( * We should always be able to kmalloc for a * page worth of page pointers = 4MB on i386. 
*/ if (result == -ENOMEM && - size > (PAGE_CACHE_SIZE / sizeof(*pages)) * - PAGE_CACHE_SIZE) { + size > (PAGE_SIZE / sizeof(*pages)) * + PAGE_SIZE) { size = ((((size / 2) - 1) | ~PAGE_MASK) + 1) & PAGE_MASK; CDEBUG(D_VFSTRACE, "DIO size now %zu\n", @@ -465,9 +465,9 @@ static inline int ll_get_user_pages(int rw, unsigned long user_addr, return -EFBIG; } - *max_pages = (user_addr + size + PAGE_CACHE_SIZE - 1) >> - PAGE_CACHE_SHIFT; - *max_pages -= user_addr >> PAGE_CACHE_SHIFT; + *max_pages = (user_addr + size + PAGE_SIZE - 1) >> + PAGE_SHIFT; + *max_pages -= user_addr >> PAGE_SHIFT; OBD_ALLOC_LARGE(*pages, *max_pages * sizeof(**pages)); if (*pages) { @@ -505,8 +505,8 @@ ll_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), size=%zd (max %lu), " "offset=%lld=%llx, pages %zd (max %lu)\n", PFID(ll_inode2fid(inode)), inode, count, MAX_DIO_SIZE, - file_offset, file_offset, count >> PAGE_CACHE_SHIFT, - MAX_DIO_SIZE >> PAGE_CACHE_SHIFT); + file_offset, file_offset, count >> PAGE_SHIFT, + MAX_DIO_SIZE >> PAGE_SHIFT); /* Check that all user buffers are aligned as well */ for (seg = 0; seg < nr_segs; seg++) { @@ -545,7 +545,7 @@ ll_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, &pages, &max_pages); if (likely(page_count > 0)) { if (unlikely(page_count < max_pages)) - bytes = page_count << PAGE_CACHE_SHIFT; + bytes = page_count << PAGE_SHIFT; result = ll_direct_IO_seg(env, io, rw, inode, bytes, file_offset, pages, page_count); @@ -562,11 +562,11 @@ ll_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, * We should always be able to kmalloc for a * page worth of page pointers = 4MB on i386. */ if (result == -ENOMEM && - size > (PAGE_CACHE_SIZE / sizeof(*pages)) * - PAGE_CACHE_SIZE) { + size > (PAGE_SIZE / sizeof(*pages)) * + PAGE_SIZE) { size = ((((size / 2) - 1) | - ~PAGE_CACHE_MASK) + 1) & - PAGE_CACHE_MASK; + ~PAGE_MASK) + 1) & + PAGE_MASK; CDEBUG(D_VFSTRACE, "DIO size now %zu\n", size); continue; @@ -637,9 +637,9 @@ static int ll_write_begin(struct file *file, struct address_space *mapping, struct cl_page *page; struct cl_object *clob = ll_i2info(mapping->host)->lli_clob; - pgoff_t index = pos >> PAGE_CACHE_SHIFT; + pgoff_t index = pos >> PAGE_SHIFT; struct page *vmpage = NULL; - unsigned from = pos & (PAGE_CACHE_SIZE - 1); + unsigned from = pos & (PAGE_SIZE - 1); unsigned to = from + len; int result = 0; ENTRY; @@ -739,7 +739,7 @@ static int ll_write_end(struct file *file, struct address_space *mapping, struct cl_io *io; struct vvp_io *vio; struct cl_page *page; - unsigned from = pos & (PAGE_CACHE_SIZE - 1); + unsigned from = pos & (PAGE_SIZE - 1); bool unplug = false; int result = 0; ENTRY; diff --git a/lustre/llite/vvp_io.c b/lustre/llite/vvp_io.c index 2f10ea7..a8469b8 100644 --- a/lustre/llite/vvp_io.c +++ b/lustre/llite/vvp_io.c @@ -179,10 +179,10 @@ static int vvp_prep_size(const struct lu_env *env, struct cl_object *obj, * --bug 17336 */ loff_t size = i_size_read(inode); unsigned long cur_index = start >> - PAGE_CACHE_SHIFT; + PAGE_SHIFT; if ((size == 0 && cur_index != 0) || - (((size - 1) >> PAGE_CACHE_SHIFT) < + (((size - 1) >> PAGE_SHIFT) < cur_index)) *exceed = 1; } @@ -751,7 +751,7 @@ static int vvp_io_read_start(const struct lu_env *env, if (!vio->vui_ra_valid) { vio->vui_ra_valid = true; vio->vui_ra_start = cl_index(obj, pos); - vio->vui_ra_count = cl_index(obj, tot + PAGE_CACHE_SIZE - 1); + vio->vui_ra_count = cl_index(obj, tot + PAGE_SIZE - 1); ll_ras_enter(file); } diff --git a/lustre/lmv/lmv_obd.c 
b/lustre/lmv/lmv_obd.c index 25f5083..d485cc2 100644 --- a/lustre/lmv/lmv_obd.c +++ b/lustre/lmv/lmv_obd.c @@ -2283,7 +2283,7 @@ static int lmv_read_striped_page(struct obd_export *exp, dp->ldp_flags |= LDF_COLLIDE; area = dp + 1; - left_bytes = PAGE_CACHE_SIZE - sizeof(*dp); + left_bytes = PAGE_SIZE - sizeof(*dp); ent = area; last_ent = ent; do { diff --git a/lustre/lov/lov_io.c b/lustre/lov/lov_io.c index 78d0a51..64b52bf 100644 --- a/lustre/lov/lov_io.c +++ b/lustre/lov/lov_io.c @@ -616,7 +616,7 @@ static int lov_io_read_ahead(const struct lu_env *env, if (ra_end != CL_PAGE_EOF) ra_end = lov_stripe_pgoff(loo->lo_lsm, ra_end, stripe); - pps = loo->lo_lsm->lsm_stripe_size >> PAGE_CACHE_SHIFT; + pps = loo->lo_lsm->lsm_stripe_size >> PAGE_SHIFT; CDEBUG(D_READA, DFID " max_index = %lu, pps = %u, " "stripe_size = %u, stripe no = %u, start index = %lu\n", diff --git a/lustre/lov/lov_offset.c b/lustre/lov/lov_offset.c index a6056e2..0032131 100644 --- a/lustre/lov/lov_offset.c +++ b/lustre/lov/lov_offset.c @@ -76,9 +76,9 @@ pgoff_t lov_stripe_pgoff(struct lov_stripe_md *lsm, pgoff_t stripe_index, { loff_t offset; - offset = lov_stripe_size(lsm, (stripe_index << PAGE_CACHE_SHIFT) + 1, + offset = lov_stripe_size(lsm, (stripe_index << PAGE_SHIFT) + 1, stripe); - return offset >> PAGE_CACHE_SHIFT; + return offset >> PAGE_SHIFT; } /* we have an offset in file backed by an lov and want to find out where diff --git a/lustre/mdc/mdc_request.c b/lustre/mdc/mdc_request.c index 16273cb..7c3fb96 100644 --- a/lustre/mdc/mdc_request.c +++ b/lustre/mdc/mdc_request.c @@ -909,9 +909,9 @@ restart_bulk: /* NB req now owns desc and will free it when it gets freed */ for (i = 0; i < npages; i++) desc->bd_frag_ops->add_kiov_frag(desc, pages[i], 0, - PAGE_CACHE_SIZE); + PAGE_SIZE); - mdc_readdir_pack(req, offset, PAGE_CACHE_SIZE * npages, fid); + mdc_readdir_pack(req, offset, PAGE_SIZE * npages, fid); ptlrpc_request_set_replen(req); rc = ptlrpc_queue_wait(req); @@ -943,7 +943,7 @@ restart_bulk: if (req->rq_bulk->bd_nob_transferred & ~LU_PAGE_MASK) { CERROR("%s: unexpected bytes transferred: %d (%ld expected)\n", exp->exp_obd->obd_name, req->rq_bulk->bd_nob_transferred, - PAGE_CACHE_SIZE * npages); + PAGE_SIZE * npages); ptlrpc_req_finished(req); RETURN(-EPROTO); } @@ -1056,7 +1056,7 @@ static struct page *mdc_page_locate(struct address_space *mapping, __u64 *hash, * |s|e|f|p|ent| 0 | ... | 0 | * '----------------- -----' * - * However, on hosts where the native VM page size (PAGE_CACHE_SIZE) is + * However, on hosts where the native VM page size (PAGE_SIZE) is * larger than LU_PAGE_SIZE, a single host page may contain multiple * lu_dirpages. After reading the lu_dirpages from the MDS, the * ldp_hash_end of the first lu_dirpage refers to the one immediately @@ -1087,7 +1087,7 @@ static struct page *mdc_page_locate(struct address_space *mapping, __u64 *hash, * - Adjust the lde_reclen of the ending entry of each lu_dirpage to span * to the first entry of the next lu_dirpage. 
*/ -#if PAGE_CACHE_SIZE > LU_PAGE_SIZE +#if PAGE_SIZE > LU_PAGE_SIZE static void mdc_adjust_dirpages(struct page **pages, int cfs_pgs, int lu_pgs) { int i; @@ -1138,7 +1138,7 @@ static void mdc_adjust_dirpages(struct page **pages, int cfs_pgs, int lu_pgs) } #else #define mdc_adjust_dirpages(pages, cfs_pgs, lu_pgs) do {} while (0) -#endif /* PAGE_CACHE_SIZE > LU_PAGE_SIZE */ +#endif /* PAGE_SIZE > LU_PAGE_SIZE */ /* parameters for readdir page */ struct readpage_param { @@ -1212,7 +1212,7 @@ static int mdc_read_page_remote(void *data, struct page *page0) int lu_pgs; rd_pgs = (req->rq_bulk->bd_nob_transferred + - PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; + PAGE_SIZE - 1) >> PAGE_SHIFT; lu_pgs = req->rq_bulk->bd_nob_transferred >> LU_PAGE_SHIFT; LASSERT(!(req->rq_bulk->bd_nob_transferred & ~LU_PAGE_MASK)); diff --git a/lustre/mdd/mdd_dir.c b/lustre/mdd/mdd_dir.c index 8417a75..e0e878d 100644 --- a/lustre/mdd/mdd_dir.c +++ b/lustre/mdd/mdd_dir.c @@ -1104,7 +1104,7 @@ int mdd_links_read(const struct lu_env *env, struct mdd_object *mdd_obj, /* First try a small buf */ LASSERT(env != NULL); ldata->ld_buf = lu_buf_check_and_alloc(&mdd_env_info(env)->mti_link_buf, - PAGE_CACHE_SIZE); + PAGE_SIZE); if (ldata->ld_buf->lb_buf == NULL) return -ENOMEM; diff --git a/lustre/mdd/mdd_lproc.c b/lustre/mdd/mdd_lproc.c index 099fc1a..b291ffd 100644 --- a/lustre/mdd/mdd_lproc.c +++ b/lustre/mdd/mdd_lproc.c @@ -107,9 +107,9 @@ mdd_changelog_mask_seq_write(struct file *file, const char __user *buffer, int rc; ENTRY; - if (count >= PAGE_CACHE_SIZE) + if (count >= PAGE_SIZE) RETURN(-EINVAL); - OBD_ALLOC(kernbuf, PAGE_CACHE_SIZE); + OBD_ALLOC(kernbuf, PAGE_SIZE); if (kernbuf == NULL) RETURN(-ENOMEM); if (copy_from_user(kernbuf, buffer, count)) @@ -121,7 +121,7 @@ mdd_changelog_mask_seq_write(struct file *file, const char __user *buffer, if (rc == 0) rc = count; out: - OBD_FREE(kernbuf, PAGE_CACHE_SIZE); + OBD_FREE(kernbuf, PAGE_SIZE); return rc; } LPROC_SEQ_FOPS(mdd_changelog_mask); diff --git a/lustre/mdt/mdt_handler.c b/lustre/mdt/mdt_handler.c index 437b7da..d8ddcdc 100644 --- a/lustre/mdt/mdt_handler.c +++ b/lustre/mdt/mdt_handler.c @@ -1105,7 +1105,7 @@ static int mdt_getattr_internal(struct mdt_thread_info *info, PFID(mdt_object_fid(o)), rc); rc = -EFAULT; } else { - int print_limit = min_t(int, PAGE_CACHE_SIZE - 128, rc); + int print_limit = min_t(int, PAGE_SIZE - 128, rc); if (OBD_FAIL_CHECK(OBD_FAIL_MDS_READLINK_EPROTO)) rc -= 2; @@ -1785,8 +1785,8 @@ static int mdt_readpage(struct tgt_session_info *tsi) rdpg->rp_attrs |= LUDA_64BITHASH; rdpg->rp_count = min_t(unsigned int, reqbody->mbo_nlink, exp_max_brw_size(tsi->tsi_exp)); - rdpg->rp_npages = (rdpg->rp_count + PAGE_CACHE_SIZE - 1) >> - PAGE_CACHE_SHIFT; + rdpg->rp_npages = (rdpg->rp_count + PAGE_SIZE - 1) >> + PAGE_SHIFT; OBD_ALLOC(rdpg->rp_pages, rdpg->rp_npages * sizeof rdpg->rp_pages[0]); if (rdpg->rp_pages == NULL) RETURN(-ENOMEM); diff --git a/lustre/mgc/mgc_request.c b/lustre/mgc/mgc_request.c index 1c98fa7..bbfdd9f 100644 --- a/lustre/mgc/mgc_request.c +++ b/lustre/mgc/mgc_request.c @@ -1353,7 +1353,7 @@ static int mgc_import_event(struct obd_device *obd, } enum { - CONFIG_READ_NRPAGES_INIT = 1 << (20 - PAGE_CACHE_SHIFT), + CONFIG_READ_NRPAGES_INIT = 1 << (20 - PAGE_SHIFT), CONFIG_READ_NRPAGES = 4 }; @@ -1379,22 +1379,22 @@ static int mgc_apply_recover_logs(struct obd_device *mgc, LASSERT(cfg->cfg_instance != NULL); LASSERT(cfg->cfg_sb == cfg->cfg_instance); - OBD_ALLOC(inst, PAGE_CACHE_SIZE); + OBD_ALLOC(inst, PAGE_SIZE); if (inst == NULL) 
RETURN(-ENOMEM); if (!IS_SERVER(lsi)) { - pos = snprintf(inst, PAGE_CACHE_SIZE, "%p", cfg->cfg_instance); - if (pos >= PAGE_CACHE_SIZE) { - OBD_FREE(inst, PAGE_CACHE_SIZE); + pos = snprintf(inst, PAGE_SIZE, "%p", cfg->cfg_instance); + if (pos >= PAGE_SIZE) { + OBD_FREE(inst, PAGE_SIZE); return -E2BIG; } } else { LASSERT(IS_MDT(lsi)); rc = server_name2svname(lsi->lsi_svname, inst, NULL, - PAGE_CACHE_SIZE); + PAGE_SIZE); if (rc) { - OBD_FREE(inst, PAGE_CACHE_SIZE); + OBD_FREE(inst, PAGE_SIZE); RETURN(-EINVAL); } pos = strlen(inst); @@ -1402,7 +1402,7 @@ static int mgc_apply_recover_logs(struct obd_device *mgc, ++pos; buf = inst + pos; - bufsz = PAGE_CACHE_SIZE - pos; + bufsz = PAGE_SIZE - pos; while (datalen > 0) { int entry_len = sizeof(*entry); @@ -1434,7 +1434,7 @@ static int mgc_apply_recover_logs(struct obd_device *mgc, /* Keep this swab for normal mixed endian handling. LU-1644 */ if (mne_swab) lustre_swab_mgs_nidtbl_entry(entry); - if (entry->mne_length > PAGE_CACHE_SIZE) { + if (entry->mne_length > PAGE_SIZE) { CERROR("MNE too large (%u)\n", entry->mne_length); break; } @@ -1557,7 +1557,7 @@ static int mgc_apply_recover_logs(struct obd_device *mgc, /* continue, even one with error */ } - OBD_FREE(inst, PAGE_CACHE_SIZE); + OBD_FREE(inst, PAGE_SIZE); RETURN(rc); } @@ -1648,7 +1648,7 @@ again: else body->mcb_offset = cfg->cfg_last_idx + 1; body->mcb_type = cld->cld_type; - body->mcb_bits = PAGE_CACHE_SHIFT; + body->mcb_bits = PAGE_SHIFT; body->mcb_units = nrpages; /* allocate bulk transfer descriptor */ @@ -1661,7 +1661,7 @@ again: for (i = 0; i < nrpages; i++) desc->bd_frag_ops->add_kiov_frag(desc, pages[i], 0, - PAGE_CACHE_SIZE); + PAGE_SIZE); ptlrpc_request_set_replen(req); rc = ptlrpc_queue_wait(req); @@ -1693,7 +1693,7 @@ again: if (ealen < 0) GOTO(out, rc = ealen); - if (ealen > nrpages << PAGE_CACHE_SHIFT) + if (ealen > nrpages << PAGE_SHIFT) GOTO(out, rc = -EINVAL); if (ealen == 0) { /* no logs transferred */ @@ -1745,7 +1745,7 @@ again: rc2 = mgc_apply_recover_logs(obd, cld, res->mcr_offset, ptr, min_t(int, ealen, - PAGE_CACHE_SIZE), + PAGE_SIZE), mne_swab); kunmap(pages[i]); if (rc2 < 0) { @@ -1757,7 +1757,7 @@ again: break; } - ealen -= PAGE_CACHE_SIZE; + ealen -= PAGE_SIZE; } out: diff --git a/lustre/mgs/mgs_handler.c b/lustre/mgs/mgs_handler.c index 1027df1..15269b6 100644 --- a/lustre/mgs/mgs_handler.c +++ b/lustre/mgs/mgs_handler.c @@ -676,7 +676,7 @@ static int mgs_iocontrol_nodemap(const struct lu_env *env, GOTO(out, rc = -EINVAL); } - if (data->ioc_plen1 > PAGE_CACHE_SIZE) + if (data->ioc_plen1 > PAGE_SIZE) GOTO(out, rc = -E2BIG); OBD_ALLOC(lcfg, data->ioc_plen1); @@ -814,7 +814,7 @@ static int mgs_iocontrol_pool(const struct lu_env *env, GOTO(out_pool, rc = -EINVAL); } - if (data->ioc_plen1 > PAGE_CACHE_SIZE) + if (data->ioc_plen1 > PAGE_SIZE) GOTO(out_pool, rc = -E2BIG); OBD_ALLOC(lcfg, data->ioc_plen1); diff --git a/lustre/mgs/mgs_nids.c b/lustre/mgs/mgs_nids.c index 4420b64..4cf02ba 100644 --- a/lustre/mgs/mgs_nids.c +++ b/lustre/mgs/mgs_nids.c @@ -96,7 +96,7 @@ static int mgs_nidtbl_read(struct obd_export *exp, struct mgs_nidtbl *tbl, /* make sure unit_size is power 2 */ LASSERT((unit_size & (unit_size - 1)) == 0); - LASSERT(nrpages << PAGE_CACHE_SHIFT >= units_total * unit_size); + LASSERT(nrpages << PAGE_SHIFT >= units_total * unit_size); mutex_lock(&tbl->mn_lock); LASSERT(nidtbl_is_sane(tbl)); @@ -160,7 +160,7 @@ static int mgs_nidtbl_read(struct obd_export *exp, struct mgs_nidtbl *tbl, buf = kmap(pages[index]); ++index; - units_in_page = PAGE_CACHE_SIZE / 
unit_size; + units_in_page = PAGE_SIZE / unit_size; LASSERT(units_in_page > 0); } @@ -632,7 +632,7 @@ int mgs_get_ir_logs(struct ptlrpc_request *req) RETURN(rc); bufsize = body->mcb_units << body->mcb_bits; - nrpages = (bufsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; + nrpages = (bufsize + PAGE_SIZE - 1) >> PAGE_SHIFT; if (nrpages > PTLRPC_MAX_BRW_PAGES) RETURN(-EINVAL); @@ -648,14 +648,14 @@ int mgs_get_ir_logs(struct ptlrpc_request *req) GOTO(out, rc = -EINVAL); res->mcr_offset = body->mcb_offset; - unit_size = min_t(int, 1 << body->mcb_bits, PAGE_CACHE_SIZE); + unit_size = min_t(int, 1 << body->mcb_bits, PAGE_SIZE); bytes = mgs_nidtbl_read(req->rq_export, &fsdb->fsdb_nidtbl, res, pages, nrpages, bufsize / unit_size, unit_size); if (bytes < 0) GOTO(out, rc = bytes); /* start bulk transfer */ - page_count = (bytes + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; + page_count = (bytes + PAGE_SIZE - 1) >> PAGE_SHIFT; LASSERT(page_count <= nrpages); desc = ptlrpc_prep_bulk_exp(req, page_count, 1, PTLRPC_BULK_PUT_SOURCE | @@ -668,8 +668,8 @@ int mgs_get_ir_logs(struct ptlrpc_request *req) for (i = 0; i < page_count && bytes > 0; i++) { desc->bd_frag_ops->add_kiov_frag(desc, pages[i], 0, min_t(int, bytes, - PAGE_CACHE_SIZE)); - bytes -= PAGE_CACHE_SIZE; + PAGE_SIZE)); + bytes -= PAGE_SIZE; } rc = target_bulk_io(req->rq_export, desc, &lwi); @@ -745,7 +745,7 @@ int lprocfs_wr_ir_state(struct file *file, const char __user *buffer, char *ptr; int rc = 0; - if (count == 0 || count >= PAGE_CACHE_SIZE) + if (count == 0 || count >= PAGE_SIZE) return -EINVAL; OBD_ALLOC(kbuf, count + 1); diff --git a/lustre/obdclass/cl_page.c b/lustre/obdclass/cl_page.c index f38c3a1..4c4ca87 100644 --- a/lustre/obdclass/cl_page.c +++ b/lustre/obdclass/cl_page.c @@ -1049,7 +1049,7 @@ int cl_page_cancel(const struct lu_env *env, struct cl_page *page) */ loff_t cl_offset(const struct cl_object *obj, pgoff_t idx) { - return (loff_t)idx << PAGE_CACHE_SHIFT; + return (loff_t)idx << PAGE_SHIFT; } EXPORT_SYMBOL(cl_offset); @@ -1058,13 +1058,13 @@ EXPORT_SYMBOL(cl_offset); */ pgoff_t cl_index(const struct cl_object *obj, loff_t offset) { - return offset >> PAGE_CACHE_SHIFT; + return offset >> PAGE_SHIFT; } EXPORT_SYMBOL(cl_index); size_t cl_page_size(const struct cl_object *obj) { - return 1UL << PAGE_CACHE_SHIFT; + return 1UL << PAGE_SHIFT; } EXPORT_SYMBOL(cl_page_size); diff --git a/lustre/obdclass/class_obd.c b/lustre/obdclass/class_obd.c index 82c8cf7..7ffdf21 100644 --- a/lustre/obdclass/class_obd.c +++ b/lustre/obdclass/class_obd.c @@ -478,9 +478,9 @@ static int obd_init_checks(void) CWARN("s64 wrong length! strlen(%s)=%d != 2\n", buf, len); ret = -EINVAL; } - if ((u64val & ~PAGE_CACHE_MASK) >= PAGE_CACHE_SIZE) { + if ((u64val & ~PAGE_MASK) >= PAGE_SIZE) { CWARN("mask failed: u64val %llu >= %llu\n", u64val, - (__u64)PAGE_CACHE_SIZE); + (__u64)PAGE_SIZE); ret = -EINVAL; } @@ -539,7 +539,7 @@ static int __init obdclass_init(void) /* Default the dirty page cache cap to 1/2 of system memory. * For clients with less memory, a larger fraction is needed * for other purposes (mostly for BGL). 
*/ - if (totalram_pages <= 512 << (20 - PAGE_CACHE_SHIFT)) + if (totalram_pages <= 512 << (20 - PAGE_SHIFT)) obd_max_dirty_pages = totalram_pages / 4; else obd_max_dirty_pages = totalram_pages / 2; diff --git a/lustre/obdclass/linkea.c b/lustre/obdclass/linkea.c index c119fcd..ddc8d22 100644 --- a/lustre/obdclass/linkea.c +++ b/lustre/obdclass/linkea.c @@ -33,7 +33,7 @@ int linkea_data_new(struct linkea_data *ldata, struct lu_buf *buf) { - ldata->ld_buf = lu_buf_check_and_alloc(buf, PAGE_CACHE_SIZE); + ldata->ld_buf = lu_buf_check_and_alloc(buf, PAGE_SIZE); if (ldata->ld_buf->lb_buf == NULL) return -ENOMEM; ldata->ld_leh = ldata->ld_buf->lb_buf; diff --git a/lustre/obdclass/linux/linux-obdo.c b/lustre/obdclass/linux/linux-obdo.c index 0a3858e..733d8c1 100644 --- a/lustre/obdclass/linux/linux-obdo.c +++ b/lustre/obdclass/linux/linux-obdo.c @@ -44,7 +44,7 @@ #include #include -#include /* for PAGE_CACHE_SIZE */ +#include /* for PAGE_SIZE */ #include #include diff --git a/lustre/obdclass/linux/linux-sysctl.c b/lustre/obdclass/linux/linux-sysctl.c index 460f9aa..9d29ce1 100644 --- a/lustre/obdclass/linux/linux-sysctl.c +++ b/lustre/obdclass/linux/linux-sysctl.c @@ -139,7 +139,7 @@ proc_max_dirty_pages_in_mb(struct ctl_table *table, int write, if (val < 0) return -ERANGE; - val >>= PAGE_CACHE_SHIFT; + val >>= PAGE_SHIFT; /* Don't allow them to let dirty pages exceed 90% of system * memory and set a hard minimum of 4MB. */ @@ -149,8 +149,8 @@ proc_max_dirty_pages_in_mb(struct ctl_table *table, int write, "setting to %lu\n", val, ((totalram_pages / 10) * 9)); obd_max_dirty_pages = ((totalram_pages / 10) * 9); - } else if (val < 4 << (20 - PAGE_CACHE_SHIFT)) { - obd_max_dirty_pages = 4 << (20 - PAGE_CACHE_SHIFT); + } else if (val < 4 << (20 - PAGE_SHIFT)) { + obd_max_dirty_pages = 4 << (20 - PAGE_SHIFT); } else { obd_max_dirty_pages = val; } @@ -160,7 +160,7 @@ proc_max_dirty_pages_in_mb(struct ctl_table *table, int write, len = lprocfs_read_frac_helper(buf, sizeof(buf), *(unsigned long *)table->data, - 1 << (20 - PAGE_CACHE_SHIFT)); + 1 << (20 - PAGE_SHIFT)); if (len > *lenp) len = *lenp; buf[len] = '\0'; diff --git a/lustre/obdclass/lu_object.c b/lustre/obdclass/lu_object.c index 3a78091..8956113 100644 --- a/lustre/obdclass/lu_object.c +++ b/lustre/obdclass/lu_object.c @@ -958,8 +958,8 @@ static unsigned long lu_htable_order(struct lu_device *top) #if BITS_PER_LONG == 32 /* limit hashtable size for lowmem systems to low RAM */ - if (cache_size > 1 << (30 - PAGE_CACHE_SHIFT)) - cache_size = 1 << (30 - PAGE_CACHE_SHIFT) * 3 / 4; + if (cache_size > 1 << (30 - PAGE_SHIFT)) + cache_size = 1 << (30 - PAGE_SHIFT) * 3 / 4; #endif /* clear off unreasonable cache setting. 
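Aside on the arithmetic above: the expression 512 << (20 - PAGE_SHIFT) is the megabyte-to-pages conversion this patch touches throughout. 1 MiB is 2^20 bytes and a page is 2^PAGE_SHIFT bytes, so shifting a MiB count left by (20 - PAGE_SHIFT) multiplies it by pages-per-MiB; the 30 in the lu_htable_order hunk plays the same role for GiB. A minimal user-space sketch, assuming PAGE_SHIFT == 12 (4 KiB pages, the common x86_64 value); the macros are redefined locally only so the sketch compiles outside the kernel:

    #include <assert.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12                  /* assumed: 4 KiB pages */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    int main(void)
    {
        unsigned long mb = 512;
        /* 20 == log2(1 MiB), so (20 - PAGE_SHIFT) == log2(pages per MiB) */
        unsigned long pages = mb << (20 - PAGE_SHIFT);
        assert(pages == mb * (1UL << 20) / PAGE_SIZE);
        /* the reverse shift converts a page count back to MiB */
        printf("%lu MiB = %lu pages\n", pages >> (20 - PAGE_SHIFT), pages);
        return 0;
    }

The reverse shift appears again in the osc and ptlrpc hunks below, e.g. mult = 1 << (20 - PAGE_SHIFT) in osc_max_dirty_mb_seq_show.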
*/ @@ -972,7 +972,7 @@ static unsigned long lu_htable_order(struct lu_device *top) lu_cache_percent = LU_CACHE_PERCENT_DEFAULT; } cache_size = cache_size / 100 * lu_cache_percent * - (PAGE_CACHE_SIZE / 1024); + (PAGE_SIZE / 1024); for (bits = 1; (1 << bits) < cache_size; ++bits) { ; diff --git a/lustre/obdecho/echo.c b/lustre/obdecho/echo.c index 622c6eb..9a2a097 100644 --- a/lustre/obdecho/echo.c +++ b/lustre/obdecho/echo.c @@ -54,7 +54,7 @@ #define ECHO_INIT_OID 0x10000000ULL #define ECHO_HANDLE_MAGIC 0xabcd0123fedc9876ULL -#define ECHO_PERSISTENT_PAGES (ECHO_PERSISTENT_SIZE >> PAGE_CACHE_SHIFT) +#define ECHO_PERSISTENT_PAGES (ECHO_PERSISTENT_SIZE >> PAGE_SHIFT) static struct page *echo_persistent_pages[ECHO_PERSISTENT_PAGES]; enum { @@ -288,7 +288,7 @@ static int echo_map_nb_to_lb(struct obdo *oa, struct obd_ioobj *obj, int len = nb->rnb_len; while (len > 0) { - int plen = PAGE_CACHE_SIZE - (offset & (PAGE_CACHE_SIZE-1)); + int plen = PAGE_SIZE - (offset & (PAGE_SIZE-1)); if (len < plen) plen = len; @@ -299,14 +299,14 @@ static int echo_map_nb_to_lb(struct obdo *oa, struct obd_ioobj *obj, res->lnb_file_offset = offset; res->lnb_len = plen; LASSERT((res->lnb_file_offset & ~PAGE_MASK) + - res->lnb_len <= PAGE_CACHE_SIZE); + res->lnb_len <= PAGE_SIZE); if (ispersistent && - ((res->lnb_file_offset >> PAGE_CACHE_SHIFT) < + ((res->lnb_file_offset >> PAGE_SHIFT) < ECHO_PERSISTENT_PAGES)) { res->lnb_page = echo_persistent_pages[res->lnb_file_offset >> - PAGE_CACHE_SHIFT]; + PAGE_SHIFT]; /* Take extra ref so __free_pages() can be called OK */ get_page(res->lnb_page); } else { @@ -346,9 +346,9 @@ static int echo_finalize_lb(struct obdo *oa, struct obd_ioobj *obj, struct niobuf_local *lb, int verify) { struct niobuf_local *res = lb; - u64 start = rb->rnb_offset >> PAGE_CACHE_SHIFT; - u64 end = (rb->rnb_offset + rb->rnb_len + PAGE_CACHE_SIZE - 1) >> - PAGE_CACHE_SHIFT; + u64 start = rb->rnb_offset >> PAGE_SHIFT; + u64 end = (rb->rnb_offset + rb->rnb_len + PAGE_SIZE - 1) >> + PAGE_SHIFT; int count = (int)(end - start); int rc = 0; int i; @@ -657,8 +657,8 @@ int echo_persistent_pages_init(void) return -ENOMEM; } - memset (kmap (pg), 0, PAGE_CACHE_SIZE); - kunmap (pg); + memset(kmap(pg), 0, PAGE_SIZE); + kunmap(pg); echo_persistent_pages[i] = pg; } diff --git a/lustre/obdecho/echo_client.c b/lustre/obdecho/echo_client.c index 3ac5aaa..3f5dfce 100644 --- a/lustre/obdecho/echo_client.c +++ b/lustre/obdecho/echo_client.c @@ -1313,7 +1313,7 @@ static int cl_echo_object_brw(struct echo_object *eco, int rw, u64 offset, rc = cl_echo_enqueue0(env, eco, offset, - offset + npages * PAGE_CACHE_SIZE - 1, + offset + npages * PAGE_SIZE - 1, rw == READ ? 
LCK_PR : LCK_PW, &lh.cookie, CEF_NEVER); if (rc < 0) @@ -2254,11 +2254,11 @@ static void echo_client_page_debug_setup(struct page *page, int rw, u64 id, int delta; /* no partial pages on the client */ - LASSERT(count == PAGE_CACHE_SIZE); + LASSERT(count == PAGE_SIZE); addr = kmap(page); - for (delta = 0; delta < PAGE_CACHE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) { + for (delta = 0; delta < PAGE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) { if (rw == OBD_BRW_WRITE) { stripe_off = offset + delta; stripe_id = id; @@ -2284,11 +2284,11 @@ echo_client_page_debug_check(struct page *page, u64 id, u64 offset, u64 count) int rc2; /* no partial pages on the client */ - LASSERT(count == PAGE_CACHE_SIZE); + LASSERT(count == PAGE_SIZE); addr = kmap(page); - for (rc = delta = 0; delta < PAGE_CACHE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) { + for (rc = delta = 0; delta < PAGE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) { stripe_off = offset + delta; stripe_id = id; @@ -2333,7 +2333,7 @@ static int echo_client_kbrw(struct echo_device *ed, int rw, struct obdo *oa, RETURN(-EINVAL); /* XXX think again with misaligned I/O */ - npages = count >> PAGE_CACHE_SHIFT; + npages = count >> PAGE_SHIFT; if (rw == OBD_BRW_WRITE) brw_flags = OBD_BRW_ASYNC; @@ -2350,7 +2350,7 @@ static int echo_client_kbrw(struct echo_device *ed, int rw, struct obdo *oa, for (i = 0, pgp = pga, off = offset; i < npages; - i++, pgp++, off += PAGE_CACHE_SIZE) { + i++, pgp++, off += PAGE_SIZE) { LASSERT(pgp->pg == NULL); /* for cleanup */ @@ -2360,7 +2360,7 @@ static int echo_client_kbrw(struct echo_device *ed, int rw, struct obdo *oa, goto out; pages[i] = pgp->pg; - pgp->count = PAGE_CACHE_SIZE; + pgp->count = PAGE_SIZE; pgp->off = off; pgp->flag = brw_flags; @@ -2412,11 +2412,11 @@ static int echo_client_prep_commit(const struct lu_env *env, ENTRY; - if (count <= 0 || (count & ~PAGE_CACHE_MASK) != 0) + if (count <= 0 || (count & ~PAGE_MASK) != 0) RETURN(-EINVAL); - apc = npages = batch >> PAGE_CACHE_SHIFT; - tot_pages = count >> PAGE_CACHE_SHIFT; + apc = npages = batch >> PAGE_SHIFT; + tot_pages = count >> PAGE_SHIFT; OBD_ALLOC(lnb, apc * sizeof(struct niobuf_local)); if (lnb == NULL) @@ -2436,10 +2436,10 @@ static int echo_client_prep_commit(const struct lu_env *env, npages = tot_pages; rnb.rnb_offset = off; - rnb.rnb_len = npages * PAGE_CACHE_SIZE; + rnb.rnb_len = npages * PAGE_SIZE; rnb.rnb_flags = brw_flags; ioo.ioo_bufcnt = 1; - off += npages * PAGE_CACHE_SIZE; + off += npages * PAGE_SIZE; lpages = npages; ret = obd_preprw(env, rw, exp, oa, 1, &ioo, &rnb, &lpages, lnb); @@ -2888,7 +2888,7 @@ static int __init obdecho_init(void) ENTRY; LCONSOLE_INFO("Echo OBD driver; http://www.lustre.org/\n"); - LASSERT(PAGE_CACHE_SIZE % OBD_ECHO_BLOCK_SIZE == 0); + LASSERT(PAGE_SIZE % OBD_ECHO_BLOCK_SIZE == 0); # ifdef HAVE_SERVER_SUPPORT rc = echo_persistent_pages_init(); diff --git a/lustre/ofd/lproc_ofd.c b/lustre/ofd/lproc_ofd.c index 38093a5..52ab770 100644 --- a/lustre/ofd/lproc_ofd.c +++ b/lustre/ofd/lproc_ofd.c @@ -639,7 +639,7 @@ LPROC_SEQ_FOPS(ofd_sync_lock_cancel); * * When ofd_grant_compat_disable is set, we don't grant any space to clients * not supporting OBD_CONNECT_GRANT_PARAM. Otherwise, space granted to such - * a client is inflated since it consumes PAGE_CACHE_SIZE of grant space per + * a client is inflated since it consumes PAGE_SIZE of grant space per * block, (i.e. typically 4kB units), but underlaying file system might have * block size bigger than page size, e.g. ZFS. See LU-2049 for details. 
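The compat situation this comment describes is easier to see with concrete numbers. A simplified standalone illustration, not the ofd grant code itself; the 128 KiB block size is only an assumed ZFS-like example:

    #include <stdio.h>

    /* Assumed example sizes: 4 KiB client pages, 128 KiB backend blocks. */
    #define CLIENT_PAGE_SIZE 4096UL
    #define OST_BLOCK_SIZE   (128UL << 10)

    int main(void)
    {
        /* A client without OBD_CONNECT_GRANT_PARAM deducts one page of
         * grant for a one-page write... */
        unsigned long client_accounting = CLIENT_PAGE_SIZE;
        /* ...but the backend may allocate a whole block to serve it. */
        unsigned long worst_case_cost = OST_BLOCK_SIZE;

        printf("client deducts %lu bytes, server reserves up to %lu (x%lu)\n",
               client_accounting, worst_case_cost,
               worst_case_cost / client_accounting);
        return 0;
    }

This mismatch is also why the ofd_grant_prohibit() hunk below keeps the option of refusing grant entirely to such clients when ofd_grant_compat_disable is set.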
* diff --git a/lustre/ofd/ofd_dev.c b/lustre/ofd/ofd_dev.c index be51836..cfa2baa 100644 --- a/lustre/ofd/ofd_dev.c +++ b/lustre/ofd/ofd_dev.c @@ -2138,14 +2138,14 @@ static int ofd_ladvise_prefetch(const struct lu_env *env, GOTO(out_unlock, rc); /* We need page aligned offset and length */ - start_index = start >> PAGE_CACHE_SHIFT; - end_index = (end - 1) >> PAGE_CACHE_SHIFT; + start_index = start >> PAGE_SHIFT; + end_index = (end - 1) >> PAGE_SHIFT; pages = end_index - start_index + 1; while (pages > 0) { nr_local = pages <= PTLRPC_MAX_BRW_PAGES ? pages : PTLRPC_MAX_BRW_PAGES; - rnb.rnb_offset = start_index << PAGE_CACHE_SHIFT; - rnb.rnb_len = nr_local << PAGE_CACHE_SHIFT; + rnb.rnb_offset = start_index << PAGE_SHIFT; + rnb.rnb_len = nr_local << PAGE_SHIFT; rc = dt_bufs_get(env, ofd_object_child(fo), &rnb, lnb, 0); if (unlikely(rc < 0)) break; diff --git a/lustre/ofd/ofd_internal.h b/lustre/ofd/ofd_internal.h index e2955d2..d76c029 100644 --- a/lustre/ofd/ofd_internal.h +++ b/lustre/ofd/ofd_internal.h @@ -449,7 +449,7 @@ static inline int ofd_grant_prohibit(struct obd_export *exp, /* When ofd_grant_compat_disable is set, we don't grant any space to * clients not supporting OBD_CONNECT_GRANT_PARAM. * Otherwise, space granted to such a client is inflated since it - * consumes PAGE_CACHE_SIZE of grant space per block */ + * consumes PAGE_SIZE of grant space per block */ return !!(ofd_obd(ofd)->obd_self_export != exp && !ofd_grant_param_supp(exp) && ofd->ofd_grant_compat_disable); } diff --git a/lustre/ofd/ofd_obd.c b/lustre/ofd/ofd_obd.c index c47b5bd..4c81ef2 100644 --- a/lustre/ofd/ofd_obd.c +++ b/lustre/ofd/ofd_obd.c @@ -1443,11 +1443,11 @@ static int ofd_health_check(const struct lu_env *nul, struct obd_device *obd) GOTO(out, rc = -EROFS); #ifdef USE_HEALTH_CHECK_WRITE - OBD_ALLOC(info->fti_buf.lb_buf, PAGE_CACHE_SIZE); + OBD_ALLOC(info->fti_buf.lb_buf, PAGE_SIZE); if (info->fti_buf.lb_buf == NULL) GOTO(out, rc = -ENOMEM); - info->fti_buf.lb_len = PAGE_CACHE_SIZE; + info->fti_buf.lb_len = PAGE_SIZE; info->fti_off = 0; th = dt_trans_create(&env, ofd->ofd_osd); @@ -1466,7 +1466,7 @@ static int ofd_health_check(const struct lu_env *nul, struct obd_device *obd) } dt_trans_stop(&env, ofd->ofd_osd, th); - OBD_FREE(info->fti_buf.lb_buf, PAGE_CACHE_SIZE); + OBD_FREE(info->fti_buf.lb_buf, PAGE_SIZE); CDEBUG(D_INFO, "write 1 page synchronously for checking io rc %d\n",rc); #endif diff --git a/lustre/osc/lproc_osc.c b/lustre/osc/lproc_osc.c index bd370a7..ba4a40a 100644 --- a/lustre/osc/lproc_osc.c +++ b/lustre/osc/lproc_osc.c @@ -143,7 +143,7 @@ static int osc_max_dirty_mb_seq_show(struct seq_file *m, void *v) val = cli->cl_dirty_max_pages; spin_unlock(&cli->cl_loi_list_lock); - mult = 1 << (20 - PAGE_CACHE_SHIFT); + mult = 1 << (20 - PAGE_SHIFT); return lprocfs_seq_read_frac_helper(m, val, mult); } @@ -160,10 +160,10 @@ static ssize_t osc_max_dirty_mb_seq_write(struct file *file, if (rc) return rc; - pages_number >>= PAGE_CACHE_SHIFT; + pages_number >>= PAGE_SHIFT; if (pages_number <= 0 || - pages_number >= OSC_MAX_DIRTY_MB_MAX << (20 - PAGE_CACHE_SHIFT) || + pages_number >= OSC_MAX_DIRTY_MB_MAX << (20 - PAGE_SHIFT) || pages_number > totalram_pages / 4) /* 1/4 of RAM */ return -ERANGE; @@ -180,7 +180,7 @@ static int osc_cached_mb_seq_show(struct seq_file *m, void *v) { struct obd_device *dev = m->private; struct client_obd *cli = &dev->u.cli; - int shift = 20 - PAGE_CACHE_SHIFT; + int shift = 20 - PAGE_SHIFT; seq_printf(m, "used_mb: %ld\n" "busy_cnt: %ld\n" @@ -217,7 +217,7 @@ 
osc_cached_mb_seq_write(struct file *file, const char __user *buffer, if (rc) return rc; - pages_number >>= PAGE_CACHE_SHIFT; + pages_number >>= PAGE_SHIFT; if (pages_number < 0) return -ERANGE; @@ -244,7 +244,7 @@ static int osc_cur_dirty_bytes_seq_show(struct seq_file *m, void *v) struct client_obd *cli = &dev->u.cli; spin_lock(&cli->cl_loi_list_lock); - seq_printf(m, "%lu\n", cli->cl_dirty_pages << PAGE_CACHE_SHIFT); + seq_printf(m, "%lu\n", cli->cl_dirty_pages << PAGE_SHIFT); spin_unlock(&cli->cl_loi_list_lock); return 0; } @@ -564,15 +564,15 @@ static ssize_t osc_obd_max_pages_per_rpc_seq_write(struct file *file, /* if the max_pages is specified in bytes, convert to pages */ if (val >= ONE_MB_BRW_SIZE) - val >>= PAGE_CACHE_SHIFT; + val >>= PAGE_SHIFT; LPROCFS_CLIMP_CHECK(dev); - chunk_mask = ~((1 << (cli->cl_chunkbits - PAGE_CACHE_SHIFT)) - 1); + chunk_mask = ~((1 << (cli->cl_chunkbits - PAGE_SHIFT)) - 1); /* max_pages_per_rpc must be chunk aligned */ val = (val + ~chunk_mask) & chunk_mask; if (val == 0 || (ocd->ocd_brw_size != 0 && - val > ocd->ocd_brw_size >> PAGE_CACHE_SHIFT)) { + val > ocd->ocd_brw_size >> PAGE_SHIFT)) { LPROCFS_CLIMP_EXIT(dev); return -ERANGE; } @@ -594,7 +594,7 @@ static int osc_unstable_stats_seq_show(struct seq_file *m, void *v) int mb; pages = atomic_long_read(&cli->cl_unstable_count); - mb = (pages * PAGE_CACHE_SIZE) >> 20; + mb = (pages * PAGE_SIZE) >> 20; seq_printf(m, "unstable_pages: %20ld\n" "unstable_mb: %10d\n", diff --git a/lustre/osc/osc_cache.c b/lustre/osc/osc_cache.c index f1dfca1..bc511c3 100644 --- a/lustre/osc/osc_cache.c +++ b/lustre/osc/osc_cache.c @@ -523,7 +523,7 @@ static int osc_extent_merge(const struct lu_env *env, struct osc_extent *cur, return -ERANGE; LASSERT(cur->oe_dlmlock == victim->oe_dlmlock); - ppc_bits = osc_cli(obj)->cl_chunkbits - PAGE_CACHE_SHIFT; + ppc_bits = osc_cli(obj)->cl_chunkbits - PAGE_SHIFT; chunk_start = cur->oe_start >> ppc_bits; chunk_end = cur->oe_end >> ppc_bits; if (chunk_start != (victim->oe_end >> ppc_bits) + 1 && @@ -881,8 +881,8 @@ int osc_extent_finish(const struct lu_env *env, struct osc_extent *ext, if (!sent) { lost_grant = ext->oe_grants; - } else if (blocksize < PAGE_CACHE_SIZE && - last_count != PAGE_CACHE_SIZE) { + } else if (blocksize < PAGE_SIZE && + last_count != PAGE_SIZE) { /* For short writes we shouldn't count parts of pages that * span a whole chunk on the OST side, or our accounting goes * wrong. Should match the code in filter_grant_check. 
*/ @@ -892,7 +892,7 @@ int osc_extent_finish(const struct lu_env *env, struct osc_extent *ext, if (end) count += blocksize - end; - lost_grant = PAGE_CACHE_SIZE - count; + lost_grant = PAGE_SIZE - count; } if (ext->oe_grants > 0) osc_free_grant(cli, nr_pages, lost_grant, ext->oe_grants); @@ -974,7 +974,7 @@ static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index, struct osc_async_page *tmp; int pages_in_chunk = 0; int ppc_bits = cli->cl_chunkbits - - PAGE_CACHE_SHIFT; + PAGE_SHIFT; __u64 trunc_chunk = trunc_index >> ppc_bits; int grants = 0; int nr_pages = 0; @@ -1132,7 +1132,7 @@ static int osc_extent_make_ready(const struct lu_env *env, if (!(last->oap_async_flags & ASYNC_COUNT_STABLE)) { int last_oap_count = osc_refresh_count(env, last, OBD_BRW_WRITE); LASSERT(last_oap_count > 0); - LASSERT(last->oap_page_off + last_oap_count <= PAGE_CACHE_SIZE); + LASSERT(last->oap_page_off + last_oap_count <= PAGE_SIZE); last->oap_count = last_oap_count; spin_lock(&last->oap_lock); last->oap_async_flags |= ASYNC_COUNT_STABLE; @@ -1143,7 +1143,7 @@ static int osc_extent_make_ready(const struct lu_env *env, * because it's known they are not the last page */ list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) { if (!(oap->oap_async_flags & ASYNC_COUNT_STABLE)) { - oap->oap_count = PAGE_CACHE_SIZE - oap->oap_page_off; + oap->oap_count = PAGE_SIZE - oap->oap_page_off; spin_lock(&oap->oap_lock); oap->oap_async_flags |= ASYNC_COUNT_STABLE; spin_unlock(&oap->oap_lock); @@ -1170,7 +1170,7 @@ static int osc_extent_expand(struct osc_extent *ext, pgoff_t index, struct osc_object *obj = ext->oe_obj; struct client_obd *cli = osc_cli(obj); struct osc_extent *next; - int ppc_bits = cli->cl_chunkbits - PAGE_CACHE_SHIFT; + int ppc_bits = cli->cl_chunkbits - PAGE_SHIFT; pgoff_t chunk = index >> ppc_bits; pgoff_t end_chunk; pgoff_t end_index; @@ -1307,9 +1307,9 @@ static int osc_refresh_count(const struct lu_env *env, return 0; else if (cl_offset(obj, index + 1) > kms) /* catch sub-page write at end of file */ - return kms % PAGE_CACHE_SIZE; + return kms % PAGE_SIZE; else - return PAGE_CACHE_SIZE; + return PAGE_SIZE; } static int osc_completion(const struct lu_env *env, struct osc_async_page *oap, @@ -1388,7 +1388,7 @@ static void osc_consume_write_grant(struct client_obd *cli, cli->cl_dirty_pages++; pga->flag |= OBD_BRW_FROM_GRANT; CDEBUG(D_CACHE, "using %lu grant credits for brw %p page %p\n", - PAGE_CACHE_SIZE, pga, pga->pg); + PAGE_SIZE, pga, pga->pg); osc_update_next_shrink(cli); } @@ -1469,7 +1469,7 @@ static void osc_unreserve_grant(struct client_obd *cli, * used, we should return these grants to OST. There're two cases where grants * can be lost: * 1. truncate; - * 2. blocksize at OST is less than PAGE_CACHE_SIZE and a partial page was + * 2. blocksize at OST is less than PAGE_SIZE and a partial page was * written. In this case OST may use less chunks to serve this partial * write. OSTs don't actually know the page size on the client side. so * clients have to calculate lost grant by the blocksize on the OST. 
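To make the lost-grant case in the two comments above concrete: when the OST block size is smaller than the client page size, a short write is charged in whole blocks on the server, while the client reserved a full page of grant for it. A simplified standalone model with assumed sizes (the real osc_extent_finish() also accounts for where the write starts and ends inside OST blocks):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL              /* assumed client page size */

    int main(void)
    {
        unsigned long blocksize = 1024;   /* assumed OST block size < PAGE_SIZE */
        unsigned long last_count = 1500;  /* bytes of the final, partial page */

        /* the server consumes whole blocks, so round the write up to one */
        unsigned long count = (last_count + blocksize - 1) & ~(blocksize - 1);
        unsigned long lost_grant = PAGE_SIZE - count;

        printf("wrote %lu bytes, server charges %lu, client lost %lu grant\n",
               last_count, count, lost_grant);
        return 0;
    }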
@@ -1497,7 +1497,7 @@ static void osc_free_grant(struct client_obd *cli, unsigned int nr_pages, spin_unlock(&cli->cl_loi_list_lock); CDEBUG(D_CACHE, "lost %u grant: %lu avail: %lu dirty: %lu/%lu\n", lost_grant, cli->cl_lost_grant, - cli->cl_avail_grant, cli->cl_dirty_pages << PAGE_CACHE_SHIFT, + cli->cl_avail_grant, cli->cl_dirty_pages << PAGE_SHIFT, cli->cl_dirty_grant); } diff --git a/lustre/osc/osc_io.c b/lustre/osc/osc_io.c index baef262..05ef75b 100644 --- a/lustre/osc/osc_io.c +++ b/lustre/osc/osc_io.c @@ -367,7 +367,7 @@ static int osc_io_write_iter_init(const struct lu_env *env, if (cl_io_is_append(io)) RETURN(osc_io_iter_init(env, ios)); - npages = io->u.ci_rw.crw_count >> PAGE_CACHE_SHIFT; + npages = io->u.ci_rw.crw_count >> PAGE_SHIFT; if (io->u.ci_rw.crw_pos & ~PAGE_MASK) ++npages; diff --git a/lustre/osc/osc_object.c b/lustre/osc/osc_object.c index 454311c..eef9bd9 100644 --- a/lustre/osc/osc_object.c +++ b/lustre/osc/osc_object.c @@ -241,15 +241,15 @@ static int osc_object_fiemap(const struct lu_env *env, struct cl_object *obj, if (!(fmkey->lfik_fiemap.fm_flags & FIEMAP_FLAG_SYNC)) goto skip_locking; - policy.l_extent.start = fmkey->lfik_fiemap.fm_start & PAGE_CACHE_MASK; + policy.l_extent.start = fmkey->lfik_fiemap.fm_start & PAGE_MASK; if (OBD_OBJECT_EOF - fmkey->lfik_fiemap.fm_length <= - fmkey->lfik_fiemap.fm_start + PAGE_CACHE_SIZE - 1) + fmkey->lfik_fiemap.fm_start + PAGE_SIZE - 1) policy.l_extent.end = OBD_OBJECT_EOF; else policy.l_extent.end = (fmkey->lfik_fiemap.fm_start + fmkey->lfik_fiemap.fm_length + - PAGE_CACHE_SIZE - 1) & PAGE_CACHE_MASK; + PAGE_SIZE - 1) & PAGE_MASK; ostid_build_res_name(&fmkey->lfik_oa.o_oi, &resid); mode = ldlm_lock_match(exp->exp_obd->obd_namespace, diff --git a/lustre/osc/osc_page.c b/lustre/osc/osc_page.c index 55eae29..20e68f9 100644 --- a/lustre/osc/osc_page.c +++ b/lustre/osc/osc_page.c @@ -269,7 +269,7 @@ int osc_page_init(const struct lu_env *env, struct cl_object *obj, int result; opg->ops_from = 0; - opg->ops_to = PAGE_CACHE_SIZE; + opg->ops_to = PAGE_SIZE; result = osc_prep_async_page(osc, opg, page->cp_vmpage, cl_offset(obj, index)); diff --git a/lustre/osc/osc_request.c b/lustre/osc/osc_request.c index aafe470..d11c9f7 100644 --- a/lustre/osc/osc_request.c +++ b/lustre/osc/osc_request.c @@ -672,7 +672,7 @@ static void osc_announce_cached(struct client_obd *cli, struct obdo *oa, if (OCD_HAS_FLAG(&cli->cl_import->imp_connect_data, GRANT_PARAM)) oa->o_dirty = cli->cl_dirty_grant; else - oa->o_dirty = cli->cl_dirty_pages << PAGE_CACHE_SHIFT; + oa->o_dirty = cli->cl_dirty_pages << PAGE_SHIFT; if (unlikely(cli->cl_dirty_pages - cli->cl_dirty_transit > cli->cl_dirty_max_pages)) { CERROR("dirty %lu - %lu > dirty_max %lu\n", @@ -701,7 +701,7 @@ static void osc_announce_cached(struct client_obd *cli, struct obdo *oa, nrpages = cli->cl_max_pages_per_rpc; nrpages *= cli->cl_max_rpcs_in_flight + 1; nrpages = max(nrpages, cli->cl_dirty_max_pages); - oa->o_undirty = nrpages << PAGE_CACHE_SHIFT; + oa->o_undirty = nrpages << PAGE_SHIFT; if (OCD_HAS_FLAG(&cli->cl_import->imp_connect_data, GRANT_PARAM)) { int nrextents; @@ -791,11 +791,11 @@ static void osc_shrink_grant_local(struct client_obd *cli, struct obdo *oa) static int osc_shrink_grant(struct client_obd *cli) { __u64 target_bytes = (cli->cl_max_rpcs_in_flight + 1) * - (cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT); + (cli->cl_max_pages_per_rpc << PAGE_SHIFT); spin_lock(&cli->cl_loi_list_lock); if (cli->cl_avail_grant <= target_bytes) - target_bytes = cli->cl_max_pages_per_rpc << 
PAGE_CACHE_SHIFT; + target_bytes = cli->cl_max_pages_per_rpc << PAGE_SHIFT; spin_unlock(&cli->cl_loi_list_lock); return osc_shrink_grant_to_target(cli, target_bytes); @@ -811,8 +811,8 @@ int osc_shrink_grant_to_target(struct client_obd *cli, __u64 target_bytes) /* Don't shrink if we are already above or below the desired limit * We don't want to shrink below a single RPC, as that will negatively * impact block allocation and long-term performance. */ - if (target_bytes < cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT) - target_bytes = cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT; + if (target_bytes < cli->cl_max_pages_per_rpc << PAGE_SHIFT) + target_bytes = cli->cl_max_pages_per_rpc << PAGE_SHIFT; if (target_bytes >= cli->cl_avail_grant) { spin_unlock(&cli->cl_loi_list_lock); @@ -859,7 +859,7 @@ static int osc_should_shrink_grant(struct client_obd *client) /* Get the current RPC size directly, instead of going via: * cli_brw_size(obd->u.cli.cl_import->imp_obd->obd_self_export) * Keep comment here so that it can be found by searching. */ - int brw_size = client->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT; + int brw_size = client->cl_max_pages_per_rpc << PAGE_SHIFT; if (client->cl_import->imp_state == LUSTRE_IMP_FULL && client->cl_avail_grant > brw_size) @@ -923,13 +923,13 @@ static void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd) cli->cl_avail_grant -= cli->cl_dirty_grant; else cli->cl_avail_grant -= - cli->cl_dirty_pages << PAGE_CACHE_SHIFT; + cli->cl_dirty_pages << PAGE_SHIFT; } if (cli->cl_avail_grant < 0) { CWARN("%s: available grant < 0: avail/ocd/dirty %ld/%u/%ld\n", cli_name(cli), cli->cl_avail_grant, - ocd->ocd_grant, cli->cl_dirty_pages << PAGE_CACHE_SHIFT); + ocd->ocd_grant, cli->cl_dirty_pages << PAGE_SHIFT); /* workaround for servers which do not have the patch from * LU-2679 */ cli->cl_avail_grant = ocd->ocd_grant; @@ -1212,9 +1212,9 @@ osc_brw_prep_request(int cmd, struct client_obd *cli, struct obdo *oa, LASSERT(pg->count > 0); /* make sure there is no gap in the middle of page array */ LASSERTF(page_count == 1 || - (ergo(i == 0, poff + pg->count == PAGE_CACHE_SIZE) && + (ergo(i == 0, poff + pg->count == PAGE_SIZE) && ergo(i > 0 && i < page_count - 1, - poff == 0 && pg->count == PAGE_CACHE_SIZE) && + poff == 0 && pg->count == PAGE_SIZE) && ergo(i == page_count - 1, poff == 0)), "i: %d/%d pg: %p off: %llu, count: %u\n", i, page_count, pg, pg->off, pg->count); @@ -1837,7 +1837,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli, oap->oap_count; else LASSERT(oap->oap_page_off + oap->oap_count == - PAGE_CACHE_SIZE); + PAGE_SIZE); if (oap->oap_interrupted) interrupted = true; } @@ -1890,7 +1890,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli, list_splice_init(ext_list, &aa->aa_exts); spin_lock(&cli->cl_loi_list_lock); - starting_offset >>= PAGE_CACHE_SHIFT; + starting_offset >>= PAGE_SHIFT; if (cmd == OBD_BRW_READ) { cli->cl_r_in_flight++; lprocfs_oh_tally_log2(&cli->cl_read_page_hist, page_count); @@ -2563,7 +2563,7 @@ static int osc_reconnect(const struct lu_env *env, if (data->ocd_connect_flags & OBD_CONNECT_GRANT_PARAM) grant += cli->cl_dirty_grant; else - grant += cli->cl_dirty_pages << PAGE_CACHE_SHIFT; + grant += cli->cl_dirty_pages << PAGE_SHIFT; data->ocd_grant = grant ? 
: 2 * cli_brw_size(obd); lost_grant = cli->cl_lost_grant; cli->cl_lost_grant = 0; diff --git a/lustre/osd-ldiskfs/osd_handler.c b/lustre/osd-ldiskfs/osd_handler.c index fd97306..65acd4d 100644 --- a/lustre/osd-ldiskfs/osd_handler.c +++ b/lustre/osd-ldiskfs/osd_handler.c @@ -6526,7 +6526,7 @@ static int osd_mount(const struct lu_env *env, /* Glom up mount options */ if (*options != '\0') strcat(options, ","); - strlcat(options, "no_mbcache", PAGE_CACHE_SIZE); + strlcat(options, "no_mbcache", PAGE_SIZE); type = get_fs_type("ldiskfs"); if (!type) { diff --git a/lustre/osd-ldiskfs/osd_internal.h b/lustre/osd-ldiskfs/osd_internal.h index d44e389..0c966be 100644 --- a/lustre/osd-ldiskfs/osd_internal.h +++ b/lustre/osd-ldiskfs/osd_internal.h @@ -438,7 +438,7 @@ struct osd_it_ea_dirent { * there would be one ext3 readdir for every mdd readdir page. */ -#define OSD_IT_EA_BUFSIZE (PAGE_CACHE_SIZE + PAGE_CACHE_SIZE/4) +#define OSD_IT_EA_BUFSIZE (PAGE_SIZE + PAGE_SIZE/4) /** * This is iterator's in-memory data structure in interoperability @@ -490,7 +490,7 @@ struct osd_it_quota { struct list_head oiq_list; }; -#define MAX_BLOCKS_PER_PAGE (PAGE_CACHE_SIZE / 512) +#define MAX_BLOCKS_PER_PAGE (PAGE_SIZE / 512) struct osd_iobuf { wait_queue_head_t dr_wait; diff --git a/lustre/osd-ldiskfs/osd_io.c b/lustre/osd-ldiskfs/osd_io.c index 6996f63..8cdd545 100644 --- a/lustre/osd-ldiskfs/osd_io.c +++ b/lustre/osd-ldiskfs/osd_io.c @@ -82,7 +82,7 @@ static int __osd_init_iobuf(struct osd_device *d, struct osd_iobuf *iobuf, iobuf->dr_rw = rw; iobuf->dr_init_at = line; - blocks = pages * (PAGE_CACHE_SIZE >> osd_sb(d)->s_blocksize_bits); + blocks = pages * (PAGE_SIZE >> osd_sb(d)->s_blocksize_bits); if (iobuf->dr_bl_buf.lb_len >= blocks * sizeof(iobuf->dr_blocks[0])) { LASSERT(iobuf->dr_pg_buf.lb_len >= pages * sizeof(iobuf->dr_pages[0])); @@ -97,7 +97,7 @@ static int __osd_init_iobuf(struct osd_device *d, struct osd_iobuf *iobuf, CDEBUG(D_OTHER, "realloc %u for %u (%u) pages\n", (unsigned)(pages * sizeof(iobuf->dr_pages[0])), i, pages); pages = i; - blocks = pages * (PAGE_CACHE_SIZE >> osd_sb(d)->s_blocksize_bits); + blocks = pages * (PAGE_SIZE >> osd_sb(d)->s_blocksize_bits); iobuf->dr_max_pages = 0; CDEBUG(D_OTHER, "realloc %u for %u blocks\n", (unsigned)(blocks * sizeof(iobuf->dr_blocks[0])), blocks); @@ -258,7 +258,7 @@ static int can_be_merged(struct bio *bio, sector_t sector) static int osd_do_bio(struct osd_device *osd, struct inode *inode, struct osd_iobuf *iobuf) { - int blocks_per_page = PAGE_CACHE_SIZE >> inode->i_blkbits; + int blocks_per_page = PAGE_SIZE >> inode->i_blkbits; struct page **pages = iobuf->dr_pages; int npages = iobuf->dr_npages; sector_t *blocks = iobuf->dr_blocks; @@ -390,8 +390,8 @@ static int osd_map_remote_to_local(loff_t offset, ssize_t len, int *nrpages, *nrpages = 0; while (len > 0) { - int poff = offset & (PAGE_CACHE_SIZE - 1); - int plen = PAGE_CACHE_SIZE - poff; + int poff = offset & (PAGE_SIZE - 1); + int plen = PAGE_SIZE - poff; if (plen > len) plen = len; @@ -422,7 +422,7 @@ static struct page *osd_get_page(struct dt_object *dt, loff_t offset, int rw) LASSERT(inode); - page = find_or_create_page(inode->i_mapping, offset >> PAGE_CACHE_SHIFT, + page = find_or_create_page(inode->i_mapping, offset >> PAGE_SHIFT, GFP_NOFS | __GFP_HIGHMEM); if (unlikely(page == NULL)) lprocfs_counter_add(d->od_stats, LPROC_OSD_NO_PAGE, 1); @@ -762,7 +762,7 @@ map: static int osd_ldiskfs_map_nblocks(struct inode *inode, unsigned long index, int clen, sector_t *blocks, int create) { - int 
blocks_per_page = PAGE_CACHE_SIZE >> inode->i_blkbits; + int blocks_per_page = PAGE_SIZE >> inode->i_blkbits; struct bpointers bp; int err; @@ -788,7 +788,7 @@ static int osd_ldiskfs_map_bm_inode_pages(struct inode *inode, struct page **page, int pages, sector_t *blocks, int create) { - int blocks_per_page = PAGE_CACHE_SIZE >> inode->i_blkbits; + int blocks_per_page = PAGE_SIZE >> inode->i_blkbits; pgoff_t bitmap_max_page_index; sector_t *b; int rc = 0, i; @@ -848,7 +848,7 @@ static int osd_ldiskfs_map_ext_inode_pages(struct inode *inode, /* look for next extent */ fp = NULL; - blocks += clen * (PAGE_CACHE_SIZE >> inode->i_blkbits); + blocks += clen * (PAGE_SIZE >> inode->i_blkbits); } if (fp) @@ -879,7 +879,7 @@ static int osd_ldiskfs_map_inode_pages(struct inode *inode, struct page **page, int pages, sector_t *blocks, int create) { - int blocks_per_page = PAGE_CACHE_SIZE >> inode->i_blkbits; + int blocks_per_page = PAGE_SIZE >> inode->i_blkbits; int rc = 0, i = 0; struct page *fp = NULL; int clen = 0; @@ -982,7 +982,7 @@ static int osd_write_prep(const struct lu_env *env, struct dt_object *dt, RETURN(rc); isize = i_size_read(inode); - maxidx = ((isize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) - 1; + maxidx = ((isize + PAGE_SIZE - 1) >> PAGE_SHIFT) - 1; if (osd->od_writethrough_cache) cache = 1; @@ -1003,7 +1003,7 @@ static int osd_write_prep(const struct lu_env *env, struct dt_object *dt, */ ClearPageUptodate(lnb[i].lnb_page); - if (lnb[i].lnb_len == PAGE_CACHE_SIZE) + if (lnb[i].lnb_len == PAGE_SIZE) continue; if (maxidx >= lnb[i].lnb_page->index) { @@ -1018,7 +1018,7 @@ static int osd_write_prep(const struct lu_env *env, struct dt_object *dt, off = (lnb[i].lnb_page_offset + lnb[i].lnb_len) & ~PAGE_MASK; if (off) - memset(p + off, 0, PAGE_CACHE_SIZE - off); + memset(p + off, 0, PAGE_SIZE - off); kunmap(lnb[i].lnb_page); } } @@ -1125,7 +1125,7 @@ static int osd_declare_write_commit(const struct lu_env *env, extents++; if (!osd_is_mapped(dt, lnb[i].lnb_file_offset, &extent)) - quota_space += PAGE_CACHE_SIZE; + quota_space += PAGE_SIZE; /* ignore quota for the whole request if any page is from * client cache or written by root. 
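The osd-ldiskfs hunks above lean on two related idioms: PAGE_SIZE >> i_blkbits for the number of filesystem blocks per page, and the poff/plen loop of osd_map_remote_to_local() for splitting a byte range into per-page fragments. A standalone sketch with assumed sizes (4 KiB pages, 1 KiB blocks):

    #include <stdio.h>

    #define PAGE_SHIFT 12                 /* assumed: 4 KiB pages */
    #define PAGE_SIZE  (1L << PAGE_SHIFT)
    #define BLKBITS    10                 /* assumed: 1 KiB ldiskfs blocks */

    int main(void)
    {
        long blocks_per_page = PAGE_SIZE >> BLKBITS;  /* 4 blocks per page */
        long offset = 5000, len = 10000;

        printf("blocks per page: %ld\n", blocks_per_page);
        /* split [offset, offset + len) into per-page fragments */
        while (len > 0) {
            long poff = offset & (PAGE_SIZE - 1);  /* offset within the page */
            long plen = PAGE_SIZE - poff;          /* room left in the page */
            if (plen > len)
                plen = len;
            printf("page %ld: poff=%ld plen=%ld\n",
                   offset >> PAGE_SHIFT, poff, plen);
            offset += plen;
            len    -= plen;
        }
        return 0;
    }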
diff --git a/lustre/osd-ldiskfs/osd_lproc.c b/lustre/osd-ldiskfs/osd_lproc.c index 8171fbe..48a8624 100644 --- a/lustre/osd-ldiskfs/osd_lproc.c +++ b/lustre/osd-ldiskfs/osd_lproc.c @@ -63,7 +63,7 @@ void osd_brw_stats_update(struct osd_device *osd, struct osd_iobuf *iobuf) if (unlikely(nr_pages == 0)) return; - blocks_per_page = PAGE_CACHE_SIZE >> osd_sb(osd)->s_blocksize_bits; + blocks_per_page = PAGE_SIZE >> osd_sb(osd)->s_blocksize_bits; lprocfs_oh_tally_log2(&s->hist[BRW_R_PAGES+rw], nr_pages); diff --git a/lustre/osd-zfs/osd_io.c b/lustre/osd-zfs/osd_io.c index eed35f5..6ed983b 100644 --- a/lustre/osd-zfs/osd_io.c +++ b/lustre/osd-zfs/osd_io.c @@ -143,7 +143,7 @@ static ssize_t osd_read(const struct lu_env *env, struct dt_object *dt, buf->lb_buf, DMU_READ_PREFETCH); record_end_io(osd, READ, cfs_time_current() - start, size, - size >> PAGE_CACHE_SHIFT); + size >> PAGE_SHIFT); if (rc == 0) { rc = size; *pos += size; @@ -239,7 +239,7 @@ static ssize_t osd_write(const struct lu_env *env, struct dt_object *dt, out: record_end_io(osd, WRITE, 0, buf->lb_len, - buf->lb_len >> PAGE_CACHE_SHIFT); + buf->lb_len >> PAGE_SHIFT); RETURN(rc); } @@ -361,8 +361,8 @@ static int osd_bufs_get_read(const struct lu_env *env, struct osd_object *obj, dbf = (void *) ((unsigned long)dbp[i] | 1); while (tocpy > 0) { - thispage = PAGE_CACHE_SIZE; - thispage -= bufoff & (PAGE_CACHE_SIZE - 1); + thispage = PAGE_SIZE; + thispage -= bufoff & (PAGE_SIZE - 1); thispage = min(tocpy, thispage); lnb->lnb_rc = 0; @@ -439,7 +439,7 @@ static int osd_bufs_get_write(const struct lu_env *env, struct osd_object *obj, /* go over pages arcbuf contains, put them as * local niobufs for ptlrpc's bulks */ while (sz_in_block > 0) { - plen = min_t(int, sz_in_block, PAGE_CACHE_SIZE); + plen = min_t(int, sz_in_block, PAGE_SIZE); lnb[i].lnb_file_offset = off; lnb[i].lnb_page_offset = 0; @@ -473,7 +473,7 @@ static int osd_bufs_get_write(const struct lu_env *env, struct osd_object *obj, /* can't use zerocopy, allocate temp. 
buffers */ while (sz_in_block > 0) { - plen = min_t(int, sz_in_block, PAGE_CACHE_SIZE); + plen = min_t(int, sz_in_block, PAGE_SIZE); lnb[i].lnb_file_offset = off; lnb[i].lnb_page_offset = 0; diff --git a/lustre/osp/osp_object.c b/lustre/osp/osp_object.c index c0096c9..0678535 100644 --- a/lustre/osp/osp_object.c +++ b/lustre/osp/osp_object.c @@ -1669,7 +1669,7 @@ static int osp_it_fetch(const struct lu_env *env, struct osp_it *it) /* 1MB bulk */ npages = min_t(unsigned int, OFD_MAX_BRW_SIZE, 1 << 20); - npages /= PAGE_CACHE_SIZE; + npages /= PAGE_SIZE; OBD_ALLOC(pages, npages * sizeof(*pages)); if (pages == NULL) @@ -1730,7 +1730,7 @@ static int osp_it_fetch(const struct lu_env *env, struct osp_it *it) for (i = 0; i < npages; i++) desc->bd_frag_ops->add_kiov_frag(desc, pages[i], 0, - PAGE_CACHE_SIZE); + PAGE_SIZE); ptlrpc_request_set_replen(req); rc = ptlrpc_queue_wait(req); @@ -1748,7 +1748,7 @@ static int osp_it_fetch(const struct lu_env *env, struct osp_it *it) GOTO(out, rc = -EPROTO); npages = (ii->ii_count + LU_PAGE_COUNT - 1) >> - (PAGE_CACHE_SHIFT - LU_PAGE_SHIFT); + (PAGE_SHIFT - LU_PAGE_SHIFT); if (npages > it->ooi_total_npages) { CERROR("%s: returned more pages than expected, %u > %u\n", osp->opd_obd->obd_name, npages, it->ooi_total_npages); diff --git a/lustre/ptlrpc/client.c b/lustre/ptlrpc/client.c index 18be925..53a8603 100644 --- a/lustre/ptlrpc/client.c +++ b/lustre/ptlrpc/client.c @@ -212,7 +212,7 @@ void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc, LASSERT(page != NULL); LASSERT(pageoffset >= 0); LASSERT(len > 0); - LASSERT(pageoffset + len <= PAGE_CACHE_SIZE); + LASSERT(pageoffset + len <= PAGE_SIZE); LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type)); kiov = &BD_GET_KIOV(desc, desc->bd_iov_count); diff --git a/lustre/ptlrpc/import.c b/lustre/ptlrpc/import.c index ef0df00..d41340a 100644 --- a/lustre/ptlrpc/import.c +++ b/lustre/ptlrpc/import.c @@ -892,7 +892,7 @@ static int ptlrpc_connect_set_flags(struct obd_import *imp, if (ocd->ocd_connect_flags & OBD_CONNECT_BRW_SIZE) cli->cl_max_pages_per_rpc = - min(ocd->ocd_brw_size >> PAGE_CACHE_SHIFT, + min(ocd->ocd_brw_size >> PAGE_SHIFT, cli->cl_max_pages_per_rpc); else if (imp->imp_connect_op == MDS_CONNECT || imp->imp_connect_op == MGS_CONNECT) diff --git a/lustre/ptlrpc/lproc_ptlrpc.c b/lustre/ptlrpc/lproc_ptlrpc.c index 61a6136..05f8fcb 100644 --- a/lustre/ptlrpc/lproc_ptlrpc.c +++ b/lustre/ptlrpc/lproc_ptlrpc.c @@ -310,8 +310,8 @@ ptlrpc_lprocfs_req_history_max_seq_write(struct file *file, /* This sanity check is more of an insanity check; we can still * hose a kernel by allowing the request history to grow too * far. 
*/ - bufpages = (svc->srv_buf_size + PAGE_CACHE_SIZE - 1) >> - PAGE_CACHE_SHIFT; + bufpages = (svc->srv_buf_size + PAGE_SIZE - 1) >> + PAGE_SHIFT; if (val > totalram_pages/(2 * bufpages)) return -ERANGE; @@ -1261,7 +1261,7 @@ lprocfs_import_seq_write(struct file *file, const char __user *buffer, const char prefix[] = "connection="; const int prefix_len = sizeof(prefix) - 1; - if (count > PAGE_CACHE_SIZE - 1 || count <= prefix_len) + if (count > PAGE_SIZE - 1 || count <= prefix_len) return -EINVAL; OBD_ALLOC(kbuf, count + 1); diff --git a/lustre/ptlrpc/nodemap_storage.c b/lustre/ptlrpc/nodemap_storage.c index fb12f09..f4abb34 100644 --- a/lustre/ptlrpc/nodemap_storage.c +++ b/lustre/ptlrpc/nodemap_storage.c @@ -1232,8 +1232,8 @@ int nodemap_get_config_req(struct obd_device *mgs_obd, RETURN(-EINVAL); rdpg.rp_count = (body->mcb_units << body->mcb_bits); - rdpg.rp_npages = (rdpg.rp_count + PAGE_CACHE_SIZE - 1) >> - PAGE_CACHE_SHIFT; + rdpg.rp_npages = (rdpg.rp_count + PAGE_SIZE - 1) >> + PAGE_SHIFT; if (rdpg.rp_npages > PTLRPC_MAX_BRW_PAGES) RETURN(-EINVAL); @@ -1269,7 +1269,7 @@ int nodemap_get_config_req(struct obd_device *mgs_obd, res->mcr_offset = nodemap_ii.ii_hash_end; res->mcr_size = bytes; - page_count = (bytes + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; + page_count = (bytes + PAGE_SIZE - 1) >> PAGE_SHIFT; LASSERT(page_count <= rdpg.rp_count); desc = ptlrpc_prep_bulk_exp(req, page_count, 1, PTLRPC_BULK_PUT_SOURCE | @@ -1281,8 +1281,8 @@ int nodemap_get_config_req(struct obd_device *mgs_obd, for (i = 0; i < page_count && bytes > 0; i++) { ptlrpc_prep_bulk_page_pin(desc, rdpg.rp_pages[i], 0, - min_t(int, bytes, PAGE_CACHE_SIZE)); - bytes -= PAGE_CACHE_SIZE; + min_t(int, bytes, PAGE_SIZE)); + bytes -= PAGE_SIZE; } rc = target_bulk_io(req->rq_export, desc, &lwi); diff --git a/lustre/ptlrpc/recover.c b/lustre/ptlrpc/recover.c index 7d23518..50db39b 100644 --- a/lustre/ptlrpc/recover.c +++ b/lustre/ptlrpc/recover.c @@ -212,7 +212,7 @@ int ptlrpc_resend(struct obd_import *imp) } list_for_each_entry_safe(req, next, &imp->imp_sending_list, rq_list) { - LASSERTF((long)req > PAGE_CACHE_SIZE && req != LP_POISON, + LASSERTF((long)req > PAGE_SIZE && req != LP_POISON, "req %p bad\n", req); LASSERTF(req->rq_type != LI_POISON, "req %p freed\n", req); diff --git a/lustre/ptlrpc/sec_bulk.c b/lustre/ptlrpc/sec_bulk.c index 720dd8a..df1dbe6 100644 --- a/lustre/ptlrpc/sec_bulk.c +++ b/lustre/ptlrpc/sec_bulk.c @@ -53,7 +53,7 @@ #include "ptlrpc_internal.h" -static int mult = 20 - PAGE_CACHE_SHIFT; +static int mult = 20 - PAGE_SHIFT; static int enc_pool_max_memory_mb; module_param(enc_pool_max_memory_mb, int, 0644); MODULE_PARM_DESC(enc_pool_max_memory_mb, @@ -64,7 +64,7 @@ MODULE_PARM_DESC(enc_pool_max_memory_mb, ****************************************/ -#define PTRS_PER_PAGE (PAGE_CACHE_SIZE / sizeof(void *)) +#define PTRS_PER_PAGE (PAGE_SIZE / sizeof(void *)) #define PAGES_PER_POOL (PTRS_PER_PAGE) #define IDLE_IDX_MAX (100) @@ -222,7 +222,7 @@ static void enc_pools_release_free_pages(long npages) /* free unused pools */ while (p_idx_max1 < p_idx_max2) { LASSERT(page_pools.epp_pools[p_idx_max2]); - OBD_FREE(page_pools.epp_pools[p_idx_max2], PAGE_CACHE_SIZE); + OBD_FREE(page_pools.epp_pools[p_idx_max2], PAGE_SIZE); page_pools.epp_pools[p_idx_max2] = NULL; p_idx_max2--; } @@ -328,7 +328,7 @@ static unsigned long enc_pools_cleanup(struct page ***pools, int npools) cleaned++; } } - OBD_FREE(pools[i], PAGE_CACHE_SIZE); + OBD_FREE(pools[i], PAGE_SIZE); pools[i] = NULL; } } @@ -448,7 +448,7 @@ static int 
enc_pools_add_pages(int npages) goto out; for (i = 0; i < npools; i++) { - OBD_ALLOC(pools[i], PAGE_CACHE_SIZE); + OBD_ALLOC(pools[i], PAGE_SIZE); if (pools[i] == NULL) goto out_pools; diff --git a/lustre/quota/qsd_reint.c b/lustre/quota/qsd_reint.c index c516d0f..d9a5537 100644 --- a/lustre/quota/qsd_reint.c +++ b/lustre/quota/qsd_reint.c @@ -196,7 +196,7 @@ static int qsd_reint_index(const struct lu_env *env, struct qsd_qtype_info *qqi, /* let's do a 1MB bulk */ npages = min_t(unsigned int, OFD_MAX_BRW_SIZE, 1 << 20); - npages /= PAGE_CACHE_SIZE; + npages /= PAGE_SIZE; /* allocate pages for bulk index read */ OBD_ALLOC(pages, npages * sizeof(*pages)); @@ -256,7 +256,7 @@ repeat: ver = ii->ii_version; pg_cnt = (ii->ii_count + (LU_PAGE_COUNT) - 1); - pg_cnt >>= PAGE_CACHE_SHIFT - LU_PAGE_SHIFT; + pg_cnt >>= PAGE_SHIFT - LU_PAGE_SHIFT; if (pg_cnt > npages) { CERROR("%s: master returned more pages than expected, %u > %u" diff --git a/lustre/quota/qsd_request.c b/lustre/quota/qsd_request.c index d1cd515..7c382f5 100644 --- a/lustre/quota/qsd_request.c +++ b/lustre/quota/qsd_request.c @@ -386,7 +386,7 @@ int qsd_fetch_index(const struct lu_env *env, struct obd_export *exp, /* req now owns desc and will free it when it gets freed */ for (i = 0; i < npages; i++) desc->bd_frag_ops->add_kiov_frag(desc, pages[i], 0, - PAGE_CACHE_SIZE); + PAGE_SIZE); /* pack index information in request */ req_ii = req_capsule_client_get(&req->rq_pill, &RMF_IDX_INFO); diff --git a/lustre/target/tgt_handler.c b/lustre/target/tgt_handler.c index a76c023..4ca8be3 100644 --- a/lustre/target/tgt_handler.c +++ b/lustre/target/tgt_handler.c @@ -1043,13 +1043,13 @@ int tgt_sendpage(struct tgt_session_info *tsi, struct lu_rdpg *rdpg, int nob) RETURN(-ENOMEM); if (!(exp_connect_flags(exp) & OBD_CONNECT_BRW_SIZE)) - /* old client requires reply size in it's PAGE_CACHE_SIZE, + /* old client requires reply size in it's PAGE_SIZE, * which is rdpg->rp_count */ nob = rdpg->rp_count; for (i = 0, tmpcount = nob; i < rdpg->rp_npages && tmpcount > 0; i++, tmpcount -= tmpsize) { - tmpsize = min_t(int, tmpcount, PAGE_CACHE_SIZE); + tmpsize = min_t(int, tmpcount, PAGE_SIZE); desc->bd_frag_ops->add_kiov_frag(desc, rdpg->rp_pages[i], 0, tmpsize); } @@ -1110,7 +1110,7 @@ static int tgt_obd_idx_read(struct tgt_session_info *tsi) GOTO(out, rc = -EFAULT); rdpg->rp_count = min_t(unsigned int, req_ii->ii_count << LU_PAGE_SHIFT, exp_max_brw_size(tsi->tsi_exp)); - rdpg->rp_npages = (rdpg->rp_count + PAGE_CACHE_SIZE -1) >> PAGE_CACHE_SHIFT; + rdpg->rp_npages = (rdpg->rp_count + PAGE_SIZE - 1) >> PAGE_SHIFT; /* allocate pages to store the containers */ OBD_ALLOC(rdpg->rp_pages, rdpg->rp_npages * sizeof(rdpg->rp_pages[0]));
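A recurring idiom in these server-side hunks (tgt_obd_idx_read(), nodemap_get_config_req(), mgs_get_ir_logs(), the request-history sanity check) is rounding a byte count up to whole pages: (bytes + PAGE_SIZE - 1) >> PAGE_SHIFT. A minimal sketch, again assuming 4 KiB pages:

    #include <assert.h>

    #define PAGE_SHIFT 12                 /* assumed: 4 KiB pages */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    /* how many whole pages are needed to hold 'bytes' bytes */
    static unsigned long bytes_to_pages(unsigned long bytes)
    {
        return (bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;
    }

    int main(void)
    {
        assert(bytes_to_pages(0) == 0);
        assert(bytes_to_pages(1) == 1);              /* a partial page counts */
        assert(bytes_to_pages(PAGE_SIZE) == 1);      /* exact fit */
        assert(bytes_to_pages(PAGE_SIZE + 1) == 2);  /* spills into a 2nd page */
        return 0;
    }

The form is division-free because PAGE_SIZE is a power of two, and overflow is not a concern at these call sites since rp_count is clamped to the export's maximum BRW size before the conversion.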