'cfs_list_splice_tail', 'list_splice_tail',
'cfs_list_t', 'struct list_head',
- 'CFS_PAGE_MASK', 'PAGE_CACHE_MASK or PAGE_MASK',
- 'CFS_PAGE_SIZE', 'PAGE_CACHE_SIZE or PAGE_SIZE',
+ 'CFS_PAGE_MASK', 'PAGE_MASK',
+ 'CFS_PAGE_SIZE', 'PAGE_SIZE',
+ 'PAGE_CACHE_MASK', 'PAGE_MASK',
+ 'PAGE_CACHE_SIZE', 'PAGE_SIZE',
+ 'PAGE_CACHE_SHIFT', 'PAGE_SHIFT',
'cfs_proc_dir_entry_t', 'struct proc_dir_entry',
#if BITS_PER_LONG == 32
/* limit to lowmem on 32-bit systems */
# define NUM_CACHEPAGES \
- min(totalram_pages, 1UL << (30 - PAGE_CACHE_SHIFT) * 3 / 4)
+ min(totalram_pages, 1UL << (30 - PAGE_SHIFT) * 3 / 4)
#else
# define NUM_CACHEPAGES totalram_pages
#endif
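/*
 * Illustrative note: '*' and '/' bind more tightly than '<<' in C, so
 * the expression above shifts by (30 - PAGE_SHIFT) * 3 / 4.  With 4KiB
 * pages (PAGE_SHIFT == 12) that is 1UL << 13 == 8192 pages, i.e. 32MiB
 * of cache pages on a 32-bit system (capped by totalram_pages).
 */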
#endif /* LIBCFS_DEBUG */
#ifndef LIBCFS_VMALLOC_SIZE
-#define LIBCFS_VMALLOC_SIZE (2 << PAGE_CACHE_SHIFT) /* 2 pages */
+#define LIBCFS_VMALLOC_SIZE (2 << PAGE_SHIFT) /* 2 pages */
#endif
#define LIBCFS_ALLOC_PRE(size, mask) \
max = TCD_MAX_PAGES;
} else {
max = (max / num_possible_cpus());
- max = (max << (20 - PAGE_CACHE_SHIFT));
+ max = (max << (20 - PAGE_SHIFT));
}
rc = cfs_tracefile_init(max);
{
struct mm_struct *mm;
char *buffer;
- int buf_len = PAGE_CACHE_SIZE;
+ int buf_len = PAGE_SIZE;
int key_len = strlen(key);
unsigned long addr;
int rc;
if (tcd->tcd_cur_pages > 0) {
__LASSERT(!list_empty(&tcd->tcd_pages));
tage = cfs_tage_from_list(tcd->tcd_pages.prev);
- if (tage->used + len <= PAGE_CACHE_SIZE)
+ if (tage->used + len <= PAGE_SIZE)
return tage;
}
* from here: this will lead to infinite recursion.
*/
- if (len > PAGE_CACHE_SIZE) {
+ if (len > PAGE_SIZE) {
printk(KERN_ERR
"cowardly refusing to write %lu bytes in a page\n", len);
return NULL;
for (i = 0; i < 2; i++) {
tage = cfs_trace_get_tage(tcd, needed + known_size + 1);
if (tage == NULL) {
- if (needed + known_size > PAGE_CACHE_SIZE)
+ if (needed + known_size > PAGE_SIZE)
mask |= D_ERROR;
cfs_trace_put_tcd(tcd);
string_buf = (char *)page_address(tage->page) +
tage->used + known_size;
- max_nob = PAGE_CACHE_SIZE - tage->used - known_size;
+ max_nob = PAGE_SIZE - tage->used - known_size;
if (max_nob <= 0) {
printk(KERN_EMERG "negative max_nob: %d\n",
max_nob);
__LASSERT(debug_buf == string_buf);
tage->used += needed;
- __LASSERT(tage->used <= PAGE_CACHE_SIZE);
+ __LASSERT(tage->used <= PAGE_SIZE);
console:
if ((mask & libcfs_printk) == 0) {
int cfs_trace_allocate_string_buffer(char **str, int nob)
{
- if (nob > 2 * PAGE_CACHE_SIZE) /* string must be "sensible" */
+ if (nob > 2 * PAGE_SIZE) /* string must be "sensible" */
return -EINVAL;
*str = kmalloc(nob, GFP_KERNEL | __GFP_ZERO);
}
mb /= num_possible_cpus();
- pages = mb << (20 - PAGE_CACHE_SHIFT);
+ pages = mb << (20 - PAGE_SHIFT);
cfs_tracefile_write_lock();
cfs_tracefile_read_unlock();
- return (total_pages >> (20 - PAGE_CACHE_SHIFT)) + 1;
+ return (total_pages >> (20 - PAGE_SHIFT)) + 1;
}
static int tracefiled(void *arg)
extern int libcfs_panic_in_progress;
extern int cfs_trace_max_debug_mb(void);
-#define TCD_MAX_PAGES (5 << (20 - PAGE_CACHE_SHIFT))
+#define TCD_MAX_PAGES (5 << (20 - PAGE_SHIFT))
#define TCD_STOCK_PAGES (TCD_MAX_PAGES)
#define CFS_TRACEFILE_SIZE (500 << 20)
/*
* Private declare for tracefile
*/
-#define TCD_MAX_PAGES (5 << (20 - PAGE_CACHE_SHIFT))
+#define TCD_MAX_PAGES (5 << (20 - PAGE_SHIFT))
#define TCD_STOCK_PAGES (TCD_MAX_PAGES)
#define CFS_TRACEFILE_SIZE (500 << 20)
do { \
__LASSERT(tage != NULL); \
__LASSERT(tage->page != NULL); \
- __LASSERT(tage->used <= PAGE_CACHE_SIZE); \
+ __LASSERT(tage->used <= PAGE_SIZE); \
__LASSERT(page_count(tage->page) > 0); \
} while (0)
/**
* Starting offset of the fragment within the page. Note that the
* end of the fragment must not pass the end of the page; i.e.,
- * kiov_len + kiov_offset <= PAGE_CACHE_SIZE.
+ * kiov_len + kiov_offset <= PAGE_SIZE.
*/
unsigned int kiov_offset;
} lnet_kiov_t;
for (nob = i = 0; i < niov; i++) {
if ((kiov[i].kiov_offset != 0 && i > 0) ||
(kiov[i].kiov_offset + kiov[i].kiov_len !=
- PAGE_CACHE_SIZE && i < niov - 1))
+ PAGE_SIZE && i < niov - 1))
return NULL;
pages[i] = kiov[i].kiov_page;
for (i = 0; i < (int)niov; i++) {
/* We take the page pointer on trust */
if (lmd->md_iov.kiov[i].kiov_offset +
- lmd->md_iov.kiov[i].kiov_len > PAGE_CACHE_SIZE)
+ lmd->md_iov.kiov[i].kiov_len > PAGE_SIZE)
return -EINVAL; /* invalid length */
total_length += lmd->md_iov.kiov[i].kiov_len;
if (len <= frag_len) {
dst->kiov_len = len;
- LASSERT (dst->kiov_offset + dst->kiov_len <= PAGE_CACHE_SIZE);
+ LASSERT(dst->kiov_offset + dst->kiov_len <= PAGE_SIZE);
return niov;
}
dst->kiov_len = frag_len;
- LASSERT (dst->kiov_offset + dst->kiov_len <= PAGE_CACHE_SIZE);
+ LASSERT(dst->kiov_offset + dst->kiov_len <= PAGE_SIZE);
len -= frag_len;
dst++;
rbp = &the_lnet.ln_rtrpools[cpt][0];
LASSERT(msg->msg_len <= LNET_MTU);
- while (msg->msg_len > (unsigned int)rbp->rbp_npages * PAGE_CACHE_SIZE) {
+ while (msg->msg_len > (unsigned int)rbp->rbp_npages * PAGE_SIZE) {
rbp++;
LASSERT(rbp < &the_lnet.ln_rtrpools[cpt][LNET_NRBPOOLS]);
}
nalloc = 16; /* first guess at max interfaces */
toobig = 0;
for (;;) {
- if (nalloc * sizeof(*ifr) > PAGE_CACHE_SIZE) {
+ if (nalloc * sizeof(*ifr) > PAGE_SIZE) {
toobig = 1;
- nalloc = PAGE_CACHE_SIZE/sizeof(*ifr);
+ nalloc = PAGE_SIZE / sizeof(*ifr);
CWARN("Too many interfaces: only enumerating "
"first %d\n", nalloc);
}
#define LNET_NRB_SMALL_PAGES 1
#define LNET_NRB_LARGE_MIN 256 /* min value for each CPT */
#define LNET_NRB_LARGE (LNET_NRB_LARGE_MIN * 4)
-#define LNET_NRB_LARGE_PAGES ((LNET_MTU + PAGE_CACHE_SIZE - 1) >> \
- PAGE_CACHE_SHIFT)
+#define LNET_NRB_LARGE_PAGES ((LNET_MTU + PAGE_SIZE - 1) >> \
+ PAGE_SHIFT)
static char *forwarding = "";
module_param(forwarding, charp, 0444);
return NULL;
}
- rb->rb_kiov[i].kiov_len = PAGE_CACHE_SIZE;
+ rb->rb_kiov[i].kiov_len = PAGE_SIZE;
rb->rb_kiov[i].kiov_offset = 0;
rb->rb_kiov[i].kiov_page = page;
}
npg = breq->blk_npg;
/* NB: this is not going to work for variable page size,
* but we have to keep it for compatibility */
- len = npg * PAGE_CACHE_SIZE;
+ len = npg * PAGE_SIZE;
} else {
test_bulk_req_v1_t *breq = &tsi->tsi_u.bulk_v1;
opc = breq->blk_opc;
flags = breq->blk_flags;
len = breq->blk_len;
- npg = (len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+ npg = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
}
if (npg > LNET_MAX_IOV || npg <= 0)
if (pattern == LST_BRW_CHECK_SIMPLE) {
memcpy(addr, &magic, BRW_MSIZE);
- addr += PAGE_CACHE_SIZE - BRW_MSIZE;
+ addr += PAGE_SIZE - BRW_MSIZE;
memcpy(addr, &magic, BRW_MSIZE);
return;
}
if (pattern == LST_BRW_CHECK_FULL) {
- for (i = 0; i < PAGE_CACHE_SIZE / BRW_MSIZE; i++)
+ for (i = 0; i < PAGE_SIZE / BRW_MSIZE; i++)
memcpy(addr + i * BRW_MSIZE, &magic, BRW_MSIZE);
return;
}
data = *((__u64 *) addr);
if (data != magic) goto bad_data;
- addr += PAGE_CACHE_SIZE - BRW_MSIZE;
+ addr += PAGE_SIZE - BRW_MSIZE;
data = *((__u64 *) addr);
if (data != magic) goto bad_data;
}
if (pattern == LST_BRW_CHECK_FULL) {
- for (i = 0; i < PAGE_CACHE_SIZE / BRW_MSIZE; i++) {
+ for (i = 0; i < PAGE_SIZE / BRW_MSIZE; i++) {
data = *(((__u64 *) addr) + i);
if (data != magic) goto bad_data;
}
opc = breq->blk_opc;
flags = breq->blk_flags;
npg = breq->blk_npg;
- len = npg * PAGE_CACHE_SIZE;
+ len = npg * PAGE_SIZE;
} else {
test_bulk_req_v1_t *breq = &tsi->tsi_u.bulk_v1;
opc = breq->blk_opc;
flags = breq->blk_flags;
len = breq->blk_len;
- npg = (len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+ npg = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
}
rc = sfw_create_test_rpc(tsu, dest, sn->sn_features, npg, len, &rpc);
reply->brw_status = EINVAL;
return 0;
}
- npg = reqst->brw_len >> PAGE_CACHE_SHIFT;
+ npg = reqst->brw_len >> PAGE_SHIFT;
} else {
- npg = (reqst->brw_len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+ npg = (reqst->brw_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
}
replymsg->msg_ses_feats = reqstmsg->msg_ses_feats;
if (args->lstio_tes_param != NULL &&
(args->lstio_tes_param_len <= 0 ||
args->lstio_tes_param_len >
- PAGE_CACHE_SIZE - sizeof(lstcon_test_t)))
+ PAGE_SIZE - sizeof(lstcon_test_t)))
return -EINVAL;
LIBCFS_ALLOC(batch_name, args->lstio_tes_bat_nmlen + 1);
opc = data->ioc_u32[0];
- if (data->ioc_plen1 > PAGE_CACHE_SIZE)
+ if (data->ioc_plen1 > PAGE_SIZE)
return -EINVAL;
LIBCFS_ALLOC(buf, data->ioc_plen1);
test_bulk_req_t *brq = &req->tsr_u.bulk_v0;
brq->blk_opc = param->blk_opc;
- brq->blk_npg = (param->blk_size + PAGE_CACHE_SIZE - 1) /
- PAGE_CACHE_SIZE;
+ brq->blk_npg = (param->blk_size + PAGE_SIZE - 1) /
+ PAGE_SIZE;
brq->blk_flags = param->blk_flags;
return 0;
if (transop == LST_TRANS_TSBCLIADD) {
npg = sfw_id_pages(test->tes_span);
nob = (feats & LST_FEAT_BULK_LEN) == 0 ?
- npg * PAGE_CACHE_SIZE :
+ npg * PAGE_SIZE :
sizeof(lnet_process_id_packed_t) * test->tes_span;
}
LASSERT(nob > 0);
len = (feats & LST_FEAT_BULK_LEN) == 0 ?
- PAGE_CACHE_SIZE : min_t(int, nob, PAGE_CACHE_SIZE);
+ PAGE_SIZE : min_t(int, nob, PAGE_SIZE);
nob -= len;
bulk->bk_iovs[i].kiov_offset = 0;
int len;
if ((sn->sn_features & LST_FEAT_BULK_LEN) == 0) {
- len = npg * PAGE_CACHE_SIZE;
+ len = npg * PAGE_SIZE;
} else {
len = sizeof(lnet_process_id_packed_t) *
static int
srpc_add_bulk_page(srpc_bulk_t *bk, struct page *pg, int i, int nob)
{
- nob = min(nob, (int)PAGE_CACHE_SIZE);
+	nob = min_t(int, nob, PAGE_SIZE);
LASSERT(nob > 0);
LASSERT(i >= 0 && i < bk->bk_niov);
} tsi_u;
} sfw_test_instance_t;
-/* XXX: trailing (PAGE_CACHE_SIZE % sizeof(lnet_process_id_t)) bytes at
+/* XXX: trailing (PAGE_SIZE % sizeof(lnet_process_id_t)) bytes at
* the end of pages are not used */
#define SFW_MAX_CONCUR LST_MAX_CONCUR
-#define SFW_ID_PER_PAGE (PAGE_CACHE_SIZE / sizeof(lnet_process_id_packed_t))
+#define SFW_ID_PER_PAGE (PAGE_SIZE / sizeof(lnet_process_id_packed_t))
#define SFW_MAX_NDESTS (LNET_MAX_IOV * SFW_ID_PER_PAGE)
#define sfw_id_pages(n) (((n) + SFW_ID_PER_PAGE - 1) / SFW_ID_PER_PAGE)
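/*
 * Worked example (illustrative, assuming a 12-byte packed id, i.e. a
 * 64-bit nid plus a 32-bit pid): with 4KiB pages SFW_ID_PER_PAGE ==
 * 4096 / 12 == 341 ids, so 4096 % 12 == 4 trailing bytes per page go
 * unused, as the XXX comment above notes.
 */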
RETURN(err_serious(-EPROTO));
req_capsule_set_size(tsi->tsi_pill, &RMF_GENERIC_DATA, RCL_SERVER,
- PAGE_CACHE_SIZE);
+ PAGE_SIZE);
rc = req_capsule_server_pack(tsi->tsi_pill);
if (unlikely(rc != 0))
data = req_capsule_server_get(tsi->tsi_pill, &RMF_GENERIC_DATA);
rc = fld_server_read(tsi->tsi_env, lu_site2seq(site)->ss_server_fld,
- in, data, PAGE_CACHE_SIZE);
+ in, data, PAGE_SIZE);
RETURN(rc);
}
RETURN(-ENOMEM);
req_capsule_set_size(&req->rq_pill, &RMF_GENERIC_DATA,
- RCL_SERVER, PAGE_CACHE_SIZE);
+ RCL_SERVER, PAGE_SIZE);
break;
default:
rc = -EINVAL;
{ \
type *value; \
\
- CLASSERT(PAGE_CACHE_SIZE >= sizeof (*value)); \
+ CLASSERT(PAGE_SIZE >= sizeof(*value)); \
\
OBD_ALLOC_PTR(value); \
if (value == NULL) \
* MDS_READPAGE page size
*
* This is the directory page size packed in MDS_READPAGE RPC.
- * It's different than PAGE_CACHE_SIZE because the client needs to
+ * It's different than PAGE_SIZE because the client needs to
* access the struct lu_dirpage header packed at the beginning of
* the "page" and without this there isn't any way to know find the
- * lu_dirpage header is if client and server PAGE_CACHE_SIZE differ.
+ * lu_dirpage header is if client and server PAGE_SIZE differ.
*/
#define LU_PAGE_SHIFT 12
#define LU_PAGE_SIZE (1UL << LU_PAGE_SHIFT)
#define LU_PAGE_MASK (~(LU_PAGE_SIZE - 1))
-#define LU_PAGE_COUNT (1 << (PAGE_CACHE_SHIFT - LU_PAGE_SHIFT))
+#define LU_PAGE_COUNT (1 << (PAGE_SHIFT - LU_PAGE_SHIFT))
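/*
 * Worked example (illustrative): LU_PAGE_SIZE is fixed at 4KiB, so a
 * client with 4KiB VM pages has LU_PAGE_COUNT == 1, while a 64KiB-page
 * client (PAGE_SHIFT == 16) packs LU_PAGE_COUNT == 1 << (16 - 12) == 16
 * lu_dirpages into each VM page.
 */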
/** @} lu_dir */
#define ASSERT_MAX_SIZE_MB 60000ULL
#define ASSERT_PAGE_INDEX(index, OP) \
-do { if (index > ASSERT_MAX_SIZE_MB << (20 - PAGE_CACHE_SHIFT)) { \
+do { if (index > ASSERT_MAX_SIZE_MB << (20 - PAGE_SHIFT)) { \
CERROR("bad page index %lu > %llu\n", index, \
- ASSERT_MAX_SIZE_MB << (20 - PAGE_CACHE_SHIFT)); \
+ ASSERT_MAX_SIZE_MB << (20 - PAGE_SHIFT)); \
libcfs_debug = ~0UL; \
OP; \
}} while(0)
/*
* This limit is arbitrary (131072 clients on x86), but it is convenient to use
- * 2^n * PAGE_CACHE_SIZE * 8 for the number of bits that fit an order-n allocation.
+ * 2^n * PAGE_SIZE * 8 for the number of bits that fit an order-n allocation.
* If we need more than 131072 clients (order-2 allocation on x86) then this
* should become an array of single-page pointers that are allocated on demand.
*/
-#if (128 * 1024UL) > (PAGE_CACHE_SIZE * 8)
+#if (128 * 1024UL) > (PAGE_SIZE * 8)
#define LR_MAX_CLIENTS (128 * 1024UL)
#else
-#define LR_MAX_CLIENTS (PAGE_CACHE_SIZE * 8)
+#define LR_MAX_CLIENTS (PAGE_SIZE * 8)
#endif
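/*
 * Worked example (illustrative): with 4KiB pages one page holds
 * PAGE_SIZE * 8 == 32768 bits, so 128 * 1024 bits of per-client state
 * need 131072 / 32768 == 4 pages, i.e. the order-2 allocation for
 * 131072 clients mentioned above.
 */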
/** COMPAT_146: this is an OST (temporary) */
#include <libcfs/libcfs.h>
-#define CFS_NGROUPS_PER_BLOCK ((int)(PAGE_CACHE_SIZE / sizeof(gid_t)))
+#define CFS_NGROUPS_PER_BLOCK ((int)(PAGE_SIZE / sizeof(gid_t)))
#define CFS_GROUP_AT(gi, i) \
((gi)->blocks[(i) / CFS_NGROUPS_PER_BLOCK][(i) % CFS_NGROUPS_PER_BLOCK])
*/
#define PTLRPC_MAX_BRW_BITS (LNET_MTU_BITS + PTLRPC_BULK_OPS_BITS)
#define PTLRPC_MAX_BRW_SIZE (1 << PTLRPC_MAX_BRW_BITS)
-#define PTLRPC_MAX_BRW_PAGES (PTLRPC_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT)
+#define PTLRPC_MAX_BRW_PAGES (PTLRPC_MAX_BRW_SIZE >> PAGE_SHIFT)
#define ONE_MB_BRW_SIZE (1 << LNET_MTU_BITS)
#define MD_MAX_BRW_SIZE (1 << LNET_MTU_BITS)
-#define MD_MAX_BRW_PAGES (MD_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT)
+#define MD_MAX_BRW_PAGES (MD_MAX_BRW_SIZE >> PAGE_SHIFT)
#define DT_MAX_BRW_SIZE PTLRPC_MAX_BRW_SIZE
-#define DT_MAX_BRW_PAGES (DT_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT)
+#define DT_MAX_BRW_PAGES (DT_MAX_BRW_SIZE >> PAGE_SHIFT)
#define OFD_MAX_BRW_SIZE (1 << LNET_MTU_BITS)
/* When PAGE_SIZE is a constant, we can check our arithmetic here with cpp! */
#if ((PTLRPC_MAX_BRW_PAGES & (PTLRPC_MAX_BRW_PAGES - 1)) != 0)
# error "PTLRPC_MAX_BRW_PAGES isn't a power of two"
#endif
-#if (PTLRPC_MAX_BRW_SIZE != (PTLRPC_MAX_BRW_PAGES * PAGE_CACHE_SIZE))
-# error "PTLRPC_MAX_BRW_SIZE isn't PTLRPC_MAX_BRW_PAGES * PAGE_CACHE_SIZE"
+#if (PTLRPC_MAX_BRW_SIZE != (PTLRPC_MAX_BRW_PAGES * PAGE_SIZE))
+# error "PTLRPC_MAX_BRW_SIZE isn't PTLRPC_MAX_BRW_PAGES * PAGE_SIZE"
#endif
#if (PTLRPC_MAX_BRW_SIZE > LNET_MTU * PTLRPC_BULK_OPS_COUNT)
# error "PTLRPC_MAX_BRW_SIZE too big"
*/
/* depress threads factor for VM with small memory size */
#define OSS_THR_FACTOR min_t(int, 8, \
- NUM_CACHEPAGES >> (28 - PAGE_CACHE_SHIFT))
+ NUM_CACHEPAGES >> (28 - PAGE_SHIFT))
#define OSS_NTHRS_INIT (PTLRPC_NTHRS_INIT + 1)
#define OSS_NTHRS_BASE 64
if (PagePrivate(page))
#ifdef HAVE_INVALIDATE_RANGE
- page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE);
+ page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
#else
page->mapping->a_ops->invalidatepage(page, 0);
#endif
int cl_grant_shrink_interval; /* seconds */
/* A chunk is an optimal size used by osc_extent to determine
- * the extent size. A chunk is max(PAGE_CACHE_SIZE, OST block size) */
+ * the extent size. A chunk is max(PAGE_SIZE, OST block size) */
int cl_chunkbits;
/* extent insertion metadata overhead to be accounted in grant,
* in bytes */
static inline int cli_brw_size(struct obd_device *obd)
{
LASSERT(obd != NULL);
- return obd->u.cli.cl_max_pages_per_rpc << PAGE_CACHE_SHIFT;
+ return obd->u.cli.cl_max_pages_per_rpc << PAGE_SHIFT;
}
/* when RPC size or the max RPCs in flight is increased, the max dirty pages
/* initializing */
if (cli->cl_dirty_max_pages <= 0)
cli->cl_dirty_max_pages = (OSC_MAX_DIRTY_DEFAULT * 1024 * 1024)
- >> PAGE_CACHE_SHIFT;
+ >> PAGE_SHIFT;
else {
unsigned long dirty_max = cli->cl_max_rpcs_in_flight *
cli->cl_max_pages_per_rpc;
#endif
#ifdef POISON_BULK
-#define POISON_PAGE(page, val) do { memset(kmap(page), val, PAGE_CACHE_SIZE); \
+#define POISON_PAGE(page, val) do { memset(kmap(page), val, PAGE_SIZE); \
kunmap(page); } while (0)
#else
#define POISON_PAGE(page, val) do { } while (0)
 * the client requested. We also need to make sure it is server page
 * size aligned, otherwise a server page can be covered by two
* write locks. */
- mask = PAGE_CACHE_SIZE;
+ mask = PAGE_SIZE;
req_align = (req_end + 1) | req_start;
if (req_align != 0 && (req_align & (mask - 1)) == 0) {
while ((req_align & mask) == 0)
* from OFD after connecting. */
cli->cl_max_pages_per_rpc = PTLRPC_MAX_BRW_PAGES;
- /* set cl_chunkbits default value to PAGE_CACHE_SHIFT,
+ /* set cl_chunkbits default value to PAGE_SHIFT,
* it will be updated at OSC connection time. */
- cli->cl_chunkbits = PAGE_CACHE_SHIFT;
+ cli->cl_chunkbits = PAGE_SHIFT;
if (!strcmp(name, LUSTRE_MDC_NAME)) {
cli->cl_max_rpcs_in_flight = OBD_MAX_RIF_DEFAULT;
- } else if (totalram_pages >> (20 - PAGE_CACHE_SHIFT) <= 128 /* MB */) {
+ } else if (totalram_pages >> (20 - PAGE_SHIFT) <= 128 /* MB */) {
cli->cl_max_rpcs_in_flight = 2;
- } else if (totalram_pages >> (20 - PAGE_CACHE_SHIFT) <= 256 /* MB */) {
+ } else if (totalram_pages >> (20 - PAGE_SHIFT) <= 256 /* MB */) {
cli->cl_max_rpcs_in_flight = 3;
- } else if (totalram_pages >> (20 - PAGE_CACHE_SHIFT) <= 512 /* MB */) {
+ } else if (totalram_pages >> (20 - PAGE_SHIFT) <= 512 /* MB */) {
cli->cl_max_rpcs_in_flight = 4;
} else {
if (osc_on_mdt(obddev->obd_name))
lock = list_entry(expired->next, struct ldlm_lock,
l_pending_chain);
- if ((void *)lock < LP_POISON + PAGE_CACHE_SIZE &&
+ if ((void *)lock < LP_POISON + PAGE_SIZE &&
(void *)lock >= LP_POISON) {
spin_unlock_bh(&waiting_locks_spinlock);
CERROR("free lock on elt list %p\n", lock);
}
list_del_init(&lock->l_pending_chain);
if ((void *)lock->l_export <
- LP_POISON + PAGE_CACHE_SIZE &&
+ LP_POISON + PAGE_SIZE &&
(void *)lock->l_export >= LP_POISON) {
CERROR("lock with free export on elt list %p\n",
lock->l_export);
/*
* 50 ldlm locks for 1MB of RAM.
*/
-#define LDLM_POOL_HOST_L ((NUM_CACHEPAGES >> (20 - PAGE_CACHE_SHIFT)) * 50)
+#define LDLM_POOL_HOST_L ((NUM_CACHEPAGES >> (20 - PAGE_SHIFT)) * 50)
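/*
 * Worked example (illustrative): NUM_CACHEPAGES >> (20 - PAGE_SHIFT)
 * converts cacheable pages to MiB, so a node with 4GiB of cacheable
 * RAM gets LDLM_POOL_HOST_L == 4096 * 50 == 204800 locks.
 */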
/*
* Maximal possible grant step plan in %.
{
__u64 locknr;
- locknr = ((__u64)NUM_CACHEPAGES << PAGE_CACHE_SHIFT) * ratio;
+ locknr = ((__u64)NUM_CACHEPAGES << PAGE_SHIFT) * ratio;
do_div(locknr, 100 * sizeof(struct ldlm_lock));
return locknr;
{
int avail;
- avail = min_t(int, LDLM_MAXREQSIZE, PAGE_CACHE_SIZE - 512) - req_size;
+ avail = min_t(int, LDLM_MAXREQSIZE, PAGE_SIZE - 512) - req_size;
if (likely(avail >= 0))
avail /= (int)sizeof(struct lustre_handle);
else
{
ldata->ld_buf =
lu_buf_check_and_alloc(&lfsck_env_info(env)->lti_linkea_buf,
- PAGE_CACHE_SIZE);
+ PAGE_SIZE);
return __lfsck_links_read(env, obj, ldata);
}
{
ldata->ld_buf =
lu_buf_check_and_alloc(&lfsck_env_info(env)->lti_linkea_buf2,
- PAGE_CACHE_SIZE);
+ PAGE_SIZE);
return __lfsck_links_read(env, obj, ldata);
}
return 0;
}
-#define LFSCK_RBTREE_BITMAP_SIZE PAGE_CACHE_SIZE
+#define LFSCK_RBTREE_BITMAP_SIZE PAGE_SIZE
#define LFSCK_RBTREE_BITMAP_WIDTH (LFSCK_RBTREE_BITMAP_SIZE << 3)
#define LFSCK_RBTREE_BITMAP_MASK (LFSCK_RBTREE_BITMAP_WIDTH - 1)
* a header lu_dirpage which describes the start/end hash, and whether this
 * page is empty (contains no dir entry) or its hash collides with the next page.
* After client receives reply, several pages will be integrated into dir page
- * in PAGE_CACHE_SIZE (if PAGE_CACHE_SIZE greater than LU_PAGE_SIZE), and the
+ * in PAGE_SIZE (if PAGE_SIZE is greater than LU_PAGE_SIZE), and the
* lu_dirpage for this integrated page will be adjusted. See
* mdc_adjust_dirpages().
*
st.st_gid = body->mbo_gid;
st.st_rdev = body->mbo_rdev;
st.st_size = body->mbo_size;
- st.st_blksize = PAGE_CACHE_SIZE;
+ st.st_blksize = PAGE_SIZE;
st.st_blocks = body->mbo_blocks;
st.st_atime = body->mbo_atime;
st.st_mtime = body->mbo_mtime;
}
/* default to about 64M of readahead on a given system. */
-#define SBI_DEFAULT_READAHEAD_MAX (64UL << (20 - PAGE_CACHE_SHIFT))
+#define SBI_DEFAULT_READAHEAD_MAX (64UL << (20 - PAGE_SHIFT))
/* default to read-ahead full files smaller than 2MB on the second read */
-#define SBI_DEFAULT_READAHEAD_WHOLE_MAX (2UL << (20 - PAGE_CACHE_SHIFT))
+#define SBI_DEFAULT_READAHEAD_WHOLE_MAX (2UL << (20 - PAGE_SHIFT))
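/*
 * Illustrative note: "<< (20 - PAGE_SHIFT)" converts MiB to pages
 * (multiply by 2^20, divide by 2^PAGE_SHIFT).  With 4KiB pages the
 * defaults above are 64UL << 8 == 16384 pages (64MiB) and
 * 2UL << 8 == 512 pages (2MiB).
 */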
enum ra_stat {
RA_STAT_HIT = 0,
static inline void ll_invalidate_page(struct page *vmpage)
{
struct address_space *mapping = vmpage->mapping;
- loff_t offset = vmpage->index << PAGE_CACHE_SHIFT;
+ loff_t offset = vmpage->index << PAGE_SHIFT;
LASSERT(PageLocked(vmpage));
if (mapping == NULL)
* truncate_complete_page() calls
* a_ops->invalidatepage()->cl_page_delete()->vvp_page_delete().
*/
- ll_teardown_mmaps(mapping, offset, offset + PAGE_CACHE_SIZE);
+ ll_teardown_mmaps(mapping, offset, offset + PAGE_SIZE);
truncate_complete_page(mapping, vmpage);
}
valid != CLIENT_CONNECT_MDT_REQD) {
char *buf;
- OBD_ALLOC_WAIT(buf, PAGE_CACHE_SIZE);
- obd_connect_flags2str(buf, PAGE_CACHE_SIZE,
+ OBD_ALLOC_WAIT(buf, PAGE_SIZE);
+ obd_connect_flags2str(buf, PAGE_SIZE,
valid ^ CLIENT_CONNECT_MDT_REQD, 0, ",");
LCONSOLE_ERROR_MSG(0x170, "Server %s does not support "
"feature(s) needed for correct operation "
"of this client (%s). Please upgrade "
"server or downgrade client.\n",
sbi->ll_md_exp->exp_obd->obd_name, buf);
- OBD_FREE(buf, PAGE_CACHE_SIZE);
+ OBD_FREE(buf, PAGE_SIZE);
GOTO(out_md_fid, err = -EPROTO);
}
sbi->ll_flags |= LL_SBI_64BIT_HASH;
if (data->ocd_connect_flags & OBD_CONNECT_BRW_SIZE)
- sbi->ll_md_brw_pages = data->ocd_brw_size >> PAGE_CACHE_SHIFT;
+ sbi->ll_md_brw_pages = data->ocd_brw_size >> PAGE_SHIFT;
else
sbi->ll_md_brw_pages = 1;
unsigned long addr, size_t count)
{
policy->l_extent.start = ((addr - vma->vm_start) & PAGE_MASK) +
- (vma->vm_pgoff << PAGE_CACHE_SHIFT);
+ (vma->vm_pgoff << PAGE_SHIFT);
policy->l_extent.end = (policy->l_extent.start + count - 1) |
~PAGE_MASK;
}
LASSERTF(last > first, "last %llu first %llu\n", last, first);
if (mapping_mapped(mapping)) {
rc = 0;
- unmap_mapping_range(mapping, first + PAGE_CACHE_SIZE - 1,
+ unmap_mapping_range(mapping, first + PAGE_SIZE - 1,
last - first + 1, 0);
}
#endif
bio_for_each_segment_all(bvec, bio, iter) {
BUG_ON(bvec->bv_offset != 0);
- BUG_ON(bvec->bv_len != PAGE_CACHE_SIZE);
+ BUG_ON(bvec->bv_len != PAGE_SIZE);
pages[page_count] = bvec->bv_page;
offsets[page_count] = offset;
(rw == WRITE) ? LPROC_LL_BRW_WRITE : LPROC_LL_BRW_READ,
page_count);
- pvec->ldp_size = page_count << PAGE_CACHE_SHIFT;
+ pvec->ldp_size = page_count << PAGE_SHIFT;
pvec->ldp_nr = page_count;
/* FIXME: in ll_direct_rw_pages, it has to allocate many cl_page{}s to
set_device_ro(bdev, (lo_flags & LO_FLAGS_READ_ONLY) != 0);
- lo->lo_blocksize = PAGE_CACHE_SIZE;
+ lo->lo_blocksize = PAGE_SIZE;
lo->lo_device = bdev;
lo->lo_flags = lo_flags;
lo->lo_backing_file = file;
/* queue parameters */
blk_queue_max_hw_sectors(lo->lo_queue,
- LLOOP_MAX_SEGMENTS << (PAGE_CACHE_SHIFT - 9));
+ LLOOP_MAX_SEGMENTS << (PAGE_SHIFT - 9));
blk_queue_max_segments(lo->lo_queue, LLOOP_MAX_SEGMENTS);
set_capacity(disks[lo->lo_number], size);
pages_number = sbi->ll_ra_info.ra_max_pages;
spin_unlock(&sbi->ll_lock);
- mult = 1 << (20 - PAGE_CACHE_SHIFT);
+ mult = 1 << (20 - PAGE_SHIFT);
return lprocfs_seq_read_frac_helper(m, pages_number, mult);
}
if (rc)
return rc;
- pages_number >>= PAGE_CACHE_SHIFT;
+ pages_number >>= PAGE_SHIFT;
if (pages_number < 0 || pages_number > totalram_pages / 2) {
/* 1/2 of RAM */
CERROR("%s: can't set max_readahead_mb=%lu > %luMB\n",
ll_get_fsname(sb, NULL, 0),
- (unsigned long)pages_number >> (20 - PAGE_CACHE_SHIFT),
- totalram_pages >> (20 - PAGE_CACHE_SHIFT + 1));
+ (unsigned long)pages_number >> (20 - PAGE_SHIFT),
+ totalram_pages >> (20 - PAGE_SHIFT + 1));
return -ERANGE;
}
pages_number = sbi->ll_ra_info.ra_max_pages_per_file;
spin_unlock(&sbi->ll_lock);
- mult = 1 << (20 - PAGE_CACHE_SHIFT);
+ mult = 1 << (20 - PAGE_SHIFT);
return lprocfs_seq_read_frac_helper(m, pages_number, mult);
}
if (rc)
return rc;
- pages_number >>= PAGE_CACHE_SHIFT;
+ pages_number >>= PAGE_SHIFT;
if (pages_number < 0 || pages_number > sbi->ll_ra_info.ra_max_pages) {
CERROR("%s: can't set max_readahead_per_file_mb=%lu > "
"max_read_ahead_mb=%lu\n", ll_get_fsname(sb, NULL, 0),
- (unsigned long)pages_number >> (20 - PAGE_CACHE_SHIFT),
- sbi->ll_ra_info.ra_max_pages >> (20 - PAGE_CACHE_SHIFT));
+ (unsigned long)pages_number >> (20 - PAGE_SHIFT),
+ sbi->ll_ra_info.ra_max_pages >> (20 - PAGE_SHIFT));
return -ERANGE;
}
pages_number = sbi->ll_ra_info.ra_max_read_ahead_whole_pages;
spin_unlock(&sbi->ll_lock);
- mult = 1 << (20 - PAGE_CACHE_SHIFT);
+ mult = 1 << (20 - PAGE_SHIFT);
return lprocfs_seq_read_frac_helper(m, pages_number, mult);
}
if (rc)
return rc;
- pages_number >>= PAGE_CACHE_SHIFT;
+ pages_number >>= PAGE_SHIFT;
/* Cap this at the current max readahead window size, the readahead
* algorithm does this anyway so it's pointless to set it larger. */
if (pages_number < 0 ||
pages_number > sbi->ll_ra_info.ra_max_pages_per_file) {
- int pages_shift = 20 - PAGE_CACHE_SHIFT;
+ int pages_shift = 20 - PAGE_SHIFT;
CERROR("%s: can't set max_read_ahead_whole_mb=%lu > "
"max_read_ahead_per_file_mb=%lu\n",
ll_get_fsname(sb, NULL, 0),
struct super_block *sb = m->private;
struct ll_sb_info *sbi = ll_s2sbi(sb);
struct cl_client_cache *cache = sbi->ll_cache;
- int shift = 20 - PAGE_CACHE_SHIFT;
+ int shift = 20 - PAGE_SHIFT;
long max_cached_mb;
long unused_mb;
if (rc)
RETURN(rc);
- pages_number >>= PAGE_CACHE_SHIFT;
+ pages_number >>= PAGE_SHIFT;
if (pages_number < 0 || pages_number > totalram_pages) {
CERROR("%s: can't set max cache more than %lu MB\n",
ll_get_fsname(sb, NULL, 0),
- totalram_pages >> (20 - PAGE_CACHE_SHIFT));
+ totalram_pages >> (20 - PAGE_SHIFT));
RETURN(-ERANGE);
}
/* Allow enough cache so clients can make well-formed RPCs */
int mb;
pages = atomic_long_read(&cache->ccc_unstable_nr);
- mb = (pages * PAGE_CACHE_SIZE) >> 20;
+ mb = (pages * PAGE_SIZE) >> 20;
seq_printf(m, "unstable_check: %8d\n"
"unstable_pages: %12ld\n"
unsigned long end_index;
/* Truncate RA window to end of file */
- end_index = (unsigned long)((kms - 1) >> PAGE_CACHE_SHIFT);
+ end_index = (unsigned long)((kms - 1) >> PAGE_SHIFT);
if (end_index <= end) {
end = end_index;
ria->ria_eof = true;
if (ria->ria_reserved != 0)
ll_ra_count_put(ll_i2sbi(inode), ria->ria_reserved);
- if (ra_end == end && ra_end == (kms >> PAGE_CACHE_SHIFT))
+ if (ra_end == end && ra_end == (kms >> PAGE_SHIFT))
ll_ra_stats_inc(inode, RA_STAT_EOF);
/* if we didn't get to the end of the region we reserved from
if (ras->ras_requests >= 2 && !ras->ras_request_index) {
__u64 kms_pages;
- kms_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
- PAGE_CACHE_SHIFT;
+ kms_pages = (i_size_read(inode) + PAGE_SIZE - 1) >>
+ PAGE_SHIFT;
CDEBUG(D_READA, "kmsp %llu mwp %lu mp %lu\n", kms_pages,
ra->ra_max_read_ahead_whole_pages, ra->ra_max_pages_per_file);
* breaking kernel which assumes ->writepage should mark
* PageWriteback or clean the page. */
result = cl_sync_file_range(inode, offset,
- offset + PAGE_CACHE_SIZE - 1,
+ offset + PAGE_SIZE - 1,
CL_FSYNC_LOCAL, 1);
if (result > 0) {
/* actually we may have written more than one page.
ENTRY;
if (wbc->range_cyclic) {
- start = mapping->writeback_index << PAGE_CACHE_SHIFT;
+ start = mapping->writeback_index << PAGE_SHIFT;
end = OBD_OBJECT_EOF;
} else {
start = wbc->range_start;
if (end == OBD_OBJECT_EOF)
mapping->writeback_index = 0;
else
- mapping->writeback_index = (end >> PAGE_CACHE_SHIFT) +1;
+ mapping->writeback_index = (end >> PAGE_SHIFT) + 1;
}
RETURN(result);
}
* happening with locked page too
*/
#ifdef HAVE_INVALIDATE_RANGE
- if (offset == 0 && length == PAGE_CACHE_SIZE) {
+ if (offset == 0 && length == PAGE_SIZE) {
#else
if (offset == 0) {
#endif
* representing PAGE_SIZE worth of user data, into a single buffer, and
* then truncate this to be a full-sized RPC. For 4kB PAGE_SIZE this is
* up to 22MB for 128kB kmalloc and up to 682MB for 4MB kmalloc. */
-#define MAX_DIO_SIZE ((MAX_MALLOC / sizeof(struct brw_page) * PAGE_CACHE_SIZE) & \
+#define MAX_DIO_SIZE ((MAX_MALLOC / sizeof(struct brw_page) * PAGE_SIZE) & \
~(DT_MAX_BRW_SIZE - 1))
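/*
 * Worked example (illustrative, assuming a 24-byte struct brw_page and
 * a 128KiB MAX_MALLOC): 131072 / 24 == 5461 brw_page slots, i.e. 5461
 * pages of 4KiB covering about 21.3MiB, which "& ~(DT_MAX_BRW_SIZE - 1)"
 * rounds down to a full-RPC multiple, matching the "up to 22MB for
 * 128kB kmalloc" figure above.
 */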
#ifndef HAVE_IOV_ITER_RW
CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), size=%zd (max %lu), "
"offset=%lld=%llx, pages %zd (max %lu)\n",
PFID(ll_inode2fid(inode)), inode, count, MAX_DIO_SIZE,
- file_offset, file_offset, count >> PAGE_CACHE_SHIFT,
- MAX_DIO_SIZE >> PAGE_CACHE_SHIFT);
+ file_offset, file_offset, count >> PAGE_SHIFT,
+ MAX_DIO_SIZE >> PAGE_SHIFT);
/* Check that all user buffers are aligned as well */
if (iov_iter_alignment(iter) & ~PAGE_MASK)
* We should always be able to kmalloc for a
* page worth of page pointers = 4MB on i386. */
if (result == -ENOMEM &&
- size > (PAGE_CACHE_SIZE / sizeof(*pages)) *
- PAGE_CACHE_SIZE) {
+ size > (PAGE_SIZE / sizeof(*pages)) *
+ PAGE_SIZE) {
size = ((((size / 2) - 1) |
~PAGE_MASK) + 1) & PAGE_MASK;
CDEBUG(D_VFSTRACE, "DIO size now %zu\n",
return -EFBIG;
}
- *max_pages = (user_addr + size + PAGE_CACHE_SIZE - 1) >>
- PAGE_CACHE_SHIFT;
- *max_pages -= user_addr >> PAGE_CACHE_SHIFT;
+ *max_pages = (user_addr + size + PAGE_SIZE - 1) >>
+ PAGE_SHIFT;
+ *max_pages -= user_addr >> PAGE_SHIFT;
OBD_ALLOC_LARGE(*pages, *max_pages * sizeof(**pages));
if (*pages) {
CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), size=%zd (max %lu), "
"offset=%lld=%llx, pages %zd (max %lu)\n",
PFID(ll_inode2fid(inode)), inode, count, MAX_DIO_SIZE,
- file_offset, file_offset, count >> PAGE_CACHE_SHIFT,
- MAX_DIO_SIZE >> PAGE_CACHE_SHIFT);
+ file_offset, file_offset, count >> PAGE_SHIFT,
+ MAX_DIO_SIZE >> PAGE_SHIFT);
/* Check that all user buffers are aligned as well */
for (seg = 0; seg < nr_segs; seg++) {
&pages, &max_pages);
if (likely(page_count > 0)) {
if (unlikely(page_count < max_pages))
- bytes = page_count << PAGE_CACHE_SHIFT;
+ bytes = page_count << PAGE_SHIFT;
result = ll_direct_IO_seg(env, io, rw, inode,
bytes, file_offset,
pages, page_count);
* We should always be able to kmalloc for a
* page worth of page pointers = 4MB on i386. */
if (result == -ENOMEM &&
- size > (PAGE_CACHE_SIZE / sizeof(*pages)) *
- PAGE_CACHE_SIZE) {
+ size > (PAGE_SIZE / sizeof(*pages)) *
+ PAGE_SIZE) {
size = ((((size / 2) - 1) |
- ~PAGE_CACHE_MASK) + 1) &
- PAGE_CACHE_MASK;
+ ~PAGE_MASK) + 1) &
+ PAGE_MASK;
CDEBUG(D_VFSTRACE, "DIO size now %zu\n",
size);
continue;
struct cl_page *page;
struct cl_object *clob = ll_i2info(mapping->host)->lli_clob;
- pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+ pgoff_t index = pos >> PAGE_SHIFT;
struct page *vmpage = NULL;
- unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+ unsigned from = pos & (PAGE_SIZE - 1);
unsigned to = from + len;
int result = 0;
ENTRY;
struct cl_io *io;
struct vvp_io *vio;
struct cl_page *page;
- unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+ unsigned from = pos & (PAGE_SIZE - 1);
bool unplug = false;
int result = 0;
ENTRY;
* --bug 17336 */
loff_t size = i_size_read(inode);
unsigned long cur_index = start >>
- PAGE_CACHE_SHIFT;
+ PAGE_SHIFT;
if ((size == 0 && cur_index != 0) ||
- (((size - 1) >> PAGE_CACHE_SHIFT) <
+ (((size - 1) >> PAGE_SHIFT) <
cur_index))
*exceed = 1;
}
if (!vio->vui_ra_valid) {
vio->vui_ra_valid = true;
vio->vui_ra_start = cl_index(obj, pos);
- vio->vui_ra_count = cl_index(obj, tot + PAGE_CACHE_SIZE - 1);
+ vio->vui_ra_count = cl_index(obj, tot + PAGE_SIZE - 1);
ll_ras_enter(file);
}
dp->ldp_flags |= LDF_COLLIDE;
area = dp + 1;
- left_bytes = PAGE_CACHE_SIZE - sizeof(*dp);
+ left_bytes = PAGE_SIZE - sizeof(*dp);
ent = area;
last_ent = ent;
do {
if (ra_end != CL_PAGE_EOF)
ra_end = lov_stripe_pgoff(loo->lo_lsm, ra_end, stripe);
- pps = loo->lo_lsm->lsm_stripe_size >> PAGE_CACHE_SHIFT;
+ pps = loo->lo_lsm->lsm_stripe_size >> PAGE_SHIFT;
CDEBUG(D_READA, DFID " max_index = %lu, pps = %u, "
"stripe_size = %u, stripe no = %u, start index = %lu\n",
{
loff_t offset;
- offset = lov_stripe_size(lsm, (stripe_index << PAGE_CACHE_SHIFT) + 1,
+ offset = lov_stripe_size(lsm, (stripe_index << PAGE_SHIFT) + 1,
stripe);
- return offset >> PAGE_CACHE_SHIFT;
+ return offset >> PAGE_SHIFT;
}
/* we have an offset in file backed by an lov and want to find out where
/* NB req now owns desc and will free it when it gets freed */
for (i = 0; i < npages; i++)
desc->bd_frag_ops->add_kiov_frag(desc, pages[i], 0,
- PAGE_CACHE_SIZE);
+ PAGE_SIZE);
- mdc_readdir_pack(req, offset, PAGE_CACHE_SIZE * npages, fid);
+ mdc_readdir_pack(req, offset, PAGE_SIZE * npages, fid);
ptlrpc_request_set_replen(req);
rc = ptlrpc_queue_wait(req);
if (req->rq_bulk->bd_nob_transferred & ~LU_PAGE_MASK) {
CERROR("%s: unexpected bytes transferred: %d (%ld expected)\n",
exp->exp_obd->obd_name, req->rq_bulk->bd_nob_transferred,
- PAGE_CACHE_SIZE * npages);
+ PAGE_SIZE * npages);
ptlrpc_req_finished(req);
RETURN(-EPROTO);
}
* |s|e|f|p|ent| 0 | ... | 0 |
* '----------------- -----'
*
- * However, on hosts where the native VM page size (PAGE_CACHE_SIZE) is
+ * However, on hosts where the native VM page size (PAGE_SIZE) is
* larger than LU_PAGE_SIZE, a single host page may contain multiple
* lu_dirpages. After reading the lu_dirpages from the MDS, the
* ldp_hash_end of the first lu_dirpage refers to the one immediately
* - Adjust the lde_reclen of the ending entry of each lu_dirpage to span
* to the first entry of the next lu_dirpage.
*/
-#if PAGE_CACHE_SIZE > LU_PAGE_SIZE
+#if PAGE_SIZE > LU_PAGE_SIZE
static void mdc_adjust_dirpages(struct page **pages, int cfs_pgs, int lu_pgs)
{
int i;
}
#else
#define mdc_adjust_dirpages(pages, cfs_pgs, lu_pgs) do {} while (0)
-#endif /* PAGE_CACHE_SIZE > LU_PAGE_SIZE */
+#endif /* PAGE_SIZE > LU_PAGE_SIZE */
/* parameters for readdir page */
struct readpage_param {
int lu_pgs;
rd_pgs = (req->rq_bulk->bd_nob_transferred +
- PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+ PAGE_SIZE - 1) >> PAGE_SHIFT;
lu_pgs = req->rq_bulk->bd_nob_transferred >>
LU_PAGE_SHIFT;
LASSERT(!(req->rq_bulk->bd_nob_transferred & ~LU_PAGE_MASK));
/* First try a small buf */
LASSERT(env != NULL);
ldata->ld_buf = lu_buf_check_and_alloc(&mdd_env_info(env)->mti_link_buf,
- PAGE_CACHE_SIZE);
+ PAGE_SIZE);
if (ldata->ld_buf->lb_buf == NULL)
return -ENOMEM;
int rc;
ENTRY;
- if (count >= PAGE_CACHE_SIZE)
+ if (count >= PAGE_SIZE)
RETURN(-EINVAL);
- OBD_ALLOC(kernbuf, PAGE_CACHE_SIZE);
+ OBD_ALLOC(kernbuf, PAGE_SIZE);
if (kernbuf == NULL)
RETURN(-ENOMEM);
if (copy_from_user(kernbuf, buffer, count))
if (rc == 0)
rc = count;
out:
- OBD_FREE(kernbuf, PAGE_CACHE_SIZE);
+ OBD_FREE(kernbuf, PAGE_SIZE);
return rc;
}
LPROC_SEQ_FOPS(mdd_changelog_mask);
PFID(mdt_object_fid(o)), rc);
rc = -EFAULT;
} else {
- int print_limit = min_t(int, PAGE_CACHE_SIZE - 128, rc);
+ int print_limit = min_t(int, PAGE_SIZE - 128, rc);
if (OBD_FAIL_CHECK(OBD_FAIL_MDS_READLINK_EPROTO))
rc -= 2;
rdpg->rp_attrs |= LUDA_64BITHASH;
rdpg->rp_count = min_t(unsigned int, reqbody->mbo_nlink,
exp_max_brw_size(tsi->tsi_exp));
- rdpg->rp_npages = (rdpg->rp_count + PAGE_CACHE_SIZE - 1) >>
- PAGE_CACHE_SHIFT;
+ rdpg->rp_npages = (rdpg->rp_count + PAGE_SIZE - 1) >>
+ PAGE_SHIFT;
OBD_ALLOC(rdpg->rp_pages, rdpg->rp_npages * sizeof rdpg->rp_pages[0]);
if (rdpg->rp_pages == NULL)
RETURN(-ENOMEM);
}
enum {
- CONFIG_READ_NRPAGES_INIT = 1 << (20 - PAGE_CACHE_SHIFT),
+ CONFIG_READ_NRPAGES_INIT = 1 << (20 - PAGE_SHIFT),
CONFIG_READ_NRPAGES = 4
};
LASSERT(cfg->cfg_instance != NULL);
LASSERT(cfg->cfg_sb == cfg->cfg_instance);
- OBD_ALLOC(inst, PAGE_CACHE_SIZE);
+ OBD_ALLOC(inst, PAGE_SIZE);
if (inst == NULL)
RETURN(-ENOMEM);
if (!IS_SERVER(lsi)) {
- pos = snprintf(inst, PAGE_CACHE_SIZE, "%p", cfg->cfg_instance);
- if (pos >= PAGE_CACHE_SIZE) {
- OBD_FREE(inst, PAGE_CACHE_SIZE);
+ pos = snprintf(inst, PAGE_SIZE, "%p", cfg->cfg_instance);
+ if (pos >= PAGE_SIZE) {
+ OBD_FREE(inst, PAGE_SIZE);
return -E2BIG;
}
} else {
LASSERT(IS_MDT(lsi));
rc = server_name2svname(lsi->lsi_svname, inst, NULL,
- PAGE_CACHE_SIZE);
+ PAGE_SIZE);
if (rc) {
- OBD_FREE(inst, PAGE_CACHE_SIZE);
+ OBD_FREE(inst, PAGE_SIZE);
RETURN(-EINVAL);
}
pos = strlen(inst);
++pos;
buf = inst + pos;
- bufsz = PAGE_CACHE_SIZE - pos;
+ bufsz = PAGE_SIZE - pos;
while (datalen > 0) {
int entry_len = sizeof(*entry);
/* Keep this swab for normal mixed endian handling. LU-1644 */
if (mne_swab)
lustre_swab_mgs_nidtbl_entry(entry);
- if (entry->mne_length > PAGE_CACHE_SIZE) {
+ if (entry->mne_length > PAGE_SIZE) {
CERROR("MNE too large (%u)\n", entry->mne_length);
break;
}
/* continue, even one with error */
}
- OBD_FREE(inst, PAGE_CACHE_SIZE);
+ OBD_FREE(inst, PAGE_SIZE);
RETURN(rc);
}
else
body->mcb_offset = cfg->cfg_last_idx + 1;
body->mcb_type = cld->cld_type;
- body->mcb_bits = PAGE_CACHE_SHIFT;
+ body->mcb_bits = PAGE_SHIFT;
body->mcb_units = nrpages;
/* allocate bulk transfer descriptor */
for (i = 0; i < nrpages; i++)
desc->bd_frag_ops->add_kiov_frag(desc, pages[i], 0,
- PAGE_CACHE_SIZE);
+ PAGE_SIZE);
ptlrpc_request_set_replen(req);
rc = ptlrpc_queue_wait(req);
if (ealen < 0)
GOTO(out, rc = ealen);
- if (ealen > nrpages << PAGE_CACHE_SHIFT)
+ if (ealen > nrpages << PAGE_SHIFT)
GOTO(out, rc = -EINVAL);
if (ealen == 0) { /* no logs transferred */
rc2 = mgc_apply_recover_logs(obd, cld, res->mcr_offset,
ptr,
min_t(int, ealen,
- PAGE_CACHE_SIZE),
+ PAGE_SIZE),
mne_swab);
kunmap(pages[i]);
if (rc2 < 0) {
break;
}
- ealen -= PAGE_CACHE_SIZE;
+ ealen -= PAGE_SIZE;
}
out:
GOTO(out, rc = -EINVAL);
}
- if (data->ioc_plen1 > PAGE_CACHE_SIZE)
+ if (data->ioc_plen1 > PAGE_SIZE)
GOTO(out, rc = -E2BIG);
OBD_ALLOC(lcfg, data->ioc_plen1);
GOTO(out_pool, rc = -EINVAL);
}
- if (data->ioc_plen1 > PAGE_CACHE_SIZE)
+ if (data->ioc_plen1 > PAGE_SIZE)
GOTO(out_pool, rc = -E2BIG);
OBD_ALLOC(lcfg, data->ioc_plen1);
	/* make sure unit_size is a power of 2 */
LASSERT((unit_size & (unit_size - 1)) == 0);
- LASSERT(nrpages << PAGE_CACHE_SHIFT >= units_total * unit_size);
+ LASSERT(nrpages << PAGE_SHIFT >= units_total * unit_size);
mutex_lock(&tbl->mn_lock);
LASSERT(nidtbl_is_sane(tbl));
buf = kmap(pages[index]);
++index;
- units_in_page = PAGE_CACHE_SIZE / unit_size;
+ units_in_page = PAGE_SIZE / unit_size;
LASSERT(units_in_page > 0);
}
RETURN(rc);
bufsize = body->mcb_units << body->mcb_bits;
- nrpages = (bufsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+ nrpages = (bufsize + PAGE_SIZE - 1) >> PAGE_SHIFT;
if (nrpages > PTLRPC_MAX_BRW_PAGES)
RETURN(-EINVAL);
GOTO(out, rc = -EINVAL);
res->mcr_offset = body->mcb_offset;
- unit_size = min_t(int, 1 << body->mcb_bits, PAGE_CACHE_SIZE);
+ unit_size = min_t(int, 1 << body->mcb_bits, PAGE_SIZE);
bytes = mgs_nidtbl_read(req->rq_export, &fsdb->fsdb_nidtbl, res,
pages, nrpages, bufsize / unit_size, unit_size);
if (bytes < 0)
GOTO(out, rc = bytes);
/* start bulk transfer */
- page_count = (bytes + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+ page_count = (bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;
LASSERT(page_count <= nrpages);
desc = ptlrpc_prep_bulk_exp(req, page_count, 1,
PTLRPC_BULK_PUT_SOURCE |
for (i = 0; i < page_count && bytes > 0; i++) {
desc->bd_frag_ops->add_kiov_frag(desc, pages[i], 0,
min_t(int, bytes,
- PAGE_CACHE_SIZE));
- bytes -= PAGE_CACHE_SIZE;
+ PAGE_SIZE));
+ bytes -= PAGE_SIZE;
}
rc = target_bulk_io(req->rq_export, desc, &lwi);
char *ptr;
int rc = 0;
- if (count == 0 || count >= PAGE_CACHE_SIZE)
+ if (count == 0 || count >= PAGE_SIZE)
return -EINVAL;
OBD_ALLOC(kbuf, count + 1);
*/
loff_t cl_offset(const struct cl_object *obj, pgoff_t idx)
{
- return (loff_t)idx << PAGE_CACHE_SHIFT;
+ return (loff_t)idx << PAGE_SHIFT;
}
EXPORT_SYMBOL(cl_offset);
*/
pgoff_t cl_index(const struct cl_object *obj, loff_t offset)
{
- return offset >> PAGE_CACHE_SHIFT;
+ return offset >> PAGE_SHIFT;
}
EXPORT_SYMBOL(cl_index);
size_t cl_page_size(const struct cl_object *obj)
{
- return 1UL << PAGE_CACHE_SHIFT;
+ return 1UL << PAGE_SHIFT;
}
EXPORT_SYMBOL(cl_page_size);
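/*
 * Illustrative note: cl_offset() and cl_index() are inverse page-shift
 * conversions, so for any byte offset pos,
 * cl_offset(obj, cl_index(obj, pos)) == pos & PAGE_MASK, i.e. pos
 * rounded down to a cl_page_size() boundary.
 */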
CWARN("s64 wrong length! strlen(%s)=%d != 2\n", buf, len);
ret = -EINVAL;
}
- if ((u64val & ~PAGE_CACHE_MASK) >= PAGE_CACHE_SIZE) {
+ if ((u64val & ~PAGE_MASK) >= PAGE_SIZE) {
CWARN("mask failed: u64val %llu >= %llu\n", u64val,
- (__u64)PAGE_CACHE_SIZE);
+ (__u64)PAGE_SIZE);
ret = -EINVAL;
}
/* Default the dirty page cache cap to 1/2 of system memory.
* For clients with less memory, a larger fraction is needed
* for other purposes (mostly for BGL). */
- if (totalram_pages <= 512 << (20 - PAGE_CACHE_SHIFT))
+ if (totalram_pages <= 512 << (20 - PAGE_SHIFT))
obd_max_dirty_pages = totalram_pages / 4;
else
obd_max_dirty_pages = totalram_pages / 2;
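/*
 * Illustrative note: 512 << (20 - PAGE_SHIFT) is 512MiB expressed in
 * pages, so hosts with at most 512MiB of RAM cap dirty pages at 1/4 of
 * memory and larger hosts at 1/2.
 */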
int linkea_data_new(struct linkea_data *ldata, struct lu_buf *buf)
{
- ldata->ld_buf = lu_buf_check_and_alloc(buf, PAGE_CACHE_SIZE);
+ ldata->ld_buf = lu_buf_check_and_alloc(buf, PAGE_SIZE);
if (ldata->ld_buf->lb_buf == NULL)
return -ENOMEM;
ldata->ld_leh = ldata->ld_buf->lb_buf;
#include <linux/fs.h>
#include <linux/module.h>
-#include <linux/pagemap.h> /* for PAGE_CACHE_SIZE */
+#include <linux/pagemap.h> /* for PAGE_SIZE */
#include <lustre/lustre_idl.h>
#include <obd_class.h>
if (val < 0)
return -ERANGE;
- val >>= PAGE_CACHE_SHIFT;
+ val >>= PAGE_SHIFT;
/* Don't allow them to let dirty pages exceed 90% of system
* memory and set a hard minimum of 4MB. */
"setting to %lu\n", val,
((totalram_pages / 10) * 9));
obd_max_dirty_pages = ((totalram_pages / 10) * 9);
- } else if (val < 4 << (20 - PAGE_CACHE_SHIFT)) {
- obd_max_dirty_pages = 4 << (20 - PAGE_CACHE_SHIFT);
+ } else if (val < 4 << (20 - PAGE_SHIFT)) {
+ obd_max_dirty_pages = 4 << (20 - PAGE_SHIFT);
} else {
obd_max_dirty_pages = val;
}
len = lprocfs_read_frac_helper(buf, sizeof(buf),
*(unsigned long *)table->data,
- 1 << (20 - PAGE_CACHE_SHIFT));
+ 1 << (20 - PAGE_SHIFT));
if (len > *lenp)
len = *lenp;
buf[len] = '\0';
#if BITS_PER_LONG == 32
/* limit hashtable size for lowmem systems to low RAM */
- if (cache_size > 1 << (30 - PAGE_CACHE_SHIFT))
- cache_size = 1 << (30 - PAGE_CACHE_SHIFT) * 3 / 4;
+ if (cache_size > 1 << (30 - PAGE_SHIFT))
+ cache_size = 1 << (30 - PAGE_SHIFT) * 3 / 4;
#endif
/* clear off unreasonable cache setting. */
lu_cache_percent = LU_CACHE_PERCENT_DEFAULT;
}
cache_size = cache_size / 100 * lu_cache_percent *
- (PAGE_CACHE_SIZE / 1024);
+ (PAGE_SIZE / 1024);
for (bits = 1; (1 << bits) < cache_size; ++bits) {
;
#define ECHO_INIT_OID 0x10000000ULL
#define ECHO_HANDLE_MAGIC 0xabcd0123fedc9876ULL
-#define ECHO_PERSISTENT_PAGES (ECHO_PERSISTENT_SIZE >> PAGE_CACHE_SHIFT)
+#define ECHO_PERSISTENT_PAGES (ECHO_PERSISTENT_SIZE >> PAGE_SHIFT)
static struct page *echo_persistent_pages[ECHO_PERSISTENT_PAGES];
enum {
int len = nb->rnb_len;
while (len > 0) {
- int plen = PAGE_CACHE_SIZE - (offset & (PAGE_CACHE_SIZE-1));
+		int plen = PAGE_SIZE - (offset & (PAGE_SIZE - 1));
if (len < plen)
plen = len;
res->lnb_file_offset = offset;
res->lnb_len = plen;
LASSERT((res->lnb_file_offset & ~PAGE_MASK) +
- res->lnb_len <= PAGE_CACHE_SIZE);
+ res->lnb_len <= PAGE_SIZE);
if (ispersistent &&
- ((res->lnb_file_offset >> PAGE_CACHE_SHIFT) <
+ ((res->lnb_file_offset >> PAGE_SHIFT) <
ECHO_PERSISTENT_PAGES)) {
res->lnb_page =
echo_persistent_pages[res->lnb_file_offset >>
- PAGE_CACHE_SHIFT];
+ PAGE_SHIFT];
/* Take extra ref so __free_pages() can be called OK */
get_page(res->lnb_page);
} else {
struct niobuf_local *lb, int verify)
{
struct niobuf_local *res = lb;
- u64 start = rb->rnb_offset >> PAGE_CACHE_SHIFT;
- u64 end = (rb->rnb_offset + rb->rnb_len + PAGE_CACHE_SIZE - 1) >>
- PAGE_CACHE_SHIFT;
+ u64 start = rb->rnb_offset >> PAGE_SHIFT;
+ u64 end = (rb->rnb_offset + rb->rnb_len + PAGE_SIZE - 1) >>
+ PAGE_SHIFT;
int count = (int)(end - start);
int rc = 0;
int i;
return -ENOMEM;
}
- memset (kmap (pg), 0, PAGE_CACHE_SIZE);
- kunmap (pg);
+ memset(kmap(pg), 0, PAGE_SIZE);
+ kunmap(pg);
echo_persistent_pages[i] = pg;
}
rc = cl_echo_enqueue0(env, eco, offset,
- offset + npages * PAGE_CACHE_SIZE - 1,
+ offset + npages * PAGE_SIZE - 1,
rw == READ ? LCK_PR : LCK_PW, &lh.cookie,
CEF_NEVER);
if (rc < 0)
int delta;
/* no partial pages on the client */
- LASSERT(count == PAGE_CACHE_SIZE);
+ LASSERT(count == PAGE_SIZE);
addr = kmap(page);
- for (delta = 0; delta < PAGE_CACHE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) {
+ for (delta = 0; delta < PAGE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) {
if (rw == OBD_BRW_WRITE) {
stripe_off = offset + delta;
stripe_id = id;
int rc2;
/* no partial pages on the client */
- LASSERT(count == PAGE_CACHE_SIZE);
+ LASSERT(count == PAGE_SIZE);
addr = kmap(page);
- for (rc = delta = 0; delta < PAGE_CACHE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) {
+ for (rc = delta = 0; delta < PAGE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) {
stripe_off = offset + delta;
stripe_id = id;
RETURN(-EINVAL);
/* XXX think again with misaligned I/O */
- npages = count >> PAGE_CACHE_SHIFT;
+ npages = count >> PAGE_SHIFT;
if (rw == OBD_BRW_WRITE)
brw_flags = OBD_BRW_ASYNC;
for (i = 0, pgp = pga, off = offset;
i < npages;
- i++, pgp++, off += PAGE_CACHE_SIZE) {
+ i++, pgp++, off += PAGE_SIZE) {
LASSERT(pgp->pg == NULL); /* for cleanup */
goto out;
pages[i] = pgp->pg;
- pgp->count = PAGE_CACHE_SIZE;
+ pgp->count = PAGE_SIZE;
pgp->off = off;
pgp->flag = brw_flags;
ENTRY;
- if (count <= 0 || (count & ~PAGE_CACHE_MASK) != 0)
+ if (count <= 0 || (count & ~PAGE_MASK) != 0)
RETURN(-EINVAL);
- apc = npages = batch >> PAGE_CACHE_SHIFT;
- tot_pages = count >> PAGE_CACHE_SHIFT;
+ apc = npages = batch >> PAGE_SHIFT;
+ tot_pages = count >> PAGE_SHIFT;
OBD_ALLOC(lnb, apc * sizeof(struct niobuf_local));
if (lnb == NULL)
npages = tot_pages;
rnb.rnb_offset = off;
- rnb.rnb_len = npages * PAGE_CACHE_SIZE;
+ rnb.rnb_len = npages * PAGE_SIZE;
rnb.rnb_flags = brw_flags;
ioo.ioo_bufcnt = 1;
- off += npages * PAGE_CACHE_SIZE;
+ off += npages * PAGE_SIZE;
lpages = npages;
ret = obd_preprw(env, rw, exp, oa, 1, &ioo, &rnb, &lpages, lnb);
ENTRY;
LCONSOLE_INFO("Echo OBD driver; http://www.lustre.org/\n");
- LASSERT(PAGE_CACHE_SIZE % OBD_ECHO_BLOCK_SIZE == 0);
+ LASSERT(PAGE_SIZE % OBD_ECHO_BLOCK_SIZE == 0);
# ifdef HAVE_SERVER_SUPPORT
rc = echo_persistent_pages_init();
*
* When ofd_grant_compat_disable is set, we don't grant any space to clients
* not supporting OBD_CONNECT_GRANT_PARAM. Otherwise, space granted to such
- * a client is inflated since it consumes PAGE_CACHE_SIZE of grant space per
+ * a client is inflated since it consumes PAGE_SIZE of grant space per
 * block (i.e. typically 4kB units), but the underlying file system might have
* block size bigger than page size, e.g. ZFS. See LU-2049 for details.
*
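 * Illustrative example (assumed numbers): with 4KiB client pages and a
 * 128KiB ZFS recordsize, a legacy client charges itself only PAGE_SIZE
 * of grant for a dirty page while the OST may consume a full 128KiB
 * block to write it, so the server inflates the grant given to such
 * clients accordingly.
 *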
GOTO(out_unlock, rc);
/* We need page aligned offset and length */
- start_index = start >> PAGE_CACHE_SHIFT;
- end_index = (end - 1) >> PAGE_CACHE_SHIFT;
+ start_index = start >> PAGE_SHIFT;
+ end_index = (end - 1) >> PAGE_SHIFT;
pages = end_index - start_index + 1;
while (pages > 0) {
nr_local = pages <= PTLRPC_MAX_BRW_PAGES ? pages :
PTLRPC_MAX_BRW_PAGES;
- rnb.rnb_offset = start_index << PAGE_CACHE_SHIFT;
- rnb.rnb_len = nr_local << PAGE_CACHE_SHIFT;
+ rnb.rnb_offset = start_index << PAGE_SHIFT;
+ rnb.rnb_len = nr_local << PAGE_SHIFT;
rc = dt_bufs_get(env, ofd_object_child(fo), &rnb, lnb, 0);
if (unlikely(rc < 0))
break;
/* When ofd_grant_compat_disable is set, we don't grant any space to
* clients not supporting OBD_CONNECT_GRANT_PARAM.
* Otherwise, space granted to such a client is inflated since it
- * consumes PAGE_CACHE_SIZE of grant space per block */
+ * consumes PAGE_SIZE of grant space per block */
return !!(ofd_obd(ofd)->obd_self_export != exp &&
!ofd_grant_param_supp(exp) && ofd->ofd_grant_compat_disable);
}
GOTO(out, rc = -EROFS);
#ifdef USE_HEALTH_CHECK_WRITE
- OBD_ALLOC(info->fti_buf.lb_buf, PAGE_CACHE_SIZE);
+ OBD_ALLOC(info->fti_buf.lb_buf, PAGE_SIZE);
if (info->fti_buf.lb_buf == NULL)
GOTO(out, rc = -ENOMEM);
- info->fti_buf.lb_len = PAGE_CACHE_SIZE;
+ info->fti_buf.lb_len = PAGE_SIZE;
info->fti_off = 0;
th = dt_trans_create(&env, ofd->ofd_osd);
}
dt_trans_stop(&env, ofd->ofd_osd, th);
- OBD_FREE(info->fti_buf.lb_buf, PAGE_CACHE_SIZE);
+ OBD_FREE(info->fti_buf.lb_buf, PAGE_SIZE);
	CDEBUG(D_INFO, "write 1 page synchronously for checking io rc %d\n", rc);
#endif
val = cli->cl_dirty_max_pages;
spin_unlock(&cli->cl_loi_list_lock);
- mult = 1 << (20 - PAGE_CACHE_SHIFT);
+ mult = 1 << (20 - PAGE_SHIFT);
return lprocfs_seq_read_frac_helper(m, val, mult);
}
if (rc)
return rc;
- pages_number >>= PAGE_CACHE_SHIFT;
+ pages_number >>= PAGE_SHIFT;
if (pages_number <= 0 ||
- pages_number >= OSC_MAX_DIRTY_MB_MAX << (20 - PAGE_CACHE_SHIFT) ||
+ pages_number >= OSC_MAX_DIRTY_MB_MAX << (20 - PAGE_SHIFT) ||
pages_number > totalram_pages / 4) /* 1/4 of RAM */
return -ERANGE;
{
struct obd_device *dev = m->private;
struct client_obd *cli = &dev->u.cli;
- int shift = 20 - PAGE_CACHE_SHIFT;
+ int shift = 20 - PAGE_SHIFT;
seq_printf(m, "used_mb: %ld\n"
"busy_cnt: %ld\n"
if (rc)
return rc;
- pages_number >>= PAGE_CACHE_SHIFT;
+ pages_number >>= PAGE_SHIFT;
if (pages_number < 0)
return -ERANGE;
struct client_obd *cli = &dev->u.cli;
spin_lock(&cli->cl_loi_list_lock);
- seq_printf(m, "%lu\n", cli->cl_dirty_pages << PAGE_CACHE_SHIFT);
+ seq_printf(m, "%lu\n", cli->cl_dirty_pages << PAGE_SHIFT);
spin_unlock(&cli->cl_loi_list_lock);
return 0;
}
/* if the max_pages is specified in bytes, convert to pages */
if (val >= ONE_MB_BRW_SIZE)
- val >>= PAGE_CACHE_SHIFT;
+ val >>= PAGE_SHIFT;
LPROCFS_CLIMP_CHECK(dev);
- chunk_mask = ~((1 << (cli->cl_chunkbits - PAGE_CACHE_SHIFT)) - 1);
+ chunk_mask = ~((1 << (cli->cl_chunkbits - PAGE_SHIFT)) - 1);
/* max_pages_per_rpc must be chunk aligned */
val = (val + ~chunk_mask) & chunk_mask;
if (val == 0 || (ocd->ocd_brw_size != 0 &&
- val > ocd->ocd_brw_size >> PAGE_CACHE_SHIFT)) {
+ val > ocd->ocd_brw_size >> PAGE_SHIFT)) {
LPROCFS_CLIMP_EXIT(dev);
return -ERANGE;
}
int mb;
pages = atomic_long_read(&cli->cl_unstable_count);
- mb = (pages * PAGE_CACHE_SIZE) >> 20;
+ mb = (pages * PAGE_SIZE) >> 20;
seq_printf(m, "unstable_pages: %20ld\n"
"unstable_mb: %10d\n",
return -ERANGE;
LASSERT(cur->oe_dlmlock == victim->oe_dlmlock);
- ppc_bits = osc_cli(obj)->cl_chunkbits - PAGE_CACHE_SHIFT;
+ ppc_bits = osc_cli(obj)->cl_chunkbits - PAGE_SHIFT;
chunk_start = cur->oe_start >> ppc_bits;
chunk_end = cur->oe_end >> ppc_bits;
if (chunk_start != (victim->oe_end >> ppc_bits) + 1 &&
if (!sent) {
lost_grant = ext->oe_grants;
- } else if (blocksize < PAGE_CACHE_SIZE &&
- last_count != PAGE_CACHE_SIZE) {
+ } else if (blocksize < PAGE_SIZE &&
+ last_count != PAGE_SIZE) {
/* For short writes we shouldn't count parts of pages that
* span a whole chunk on the OST side, or our accounting goes
* wrong. Should match the code in filter_grant_check. */
if (end)
count += blocksize - end;
- lost_grant = PAGE_CACHE_SIZE - count;
+ lost_grant = PAGE_SIZE - count;
}
if (ext->oe_grants > 0)
osc_free_grant(cli, nr_pages, lost_grant, ext->oe_grants);
struct osc_async_page *tmp;
int pages_in_chunk = 0;
int ppc_bits = cli->cl_chunkbits -
- PAGE_CACHE_SHIFT;
+ PAGE_SHIFT;
__u64 trunc_chunk = trunc_index >> ppc_bits;
int grants = 0;
int nr_pages = 0;
if (!(last->oap_async_flags & ASYNC_COUNT_STABLE)) {
int last_oap_count = osc_refresh_count(env, last, OBD_BRW_WRITE);
LASSERT(last_oap_count > 0);
- LASSERT(last->oap_page_off + last_oap_count <= PAGE_CACHE_SIZE);
+ LASSERT(last->oap_page_off + last_oap_count <= PAGE_SIZE);
last->oap_count = last_oap_count;
spin_lock(&last->oap_lock);
last->oap_async_flags |= ASYNC_COUNT_STABLE;
* because it's known they are not the last page */
list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
if (!(oap->oap_async_flags & ASYNC_COUNT_STABLE)) {
- oap->oap_count = PAGE_CACHE_SIZE - oap->oap_page_off;
+ oap->oap_count = PAGE_SIZE - oap->oap_page_off;
spin_lock(&oap->oap_lock);
oap->oap_async_flags |= ASYNC_COUNT_STABLE;
spin_unlock(&oap->oap_lock);
struct osc_object *obj = ext->oe_obj;
struct client_obd *cli = osc_cli(obj);
struct osc_extent *next;
- int ppc_bits = cli->cl_chunkbits - PAGE_CACHE_SHIFT;
+ int ppc_bits = cli->cl_chunkbits - PAGE_SHIFT;
pgoff_t chunk = index >> ppc_bits;
pgoff_t end_chunk;
pgoff_t end_index;
return 0;
else if (cl_offset(obj, index + 1) > kms)
/* catch sub-page write at end of file */
- return kms % PAGE_CACHE_SIZE;
+ return kms % PAGE_SIZE;
else
- return PAGE_CACHE_SIZE;
+ return PAGE_SIZE;
}
static int osc_completion(const struct lu_env *env, struct osc_async_page *oap,
cli->cl_dirty_pages++;
pga->flag |= OBD_BRW_FROM_GRANT;
CDEBUG(D_CACHE, "using %lu grant credits for brw %p page %p\n",
- PAGE_CACHE_SIZE, pga, pga->pg);
+ PAGE_SIZE, pga, pga->pg);
osc_update_next_shrink(cli);
}
* used, we should return these grants to OST. There're two cases where grants
* can be lost:
* 1. truncate;
- * 2. blocksize at OST is less than PAGE_CACHE_SIZE and a partial page was
+ * 2. blocksize at OST is less than PAGE_SIZE and a partial page was
 * written. In this case the OST may use fewer chunks to serve this partial
 * write. OSTs don't actually know the page size on the client side, so
* clients have to calculate lost grant by the blocksize on the OST.
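 *
 * Worked example (illustrative): with 4KiB client pages and a 1KiB OST
 * blocksize, a 100-byte write dirties a whole page of grant on the
 * client but consumes only one 1KiB block on the OST, so the remaining
 * 3KiB of the page's grant is "lost", which is why the client computes
 * lost grant from the OST blocksize.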
spin_unlock(&cli->cl_loi_list_lock);
CDEBUG(D_CACHE, "lost %u grant: %lu avail: %lu dirty: %lu/%lu\n",
lost_grant, cli->cl_lost_grant,
- cli->cl_avail_grant, cli->cl_dirty_pages << PAGE_CACHE_SHIFT,
+ cli->cl_avail_grant, cli->cl_dirty_pages << PAGE_SHIFT,
cli->cl_dirty_grant);
}
if (cl_io_is_append(io))
RETURN(osc_io_iter_init(env, ios));
- npages = io->u.ci_rw.crw_count >> PAGE_CACHE_SHIFT;
+ npages = io->u.ci_rw.crw_count >> PAGE_SHIFT;
if (io->u.ci_rw.crw_pos & ~PAGE_MASK)
++npages;
if (!(fmkey->lfik_fiemap.fm_flags & FIEMAP_FLAG_SYNC))
goto skip_locking;
- policy.l_extent.start = fmkey->lfik_fiemap.fm_start & PAGE_CACHE_MASK;
+ policy.l_extent.start = fmkey->lfik_fiemap.fm_start & PAGE_MASK;
if (OBD_OBJECT_EOF - fmkey->lfik_fiemap.fm_length <=
- fmkey->lfik_fiemap.fm_start + PAGE_CACHE_SIZE - 1)
+ fmkey->lfik_fiemap.fm_start + PAGE_SIZE - 1)
policy.l_extent.end = OBD_OBJECT_EOF;
else
policy.l_extent.end = (fmkey->lfik_fiemap.fm_start +
fmkey->lfik_fiemap.fm_length +
- PAGE_CACHE_SIZE - 1) & PAGE_CACHE_MASK;
+ PAGE_SIZE - 1) & PAGE_MASK;
ostid_build_res_name(&fmkey->lfik_oa.o_oi, &resid);
mode = ldlm_lock_match(exp->exp_obd->obd_namespace,
int result;
opg->ops_from = 0;
- opg->ops_to = PAGE_CACHE_SIZE;
+ opg->ops_to = PAGE_SIZE;
result = osc_prep_async_page(osc, opg, page->cp_vmpage,
cl_offset(obj, index));
if (OCD_HAS_FLAG(&cli->cl_import->imp_connect_data, GRANT_PARAM))
oa->o_dirty = cli->cl_dirty_grant;
else
- oa->o_dirty = cli->cl_dirty_pages << PAGE_CACHE_SHIFT;
+ oa->o_dirty = cli->cl_dirty_pages << PAGE_SHIFT;
if (unlikely(cli->cl_dirty_pages - cli->cl_dirty_transit >
cli->cl_dirty_max_pages)) {
CERROR("dirty %lu - %lu > dirty_max %lu\n",
nrpages = cli->cl_max_pages_per_rpc;
nrpages *= cli->cl_max_rpcs_in_flight + 1;
nrpages = max(nrpages, cli->cl_dirty_max_pages);
- oa->o_undirty = nrpages << PAGE_CACHE_SHIFT;
+ oa->o_undirty = nrpages << PAGE_SHIFT;
if (OCD_HAS_FLAG(&cli->cl_import->imp_connect_data,
GRANT_PARAM)) {
int nrextents;
static int osc_shrink_grant(struct client_obd *cli)
{
__u64 target_bytes = (cli->cl_max_rpcs_in_flight + 1) *
- (cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT);
+ (cli->cl_max_pages_per_rpc << PAGE_SHIFT);
spin_lock(&cli->cl_loi_list_lock);
if (cli->cl_avail_grant <= target_bytes)
- target_bytes = cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT;
+ target_bytes = cli->cl_max_pages_per_rpc << PAGE_SHIFT;
spin_unlock(&cli->cl_loi_list_lock);
return osc_shrink_grant_to_target(cli, target_bytes);
/* Don't shrink if we are already above or below the desired limit
* We don't want to shrink below a single RPC, as that will negatively
* impact block allocation and long-term performance. */
- if (target_bytes < cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT)
- target_bytes = cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT;
+ if (target_bytes < cli->cl_max_pages_per_rpc << PAGE_SHIFT)
+ target_bytes = cli->cl_max_pages_per_rpc << PAGE_SHIFT;
if (target_bytes >= cli->cl_avail_grant) {
spin_unlock(&cli->cl_loi_list_lock);
/* Get the current RPC size directly, instead of going via:
* cli_brw_size(obd->u.cli.cl_import->imp_obd->obd_self_export)
* Keep comment here so that it can be found by searching. */
- int brw_size = client->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT;
+ int brw_size = client->cl_max_pages_per_rpc << PAGE_SHIFT;
if (client->cl_import->imp_state == LUSTRE_IMP_FULL &&
client->cl_avail_grant > brw_size)
cli->cl_avail_grant -= cli->cl_dirty_grant;
else
cli->cl_avail_grant -=
- cli->cl_dirty_pages << PAGE_CACHE_SHIFT;
+ cli->cl_dirty_pages << PAGE_SHIFT;
}
if (cli->cl_avail_grant < 0) {
CWARN("%s: available grant < 0: avail/ocd/dirty %ld/%u/%ld\n",
cli_name(cli), cli->cl_avail_grant,
- ocd->ocd_grant, cli->cl_dirty_pages << PAGE_CACHE_SHIFT);
+ ocd->ocd_grant, cli->cl_dirty_pages << PAGE_SHIFT);
/* workaround for servers which do not have the patch from
* LU-2679 */
cli->cl_avail_grant = ocd->ocd_grant;
LASSERT(pg->count > 0);
/* make sure there is no gap in the middle of page array */
LASSERTF(page_count == 1 ||
- (ergo(i == 0, poff + pg->count == PAGE_CACHE_SIZE) &&
+ (ergo(i == 0, poff + pg->count == PAGE_SIZE) &&
ergo(i > 0 && i < page_count - 1,
- poff == 0 && pg->count == PAGE_CACHE_SIZE) &&
+ poff == 0 && pg->count == PAGE_SIZE) &&
ergo(i == page_count - 1, poff == 0)),
"i: %d/%d pg: %p off: %llu, count: %u\n",
i, page_count, pg, pg->off, pg->count);
oap->oap_count;
else
LASSERT(oap->oap_page_off + oap->oap_count ==
- PAGE_CACHE_SIZE);
+ PAGE_SIZE);
if (oap->oap_interrupted)
interrupted = true;
}
list_splice_init(ext_list, &aa->aa_exts);
spin_lock(&cli->cl_loi_list_lock);
- starting_offset >>= PAGE_CACHE_SHIFT;
+ starting_offset >>= PAGE_SHIFT;
if (cmd == OBD_BRW_READ) {
cli->cl_r_in_flight++;
lprocfs_oh_tally_log2(&cli->cl_read_page_hist, page_count);
if (data->ocd_connect_flags & OBD_CONNECT_GRANT_PARAM)
grant += cli->cl_dirty_grant;
else
- grant += cli->cl_dirty_pages << PAGE_CACHE_SHIFT;
+ grant += cli->cl_dirty_pages << PAGE_SHIFT;
data->ocd_grant = grant ? : 2 * cli_brw_size(obd);
lost_grant = cli->cl_lost_grant;
cli->cl_lost_grant = 0;
/* Glom up mount options */
if (*options != '\0')
strcat(options, ",");
- strlcat(options, "no_mbcache", PAGE_CACHE_SIZE);
+ strlcat(options, "no_mbcache", PAGE_SIZE);
type = get_fs_type("ldiskfs");
if (!type) {
* there would be one ext3 readdir for every mdd readdir page.
*/
-#define OSD_IT_EA_BUFSIZE (PAGE_CACHE_SIZE + PAGE_CACHE_SIZE/4)
+#define OSD_IT_EA_BUFSIZE (PAGE_SIZE + PAGE_SIZE/4)
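With 4 KiB pages this evaluates to 4096 + 1024 = 5120 bytes, i.e. 1.25 pages
of dirent buffer per readdir page.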
/**
* This is iterator's in-memory data structure in interoperability
struct list_head oiq_list;
};
-#define MAX_BLOCKS_PER_PAGE (PAGE_CACHE_SIZE / 512)
+#define MAX_BLOCKS_PER_PAGE (PAGE_SIZE / 512)
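With 4 KiB pages and 512-byte sectors, that is 4096 / 512 = 8 blocks per page.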
struct osd_iobuf {
wait_queue_head_t dr_wait;
iobuf->dr_rw = rw;
iobuf->dr_init_at = line;
- blocks = pages * (PAGE_CACHE_SIZE >> osd_sb(d)->s_blocksize_bits);
+ blocks = pages * (PAGE_SIZE >> osd_sb(d)->s_blocksize_bits);
if (iobuf->dr_bl_buf.lb_len >= blocks * sizeof(iobuf->dr_blocks[0])) {
LASSERT(iobuf->dr_pg_buf.lb_len >=
pages * sizeof(iobuf->dr_pages[0]));
CDEBUG(D_OTHER, "realloc %u for %u (%u) pages\n",
(unsigned)(pages * sizeof(iobuf->dr_pages[0])), i, pages);
pages = i;
- blocks = pages * (PAGE_CACHE_SIZE >> osd_sb(d)->s_blocksize_bits);
+ blocks = pages * (PAGE_SIZE >> osd_sb(d)->s_blocksize_bits);
iobuf->dr_max_pages = 0;
CDEBUG(D_OTHER, "realloc %u for %u blocks\n",
(unsigned)(blocks * sizeof(iobuf->dr_blocks[0])), blocks);
static int osd_do_bio(struct osd_device *osd, struct inode *inode,
struct osd_iobuf *iobuf)
{
- int blocks_per_page = PAGE_CACHE_SIZE >> inode->i_blkbits;
+ int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
struct page **pages = iobuf->dr_pages;
int npages = iobuf->dr_npages;
sector_t *blocks = iobuf->dr_blocks;
*nrpages = 0;
while (len > 0) {
- int poff = offset & (PAGE_CACHE_SIZE - 1);
- int plen = PAGE_CACHE_SIZE - poff;
+ int poff = offset & (PAGE_SIZE - 1);
+ int plen = PAGE_SIZE - poff;
if (plen > len)
plen = len;
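The loop above carves a byte extent into per-page fragments: poff is the
offset within the current page, plen the room left in it. A self-contained
sketch of the same arithmetic (illustrative; the 4 KiB PAGE_SIZE and the
sample extent are assumptions):

#include <stdio.h>

#define PAGE_SIZE 4096UL	/* assumed: 4 KiB pages */

int main(void)
{
	unsigned long offset = 5000, len = 10000;	/* sample extent */

	while (len > 0) {
		unsigned long poff = offset & (PAGE_SIZE - 1);	/* offset in page */
		unsigned long plen = PAGE_SIZE - poff;	/* room left in page */

		if (plen > len)
			plen = len;
		printf("page %lu: poff=%lu plen=%lu\n",
		       offset / PAGE_SIZE, poff, plen);
		offset += plen;
		len -= plen;
	}
	return 0;	/* prints fragments of 3192, 4096, 2712 = 10000 bytes */
}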
LASSERT(inode);
- page = find_or_create_page(inode->i_mapping, offset >> PAGE_CACHE_SHIFT,
+ page = find_or_create_page(inode->i_mapping, offset >> PAGE_SHIFT,
GFP_NOFS | __GFP_HIGHMEM);
if (unlikely(page == NULL))
lprocfs_counter_add(d->od_stats, LPROC_OSD_NO_PAGE, 1);
static int osd_ldiskfs_map_nblocks(struct inode *inode, unsigned long index,
int clen, sector_t *blocks, int create)
{
- int blocks_per_page = PAGE_CACHE_SIZE >> inode->i_blkbits;
+ int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
struct bpointers bp;
int err;
struct page **page, int pages,
sector_t *blocks, int create)
{
- int blocks_per_page = PAGE_CACHE_SIZE >> inode->i_blkbits;
+ int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
pgoff_t bitmap_max_page_index;
sector_t *b;
int rc = 0, i;
/* look for next extent */
fp = NULL;
- blocks += clen * (PAGE_CACHE_SIZE >> inode->i_blkbits);
+ blocks += clen * (PAGE_SIZE >> inode->i_blkbits);
}
if (fp)
int pages, sector_t *blocks,
int create)
{
- int blocks_per_page = PAGE_CACHE_SIZE >> inode->i_blkbits;
+ int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
int rc = 0, i = 0;
struct page *fp = NULL;
int clen = 0;
RETURN(rc);
isize = i_size_read(inode);
- maxidx = ((isize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) - 1;
+ maxidx = ((isize + PAGE_SIZE - 1) >> PAGE_SHIFT) - 1;
if (osd->od_writethrough_cache)
cache = 1;
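The maxidx expression is the usual round-up idiom: for example, with 4 KiB
pages an isize of 10000 bytes gives ((10000 + 4095) >> 12) - 1 = 3 - 1 = 2,
the index of the last page holding file data.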
*/
ClearPageUptodate(lnb[i].lnb_page);
- if (lnb[i].lnb_len == PAGE_CACHE_SIZE)
+ if (lnb[i].lnb_len == PAGE_SIZE)
continue;
if (maxidx >= lnb[i].lnb_page->index) {
off = (lnb[i].lnb_page_offset + lnb[i].lnb_len) &
~PAGE_MASK;
if (off)
- memset(p + off, 0, PAGE_CACHE_SIZE - off);
+ memset(p + off, 0, PAGE_SIZE - off);
kunmap(lnb[i].lnb_page);
}
}
extents++;
if (!osd_is_mapped(dt, lnb[i].lnb_file_offset, &extent))
- quota_space += PAGE_CACHE_SIZE;
+ quota_space += PAGE_SIZE;
/* ignore quota for the whole request if any page is from
* client cache or written by root.
if (unlikely(nr_pages == 0))
return;
- blocks_per_page = PAGE_CACHE_SIZE >> osd_sb(osd)->s_blocksize_bits;
+ blocks_per_page = PAGE_SIZE >> osd_sb(osd)->s_blocksize_bits;
lprocfs_oh_tally_log2(&s->hist[BRW_R_PAGES+rw], nr_pages);
buf->lb_buf, DMU_READ_PREFETCH);
record_end_io(osd, READ, cfs_time_current() - start, size,
- size >> PAGE_CACHE_SHIFT);
+ size >> PAGE_SHIFT);
if (rc == 0) {
rc = size;
*pos += size;
out:
record_end_io(osd, WRITE, 0, buf->lb_len,
- buf->lb_len >> PAGE_CACHE_SHIFT);
+ buf->lb_len >> PAGE_SHIFT);
RETURN(rc);
}
dbf = (void *) ((unsigned long)dbp[i] | 1);
while (tocpy > 0) {
- thispage = PAGE_CACHE_SIZE;
- thispage -= bufoff & (PAGE_CACHE_SIZE - 1);
+ thispage = PAGE_SIZE;
+ thispage -= bufoff & (PAGE_SIZE - 1);
thispage = min(tocpy, thispage);
lnb->lnb_rc = 0;
/* go over the pages the arcbuf contains, and add them as
* local niobufs for ptlrpc's bulks */
while (sz_in_block > 0) {
- plen = min_t(int, sz_in_block, PAGE_CACHE_SIZE);
+ plen = min_t(int, sz_in_block, PAGE_SIZE);
lnb[i].lnb_file_offset = off;
lnb[i].lnb_page_offset = 0;
/* can't use zerocopy, allocate temp. buffers */
while (sz_in_block > 0) {
- plen = min_t(int, sz_in_block, PAGE_CACHE_SIZE);
+ plen = min_t(int, sz_in_block, PAGE_SIZE);
lnb[i].lnb_file_offset = off;
lnb[i].lnb_page_offset = 0;
/* 1MB bulk */
npages = min_t(unsigned int, OFD_MAX_BRW_SIZE, 1 << 20);
- npages /= PAGE_CACHE_SIZE;
+ npages /= PAGE_SIZE;
OBD_ALLOC(pages, npages * sizeof(*pages));
if (pages == NULL)
for (i = 0; i < npages; i++)
desc->bd_frag_ops->add_kiov_frag(desc, pages[i], 0,
- PAGE_CACHE_SIZE);
+ PAGE_SIZE);
ptlrpc_request_set_replen(req);
rc = ptlrpc_queue_wait(req);
GOTO(out, rc = -EPROTO);
npages = (ii->ii_count + LU_PAGE_COUNT - 1) >>
- (PAGE_CACHE_SHIFT - LU_PAGE_SHIFT);
+ (PAGE_SHIFT - LU_PAGE_SHIFT);
if (npages > it->ooi_total_npages) {
CERROR("%s: returned more pages than expected, %u > %u\n",
osp->opd_obd->obd_name, npages, it->ooi_total_npages);
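LU_PAGE_SHIFT is Lustre's fixed 4 KiB on-wire page size, so the shift converts
LU pages to host pages. On a hypothetical 64 KiB-page host (PAGE_SHIFT = 16),
LU_PAGE_COUNT = 16 and an ii_count of 40 LU pages rounds up to
(40 + 15) >> 4 = 3 host pages.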
LASSERT(page != NULL);
LASSERT(pageoffset >= 0);
LASSERT(len > 0);
- LASSERT(pageoffset + len <= PAGE_CACHE_SIZE);
+ LASSERT(pageoffset + len <= PAGE_SIZE);
LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
kiov = &BD_GET_KIOV(desc, desc->bd_iov_count);
if (ocd->ocd_connect_flags & OBD_CONNECT_BRW_SIZE)
cli->cl_max_pages_per_rpc =
- min(ocd->ocd_brw_size >> PAGE_CACHE_SHIFT,
+ min(ocd->ocd_brw_size >> PAGE_SHIFT,
cli->cl_max_pages_per_rpc);
else if (imp->imp_connect_op == MDS_CONNECT ||
imp->imp_connect_op == MGS_CONNECT)
/* This sanity check is more of an insanity check; we can still
* hose a kernel by allowing the request history to grow too
* far. */
- bufpages = (svc->srv_buf_size + PAGE_CACHE_SIZE - 1) >>
- PAGE_CACHE_SHIFT;
+ bufpages = (svc->srv_buf_size + PAGE_SIZE - 1) >>
+ PAGE_SHIFT;
if (val > totalram_pages/(2 * bufpages))
return -ERANGE;
const char prefix[] = "connection=";
const int prefix_len = sizeof(prefix) - 1;
- if (count > PAGE_CACHE_SIZE - 1 || count <= prefix_len)
+ if (count > PAGE_SIZE - 1 || count <= prefix_len)
return -EINVAL;
OBD_ALLOC(kbuf, count + 1);
RETURN(-EINVAL);
rdpg.rp_count = (body->mcb_units << body->mcb_bits);
- rdpg.rp_npages = (rdpg.rp_count + PAGE_CACHE_SIZE - 1) >>
- PAGE_CACHE_SHIFT;
+ rdpg.rp_npages = (rdpg.rp_count + PAGE_SIZE - 1) >>
+ PAGE_SHIFT;
if (rdpg.rp_npages > PTLRPC_MAX_BRW_PAGES)
RETURN(-EINVAL);
res->mcr_offset = nodemap_ii.ii_hash_end;
res->mcr_size = bytes;
- page_count = (bytes + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+ page_count = (bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;
LASSERT(page_count <= rdpg.rp_count);
desc = ptlrpc_prep_bulk_exp(req, page_count, 1,
PTLRPC_BULK_PUT_SOURCE |
for (i = 0; i < page_count && bytes > 0; i++) {
ptlrpc_prep_bulk_page_pin(desc, rdpg.rp_pages[i], 0,
- min_t(int, bytes, PAGE_CACHE_SIZE));
- bytes -= PAGE_CACHE_SIZE;
+ min_t(int, bytes, PAGE_SIZE));
+ bytes -= PAGE_SIZE;
}
rc = target_bulk_io(req->rq_export, desc, &lwi);
}
list_for_each_entry_safe(req, next, &imp->imp_sending_list, rq_list) {
- LASSERTF((long)req > PAGE_CACHE_SIZE && req != LP_POISON,
+ LASSERTF((long)req > PAGE_SIZE && req != LP_POISON,
"req %p bad\n", req);
LASSERTF(req->rq_type != LI_POISON, "req %p freed\n", req);
#include "ptlrpc_internal.h"
-static int mult = 20 - PAGE_CACHE_SHIFT;
+static int mult = 20 - PAGE_SHIFT;
static int enc_pool_max_memory_mb;
module_param(enc_pool_max_memory_mb, int, 0644);
MODULE_PARM_DESC(enc_pool_max_memory_mb,
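mult is the megabyte-to-page shift: with 4 KiB pages it is 20 - 12 = 8, so a
limit of 1 MB corresponds to 1 << 8 = 256 pages.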
****************************************/
-#define PTRS_PER_PAGE (PAGE_CACHE_SIZE / sizeof(void *))
+#define PTRS_PER_PAGE (PAGE_SIZE / sizeof(void *))
#define PAGES_PER_POOL (PTRS_PER_PAGE)
#define IDLE_IDX_MAX (100)
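On a 64-bit kernel with 4 KiB pages, PTRS_PER_PAGE is 4096 / 8 = 512, so one
pool page holds pointers to 512 pages, i.e. each pool covers 2 MiB.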
/* free unused pools */
while (p_idx_max1 < p_idx_max2) {
LASSERT(page_pools.epp_pools[p_idx_max2]);
- OBD_FREE(page_pools.epp_pools[p_idx_max2], PAGE_CACHE_SIZE);
+ OBD_FREE(page_pools.epp_pools[p_idx_max2], PAGE_SIZE);
page_pools.epp_pools[p_idx_max2] = NULL;
p_idx_max2--;
}
cleaned++;
}
}
- OBD_FREE(pools[i], PAGE_CACHE_SIZE);
+ OBD_FREE(pools[i], PAGE_SIZE);
pools[i] = NULL;
}
}
goto out;
for (i = 0; i < npools; i++) {
- OBD_ALLOC(pools[i], PAGE_CACHE_SIZE);
+ OBD_ALLOC(pools[i], PAGE_SIZE);
if (pools[i] == NULL)
goto out_pools;
/* let's do a 1MB bulk */
npages = min_t(unsigned int, OFD_MAX_BRW_SIZE, 1 << 20);
- npages /= PAGE_CACHE_SIZE;
+ npages /= PAGE_SIZE;
/* allocate pages for bulk index read */
OBD_ALLOC(pages, npages * sizeof(*pages));
ver = ii->ii_version;
pg_cnt = (ii->ii_count + (LU_PAGE_COUNT) - 1);
- pg_cnt >>= PAGE_CACHE_SHIFT - LU_PAGE_SHIFT;
+ pg_cnt >>= PAGE_SHIFT - LU_PAGE_SHIFT;
if (pg_cnt > npages) {
CERROR("%s: master returned more pages than expected, %u > %u"
/* req now owns desc and will free it when it gets freed */
for (i = 0; i < npages; i++)
desc->bd_frag_ops->add_kiov_frag(desc, pages[i], 0,
- PAGE_CACHE_SIZE);
+ PAGE_SIZE);
/* pack index information in request */
req_ii = req_capsule_client_get(&req->rq_pill, &RMF_IDX_INFO);
RETURN(-ENOMEM);
if (!(exp_connect_flags(exp) & OBD_CONNECT_BRW_SIZE))
- /* old client requires reply size in it's PAGE_CACHE_SIZE,
+ /* old client requires reply size in its PAGE_SIZE,
* which is rdpg->rp_count */
nob = rdpg->rp_count;
for (i = 0, tmpcount = nob; i < rdpg->rp_npages && tmpcount > 0;
i++, tmpcount -= tmpsize) {
- tmpsize = min_t(int, tmpcount, PAGE_CACHE_SIZE);
+ tmpsize = min_t(int, tmpcount, PAGE_SIZE);
desc->bd_frag_ops->add_kiov_frag(desc, rdpg->rp_pages[i], 0,
tmpsize);
}
GOTO(out, rc = -EFAULT);
rdpg->rp_count = min_t(unsigned int, req_ii->ii_count << LU_PAGE_SHIFT,
exp_max_brw_size(tsi->tsi_exp));
- rdpg->rp_npages = (rdpg->rp_count + PAGE_CACHE_SIZE -1) >> PAGE_CACHE_SHIFT;
+ rdpg->rp_npages = (rdpg->rp_count + PAGE_SIZE - 1) >> PAGE_SHIFT;
/* allocate pages to store the containers */
OBD_ALLOC(rdpg->rp_pages, rdpg->rp_npages * sizeof(rdpg->rp_pages[0]));