types.h \
user-bitops.h \
user-crypto.h \
- user-mem.h \
user-prim.h \
user-time.h
# include <sys/types.h>
# include <libcfs/user-time.h>
# include <libcfs/user-prim.h>
-# include <libcfs/user-mem.h>
# include <libcfs/user-bitops.h>
#endif /* __KERNEL__ */
/* Dump a debug log */
void lc_watchdog_dumplog(pid_t pid, void *data);
-#else /* !__KERNEL__ */
-#include <unistd.h>
-#ifndef PAGE_SIZE
-#define PAGE_SIZE sysconf(_SC_PAGESIZE)
-#endif
-#endif /* !__KERNEL__ */
+#endif /* __KERNEL__ */
/* need both kernel and user-land acceptor */
#define LNET_ACCEPTOR_MIN_RESERVED_PORT 512
#include <libcfs/libcfs_debug.h>
#include <libcfs/libcfs_private.h>
#include <libcfs/bitmap.h>
-#include <libcfs/libcfs_cpu.h>
#ifdef __KERNEL__
+# include <libcfs/libcfs_cpu.h>
# include <libcfs/libcfs_ioctl.h>
+# include <libcfs/libcfs_prim.h>
#endif /* __KERNEL__ */
-#include <libcfs/libcfs_prim.h>
#include <libcfs/libcfs_time.h>
#ifdef __KERNEL__
# include <libcfs/libcfs_string.h>
int cfs_cpu_ht_nsiblings(int cpu);
/**
+ * allocate \a nr_bytes of physically contiguous memory with the properties
+ * of \a flags, bound to the partition id \a cpt. This function should only
+ * be used when just a few pages of memory are needed.
+ */
+static inline void *
+cfs_cpt_malloc(struct cfs_cpt_table *cptab, int cpt, size_t nr_bytes,
+ gfp_t flags)
+{
+ return kmalloc_node(nr_bytes, flags,
+ cfs_cpt_spread_node(cptab, cpt));
+}
+
+/**
+ * allocate \a nr_bytes of virtually contiguous memory that is bound to the
+ * partition id \a cpt.
+ */
+static inline void *
+cfs_cpt_vzalloc(struct cfs_cpt_table *cptab, int cpt, size_t nr_bytes)
+{
+ /* vzalloc_node() sets __GFP_FS by default, and no entry point
+ * currently exported by the kernel allows both a NUMA node
+ * specification and a custom allocation flags mask. This may be an
+ * issue, since __GFP_FS can lead to deadlocks in our code, e.g.
+ * when memory reclaim is started within the context of a thread
+ * that is already doing FS operations and then attempts conflicting
+ * FS operations itself.
+ */
+ return vzalloc_node(nr_bytes, cfs_cpt_spread_node(cptab, cpt));
+}
+
+/**
+ * allocate a single page of memory with the properties of \a flags, where
+ * the page is bound to the partition id \a cpt.
+ */
+static inline struct page *
+cfs_page_cpt_alloc(struct cfs_cpt_table *cptab, int cpt, gfp_t flags)
+{
+ return alloc_pages_node(cfs_cpt_spread_node(cptab, cpt), flags, 0);
+}
+
+/**
+ * allocate a chunk of memory from a memory pool that is bound to the
+ * partition id \a cpt with the properties of \a flags.
+ */
+static inline void *
+cfs_mem_cache_cpt_alloc(struct kmem_cache *cachep, struct cfs_cpt_table *cptab,
+ int cpt, gfp_t flags)
+{
+ return kmem_cache_alloc_node(cachep, flags,
+ cfs_cpt_spread_node(cptab, cpt));
+}
+
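For illustration, a minimal sketch of how the CPT-aware allocators above pair with the standard kernel free routines; the struct and function names here are hypothetical:

    struct foo {
            int bar;
    };

    static int foo_alloc_on_cpt(struct cfs_cpt_table *cptab, int cpt)
    {
            struct foo *f;
            struct page *pg;

            /* small allocation: kmalloc_node() underneath, freed with kfree() */
            f = cfs_cpt_malloc(cptab, cpt, sizeof(*f), GFP_NOFS);
            if (f == NULL)
                    return -ENOMEM;

            /* one page spread onto the partition's node, freed with __free_page() */
            pg = cfs_page_cpt_alloc(cptab, cpt, GFP_NOFS);
            if (pg == NULL) {
                    kfree(f);
                    return -ENOMEM;
            }

            __free_page(pg);
            kfree(f);
            return 0;
    }
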
+/**
* iterate over all CPU partitions in \a cptab
*/
#define cfs_cpt_for_each(i, cptab) \
#ifndef __LIBCFS_PRIM_H__
#define __LIBCFS_PRIM_H__
-/*
- * Wait Queues
- */
+#include <linux/sched.h>
+
/*
* Timer
*/
/*
* Memory
*/
+#if BITS_PER_LONG == 32
+/* limit to lowmem on 32-bit systems */
+# define NUM_CACHEPAGES \
+ min(totalram_pages, 1UL << (30 - PAGE_CACHE_SHIFT) * 3 / 4)
+#else
+# define NUM_CACHEPAGES totalram_pages
+#endif
+
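Note the operator precedence in the 32-bit branch: '*' and '/' bind tighter than '<<', so the 3/4 factor scales the shift amount, not the page count. A worked expansion for 4 KiB pages:

    /* PAGE_CACHE_SHIFT == 12:
     *   (30 - 12) * 3 / 4  ==  54 / 4  ==  13    (integer division)
     *   1UL << 13          ==  8192 pages  ==  32 MiB
     */
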
+static inline unsigned int memory_pressure_get(void)
+{
+ return current->flags & PF_MEMALLOC;
+}
+
+static inline void memory_pressure_set(void)
+{
+ current->flags |= PF_MEMALLOC;
+}
+
+static inline void memory_pressure_clr(void)
+{
+ current->flags &= ~PF_MEMALLOC;
+}
+
static inline int cfs_memory_pressure_get_and_set(void)
{
int old = memory_pressure_get();
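A minimal sketch of how these helpers are meant to be used, wrapping an allocation that must not recurse into filesystem reclaim (the function name is hypothetical):

    static void *alloc_with_memalloc(size_t size)
    {
            unsigned int old = memory_pressure_get();
            void *ptr;

            /* PF_MEMALLOC lets the allocator dip into emergency reserves */
            memory_pressure_set();
            ptr = kmalloc(size, GFP_NOFS);

            /* restore the caller's PF_MEMALLOC state */
            if (!old)
                    memory_pressure_clr();
            return ptr;
    }
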
#include <linux/backing-dev.h>
#include <linux/posix_acl_xattr.h>
-#define filp_size(f) \
- (i_size_read((f)->f_dentry->d_inode))
-#define filp_poff(f) \
- (&(f)->f_pos)
-
-#define filp_read(fp, buf, size, pos) \
- ((fp)->f_op->read((fp), (buf), (size), pos))
-
-#define filp_write(fp, buf, size, pos) \
- ((fp)->f_op->write((fp), (buf), (size), pos))
-
#if defined(HAVE_FILE_FSYNC_4ARGS) || defined(HAVE_FILE_FSYNC_2ARGS)
#define ll_vfs_fsync_range(fp, start, end, datasync) \
vfs_fsync_range(fp, start, end, datasync)
# include <linux/mm_inline.h>
#endif
-#ifndef HAVE_LIBCFS_CPT
-/* Need this for cfs_cpt_table */
-#include <libcfs/libcfs_cpu.h>
-#endif
-
-#define CFS_PAGE_MASK (~((__u64)PAGE_CACHE_SIZE-1))
-
-#define page_index(p) ((p)->index)
-
-
-#define memory_pressure_get() (current->flags & PF_MEMALLOC)
-#define memory_pressure_set() do { current->flags |= PF_MEMALLOC; } while (0)
-#define memory_pressure_clr() do { current->flags &= ~PF_MEMALLOC; } while (0)
-
-#if BITS_PER_LONG == 32
-/* limit to lowmem on 32-bit systems */
-#define NUM_CACHEPAGES \
- min(totalram_pages, 1UL << (30 - PAGE_CACHE_SHIFT) * 3 / 4)
-#else
-#define NUM_CACHEPAGES totalram_pages
-#endif
-
-/*
- * In Linux there is no way to determine whether current execution context is
- * blockable.
- */
-#define ALLOC_ATOMIC_TRY GFP_ATOMIC
/* GFP_IOFS was added in 2.6.33 kernel */
#ifndef GFP_IOFS
#define GFP_IOFS (__GFP_IO | __GFP_FS)
#endif
-#define DECL_MMSPACE mm_segment_t __oldfs
-#define MMSPACE_OPEN \
- do { __oldfs = get_fs(); set_fs(get_ds());} while(0)
-#define MMSPACE_CLOSE set_fs(__oldfs)
-
-
-extern void *cfs_cpt_malloc(struct cfs_cpt_table *cptab, int cpt,
- size_t nr_bytes, gfp_t flags);
-extern void *cfs_cpt_vzalloc(struct cfs_cpt_table *cptab, int cpt,
- size_t nr_bytes);
-extern struct page *cfs_page_cpt_alloc(struct cfs_cpt_table *cptab,
- int cpt, gfp_t flags);
-extern void *cfs_mem_cache_cpt_alloc(struct kmem_cache *cachep,
- struct cfs_cpt_table *cptab,
- int cpt, gfp_t flags);
-
/*
* Shrinker
*/
+++ /dev/null
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License version 2 for more details. A copy is
- * included in the COPYING file that accompanied this code.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2012, 2013, Intel Corporation.
- */
-
-#ifndef __LIBCFS_USER_MEM_H__
-#define __LIBCFS_USER_MEM_H__
-
-#ifndef __LIBCFS_LIBCFS_H__
-#error Do not #include this file directly. #include <libcfs/libcfs.h> instead
-#endif
-
-#ifdef __KERNEL__
-#error "This is only for user space."
-#endif
-
-
-/* XXX
- * for this moment, liblusre will not rely OST for non-page-aligned write
- */
-#define LIBLUSTRE_HANDLE_UNALIGNED_PAGE
-
-struct page {
- void *addr;
- unsigned long index;
- struct list_head list;
- unsigned long private;
-
- /* internally used by liblustre file i/o */
- int _offset;
- int _count;
-#ifdef LIBLUSTRE_HANDLE_UNALIGNED_PAGE
- int _managed;
-#endif
- struct list_head _node;
-};
-
-
-/* 4K */
-#define PAGE_CACHE_SHIFT 12
-#define PAGE_CACHE_SIZE (1UL << PAGE_CACHE_SHIFT)
-#define CFS_PAGE_MASK (~((__u64)PAGE_CACHE_SIZE-1))
-
-struct page *alloc_page(unsigned int flags);
-void __free_page(struct page *pg);
-void *page_address(struct page *pg);
-void *kmap(struct page *pg);
-void kunmap(struct page *pg);
-
-#define get_page(p) __I_should_not_be_called__(at_all)
-#define page_count(p) __I_should_not_be_called__(at_all)
-#define page_index(p) ((p)->index)
-#define page_cache_get(page) do { } while (0)
-#define page_cache_release(page) do { } while (0)
-
-#define inc_zone_page_state(page, state) do {} while (0)
-#define dec_zone_page_state(page, state) do {} while (0)
-
-/*
- * Memory allocator
- * Inline function, so utils can use them without linking of libcfs
- */
-
-/*
- * Universal memory allocator API
- */
-enum cfs_alloc_flags {
- /* allocation is not allowed to block */
- GFP_ATOMIC = 0x1,
- /* allocation is allowed to block */
- __GFP_WAIT = 0x2,
- /* allocation should return zeroed memory */
- __GFP_ZERO = 0x4,
- /* allocation is allowed to call file-system code to free/clean
- * memory */
- __GFP_FS = 0x8,
- /* allocation is allowed to do io to free/clean memory */
- __GFP_IO = 0x10,
- /* don't report allocation failure to the console */
- __GFP_NOWARN = 0x20,
- /* standard allocator flag combination */
- GFP_IOFS = __GFP_FS | __GFP_IO,
- GFP_USER = __GFP_WAIT | __GFP_FS | __GFP_IO,
- GFP_NOFS = __GFP_WAIT | __GFP_IO,
- GFP_KERNEL = __GFP_WAIT | __GFP_IO | __GFP_FS,
-};
-
-/* flags for cfs_page_alloc() in addition to enum cfs_alloc_flags */
-enum cfs_alloc_page_flags {
- /* allow to return page beyond KVM. It has to be mapped into KVM by
- * kmap() and unmapped with kunmap(). */
- __GFP_HIGHMEM = 0x40,
- GFP_HIGHUSER = __GFP_WAIT | __GFP_FS | __GFP_IO |
- __GFP_HIGHMEM,
-};
-
-static inline void *kmalloc(size_t nr_bytes, u_int32_t flags)
-{
- void *result;
-
- result = malloc(nr_bytes);
- if (result != NULL && (flags & __GFP_ZERO))
- memset(result, 0, nr_bytes);
- return result;
-}
-
-#define kfree(addr) free(addr)
-#define vmalloc(nr_bytes) kmalloc(nr_bytes, 0)
-#define vfree(addr) free(addr)
-
-#define ALLOC_ATOMIC_TRY (0)
-/*
- * SLAB allocator
- */
-struct kmem_cache {
- int size;
-};
-
-#define SLAB_HWCACHE_ALIGN 0
-#define SLAB_DESTROY_BY_RCU 0
-#define SLAB_KERNEL 0
-#define SLAB_NOFS 0
-
-#define memory_pressure_get() (0)
-#define memory_pressure_set() do {} while (0)
-#define memory_pressure_clr() do {} while (0)
-
-struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
- unsigned long, void *);
-void kmem_cache_destroy(struct kmem_cache *c);
-void *kmem_cache_alloc(struct kmem_cache *c, int gfp);
-void kmem_cache_free(struct kmem_cache *c, void *addr);
-int kmem_is_in_cache(const void *addr, const struct kmem_cache *kmem);
-
-/*
- * NUMA allocators
- */
-#define cfs_cpt_malloc(cptab, cpt, bytes, flags) \
- kmalloc(bytes, flags)
-#define cfs_cpt_vmalloc(cptab, cpt, bytes) \
- kmalloc(bytes)
-#define cfs_page_cpt_alloc(cptab, cpt, mask) \
- alloc_page(mask)
-#define cfs_mem_cache_cpt_alloc(cache, cptab, cpt, gfp) \
- kmem_cache_alloc(cache, gfp)
-
-#define smp_rmb() do {} while (0)
-
-/*
- * Copy to/from user
- */
-static inline int copy_from_user(void *a, void *b, int c)
-{
- memcpy(a, b, c);
- return 0;
-}
-
-static inline int copy_to_user(void *a, void *b, int c)
-{
- memcpy(a,b,c);
- return 0;
-}
-
-#endif
MODULES = libcfs
libcfs-linux-objs := linux-tracefile.o linux-debug.o
-libcfs-linux-objs += linux-prim.o linux-mem.o linux-cpu.o
+libcfs-linux-objs += linux-prim.o linux-cpu.o
libcfs-linux-objs += linux-proc.o linux-curproc.o
libcfs-linux-objs += linux-utils.o linux-module.o
libcfs-linux-objs += linux-crypto.o linux-crypto-adler.o
EXTRA_DIST = linux-debug.c linux-prim.c linux-tracefile.c \
- linux-mem.c linux-proc.c linux-utils.c \
- linux-module.c linux-curproc.c linux-cpu.c \
+ linux-proc.c linux-utils.c linux-module.c linux-cpu.c \
+ linux-curproc.c \
linux-crypto.c linux-crypto-crc32.c linux-crypto-adler.c\
linux-crypto-crc32pclmul.c linux-crypto-crc32c-pclmul.c \
crc32-pclmul_asm.S crc32c-pcl-intel-asm_64.S inst.h
struct scatterlist sl;
sg_init_table(&sl, 1);
- sg_set_page(&sl, page, len, offset & ~CFS_PAGE_MASK);
+ sg_set_page(&sl, page, len, offset & ~PAGE_MASK);
return crypto_hash_update((struct hash_desc *)hdesc, &sl, sl.length);
}
+++ /dev/null
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, 2014, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-#define DEBUG_SUBSYSTEM S_LNET
-
-#include <linux/mm.h>
-#include <linux/vmalloc.h>
-#include <linux/slab.h>
-#include <linux/highmem.h>
-#include <libcfs/libcfs.h>
-
-void *
-cfs_cpt_malloc(struct cfs_cpt_table *cptab, int cpt,
- size_t nr_bytes, gfp_t flags)
-{
- return kmalloc_node(nr_bytes, flags,
- cfs_cpt_spread_node(cptab, cpt));
-}
-EXPORT_SYMBOL(cfs_cpt_malloc);
-
-void *
-cfs_cpt_vzalloc(struct cfs_cpt_table *cptab, int cpt, size_t nr_bytes)
-{
- /* vzalloc_node() sets __GFP_FS by default but no current Kernel
- * exported entry-point allows for both a NUMA node specification
- * and a custom allocation flags mask. This may be an issue since
- * __GFP_FS usage can cause some deadlock situations in our code,
- * like when memory reclaim started, within the same context of a
- * thread doing FS operations, that can also attempt conflicting FS
- * operations, ...
- */
- return vzalloc_node(nr_bytes, cfs_cpt_spread_node(cptab, cpt));
-}
-EXPORT_SYMBOL(cfs_cpt_vzalloc);
-
-struct page *
-cfs_page_cpt_alloc(struct cfs_cpt_table *cptab, int cpt, gfp_t flags)
-{
- return alloc_pages_node(cfs_cpt_spread_node(cptab, cpt), flags, 0);
-}
-EXPORT_SYMBOL(cfs_page_cpt_alloc);
-
-void *
-cfs_mem_cache_cpt_alloc(struct kmem_cache *cachep, struct cfs_cpt_table *cptab,
- int cpt, gfp_t flags)
-{
- return kmem_cache_alloc_node(cachep, flags,
- cfs_cpt_spread_node(cptab, cpt));
-}
-EXPORT_SYMBOL(cfs_mem_cache_cpt_alloc);
static atomic_t cfs_tage_allocated = ATOMIC_INIT(0);
+#define filp_size(f) (i_size_read((f)->f_dentry->d_inode))
+
static void put_pages_on_tcd_daemon_list(struct page_collection *pc,
struct cfs_trace_cpu_data *tcd);
struct file *filp;
struct cfs_trace_page *tage;
struct cfs_trace_page *tmp;
+ mm_segment_t __oldfs;
int rc;
- DECL_MMSPACE;
-
cfs_tracefile_write_lock();
filp = filp_open(filename, O_CREAT|O_EXCL|O_WRONLY|O_LARGEFILE, 0600);
rc = 0;
goto close;
}
- /* ok, for now, just write the pages. in the future we'll be building
- * iobufs with the pages and calling generic_direct_IO */
- MMSPACE_OPEN;
+ __oldfs = get_fs();
+ set_fs(get_ds());
+ /* ok, for now, just write the pages. in the future we'll be building
+ * iobufs with the pages and calling generic_direct_IO */
list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
- __LASSERT_TAGE_INVARIANT(tage);
+ __LASSERT_TAGE_INVARIANT(tage);
- rc = filp_write(filp, page_address(tage->page),
- tage->used, filp_poff(filp));
+ rc = vfs_write(filp, page_address(tage->page), tage->used,
+ &filp->f_pos);
if (rc != (int)tage->used) {
printk(KERN_WARNING "wanted to write %u but wrote "
"%d\n", tage->used, rc);
list_del(&tage->linkage);
cfs_tage_free(tage);
}
- MMSPACE_CLOSE;
+ set_fs(__oldfs);
rc = ll_vfs_fsync_range(filp, 0, LLONG_MAX, 1);
if (rc)
printk(KERN_ERR "sync returns %d\n", rc);
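The open-coded get_fs()/set_fs() sequence above replaces the removed MMSPACE_* macros; the general pattern for issuing vfs_write() on a kernel buffer looks like this (a standalone sketch, names are placeholders):

    static int write_kernel_buf(struct file *filp, void *buf, size_t len)
    {
            mm_segment_t oldfs = get_fs();
            ssize_t rc;

            /* widen the address limit so vfs_write() accepts a kernel pointer */
            set_fs(get_ds());
            rc = vfs_write(filp, (const char __user *)buf, len, &filp->f_pos);
            set_fs(oldfs);

            return rc < 0 ? (int)rc : 0;
    }
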
struct tracefiled_ctl *tctl = arg;
struct cfs_trace_page *tage;
struct cfs_trace_page *tmp;
+ mm_segment_t __oldfs;
struct file *filp;
int last_loop = 0;
int rc;
- DECL_MMSPACE;
-
/* we're started late enough that we pick up init's fs context */
/* this is so broken in uml? what on earth is going on? */
__LASSERT(list_empty(&pc.pc_pages));
goto end_loop;
}
-
- MMSPACE_OPEN;
+ __oldfs = get_fs();
+ set_fs(get_ds());
list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
static loff_t f_pos;
else if (f_pos > (off_t)filp_size(filp))
f_pos = filp_size(filp);
- rc = filp_write(filp, page_address(tage->page),
- tage->used, &f_pos);
+ rc = vfs_write(filp, page_address(tage->page),
+ tage->used, &f_pos);
if (rc != (int)tage->used) {
printk(KERN_WARNING "wanted to write %u "
"but wrote %d\n", tage->used, rc);
break;
}
}
- MMSPACE_CLOSE;
+ set_fs(__oldfs);
filp_close(filp, NULL);
put_pages_on_daemon_list(&pc);
if ((reqstmsg->msg_ses_feats & LST_FEAT_BULK_LEN) == 0) {
/* compat with old version */
- if ((reqst->brw_len & ~CFS_PAGE_MASK) != 0) {
+ if ((reqst->brw_len & ~PAGE_MASK) != 0) {
reply->brw_status = EINVAL;
return 0;
}
#define _GNU_SOURCE
#endif
+#include <unistd.h>
#include <libcfs/libcfs.h>
#include <libcfs/util/ioctl.h>
#include <lnet/lnetctl.h>
struct vm_area_struct *vma, unsigned long addr,
size_t count)
{
- policy->l_extent.start = ((addr - vma->vm_start) & CFS_PAGE_MASK) +
+ policy->l_extent.start = ((addr - vma->vm_start) & PAGE_MASK) +
(vma->vm_pgoff << PAGE_CACHE_SHIFT);
policy->l_extent.end = (policy->l_extent.start + count - 1) |
- ~CFS_PAGE_MASK;
+ ~PAGE_MASK;
}
struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr,
ENTRY;
/* FIXME: io smaller than PAGE_SIZE is broken on ia64 ??? */
- if ((file_offset & ~CFS_PAGE_MASK) || (count & ~CFS_PAGE_MASK))
+ if ((file_offset & ~PAGE_MASK) || (count & ~PAGE_MASK))
RETURN(-EINVAL);
CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), size=%lu (max %lu), "
/* Check that all user buffers are aligned as well */
for (seg = 0; seg < nr_segs; seg++) {
- if (((unsigned long)iov[seg].iov_base & ~CFS_PAGE_MASK) ||
- (iov[seg].iov_len & ~CFS_PAGE_MASK))
+ if (((unsigned long)iov[seg].iov_base & ~PAGE_MASK) ||
+ (iov[seg].iov_len & ~PAGE_MASK))
RETURN(-EINVAL);
}
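These CFS_PAGE_MASK to PAGE_MASK conversions are behavior-preserving because the kernel defines PAGE_CACHE_SIZE as PAGE_SIZE, so CFS_PAGE_MASK (~((__u64)PAGE_CACHE_SIZE - 1)) and the kernel's PAGE_MASK select the same bits. A quick illustration with 4 KiB pages:

    /* PAGE_SIZE == 4096, so PAGE_MASK == ~0xFFFUL:
     *   addr               ==  0x12345
     *   addr &  PAGE_MASK  ==  0x12000   (page-aligned base)
     *   addr & ~PAGE_MASK  ==  0x00345   (offset within the page)
     * hence (addr & ~PAGE_MASK) != 0 tests for misalignment, exactly
     * as the old CFS_PAGE_MASK checks did.
     */
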
size > (PAGE_CACHE_SIZE / sizeof(*pages)) *
PAGE_CACHE_SIZE) {
size = ((((size / 2) - 1) |
- ~CFS_PAGE_MASK) + 1) &
- CFS_PAGE_MASK;
+ ~PAGE_CACHE_MASK) + 1) &
+ PAGE_CACHE_MASK;
CDEBUG(D_VFSTRACE,"DIO size now %lu\n",
size);
continue;
if (count == 0)
continue;
- count += addr & (~CFS_PAGE_MASK);
- addr &= CFS_PAGE_MASK;
+ count += addr & ~PAGE_MASK;
+ addr &= PAGE_MASK;
down_read(&mm->mmap_sem);
while((vma = our_vma(mm, addr, count)) != NULL) {
/* size fixup */
if (last_index == vvp_index(vpg))
- to = size & ~CFS_PAGE_MASK;
+ to = size & ~PAGE_MASK;
/* Do not set Dirty bit here so that in case IO is
* started before the page is really made dirty, we
* | |
* .|--------v------- -----.
* |s|e|f|p|ent|ent| ... |ent|
- * '--|-------------- -----' Each CFS_PAGE contains a single
+ * '--|-------------- -----' Each PAGE contains a single
* '------. lu_dirpage.
* .---------v------- -----.
* |s|e|f|p|ent| 0 | ... | 0 |
* larger than LU_PAGE_SIZE, a single host page may contain multiple
* lu_dirpages. After reading the lu_dirpages from the MDS, the
* ldp_hash_end of the first lu_dirpage refers to the one immediately
- * after it in the same CFS_PAGE (arrows simplified for brevity, but
+ * after it in the same PAGE (arrows simplified for brevity, but
* in general e0==s1, e1==s2, etc.):
*
* .-------------------- -----.
* |s0|e0|f0|p|ent|ent| ... |ent|
* |---v---------------- -----|
* |s1|e1|f1|p|ent|ent| ... |ent|
- * |---v---------------- -----| Here, each CFS_PAGE contains
+ * |---v---------------- -----| Here, each PAGE contains
* ... multiple lu_dirpages.
* |---v---------------- -----|
* |s'|e'|f'|p|ent|ent| ... |ent|
* '---|---------------- -----'
* v
* .----------------------------.
- * | next CFS_PAGE |
+ * | next PAGE |
*
* This structure is transformed into a single logical lu_dirpage as follows:
*
* - Replace e0 with e' so the request for the next lu_dirpage gets the page
- * labeled 'next CFS_PAGE'.
+ * labeled 'next PAGE'.
*
* - Copy the LDF_COLLIDE flag from f' to f0 to correctly reflect whether
* a hash collision with the next page exists.
/* Advance dp to next lu_dirpage. */
dp = (struct lu_dirpage *)((char *)dp + LU_PAGE_SIZE);
- /* Check if we've reached the end of the CFS_PAGE. */
- if (!((unsigned long)dp & ~CFS_PAGE_MASK))
+ /* Check if we've reached the end of the PAGE. */
+ if (!((unsigned long)dp & ~PAGE_MASK))
break;
/* Save the hash and flags of this lu_dirpage. */
* a header lu_dirpage which describes the start/end hash, and whether this
* page is empty (contains no dir entry) or hash collide with next page.
* After client receives reply, several pages will be integrated into dir page
- * in CFS_PAGE_SIZE (if CFS_PAGE_SIZE greater than LU_PAGE_SIZE), and the
+ * in PAGE_SIZE (if PAGE_SIZE greater than LU_PAGE_SIZE), and the
* lu_dirpage for this integrated page will be adjusted.
**/
static int mdc_read_page_remote(void *data, struct page *page0)
CWARN("LPD64 wrong length! strlen(%s)=%d != 2\n", buf, len);
ret = -EINVAL;
}
- if ((u64val & ~CFS_PAGE_MASK) >= PAGE_CACHE_SIZE) {
+ if ((u64val & ~PAGE_CACHE_MASK) >= PAGE_CACHE_SIZE) {
CWARN("mask failed: u64val "LPU64" >= "LPU64"\n", u64val,
(__u64)PAGE_CACHE_SIZE);
ret = -EINVAL;
echo_page_debug_setup(struct page *page, int rw, u64 id,
__u64 offset, int len)
{
- int page_offset = offset & ~CFS_PAGE_MASK;
+ int page_offset = offset & ~PAGE_MASK;
char *addr = ((char *)kmap(page)) + page_offset;
if (len % OBD_ECHO_BLOCK_SIZE != 0)
echo_page_debug_check(struct page *page, u64 id,
__u64 offset, int len)
{
- int page_offset = offset & ~CFS_PAGE_MASK;
+ int page_offset = offset & ~PAGE_MASK;
char *addr = ((char *)kmap(page)) + page_offset;
int rc = 0;
int rc2;
res->lnb_file_offset = offset;
res->lnb_len = plen;
- LASSERT((res->lnb_file_offset & ~CFS_PAGE_MASK) +
+ LASSERT((res->lnb_file_offset & ~PAGE_MASK) +
res->lnb_len <= PAGE_CACHE_SIZE);
if (ispersistent &&
int i;
ENTRY;
- LASSERT((offset & ~CFS_PAGE_MASK) == 0);
+ LASSERT((offset & ~PAGE_MASK) == 0);
LASSERT(ed->ed_next != NULL);
env = cl_env_get(&refcheck);
if (IS_ERR(env))
LASSERT(rw == OBD_BRW_WRITE || rw == OBD_BRW_READ);
- if ((count & (~CFS_PAGE_MASK)) != 0)
+ if ((count & (~PAGE_MASK)) != 0)
RETURN(-EINVAL);
/* XXX think again with misaligned I/O */
/* For short writes we shouldn't count parts of pages that
* span a whole chunk on the OST side, or our accounting goes
* wrong. Should match the code in filter_grant_check. */
- int offset = last_off & ~CFS_PAGE_MASK;
+ int offset = last_off & ~PAGE_MASK;
int count = last_count + (offset & (blocksize - 1));
int end = (offset + last_count) & (blocksize - 1);
if (end)
oap->oap_page = page;
oap->oap_obj_off = offset;
- LASSERT(!(offset & ~CFS_PAGE_MASK));
+ LASSERT(!(offset & ~PAGE_MASK));
if (!client_is_remote(exp) && cfs_capable(CFS_CAP_SYS_RESOURCE))
oap->oap_brw_flags = OBD_BRW_NOQUOTA;
RETURN(0);
npages = io->u.ci_rw.crw_count >> PAGE_CACHE_SHIFT;
- if (io->u.ci_rw.crw_pos & ~CFS_PAGE_MASK)
+ if (io->u.ci_rw.crw_pos & ~PAGE_MASK)
++npages;
max_pages = cli->cl_max_pages_per_rpc * cli->cl_max_rpcs_in_flight;
if (pga[i]->count > nob_read) {
/* EOF inside this page */
ptr = kmap(pga[i]->pg) +
- (pga[i]->off & ~CFS_PAGE_MASK);
+ (pga[i]->off & ~PAGE_MASK);
memset(ptr + nob_read, 0, pga[i]->count - nob_read);
kunmap(pga[i]->pg);
page_count--;
/* zero remaining pages */
while (page_count-- > 0) {
- ptr = kmap(pga[i]->pg) + (pga[i]->off & ~CFS_PAGE_MASK);
+ ptr = kmap(pga[i]->pg) + (pga[i]->off & ~PAGE_MASK);
memset(ptr, 0, pga[i]->count);
kunmap(pga[i]->pg);
i++;
if (i == 0 && opc == OST_READ &&
OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE)) {
unsigned char *ptr = kmap(pga[i]->pg);
- int off = pga[i]->off & ~CFS_PAGE_MASK;
+ int off = pga[i]->off & ~PAGE_MASK;
memcpy(ptr + off, "bad1", min_t(typeof(nob), 4, nob));
kunmap(pga[i]->pg);
}
cfs_crypto_hash_update_page(hdesc, pga[i]->pg,
- pga[i]->off & ~CFS_PAGE_MASK,
+ pga[i]->off & ~PAGE_MASK,
count);
LL_CDEBUG_PAGE(D_PAGE, pga[i]->pg, "off %d\n",
- (int)(pga[i]->off & ~CFS_PAGE_MASK));
+ (int)(pga[i]->off & ~PAGE_MASK));
nob -= pga[i]->count;
pg_count--;
pg_prev = pga[0];
for (requested_nob = i = 0; i < page_count; i++, niobuf++) {
struct brw_page *pg = pga[i];
- int poff = pg->off & ~CFS_PAGE_MASK;
+ int poff = pg->off & ~PAGE_MASK;
LASSERT(pg->count > 0);
/* make sure there is no gap in the middle of page array */
/* Filesystem lock extents are extended to page boundaries so that
* dealing with the page cache is a little smoother. */
- policy->l_extent.start -= policy->l_extent.start & ~CFS_PAGE_MASK;
- policy->l_extent.end |= ~CFS_PAGE_MASK;
+ policy->l_extent.start -= policy->l_extent.start & ~PAGE_MASK;
+ policy->l_extent.end |= ~PAGE_MASK;
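The two adjusted statements round the lock extent outward to whole pages; a worked example with 4 KiB pages (so ~PAGE_MASK == 0xFFF):

    /*   start == 0x1234:  start -= start & 0xFFF;  ->  0x1000
     *   end   == 0x5678:  end   |= 0xFFF;          ->  0x5FFF
     * i.e. start is rounded down to its page base and end is rounded
     * up to the last byte of its page.
     */
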
/*
* kms is not valid when either object is completely fresh (so that no
/* Filesystem lock extents are extended to page boundaries so that
* dealing with the page cache is a little smoother */
- policy->l_extent.start -= policy->l_extent.start & ~CFS_PAGE_MASK;
- policy->l_extent.end |= ~CFS_PAGE_MASK;
+ policy->l_extent.start -= policy->l_extent.start & ~PAGE_MASK;
+ policy->l_extent.end |= ~PAGE_MASK;
/* Next, search for already existing extent locks that will cover us */
/* If we're trying to read, we also search for an existing PW lock. The
if (off)
memset(p, 0, off);
off = (lnb[i].lnb_page_offset + lnb[i].lnb_len) &
- ~CFS_PAGE_MASK;
+ ~PAGE_MASK;
if (off)
memset(p + off, 0, PAGE_CACHE_SIZE - off);
kunmap(lnb[i].lnb_page);
* For a partial-page truncate, flush the page to disk immediately to
* avoid data corruption during direct disk write. b=17397
*/
- if ((start & ~CFS_PAGE_MASK) != 0)
+ if ((start & ~PAGE_MASK) != 0)
rc = filemap_fdatawrite_range(inode->i_mapping, start, start+1);
h = journal_current_handle();
lnb->lnb_rc = 0;
lnb->lnb_file_offset = off;
- lnb->lnb_page_offset = bufoff & ~CFS_PAGE_MASK;
+ lnb->lnb_page_offset = bufoff & ~PAGE_MASK;
lnb->lnb_len = thispage;
lnb->lnb_page = kmem_to_page(dbp[i]->db_data +
bufoff);
/* We moaned above already... */
return;
}
- req = ptlrpc_request_cache_alloc(ALLOC_ATOMIC_TRY);
+ req = ptlrpc_request_cache_alloc(GFP_ATOMIC);
if (req == NULL) {
CERROR("Can't allocate incoming request descriptor: "
"Dropping %s RPC from %s\n",
struct nrs_orr_req_range *range)
{
/* Should we do this at page boundaries ? */
- range->or_start = nb[0].rnb_offset & CFS_PAGE_MASK;
+ range->or_start = nb[0].rnb_offset & PAGE_MASK;
range->or_end = (nb[niocount - 1].rnb_offset +
- nb[niocount - 1].rnb_len - 1) | ~CFS_PAGE_MASK;
+ nb[niocount - 1].rnb_len - 1) | ~PAGE_MASK;
}
/**
for (i = 0; i < desc->bd_iov_count; i++) {
cfs_crypto_hash_update_page(hdesc, desc->bd_iov[i].kiov_page,
- desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK,
+ desc->bd_iov[i].kiov_offset & ~PAGE_MASK,
desc->bd_iov[i].kiov_len);
}
continue;
ptr = kmap(desc->bd_iov[i].kiov_page);
- off = desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK;
+ off = desc->bd_iov[i].kiov_offset & ~PAGE_MASK;
ptr[off] ^= 0x1;
kunmap(desc->bd_iov[i].kiov_page);
return;
LASSERT(!lustre_handle_is_used(lh));
policy.l_extent.gid = 0;
- policy.l_extent.start = start & CFS_PAGE_MASK;
+ policy.l_extent.start = start & PAGE_MASK;
/*
* If ->o_blocks is EOF it means "lock till the end of the file".
if (end == OBD_OBJECT_EOF || end < start)
policy.l_extent.end = OBD_OBJECT_EOF;
else
- policy.l_extent.end = end | ~CFS_PAGE_MASK;
+ policy.l_extent.end = end | ~PAGE_MASK;
rc = ldlm_cli_enqueue_local(ns, res_id, LDLM_EXTENT, &policy, mode,
flags, ldlm_blocking_ast,
* simulate a client->OST data error */
if (i == 0 && opc == OST_WRITE &&
OBD_FAIL_CHECK(OBD_FAIL_OST_CHECKSUM_RECEIVE)) {
- int off = desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK;
+ int off = desc->bd_iov[i].kiov_offset & ~PAGE_MASK;
int len = desc->bd_iov[i].kiov_len;
struct page *np = tgt_page_to_corrupt;
char *ptr = kmap(desc->bd_iov[i].kiov_page) + off;
}
}
cfs_crypto_hash_update_page(hdesc, desc->bd_iov[i].kiov_page,
- desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK,
+ desc->bd_iov[i].kiov_offset & ~PAGE_MASK,
desc->bd_iov[i].kiov_len);
/* corrupt the data after we compute the checksum, to
* simulate an OST->client data error */
if (i == 0 && opc == OST_READ &&
OBD_FAIL_CHECK(OBD_FAIL_OST_CHECKSUM_SEND)) {
- int off = desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK;
+ int off = desc->bd_iov[i].kiov_offset & ~PAGE_MASK;
int len = desc->bd_iov[i].kiov_len;
struct page *np = tgt_page_to_corrupt;
char *ptr = kmap(desc->bd_iov[i].kiov_page) + off;
#include <getopt.h>
#include <string.h>
#include <mntent.h>
+#include <unistd.h>
#include <errno.h>
#include <err.h>
#include <pwd.h>
#include <getopt.h>
#include <pthread.h>
#include <time.h>
+#include <unistd.h>
#include <utime.h>
#include <sys/time.h>
#include <sys/xattr.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <endian.h>
+#include <unistd.h>
#include <fcntl.h>
#include <time.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
+#include <unistd.h>
#include <stdio.h>
#include <stdarg.h>
#include <ctype.h>
static int getparam_display(struct param_opts *popt, char *pattern)
{
- glob_t glob_info;
+ long page_size = sysconf(_SC_PAGESIZE);
char filename[PATH_MAX + 1]; /* extra 1 byte for file type */
+ glob_t glob_info;
char *buf;
int rc;
int fd;
return -ESRCH;
}
- buf = malloc(PAGE_CACHE_SIZE);
+ buf = malloc(page_size);
if (buf == NULL)
return -ENOMEM;
for (i = 0; i < glob_info.gl_pathc; i++) {
char *valuename = NULL;
- memset(buf, 0, PAGE_CACHE_SIZE);
+ memset(buf, 0, page_size);
/* As listparam_display is used to show param name (with type),
* here "if (only_path)" is ignored.*/
if (popt->po_show_path) {
}
do {
- rc = read(fd, buf, PAGE_CACHE_SIZE);
+ rc = read(fd, buf, page_size);
if (rc == 0)
break;
if (rc < 0) {
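Since PAGE_CACHE_SIZE is a kernel-only constant, the user-space tool now sizes its buffer from sysconf(3) at run time. A self-contained sketch of the same read loop (the path argument is a placeholder):

    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    static int dump_param(const char *path)
    {
            long page_size = sysconf(_SC_PAGESIZE);
            char *buf;
            ssize_t rc;
            int fd;

            buf = malloc(page_size);
            if (buf == NULL)
                    return -ENOMEM;

            fd = open(path, O_RDONLY);
            if (fd < 0) {
                    free(buf);
                    return -errno;
            }

            /* read in page-sized chunks, as getparam_display() does */
            while ((rc = read(fd, buf, page_size)) > 0)
                    fwrite(buf, 1, rc, stdout);

            rc = rc < 0 ? -errno : 0;
            close(fd);
            free(buf);
            return rc;
    }
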
#include <stdio.h>
#include <errno.h>
#include <string.h>
+#include <unistd.h>
#include <config.h>
#include <lustre_disk.h>
#include <lustre_ver.h>